def load_specific_dataset(self):
        # NOTE: the first filename below is immediately overwritten by the
        # second; keep only the config you actually want to load
        dataset_config_filename = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence',
                                            'dataset', 'composite', 'hats_3_demo_composite.yaml')

        dataset_config_filename = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config',
                                               'dense_correspondence',
                                               'dataset', 'composite', '4_shoes_all.yaml')

        dataset_config = utils.getDictFromYamlFilename(dataset_config_filename)
        self._dataset = SpartanDataset(config=dataset_config)
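The pattern above switches datasets by editing code. A minimal parameterized sketch of the same method (the keyword argument is an illustrative addition, not part of the original API):

    def load_specific_dataset(self, config_name='4_shoes_all.yaml'):
        # config_name: any YAML under config/dense_correspondence/dataset/composite
        dataset_config_filename = os.path.join(
            utils.getDenseCorrespondenceSourceDir(), 'config',
            'dense_correspondence', 'dataset', 'composite', config_name)
        dataset_config = utils.getDictFromYamlFilename(dataset_config_filename)
        self._dataset = SpartanDataset(config=dataset_config)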
Code example #2
    @staticmethod
    def load_default_config():
        dc_source_dir = utils.getDenseCorrespondenceSourceDir()
        config_file = os.path.join(dc_source_dir, 'config', 'dense_correspondence',
                                   'training', 'training.yaml')

        config = utils.getDictFromYamlFilename(config_file)
        return config
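A short usage sketch, calling the method as a plain function for brevity; the "training" and "dense_correspondence_network" keys are the ones modified in the training example later in this listing, and the values here are illustrative:

    config = load_default_config()
    config["training"]["logging_dir_name"] = "my_run"  # illustrative value
    config["dense_correspondence_network"]["descriptor_dimension"] = 3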
Code example #3
    def load_specific_dataset(self):
        dataset_config_filename = os.path.join(
            utils.getDenseCorrespondenceSourceDir(), 'config',
            'dense_correspondence', 'dataset', 'composite',
            'rope_nonrigid_412vert_only.yaml')

        dataset_config = utils.getDictFromYamlFilename(dataset_config_filename)
        self._dataset = SpartanDataset(config=dataset_config)
Code example #4
 def get_descriptor_target_from_yaml(self):
     """
     Grabs a 1-dimensional numpy array of length D from the descriptor yaml file
     """
     descriptor_filename = os.path.join(pdc_utils.getDenseCorrespondenceSourceDir(), "../config", "new_descriptor_picked.yaml")
     descriptor_dict = pdc_utils.getDictFromYamlFilename(descriptor_filename)
     descriptor_list = descriptor_dict["descriptor"]
     return np.asarray(descriptor_list)
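For reference, a minimal sketch of the YAML layout this method assumes (a single "descriptor" key holding a list of D floats), written and read back here with plain PyYAML; the path and values are illustrative:

    import numpy as np
    import yaml

    with open("/tmp/new_descriptor_picked.yaml", "w") as f:
        yaml.safe_dump({"descriptor": [0.1, -0.4, 0.7]}, f)  # D = 3 here

    with open("/tmp/new_descriptor_picked.yaml") as f:
        descriptor = np.asarray(yaml.safe_load(f)["descriptor"])  # shape (3,)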
Code example #5
 def get_unet(config):
     """
     Returns a Unet nn.module that satisfies the fcn properties stated in the get_fcn() docstring
     """
     dc_source_dir = utils.getDenseCorrespondenceSourceDir()
     sys.path.append(os.path.join(dc_source_dir, 'external/unet-pytorch'))
     from unet_model import UNet
     model = UNet(num_classes=config["descriptor_dimension"]).cuda()
     return model
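A hedged usage sketch: the config key comes from the snippet itself, while the input resolution is an assumption (pdc typically processes 640x480 RGB frames); running it requires a CUDA device and the external unet-pytorch checkout:

    import torch

    config = {"descriptor_dimension": 3}
    model = get_unet(config)                    # builds the UNet and moves it to the GPU
    image = torch.randn(1, 3, 480, 640).cuda()  # one RGB frame (assumed size)
    descriptors = model(image)                  # expected shape: (1, 3, 480, 640)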
Code example #6
    @staticmethod
    def make_default_caterpillar():
        """
        Makes a default SpartanDataset from the caterpillar_only.yaml config
        :return: a dataset in train mode
        :rtype: SpartanDataset
        """
        config_file = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence',
                                   'dataset', 'composite',
                                   'caterpillar_only.yaml')

        config = utils.getDictFromYamlFilename(config_file)
        dataset = SpartanDataset(mode="train", config=config)
        return dataset
Code example #7
    def __init__(self, config_filename='shoes_all.yaml'):

        with HiddenPrints():

            self.config_filename = os.path.join(
                utils.getDenseCorrespondenceSourceDir(), 'config',
                'dense_correspondence', 'dataset', 'composite',
                config_filename)
            self.train_config_filename = os.path.join(
                utils.getDenseCorrespondenceSourceDir(), 'config',
                'dense_correspondence', 'training', 'training.yaml')

            self.config = utils.getDictFromYamlFilename(self.config_filename)
            self.train_config = utils.getDictFromYamlFilename(
                self.train_config_filename)

            self.dataset = SpartanDataset(config=self.config)
            self.dataset.set_parameters_from_training_config(self.train_config)

        # holds centroid and radius for each scene
        # these are for min and max z values currently. maybe include x, y, and z in the future.
        # self.centroid_and_radius[scene_name]["centroid"] or self.centroid_and_radius[scene_name]["radius"]
        self.centroid_and_radius = {}
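A sketch of the dictionary layout the comment describes, with made-up numbers (the scene name and values are illustrative):

    self.centroid_and_radius["2018-04-16-14-25-19"] = {
        "centroid": 0.72,  # e.g. the mid z value for the scene, in meters
        "radius": 0.15,    # half the z extent
    }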
Code example #8
def run(data_folder, ply_binary_filename='images.ply'):

    # build the ply tools if they are not already present
    os.chdir(data_folder)
    vtp_filename = os.path.join(data_folder, 'images.vtp')
    dc_source_dir = utils.getDenseCorrespondenceSourceDir()

    ply_to_ascii_executable = os.path.join(dc_source_dir, 'src', 'ply',
                                           'ply2ascii')
    path_to_ply = os.path.join(dc_source_dir, "src", "ply")
    if not (os.path.isfile(ply_to_ascii_executable)):
        os.system("cd " + path_to_ply + " && make")

    correct_ply_header_file = os.path.join(dc_source_dir, 'config',
                                           'correct_ply_header.txt')

    ply_binary_full_filename = os.path.join(data_folder, ply_binary_filename)
    converted_ascii_filename = os.path.join(data_folder,
                                            "converted_to_ascii.ply")
    converted_ascii_modified_header_filename = os.path.join(
        data_folder, "converted_to_ascii_modified_header.ply")

    # call ply2ascii
    os.system(ply_to_ascii_executable + "<./" + ply_binary_filename + "> " +
              converted_ascii_filename)

    # change header to be compatible with Director
    # TODO: make so Director accepts other header?
    line_elements_vertex = ""
    with open(converted_ascii_modified_header_filename, 'w') as outfile:
        with open(converted_ascii_filename) as infile:
            counter = 0
            for line in infile:
                counter += 1
                if counter == 3:
                    line_elements_vertex = line
                    break
        with open(correct_ply_header_file) as infile:
            counter = 0
            for line in infile:
                counter += 1
                if counter == 4:
                    outfile.write(line_elements_vertex)
                    continue
                outfile.write(line)
        with open(converted_ascii_filename) as infile:
            num_skip = 14  # length of the original ascii ply header
            counter = 0
            for line in infile:
                counter += 1
                if counter <= num_skip:
                    continue
                outfile.write(line)

    # convert to vtp
    convert_ply_to_vtp_script = os.path.join(
        dc_source_dir, 'modules', 'dense_correspondence_manipulation',
        'scripts', 'convertPlyToVtp.py')

    print "converted to ascii ply format"

    os.system("directorPython " + convert_ply_to_vtp_script + " " +
              converted_ascii_modified_header_filename)

    converted_ascii_modified_header_vtp_filename = os.path.join(
        data_folder, "converted_to_ascii_modified_header.vtp")

    print "finished convert_ply_to_vtp_script"

    # clean up and rename
    # os.system("rm *.ply *.freiburg")
    os.rename(converted_ascii_modified_header_vtp_filename, vtp_filename)
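The header splice above copies line 3 of the converted file (the "element vertex N" line) into line 4 of the header template, then appends everything after the original 14-line header. A sketch of the same logic using itertools.islice, with the same hard-coded line positions:

    from itertools import islice

    def splice_ply_header(ascii_ply, header_template, out_path):
        # line 3 of the ascii ply holds the "element vertex N" count
        with open(ascii_ply) as f:
            element_vertex_line = next(islice(f, 2, 3))
        with open(out_path, "w") as out:
            with open(header_template) as f:
                for i, line in enumerate(f, start=1):
                    out.write(element_vertex_line if i == 4 else line)
            with open(ascii_ply) as f:
                out.writelines(islice(f, 14, None))  # skip the original header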
Code example #9

import os
import numpy as np

# pdc
import dense_correspondence_manipulation.utils.utils as utils
from dense_correspondence.evaluation.evaluation import DenseCorrespondenceEvaluation
from dense_correspondence.dataset.spartan_dataset_masked import SpartanDataset
from dense_correspondence.dataset.scene_structure import SceneStructure
"""
Computes descriptor images for a given scene and network, and saves them as
.npy files.

Usage: modify the global variables in CAPS as needed.
"""

DC_SOURCE_DIR = utils.getDenseCorrespondenceSourceDir()
NETWORK_NAME = "caterpillar_M_background_0.500_3"
EVALUATION_CONFIG_FILENAME = os.path.join(DC_SOURCE_DIR, 'config',
                                          'dense_correspondence', 'evaluation',
                                          'lucas_evaluation.yaml')
DATASET_CONFIG_FILE = os.path.join(DC_SOURCE_DIR, 'config',
                                   'dense_correspondence', 'dataset',
                                   'composite', 'caterpillar_only_9.yaml')

SCENE_NAME = "2018-04-16-14-25-19"

SAVE_DIR = os.path.join("/home/manuelli/code/data_volume/pdc/logs_test",
                        SCENE_NAME, "processed", "descriptor_images",
                        NETWORK_NAME)
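A sketch of how these globals might be consumed, following the evaluation API used elsewhere in this repo; treat the method names and the frame-iteration step as assumptions rather than the script's actual body:

    utils.add_dense_correspondence_to_python_path()
    eval_config = utils.getDictFromYamlFilename(EVALUATION_CONFIG_FILENAME)
    dce = DenseCorrespondenceEvaluation(config=eval_config)  # assumes the constructor takes a config dict
    dcn = dce.load_network_from_config(NETWORK_NAME)         # assumes this loads the named trained network

    # for each RGB frame in SCENE_NAME: compute a descriptor image with the
    # network and save it, e.g. np.save(os.path.join(SAVE_DIR, "000000.npy"), res)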

Code example #10
import os
import dense_correspondence_manipulation.utils.utils as utils

CHANGE_DETECTION_CONFIG_FILE = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'stations', 'RLG_iiwa_1', 'change_detection.yaml')

CHANGE_DETECTION_BACKGROUND_SUBTRACTION_CONFIG_FILE = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'stations', 'RLG_iiwa_1', 'change_detection_background_subtraction.yaml')

BACKGROUND_SCENE_DATA_FOLDER = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'data_volume', 'pdc','logs_proto', '14_background')

DEPTH_IM_SCALE = 1000.0   # depth images are saved as uint16 with integer values in
                          # millimeters; dividing by this scale converts mm to meters

DEPTH_IM_RESCALE = 4000.0 # only for visualization purposes

IMAGE_NET_MEAN = [0.485, 0.456, 0.406]
IMAGE_NET_STD_DEV = [0.229, 0.224, 0.225]

DEFAULT_IMAGE_MEAN = [0.5573105812072754, 0.37420374155044556, 0.37020164728164673]
DEFAULT_IMAGE_STD_DEV = [0.24336038529872894, 0.2987397611141205, 0.31875079870224]
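A short sketch of how these constants are typically applied: converting a raw uint16 depth image to meters and normalizing an RGB image with the ImageNet statistics (the arrays here are synthetic):

    import numpy as np

    depth_uint16 = np.full((480, 640), 723, dtype=np.uint16)    # synthetic depth, in mm
    depth_m = depth_uint16.astype(np.float32) / DEPTH_IM_SCALE  # 0.723 m everywhere

    rgb = np.random.rand(480, 640, 3).astype(np.float32)        # synthetic image in [0, 1]
    rgb_normalized = (rgb - np.array(IMAGE_NET_MEAN)) / np.array(IMAGE_NET_STD_DEV)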
Code example #11
    if not debug:
        TimerCallback(callback=single_shot_function).singleShot(0)

    app.app.start(restoreWindow=True)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data_dir",
        type=str,
        default=
        '/home/manuelli/code/data_volume/sandbox/drill_scenes/04_drill_long_downsampled'
    )

    default_config_file = os.path.join(utils.getDenseCorrespondenceSourceDir(),
                                       'config', 'stations', 'RLG_iiwa_1',
                                       'change_detection.yaml')
    parser.add_argument("--config_file", type=str, default=default_config_file)

    parser.add_argument(
        '--current_dir',
        action='store_true',
        default=False,
        help="run the script with --data_dir set to the current directory")

    parser.add_argument('--debug',
                        action='store_true',
                        default=False,
                        help="launch the app in debug mode")
Code example #12
from utils import *

# pdc_ros_msgs
import pdc_ros_msgs.msg

import time
import sys
import os
import cv2
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from PIL import Image as PILImage

import dense_correspondence_manipulation.utils.utils as pdc_utils
pdc_utils.add_dense_correspondence_to_python_path()
dc_source_dir = pdc_utils.getDenseCorrespondenceSourceDir()
from dense_correspondence.evaluation.evaluation import DenseCorrespondenceEvaluation
from dense_correspondence.evaluation.plotting import normalize_descriptor

import torch

NETWORK_CONFIG_FILENAME = os.path.join(get_config_directory(),
                                       'trained_networks.yaml')
NETWORK_NAME = "shoes_consistent_M_background_0.500_3"
RGB_TOPIC = "/camera_carmine_1/rgb/image_rect_color"


class StreamingPdcRos(object):
    def __init__(self):
        self.bridge = None
        self.load_dcn_network()
Code example #13

import sys
import os
import cv2
import numpy as np
import copy

import dense_correspondence_manipulation.utils.utils as utils
dc_source_dir = utils.getDenseCorrespondenceSourceDir()
sys.path.append(dc_source_dir)
sys.path.append(os.path.join(dc_source_dir, "dense_correspondence", "correspondence_tools"))
from dense_correspondence.dataset.spartan_dataset_masked import SpartanDataset, ImageType

import dense_correspondence
from dense_correspondence.evaluation.evaluation import *
from dense_correspondence.evaluation.plotting import normalize_descriptor
from dense_correspondence.network.dense_correspondence_network import DenseCorrespondenceNetwork

sys.path.append(os.path.join(os.path.dirname(__file__), "../simple-pixel-correspondence-labeler"))
from annotate_correspondences import label_colors, draw_reticle, pil_image_to_cv2, drawing_scale_config, numpy_to_cv2



COLOR_RED = np.array([0, 0, 255])
COLOR_GREEN = np.array([0,255,0])

utils.set_default_cuda_visible_devices()
eval_config_filename = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence', 'evaluation', 'lucas_evaluation.yaml')
EVAL_CONFIG = utils.getDictFromYamlFilename(eval_config_filename)


class HeatmapVisualization(object):
Code example #14

import sys
import os
import cv2
import numpy as np
import copy

import dense_correspondence_manipulation.utils.utils as utils
dc_source_dir = utils.getDenseCorrespondenceSourceDir()
sys.path.append(dc_source_dir)
sys.path.append(os.path.join(dc_source_dir, "dense_correspondence", "correspondence_tools"))
from dense_correspondence.dataset.spartan_dataset_masked import SpartanDataset, ImageType

config_filename = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence', 
                               'dataset', 'composite', 'star_bot_front_only.yaml')
config = utils.getDictFromYamlFilename(config_filename)
sd = SpartanDataset(config=config)
sd.set_train_mode()


USE_FIRST_IMAGE = False # force using the first image in each log
RANDOMIZE_TEST_TRAIN = False # randomize selecting between the test and train sets

def numpy_to_cv2(numpy_img):
    return numpy_img[:, :, ::-1].copy() # reverse channel order (RGB <-> BGR)

def pil_image_to_cv2(pil_image):
    return np.array(pil_image)[:, :, ::-1].copy() # reverse channel order (RGB <-> BGR)

def get_cv2_img_pair_from_spartan():
    scene_name_a = sd.get_random_scene_name()
    num_attempts = 50
    for i in range(num_attempts):
Code example #15

        if not os.path.isdir(processed_dir):
            raise ValueError("Need to extract and run fusion on " + full_dir)

        if already_ran_change_detection(processed_dir):
            print "already_ran_change_detection for", processed_dir
            continue

        # print "full_dir", full_dir
        #print "would have run on ", full_dir
        run(processed_dir, config_file)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", type=str, default='/home/manuelli/code/data_volume/sandbox/drill_scenes/01_drill')

    default_config_file = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'stations', 'RLG_iiwa_1', 'change_detection.yaml')
    parser.add_argument("--config_file", type=str, default=default_config_file)

    parser.add_argument('--current_dir', action='store_true', default=True, help="run the script with --data_dir set to the current directory")

    args = parser.parse_args()
    data_folder = args.data_dir

    if args.current_dir:
        print "running with data_dir set to current working directory . . . "
        data_folder = os.getcwd()

    # run(data_folder, config_file=args.config_file)
    run_on_all_subfolders(data_folder, args.config_file)
Code example #16
    def __init__(self, config_path):
        # manually written configs; debug use only
        self.config = {}
        self.config['generate_dataset'] = {}
        self.config['generate_dataset']['required'] = False
        self.config['generate_dataset']['generate_depth'] = True
        self.config['generate_dataset']['data_source'] = '../data/pdc'
        self.config['generate_dataset'][
            'original_data'] = 'logs_proto_original'
        self.config['generate_dataset']['meta_dir'] = None
        self.config['generate_dataset']['output_target'] = 'rendered_images'
        self.config['generate_dataset']['image_dir'] = None
        self.config['generate_dataset']['output_dir'] = None
        self.config['generate_dataset'][
            'model_path'] = '../data/pdc/depth_models/weights_199'
        self.config['generate_dataset']['scaling_method'] = 'default_scaling'
        self.config['generate_dataset']['zero_masked'] = False
        self.config['generate_dataset']['ext'] = 'png'
        self.config['generate_dataset']['no_cuda'] = False
        self.config['generate_dataset']['replace_poses'] = False
        self.config['generate_dataset']['pose_data_path'] = '../data/pdc/poses'

        self.config['train'] = {}
        self.config['train']['required'] = True
        # self.config['train']['dataset_config_file'] = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence',
        #                        'dataset', 'composite', 'caterpillar_upright.yaml')
        self.config['train']['dataset_config_file'] = os.path.join(
            utils.getDenseCorrespondenceSourceDir(), 'config', 'kitti',
            'kitti.yaml')
        self.config['train']['train_config_file'] = os.path.join(
            utils.getDenseCorrespondenceSourceDir(), 'config',
            'dense_correspondence', 'training', 'training.yaml')
        self.config['train'][
            'logging_dir'] = "trained_models/new_new_test_caterpillar/"
        self.config['train']['num_iterations'] = (1500 / 4) - 1
        self.config['train']['dimension'] = 3
        # self.config['train']['dataset'] = "logs_proto_original"
        # self.config['train']['dataset'] = "experiments/exp_07232020-184440/logs_proto_default_scaling_gt_pose"
        # self.config['train']['dataset'] = "experiments/exp_05262020-174446/logs_proto_default_scaling_gt_pose"
        # self.config['train']['dataset'] = "logs_proto_unit_scaling_gt_pose"
        # self.config['train']['dataset'] = "logs_proto_default_scaling_gt_pose"
        # self.config['train']['dataset'] = None
        self.config['train']['dataset'] = "logs_proto_kitti_raw"

        self.config['evaluate'] = {}
        self.config['evaluate']['required'] = False
        # self.config['evaluate']['model_lst'] = ['trained_models/new_test_caterpillar/default_scaling_gt_pose_3','trained_models/new_test_caterpillar/original_3','trained_models/new_test_caterpillar/unit_scaling_gt_pose_3']
        self.config['evaluate']['model_lst'] = [
            'experiments/exp_08202020-152733/trained_models/kitti_whole_3'
        ]
        self.config['evaluate']['model_lst'].append(
            'experiments/exp_08202020-155210/trained_models/kitti_left_3')
        self.config['evaluate']['model_lst'].append(
            'experiments/exp_08202020-161940/trained_models/kitti_right_3')

        self.config['evaluate']['num_image_pairs'] = 100
        self.config['evaluate']['gt_dataset_config_file'] = os.path.join(
            utils.getDenseCorrespondenceSourceDir(), 'config',
            'dense_correspondence', 'evaluation', 'kitti_whole.yaml')
        # self.config['evaluate']['gt_dataset_config_file'] = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config','dense_correspondence', 'evaluation', 'gt_dataset.yaml')

        self.config['experiments'] = {}
        self.config['experiments']['as_experiment'] = True

        # init start
        self.config_path = config_path
        # self.config = utils.getDictFromYamlFilename(config_path)

        # configs about experiment phase
        self.as_experiment = self.config['experiments']['as_experiment']
        # create experiment folder if needed
        if self.as_experiment:
            self.current_timestamp = datetime.now().strftime("%m%d%Y-%H%M%S")
            self.exp_dir = 'experiments/exp_' + self.current_timestamp

        # configs about dataset generation
        if not self.config['generate_dataset']['required']:
            self.generate_dataset_required = False
        else:
            self.generate_dataset_required = True
            if self.config['generate_dataset']['generate_depth']:
                self.generate_depth_required = True
                self.data_source = self.config['generate_dataset'][
                    'data_source']
                self.original_data = self.config['generate_dataset'][
                    'original_data']
                self.meta_dir = self.config['generate_dataset']['meta_dir']
                self.output_target = self.config['generate_dataset'][
                    'output_target']
                self.image_dir = self.config['generate_dataset']['image_dir']
                self.output_dir = self.config['generate_dataset']['output_dir']
                self.model_path = self.config['generate_dataset']['model_path']
                self.scaling_method = self.config['generate_dataset'][
                    'scaling_method']
                self.zero_masked = self.config['generate_dataset'][
                    'zero_masked']
                self.ext = self.config['generate_dataset']['ext']
                self.no_cuda = self.config['generate_dataset']['no_cuda']
            else:
                self.generate_depth_required = False

            if self.config['generate_dataset']['replace_poses']:
                self.replace_poses_required = True
                # 'pose_data_path' is a sibling key of 'replace_poses' (see above)
                self.pose_data_path = self.config['generate_dataset'][
                    'pose_data_path']
            else:
                self.replace_poses_required = False

            # set up dataset name
            self.dataset_name = 'logs_proto_'
            if self.generate_depth_required:
                self.dataset_name += self.scaling_method + '_'
            else:
                self.dataset_name += 'gt_depth_'

            if self.replace_poses_required:
                self.dataset_name += 'pred_pose'
            else:
                self.dataset_name += 'gt_pose'

            if self.as_experiment:
                self.dataset_name = self.exp_dir + '/' + self.dataset_name

            if self.meta_dir is None:
                self.meta_dir = os.path.join(self.data_source,
                                             self.dataset_name)

        # configs about training phase
        if not self.config['train']['required']:
            self.train_required = False
        else:
            self.train_required = True
            self.train_dataset_config_file = self.config['train'][
                'dataset_config_file']
            self.train_config_file = self.config['train']['train_config_file']
            self.train_logging_dir = self.config['train']['logging_dir']
            self.train_num_iterations = self.config['train']['num_iterations']
            self.train_dimension = self.config['train']['dimension']
            self.train_dataset = ''
            if self.generate_dataset_required:
                self.train_dataset = self.dataset_name
            if self.config['train']['dataset'] is not None:
                self.train_dataset = self.config['train']['dataset']

            if self.as_experiment:
                self.train_logging_dir = self.exp_dir + '/trained_models'

        # configs about evaluation phase
        if not self.config['evaluate']['required']:
            self.evaluate_required = False
        else:
            self.evaluate_required = True
            self.eval_model_lst = self.config['evaluate']['model_lst']
            self.eval_num_image_pairs = self.config['evaluate'][
                'num_image_pairs']
            self.eval_gt_dataset_config_file = self.config['evaluate'][
                'gt_dataset_config_file']
Code example #17

import sys
import os
import cv2
import numpy as np
import copy
from config.params import *
import logging

os.environ['DC_SOURCE_DIR'] = DIR_PROJ
os.environ['DC_DATA_DIR'] = "{}/pdc".format(DIR_DATA)
import ipdb
st = ipdb.set_trace
# st()

import dense_correspondence_manipulation.utils.utils as utils

dc_source_dir = utils.getDenseCorrespondenceSourceDir()
sys.path.append(dc_source_dir)
sys.path.append(os.path.join(dc_source_dir, "dense_correspondence", "correspondence_tools"))
from dense_correspondence.dataset.spartan_dataset_masked import SpartanDataset, ImageType

import dense_correspondence
from dense_correspondence.evaluation.evaluation import *
from dense_correspondence.evaluation.plotting import normalize_descriptor
from dense_correspondence.network.dense_correspondence_network import DenseCorrespondenceNetwork


import dense_correspondence_manipulation.utils.visualization as vis_utils


from dense_correspondence_manipulation.simple_pixel_correspondence_labeler.annotate_correspondences import label_colors, draw_reticle, pil_image_to_cv2, drawing_scale_config, numpy_to_cv2
Code example #18

 def load_default_config():
     default_config_file = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'stations', 'RLG_iiwa_1',
                                        'change_detection.yaml')
     config = utils.getDictFromYamlFilename(default_config_file)
     return config
Code example #19
import dense_correspondence_manipulation.utils.utils as utils
utils.add_dense_correspondence_to_python_path()
from dense_correspondence.training.training import *
import os
import sys
import logging

#utils.set_default_cuda_visible_devices()
# utils.set_cuda_visible_devices([0]) # use this to manually set CUDA_VISIBLE_DEVICES

from dense_correspondence.training.training import DenseCorrespondenceTraining
from dense_correspondence.dataset.spartan_dataset_masked import SpartanDataset
logging.basicConfig(level=logging.INFO)

from dense_correspondence.evaluation.evaluation import DenseCorrespondenceEvaluation

config_filename = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence',
                               'dataset', 'composite', 'toy.yaml')
config = utils.getDictFromYamlFilename(config_filename)

train_config_file = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence',
                               'training', 'toy_training.yaml')

train_config = utils.getDictFromYamlFilename(train_config_file)
dataset = SpartanDataset(config=config)

logging_dir = "/home/zhouxian/git/pytorch-dense-correspondence/pdc/trained_models/tutorials"
d = 3 # the descriptor dimension
name = "toy_hacker_%d" %(d)
train_config["training"]["logging_dir_name"] = name
train_config["training"]["logging_dir"] = logging_dir
train_config["dense_correspondence_network"]["descriptor_dimension"] = d
Code example #20
    
    DCE = DenseCorrespondenceEvaluation

    for subdir in model_lst:
        print("evaluate model {}".format(subdir))
        start_time = time.time()
        output_subdir = os.path.join(utils.get_data_dir(), output_dir, subdir.split('/')[-1])
        DCE.run_evaluation_on_network(model_folder=subdir, compute_descriptor_statistics=True, cross_scene=False,
            output_dir=output_subdir, num_image_pairs=num_image_pairs,dataset=gt_dataset)
        end_time = time.time()
        print("evaluation takes %.2f seconds" %(end_time - start_time))

if __name__ == '__main__':
    args = parse_args()
    model_dir = args.model_dir
    output_dir = args.output_dir
    print('output_dir')
    print(output_dir)
    num_image_pairs = args.num_image_pairs
    gt_dataset_config_file = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config','dense_correspondence', 'evaluation', 'gt_dataset.yaml')
    # gt_dataset_config_file = '/home/kudo/data/pdc/trained_models/new_test_caterpillar/default_scaling_gt_pose_3/dataset.yaml'
    gt_dataset_config = utils.getDictFromYamlFilename(gt_dataset_config_file)

    gt_dataset_config = None  # NOTE: overrides the config loaded above, so evaluation runs without a gt dataset

    abs_model_dir = utils.convert_data_relative_path_to_absolute_path(model_dir)
    d = abs_model_dir
    model_lst = [model_dir + '/' + o for o in os.listdir(d) if os.path.isdir(os.path.join(d,o))]
    print("model list")
    print(model_lst)
    evaulate_model(model_lst, output_dir, num_image_pairs, gt_dataset_config)
Code example #21
import sys
import os
import cv2
import numpy as np
import copy
import dense_correspondence_manipulation.utils.utils as utils
import time
from PIL import Image

dc_source_dir = utils.getDenseCorrespondenceSourceDir()
sys.path.append(dc_source_dir)
sys.path.append(
    os.path.join(dc_source_dir, "dense_correspondence",
                 "correspondence_tools"))
from dense_correspondence.dataset.spartan_dataset_masked import SpartanDataset, ImageType

import dense_correspondence
from dense_correspondence.evaluation.evaluation import *
from dense_correspondence.evaluation.plotting import normalize_descriptor
from dense_correspondence.network.dense_correspondence_network import DenseCorrespondenceNetwork
import dense_correspondence.correspondence_tools.correspondence_finder as correspondence_finder

sys.path.append(
    os.path.join(os.path.dirname(__file__),
                 "../simple-pixel-correspondence-labeler"))
from annotate_correspondences import label_colors, draw_reticle, pil_image_to_cv2, drawing_scale_config, numpy_to_cv2

COLOR_RED = np.array([0, 0, 255])
COLOR_GREEN = np.array([0, 255, 0])

utils.set_default_cuda_visible_devices()
Code example #22
import dense_correspondence_manipulation.utils.utils as utils
import dense_correspondence.correspondence_tools.correspondence_plotter as correspondence_plotter
from dense_correspondence.dataset.dense_correspondence_dataset_masked import ImageType
from dense_correspondence.dataset.spartan_dataset_masked import SpartanDataset

import os
import torch
import numpy as np
import argparse
#%matplotlib inline

if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument("--data_name",
                        type=str,
                        default="caterpillar_upright.yaml")

    args = parser.parse_args()

    dataset_config_filename = os.path.join(
        utils.getDenseCorrespondenceSourceDir(), 'config',
        'dense_correspondence', 'dataset', 'composite', args.data_name)

    dataset_config = utils.getDictFromYamlFilename(dataset_config_filename)

    dataset = SpartanDataset(debug=True, config=dataset_config)

    match_type, image_a_rgb, image_b_rgb, \
    matches_a, matches_b, masked_non_matches_a, \
    masked_non_matches_b, non_masked_non_matches_a, \
    non_masked_non_matches_b, blind_non_matches_a, \
    blind_non_matches_b, metadata = dataset.get_single_object_within_scene_data()
Code example #23
import sys
import os
import cv2
import numpy as np
import copy

import dense_correspondence_manipulation.utils.utils as utils
dc_source_dir = utils.getDenseCorrespondenceSourceDir()
sys.path.append(dc_source_dir)
sys.path.append(os.path.join(dc_source_dir, "dense_correspondence", "correspondence_tools"))
from dense_correspondence.dataset.spartan_dataset_masked import SpartanDataset, ImageType

from dense_correspondence_manipulation.simple_pixel_correspondence_labeler.annotate_correspondences import label_colors, draw_reticle, pil_image_to_cv2, drawing_scale_config

config_filename = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence', 
                               'dataset', 'composite', 'caterpillar_baymax_starbot_onlymulti_front.yaml')
config = utils.getDictFromYamlFilename(config_filename)
sd = SpartanDataset(config=config)
sd.set_train_mode()

annotated_data_yaml_filename = os.path.join(os.getcwd(), "new_annotated_pairs.yaml")
annotated_data = utils.getDictFromYamlFilename(annotated_data_yaml_filename)

index_of_pair_to_display = 0

def draw_points(img, img_points_picked):
    for index, img_point in enumerate(img_points_picked):
        color = label_colors[index%len(label_colors)]
        draw_reticle(img, int(img_point["u"]), int(img_point["v"]), color)

def next_image_pair_from_saved():