def load_dcn_network(self):
    """Load the dense correspondence network (DCN) and its training dataset.

    Reads the network config from NETWORK_CONFIG_FILENAME, pins execution to
    GPU 0, loads the network named by the module-level NETWORK_NAME, and puts
    it in eval (inference) mode.

    Side effects:
        Sets ``self.dcn`` (the loaded network) and ``self.dataset`` (its
        training dataset).
    """
    config = pdc_utils.getDictFromYamlFilename(NETWORK_CONFIG_FILENAME)
    # NOTE(review): defaults_config is never used below — kept only in case
    # get_defaults_config() has side effects; confirm and remove if not.
    defaults_config = pdc_utils.get_defaults_config()
    pdc_utils.set_cuda_visible_devices([0])

    dce = DenseCorrespondenceEvaluation(config)
    self.dcn = dce.load_network_from_config(NETWORK_NAME)
    self.dcn.eval()  # switch to inference mode

    # TODO(review): confirm whether loading the training dataset is actually
    # required here (original comment asked the same question).
    self.dataset = self.dcn.load_training_dataset()

    # bug fix: was a Python-2-only `print` statement; the parenthesized form
    # is valid in both Python 2 and 3 and matches print() usage elsewhere.
    print("finished loading dcn")
def load_dcn_network(self):
    """Load the DCN named by ``self.pick_point_config["network_name"]``.

    Edit the pick-point config to change which network gets loaded. Reads the
    network config from NETWORK_CONFIG_FILENAME, pins execution to GPU 0,
    loads the network, and puts it in eval (inference) mode.

    Side effects:
        Sets ``self.dcn`` (the loaded network) and ``self.dataset`` (its
        training dataset).
    """
    config = pdc_utils.getDictFromYamlFilename(NETWORK_CONFIG_FILENAME)
    # NOTE(review): defaults_config is never used below — kept only in case
    # get_defaults_config() has side effects; confirm and remove if not.
    defaults_config = pdc_utils.get_defaults_config()
    pdc_utils.set_cuda_visible_devices([0])

    dce = DenseCorrespondenceEvaluation(config)
    network_name = self.pick_point_config["network_name"]
    self.dcn = dce.load_network_from_config(network_name)
    # bug fix: eval() was missing in this variant — the sibling loader puts
    # the network in inference mode right after loading; without it the
    # network stays in training mode during inference.
    self.dcn.eval()

    # TODO(review): confirm whether loading the training dataset is actually
    # required here (original comment asked the same question).
    self.dataset = self.dcn.load_training_dataset()

    # bug fix: was a Python-2-only `print` statement; the parenthesized form
    # is valid in both Python 2 and 3 and matches print() usage elsewhere.
    print("finished loading dcn")
def main():
    """Launch the interactive heatmap visualization for a dense-descriptor model.

    Selects the freest GPU, resolves dataset/model paths for the chosen
    dataset name, loads the episodes, the model config, and the model
    checkpoint, then runs HeatmapVisualization. Blocks until the
    visualization loop exits.
    """
    set_cuda_visible_devices([get_freer_gpu()])

    # model_file, dataset_name = get_DD_model_file()
    # bug fix: the first assignment was live code immediately overwritten by
    # the second; keep it as a commented-out alternative like the others below.
    # dataset_name = "box_push_1000_top_down"
    dataset_name = "box_push_1000_angled"
    dataset_paths = exp_utils.get_dataset_paths(dataset_name)
    dataset_root = dataset_paths['dataset_root']
    model_file = dataset_paths['dense_descriptor_model_chkpt']

    multi_episode_dict = DCDrakeSimEpisodeReader.load_dataset(
        dataset_root, max_num_episodes=None)

    camera_list = [dataset_paths['main_camera_name']]
    target_camera_names = [dataset_paths['main_camera_name']]
    # camera_list = ['camera_1_top_down']
    # target_camera_names = ['camera_1_top_down']

    model_config_file = os.path.join(os.path.dirname(model_file), 'config.yaml')
    model_config = getDictFromYamlFilename(model_config_file)
    # no augmentation at visualization time
    model_config['dataset']['data_augmentation'] = False

    dataset = DynamicDrakeSimDataset(
        model_config, multi_episode_dict, phase="valid")  # could also use train data

    model = torch.load(model_file)
    model = model.cuda()
    model = model.eval()  # inference mode

    heatmap_vis = HeatmapVisualization(model_config,
                                       dataset,
                                       model,
                                       visualize_3D=False,
                                       camera_list=camera_list,
                                       target_camera_names=target_camera_names,
                                       verbose=True,
                                       sample_same_episode=False)
    heatmap_vis.run()
import numpy as np import copy import transforms3d import time import math import matplotlib.pyplot as plt # need to import pydrake before pdc import pydrake import meshcat # pdc for setting available GPU's from dense_correspondence_manipulation.utils.utils import set_cuda_visible_devices GPU_LIST = [0] set_cuda_visible_devices(GPU_LIST) import torch # dense correspondence from dense_correspondence_manipulation.utils import constants from dense_correspondence_manipulation.utils import utils as pdc_utils from dense_correspondence.network import predict import dense_correspondence_manipulation.utils.visualization as vis_utils # key_dynam from key_dynam.utils.utils import get_project_root, load_yaml, get_current_YYYY_MM_DD_hh_mm_ss_ms, get_data_root, \ load_pickle from key_dynam.envs.drake_pusher_slider import DrakePusherSliderEnv from key_dynam.envs.drake_mugs import DrakeMugsEnv from key_dynam.envs import utils as env_utils
from key_dynam.utils.utils import get_project_root, load_yaml, get_current_YYYY_MM_DD_hh_mm_ss_ms, get_data_root, load_pickle from key_dynam.utils.torch_utils import get_freer_gpu from key_dynam.dynamics.utils import set_seed from dense_correspondence_manipulation.utils.utils import set_cuda_visible_devices from key_dynam.dense_correspondence.dc_drake_sim_episode_reader import DCDrakeSimEpisodeReader from key_dynam.dense_correspondence.precompute_descriptors import precompute_descriptor_keypoints from key_dynam.dense_correspondence.keypoint_selection import score_and_select_spatially_separated_keypoints from key_dynam.experiments.exp_18_box_on_side import utils as exp_18_utils from key_dynam.experiments.exp_20_mugs import utils as exp_20_utils # CUDA_VISIBLE_DEVICES = [0] # set_cuda_visible_devices(CUDA_VISIBLE_DEVICES) set_cuda_visible_devices([get_freer_gpu()]) from dense_correspondence_manipulation.utils.visualization import draw_reticles def get_DD_model_file(dataset_name): model_name = None model_file = None if dataset_name == "dps_box_on_side_600": model_name = "" model_file = "/home/manuelli/data/key_dynam/dev/experiments/drake_pusher_slider_box_on_side/dataset_dps_box_on_side_600/trained_models/perception/dense_descriptor/3D_loss_camera_angled_2020-05-13-23-39-35-818188/net_best_dy_model.pth" elif dataset_name == "correlle_mug-small_single_color_600": model_name = "" model_file = "/home/manuelli/data/key_dynam/dev/experiments/20/dataset_correlle_mug-small_single_color_600/trained_models/perception/dense_descriptors/data_aug_2020-06-03-00-27-50-738970/net_best_model.pth" elif dataset_name == "correlle_mug-small_many_colors_600":
# camera_list = ['camera_1_top_down'] # target_camera_names = ['camera_1_top_down'] model_config_file = os.path.join(os.path.dirname(model_file), 'config.yaml') model_config = getDictFromYamlFilename(model_config_file) model_config['dataset']['data_augmentation'] = False dataset = DynamicDrakeSimDataset( model_config, multi_episode_dict, phase="valid") # could also use train data model = torch.load(model_file) model = model.cuda() model = model.eval() heatmap_vis = HeatmapVisualization(model_config, dataset, model, visualize_3D=False, camera_list=camera_list, target_camera_names=target_camera_names, verbose=True, sample_same_episode=False) heatmap_vis.run() if __name__ == "__main__": set_cuda_visible_devices([1]) main()
save_dir, SceneStructure.descriptor_image_filename(img_idx)) np.save(descriptor_filename, res) print("descriptor_filename", descriptor_filename) print("processing image %d of %d" % (counter, num_images)) counter += 1 if __name__ == "__main__": dc_source_dir = utils.getDenseCorrespondenceSourceDir() config_filename = os.path.join(dc_source_dir, 'config', 'dense_correspondence', 'evaluation', 'lucas_evaluation.yaml') eval_config = utils.getDictFromYamlFilename(config_filename) default_config = utils.get_defaults_config() utils.set_cuda_visible_devices(default_config['cuda_visible_devices']) dce = DenseCorrespondenceEvaluation(eval_config) network_name = "caterpillar_M_background_0.500_3" dcn = dce.load_network_from_config(network_name) dataset_config_file = os.path.join(dc_source_dir, 'config', 'dense_correspondence', 'dataset', 'composite', 'caterpillar_only_9.yaml') dataset_config = utils.getDictFromYamlFilename(dataset_config_file) dataset = SpartanDataset(config=dataset_config) scene_name = SCENE_NAME save_dir = SAVE_DIR compute_descriptor_images_for_single_scene(dataset, scene_name, dcn, save_dir)
""" import os import pydrake import torch from key_dynam.dense_correspondence.precompute_descriptors import compute_descriptor_confidences from key_dynam.utils.utils import get_project_root, load_yaml, get_current_YYYY_MM_DD_hh_mm_ss_ms, get_data_root, load_pickle from dense_correspondence_manipulation.utils.utils import set_cuda_visible_devices from key_dynam.dense_correspondence.dc_drake_sim_episode_reader import DCDrakeSimEpisodeReader from key_dynam.dense_correspondence.precompute_descriptors import precompute_descriptor_keypoints from key_dynam.dense_correspondence.keypoint_selection import score_and_select_spatially_separated_keypoints CUDA_VISIBLE_DEVICES = [0] set_cuda_visible_devices(CUDA_VISIBLE_DEVICES) def load_episodes(): # DATASET_NAME = "2020-03-25-19-57-26-556093_constant_velocity_500" # DATASET_NAME = "2020-04-15-21-15-56-602712_T_aug_random_velocity_500" DATASET_NAME = "2020-04-20-14-58-21-418302_T_aug_random_velocity_1000" dataset_root = os.path.join(get_data_root(), "dev/experiments/09/data", DATASET_NAME) max_num_episodes = None multi_episode_dict = DCDrakeSimEpisodeReader.load_dataset( dataset_root, max_num_episodes=max_num_episodes) return DATASET_NAME, multi_episode_dict
import dense_correspondence_manipulation.utils.utils as utils utils.add_dense_correspondence_to_python_path() from dense_correspondence.training.training import * import sys import logging import argparse #utils.set_default_cuda_visible_devices() utils.set_cuda_visible_devices([0]) # use this to manually set CUDA_VISIBLE_DEVICES from dense_correspondence.training.training import DenseCorrespondenceTraining from dense_correspondence.dataset.spartan_dataset_masked import SpartanDataset logging.basicConfig(level=logging.INFO) from dense_correspondence.evaluation.evaluation import DenseCorrespondenceEvaluation if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--data_name", type=str, default="caterpillar_upright.yaml") parser.add_argument("--run_prefix", type=str, default="caterpillar") parser.add_argument("--training_yaml", type=str, default="training.yaml") args = parser.parse_args() config_filename = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence', 'dataset', 'composite', args.data_name) config = utils.getDictFromYamlFilename(config_filename)
from dense_correspondence.dataset.spartan_dataset_masked import SpartanDataset, ImageType import dense_correspondence from dense_correspondence.evaluation.evaluation import * from dense_correspondence.evaluation.plotting import normalize_descriptor from dense_correspondence.network.dense_correspondence_network import DenseCorrespondenceNetwork sys.path.append( os.path.join(os.path.dirname(__file__), "../simple-pixel-correspondence-labeler")) from annotate_correspondences import label_colors, draw_reticle, pil_image_to_cv2, drawing_scale_config, numpy_to_cv2, label_colors COLOR_RED = np.array([0, 0, 255]) COLOR_GREEN = np.array([0, 255, 0]) utils.set_cuda_visible_devices([0]) eval_config_filename = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence', 'evaluation', 'evaluation.yaml') EVAL_CONFIG = utils.getDictFromYamlFilename(eval_config_filename) class HeatmapVisualization(object): """ Launches a live interactive heatmap visualization. Edit config/dense_correspondence/heatmap_vis/heatmap.yaml to specify which networks to visualize. Specifically add the network you want to visualize to the "networks" list. Make sure that this network appears in the file pointed to by EVAL_CONFIG Usage: Launch this file with python after sourcing the environment with