import os

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch

# Helpers assumed from the SafeGAN repo: get_root_dir, get_dset_name,
# get_dset_group_name, load_bin_map, and the global torch `device`
# (e.g. torch.device('cuda' if torch.cuda.is_available() else 'cpu')).


def set_dset_list(self, data_dir, down_sampling=True):
    """Fill scene_information with the static environment features that
    will be used as part of the input of the Static Scene Feature Extractor
    module in SafeGAN."""
    _dir = os.path.dirname(os.path.realpath(__file__))
    _dir = _dir.split("/")[:-2]
    _dir = "/".join(_dir)
    directory = _dir + '/datasets/safegan_dataset/'

    self.list_data_files = sorted([
        get_dset_name(os.path.join(data_dir, _path).split("/")[-1])
        for _path in os.listdir(data_dir)
    ])
    for name in self.list_data_files:
        path_group = os.path.join(directory, get_dset_group_name(name))
        # The inputs are the boundary points between the traversable and
        # non-traversable areas. It is possible to take all points or just
        # a sample.
        path = os.path.join(path_group, name)
        scene_map = np.load(path + "/world_points_boundary.npy")
        if self.down_samples != -1 and down_sampling and scene_map.shape[0] > self.down_samples:
            # Keep every `step`-th point, then truncate to exactly
            # self.down_samples points. A separate variable is used so the
            # `down_sampling` flag is not overwritten between iterations.
            step = scene_map.shape[0] // self.down_samples
            sampled = scene_map[::step]
            scene_map = sampled[:self.down_samples]
        self.scene_information[name] = torch.from_numpy(scene_map).type(
            torch.float).to(device)
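
# A minimal standalone sketch (an addition for illustration, not repo code)
# of the stride-based down-sampling used above: given N boundary points and a
# budget of K points, keep every (N // K)-th point, then truncate to exactly K.
def _demo_down_sampling():
    points = np.arange(1000, dtype=np.float64).reshape(500, 2)  # 500 dummy 2-D points
    down_samples = 200
    step = points.shape[0] // down_samples   # stride of 2 for 500 -> 200
    sampled = points[::step][:down_samples]
    assert sampled.shape == (down_samples, 2)
    return sampled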

def get_homography_and_map(dset, annotated_points_name='/world_points_boundary.npy'):
    """Return the annotated scene points and the homography matrix for `dset`.

    The annotated points can come from a .txt, a .jpg (binary map) or a .npy
    file, selected by the extension in `annotated_points_name`."""
    directory = get_root_dir() + '/data/'
    path_group = os.path.join(directory, get_dset_group_name(dset))
    path = os.path.join(path_group, dset)
    h_matrix = pd.read_csv(path + '/{}_homography.txt'.format(dset),
                           delim_whitespace=True, header=None).values
    if 'txt' in annotated_points_name:
        scene_map = np.loadtxt(path + annotated_points_name, delimiter=' ')
    elif 'jpg' in annotated_points_name:
        scene_map = load_bin_map(path + annotated_points_name)
    else:
        scene_map = np.load(path + annotated_points_name)
    return scene_map, h_matrix
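
# Usage sketch (the dataset name 'zara1' is hypothetical): load a scene's
# boundary points together with its homography, then map one point through it.
# Whether H maps world -> pixel or pixel -> world depends on how the
# {dset}_homography.txt files were exported, so treat the direction here as an
# assumption.
def project_point(h_matrix, point_xy):
    p = np.array([point_xy[0], point_xy[1], 1.0])  # homogeneous coordinates
    q = h_matrix @ p
    return q[:2] / q[2]                            # dehomogenize

# scene_map, h = get_homography_and_map('zara1')
# pixel = project_point(h, scene_map[0])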

def set_dset_list(self, data_dir, down_sampling=True, down_samples=200):
    directory = get_root_dir() + '/datasets/safegan_dataset/'
    self.list_data_files = sorted([
        get_dset_name(os.path.join(data_dir, _path).split("/")[-1])
        for _path in os.listdir(data_dir)
    ])
    for name in self.list_data_files:
        path_group = os.path.join(directory, get_dset_group_name(name))
        # The inputs are the boundary points between the traversable and
        # non-traversable areas. It is possible to take all points or just
        # a sample.
        path = os.path.join(path_group, name)
        scene_map = np.load(path + "/world_points_boundary.npy")
        if down_samples != -1 and down_sampling and scene_map.shape[0] > down_samples:
            step = scene_map.shape[0] // down_samples
            scene_map = scene_map[::step][:down_samples]
        self.scene_information[name] = torch.from_numpy(scene_map).type(
            torch.float).to(device)

def set_dset_list(self, data_dir):
    """Fill scene_information with the static environment features that
    will be used as part of the input of the Static Scene Feature Extractor
    module in SafeGAN."""
    directory = get_root_dir() + '/datasets/safegan_dataset/'
    self.list_data_files = sorted([
        get_dset_name(os.path.join(data_dir, _path).split("/")[-1])
        for _path in os.listdir(data_dir)
    ])
    for name in self.list_data_files:
        path_group = os.path.join(directory, get_dset_group_name(name))

        if self.pool_static_type == "physical_attention_no_encoder":
            # In this case the features are the ones extracted by one of the
            # segmentation networks I trained on the new dataset I created.
            # The features are taken before the last upsample layers.
            path = os.path.join(path_group + "/segmented_features", name)
            features = np.load(path + "_segmentation_features.npy")
            features = torch.from_numpy(features).type(torch.float).to(device)
        elif self.pool_static_type == "physical_attention_with_encoder":
            # In this case the input is the raw image or the segmented one
            # (by one of the segmentation networks I trained on the new
            # dataset I created). This image is then encoded by a deep
            # network such as a ResNet.
            path = os.path.join(path_group + "/segmented_scenes", name)
            image = plt.imread(path + ".jpg")
            image = torch.from_numpy(image).type(torch.float).to(device)
            # Images fed to the model must be a float tensor of shape
            # (N, 3, 256, 256), where N is the batch size. PyTorch follows
            # the NCHW convention, so the channel dimension (C) must precede
            # the spatial dimensions.
            image = image.permute(2, 0, 1)
            # Normalize the image
            image = self.transform(image)
            features = self.attention_encoder(image.unsqueeze(0))
        else:
            raise ValueError("Unrecognized physical attention "
                             "pool_static_type: {}".format(self.pool_static_type))

        self.scene_information[name] = features
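
# The "physical_attention_with_encoder" branch relies on `self.transform` and
# `self.attention_encoder`, defined elsewhere in the repo. A plausible
# stand-in, assuming ImageNet-style normalization, an input image already
# scaled to [0, 1], and a ResNet-18 trunk with the classification head removed
# (all assumptions, not the repo's exact configuration; requires a recent
# torchvision):
import torchvision

def build_attention_encoder():
    transform = torchvision.transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    resnet = torchvision.models.resnet18(weights=None)
    # Drop avgpool + fc so the output is a spatial feature map
    # of shape (N, 512, H/32, W/32) rather than class logits.
    encoder = torch.nn.Sequential(*list(resnet.children())[:-2])
    encoder.eval()
    return transform, encoder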

def get_homography(dset):
    directory = get_root_dir() + '/data/'
    path_group = os.path.join(directory, get_dset_group_name(dset))
    path = os.path.join(path_group, dset)
    h_matrix = pd.read_csv(path + '/{}_homography.txt'.format(dset),
                           delim_whitespace=True, header=None).values
    return h_matrix

def get_path(dset):
    directory = get_root_dir() + '/data/'
    path_group = os.path.join(directory, get_dset_group_name(dset))
    path = os.path.join(path_group, dset)
    return path
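
# Usage sketch tying the helpers together ('eth' is a hypothetical dataset
# name): the homography file is plain whitespace-separated text holding a
# 3x3 matrix, so the parsed array has shape (3, 3).
# path = get_path('eth')       # .../data/<group>/eth
# h = get_homography('eth')    # parsed from eth_homography.txt
# assert h.shape == (3, 3)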