def create_template_metadata(template_specifications):
    """
    Creates the template metadata (objects, names, noise variances, attachment) from xml parameters.
    """
    objects_list = []
    objects_name = []
    objects_noise_variance = []
    objects_name_extension = []
    objects_norm = []
    objects_norm_kernel_type = []
    objects_norm_kernel_width = []

    for object_id, object in template_specifications.items():
        filename = object['filename']
        object_type = object['deformable_object_type'].lower()
        assert object_type in ['surfacemesh', 'polyline', 'pointcloud', 'landmark', 'image'], \
            "Unknown object type."

        root, extension = splitext(filename)
        reader = DeformableObjectReader()

        objects_list.append(reader.create_object(filename, object_type))
        objects_name.append(object_id)
        objects_name_extension.append(extension)

        # A negative noise_std is passed through as a -1.0 sentinel variance.
        if object['noise_std'] < 0:
            objects_noise_variance.append(-1.0)
        else:
            objects_noise_variance.append(object['noise_std'] ** 2)

        object_norm = _get_norm_for_object(object, object_id)
        objects_norm.append(object_norm)

        if object_norm in ['current', 'varifold']:
            objects_norm_kernel_type.append(object['kernel_type'])
            objects_norm_kernel_width.append(float(object['kernel_width']))
        else:
            objects_norm_kernel_type.append("no_kernel")
            objects_norm_kernel_width.append(0.)

        # Optional grid downsampling parameter for image data.
        if object_type == 'image' and 'downsampling_factor' in object:
            objects_list[-1].downsampling_factor = object['downsampling_factor']

    multi_object_attachment = MultiObjectAttachment()
    multi_object_attachment.attachment_types = objects_norm
    for k in range(len(objects_norm)):
        multi_object_attachment.kernels.append(
            kernel_factory.factory(objects_norm_kernel_type[k], objects_norm_kernel_width[k]))

    return objects_list, objects_name, objects_name_extension, objects_noise_variance, multi_object_attachment
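# Usage sketch for create_template_metadata. The file path, object id and parameter
# values below are hypothetical, chosen only to illustrate the expected spec layout;
# 'attachment_type' is an assumed key read by _get_norm_for_object.
example_template_specifications = {
    'skull': {
        'filename': 'data/skull_template.vtk',    # hypothetical path
        'deformable_object_type': 'SurfaceMesh',
        'noise_std': 1.0,                         # squared into the returned noise variance
        'attachment_type': 'varifold',            # assumed key (not confirmed by this file)
        'kernel_type': 'torch',
        'kernel_width': 10.0,
    }
}
objects, names, extensions, noise_variances, attachment = \
    create_template_metadata(example_template_specifications)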
def create_dataset(template_specifications, visit_ages=None, dataset_filenames=None, subject_ids=None,
                   dimension=None):
    """
    Creates a longitudinal dataset object from xml parameters.
    """
    deformable_objects_dataset = []

    if dataset_filenames is not None:
        for i in range(len(dataset_filenames)):
            deformable_objects_subject = []
            for j in range(len(dataset_filenames[i])):
                object_list = []
                reader = DeformableObjectReader()
                for object_id in template_specifications.keys():
                    if object_id not in dataset_filenames[i][j]:
                        raise RuntimeError('The template object with id ' + object_id +
                                           ' is not found for the visit ' + str(j) +
                                           ' of subject ' + str(i) + '. Check the dataset xml.')
                    else:
                        object_type = template_specifications[object_id]['deformable_object_type']
                        object_list.append(reader.create_object(dataset_filenames[i][j][object_id],
                                                                object_type, dimension))
                deformable_objects_subject.append(DeformableMultiObject(object_list))
            deformable_objects_dataset.append(deformable_objects_subject)

    longitudinal_dataset = LongitudinalDataset(
        subject_ids, times=visit_ages, deformable_objects=deformable_objects_dataset)

    return longitudinal_dataset
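# Usage sketch for create_dataset: dataset_filenames is a list over subjects, each a
# list over visits, each a dict mapping template object ids to file paths. All paths
# and ids below are hypothetical; the spec dict comes from the sketch above.
example_dataset_filenames = [
    [{'skull': 'data/subj01_visit0.vtk'}, {'skull': 'data/subj01_visit1.vtk'}],  # subject 0, two visits
    [{'skull': 'data/subj02_visit0.vtk'}],                                       # subject 1, one visit
]
example_visit_ages = [[65.2, 66.8], [71.4]]
example_subject_ids = ['subj01', 'subj02']
dataset = create_dataset(example_template_specifications,
                         visit_ages=example_visit_ages,
                         dataset_filenames=example_dataset_filenames,
                         subject_ids=example_subject_ids)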
def read_and_create_image_dataset(dataset_filenames, visit_ages, subject_ids, template_specifications):
    """
    Builds a longitudinal dataset of images (non deformable images). Loads everything into memory.
    #TODO assert on the format of the images !
    """
    deformable_objects_dataset = []
    reader = DeformableObjectReader()

    for i in range(len(dataset_filenames)):
        deformable_objects_subject = []
        for j in range(len(dataset_filenames[i])):
            for object_id in template_specifications.keys():
                if object_id not in dataset_filenames[i][j]:
                    raise RuntimeError('The template object with id ' + object_id +
                                       ' is not found for the visit ' + str(j) +
                                       ' of subject ' + str(i) + '. Check the dataset xml.')
                else:
                    object_type = template_specifications[object_id]['deformable_object_type']
                    deformable_object_visit = reader.create_object(dataset_filenames[i][j][object_id], object_type)
                    deformable_object_visit.update()
                    deformable_objects_subject.append(deformable_object_visit)
        if len(deformable_objects_subject) <= 1:
            msg = "I have only one observation for subject {}".format(str(i))
            warnings.warn(msg)
        deformable_objects_dataset.append(deformable_objects_subject)

    longitudinal_dataset = LongitudinalDataset()
    longitudinal_dataset.times = [np.array(elt) for elt in visit_ages]
    longitudinal_dataset.subject_ids = subject_ids
    longitudinal_dataset.deformable_objects = deformable_objects_dataset
    longitudinal_dataset.update()
    longitudinal_dataset.check_image_shapes()
    longitudinal_dataset.order_observations()

    return longitudinal_dataset
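# Usage sketch for read_and_create_image_dataset, with the same nested filename layout
# as create_dataset but pointing at image files. Paths, ids and spec keys below are
# hypothetical illustrations only.
image_dataset = read_and_create_image_dataset(
    [[{'brain_mri': 'data/subj01_t0.nii'}, {'brain_mri': 'data/subj01_t1.nii'}]],  # one subject, two visits
    [[60.0, 61.5]],
    ['subj01'],
    {'brain_mri': {'filename': 'data/brain_template.nii', 'deformable_object_type': 'Image'}})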
def add_gaussian_noise_to_vtk_file(global_output_dir, filename, obj_type, noise_std):
    """
    Reads a deformable object from a vtk file, adds centered Gaussian noise of standard
    deviation noise_std to its points, and writes the noisy object to the output directory.
    """
    reader = DeformableObjectReader()
    obj = reader.create_object(filename, obj_type)
    obj.update()
    obj.set_points(obj.points + normal(0.0, noise_std, size=obj.points.shape))
    obj.write(global_output_dir, os.path.basename(filename))
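# Usage sketch: perturb the vertices of a mesh with sigma = 0.5 Gaussian noise and
# write the noisy copy under ./output with the same base name (hypothetical paths):
add_gaussian_noise_to_vtk_file('./output', 'data/skull_template.vtk', 'SurfaceMesh', 0.5)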
def __infer_dimension(template_specifications):
    """
    Reads each template object and returns the largest ambient dimension found.
    """
    reader = DeformableObjectReader()
    max_dimension = 0
    for elt in template_specifications.values():
        object_filename = elt['filename']
        object_type = elt['deformable_object_type']
        o = reader.create_object(object_filename, object_type, dimension=None)
        d = o.dimension
        max_dimension = max(d, max_dimension)
    return max_dimension
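# Usage sketch: with a 3D surface mesh and a 2D polyline in the spec, this would
# return 3, the largest dimension found (hypothetical paths and ids):
inferred_dimension = __infer_dimension({
    'skull': {'filename': 'data/skull.vtk', 'deformable_object_type': 'SurfaceMesh'},
    'outline': {'filename': 'data/outline.vtk', 'deformable_object_type': 'PolyLine'},
})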
def create_template_metadata(template_specifications, dimension=None):
    """
    Creates the template metadata (objects, names, attachment) from xml parameters.
    """
    objects_list = []
    objects_name = []
    objects_name_extension = []
    objects_norm = []
    objects_norm_kernels = []

    for object_id, object in template_specifications.items():
        filename = object['filename']
        object_type = object['deformable_object_type'].lower()
        assert object_type in ['surfacemesh', 'polyline', 'pointcloud', 'landmark', 'image'], \
            "Unknown object type."

        root, extension = splitext(filename)
        reader = DeformableObjectReader()

        objects_list.append(reader.create_object(filename, object_type, dimension=dimension))
        objects_name.append(object_id)
        objects_name_extension.append(extension)

        object_norm = _get_norm_for_object(object, object_id)
        objects_norm.append(object_norm)

        if object_norm in ['current', 'pointcloud', 'varifold']:
            objects_norm_kernels.append(kernel_factory.factory(
                'torch', object['kernel_width'],
                device=object['kernel_device'] if 'kernel_device' in object
                else default.deformation_kernel_device))
        else:
            objects_norm_kernels.append(kernel_factory.factory(kernel_factory.Type.NO_KERNEL))

        # Optional grid downsampling parameter for image data.
        if object_type == 'image' and 'downsampling_factor' in object:
            objects_list[-1].downsampling_factor = object['downsampling_factor']

    multi_object_attachment = MultiObjectAttachment(objects_norm, objects_norm_kernels)

    return objects_list, objects_name, objects_name_extension, multi_object_attachment
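# Usage sketch for this variant (returns the attachment but no noise variances); the
# spec keys mirror the first version, plus an optional 'kernel_device'. Values are
# hypothetical, and 'attachment_type' is again an assumed key.
example_spec = {
    'cortex': {
        'filename': 'data/cortex_template.vtk',   # hypothetical path
        'deformable_object_type': 'SurfaceMesh',
        'attachment_type': 'current',             # assumed key read by _get_norm_for_object
        'kernel_width': 8.0,
        'kernel_device': 'cpu',
    }
}
objects, names, extensions, attachment = create_template_metadata(example_spec, dimension=3)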
def __init__(self, kernel_type, kernel_device='CPU', use_cuda=False, data_type='landmark', data_size='small'):
    np.random.seed(42)
    kernel_width = 10.

    if use_cuda:
        self.tensor_scalar_type = torch.cuda.FloatTensor
    else:
        self.tensor_scalar_type = torch.FloatTensor

    self.exponential = Exponential(
        kernel=kernel_factory.factory(kernel_type, kernel_width, kernel_device),
        number_of_time_points=11, use_rk2_for_flow=False, use_rk2_for_shoot=False)

    if data_type.lower() == 'landmark':
        reader = DeformableObjectReader()
        if data_size == 'small':
            surface_mesh = reader.create_object(path_to_small_surface_mesh_1, 'SurfaceMesh')
            self.control_points = create_regular_grid_of_points(
                surface_mesh.bounding_box, kernel_width, surface_mesh.dimension)
        elif data_size == 'large':
            surface_mesh = reader.create_object(path_to_large_surface_mesh_1, 'SurfaceMesh')
            self.control_points = create_regular_grid_of_points(
                surface_mesh.bounding_box, kernel_width, surface_mesh.dimension)
        else:
            # Synthetic mesh whose triangle count is driven by data_size (up to ~16k).
            connectivity = np.array(list(itertools.combinations(range(100), 3))[:int(data_size)])
            surface_mesh = SurfaceMesh(3)
            surface_mesh.set_points(np.random.randn(np.max(connectivity) + 1, surface_mesh.dimension))
            surface_mesh.set_connectivity(connectivity)
            surface_mesh.update()
            self.control_points = np.random.randn(int(data_size) // 10, 3)
        self.template = DeformableMultiObject([surface_mesh])

    elif data_type.lower() == 'image':
        image = Image(3)
        image.set_intensities(np.random.randn(int(data_size), int(data_size), int(data_size)))
        image.set_affine(np.eye(4))
        image.downsampling_factor = 5.
        image.update()
        self.control_points = create_regular_grid_of_points(image.bounding_box, kernel_width, image.dimension)
        self.control_points = remove_useless_control_points(self.control_points, image, kernel_width)
        self.template = DeformableMultiObject([image])

    else:
        raise RuntimeError('Unknown data_type argument. Choose between "landmark" or "image".')

    self.momenta = np.random.randn(*self.control_points.shape)
def compute_distance_squared(path_to_mesh_1, path_to_mesh_2, deformable_object_type, attachment_type,
                             kernel_width=None):
    reader = DeformableObjectReader()
    object_1 = reader.create_object(path_to_mesh_1, deformable_object_type.lower())
    object_2 = reader.create_object(path_to_mesh_2, deformable_object_type.lower())
    multi_object_1 = DeformableMultiObject([object_1])
    multi_object_2 = DeformableMultiObject([object_2])
    multi_object_attachment = MultiObjectAttachment(
        [attachment_type], [kernel_factory.factory('torch', kernel_width)])
    return multi_object_attachment.compute_distances(
        {key: torch.from_numpy(value) for key, value in multi_object_1.get_points().items()},
        multi_object_1, multi_object_2).data.cpu().numpy()
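# Usage sketch: squared varifold distance between two surface meshes with a kernel
# width of 10 (hypothetical paths; units follow the mesh coordinates):
d2 = compute_distance_squared('data/mesh_a.vtk', 'data/mesh_b.vtk',
                              'SurfaceMesh', 'varifold', kernel_width=10.0)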
def _read_poly_line(self, path):
    reader = DeformableObjectReader()
    object = reader.create_object(path, "PolyLine")
    object.update()
    return object
def _read_surface_mesh(self, path):
    reader = DeformableObjectReader()
    object = reader.create_object(path, "SurfaceMesh")
    object.update()
    return object
def __init__(self, kernel_type, kernel_device='CPU', use_cuda=False, data_size='small'):
    np.random.seed(42)
    kernel_width = 10.

    tensor_scalar_type = default.tensor_scalar_type
    if kernel_device.upper() == 'CPU':
        tensor_scalar_type = torch.FloatTensor
    elif kernel_device.upper() == 'GPU':
        tensor_scalar_type = torch.cuda.FloatTensor
    else:
        raise RuntimeError('Unknown kernel_device argument. Choose between "CPU" or "GPU".')

    self.multi_object_attachment = MultiObjectAttachment(
        ['varifold'], [kernel_factory.factory(kernel_type, kernel_width, device=kernel_device)])
    self.kernel = kernel_factory.factory(kernel_type, kernel_width, device=kernel_device)

    reader = DeformableObjectReader()
    if data_size == 'small':
        self.surface_mesh_1 = reader.create_object(path_to_small_surface_mesh_1, 'SurfaceMesh', tensor_scalar_type)
        self.surface_mesh_2 = reader.create_object(path_to_small_surface_mesh_2, 'SurfaceMesh', tensor_scalar_type)
        self.surface_mesh_1_points = tensor_scalar_type(self.surface_mesh_1.get_points())
    elif data_size == 'large':
        self.surface_mesh_1 = reader.create_object(path_to_large_surface_mesh_1, 'SurfaceMesh', tensor_scalar_type)
        self.surface_mesh_2 = reader.create_object(path_to_large_surface_mesh_2, 'SurfaceMesh', tensor_scalar_type)
        self.surface_mesh_1_points = tensor_scalar_type(self.surface_mesh_1.get_points())
    else:
        # Synthetic mesh pair whose triangle count is driven by data_size (up to ~16k).
        data_size = int(data_size)
        connectivity = np.array(list(itertools.combinations(range(100), 3))[:data_size])
        self.surface_mesh_1 = SurfaceMesh(3)
        self.surface_mesh_1.set_points(np.random.randn(np.max(connectivity) + 1, 3))
        self.surface_mesh_1.set_connectivity(connectivity)
        self.surface_mesh_1.update()
        self.surface_mesh_2 = SurfaceMesh(3)
        self.surface_mesh_2.set_points(np.random.randn(np.max(connectivity) + 1, 3))
        self.surface_mesh_2.set_connectivity(connectivity)
        self.surface_mesh_2.update()
        self.surface_mesh_1_points = tensor_scalar_type(self.surface_mesh_1.get_points())