def get_pair_data(self):
    """Return the tuple (input, ground truth) with the data content in numpy array.

    Returns:
        tuple: (list of HWD-oriented input ndarrays,
                list of HWD-oriented ground-truth ndarrays, or ``None`` if unlabeled).
    """
    cache_mode = 'fill' if self.cache else 'unchanged'

    input_data = []
    for handle in self.input_handle:
        # Reorient each input volume into (height, width, depth) convention.
        hwd_oriented = imed_loader_utils.orient_img_hwd(
            handle.get_fdata(cache_mode, dtype=np.float32), self.slice_axis)
        input_data.append(hwd_oriented)

    # Handle unlabeled data: return early. The original fell through and
    # iterated over ``self.gt_handle`` (i.e. over None), raising TypeError.
    if self.gt_handle is None:
        return input_data, None

    gt_data = []
    for gt in self.gt_handle:
        if gt is not None:
            hwd_oriented = imed_loader_utils.orient_img_hwd(
                gt.get_fdata(cache_mode, dtype=np.float32), self.slice_axis)
            # Soft ground truths keep float precision; hard masks are cast to uint8.
            data_type = np.float32 if self.soft_gt else np.uint8
            gt_data.append(hwd_oriented.astype(data_type))
        else:
            # Missing label for this target: substitute an all-zero mask shaped
            # like the first input volume. Allocate uint8 directly instead of
            # the original's float32-zeros-then-astype double allocation.
            gt_data.append(
                np.zeros(imed_loader_utils.orient_shapes_hwd(
                    self.input_handle[0].shape, self.slice_axis),
                    dtype=np.uint8))

    return input_data, gt_data
def get_pair_data(self):
    """Return the tuple (input, ground truth) with the data content in numpy array.

    Returns:
        tuple: (list of HWD-oriented input ndarrays,
                list of ground-truth entries, or ``None`` if unlabeled; each entry is
                an ndarray for a single rater, or a list of ndarrays for several raters).
    """
    cache_mode = 'fill' if self.cache else 'unchanged'

    input_data = []
    for handle in self.input_handle:
        # Reorient each input volume into (height, width, depth) convention.
        hwd_oriented = imed_loader_utils.orient_img_hwd(
            handle.get_fdata(cache_mode, dtype=np.float32), self.slice_axis)
        input_data.append(hwd_oriented)

    # Handle unlabeled data: return early. The original fell through and
    # iterated over ``self.gt_handle`` (i.e. over None), raising TypeError.
    if self.gt_handle is None:
        return input_data, None

    gt_data = []
    for gt in self.gt_handle:
        if gt is None:
            # Missing label for this target: substitute an all-zero uint8 mask
            # shaped like the first input volume (uint8 directly, no float detour).
            gt_data.append(
                np.zeros(imed_loader_utils.orient_shapes_hwd(
                    self.input_handle[0].shape, self.slice_axis),
                    dtype=np.uint8))
        elif not isinstance(gt, list):
            # This tissue has annotation from only one rater.
            hwd_oriented = imed_loader_utils.orient_img_hwd(
                gt.get_fdata(cache_mode, dtype=np.float32), self.slice_axis)
            gt_data.append(hwd_oriented)
        else:
            # This tissue has annotation from several raters. Append the raw
            # float32 arrays, matching the single-rater branch. The original
            # cast with an undefined ``data_type`` here (NameError at runtime).
            gt_data.append([
                imed_loader_utils.orient_img_hwd(
                    gt_rater.get_fdata(cache_mode, dtype=np.float32), self.slice_axis)
                for gt_rater in gt])

    return input_data, gt_data
def generate_bounding_box_file(subject_list, model_path, path_output, gpu_id=0, slice_axis=0, contrast_lst=None,
                               keep_largest_only=True, safety_factor=None):
    """Creates json file containing the bounding box dimension for each images.

    The file has the following format:
    {"path/to/img.nii.gz": [[x1_min, x1_max, y1_min, y1_max, z1_min, z1_max],
    [x2_min, x2_max, y2_min, y2_max, z2_min, z2_max]]}
    where each list represents the coordinates of an object on the image (2 instance of a given object in this
    example).

    Args:
        subject_list (list): List of all subjects in the BIDS directory.
        model_path (string): Path to object detection model.
        path_output (string): Output path.
        gpu_id (int): If available, GPU number.
        slice_axis (int): Slice axis (0: sagittal, 1: coronal, 2: axial).
        contrast_lst (list): Contrasts. Subjects whose modality is not listed are skipped;
            ``None`` (the default) processes no subject.
        keep_largest_only (bool): Boolean representing if only the largest object of the prediction is kept.
        safety_factor (list or tuple): Factors to multiply each dimension of the bounding box.

    Returns:
        dict: Dictionary containing bounding boxes related to their image.
    """
    bounding_box_dict = {}
    if safety_factor is None:
        safety_factor = [1.0, 1.0, 1.0]
    for subject in subject_list:
        # Guard against the default contrast_lst=None: the original membership
        # test (`... in None`) raised TypeError for a non-empty subject list.
        if contrast_lst is None or subject.record["modality"] not in contrast_lst:
            continue
        subject_path = str(subject.record["absolute_path"])
        # Run the object-detection model to get a segmentation of the ROI.
        object_mask, _ = imed_inference.segment_volume(model_path, [subject_path], gpu_id=gpu_id)
        object_mask = object_mask[0]
        if keep_largest_only:
            object_mask = imed_postpro.keep_largest_object(object_mask)
        mask_path = os.path.join(path_output, "detection_mask")
        # makedirs(..., exist_ok=True) is race-safe, unlike exists()+mkdir().
        os.makedirs(mask_path, exist_ok=True)
        # basename() is portable; the original's split("/") breaks on Windows paths.
        nib.save(object_mask, os.path.join(mask_path, os.path.basename(subject_path)))
        # Orient the mask RAS then HWD before extracting bounding boxes.
        ras_orientation = nib.as_closest_canonical(object_mask)
        hwd_orientation = imed_loader_utils.orient_img_hwd(ras_orientation.get_fdata(), slice_axis)
        bounding_boxes = get_bounding_boxes(hwd_orientation)
        # Inflate each box by the per-dimension safety factors.
        bounding_box_dict[subject_path] = [adjust_bb_size(bb, safety_factor) for bb in bounding_boxes]

    file_path = os.path.join(path_output, 'bounding_boxes.json')
    with open(file_path, 'w') as fp:
        json.dump(bounding_box_dict, fp, indent=4)
    return bounding_box_dict
def get_data(fname_in, axis):
    """Load an image and return it alongside its HWD-oriented voxel data.

    Args:
        fname_in (str): Image filename.
        axis (int): Slice axis used for the HWD reorientation.

    Returns:
        tuple: (nibabel image in canonical orientation, float32 ndarray oriented HWD).
    """
    # Load the image and reorient it into RAS canonical space in one step.
    canonical_img = nib.as_closest_canonical(nib.load(fname_in))
    # Pull the voxel array and flip it into (height, width, depth) convention.
    oriented_data = imed_loader_utils.orient_img_hwd(
        canonical_img.get_fdata(dtype=np.float32), slice_axis=axis)
    return canonical_img, oriented_data
def bounding_box_prior(fname_mask, metadata, slice_axis):
    """
    Computes prior steps to a model requiring bounding box crop. This includes loading a mask of the ROI, orienting
    the given mask into the following dimensions: (height, width, depth), extracting the bounding boxes and storing
    the information in the metadata.

    Args:
        fname_mask (str): Filename containing the mask of the ROI.
        metadata (dict): Dictionary containing the image metadata.
        slice_axis (int): Slice axis (0: sagittal, 1: coronal, 2: axial).
    """
    nib_prior = nib.load(fname_mask)
    # orient image into HWD convention (RAS first)
    nib_ras = nib.as_closest_canonical(nib_prior)
    # Load the voxel array once: the original called get_fdata() up to three
    # times, re-extracting the full volume on each call.
    np_mask = nib_ras.get_fdata()
    # Drop the trailing dimension of 4D masks to get a 3D volume.
    if np_mask.ndim == 4:
        np_mask = np_mask[..., 0]
    np_mask = imed_loader_utils.orient_img_hwd(np_mask, slice_axis)
    bounding_box = get_bounding_boxes(np_mask)
    # Only the first detected bounding box is stored.
    metadata['bounding_box'] = bounding_box[0]
def bounding_box_prior(fname_mask, metadata, slice_axis, safety_factor=None):
    """
    Computes prior steps to a model requiring bounding box crop. This includes loading a mask of the ROI, orienting
    the given mask into the following dimensions: (height, width, depth), extracting the bounding boxes and storing
    the information in the metadata.

    Args:
        fname_mask (str): Filename containing the mask of the ROI.
        metadata (dict): Dictionary containing the image metadata.
        slice_axis (int): Slice axis (0: sagittal, 1: coronal, 2: axial).
        safety_factor (list or tuple): Factors to multiply each dimension of the bounding box.
    """
    nib_prior = nib.load(fname_mask)
    # orient image into HWD convention (RAS first)
    nib_ras = nib.as_closest_canonical(nib_prior)
    # Load the voxel array once: the original called get_fdata() twice,
    # re-extracting the full volume on each call.
    np_mask = nib_ras.get_fdata()
    # Drop the trailing dimension of 4D masks to get a 3D volume.
    if np_mask.ndim == 4:
        np_mask = np_mask[..., 0]
    np_mask = imed_loader_utils.orient_img_hwd(np_mask, slice_axis)
    # Extract the first bounding box from the list
    bounding_box = get_bounding_boxes(np_mask)[0]
    if safety_factor:
        bounding_box = adjust_bb_size(bounding_box, safety_factor)
    metadata[MetadataKW.BOUNDING_BOX] = bounding_box