def test_conform():
    anat = nib.load(pjoin(DATA_DIR, 'anatomical.nii'))

    # Test with default arguments.
    c = conform(anat)
    assert c.shape == (256, 256, 256)
    assert c.header.get_zooms() == (1, 1, 1)
    assert c.dataobj.dtype.type == anat.dataobj.dtype.type
    assert aff2axcodes(c.affine) == ('R', 'A', 'S')
    assert isinstance(c, Nifti1Image)

    # Test with non-default arguments.
    c = conform(anat, out_shape=(100, 100, 200), voxel_size=(2, 2, 1.5),
                orientation="LPI", out_class=Nifti2Image)
    assert c.shape == (100, 100, 200)
    assert c.header.get_zooms() == (2, 2, 1.5)
    assert c.dataobj.dtype.type == anat.dataobj.dtype.type
    assert aff2axcodes(c.affine) == ('L', 'P', 'I')
    assert isinstance(c, Nifti2Image)

    # TODO: support nD images in `conform` in the future, but for now, test that we get
    # errors on non-3D images.
    func = nib.load(pjoin(DATA_DIR, 'functional.nii'))
    with pytest.raises(ValueError):
        conform(func)
    with pytest.raises(ValueError):
        conform(anat, out_shape=(100, 100))
    with pytest.raises(ValueError):
        conform(anat, voxel_size=(2, 2))
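# A minimal, self-contained sketch of the defaults the test above asserts: with no
# arguments, nibabel.processing.conform resamples to a 256x256x256 grid of 1 mm
# isotropic voxels in RAS orientation. It uses a synthetic zero-filled image rather
# than the test data files, so the shapes here are illustrative only.
import numpy as np
import nibabel as nib
from nibabel.processing import conform

img = nib.Nifti1Image(np.zeros((32, 20, 12), dtype=np.float32), np.eye(4))
c = conform(img)
print(c.shape)                    # (256, 256, 256)
print(nib.aff2axcodes(c.affine))  # ('R', 'A', 'S')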
def main(args=None):
    """Main program function."""
    parser = _get_parser()
    opts = parser.parse_args(args)
    from_img = load(opts.infile)

    if not opts.force and Path(opts.outfile).exists():
        raise FileExistsError(f"Output file exists: {opts.outfile}")

    out_img = conform(from_img=from_img,
                      out_shape=opts.out_shape,
                      voxel_size=opts.voxel_size,
                      order=3,
                      cval=0.0,
                      orientation=opts.orientation)

    save(out_img, opts.outfile)
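# Hypothetical sketch of an argument parser compatible with main() above, inferred
# from the options it consumes (infile, outfile, force, out_shape, voxel_size,
# orientation). nibabel ships its own _get_parser for the nib-conform command; its
# exact flags, defaults, and help text may differ from this sketch.
import argparse

def _get_parser():
    p = argparse.ArgumentParser(description="Conform an image to a new shape, "
                                            "voxel size, and orientation.")
    p.add_argument("infile", help="Image to conform")
    p.add_argument("outfile", help="Path to save the conformed image")
    p.add_argument("--out-shape", nargs=3, type=int, default=(256, 256, 256),
                   help="Target shape of the output image")
    p.add_argument("--voxel-size", nargs=3, type=float, default=(1.0, 1.0, 1.0),
                   help="Target voxel size in millimetres")
    p.add_argument("--orientation", default="RAS",
                   help="Target orientation axis codes")
    p.add_argument("-f", "--force", action="store_true",
                   help="Overwrite the output file if it already exists")
    return p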
def loadrespadsave(in_path, xy_width, ressize, orient='LPS', mask=0,
                   rescalemethod='minmax', out_path=None):
    ### Import modules
    import numpy as _np
    import nibabel as _nib
    import nibabel.processing as _nibp
    import rescaleimages  # project-local module providing rescaleImage()

    ### Load image
    FileRead = _nib.load(in_path)

    ### Re-orient, resample and resize the image
    if mask:
        spline_order = 0  # nearest-neighbour interpolation for masks
    else:
        spline_order = 1  # linear interpolation for intensity images
    FileRead_res = _nibp.conform(FileRead,
                                 out_shape=(xy_width, xy_width, xy_width),
                                 voxel_size=(ressize, ressize, ressize),
                                 orientation=orient,
                                 order=spline_order)

    # Keep only the first volume of 4D inputs
    FileDat = FileRead_res.get_fdata()
    if FileDat.ndim > 3:
        FileDat = FileDat[:, :, :, 0]

    ### Rescale image
    if mask == 0:
        FileDat = rescaleimages.rescaleImage(FileDat, minInt=0, maxInt=1,
                                             perc=99.9, method=rescalemethod)

    # Save as numpy file
    if out_path:
        _np.save(out_path, FileDat)
    # Return numpy array
    else:
        return FileDat, FileRead_res
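# Hypothetical usage of loadrespadsave; the input path and parameter values are
# placeholders. This conforms a scan to a 128-voxel cube at 1.5 mm isotropic
# resolution in LPS orientation and returns the rescaled array together with the
# conformed nibabel image.
arr, conformed = loadrespadsave('subject01_T1.nii.gz', xy_width=128, ressize=1.5,
                                orient='LPS', mask=0, rescalemethod='minmax')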
def get_mask(self, weights_path=None, post_process=True, inplace=False):
    """
    Estimate a mask from the provided input data.

    Parameters
    ----------
    weights_path : str, optional
        Path to custom neural network weights. Defaults to segment home and
        will download the latest weights if nothing is specified.
    post_process : bool, optional
        Default True
        Keep only the two largest connected volumes in the mask. Note this
        may cause issues with subjects that have more or fewer than two
        kidneys.
    inplace : bool, optional
        Default False
        If True, no numpy array of the mask will be returned; instead only
        the mask attributes in the class will be updated. Can be useful if
        only kidney volumes are desired rather than the voxel-by-voxel masks.

    Returns
    -------
    mask : np.ndarray, optional
        The estimated probability that each voxel is renal tissue.
    """
    if weights_path is None:
        weights_path = fetch.Weights().path

    # Conform the input to the fixed geometry the network expects.
    img = conform(self._img,
                  out_shape=(240, 240, self.shape[-1]),
                  voxel_size=(1.458, 1.458, self.zoom[-1] * 0.998),
                  orientation='LIP')
    data = img.get_fdata()
    data = np.flip(data, 1)
    data = np.swapaxes(data, 0, 2)
    data = np.swapaxes(data, 1, 2)
    data = self._rescale(data)
    data = resize(data, (data.shape[0], 256, 256))
    data = data.reshape((data.shape[0], data.shape[1], data.shape[2], 1))

    model = load_model(weights_path,
                       custom_objects={'dice_coef_loss': self._dice_coef_loss,
                                       'dice_coef': self._dice_coef})
    batch_size = 2 ** 3
    mask = model.predict(data, batch_size=batch_size)

    # Undo the axis manipulations to bring the mask back into image space.
    mask = np.squeeze(mask)
    mask = np.swapaxes(mask, 0, 2)
    mask = np.swapaxes(mask, 0, 1)
    mask = np.flip(mask, 1)
    mask = resize(mask, (240, 240, self.shape[-1]))

    if post_process:
        cleaned_mask = self._cleanup(mask > 0.05)
        mask[cleaned_mask < 0.5] = 0.0

    # Conform the mask back onto the original image grid and compute volumes.
    mask_img = nib.Nifti1Image(mask, img.affine)
    self._mask_img = conform(mask_img,
                             out_shape=self.shape,
                             voxel_size=self.zoom,
                             orientation=self.orientation)
    self.mask = self._rescale(self._mask_img.get_fdata(), 0, 1)
    self._mask_img = nib.Nifti1Image(self.mask, self._mask_img.affine)
    self.tkv = (np.sum(self.mask > 0.5) * np.prod(self.zoom)) / 1000
    self.lkv = (np.sum(self.mask[120:] > 0.5) * np.prod(self.zoom)) / 1000
    self.rkv = (np.sum(self.mask[:120] > 0.5) * np.prod(self.zoom)) / 1000

    if not inplace:
        return self.mask
import argparse

import numpy as np
import nibabel as nib
import nibabel.funcs as nib_funcs
import nibabel.orientations as nib_orientations
import nibabel.processing as nib_processing

parser = argparse.ArgumentParser(
    description='Reorient to LIA and resample to 1mm iso-voxel resolution if required')
parser.add_argument('source', type=str, help='Input volume')
parser.add_argument('destination', type=str, help='Normalized volume')
args = parser.parse_args()

src_nib = nib_funcs.squeeze_image(nib.load(args.source))
current_orientation = ''.join(nib.aff2axcodes(src_nib.affine))
print('Input: {} [{}]'.format(src_nib.header.get_zooms(), current_orientation))

# Avoid resampling if already 1mm iso-voxel
# Note: Also in cases of tiny rounding error, e.g. (1.0000001, 1.0000001, 1.0)
if not np.allclose(src_nib.header.get_zooms(), [1, 1, 1]):
    # requires re-sampling
    print('Resampling')
    dst_nib = nib_processing.conform(src_nib, orientation='LIA')
elif current_orientation != 'LIA':
    # requires just a reorientation
    print('Reorienting {} to LIA'.format(current_orientation))
    start_ornt = nib_orientations.io_orientation(src_nib.affine)
    end_ornt = nib_orientations.axcodes2ornt('LIA')
    transform = nib_orientations.ornt_transform(start_ornt, end_ornt)
    dst_nib = src_nib.as_reoriented(transform)
else:
    dst_nib = src_nib

nib.save(dst_nib, args.destination)
# Load the model
model = _get_model(model_path)

# Load the input file
_orig_infile = nib.load(data)
img = _orig_infile
ndim = len(img.shape)
if ndim != 3:
    raise ValueError(
        "Input volume must have three dimensions but got {}.".format(ndim))

# check data dimension and conform
if img.shape != required_shape:
    print("++ Conforming volume to 1mm^3 voxels and size 256x256x256.")
    img = conform(_orig_infile, out_shape=required_shape)
inputs = np.asarray(img.dataobj)
img.uncache()
inputs = inputs.astype(np.float32)

# forward pass of the model
outputs = predict_from_array(inputs,
                             model,
                             block_shape,
                             batch_size=1,
                             normalizer=standardize_numpy,
                             n_samples=n_samples,
                             return_variance=True,
                             return_entropy=True)