Example #1
    def _make_one_example(features_filepath, labels_filepath):
        """Create a `tf.train.Example` instance of the given arrays of volumetric features and labels."""
        dtype = _TFRECORDS_FEATURES_DTYPE
        x = nib.load(features_filepath)
        if to_ras:
            x = nib.as_closest_canonical(x)
        xdata = x.get_fdata(caching='unchanged', dtype=dtype)
        y = nib.load(labels_filepath)
        if to_ras:
            y = nib.as_closest_canonical(y)
        ydata = y.get_fdata(caching='unchanged', dtype=dtype)

        def bytes_feature(value):
            return tf.train.Feature(bytes_list=tf.train.BytesList(
                value=[value]))

        feature = {
            'volume':
            bytes_feature(xdata.ravel().tostring()),
            'volume_affine':
            bytes_feature(x.affine.astype(dtype).ravel().tostring()),
            'label':
            bytes_feature(ydata.ravel().tostring()),
            'label_affine':
            bytes_feature(y.affine.astype(dtype).ravel().tostring()),
        }
        return tf.train.Example(features=tf.train.Features(feature=feature))
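The example above only writes the TFRecord; a minimal parsing sketch for the reading side is shown below. The feature keys mirror the dict above, but the dtype (float32 standing in for _TFRECORDS_FEATURES_DTYPE) and the volume shape are assumptions for illustration.

import tensorflow as tf

feature_spec = {
    'volume': tf.io.FixedLenFeature([], tf.string),
    'volume_affine': tf.io.FixedLenFeature([], tf.string),
    'label': tf.io.FixedLenFeature([], tf.string),
    'label_affine': tf.io.FixedLenFeature([], tf.string),
}

def parse_example(serialized, volume_shape=(256, 256, 256)):
    # Decode the raw byte strings back into arrays; dtype and shape must match
    # what _make_one_example wrote above.
    parsed = tf.io.parse_single_example(serialized, feature_spec)
    volume = tf.reshape(tf.io.decode_raw(parsed['volume'], tf.float32), volume_shape)
    label = tf.reshape(tf.io.decode_raw(parsed['label'], tf.float32), volume_shape)
    return volume, label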
Example #2
    def reorient_images(self):
        if (self.reorient_flag):
            print(
                "Reorient flag is set to true, Hence reorienting both images to Right Anterior Superior"
            )
            canonical_img_1 = nb.as_closest_canonical(self.orig_nii_stationary)
            print(" ============= ============== ===================")
            print("orientation changed  t1 affine: {}".format(
                canonical_img_1.affine))
            print(" ============= ============== ===================")
            print("orientation changed  t1 : {}".format(
                nb.aff2axcodes(canonical_img_1.affine)))
            print(" ============= ============== ===================")
            canonical_img_2 = nb.as_closest_canonical(self.orig_nii_moving)
            print(" ============= ============== ===================")
            print("orientation changed  t2 affine: {}".format(
                canonical_img_2.affine))
            print(" ============= ============== ===================")
            print("orientation changed  t1 : {}".format(
                nb.aff2axcodes(canonical_img_2.affine)))
            print(" ============= ============== ===================")

            self.canonical_img_1 = canonical_img_1
            self.canonical_img_2 = canonical_img_2
            return self.canonical_img_1, self.canonical_img_2
        else:
            print(" ============= ============== ===================")
            print("Not reorienting the images as reorient flag is false")
            print(" ============= ============== ===================")
            self.canonical_img_1 = self.orig_nii_stationary
            self.canonical_img_2 = self.orig_nii_moving
            return self.canonical_img_1, self.canonical_img_2
Example #3
def get_nii_nii(volpath,segpath):
    vol = nib.as_closest_canonical(nib.load(volpath))
    vol = vol.get_data().astype(np.int16)
    seg = nib.as_closest_canonical(nib.load(segpath))
    seg = seg.get_data().astype(np.int16)

    return vol, seg
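get_data() is deprecated in recent nibabel releases; below is a sketch of the same loader using the dataobj API instead (behaviour otherwise unchanged, function name is illustrative).

import nibabel as nib
import numpy as np

def get_nii_nii_v2(volpath, segpath):
    # np.asanyarray(img.dataobj) replaces the deprecated get_data() call.
    vol = np.asanyarray(nib.as_closest_canonical(nib.load(volpath)).dataobj).astype(np.int16)
    seg = np.asanyarray(nib.as_closest_canonical(nib.load(segpath)).dataobj).astype(np.int16)
    return vol, seg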
Example #4
def reorient(imgloc, segloc=None):

    imagedata = nib.load(imgloc)
    orig_affine = imagedata.affine
    orig_header = imagedata.header
    imagedata = nib.as_closest_canonical(imagedata)
    img_affine = imagedata.affine
    numpyimage = imagedata.get_data().astype(IMG_DTYPE)
    numpyseg = None
    print('image :    ', nib.orientations.aff2axcodes(orig_affine), ' to ',
          nib.orientations.aff2axcodes(img_affine))

    if segloc is not None:
        segdata = nib.load(segloc)
        old_affine = segdata.affine
        segdata = nib.as_closest_canonical(segdata)
        seg_affine = segdata.affine
        if not np.allclose(seg_affine, img_affine):
            segcopy = nib.load(segloc).get_data()
            copy_header = orig_header.copy()
            segdata = nib.nifti1.Nifti1Image(segcopy,
                                             orig_affine,
                                             header=copy_header)
            segdata = nib.as_closest_canonical(segdata)
            seg_affine = segdata.affine
        print('seg   :    ', nib.orientations.aff2axcodes(old_affine), ' to ',
              nib.orientations.aff2axcodes(seg_affine))
        numpyseg = segdata.get_data().astype(SEG_DTYPE)

    return numpyimage, orig_header, numpyseg
Example #5
def load(filename,
         gradients_file=None,
         b0_file=None,
         brainmask_file=None,
         fmap_file=None):
    """Load DWI data."""
    filename = Path(filename)
    if filename.name.endswith(".h5"):
        return DWI.from_filename(filename)

    if not gradients_file:
        raise RuntimeError("A gradients file is necessary")

    img = nb.as_closest_canonical(nb.load(filename))
    retval = DWI(affine=img.affine)
    grad = np.loadtxt(gradients_file, dtype="float32").T
    gradmsk = grad[-1] > 50
    retval.gradients = grad[..., gradmsk]
    retval.dataobj = img.get_fdata(dtype="float32")[..., gradmsk]

    if b0_file:
        b0img = nb.as_closest_canonical(nb.load(b0_file))
        retval.bzero = np.asanyarray(b0img.dataobj)

    if brainmask_file:
        mask = nb.as_closest_canonical(nb.load(brainmask_file))
        retval.brainmask = np.asanyarray(mask.dataobj)

    if fmap_file:
        fmapimg = nb.as_closest_canonical(nb.load(fmap_file))
        retval.fieldmap = fmapimg.get_fdata(dtype="float32")

    return retval
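A minimal usage sketch for the loader above; the file names are placeholders, and the gradients file is assumed to be a plain-text table whose last column holds the b-values (hence the b > 50 mask).

dwi = load("dwi.nii.gz", gradients_file="dwi_rasb.txt")
print(dwi.dataobj.shape)    # (i, j, k, n) with only the b > 50 volumes kept
print(dwi.gradients.shape)  # (4, n), one [x, y, z, b] column per kept volume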
Example #6
def reorient(imgloc, segloc=None):
    imagedata = nib.load(imgloc)
    orig_affine = imagedata.affine
    orig_header = imagedata.header
    imagedata = nib.as_closest_canonical(imagedata)
    img_affine = imagedata.affine
    numpyimage = imagedata.get_data().astype(np.int16)
    numpyseg = None

    if segloc is not None:
        segdata = nib.load(segloc)
        old_affine = segdata.affine
        segdata = nib.as_closest_canonical(segdata)
        seg_affine = segdata.affine
        if not np.allclose(seg_affine, img_affine):
            segcopy = nib.load(segloc).get_data()
            copy_header = orig_header.copy()
            segdata = nib.nifti1.Nifti1Image(segcopy,
                                             orig_affine,
                                             header=copy_header)
            segdata = nib.as_closest_canonical(segdata)
            seg_affine = segdata.affine
        numpyseg = segdata.get_data().astype(np.uint8)

    return numpyimage, orig_header, numpyseg
Example #7
    def __init__(self,
                 input_filename,
                 gt_filename,
                 cache=True,
                 canonical=False):
        self.input_filename = input_filename
        self.gt_filename = gt_filename
        self.canonical = canonical
        self.cache = cache

        self.input_handle = nib.load(self.input_filename)

        # Unlabeled data (inference time)
        if self.gt_filename is None:
            self.gt_handle = None
        else:
            self.gt_handle = nib.load(self.gt_filename)

        if len(self.input_handle.shape) > 3:
            raise RuntimeError("4-dimensional volumes not supported.")

        # Sanity check for dimensions, should be the same
        input_shape, gt_shape = self.get_pair_shapes()

        if self.gt_handle is not None:
            if not np.allclose(input_shape, gt_shape):
                raise RuntimeError(
                    'Input and ground truth with different dimensions.')

        if self.canonical:
            self.input_handle = nib.as_closest_canonical(self.input_handle)

            # Unlabeled data
            if self.gt_handle is not None:
                self.gt_handle = nib.as_closest_canonical(self.gt_handle)
Example #8
def extract_mid_slice_and_convert_coordinates_to_heatmaps(
        path, suffix, aim=-1):
    """
    This function takes as input a path to a dataset and generates a set of images:
    (i) mid-sagittal image and
    (ii) heatmap of disc labels associated with the mid-sagittal image.

    Example::

        ivadomed_prepare_dataset_vertebral_labeling -p path/to/bids -s _T2w -a 0
    
    Args:
        path (string): path to BIDS dataset from which images will be generated. Flag: ``--path``, ``-p``
        suffix (string): suffix of image that will be processed (e.g., T2w). Flag: ``--suffix``, ``-s``
        aim (int): If aim is not 0, retrieves only labels with value = aim, else create heatmap with all labels.
                  Flag: ``--aim``, ``-a``

    Returns:
        None. Images are saved in BIDS folder
    """
    t = os.listdir(path)
    t.remove('derivatives')

    for i in range(len(t)):
        sub = t[i]
        path_image = os.path.join(path, t[i], 'anat',
                                  t[i] + suffix + '.nii.gz')
        if os.path.isfile(path_image):
            path_label = os.path.join(
                path, 'derivatives', 'labels', t[i], 'anat',
                t[i] + suffix + '_labels-disc-manual.nii.gz')
            list_points = mask2label(path_label, aim=aim)
            image_ref = nib.load(path_image)
            nib_ref_can = nib.as_closest_canonical(image_ref)
            imsh = np.array(nib_ref_can.dataobj).shape
            mid_nifti = imed_preprocessing.get_midslice_average(
                path_image, list_points[0][0], slice_axis=0)
            nib.save(
                mid_nifti,
                os.path.join(path, t[i], 'anat',
                             t[i] + suffix + '_mid.nii.gz'))
            lab = nib.load(path_label)
            nib_ref_can = nib.as_closest_canonical(lab)
            label_array = np.zeros(imsh[1:])

            for j in range(len(list_points)):
                label_array[list_points[j][1], list_points[j][2]] = 1

            heatmap = imed_maths.heatmap_generation(label_array[:, :], 10)
            arr_pred_ref_space = imed_utils.reorient_image(
                np.expand_dims(heatmap[:, :], axis=0), 2, lab, nib_ref_can)
            nib_pred = nib.Nifti1Image(arr_pred_ref_space, lab.affine)
            nib.save(
                nib_pred,
                os.path.join(
                    path, 'derivatives', 'labels', t[i], 'anat',
                    t[i] + suffix + '_mid_heatmap' + str(aim) + '.nii.gz'))
        else:
            pass
Example #9
def orient_correctly(img_nii):
    orientation = nibabel.io_orientation(img_nii.affine)
    try:
        img_new = nibabel.as_closest_canonical(img_nii,
                                               True).get_data().astype(float)
    except:
        img_new = nibabel.as_closest_canonical(img_nii,
                                               False).get_data().astype(float)
    img_trans = np.transpose(img_new, (0, 2, 1))
    img_flip = np.flip(img_trans, 0)
    img_flip = np.flip(img_flip, 1)
    return img_flip, orientation
Example #10
    def __init__(self, input_filenames, gt_filenames, metadata=None, cache=True, canonical=False):

        self.input_filenames = input_filenames
        self.gt_filenames = gt_filenames
        self.metadata = metadata
        self.canonical = canonical
        self.cache = cache

        # list of the images
        self.input_handle = []

        # loop over the filenames (list)
        for input_file in self.input_filenames:
            input_img = nib.load(input_file)
            self.input_handle.append(input_img)
            if len(input_img.shape) > 3:
                raise RuntimeError("4-dimensional volumes not supported.")

        # list of GT for multiclass segmentation
        self.gt_handle = []

        # Unlabeled data (inference time)
        if self.gt_filenames is not None:
            for gt in self.gt_filenames:
                if gt is not None:
                    self.gt_handle.append(nib.load(gt))
                else:
                    self.gt_handle.append(None)

        # Sanity check for dimensions, should be the same
        input_shape, gt_shape = self.get_pair_shapes()

        if self.gt_filenames is not None:
            if not np.allclose(input_shape, gt_shape):
                raise RuntimeError('Input and ground truth with different dimensions.')

        if self.canonical:
            for idx, handle in enumerate(self.input_handle):
                self.input_handle[idx] = nib.as_closest_canonical(handle)

            # Unlabeled data
            if self.gt_filenames is not None:
                for idx, gt in enumerate(self.gt_handle):
                    if gt is not None:
                        self.gt_handle[idx] = nib.as_closest_canonical(gt)

        if self.metadata:
            self.metadata = []
            for data, input_filename in zip(metadata, input_filenames):
                data["input_filenames"] = input_filename
                data["gt_filenames"] = gt_filenames
                self.metadata.append(data)
Example #11
    def OnOpenOverlay(self, pubsub_evt):
        dirpath = dialog.ShowOpenNiftiDialog()
        nifti_image = nifti.ReadNifti(dirpath)

        if nifti_image:
            # Rearranges the axes of the image to be closest to RAS+ orientation,
            # so the slices of the image (axial, coronal, sagittal) are shown correctly.
            # See http://nipy.org/nibabel/image_orientation.html
            nifti_image = as_closest_canonical(nifti_image)

            imagedata = nifti_image.get_data()

            # Conversion of type of numpy array (to float)
            imagedata = imagedata.astype('f')

            slc = sl.Slice()
            y, x = slc.buffer_slices["AXIAL"].image.shape
            z = slc.buffer_slices["SAGITAL"].image.shape[0]

            # Add borders (with zero values) to overlay array to fit the dimensions of the 3D image
            pad_x = int(numpy.abs((x / 2.0) - (imagedata.shape[0] / 2.0)))
            pad_y = int(numpy.abs((y / 2.0) - (imagedata.shape[1] / 2.0)))
            pad_z = int(numpy.abs((z / 2.0) - (imagedata.shape[2] / 2.0)))
            final_overlay = numpy.pad(imagedata, ((pad_x,pad_x),(pad_y,pad_y),(pad_z,pad_z)), mode='constant', constant_values=0)

            # Modifications to work with Invesalius (Z,Y,X)
            final_overlay = numpy.swapaxes(final_overlay, 0, 2)
            final_overlay = numpy.fliplr(final_overlay)

            Publisher.sendMessage('Set slice overlay', final_overlay)
Example #12
File: FROG.py Project: valette/FROG
def flipAndSaveToRAS(filename):

    #Recover the image object
    imageObj = nib.load(filename)

    #Get the current orientation
    CurrentOrientation = nib.aff2axcodes(imageObj.affine)
    print("The current orientation is : ", CurrentOrientation)

    #Check if the current orientation is already RAS+
    if CurrentOrientation == ('R', 'A', 'S'):

        print(
            "Image already recorded into the RAS+ orientation, nothing to do")
        return filename

    else:
        #Flip the image to RAS
        flippedImage = nib.as_closest_canonical(imageObj)

        ##Check the new orientation
        NewOrientation = nib.aff2axcodes(flippedImage.affine)

        #Set qform_code to 1 so that the qform matrix can be used in further processing
        flippedImage.header['qform_code'] = 1

        #Save the flipped image
        nib.save(flippedImage, RASFile)

        print("The new orientation is now : ", NewOrientation)
        return RASFile
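Note that RASFile is not defined inside the snippet; it is presumably a module-level output path in the original project. A self-contained variant, assuming the output name is simply derived from the input name, could look like this:

import nibabel as nib

def flip_and_save_to_ras(filename, out_file=None):
    # Variant of the function above that derives an output path instead of
    # relying on a module-level RASFile variable (an assumption on our part).
    image_obj = nib.load(filename)
    if nib.aff2axcodes(image_obj.affine) == ('R', 'A', 'S'):
        print("Image already recorded in the RAS+ orientation, nothing to do")
        return filename
    flipped = nib.as_closest_canonical(image_obj)
    flipped.header['qform_code'] = 1  # mark the qform as usable downstream
    if out_file is None:
        out_file = filename.replace('.nii', '_RAS.nii', 1)
    nib.save(flipped, out_file)
    print("The new orientation is now:", nib.aff2axcodes(flipped.affine))
    return out_file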
Example #13
def load_medical_image(path,
                       crop_size=(0, 0, 0),
                       crop=(0, 0, 0),
                       type=None,
                       normalization="mean",
                       resample=None,
                       viz3d=False,
                       to_canonical=False):
    slices_crop, w_crop, h_crop = crop
    img_nii = nib.load(path)
    img_np = np.squeeze(img_nii.get_fdata(dtype=np.float32))

    if viz3d:
        return torch.from_numpy(img_np)

    if to_canonical:
        img_nii = nib.as_closest_canonical(img_nii)
        img_np = img_nii.get_fdata(dtype=np.float32)

    if crop_size[0] != 0:
        img_np = img_np[slices_crop:slices_crop + crop_size[0],
                        w_crop:w_crop + crop_size[1],
                        h_crop:h_crop + crop_size[2]]

    if resample is not None:
        img_nii = resample_to_output(img_nii, voxel_sizes=resample)
        img_np = np.squeeze(img_nii.get_fdata(dtype=np.float32))

    img_tensor = torch.from_numpy(img_np)  # slice , width, height
    #print('final tensor shape', img_tensor.shape)

    if type != "label":
        img_tensor = normalize(img_tensor, normalization=normalization)
    return img_tensor
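A minimal usage sketch for the loader above; the path, crop window, and normalization mode are illustrative placeholders.

t1 = load_medical_image("t1.nii.gz",
                        crop_size=(64, 64, 64), crop=(10, 10, 10),
                        to_canonical=True, normalization="mean")
print(t1.shape)  # torch.Size([64, 64, 64]) when the crop window fits inside the volume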
Example #14
def transform_input_images(image_path, scan_names):
    """
    Transform input images for processing
    + n4 normalization between scans
    + move t1 to the canonical space
    Images are stored in the tmp/ folder
    """

    # check if tmp folder is available
    tmp_folder = os.path.join(image_path, 'tmp')
    if os.path.exists(tmp_folder) is False:
        os.mkdir(tmp_folder)

    # normalize images
    for s in scan_names:
        current_scan = os.path.join(image_path, s)
        nifti_orig = nib.load(current_scan)
        im_ = nifti_orig.get_data()

        processed_scan = nib.Nifti1Image(im_.astype('<f4'),
                                         affine=nifti_orig.affine)

        # check for extra dims
        if len(nifti_orig.get_data().shape) > 3:
            processed_scan = nib.Nifti1Image(np.squeeze(
                processed_scan.get_data()),
                                             affine=nifti_orig.affine)

        processed_scan.get_data()[:] = normalize_data(
            processed_scan.get_data(), norm_type='zero_one')

        t1_nifti_canonical = nib.as_closest_canonical(processed_scan)
        t1_nifti_canonical.to_filename(os.path.join(tmp_folder, s))
Example #15
def reorient(in_file):
    import nibabel as nb
    import os
    _, outfile = os.path.split(in_file)
    nii = nb.as_closest_canonical(nb.load(in_file))
    nii.to_filename(outfile)
    return os.path.abspath(outfile)
Example #16
    def main(self, niifile, slicerange, labelinclude, labelexclude, labelrange,
             bottompx, strokepx):
        nii = nibabel.load(niifile)
        nii = nibabel.as_closest_canonical(nii)
        hdr = nii.get_header()
        img = nii.get_data()
        pngdir = self.tempdir('png')
        svgdir = self.tempdir('svg')
        svxdir = self.tempdir('svx')
        densdir = self.tempdir('svx/dens')

        # autodetect slicerange if not specified
        if slicerange == []:
            mask = applyMask(img, labelrange, labelinclude, labelexclude)
            select = numpy.flatnonzero(mask.max(axis=0).max(axis=1))
            slicerange = [select[0], select[-1]]

        srange = range(slicerange[0], slicerange[1])
        mask = img[:, srange, :]
        mask = applyMask(mask, labelrange, labelinclude,
                         labelexclude).astype(float)
        if strokepx:
            if bottompx:
                bottompx = range(0, bottompx) if bottompx > 0 else range(
                    mask.shape[1] + bottompx, mask.shape[1])
                solidmask = mask[:, bottompx, :]
            mask = binaryContour(mask)
            if bottompx:
                mask[:, bottompx, :] = solidmask
            radius = strokepx * math.pi / 3
            ball = ball3d(radius) - 1
            mask = ndimage.grey_dilation(mask, structure=ball)

        mask = (mask * 255.999).astype(numpy.uint8)
        return FancyOutput(mask=mask, slicerange=slicerange)
Example #17
def read_image(img_spec, error_msg='image', num_dims=3):
    """Image reader. Removes stray values close to zero (smaller than 5 %ile)."""

    if isinstance(img_spec, str):
        if pexists(realpath(img_spec)):
            hdr = nib.load(img_spec)
            # trying to stick to an orientation
            hdr = nib.as_closest_canonical(hdr)
            img = hdr.get_data()
        else:
            raise IOError('Given path to {} does not exist!\n\t{}'.format(
                error_msg, img_spec))
    elif isinstance(img_spec, np.ndarray):
        img = img_spec
    else:
        raise ValueError(
            'Invalid input specified! '
            'Input either a path to image data, or provide 3d Matrix directly.'
        )

    if num_dims == 3:
        img = check_image_is_3d(img)
    elif num_dims == 4:
        check_image_is_4d(img)
    else:
        raise ValueError('Requested check for {} dims - allowed: 3 or 4!'.format(num_dims))

    if not np.issubdtype(img.dtype, np.float64):
        img = img.astype('float32')

    return img
Example #18
    def buildAnat(self, parFiles):
        """ Build a 3D structural/anatomical Nifti image from the par/rec file

        On the Philips scanner, the entire anat image is assumed to be contained in
        a single par/rec file pair. Open the par file, extract the image data,
        realign to RAS+ and build a nifti object

        Parameters
        ----------
        parFiles : list
            list containing the file names (file names ONLY, no path) of all
            par files in the series directory. If this is an anatomical image
            there should only be a single file in the list

        Returns
        -------
        anatImage_RAS : Nifti1Image
            nifti-1 formatted image of the 3D anatomical data, oriented in
            RAS+

        See Also
        --------
        nibabel.nifti1.Nifti1Image()

        """
        # should only be a single parFile in the list
        anatImage = nib.load(join(self.seriesDir, parFiles[0]), strict_sort=True)

        # convert to RAS+
        anatImage_RAS = nib.as_closest_canonical(anatImage)

        print('Nifti image dims: {}'.format(anatImage_RAS.shape))

        return anatImage_RAS
Example #19
def reorient_t1w(img: str, anat_preproc_dir: Path):
    """Reorients input image to RAS+. Essentially a wrapper on `nib.as_closest_canonical` and `normalize_xform`.

    Parameters
    ----------
    img : str
        Path to image being reoriented
    anat_preproc_dir : Path
        Location of preproc directory for anatomical files.


    Returns
    -------
    str
        Path to reoriented image
    """

    # Load image, orient as RAS
    orig_img = nib.load(img)
    reoriented = nib.as_closest_canonical(orig_img)
    normalized = normalize_xform(reoriented)

    # Image may be reoriented
    out_name = anat_preproc_dir / f"{get_filename(img)}"
    if normalized is not orig_img:
        print(f"Reorienting {img} to RAS+...")
        out_name = str(out_name) + "_reor_RAS.nii.gz"
    else:
        out_name = str(out_name) + "_RAS.nii.gz"

    normalized.to_filename(out_name)

    return out_name
Example #20
    def map(self, func_name, data, thresh=None, **kwargs):
        """Map a dataset across the mosaic of axes.

        Parameters
        ----------
        func_name : str
            Name of a pyplot function.
        data : filename, nibabel image, or array
            Dataset to plot.
        thresh : float
            Don't map voxels in ``data`` below this threshold.
        kwargs : key, value mappings
            Other keyword arguments are passed to the plotting function.

        """
        if isinstance(data, string_types):
            data_img = nib.load(data)
        elif isinstance(data, np.ndarray):
            data_img = nib.Nifti1Image(data, np.eye(4))
        else:
            data_img = data
        data_img = nib.as_closest_canonical(data_img)
        data = data_img.get_data()
        data = data.astype(np.float)
        if thresh is not None:
            data[data < thresh] = np.nan
        data = data[self.x_slice, self.y_slice, self.z_slice]
        self._map(func_name, data, **kwargs)
Example #21
def mask2label(path_label, aim=0):
    """
    Retrieve points coordinates and values from a label file containing single-voxel labels
    Args:
        path_label (str): path of nifti image
        aim (int): 0 will return all points with label between 3 and 30, any other int > 0 will return
        only the coordinates of points with label defined by aim.

    Returns:
        ndarray: array containing the asked point in the format [x,y,z,value] in the RAS orientation.

    """
    image = nib.load(path_label)
    image = nib.as_closest_canonical(image)
    arr = np.array(image.dataobj)
    list_label_image = []
    # Arr non zero used since these are single voxel label
    for i in range(len(arr.nonzero()[0])):
        x = arr.nonzero()[0][i]
        y = arr.nonzero()[1][i]
        z = arr.nonzero()[2][i]
        # need to check every points
        if aim == 0:
            # we don't want to account for pmj (label 49) nor C1/C2 which is hard to distinguish.
            if arr[x, y, z] < 30 and arr[x, y, z] != 1:
                list_label_image.append([x, y, z, arr[x, y, z]])
        elif aim > 0:
            if arr[x, y, z] == aim:
                list_label_image.append([x, y, z, arr[x, y, z]])
    list_label_image.sort(key=lambda x: x[3])
    return list_label_image
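A minimal usage sketch; the label file name is a placeholder.

points = mask2label("sub-01_T2w_labels-disc-manual.nii.gz", aim=0)
for x, y, z, value in points:
    print("label {:.0f} at voxel ({}, {}, {})".format(value, x, y, z))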
Example #22
    def buildAnat(self, parFiles):
        """ Build a 3D structural/anatomical Nifti image from the par/rec file

        On the Philips scanner, the entire anat image is assumed to be contained in
        a single par/rec file pair. Open the par file, extract the image data,
        realign to RAS+ and build a nifti object

        Parameters
        ----------
        parFiles : list
            list containing the file names (file names ONLY, no path) of all
            par files in the series directory. If this is an anatomical image
            there should only be a single file in the list

        Returns
        -------
        anatImage_RAS : Nifti1Image
            nifti-1 formatted image of the 3D anatomical data, oriented in
            RAS+

        See Also
        --------
        nibabel.nifti1.Nifti1Image()

        """
        # should only be a single parFile in the list
        anatImage = nib.load(join(self.seriesDir, parFiles[0]),
                             strict_sort=True)

        # convert to RAS+
        anatImage_RAS = nib.as_closest_canonical(anatImage)

        print('Nifti image dims: {}'.format(anatImage_RAS.shape))

        return anatImage_RAS
Example #23
    def _run_interface(self, runtime):
        # Load image, orient as RAS
        fname = self.inputs.in_file
        orig_img = nb.load(fname)
        reoriented = nb.as_closest_canonical(orig_img)

        # Reconstruct transform from orig to reoriented image
        ornt_xfm = nb.orientations.inv_ornt_aff(
            nb.io_orientation(orig_img.affine), orig_img.shape)

        normalized = normalize_xform(reoriented)

        # Image may be reoriented
        if normalized is not orig_img:
            out_name = fname_presuffix(fname,
                                       suffix='_ras',
                                       newpath=runtime.cwd)
            normalized.to_filename(out_name)
        else:
            out_name = fname

        mat_name = fname_presuffix(fname,
                                   suffix='.mat',
                                   newpath=runtime.cwd,
                                   use_ext=False)
        np.savetxt(mat_name, ornt_xfm, fmt='%.08f')

        self._results['out_file'] = out_name
        self._results['transform'] = mat_name

        return runtime
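For intuition, a small sketch of what the saved orientation transform encodes, assuming any 3D NIfTI file as input: composing the original affine with inv_ornt_aff reproduces the RAS+ affine of the reoriented image. Example #51 further down asserts the same relationship after composing with its conform transform.

import numpy as np
import nibabel as nb

orig = nb.load("input.nii.gz")  # placeholder path
reoriented = nb.as_closest_canonical(orig)
ornt_xfm = nb.orientations.inv_ornt_aff(
    nb.io_orientation(orig.affine), orig.shape)
# The original affine composed with the transform equals the RAS+ affine.
assert np.allclose(orig.affine.dot(ornt_xfm), reoriented.affine)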
Example #24
File: misc.py Project: kaitj/mriqc
def reorient_and_discard_non_steady(in_file, float32=False):
    import os
    import numpy as np
    import nibabel as nb
    from statsmodels.robust.scale import mad

    _, outfile = os.path.split(in_file)

    nii = nb.as_closest_canonical(nb.load(in_file))
    in_data = nii.get_data()

    # downcast to reduce space consumption and improve performance
    if float32 and np.dtype(in_data.dtype).itemsize > 4:
        in_data = in_data.astype(np.float32)

    data = in_data[:, :, :, :50]
    timeseries = data.max(axis=0).max(axis=0).max(axis=0)
    outlier_timecourse = (timeseries - np.median(timeseries)) / mad(
        timeseries)
    exclude_index = 0
    for i in range(10):
        if outlier_timecourse[i] > 10:
            exclude_index += 1
        else:
            break

    nb.Nifti1Image(in_data[:, :, :, exclude_index:], nii.affine, nii.header).to_filename(outfile)
    nii.uncache()
    return exclude_index, os.path.abspath(outfile)
Example #25
def weights_to_nifti(weights, image, output_filename):
    """

    Args:
        weights:
        image:
        output_filename:

    Returns:

    """

    # Normalize with 2-norm
    # features = 2 * weights / np.power(norm(weights.flatten(), 2), 2)

    # Normalize inf-norm
    features = weights / abs(weights).max()

    img = nib.load(image)
    canonical_img = nib.as_closest_canonical(img)
    hd = canonical_img.header
    qform = np.zeros((4, 4))

    for i in range(1, 4):
        qform[i-1, i-1] = hd['pixdim'][i]
        qform[i-1, 3] = -1.0 * hd['pixdim'][i] * hd['dim'][i] / 2.0

    output_image = nib.Nifti1Image(features, qform)
    nib.save(output_image, output_filename)
Example #26
def get_midslice_average(path_im, ind, slice_axis=0):
    """
    Extract an average 2D slice out of a 3D volume. This image is generated by
    averaging the slices around the given index (3 on either side by default)
    Args:
        path_im (string): path to image
        ind (int): index of the slice around which we will average
        slice_axis (int): Slice axis according to RAS convention

    Returns:
        nifti: a single slice nifti object containing the average image in the image space.

    """
    image = nib.load(path_im)
    image_can = nib.as_closest_canonical(image)
    arr_can = np.array(image_can.dataobj)
    numb_of_slice = 3
    # Avoid out of bound error by changing the number of slice taken if needed
    if ind + 3 > arr_can.shape[slice_axis]:
        numb_of_slice = arr_can.shape[slice_axis] - ind
    if ind - numb_of_slice < 0:
        numb_of_slice = ind

    slc = [slice(None)] * len(arr_can.shape)
    slc[slice_axis] = slice(ind - numb_of_slice, ind + numb_of_slice)
    mid = np.mean(arr_can[tuple(slc)], slice_axis)

    arr_pred_ref_space = imed_utils.reorient_image(
        np.expand_dims(mid[:, :], axis=slice_axis), 2, image,
        image_can).astype('float32')
    nib_pred = nib.Nifti1Image(arr_pred_ref_space, image.affine)

    return nib_pred
Example #27
def ReadOthers(dir_):
    """
    Read the given Analyze, NIfTI, Compressed NIfTI or PAR/REC file,
    remove singleton image dimensions and convert image orientation to
    RAS+ canonical coordinate system. The Analyze header does not support
    an affine transformation matrix, thus it cannot be converted automatically
    to canonical orientation.

    :param dir_: file path
    :return: imagedata object
    """

    if not const.VTK_WARNING:
        log_path = os.path.join(const.USER_LOG_DIR, 'vtkoutput.txt')
        fow = vtk.vtkFileOutputWindow()
        fow.SetFileName(log_path.encode(const.FS_ENCODE))
        ow = vtk.vtkOutputWindow()
        ow.SetInstance(fow)

    try:
        imagedata = nib.squeeze_image(nib.load(dir_))
        imagedata = nib.as_closest_canonical(imagedata)
        imagedata.update_header()
    except (nib.filebasedimages.ImageFileError):
        return False

    return imagedata
Example #28
  def main(self,niifile,slicerange,labelinclude,labelexclude,labelrange,bottompx,strokepx):
    nii = nibabel.load(niifile)
    nii = nibabel.as_closest_canonical(nii)
    hdr = nii.get_header()
    img = nii.get_data()
    pngdir = self.tempdir('png')
    svgdir = self.tempdir('svg')
    svxdir = self.tempdir('svx')
    densdir = self.tempdir('svx/dens')

    # autodetect slicerange if not specified
    if slicerange == []:
      mask = applyMask(img,labelrange,labelinclude,labelexclude)
      select = numpy.flatnonzero(mask.max(axis=0).max(axis=1))
      slicerange = [select[0],select[-1]]

    srange = range(slicerange[0],slicerange[1])
    mask = img[:,srange,:]
    mask = applyMask(mask,labelrange,labelinclude,labelexclude).astype(float)
    if strokepx:
      if bottompx:
        bottompx = range(0,bottompx) if bottompx>0 else range(mask.shape[1]+bottompx,mask.shape[1])
        solidmask = mask[:,bottompx,:]
      mask = binaryContour(mask)
      if bottompx:
        mask[:,bottompx,:] = solidmask
      radius = strokepx*math.pi/3
      ball = ball3d(radius)-1
      mask = ndimage.grey_dilation(mask,structure=ball)
    
    mask = (mask*255.999).astype(numpy.uint8)
    return FancyOutput(
      mask = mask,
      slicerange = slicerange
    )
Example #29
def ReadOthers(dir_):
    """
    Read the given Analyze, NIfTI, Compressed NIfTI or PAR/REC file,
    remove singleton image dimensions and convert image orientation to
    RAS+ canonical coordinate system. The Analyze header does not support
    an affine transformation matrix, thus it cannot be converted automatically
    to canonical orientation.

    :param dir_: file path
    :return: imagedata object
    """

    if not const.VTK_WARNING:
        log_path = os.path.join(const.USER_LOG_DIR, 'vtkoutput.txt')
        fow = vtk.vtkFileOutputWindow()
        fow.SetFileName(log_path.encode(const.FS_ENCODE))
        ow = vtk.vtkOutputWindow()
        ow.SetInstance(fow)

    try:
        imagedata = nib.squeeze_image(nib.load(dir_))
        imagedata = nib.as_closest_canonical(imagedata)
        imagedata.update_header()
    except(nib.filebasedimages.ImageFileError):
        return False

    return imagedata
Example #30
    def get_data(self, img):
        """
        Extract data array and meta data from loaded image and return them.
        This function returns 2 objects, first is numpy array of image data, second is dict of meta data.
        It constructs `affine`, `original_affine`, and `spatial_shape` and stores in meta dict.
        If loading a list of files, stack them together and add a new dimension as first dimension,
        and use the meta data of the first image to represent the stacked result.

        Args:
            img: a Nibabel image object loaded from an image file or a list of Nibabel image objects.

        """
        img_array: List[np.ndarray] = list()
        compatible_meta: Dict = {}

        for i in ensure_tuple(img):
            header = self._get_meta_dict(i)
            header["affine"] = self._get_affine(i)
            header["original_affine"] = self._get_affine(i)
            header["as_closest_canonical"] = self.as_closest_canonical
            if self.as_closest_canonical:
                i = nib.as_closest_canonical(i)
                header["affine"] = self._get_affine(i)
            header["spatial_shape"] = self._get_spatial_shape(i)
            img_array.append(self._get_array_data(i))
            _copy_compatible_dict(header, compatible_meta)

        img_array_ = np.stack(img_array,
                              axis=0) if len(img_array) > 1 else img_array[0]
        return img_array_, compatible_meta
Example #31
def load_image(file):
    image = nib.load(file)
    image = nib.as_closest_canonical(image)
    # print(nib.aff2axcodes(image.affine))
    image_np = image_to_np(image)

    return image, image_np
Example #32
def plot_spikes(in_file, in_fft, spikes_list, cols=3,
                labelfmt='t={0:.3f}s (z={1:d})',
                out_file=None):
    from mpl_toolkits.axes_grid1 import make_axes_locatable

    nii = nb.as_closest_canonical(nb.load(in_file))
    fft = nb.load(in_fft).get_data()


    data = nii.get_data()
    zooms = nii.header.get_zooms()[:2]
    tstep = nii.header.get_zooms()[-1]
    ntpoints = data.shape[-1]

    if len(spikes_list) > cols * 7:
        cols += 1


    nspikes = len(spikes_list)
    rows = 1
    if nspikes > cols:
        rows = math.ceil(nspikes / cols)

    fig = plt.figure(figsize=(7 * cols, 5 * rows))

    for i, (t, z) in enumerate(spikes_list):
        prev = None
        pvft = None
        if t > 0:
            prev = data[..., z, t - 1]
            pvft = fft[..., z, t - 1]

        post = None
        psft = None
        if t < (ntpoints - 1):
            post = data[..., z, t + 1]
            psft = fft[..., z, t + 1]


        ax1 = fig.add_subplot(rows, cols, i + 1)
        divider = make_axes_locatable(ax1)
        ax2 = divider.new_vertical(size="100%", pad=0.1)
        fig.add_axes(ax2)

        plot_slice_tern(data[..., z, t], prev=prev, post=post, spacing=zooms,
                        ax=ax2,
                        label=labelfmt.format(t * tstep, z))

        plot_slice_tern(fft[..., z, t], prev=pvft, post=psft, vmin=-5, vmax=5,
                        cmap=get_parula(), ax=ax1)

    plt.tight_layout()
    if out_file is None:
        fname, ext = op.splitext(op.basename(in_file))
        if ext == '.gz':
            fname, _ = op.splitext(fname)
        out_file = op.abspath('%s.svg' % fname)

    fig.savefig(out_file, format='svg', dpi=300, bbox_inches='tight')
    return out_file
Example #33
def center_nifti_origin(input_image, output_image):
    """

    Put the origin of the coordinate system at the center of the image

    Args:
        input_image: path to the input image
        output_image: path to the output image (where the result will be stored)

    Returns:

    """

    import nibabel as nib
    import numpy as np

    img = nib.load(input_image)
    canonical_img = nib.as_closest_canonical(img)
    hd = canonical_img.header
    # if hd['quatern_b'] != 0 or hd['quatern_c'] != 0 or hd['quatern_d'] != 0:
    #    print('Warning: Not all values in quatern are equal to zero')
    qform = np.zeros((4, 4))
    for i in range(1, 4):
        qform[i - 1, i - 1] = hd['pixdim'][i]
        qform[i - 1, 3] = -1.0 * hd['pixdim'][i] * hd['dim'][i] / 2.0
    new_img = nib.Nifti1Image(canonical_img.get_data(caching='unchanged'), qform)
    nib.save(new_img, output_image)
Example #34
def sanitize(input_fname):
    im = nb.as_closest_canonical(nb.squeeze_image(nb.load(str(input_fname))))
    hdr = im.header.copy()
    dtype = 'int16'
    data = None
    if str(input_fname).endswith('_mask.nii.gz'):
        dtype = 'uint8'
        data = im.get_fdata() > 0

    if str(input_fname).endswith('_probseg.nii.gz'):
        dtype = 'float32'
        hdr['cal_max'] = 1.0
        hdr['cal_min'] = 0.0
        data = im.get_fdata()
        data[data < 0] = 0

    if input_fname.name.split('_')[-1].split('.')[0] in ('T1w', 'T2w', 'PD'):
        data = im.get_fdata()
        data[data < 0] = 0

    hdr.set_data_dtype(dtype)
    nii = nb.Nifti1Image(
        data if data is not None else im.get_fdata().astype(dtype), im.affine,
        hdr)

    sform = nii.header.get_sform()
    nii.header.set_sform(sform, 4)
    nii.header.set_qform(sform, 4)

    nii.header.set_xyzt_units(xyz='mm')
    nii.to_filename(str(input_fname))
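A quick header check after sanitize has run; the file name is a placeholder, and xform code 4 corresponds to MNI152/template space in the NIfTI standard.

import nibabel as nb

img = nb.load("tpl-MNI152NLin2009cAsym_T1w.nii.gz")
print(img.header.get_sform(coded=True))  # (affine, 4): sform code set to 4 above
print(img.header.get_qform(coded=True))  # qform copied from the sform, also code 4
print(img.header.get_xyzt_units())       # ('mm', 'unknown')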
Example #35
    def processVolume(self, volIdx):
        """ Process a single 3D timepoint from the series

        Extract the 3D numpy array of voxel data for the current volume (set by
        self.volCounter attribute). Reorder the voxel data so that it is RAS+,
        build a header JSON object, and then send both the header and the voxel
        data out over the socket connection to Pyneal

        Parameters
        ----------
        volIdx : int
            index (0-based) of the volume you want to process
        """
        self.logger.info('Volume {} processing'.format(volIdx))

        ### Prep the voxel data by extracting this vol from the imageMatrix,
        # and then converting to a Nifti1 object in order to set the voxel
        # order to RAS+, then get the voxel data as contiguous numpy array
        thisVol = self.imageMatrix[:, :, :, volIdx]
        thisVol_nii = nib.Nifti1Image(thisVol, self.affine)
        thisVol_RAS = nib.as_closest_canonical(thisVol_nii)  # make RAS+
        thisVol_RAS_data = np.ascontiguousarray(thisVol_RAS.get_fdata())

        ### Create a header with metadata info
        volHeader = {
            'volIdx': volIdx,
            'TR': str(self.tr),
            'dtype': str(thisVol_RAS_data.dtype),
            'shape': thisVol_RAS_data.shape,
            'affine': json.dumps(thisVol_RAS.affine.tolist())
        }

        ### Send the voxel array and header to the pynealSocket
        self.sendVolToPynealSocket(volHeader, thisVol_RAS_data)
Example #36
    def __call__(self, data):
        """
        :param data: Data dictionary to be processed by this transform
        :type data: dict
        :return: Updated data dictionary
        :rtype: dict
        """
        for field in self.fields:

            complete_file_path = os.path.join(self.data_dir, data[field])

            assert complete_file_path[0:5] == 's3://'

            filename = get_file_from_s3(self.s3_client, complete_file_path, self.cache)

            img = nib.load(filename)

            if self.canonical:
                img = nib.as_closest_canonical(img)

            data[field] = img
            data[field + '_affines'] = img.affine
            data[field + '_orientations'] = nib.aff2axcodes(img.affine)

        return data
Example #37
    def __init__(self, fmapnii, weights=None, knots_zooms=None, padding=3,
                 pe_dir=1, njobs=-1):

        self._pedir = pe_dir
        if knots_zooms is None:
            knots_zooms = [40., 40., 18.]
            knots_zooms[pe_dir] = 60.

        if not isinstance(knots_zooms, (list, tuple)):
            knots_zooms = [knots_zooms] * 3

        self._knots_zooms = np.array(knots_zooms)

        if isinstance(fmapnii, str):
            fmapnii = nb.as_closest_canonical(nb.load(fmapnii))

        self._fmapnii = fmapnii
        self._padding = padding

        # Pad data with zeros
        self._data = np.zeros(tuple(np.array(
            self._fmapnii.get_data().shape) + 2 * padding))

        # The list of ijk coordinates
        self._fmapijk = get_ijk(self._data)

        # Find padding coordinates
        self._data[padding:-padding,
                   padding:-padding,
                   padding:-padding] = 1
        self._frameijk = self._data[tuple(self._fmapijk.T)] > 0

        # Set data
        self._data[padding:-padding,
                   padding:-padding,
                   padding:-padding] = fmapnii.get_data()

        # Get ijk in homogeneous coords
        ijk_h = np.hstack((self._fmapijk, np.array([1.0] * len(self._fmapijk))[..., np.newaxis]))

        # The list of xyz coordinates
        self._fmapaff = compute_affine(self._data, self._fmapnii.header.get_zooms())
        self._fmapxyz = self._fmapaff.dot(ijk_h.T)[:3, :].T

        # Mask coordinates
        self._weights = self._set_weights(weights)

        # Generate control points
        self._generate_knots()

        self._X = None
        self._coeff = None
        self._smoothed = None

        self._Xinv = None
        self._inverted = None
        self._invcoeff = None

        self._njobs = njobs
Example #38
def load_nifti(fname, reorient=True):
    """
    Loads a nifti image,
    returns a ndarray()
    """
    img = nib.load(fname)
    if reorient:
        img = nib.as_closest_canonical(img)
    return(img.get_data())
Example #39
def niiToArray(niiImg, c_contiguos=True, canonical=False):
    if canonical:
        rough_data = nib.as_closest_canonical(niiImg).get_data()
    else:
        rough_data = niiImg.get_data()
    print(np.isfortran(rough_data))
    #if c_contiguos and np.isfortran(rough_data):
    #    rough_data = rough_data.T
    if c_contiguos and not rough_data.flags['C_CONTIGUOUS']:
        rough_data = rough_data.T
    return rough_data
Example #40
 def main(self,mncfile,niifile):
   mnc = nibabel.load(mncfile)
   hdr = mnc.get_header()
   q = hdr.get_best_affine()
   print('Affine transformation matrix: {}'.format(q))
   img = mnc.get_data()
   print('Some data... {}'.format(img[:10,:10,:10]))
   print('Niifile {}'.format(niifile))
   nii = nibabel.Nifti1Image(img,q)
   nii = nibabel.as_closest_canonical(nii)
   nibabel.save(nii,niifile)
   return FancyOutput( niifile )
Example #41
def reorient(in_file, out_file=None):
    import nibabel as nb
    from fmriprep.utils.misc import genfname
    from builtins import (str, bytes)

    if out_file is None:
        out_file = genfname(in_file, suffix='ras')

    if isinstance(in_file, (str, bytes)):
        nii = nb.load(in_file)
    else:
        nii = in_file
    nii = nb.as_closest_canonical(nii)
    nii.to_filename(out_file)
    return out_file
Example #42
    def OnShowNiftiFile(self, pubsub_evt):
        dirpath = dialog.ShowOpenNiftiDialog()
        nifti_image = nifti.ReadNifti(dirpath)
        
        if nifti_image:
            # Rearranges the axes of the image to be closest to RAS+ orientation,
            # so the slices of the image (axial, coronal, sagittal) are shown correctly.
            # See http://nipy.org/nibabel/image_orientation.html
            nifti_image = as_closest_canonical(nifti_image)

            self.CreateNiftiProject(nifti_image)

        self.LoadProject()
        Publisher.sendMessage("Enable state project", True)
Example #43
 def main(self,inp,out):
   nii = nibabel.load(inp)
   nii = nibabel.as_closest_canonical(nii)
   # remove trailing singleton dimension
   hdr = nii.get_header()
   shape = hdr.get_data_shape()
   if shape[-1] == 1:
     img = nii.get_data()
     img = img.squeeze(-1)
     nii = nibabel.nifti1.Nifti1Image(img,hdr.get_best_affine())
   nibabel.nifti1.save(nii, out)
   return FancyDict(
     out = out,
     dtype = nii.get_data_dtype()
   )
Example #44
    def _run_interface(self, runtime):
        # load image
        nii = nb.load(self.inputs.in_file)
        hdr = nii.get_header().copy()

        if self.inputs.check_ras:
            nii = nb.as_closest_canonical(nii)

        if self.inputs.check_dtype:
            changed = True
            datatype = int(hdr['datatype'])

            if datatype == 1:
                IFLOGGER.warn('Input image %s has a suspicious data type "%s"',
                              self.inputs.in_file, hdr.get_data_dtype())

            # signed char and bool to uint8
            if datatype == 1 or datatype == 2 or datatype == 256:
                dtype = np.uint8

            # int16 to uint16
            elif datatype == 4:
                dtype = np.uint16

            # Signed long, long long, etc to uint32
            elif datatype == 8 or datatype == 1024 or datatype == 1280:
                dtype = np.uint32

            # Floats over 32 bits
            elif datatype == 64 or datatype == 1536:
                dtype = np.float32
            else:
                changed = False

            if changed:
                hdr.set_data_dtype(dtype)
                nii = nb.Nifti1Image(nii.get_data().astype(dtype),
                                     nii.get_affine(), hdr)

        # Generate name
        out_file, ext = op.splitext(op.basename(self.inputs.in_file))
        if ext == '.gz':
            out_file, ext2 = op.splitext(out_file)
            ext = ext2 + ext

        self._results['out_file'] = op.abspath('{}_conformed{}'.format(out_file, ext))
        nii.to_filename(self._results['out_file'])
        return runtime
Example #45
    def processParFile(self, par_fname):
        """ Process a given par header file

        Read the par header file and corresponding rec image file. Read in as
        a nifti object that will provide the 3D voxel array for this volume.
        Reorder to RAS+, and then send to the pynealSocket

        Parameters
        ----------
        par_fname : string
            full path to the par file that you want to process
        """
        # make sure the corresponding rec file exists
        while not os.path.isfile(par_fname.replace('.par', '.rec')):
            time.sleep(.01)

        ### Build the 3D voxel array and reorder to RAS+
        # nibabel will load the par/rec, but there can be multiple images (mag,
        # phase, etc...) concatenated into the 4th dimension. Loading with the
        # strict_sort option (I think) will make sure the first image is the
        # data we want. Extract this data, then reorder the voxel array to RAS+
        thisVol = nib.load(par_fname, strict_sort=True)

        # get the volume index from the acq_nr field of the header (1-based index)
        volIdx = int(thisVol.header.general_info['acq_nr']) - 1
        self.logger.info('Volume {} processing'.format(volIdx))

        # convert to RAS+
        thisVol_RAS = nib.as_closest_canonical(thisVol)

        # grab the data for the first volume along the 4th dimension
        # and store as contiguous array (required for ZMQ)
        thisVol_RAS_data = np.ascontiguousarray(thisVol_RAS.get_data()[:, :, :, 0].astype('uint16'))

        ### Create a header with metadata info
        volHeader = {
            'volIdx': volIdx,
            'dtype': str(thisVol_RAS_data.dtype),
            'shape': thisVol_RAS_data.shape,
            'affine': json.dumps(thisVol_RAS.affine.tolist()),
            'TR': str(thisVol.header.general_info['repetition_time'][0])
        }

        ### Send the voxel array and header to the pynealSocket
        self.sendVolToPynealSocket(volHeader, thisVol_RAS_data)
Example #46
def _get_limits(nifti_file, only_plot_noise=False):
    if isinstance(nifti_file, str):
        nii = nb.as_closest_canonical(nb.load(nifti_file))
        data = nii.get_data()
    else:
        data = nifti_file

    data_mask = np.logical_not(np.isnan(data))

    if only_plot_noise:
        data_mask = np.logical_and(data_mask, data != 0)
        vmin = np.percentile(data[data_mask], 0)
        vmax = np.percentile(data[data_mask], 61)
    else:
        vmin = np.percentile(data[data_mask], 0.5)
        vmax = np.percentile(data[data_mask], 99.5)

    return vmin, vmax
Example #47
def niiToArray(niiImg, canonical, c_contiguos):
    dataType = getNumpyDataFormat(niiImg)
    sizeof_hdr = niiImg.header['sizeof_hdr']
    scl_slope = niiImg.header['scl_slope']
    scl_inter = niiImg.header['scl_inter']
    if math.isnan(sizeof_hdr):
        sizeof_hdr = 1
    if math.isnan(scl_slope):
        scl_slope = 1
    if math.isnan(scl_inter):
        scl_inter = 0
    if canonical:
        alignedImage = nib.as_closest_canonical(niiImg)
    else:
        alignedImage = niiImg
    INroughData = alignedImage.get_data()
    if c_contiguos and not INroughData.flags['C_CONTIGUOUS']:
        INroughData = INroughData.T
    return INroughData * scl_slope + scl_inter
Example #48
def _get_limits(nifti_file, only_plot_noise=False):
    from builtins import bytes, str   # pylint: disable=W0622

    if isinstance(nifti_file, (str, bytes)):
        nii = nb.as_closest_canonical(nb.load(nifti_file))
        data = nii.get_data()
    else:
        data = nifti_file

    data_mask = np.logical_not(np.isnan(data))

    if only_plot_noise:
        data_mask = np.logical_and(data_mask, data != 0)
        vmin = np.percentile(data[data_mask], 0)
        vmax = np.percentile(data[data_mask], 61)
    else:
        vmin = np.percentile(data[data_mask], 0.5)
        vmax = np.percentile(data[data_mask], 99.5)

    return vmin, vmax
Example #49
def prepRealDataset(image_path):
    """ Prepare a real, existing dataset for use with the simulator

    Read in the supplied 4d image file, set orientation to RAS+

    Parameters
    ----------
    image_path : string
        full path to the dataset you want to use

    Returns
    -------
    ds_RAS : nibabel-like image
        Nibabel dataset with orientation set to RAS+

    """
    print('Prepping dataset: {}'.format(image_path))
    ds = nib.load(image_path)

    # make sure it's RAS+
    ds_RAS = nib.as_closest_canonical(ds)

    print('Dimensions: {}'.format(ds_RAS.shape))
    return ds_RAS
Example #50
def reorient(in_file, newpath=None):
    """Reorient Nifti files to RAS"""
    out_file = fname_presuffix(in_file, suffix='_ras', newpath=newpath)
    nb.as_closest_canonical(nb.load(in_file)).to_filename(out_file)
    return out_file
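A minimal usage sketch; the input path is a placeholder, and fname_presuffix is assumed to come from nipype.utils.filemanip in the surrounding module.

import nibabel as nb

ras_file = reorient("sub-01_bold.nii.gz", newpath=".")
print(nb.aff2axcodes(nb.load(ras_file).affine))  # ('R', 'A', 'S')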
Example #51
    def _run_interface(self, runtime):
        # Load image, orient as RAS
        fname = self.inputs.in_file
        orig_img = nb.load(fname)
        reoriented = nb.as_closest_canonical(orig_img)

        # Set target shape information
        target_zooms = np.array(self.inputs.target_zooms)
        target_shape = np.array(self.inputs.target_shape)
        target_span = target_shape * target_zooms

        zooms = np.array(reoriented.header.get_zooms()[:3])
        shape = np.array(reoriented.shape[:3])

        # Reconstruct transform from orig to reoriented image
        ornt_xfm = nb.orientations.inv_ornt_aff(
            nb.io_orientation(orig_img.affine), orig_img.shape)
        # Identity unless proven otherwise
        target_affine = reoriented.affine.copy()
        conform_xfm = np.eye(4)

        xyz_unit = reoriented.header.get_xyzt_units()[0]
        if xyz_unit == 'unknown':
            # Common assumption; if we're wrong, unlikely to be the only thing that breaks
            xyz_unit = 'mm'

        # Set a 0.01 mm tolerance before triggering rescaling
        atol = {'meter': 1e-5, 'mm': 0.01, 'micron': 10}[xyz_unit]

        # Rescale => change zooms
        # Resize => update image dimensions
        rescale = not np.allclose(zooms, target_zooms, atol=atol)
        resize = not np.all(shape == target_shape)
        if rescale or resize:
            if rescale:
                scale_factor = target_zooms / zooms
                target_affine[:3, :3] = reoriented.affine[:3, :3].dot(np.diag(scale_factor))

            if resize:
                # The shift is applied after scaling.
                # Use a proportional shift to maintain relative position in dataset
                size_factor = target_span / (zooms * shape)
                # Use integer shifts to avoid unnecessary interpolation
                offset = (reoriented.affine[:3, 3] * size_factor - reoriented.affine[:3, 3])
                target_affine[:3, 3] = reoriented.affine[:3, 3] + offset.astype(int)

            data = nli.resample_img(reoriented, target_affine, target_shape).get_data()
            conform_xfm = np.linalg.inv(reoriented.affine).dot(target_affine)
            reoriented = reoriented.__class__(data, target_affine, reoriented.header)

        # Image may be reoriented, rescaled, and/or resized
        if reoriented is not orig_img:
            out_name = fname_presuffix(fname, suffix='_ras', newpath=runtime.cwd)
            reoriented.to_filename(out_name)
        else:
            out_name = fname

        transform = ornt_xfm.dot(conform_xfm)
        assert np.allclose(orig_img.affine.dot(transform), target_affine)

        mat_name = fname_presuffix(fname, suffix='.mat', newpath=runtime.cwd, use_ext=False)
        np.savetxt(mat_name, transform, fmt='%.08f')

        self._results['out_file'] = out_name
        self._results['transform'] = mat_name

        return runtime
Example #52
    def __init__(self, anat, stat=None, mask=None, n_col=9, step=2,
                 tight=True, show_mask=True, slice_dir="axial",
                 anat_lims=None, title=None):
        """Plot a mosaic of axial slices through an MRI volume.

        Parameters
        ----------
        anat : filename, nibabel image, or array
            The anatomical image that will form the background of the mosaic.
            If only an array is passed, an identity matrix will be used as
            the affine and orientation could be incorrect.
        stat : filename, nibabel image, or array
            A statistical map to plot as an overlay (which happens by calling
            one of the methods). If only an array is passed, it is assumed
            to have the same orientation as the anatomy.
        mask : filename, nibabel image, or array
            A binary image where voxels included in the statistical analysis
            are True. This will be used to gray-out voxels in the anatomical
            image that are outside the field of view. If you want to overlay
            the mask itself, pass it to ``stat``.
        n_col : int
            Number of columns in the mosaic. This will also determine the size
            of the figure (1 inch per column).
        step : int
            Show every ``step`` slice along the slice_dir in the mosaic.
        tight : bool
            If True, try to crop panes to focus on the brain volume.
        show_mask : bool
            If True, gray-out voxels in the anat image that are outside
            of the mask image.
        slice_dir : axial | coronal | sagittal
            Direction to slice the mosaic on.
        anat_lims : pair of floats
            Limits for the anatomical (background) image colormap

        """
        # -- Load and reorient the anatomical image

        if isinstance(anat, string_types):
            anat_img = nib.load(anat)
            have_orientation = True
        elif isinstance(anat, nib.spatialimages.SpatialImage):
            anat_img = anat
            have_orientation = True
        elif isinstance(anat, np.ndarray):
            anat_img = nib.Nifti1Image(anat, np.eye(4))
            have_orientation = False
        else:
            raise TypeError("anat type {} not understood".format(type(anat)))
        self.anat_img = nib.as_closest_canonical(anat_img)
        self.anat_data = self.anat_img.get_data()

        # -- Load and reorient the statistical image

        if isinstance(stat, string_types):
            stat_img = nib.load(stat)
        elif isinstance(stat, nib.spatialimages.SpatialImage):
            stat_img = stat
        elif isinstance(stat, np.ndarray):
            if stat.dtype is np.dtype("bool"):
                stat = stat.astype(np.int)
            stat_img = nib.Nifti1Image(stat, anat_img.affine, anat_img.header)
        elif stat is not None:
            raise TypeError("stat type {} not understood".format(type(stat)))
        else:
            stat_img = None

        if stat_img is not None:
            self.stat_img = nib.as_closest_canonical(stat_img)

        # -- Load and reorient the mask image

        if isinstance(mask, string_types):
            mask_img = nib.load(mask)
        elif isinstance(mask, nib.spatialimages.SpatialImage):
            mask_img = mask
        elif isinstance(mask, np.ndarray):
            if mask.dtype is np.dtype("bool"):
                mask = mask.astype(np.int)
            mask_img = nib.Nifti1Image(mask, anat_img.affine, anat_img.header)
        elif mask is not None:
            raise TypeError("mask type {} not understood".format(type(mask)))
        else:
            mask_img = None
            mask_data = None

        if mask is not None:
            self.mask_img = nib.as_closest_canonical(mask_img)
            mask_data = self.mask_img.get_data().astype(bool)

        if slice_dir[0] not in "sca":
            err = "Slice direction {} not understood".format(slice_dir)
            raise ValueError(err)

        # Find a field of view that tries to eliminate empty voxels
        anat_fov = self.anat_img.get_data() > 1e-5
        if tight:
            self.fov = anat_fov
            if mask is not None:
                self.fov &= mask_data
        else:
            self.fov = np.ones_like(anat_fov, np.bool)

        # Save the mosaic parameters
        self.n_col = n_col
        self.step = step
        self.slice_dir = slice_dir
        self.title = title

        # Define slice objects to crop to the volume
        slices, = ndimage.find_objects(self.fov.astype(np.int))
        self.x_slice, self.y_slice, self.z_slice = slices

        # Update the slice on the mosaic axis with steps
        slice_ax = dict(s="x", c="y", a="z")[slice_dir[0]]
        ms = getattr(self, slice_ax + "_slice")
        mosaic_slice = slice(ms.start, ms.stop, step)
        setattr(self, slice_ax + "_slice", mosaic_slice)
        self.n_slices = (ms.stop - ms.start) // step

        # Initialize the figure and plot the constant info
        self._setup_figure()
        self._plot_anat(anat_lims)
        if mask is not None and show_mask:
            self._plot_inverse_mask()

        # Label the anatomy
        if have_orientation:
            l_label, r_label = dict(s="PA", c="LR", a="LR")[self.slice_dir[0]]
            self.fig.text(.01, .03, l_label, size=14, color="w",
                          ha="left", va="center")
            self.fig.text(.99, .03, r_label, size=14, color="w",
                          ha="right", va="center")
Example #53
0
def _gen_reference(fixed_image, moving_image, fov_mask=None, out_file=None,
                   message=None, force_xform_code=None):
    """
    Generates a sampling reference, and makes sure xform matrices/codes are
    correct
    """

    if out_file is None:
        out_file = fname_presuffix(fixed_image,
                                   suffix='_reference',
                                   newpath=os.getcwd())

    # Moving images may not be RAS/LPS (more generally, transverse-longitudinal-axial)
    reoriented_moving_img = nb.as_closest_canonical(nb.load(moving_image))
    new_zooms = reoriented_moving_img.header.get_zooms()[:3]

    # Avoid small differences in reported resolution from causing changes to
    # the FOV. See https://github.com/poldracklab/fmriprep/issues/512
    # A positive diagonal affine is RAS, hence the need to reorient above.
    new_affine = np.diag(np.round(new_zooms, 3))

    resampled = nli.resample_img(fixed_image,
                                 target_affine=new_affine,
                                 interpolation='nearest')

    if fov_mask is not None:
        # If we have a mask, resample again dropping (empty) samples
        # out of the FoV.
        fixednii = nb.load(fixed_image)
        masknii = nb.load(fov_mask)

        if masknii.shape[:3] != fixednii.shape[:3]:
            raise RuntimeError(
                'Fixed image and mask do not have the same dimensions.')

        if not np.allclose(masknii.affine, fixednii.affine, atol=1e-5):
            raise RuntimeError(
                'Fixed image and mask have different affines')

        # Get mask into reference space
        masknii = nli.resample_img(masknii,
                                   target_affine=new_affine,
                                   interpolation='nearest')
        res_shape = np.array(masknii.shape[:3])

        # Calculate a bounding box for the input mask
        # with an offset of 2 voxels per face
        bbox = np.argwhere(masknii.get_data() > 0)
        new_origin = np.clip(bbox.min(0) - 2, a_min=0, a_max=None)
        new_end = np.clip(bbox.max(0) + 2, a_min=0,
                          a_max=res_shape - 1)

        # Find new origin, and set into new affine
        new_affine_4 = resampled.affine.copy()
        new_affine_4[:3, 3] = new_affine_4[:3, :3].dot(
            new_origin) + new_affine_4[:3, 3]

        # Calculate new shapes
        new_shape = new_end - new_origin + 1
        resampled = nli.resample_img(fixed_image,
                                     target_affine=new_affine_4,
                                     target_shape=new_shape.tolist(),
                                     interpolation='nearest')

    xform = resampled.affine  # nibabel will pick the best affine
    _, qform_code = resampled.header.get_qform(coded=True)
    _, sform_code = resampled.header.get_sform(coded=True)

    xform_code = sform_code if sform_code > 0 else qform_code
    if xform_code == 1:
        xform_code = 2

    if force_xform_code is not None:
        xform_code = force_xform_code

    # Keep 0, 2, 3, 4 unchanged
    resampled.header.set_qform(xform, int(xform_code))
    resampled.header.set_sform(xform, int(xform_code))
    resampled.header['descrip'] = 'reference image generated by %s.' % (
        message or '(unknown software)')
    resampled.to_filename(out_file)
    return out_file
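When a FoV mask is supplied, the key arithmetic is the padded bounding box and the origin shift that gets folded into the new affine. A minimal sketch of that computation with a synthetic mask and an identity affine (all values here are hypothetical):

import numpy as np

# Toy resampled grid: 64^3 voxels with nonzero mask values in a small block
res_shape = np.array([64, 64, 64])
mask = np.zeros(res_shape, dtype=np.uint8)
mask[20:30, 25:35, 30:40] = 1
affine = np.eye(4)  # stand-in for the resampled image's affine

# Bounding box of the mask, padded by 2 voxels per face and clipped to the grid
bbox = np.argwhere(mask > 0)
new_origin = np.clip(bbox.min(0) - 2, a_min=0, a_max=None)
new_end = np.clip(bbox.max(0) + 2, a_min=0, a_max=res_shape - 1)

# Shift the affine origin to the padded bounding-box corner
new_affine = affine.copy()
new_affine[:3, 3] = new_affine[:3, :3].dot(new_origin) + new_affine[:3, 3]
new_shape = new_end - new_origin + 1

print(new_origin, new_end, new_shape)  # [18 23 28] [31 36 41] [14 14 14]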
Example #54
0
def create_cfm(in_file, lesion_mask=None, global_mask=True, out_path=None):
    """
    Create a mask to constrain registration.

    Parameters
    ----------
    in_file : str
        Path to an existing image (usually a mask).
        If global_mask = True, this is used as a size/dimension reference.
    out_path : str
        Path/filename for the new cost function mask.
    lesion_mask : str, optional
        Path to an existing binary lesion mask.
    global_mask : bool
        Create a whole-image mask (True) or limit to the reference mask (False).
        A whole-image mask is 1 everywhere.

    Returns
    -------
    str
        Absolute path of the new cost function mask.

    Notes
    -----
    in_file and lesion_mask must be in the same
    image space and have the same dimensions
    """
    import os
    import numpy as np
    import nibabel as nb
    from nipype.utils.filemanip import fname_presuffix

    if out_path is None:
        out_path = fname_presuffix(in_file, suffix='_cfm', newpath=os.getcwd())
    else:
        out_path = os.path.abspath(out_path)

    if not global_mask and not lesion_mask:
        NIWORKFLOWS_LOG.warning(
            'No lesion mask was provided and global_mask not requested, '
            'therefore the original mask will not be modified.')

    # Load the input image
    in_img = nb.load(in_file)

    # If we want a global mask, create one based on the input image.
    data = np.ones(in_img.shape, dtype=np.uint8) if global_mask else in_img.get_data()
    if set(np.unique(data)) - {0, 1}:
        raise ValueError("`global_mask` must be true if `in_file` is not a binary mask")

    # If a lesion mask was provided, combine it with the secondary mask.
    if lesion_mask is not None:
        # Reorient the lesion mask and get the data.
        lm_img = nb.as_closest_canonical(nb.load(lesion_mask))

        # Subtract lesion mask from secondary mask, set negatives to 0
        data = np.fmax(data - lm_img.get_data(), 0)
        # Cost function mask will be created from subtraction
    # Otherwise, CFM will be created from global mask

    cfm_img = nb.Nifti1Image(data, in_img.affine, in_img.header)

    # Save the cost function mask.
    cfm_img.set_data_dtype(np.uint8)
    cfm_img.to_filename(out_path)

    return out_path
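A hedged usage sketch for create_cfm, assuming the function above is in scope and its dependencies (numpy, nibabel, nipype) are installed; the file names and the toy masks below are hypothetical:

import numpy as np
import nibabel as nb

# Hypothetical inputs: a brain mask and a lesion mask on the same 1 mm grid
brain = np.zeros((32, 32, 32), dtype=np.uint8)
brain[4:28, 4:28, 4:28] = 1
lesion = np.zeros_like(brain)
lesion[10:14, 10:14, 10:14] = 1

nb.Nifti1Image(brain, np.eye(4)).to_filename('brain_mask.nii.gz')
nb.Nifti1Image(lesion, np.eye(4)).to_filename('lesion_mask.nii.gz')

# Whole-image mask minus the lesion: ones everywhere except lesion voxels
cfm_path = create_cfm('brain_mask.nii.gz',
                      lesion_mask='lesion_mask.nii.gz',
                      global_mask=True,
                      out_path='cfm.nii.gz')
print(cfm_path)  # absolute path to cfm.nii.gz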
Example #55
0
def plot_mosaic(img, out_file=None, ncols=8, title=None, overlay_mask=None,
                bbox_mask_file=None, only_plot_noise=False, annotate=True,
                vmin=None, vmax=None, cmap='Greys_r', plot_sagittal=True,
                fig=None, zmax=128):

    if isinstance(img, (str, bytes)):
        nii = nb.as_closest_canonical(nb.load(img))
        img_data = nii.get_data()
        zooms = nii.header.get_zooms()
    else:
        img_data = img
        zooms = [1.0, 1.0, 1.0]
        out_file = 'mosaic.svg'

    # Remove extra dimensions
    img_data = np.squeeze(img_data)

    if img_data.shape[2] > zmax and bbox_mask_file is None:
        lowthres = np.percentile(img_data, 5)
        mask_file = np.ones_like(img_data)
        mask_file[img_data <= lowthres] = 0
        img_data = _bbox(img_data, mask_file)

    if bbox_mask_file is not None:
        bbox_data = nb.as_closest_canonical(
            nb.load(bbox_mask_file)).get_data()
        img_data = _bbox(img_data, bbox_data)

    z_vals = np.array(list(range(0, img_data.shape[2])))

    # Reduce the number of slices shown
    if len(z_vals) > zmax:
        rem = 15
        # Crop inferior and posterior
        if not bbox_mask_file:
            # img_data = img_data[..., rem:-rem]
            z_vals = z_vals[rem:-rem]
        else:
            # img_data = img_data[..., 2 * rem:]
            z_vals = z_vals[2 * rem:]

    while len(z_vals) > zmax:
        # Discard one every two slices
        # img_data = img_data[..., ::2]
        z_vals = z_vals[::2]

    n_images = len(z_vals)
    nrows = math.ceil(n_images / ncols)
    if plot_sagittal:
        nrows += 1

    if overlay_mask:
        overlay_data = nb.as_closest_canonical(
            nb.load(overlay_mask)).get_data()

    # create figures
    if fig is None:
        fig = plt.figure(figsize=(22, nrows * 3))

    est_vmin, est_vmax = _get_limits(img_data,
                                     only_plot_noise=only_plot_noise)
    if not vmin:
        vmin = est_vmin
    if not vmax:
        vmax = est_vmax

    naxis = 1
    for z_val in z_vals:
        ax = fig.add_subplot(nrows, ncols, naxis)

        if overlay_mask:
            ax.set_rasterized(True)
        plot_slice(img_data[:, :, z_val], vmin=vmin, vmax=vmax,
                   cmap=cmap, ax=ax, spacing=zooms[:2],
                   label='%d' % z_val, annotate=annotate)

        if overlay_mask:
            from matplotlib import cm
            msk_cmap = cm.Reds  # @UndefinedVariable
            msk_cmap._init()
            alphas = np.linspace(0, 0.75, msk_cmap.N + 3)
            msk_cmap._lut[:, -1] = alphas
            plot_slice(overlay_data[:, :, z_val], vmin=0, vmax=1,
                       cmap=msk_cmap, ax=ax, spacing=zooms[:2])
        naxis += 1

    if plot_sagittal:
        naxis = ncols * (nrows - 1) + 1

        step = int(img_data.shape[0] / (ncols + 1))
        start = step
        stop = img_data.shape[0] - step

        if step == 0:
            step = 1

        for x_val in list(range(start, stop, step))[:ncols]:
            ax = fig.add_subplot(nrows, ncols, naxis)

            plot_slice(img_data[x_val, ...], vmin=vmin, vmax=vmax,
                       cmap=cmap, ax=ax, label='%d' % x_val,
                       spacing=[zooms[0], zooms[2]])
            naxis += 1

    fig.subplots_adjust(
        left=0.05, right=0.95, bottom=0.05, top=0.95, wspace=0.05,
        hspace=0.05)

    if title:
        fig.suptitle(title, fontsize='10')
    fig.subplots_adjust(wspace=0.002, hspace=0.002)

    if out_file is None:
        fname, ext = op.splitext(op.basename(img))
        if ext == ".gz":
            fname, _ = op.splitext(fname)
        out_file = op.abspath(fname + '_mosaic.svg')

    fig.savefig(out_file, format='svg', dpi=300, bbox_inches='tight')
    return out_file
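The slice-thinning logic above (crop a fixed number of end slices, then halve until at most zmax remain) can be seen in isolation with a plain index array; the slice count below is arbitrary:

import numpy as np

zmax = 128
z_vals = np.arange(400)  # hypothetical number of axial slices

# Crop 15 slices from each end first (the no-bounding-box branch)
if len(z_vals) > zmax:
    rem = 15
    z_vals = z_vals[rem:-rem]

# Then discard every other slice until at most zmax remain
while len(z_vals) > zmax:
    z_vals = z_vals[::2]

print(len(z_vals), z_vals[:5])  # 93 [15 19 23 27 31]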
Example #56
0
    def buildFunc(self, parFiles):
        """ Build a 4D functional image from list of par files

        Given a list of `parFiles`, build a 4D functional image from them. For
        Philips scanners, there should be a par header file and corresponding
        rec image file for each volume in the series. This function will read
        each header/image pair and construct a 4D nifti object. The 4D nifti
        object contains a voxel array ordered in RAS+ as well as the affine
        transformation to map between voxel and mm space.

        Parameters
        ----------
        parFiles : list
            list containing the file names (file names ONLY, no path) of all
            par files to be used in constructing the final nifti image

        Returns
        -------
        funcImage_RAS : Nifti1Image
            nifti-1 formatted image of the 4D functional data, oriented in
            RAS+

        See Also
        --------
        nibabel.nifti1.Nifti1Image()

        """
        imageMatrix = None
        affine = None
        TR = None

        ### Loop over all of the par files
        nVols = len(parFiles)
        for par_fname in parFiles:
            # build full path to this par file
            par_fname = join(self.seriesDir, par_fname)

            # make sure there is a corresponding .rec file
            if not os.path.isfile(par_fname.replace('.par', '.rec')):
                print('No REC file found to match par: {}'.format(par_fname))

            ### Build the 3d voxel array for this volume and reorder to RAS+
            # nibabel will load the par/rec, but there can be multiple images
            # (mag, phase, etc...) concatenated into the 4th dimension. Loading
            # with the strict_sort option (I think) will make sure the first
            # image is the data we want. Extract this data, then reorder the
            # voxel array to RAS+
            thisVol = nib.load(par_fname, strict_sort=True)

            # get the vol index (0-based index) from the acq_nr field of the
            # header (1-based index)
            volIdx = int(thisVol.header.general_info['acq_nr']) - 1

            # set TR
            if TR is None:
                TR = thisVol.header.general_info['repetition_time'][0]

            # convert to RAS+
            thisVol_RAS = nib.as_closest_canonical(thisVol)

            # construct the imageMatrix if it hasn't been made yet
            if imageMatrix is None:
                imageMatrix = np.zeros(shape=(thisVol_RAS.shape[0],
                                              thisVol_RAS.shape[1],
                                              thisVol_RAS.shape[2],
                                              nVols), dtype=np.uint16)

            # construct the affine if it isn't made yet
            if affine is None:
                affine = thisVol_RAS.affine

            # Add this data to the image matrix
            imageMatrix[:, :, :, volIdx] = thisVol_RAS.get_data()[:, :, :, 0].astype('uint16')

        ### Build a Nifti object
        funcImage = nib.Nifti1Image(imageMatrix, affine=affine)

        # add the correct TR to the header
        pixDims = np.array(funcImage.header.get_zooms())
        pixDims[3] = TR
        funcImage.header.set_zooms(pixDims)

        return funcImage
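The only non-obvious header manipulation above is storing the repetition time as the fourth zoom. A minimal nibabel sketch of that step with a synthetic 4D array (shape and TR are illustrative):

import numpy as np
import nibabel as nib

# Synthetic 4D volume: 8x8x8 voxels, 5 time points
data = np.zeros((8, 8, 8, 5), dtype=np.uint16)
img = nib.Nifti1Image(data, affine=np.eye(4))

# Store the repetition time as the 4th zoom, as done at the end of buildFunc
TR = 2.0
zooms = np.array(img.header.get_zooms())
zooms[3] = TR
img.header.set_zooms(zooms)

print(img.header.get_zooms())  # (1.0, 1.0, 1.0, 2.0)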
Example #57
0
    def _run_interface(self, runtime):
        import nibabel as nb
        import nilearn.image as nli
        from nipype.utils.filemanip import fname_presuffix, copyfile

        in_names = self.inputs.t1w_list
        orig_imgs = [nb.load(fname) for fname in in_names]
        reoriented = [nb.as_closest_canonical(img) for img in orig_imgs]
        target_shape = np.max([img.shape for img in reoriented], axis=0)
        target_zooms = np.min([img.header.get_zooms()[:3]
                               for img in reoriented], axis=0)

        resampled_imgs = []
        for img in reoriented:
            zooms = np.array(img.header.get_zooms()[:3])
            shape = np.array(img.shape)

            xyz_unit = img.header.get_xyzt_units()[0]
            if xyz_unit == 'unknown':
                # Common assumption; if we're wrong, unlikely to be the only thing that breaks
                xyz_unit = 'mm'
            # Set a 0.05 mm threshold for performing rescaling
            atol = {'meter': 5e-5, 'mm': 0.05, 'micron': 50}[xyz_unit]

            # Rescale => change zooms
            # Resize => update image dimensions
            rescale = not np.allclose(zooms, target_zooms, atol=atol)
            resize = not np.all(shape == target_shape)
            if rescale or resize:
                target_affine = np.eye(4, dtype=img.affine.dtype)
                if rescale:
                    scale_factor = target_zooms / zooms
                    target_affine[:3, :3] = np.diag(scale_factor).dot(img.affine[:3, :3])
                else:
                    target_affine[:3, :3] = img.affine[:3, :3]

                if resize:
                    # The shift is applied after scaling.
                    # Use a proportional shift to maintain relative position in dataset
                    size_factor = (target_shape.astype(float) + shape) / (2 * shape)
                    # Use integer shifts to avoid unnecessary interpolation
                    offset = (img.affine[:3, 3] * size_factor - img.affine[:3, 3]).astype(int)
                    target_affine[:3, 3] = img.affine[:3, 3] + offset
                else:
                    target_affine[:3, 3] = img.affine[:3, 3]

                data = nli.resample_img(img, target_affine, target_shape).get_data()
                img = img.__class__(data, target_affine, img.header)

            resampled_imgs.append(img)

        out_names = [fname_presuffix(fname, suffix='_ras', newpath=runtime.cwd)
                     for fname in in_names]

        for orig, final, in_name, out_name in zip(orig_imgs, resampled_imgs,
                                                  in_names, out_names):
            if final is orig:
                copyfile(in_name, out_name, copy=True, use_hardlink=True)
            else:
                final.to_filename(out_name)

        self._results['t1w_list'] = out_names

        return runtime
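The common target grid is simply the largest matrix and the smallest voxel size across the reoriented inputs. A small sketch with two synthetic images (matrix sizes and zooms are made up):

import numpy as np
import nibabel as nib

# Two hypothetical T1w images: 1 mm and 1.2 mm isotropic, different matrix sizes
img_a = nib.Nifti1Image(np.zeros((160, 192, 160), dtype=np.float32), np.eye(4))
img_b = nib.Nifti1Image(np.zeros((150, 180, 150), dtype=np.float32),
                        np.diag([1.2, 1.2, 1.2, 1.0]))

reoriented = [nib.as_closest_canonical(img) for img in (img_a, img_b)]

# Largest shape and smallest voxel size across the inputs, as in the interface
target_shape = np.max([img.shape for img in reoriented], axis=0)
target_zooms = np.min([img.header.get_zooms()[:3] for img in reoriented], axis=0)

print(target_shape)  # [160 192 160]
print(target_zooms)  # [1. 1. 1.]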
Example #58
0
def plot_mosaic(nifti_file, title=None, overlay_mask=None,
                fig=None, bbox_mask_file=None, only_plot_noise=False,
                figsize=DINA4_LANDSCAPE):
    from six import string_types
    from pylab import cm

    if isinstance(nifti_file, string_types):
        nii = nb.as_closest_canonical(nb.load(nifti_file))
        mean_data = nii.get_data()
    else:
        mean_data = nifti_file

    if bbox_mask_file:
        bbox_data = nb.as_closest_canonical(nb.load(bbox_mask_file)).get_data()
        B = np.argwhere(bbox_data)
        (ystart, xstart, zstart), (ystop, xstop, zstop) = B.min(0), B.max(0) + 1
        mean_data = mean_data[ystart:ystop, xstart:xstop, zstart:zstop]

    z_vals = np.array(list(range(0, mean_data.shape[2])))
    # Reduce the number of slices shown
    if mean_data.shape[2] > 70:
        rem = 15
        # Crop inferior and posterior
        if not bbox_mask_file:
            mean_data = mean_data[..., rem:-rem]
            z_vals = z_vals[rem:-rem]
        else:
            mean_data = mean_data[..., 2 * rem:]
            z_vals = z_vals[2 * rem:]

    if mean_data.shape[2] > 70:
        # Discard one every two slices
        mean_data = mean_data[..., ::2]
        z_vals = z_vals[::2]

    n_images = mean_data.shape[2]
    row, col = _calc_rows_columns((figsize[0] / figsize[1]), n_images)

    if overlay_mask:
        overlay_data = nb.as_closest_canonical(
            nb.load(overlay_mask)).get_data()

    # create figures
    if fig is None:
        fig = plt.Figure(figsize=figsize)

    FigureCanvas(fig)

    fig.subplots_adjust(top=0.85)
    for image, z_val in enumerate(z_vals):
        ax = fig.add_subplot(row, col, image + 1)
        data_mask = np.logical_not(np.isnan(mean_data))
        if only_plot_noise:
            data_mask = np.logical_and(data_mask, mean_data != 0)
        if overlay_mask:
            ax.set_rasterized(True)

        if only_plot_noise:
            vmin = np.percentile(mean_data[data_mask], 0)
            vmax = np.percentile(mean_data[data_mask], 61)
        else:
            vmin = np.percentile(mean_data[data_mask], 0.5)
            vmax = np.percentile(mean_data[data_mask], 99.5)

        ax.imshow(np.fliplr(mean_data[:, :, image].T), vmin=vmin,
                  vmax=vmax,
                  cmap=cm.Greys_r, interpolation='nearest', origin='lower')

        if overlay_mask:
            cmap = cm.Reds  # @UndefinedVariable
            cmap._init()
            alphas = np.linspace(0, 0.75, cmap.N + 3)
            cmap._lut[:, -1] = alphas
            ax.imshow(np.fliplr(overlay_data[:, :, image].T), vmin=0, vmax=1,
                      cmap=cmap, interpolation='nearest', origin='lower')

        ax.annotate(
            str(z_val), xy=(.99, .99), xycoords='axes fraction',
            fontsize=8, color='white', horizontalalignment='right',
            verticalalignment='top')

        ax.axis('off')

    fig.subplots_adjust(
        left=0.05, right=0.95, bottom=0.05, top=0.95, wspace=0.01, hspace=0.1)

    if not title:
        _, title = op.split(nifti_file)
        title += " (last modified: {})".format(
            time.ctime(op.getmtime(nifti_file)))
    fig.suptitle(title, fontsize='10')
    fig.subplots_adjust(wspace=0.002, hspace=0.002)
    return fig
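A hedged usage sketch, assuming plot_mosaic above lives in a module whose top-level imports and helpers (nb, np, plt, op, time, FigureCanvas, _calc_rows_columns, DINA4_LANDSCAPE) are in place; the input path is hypothetical:

fig = plot_mosaic('/path/to/mean_epi.nii.gz', title='Mean EPI mosaic')
fig.savefig('mean_epi_mosaic.pdf', dpi=300, bbox_inches='tight')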