Example No. 1
def test_find_cut_coords():
    data = np.zeros((100, 100, 100))
    x_map, y_map, z_map = 50, 10, 40
    data[x_map - 30:x_map + 30, y_map - 3:y_map + 3, z_map - 10:z_map + 10] = 1

    # identity affine
    affine = np.eye(4)
    img = nibabel.Nifti1Image(data, affine)
    mask_img = compute_epi_mask(img)
    x, y, z = find_xyz_cut_coords(img,
                                  mask_img=mask_img)

    np.testing.assert_allclose((x, y, z),
                               (x_map, y_map, z_map),
                               # Need such a high tolerance for the test to
                               # pass. x, y, z = [49.5, 9.5, 39.5]
                               rtol=6e-2)

    # non-trivial affine
    affine = np.diag([1. / 2, 1 / 3., 1 / 4., 1.])
    img = nibabel.Nifti1Image(data, affine)
    mask_img = compute_epi_mask(img)
    x, y, z = find_xyz_cut_coords(img, mask_img=mask_img)
    np.testing.assert_allclose((x, y, z),
                               (x_map / 2., y_map / 3., z_map / 4.),
                               # Need such a high tolerance for the test to
                               # pass. x, y, z = [24.75, 3.17, 9.875]
                               rtol=6e-2)

    # regression test (cf. #473)
    # test case: no data exceeds the activation threshold
    data = np.ones((36, 43, 36))
    affine = np.eye(4)
    img = nibabel.Nifti1Image(data, affine)
    x, y, z = find_xyz_cut_coords(img, activation_threshold=1.1)
    np.testing.assert_array_equal(
        np.array([x, y, z]),
        0.5 * np.array(data.shape).astype(float))

    # regression test (cf. #922)
    # pseudo-4D images as input (i.e., X, Y, Z, 1)
    # previously raised "ValueError: too many values to unpack"
    rng = np.random.RandomState(42)
    data_3d = rng.randn(10, 10, 10)
    data_4d = data_3d[..., np.newaxis]
    affine = np.eye(4)
    img_3d = nibabel.Nifti1Image(data_3d, affine)
    img_4d = nibabel.Nifti1Image(data_4d, affine)
    assert_equal(find_xyz_cut_coords(img_3d), find_xyz_cut_coords(img_4d))
Example No. 2
    def _run_interface(self, runtime):
        orig_file_nii = nb.load(self.inputs.in_file)
        in_file_data = orig_file_nii.get_data()

        # pad the data to avoid the mask estimation running into edge effects
        in_file_data_padded = np.pad(in_file_data, (1, 1), 'constant',
                                     constant_values=(0, 0))

        padded_nii = nb.Nifti1Image(in_file_data_padded, orig_file_nii.affine,
                                    orig_file_nii.header)

        mask_nii = compute_epi_mask(padded_nii, exclude_zeros=True)

        mask_data = mask_nii.get_data()
        if isdefined(self.inputs.dilation):
            mask_data = nd.morphology.binary_dilation(mask_data).astype(np.uint8)

        # reverse image padding
        mask_data = mask_data[1:-1, 1:-1, 1:-1]

        # exclude zero and NaN voxels
        mask_data[in_file_data == 0] = 0
        mask_data[np.isnan(in_file_data)] = 0

        better_mask = nb.Nifti1Image(mask_data, orig_file_nii.affine,
                                     orig_file_nii.header)
        better_mask.set_data_dtype(np.uint8)
        better_mask.to_filename("mask_file.nii.gz")

        self._mask_file = os.path.join(runtime.cwd, "mask_file.nii.gz")

        runtime.returncode = 0
        return super(ComputeEPIMask, self)._run_interface(runtime)
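
A note on the padding trick used above: np.pad with a single (1, 1) pair adds one zero voxel on every side of every axis, and the [1:-1, 1:-1, 1:-1] slice removes it again. A minimal NumPy-only sketch of that round trip, assuming a 3D array:

import numpy as np

data = np.arange(27, dtype=float).reshape(3, 3, 3)
# a single (before, after) pair is applied to every axis
padded = np.pad(data, (1, 1), 'constant', constant_values=(0, 0))
assert padded.shape == (5, 5, 5)
# cropping one voxel from each side restores the original array
restored = padded[1:-1, 1:-1, 1:-1]
assert np.array_equal(restored, data)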
Example No. 3
    def _run_interface(self, runtime):
        target_affine = None
        target_shape = None

        if isdefined(self.inputs.target_affine):
            target_affine = self.inputs.target_affine
        if isdefined(self.inputs.target_shape):
            target_shape = self.inputs.target_shape

        masknii = compute_epi_mask(
            self.inputs.in_files,
            lower_cutoff=self.inputs.lower_cutoff,
            upper_cutoff=self.inputs.upper_cutoff,
            connected=self.inputs.connected,
            opening=self.inputs.opening,
            exclude_zeros=self.inputs.exclude_zeros,
            ensure_finite=self.inputs.ensure_finite,
            target_affine=target_affine,
            target_shape=target_shape
        )

        self._results['out_mask'] = genfname(
            self.inputs.in_files[0], suffix='mask')
        masknii.to_filename(self._results['out_mask'])
        return runtime
Example No. 4
def compute_mask(in_file, out_file=None):
    """Compute a brain mask from a 4D or a3D image."""
   # TODO: use nipy ComputeMask
    from nilearn.masking import compute_epi_mask
    mask_image = compute_epi_mask(in_file)
    if out_file is None:
        out_file = add_prefix('brain_mask_', in_file)

    nibabel.save(mask_image, out_file)
    return out_file
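
A minimal usage sketch for compute_mask above, on a synthetic image and with out_file given explicitly so the add_prefix helper (not shown here) is not needed; it assumes nibabel is imported at module level as in the original file:

import numpy as np
import nibabel

# synthetic EPI-like volume: a bright cube on a zero background
data = np.zeros((20, 20, 20))
data[4:16, 4:16, 4:16] = 10.0
nibabel.save(nibabel.Nifti1Image(data, np.eye(4)), 'epi_example.nii.gz')

mask_path = compute_mask('epi_example.nii.gz', out_file='brain_mask.nii.gz')
print(mask_path)  # brain_mask.nii.gz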
Example No. 5
def compute_global_masker(rootdir, subjects):
    masks = [
        compute_epi_mask(
            glob.glob(
                os.path.join(rootdir, "fmri-data/en", "sub-%03d" % s, "func",
                             "*.nii"))) for s in subjects
    ]
    global_mask = math_img('img>0.5', img=mean_img(masks))
    masker = MultiNiftiMasker(global_mask, detrend=True, standardize=True)
    masker.fit()
    return masker
Example No. 6
def compute_global_masker(files):  # [[path, path2], [path3, path4]]
    # return a MultiNiftiMasker object
    masks = [compute_epi_mask(f) for f in files]
    global_mask = math_img(
        'img>0.5',
        img=mean_img(masks))  # take the average mask and threshold at 0.5
    masker = MultiNiftiMasker(
        global_mask, detrend=True, standardize=True
    )  # returns an object that transforms a 4D brain into a 2D voxel-by-time matrix and can do the reverse
    masker.fit()
    return masker
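
A hedged usage sketch for the function above, using in-memory synthetic runs instead of real file paths (compute_epi_mask and MultiNiftiMasker accept Niimg-like objects as well as paths); the imports mirror what the original module is assumed to provide:

import numpy as np
import nibabel as nib
from nilearn.masking import compute_epi_mask
from nilearn.image import math_img, mean_img
from nilearn.maskers import MultiNiftiMasker  # nilearn.input_data in older releases

def _fake_run(seed):
    # 4D run with a bright central block so compute_epi_mask finds a non-empty mask
    rng = np.random.RandomState(seed)
    data = rng.uniform(0.0, 1.0, size=(12, 12, 12, 10))
    data[2:10, 2:10, 2:10, :] += 50.0
    return nib.Nifti1Image(data, np.eye(4))

runs = [_fake_run(0), _fake_run(1)]
masker = compute_global_masker(runs)
series = masker.transform(runs)  # list of (n_timepoints, n_voxels) arrays
print(series[0].shape)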
Example No. 7
def compute_global_masker(files, **kwargs): # [[path, path2], [path3, path4]]
    """Returns a NiftiMasker object from list (of list) of files.
    Arguments:
        - files: list (of list of str)
    Returns:
        - masker: NiftiMasker
    """
    masks = [compute_epi_mask(f) for f in files]
    global_mask = math_img('img>0.95', img=mean_img(masks)) # take the average mask and threshold at 0.95
    masker = NiftiMasker(global_mask, **kwargs)
    masker.fit()
    return masker
Example No. 8
def mask_roi(dir_path, roi, mask, img_file):
    """
    Create derivative ROI based on intersection of roi and brain mask.

    Parameters
    ----------
    dir_path : str
        Path to directory containing subject derivative data for given run.
    roi : str
        File path to binarized/boolean region-of-interest Nifti1Image file.
    mask : str
        Path to binarized/boolean brain mask Nifti1Image file.
    img_file : str
        File path to Nifti1Image to use to generate an epi-mask.

    Returns
    -------
    roi : str
        File path to binarized/boolean region-of-interest Nifti1Image file, reduced to the spatial intersection with
        the input brain mask.
    """
    import os.path as op
    from nilearn import masking
    from nilearn.masking import intersect_masks
    from nilearn.image import math_img, resample_img

    img_mask_path = f"{dir_path}/{op.basename(img_file).split('.')[0]}_mask.nii.gz"
    nib.save(masking.compute_epi_mask(img_file), img_mask_path)

    if roi and mask:
        print("Refining ROI...")
        _mask_img = nib.load(img_mask_path)
        _roi_img = nib.load(roi)
        roi_res_img = resample_img(
            _roi_img,
            target_affine=_mask_img.affine,
            target_shape=_mask_img.shape,
            interpolation="nearest",
        )
        masked_roi_img = intersect_masks(
            [
                math_img("img > 0.0", img=_mask_img),
                math_img("img > 0.0", img=roi_res_img),
            ],
            threshold=1,
            connected=False,
        )

        roi_red_path = f"{dir_path}/{op.basename(roi).split('.')[0]}_mask.nii.gz"
        nib.save(masked_roi_img, roi_red_path)
        roi = roi_red_path

    return roi
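
The refinement step above boils down to resampling the ROI onto the mask grid, binarizing both images, and intersecting them. A standalone sketch on synthetic images (all names here are illustrative):

import numpy as np
import nibabel as nib
from nilearn.image import math_img, resample_img
from nilearn.masking import intersect_masks

affine = np.eye(4)
brain = np.zeros((20, 20, 20))
brain[2:18, 2:18, 2:18] = 1
roi = np.zeros((20, 20, 20))
roi[10:16, 10:16, 10:16] = 1
brain_img = nib.Nifti1Image(brain, affine)
roi_img = nib.Nifti1Image(roi, affine)

roi_res_img = resample_img(roi_img, target_affine=brain_img.affine,
                           target_shape=brain_img.shape, interpolation="nearest")
refined = intersect_masks(
    [math_img("img > 0.0", img=brain_img), math_img("img > 0.0", img=roi_res_img)],
    threshold=1, connected=False)
print(int(refined.get_fdata().sum()))  # number of voxels kept in the refined ROI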
Example No. 9
def test_compute_epi_mask():
    mean_image = np.ones((9, 9, 3))
    mean_image[3:-2, 3:-2, :] = 10
    mean_image[5, 5, :] = 11
    mean_image = Nifti1Image(mean_image, np.eye(4))
    mask1 = compute_epi_mask(mean_image, opening=False)
    mask2 = compute_epi_mask(mean_image, exclude_zeros=True, opening=False)
    # With an array with no zeros, exclude_zeros should not make
    # any difference
    np.testing.assert_array_equal(get_data(mask1), get_data(mask2))
    # Check that padding with zeros does not change the extracted mask
    mean_image2 = np.zeros((30, 30, 3))
    mean_image2[3:12, 3:12, :] = get_data(mean_image)
    mean_image2 = Nifti1Image(mean_image2, np.eye(4))
    mask3 = compute_epi_mask(mean_image2, exclude_zeros=True, opening=False)
    np.testing.assert_array_equal(get_data(mask1), get_data(mask3)[3:12, 3:12])
    # However, without exclude_zeros, it does
    mask3 = compute_epi_mask(mean_image2, opening=False)
    assert_false(np.allclose(get_data(mask1), get_data(mask3)[3:12, 3:12]))

    # Check that we get a ValueError for incorrect shape
    mean_image = np.ones((9, 9))
    mean_image[3:-3, 3:-3] = 10
    mean_image[5, 5] = 100
    mean_image = Nifti1Image(mean_image, np.eye(4))
    assert_raises(ValueError, compute_epi_mask, mean_image)

    # Check that we get a useful warning for empty masks
    mean_image = np.zeros((9, 9, 9))
    mean_image[0, 0, 1] = -1
    mean_image[0, 0, 0] = 1.2
    mean_image[0, 0, 2] = 1.1
    mean_image = Nifti1Image(mean_image, np.eye(4))
    with pytest.warns(MaskWarning, match='Computed an empty mask'):
        compute_epi_mask(mean_image, exclude_zeros=True)
Example No. 10
def get_fmri_instance(individual=0, path_fmri=dataset_path + '/datasets/01/fMRI/'):

	individuals = sorted([f for f in listdir(path_fmri) if isdir(join(path_fmri, f))])

	individual = individuals[individual]

	fmri_file = '/3_nw_mepi_rest_with_cross.nii.gz'

	complete_path = path_fmri + individual + fmri_file

	mask_img = compute_epi_mask(complete_path)

	return apply_mask(complete_path, mask_img)
Example No. 11
def basic_plots():
    """
    Plot and save a mean EPI image and an EPI mask
    :return:
    """
    for input in glob('input/sub*'):
        sub = op.basename(input)
        func = input + '/func/{}_task-oneback_run-01_bold.nii.gz'.format(sub)
        mean = mean_img(func)
        plot_epi(mean).savefig('figures/{}_mean-epi.png'.format(sub))
        mask_img = compute_epi_mask(func)
        mask_img.to_filename('{}_brain-mask.nii.gz'.format(sub))
        plot_roi(mask_img, mean).savefig('figures/{}_brainmask.png'.format(sub))
Example No. 12
def run_tedana(files, tes, seed):
    """
    Run tedana workflow across a range of parameters

    Parameters
    ----------
    files : list of str
        Echo-specific preprocessed data files
    tes : list of floats
        Echo times in seconds
    seed : int
        Random seed
    """
    out_dir = '/scratch/tsalo006/reliability_analysis/tedana_outputs/'
    ds_dir = '/home/data/nbc/external-datasets/ds001491/'
    tes = [te * 1000 for te in tes]
    sub = re.findall('sub-[0-9a-zA-Z]+_', files[0])[0][:-1]
    #ds_dir = files[0][:files[0].index(sub)]
    name = 'tedana_seed-{0:03d}'.format(seed)
    ted_dir = op.join(ds_dir, 'derivatives', name, sub, 'func')
    if op.isdir(ted_dir):
        rmtree(ted_dir)
    makedirs(ted_dir)

    mask = op.join(ted_dir, 'nilearn_epi_mask.nii')
    mask_img = compute_epi_mask(files[0])
    mask_img.to_filename(mask)

    tedana_workflow(data=files,
                    tes=tes,
                    fixed_seed=seed,
                    tedpca='mle',
                    mask=mask,
                    out_dir=ted_dir,
                    debug=True,
                    gscontrol=None)
    # Grab the files we care about
    log_file = op.join(ted_dir, 'runlog.tsv')
    out_log_file = op.join(out_dir,
                           '{0}_seed-{1:03d}_log.tsv'.format(sub, seed))
    ct_file = op.join(ted_dir, 'comp_table_ica.txt')
    out_ct_file = op.join(out_dir,
                          '{0}_seed-{1:03d}_comptable.txt'.format(sub, seed))
    dn_file = op.join(ted_dir, 'dn_ts_OC.nii')
    out_dn_file = op.join(out_dir,
                          '{0}_seed-{1:03d}_denoised.nii'.format(sub, seed))
    copyfile(log_file, out_log_file)
    copyfile(ct_file, out_ct_file)
    copyfile(dn_file, out_dn_file)
    if seed != 0:  # keep first seed for t2s map
        rmtree(ted_dir)
Example No. 13
    def _run_interface(self, runtime):
        from skimage import morphology as sim
        from scipy.ndimage.morphology import binary_fill_holes
        from nilearn.masking import compute_epi_mask

        in_files = self.inputs.in_files

        if self.inputs.enhance_t2:
            in_files = [
                _enhance_t2_contrast(f, newpath=runtime.cwd) for f in in_files
            ]

        masknii = compute_epi_mask(
            in_files,
            lower_cutoff=self.inputs.lower_cutoff,
            upper_cutoff=self.inputs.upper_cutoff,
            connected=self.inputs.connected,
            opening=self.inputs.opening,
            exclude_zeros=self.inputs.exclude_zeros,
            ensure_finite=self.inputs.ensure_finite,
            target_affine=self.inputs.target_affine,
            target_shape=self.inputs.target_shape,
        )

        if self.inputs.closing:
            closed = sim.binary_closing(
                np.asanyarray(masknii.dataobj).astype(np.uint8),
                sim.ball(1)).astype(np.uint8)
            masknii = masknii.__class__(closed, masknii.affine, masknii.header)

        if self.inputs.fill_holes:
            filled = binary_fill_holes(
                np.asanyarray(masknii.dataobj).astype(np.uint8),
                sim.ball(6)).astype(np.uint8)
            masknii = masknii.__class__(filled, masknii.affine, masknii.header)

        if self.inputs.no_sanitize:
            in_file = self.inputs.in_files
            if isinstance(in_file, list):
                in_file = in_file[0]
            nii = nb.load(in_file)
            qform, code = nii.get_qform(coded=True)
            masknii.set_qform(qform, int(code))
            sform, code = nii.get_sform(coded=True)
            masknii.set_sform(sform, int(code))

        self._results["out_mask"] = fname_presuffix(self.inputs.in_files[0],
                                                    suffix="_mask",
                                                    newpath=runtime.cwd)
        masknii.to_filename(self._results["out_mask"])
        return runtime
Example No. 14
def func_Extract(inEPI):
    import nibabel as nib
    import os
    from nilearn.masking import compute_epi_mask
    niimg = nib.load(inEPI)
    niimg_extract = compute_epi_mask(niimg,
                                     lower_cutoff=0.3,
                                     upper_cutoff=0.8,
                                     connected=True,
                                     opening=1,
                                     exclude_zeros=False,
                                     ensure_finite=True,
                                     verbose=0)
    nib.save(niimg_extract, 'func_brain.nii.gz')
    return os.path.abspath('func_brain.nii.gz')
Example No. 15
    def calc_global_SNR(self):
        self.imean = image.mean_img([self.i1, self.i2])
        self.isub = image.math_img("(i2 - i1)", i1=self.i1, i2=self.i2)
        
        self.mask = masking.compute_epi_mask(self.imean)
        if not self.mask.get_fdata().any():
            im = self.imean.get_fdata()[:,:,0]
            thresh = filters.threshold_otsu(im)
            binary = im > thresh
            mask2 = self.imean.get_fdata().copy()
            mask2[:,:,0]=binary
            self.mask = image.new_img_like(self.imean, mask2)

        imean = masking.apply_mask(self.imean, self.mask)
        isub = masking.apply_mask(self.isub, self.mask)
        self.SNR_global, self.SNR_global_std = self.calc_SNR(imean, isub)
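
The calc_SNR method is not shown above, so the sketch below assumes the conventional difference-image formula SNR = mean(signal) / (std(difference) / sqrt(2)); everything else mirrors the method, on two synthetic repeat acquisitions:

import numpy as np
import nibabel as nib
from nilearn import image, masking

rng = np.random.RandomState(0)
base = np.zeros((16, 16, 16))
base[3:13, 3:13, 3:13] = 100.0          # bright "object" on a dark background
i1 = nib.Nifti1Image(base + rng.normal(0, 2, base.shape), np.eye(4))
i2 = nib.Nifti1Image(base + rng.normal(0, 2, base.shape), np.eye(4))

imean_img = image.mean_img([i1, i2])
isub_img = image.math_img("(i2 - i1)", i1=i1, i2=i2)
mask = masking.compute_epi_mask(imean_img)

imean = masking.apply_mask(imean_img, mask)
isub = masking.apply_mask(isub_img, mask)
# conventional two-acquisition SNR; the original calc_SNR may differ in detail
snr_global = imean.mean() / (isub.std() / np.sqrt(2))
print(snr_global)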
Example No. 16
    def _run_interface(self, runtime):

        in_files = self.inputs.in_files

        if self.inputs.enhance_t2:
            in_files = [_enhance_t2_contrast(f, newpath=runtime.cwd)
                        for f in in_files]

        masknii = compute_epi_mask(
            in_files,
            lower_cutoff=self.inputs.lower_cutoff,
            upper_cutoff=self.inputs.upper_cutoff,
            connected=self.inputs.connected,
            opening=self.inputs.opening,
            exclude_zeros=self.inputs.exclude_zeros,
            ensure_finite=self.inputs.ensure_finite,
            target_affine=self.inputs.target_affine,
            target_shape=self.inputs.target_shape
        )

        if self.inputs.closing:
            closed = sim.binary_closing(masknii.get_data().astype(
                np.uint8), sim.ball(1)).astype(np.uint8)
            masknii = masknii.__class__(closed, masknii.affine,
                                        masknii.header)

        if self.inputs.fill_holes:
            filled = binary_fill_holes(masknii.get_data().astype(
                np.uint8), sim.ball(6)).astype(np.uint8)
            masknii = masknii.__class__(filled, masknii.affine,
                                        masknii.header)

        if self.inputs.no_sanitize:
            in_file = self.inputs.in_files
            if isinstance(in_file, list):
                in_file = in_file[0]
            nii = nb.load(in_file)
            qform, code = nii.get_qform(coded=True)
            masknii.set_qform(qform, int(code))
            sform, code = nii.get_sform(coded=True)
            masknii.set_sform(sform, int(code))

        self._results['out_mask'] = fname_presuffix(
            self.inputs.in_files[0], suffix='_mask', newpath=runtime.cwd)
        masknii.to_filename(self._results['out_mask'])
        return runtime
Example No. 17
def estimateMask(im, st="background", logger=None):
    """
    Mask the whole-head image (if we don't have one).
    Wrapper for the NiLearn implementation.
    :param im: image
    :param st: type of automatic extraction: "epi" for EPI images,
    "background" for all others.
    :param logger: logger file
    :return: mask
    """
    from nilearn import masking
    write_to_logger("Estimating masks...", logger)
    if st == "epi":
        mask = masking.compute_epi_mask(im)
    else:
        mask = masking.compute_background_mask(im)
    return mask
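
The two branches above map directly onto nilearn's two mask estimators. A minimal sketch on a synthetic image (the function itself is not called here because its write_to_logger helper is not shown):

import numpy as np
import nibabel as nib
from nilearn import masking

data = np.zeros((16, 16, 16))
data[3:13, 3:13, 3:13] = 10.0
img = nib.Nifti1Image(data, np.eye(4))

epi_mask = masking.compute_epi_mask(img)         # the st == "epi" branch
bg_mask = masking.compute_background_mask(img)   # the default branch
print(int(epi_mask.get_fdata().sum()), int(bg_mask.get_fdata().sum()))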
Example No. 18
def make_niimg_masks(i):
    tl.files.exists_or_mkdir(data_path+str(i))
    
    
    patient_path = path + 'training_' + str(i) + '/'
    pMris = [glob(os.path.join(patient_path, '*.Brain.XX.O.MR_' + mri_type+'.*'))for mri_type in mri_types[1:]]
  #  print(pMris)
    niimg = [glob(os.path.join(pMri[0], '*'+'.nii'))[0] for pMri in pMris]
    affines = [nib.load(img).affine for img in niimg]
   # print(nib.load(niimg[0]).affine)
    niimg = [nib.load(img).get_data() for img in niimg]
    print (niimg[0].shape)
    
  #  niimg = [ndimage.zoom(img, (1,1,6)) for img in niimg]
    niimg = [nib.Nifti1Image(niimg[j], affine=affines[j]) for j in range(len(niimg))]
    niimg = [nilearn.image.smooth_img(img, fwhm=6)for img in niimg]
  #  print(niimg[0].get_data().shape)
   # niimg = [nilearn.image.resample_img(img, target_affine=np.eye(4))for img in niimg]
  #  print(niimg.get_affine() )
   # niimg = nilearn.image.mean_img(niimg)
  #  brainmask = [nilearn.masking.compute_epi_mask(img) for img in niimg]
  #  brainmask = [nilearn.image.resample_img(img, target_affine=np.eye(3))for img in brainmask]
    
    brainmask = compute_multi_epi_mask(niimg)
    affine = brainmask.affine
  #  print(brainmask.get_data().shape)
  #  print(brainmask.get_affine() )
    pMris1 = glob(os.path.join(patient_path, '*.Brain.XX.O.MR_' + '4DPWI'+'.*'))    
    im = glob(os.path.join(pMris1[0], '*'+'.nii'))[0]
    im = nib.load(im).get_data()    
    im = np.squeeze(im)
    im = np.mean(im, axis=3)#added after several times without this
  #  im = ndimage.zoom(im, (1,1,6))
    im = nib.Nifti1Image(im, affine=affine)
    niimg1 = nilearn.image.smooth_img(im, fwhm=6)
    
  #  print(niimg1.get_affine())
  #  niimg1 = nilearn.image.mean_img(niimg1)
    
    brainmask1 = compute_epi_mask(niimg1)
  #  print(brainmask1.get_data().shape)
    brainmask2 = nilearn.masking.intersect_masks([brainmask, brainmask1], threshold=.8)
    path1 = data_path + str(i) + '/'
    nib.save(brainmask2, os.path.join(path1, 'brainmask.nii.gz'))
    print(patient_path, path1)
Example No. 19
def otherNii(filename, timepoint):
    '''
    Uses an EPI mask of the 3D image at the given timepoint as features
    '''
    DATA_PATH_FOR_NII = r"C:\Users\Ted\Desktop\CAIS_MUSIC_BRAIN\NII-Files"

    niiname2 = os.path.join(
        DATA_PATH_FOR_NII,
        "sub-01_sadln_filtered_func_200hpf_cut20_standard.nii")

    nilearn.plotting.show()
    result = nilearn.image.index_img(niiname2, timepoint)
    print(result.shape)
    plot_epi(result)
    mask_img = compute_epi_mask(result)
    nilearn.plotting.plot_roi(mask_img, result)
    masked_data = nilearn.masking.apply_mask(filename, mask_img)
    return masked_data, masked_data.shape[1]
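
A self-contained sketch of the same idea (compute an EPI mask from a single 3D timepoint, then apply it to the full 4D series), using a synthetic image instead of the hard-coded path:

import numpy as np
import nibabel as nib
from nilearn.image import index_img
from nilearn.masking import compute_epi_mask, apply_mask

rng = np.random.RandomState(0)
data = rng.uniform(0.0, 1.0, size=(12, 12, 12, 8))
data[2:10, 2:10, 2:10, :] += 20.0
func_img = nib.Nifti1Image(data, np.eye(4))

volume0 = index_img(func_img, 0)          # 3D image at the chosen timepoint
mask_img = compute_epi_mask(volume0)
masked = apply_mask(func_img, mask_img)   # shape (n_timepoints, n_voxels)
print(masked.shape)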
Example No. 20
    def epi_mask(self, calculate=True, apply=False, plot=True):
        """Compute the average background mask for all given brains.

        :param calculate: {bool} do calculate or just use other options.
        :param apply: {bool} if True, the mask is directly applied to cut down the brains in :py:attr:`data` and
            reshape them into vectors
        :param plot: {bool} whether the generated mask should be visualized in a plot. The first image in 4th
            dimension is shown.
        :return: mask matrix in the attribute :py:attr:`mask` and if `apply=True` data vectors in :py:attr:`data`.
        """
        print("Computing EPI mask...")
        if calculate:
            self.mask = compute_epi_mask(self.img)
        if apply:
            self.apply_mask()
        if plot:
            plot_roi(self.mask, Nifti1Image(self.img.dataobj[..., 0], self.img.affine))
        print("\tEPI mask computed!")
Example No. 21
    def _run_interface(self, runtime):

        in_files = self.inputs.in_files

        if self.inputs.enhance_t2:
            in_files = [
                _enhance_t2_contrast(f, newpath=runtime.cwd) for f in in_files
            ]

        masknii = compute_epi_mask(in_files,
                                   lower_cutoff=self.inputs.lower_cutoff,
                                   upper_cutoff=self.inputs.upper_cutoff,
                                   connected=self.inputs.connected,
                                   opening=self.inputs.opening,
                                   exclude_zeros=self.inputs.exclude_zeros,
                                   ensure_finite=self.inputs.ensure_finite,
                                   target_affine=self.inputs.target_affine,
                                   target_shape=self.inputs.target_shape)

        if self.inputs.closing:
            closed = sim.binary_closing(masknii.get_data().astype(np.uint8),
                                        sim.ball(1)).astype(np.uint8)
            masknii = masknii.__class__(closed, masknii.affine, masknii.header)

        if self.inputs.fill_holes:
            filled = binary_fill_holes(masknii.get_data().astype(np.uint8),
                                       sim.ball(6)).astype(np.uint8)
            masknii = masknii.__class__(filled, masknii.affine, masknii.header)

        if self.inputs.no_sanitize:
            in_file = self.inputs.in_files
            if isinstance(in_file, list):
                in_file = in_file[0]
            nii = nb.load(in_file)
            qform, code = nii.get_qform(coded=True)
            masknii.set_qform(qform, int(code))
            sform, code = nii.get_sform(coded=True)
            masknii.set_sform(sform, int(code))

        self._results['out_mask'] = fname_presuffix(self.inputs.in_files[0],
                                                    suffix='_mask',
                                                    newpath=runtime.cwd)
        masknii.to_filename(self._results['out_mask'])
        return runtime
Example No. 22
def compute_global_masker(files,
                          smoothing_fwhm=None
                          ):  # [[path, path2], [path3, path4]]
    """Returns a MultiNiftiMasker object from list (of list) of files.
    Arguments:
        - files: list (of list of str)
    Returns:
        - masker: MultiNiftiMasker
    """
    masks = [compute_epi_mask(f) for f in files]
    global_mask = math_img(
        'img>0.5',
        img=mean_img(masks))  # take the average mask and threshold at 0.5
    masker = MultiNiftiMasker(global_mask,
                              detrend=True,
                              standardize=True,
                              smoothing_fwhm=smoothing_fwhm)
    masker.fit()
    return masker
Example No. 23
    def run(self):
        self.image = image.load_img(self.ic_path)
        self.raw_data = self.image.get_fdata()

        if self.ic_ts_path is not None:
            self.timeseries = pd.read_csv(self.ic_ts_path,
                                          sep=' ',
                                          header=None).dropna(axis=1).values
        else:
            mask_img = compute_epi_mask(self.image, 0, 1)
            self.timeseries = apply_mask(self.image, mask_img)

        if self.ic_ft_path is not None:
            self.ftransorm = pd.read_csv(self.ic_ft_path, sep=' ',
                                         header=None).dropna(axis=1).values
        else:
            # TODO: implement correct FT
            print("Fourier transformation calculation is not supported now.")
            self.ftransorm = np.abs(np.fft.rfft(self.timeseries))
Example No. 24
    def fit_mask(self,
                 modality='INV2',
                 smooth_fwhm=2.5,
                 threshold=None,
                 **kwargs):
        """Fit a mask based on one of the MP2RAGE images (usually INV2).

        This function creates a mask of the brain and skull, so that parts of the image
        that have insufficient signal for proper T1-fitting can be ignored.
        By default, it uses a slightly smoothed version of the INV2-image (to increase
        SNR), and the "Nichols"-method, as implemented in the ``nilearn``-package,
        to remove low-signal areas. The "Nichols"-method looks for the lowest
        density in the intensity histogram and places a threshold cut there.

        You can also give an arbitrary 'threshold'-parameter to threshold the image
        at a specific value.

        The resulting mask is returned and stored in the ``mask``-attribute of
        the MP2RAGEFitter-object.

        Args:
            modality (str): Modality to use for masking operation (defaults to INV2)
            smooth (float): The size of the smoothing kernel to apply in mm (defaults
                            to 2.5 mm)
            threshold (float): If not None, the image is thresholded at this (arbitrary)
                               number.
            **kwargs: These arguments are forwarded to nilearn's ``compute_epi_mask``

        Returns:
            The computed mask

        """

        im = getattr(self, modality.lower())

        if threshold is None:
            smooth_im = image.smooth_img(im, smooth_fwhm)
            self._mask = masking.compute_epi_mask(smooth_im, **kwargs)
        else:
            self._mask = image.math_img('im > %s' % threshold, im=im)

        return self.mask
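
Outside the MP2RAGEFitter class, the two code paths above reduce to two short nilearn calls. A hedged sketch on a synthetic INV2-like image (the value 100 below is an arbitrary illustrative threshold):

import numpy as np
import nibabel as nib
from nilearn import image, masking

data = np.zeros((20, 20, 20))
data[4:16, 4:16, 4:16] = 500.0
inv2 = nib.Nifti1Image(data, np.eye(4))

# default path: smooth to boost SNR, then the histogram-based ("Nichols") mask
smooth_im = image.smooth_img(inv2, 2.5)
mask_auto = masking.compute_epi_mask(smooth_im)

# explicit-threshold path
mask_thresh = image.math_img('im > %s' % 100, im=inv2)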
Example No. 25
def compute_global_masker(files):  # [[path, path2], [path3, path4]]
    # return a MultiNiftiMasker object

    #spm_dir = '/neurospin/unicog/protocols/IRMf/Meyniel_MarkovGuess_2014'
    #mask = join(spm_dir, 'spm12/tpm/mask_ICV.nii')
    #global_mask = math_img('img>0', img=mask)
    #masker = MultiNiftiMasker(mask_img=global_mask)
    #masker.fit()

    masks = [compute_epi_mask(f) for f in files]
    global_mask = math_img(
        'img>0.5',
        img=mean_img(masks))  # take the average mask and threshold at 0.5
    masker = MultiNiftiMasker(
        global_mask,
        detrend=params.pref.detrend,
        standardize=params.pref.standardize
    )  # returns an object that transforms a 4D brain into a 2D voxel-by-time matrix and can do the reverse
    masker.fit()
    return masker
Example No. 26
def get_masked_epi(fmri_instances, masker=None, smooth_factor=10, threshold="80%"):
	if(masker == None):
		if(isinstance(fmri_instances, list)):
			img_epi = image.mean_img(fmri_instances)
			img_epi = image.smooth_img(img_epi, smooth_factor)
			img_epi = image.threshold_img(img_epi, threshold=threshold)

			masker = NiftiMasker()
			masker.fit(img_epi)

			masked_instances = []

			for instance in fmri_instances:
				masked_instances += [apply_mask(instance, masker.mask_img_)]

			return masked_instances, masker
		else:
			masker = compute_epi_mask(fmri_instances)

	return apply_mask(fmri_instances, masker), masker
def fmriThresh(img, thresh="90%", mask=True, verbose=0):
    """
    Parameters
    ----------
    img : nimage
        FMRI Image or path to an image
    thresh : float [0, 1] or string, optional
        The default is "90%".
        Sets the threshold
    mask : bool, optional
        The default is True.
        Whether to use an EPI mask or not
    verbose : int, optional
        Sets whether to plot the scan or not
        

    Returns
    -------
    data : 3d binary np array
        Thresholded brain scan
    affine : np array
        For reconstructing the nimage
    """
    brain = image.load_img(img)
    if (mask):
        imgMask = masking.compute_epi_mask(epi_img=brain,
                                           connected=True,
                                           opening=1)
        brain = image.math_img('img1 * img2', img1=brain, img2=imgMask)
    #brain = image.threshold_img(brain, thresh)
    brain = image.threshold_img(brain, thresh)
    affine = brain.affine
    data = brain.get_data()
    #data = (data > filters.threshold_otsu(data)) * data

    if (verbose):
        #brain = nib.Nifti1Image(data.astype(int), affine)
        plotting.plot_glass_brain(brain)
    return data, affine
Example No. 28
def mask_roi(dir_path, roi, mask, img_file):
    """
    Create derivative ROI based on intersection of roi and brain mask.

    Parameters
    ----------
    dir_path : str
        Path to directory containing subject derivative data for given run.
    roi : str
        File path to binarized/boolean region-of-interest Nifti1Image file.
    mask : str
        Path to binarized/boolean brain mask Nifti1Image file.
    img_file : str
        File path to Nifti1Image to use to generate an epi-mask.

    Returns
    -------
    roi : str
        File path to binarized/boolean region-of-interest Nifti1Image file, reduced to the spatial intersection with
        the input brain mask.
    """
    import os
    import os.path as op
    from nilearn import masking

    img_mask_path = "%s%s%s%s" % (
        dir_path, '/', op.basename(img_file).split('.')[0], '_mask.nii.gz')
    img_mask = masking.compute_epi_mask(img_file)
    nib.save(img_mask, img_mask_path)

    if roi and mask:
        print('Refining ROI...')
        roi_red_path = "%s%s%s%s" % (
            dir_path, '/', op.basename(roi).split('.')[0], '_mask.nii.gz')
        os.system("fslmaths {} -mas {} -mas {} -bin {}".format(
            roi, mask, img_mask_path, roi_red_path))
        roi = roi_red_path
    return roi
Example No. 29
    def _run_interface(self, runtime):
        target_affine = None
        target_shape = None

        if isdefined(self.inputs.target_affine):
            target_affine = self.inputs.target_affine
        if isdefined(self.inputs.target_shape):
            target_shape = self.inputs.target_shape

        masknii = compute_epi_mask(self.inputs.in_files,
                                   lower_cutoff=self.inputs.lower_cutoff,
                                   upper_cutoff=self.inputs.upper_cutoff,
                                   connected=self.inputs.connected,
                                   opening=self.inputs.opening,
                                   exclude_zeros=self.inputs.exclude_zeros,
                                   ensure_finite=self.inputs.ensure_finite,
                                   target_affine=target_affine,
                                   target_shape=target_shape)

        self._results['out_mask'] = genfname(self.inputs.in_files[0],
                                             suffix='mask')
        masknii.to_filename(self._results['out_mask'])
        return runtime
Example No. 30
def eddy_current_correction(img, file_bvals, file_bvecs, write_dir, mem,
                            acqp='b0_acquisition_params_AP.txt'):
    """ Perform Eddy current correction on diffusion data
    Todo: do topup + eddy in one single command 
    """
    import nibabel as nib
    bvals = np.loadtxt(file_bvals)
    mean_img = get_mean_unweighted_image(nib.load(img), bvals)
    mean_img.to_filename(os.path.join(write_dir, 'mean_unweighted.nii.gz'))
    mask = compute_epi_mask(mean_img)
    mask_file = os.path.join(write_dir, 'mask.nii.gz')
    nib.save(mask, mask_file)
    corrected = os.path.join(os.path.dirname(img),
                             'ed' + os.path.basename(img))
    index = np.ones(len(bvals), np.uint8)
    index_file = os.path.join(write_dir, 'index.txt')
    np.savetxt(index_file, index)
    cmd = 'fsl5.0-eddy_correct --acqp=%s --bvals=%s --bvecs=%s --imain=%s --index=%s --mask=%s --out=%s' % (
        acqp, file_bvals, file_bvecs, img, index_file, mask_file, corrected)
    cmd = 'fsl5.0-eddy_correct %s %s %d' % (img, corrected, 0)
    print(cmd)
    print(commands.getoutput(cmd))
    print(cache(commands.getoutput, mem)(cmd))
Example No. 31
    def _run_interface(self, runtime):
        from scipy.ndimage.morphology import binary_dilation
        from nilearn.masking import compute_epi_mask

        orig_file_nii = nb.load(self.inputs.in_file)
        in_file_data = orig_file_nii.get_fdata()

        # pad the data to avoid the mask estimation running into edge effects
        in_file_data_padded = np.pad(in_file_data, (1, 1),
                                     "constant",
                                     constant_values=(0, 0))

        padded_nii = nb.Nifti1Image(in_file_data_padded, orig_file_nii.affine,
                                    orig_file_nii.header)

        mask_nii = compute_epi_mask(padded_nii, exclude_zeros=True)

        mask_data = np.asanyarray(mask_nii.dataobj).astype(np.uint8)
        if isdefined(self.inputs.dilation):
            mask_data = binary_dilation(mask_data).astype(np.uint8)

        # reverse image padding
        mask_data = mask_data[1:-1, 1:-1, 1:-1]

        # exclude zero and NaN voxels
        mask_data[in_file_data == 0] = 0
        mask_data[np.isnan(in_file_data)] = 0

        better_mask = nb.Nifti1Image(mask_data, orig_file_nii.affine,
                                     orig_file_nii.header)
        better_mask.set_data_dtype(np.uint8)
        better_mask.to_filename("mask_file.nii.gz")

        self._mask_file = os.path.join(runtime.cwd, "mask_file.nii.gz")

        runtime.returncode = 0
        return super(ComputeEPIMask, self)._run_interface(runtime)
Example No. 32
def test_compute_epi_mask():
    mean_image = np.ones((9, 9, 3))
    mean_image[3:-2, 3:-2, :] = 10
    mean_image[5, 5, :] = 11
    mean_image = Nifti1Image(mean_image, np.eye(4))
    mask1 = compute_epi_mask(mean_image, opening=False)
    mask2 = compute_epi_mask(mean_image, exclude_zeros=True,
                             opening=False)
    # With an array with no zeros, exclude_zeros should not make
    # any difference
    np.testing.assert_array_equal(mask1.get_data(), mask2.get_data())
    # Check that padding with zeros does not change the extracted mask
    mean_image2 = np.zeros((30, 30, 3))
    mean_image2[3:12, 3:12, :] = mean_image.get_data()
    mean_image2 = Nifti1Image(mean_image2, np.eye(4))
    mask3 = compute_epi_mask(mean_image2, exclude_zeros=True,
                             opening=False)
    np.testing.assert_array_equal(mask1.get_data(),
                                  mask3.get_data()[3:12, 3:12])
    # However, without exclude_zeros, it does
    mask3 = compute_epi_mask(mean_image2, opening=False)
    assert_false(np.allclose(mask1.get_data(),
                             mask3.get_data()[3:12, 3:12]))

    # Check that we get a ValueError for incorrect shape
    mean_image = np.ones((9, 9))
    mean_image[3:-3, 3:-3] = 10
    mean_image[5, 5] = 100
    mean_image = Nifti1Image(mean_image, np.eye(4))
    assert_raises(ValueError, compute_epi_mask, mean_image)

    # Check that we get a useful warning for empty masks
    mean_image = np.zeros((9, 9, 9))
    mean_image[0, 0, 1] = -1
    mean_image[0, 0, 0] = 1.2
    mean_image[0, 0, 2] = 1.1
    mean_image = Nifti1Image(mean_image, np.eye(4))
    with warnings.catch_warnings(record=True) as w:
        compute_epi_mask(mean_image, exclude_zeros=True)
    assert_equal(len(w), 1)
    assert_true(isinstance(w[0].message, masking.MaskWarning))
Example No. 34
haxby_files = datasets.fetch_haxby(n_subjects=1)

### Visualization #############################################################

import matplotlib.pyplot as plt

# Compute the mean EPI: we do the mean along the axis 3, which is time
mean_haxby = mean_img(haxby_files.func)

plot_epi(mean_haxby)

### Extracting a brain mask ###################################################

# Simple computation of a mask from the fMRI data
from nilearn.masking import compute_epi_mask
mask_img = compute_epi_mask(haxby_files.func[0])

plot_roi(mask_img, mean_haxby)

### Applying the mask #########################################################

from nilearn.masking import apply_mask
masked_data = apply_mask(haxby_files.func[0], mask_img)

# masked_data shape is (timepoints, voxels). We can plot the first 150
# timepoints from two voxels

plt.figure(figsize=(7, 5))
plt.plot(masked_data[:150, :2])
plt.xlabel('Time [TRs]', fontsize=16)
plt.ylabel('Intensity', fontsize=16)
Example No. 35
# Visualization
from nilearn.image.image import mean_img

# Compute the mean EPI: we do the mean along the axis 3, which is time
func_filename = haxby_dataset.func[0]
mean_haxby = mean_img(func_filename)

from nilearn.plotting import plot_epi, show
plot_epi(mean_haxby)

##############################################################################
# Extracting a brain mask

# Simple computation of a mask from the fMRI data
from nilearn.masking import compute_epi_mask
mask_img = compute_epi_mask(func_filename)

# Visualize it as an ROI
from nilearn.plotting import plot_roi
plot_roi(mask_img, mean_haxby)

##############################################################################
# Applying the mask to extract the corresponding time series

from nilearn.masking import apply_mask
masked_data = apply_mask(func_filename, mask_img)

# masked_data shape is (timepoints, voxels). We can plot the first 150
# timepoints from two voxels

# And now plot a few of these
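
The plotting step is cut off here; a minimal continuation in the spirit of Example No. 34 above might look like this, assuming the imports earlier in this snippet:

import matplotlib.pyplot as plt

plt.figure(figsize=(7, 5))
plt.plot(masked_data[:150, :2])   # first 150 timepoints of two voxels
plt.xlabel('Time [TRs]', fontsize=16)
plt.ylabel('Intensity', fontsize=16)
show()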
Example No. 36
          cmap=plt.cm.gray)

# Third subplot: axial view
plt.subplot(1, 3, 3)
plt.axis('off')
plt.title('Axial')
plt.imshow(np.rot90(mean_img[:, :, 32]), interpolation='nearest',
          cmap=plt.cm.gray)
plt.subplots_adjust(left=.02, bottom=.02, right=.98, top=.95,
                   hspace=.02, wspace=.02)

### Extracting a brain mask ###################################################

# Simple computation of a mask from the fMRI data
from nilearn.masking import compute_epi_mask
mask_img = compute_epi_mask(haxby_files.func[0])
mask_data = mask_img.get_data().astype(bool)

# We create a new figure
plt.figure(figsize=(3, 4))
# A plot the axial view of the mask to compare with the axial
# view of the raw data displayed previously
plt.axis('off')
plt.imshow(np.rot90(mask_data[:, :, 32]), interpolation='nearest')
plt.subplots_adjust(left=.02, bottom=.02, right=.98, top=.95)

### Applying the mask #########################################################

from nilearn.masking import apply_mask
masked_data = apply_mask(haxby_files.func[0], mask_img)
Example No. 37
def load_data(directory,
              subject_name,
              mask_name='',
              num_runs=2,
              zscore_data=False):

    # Cycle through the masks
    print("Processing Start ...")

    # If there is a mask supplied then load it now
    if mask_name == '':
        mask = None
    else:
        mask = load_ss_mask(mask_name, subject_name)

    # Cycle through the runs
    for run in range(1, num_runs + 1):
        epi_data = load_ss_epi_data(subject_name, run)

        # Mask the data if necessary
        if mask_name != '':
            epi_mask_data = mask_data(epi_data, mask).T
        else:
            # Do a whole brain mask
            if run == 1:
                # Compute mask from epi
                mask = compute_epi_mask(epi_data).get_fdata()
            else:
                # Get the intersection mask
                # (set voxels that are within the mask on all runs to 1, set all other voxels to 0)
                mask *= compute_epi_mask(epi_data).get_fdata()

            # Reshape all of the data from 4D (X*Y*Z*time) to 2D (voxel*time): not great for memory
            epi_mask_data = epi_data.get_fdata().reshape(
                mask.shape[0] * mask.shape[1] * mask.shape[2],
                epi_data.shape[3])

        # Transpose and z-score (standardize) the data
        if zscore_data == True:
            scaler = preprocessing.StandardScaler().fit(epi_mask_data.T)
            preprocessed_data = scaler.transform(epi_mask_data.T)
        else:
            preprocessed_data = epi_mask_data.T

        # Concatenate the data
        if run == 1:
            concatenated_data = preprocessed_data
        else:
            concatenated_data = np.hstack(
                (concatenated_data, preprocessed_data))

    # Apply the whole-brain masking: First, reshape the mask from 3D (X*Y*Z) to 1D (voxel).
    # Second, get indices of non-zero voxels, i.e. voxels inside the mask.
    # Third, zero out all of the voxels outside of the mask.
    if mask_name == '':
        mask_vector = np.nonzero(
            mask.reshape(mask.shape[0] * mask.shape[1] * mask.shape[2], ))[0]
        concatenated_data = concatenated_data[mask_vector, :]

    # Return the list of mask data
    return concatenated_data, mask
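
The whole-brain path above intersects per-run EPI masks by elementwise multiplication; the same result can be obtained with nilearn's intersect_masks. A small sketch on synthetic runs (names chosen for illustration):

import numpy as np
import nibabel as nib
from nilearn.masking import compute_epi_mask, intersect_masks

def _fake_epi(seed):
    rng = np.random.RandomState(seed)
    data = rng.uniform(0.0, 1.0, size=(12, 12, 12, 6))
    data[2:10, 2:10, 2:10, :] += 20.0
    return nib.Nifti1Image(data, np.eye(4))

runs = [_fake_epi(0), _fake_epi(1)]
masks = [compute_epi_mask(run) for run in runs]
# threshold=1 keeps only voxels present in every run's mask
whole_brain = intersect_masks(masks, threshold=1, connected=False)
print(int(whole_brain.get_fdata().sum()))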
Example No. 38
def run_qa(mr_paths,html_dir,software="FSL",voxdim=[2,2,2],outlier_sds=6,investigator="brainman",
           nonzero_thresh=0.25,calculate_mean_image=True,view=True):
    '''run_qa: a tool to generate an interactive qa report for statistical maps

    mr_paths: a list of paths to brain statistical maps that can be read with nibabel [REQUIRED]
    software: currently only freesurfer is supported [default:FREESURFER]
    voxdim: a list of x,y,z dimensions to resample data when normalizing [default [2,2,2]]
    outlier_sds: the number of standard deviations from the mean to define an outlier [default:6]
    investigator: the name (string) of an investigator to add to alerts summary page [default:None]
    nonzero_thresh: images with # of nonzero voxels in brain mask < this value will be flagged as thresholded [default:0.25] 
    calculate_mean_image: Default True, should be set to False for larger datasets where memory is an issue
    view: view the web report in a browser at the end [default:True]
    '''

    # First resample to standard space
    print("Resampling all data to %s using %s standard brain..." %(voxdim,software))
    reference_file = get_standard_brain(software)
    mask_file = get_standard_mask(software)
    images_resamp, reference_resamp = resample_images_ref(mr_paths,reference_file,resample_dim=voxdim,interpolation="continuous")
    mask = resample_img(mask_file, target_affine=np.diag(voxdim))
    mask_bin = compute_epi_mask(mask)
    mask_out = np.zeros(mask_bin.shape)
    mask_out[mask_bin.get_data()==0] = 1
    voxels_out = np.where(mask_out==1)
    total_voxels = np.prod(mask_bin.shape)
    total_voxels_in =  len(np.where(mask_bin.get_data().flatten()==1)[0])
    total_voxels_out = len(np.where(mask_out.flatten()==1)[0])
    mask_out = nib.Nifti1Image(mask_out,affine=mask_bin.get_affine())
      
    # We will save qa values for all in a data frame
    results = pandas.DataFrame(columns=["voxels_in","voxels_out","standard_deviation_resamp","mean_resamp","variance_resamp","median_resamp","n_outliers_low_%ssd" %(outlier_sds),"n_outliers_high_%ssd" %(outlier_sds),"nonzero_percent_voxels_in_mask","threshold_flag"])

    # We also need to save distributions for the summary page
    all_histograms = []
    image_names = []

    # Set up directories
    pwd = get_package_dir() 
    images_dir = "%s/img" %(html_dir)
    make_dir(images_dir)
   

    # Calculate a mean image for the entire set
    if calculate_mean_image == True:
        print("Calculating mean image...")
        all_masked_data = apply_mask(images_resamp, mask_bin, dtype='f', smoothing_fwhm=None, ensure_finite=True)
        mean_image = np.zeros(mask_bin.shape)
        mean_image[mask_bin.get_data()==1] = np.mean(all_masked_data,axis=0)
        mean_image = nib.Nifti1Image(mean_image,affine=mask_bin.get_affine())
        mean_intensity = np.mean(mean_image.get_data()[mask_bin.get_data()==1])
        histogram_data_mean = get_histogram_data(mean_image.get_data()[mask_bin.get_data()==1])
        histogram_mean_counts = ",".join([str(x) for x in histogram_data_mean["counts"]])
        nib.save(mean_image,"%s/mean.nii" %(html_dir))
        make_stat_image("%s/mean.nii" %(html_dir),png_img_file="%s/mean.png" %(html_dir))    

    nib.save(reference_resamp,"%s/standard.nii" %(html_dir))
    nib.save(mask_bin,"%s/mask.nii" %(html_dir))
    nib.save(mask_out,"%s/mask_out.nii" %(html_dir))
    make_anat_image("%s/mask.nii" %(html_dir),png_img_file="%s/mask.png" %(html_dir))
    make_anat_image("%s/mask_out.nii" %(html_dir),png_img_file="%s/mask_out.png" %(html_dir))
    
    unzip("%s/static/qa_report.zip" %(pwd),html_dir)

    for m in range(0,len(mr_paths)):
        mr = images_resamp[m]
        mr_original = nib.load(mr_paths[m])
        image_name = os.path.split(mr_paths[m])[1]
        print("Generating qa report for %s" %(mr_paths[m]))
      
        # Output folder generation
        mr_folder = "%s/%s" %(html_dir,m)
        make_dir(mr_folder)
        mr_images = "%s/img" %(mr_folder)
        make_dir(mr_images)
        masked_in_data = mr.get_data()[mask_bin.get_data()==1]
        masked_out_data = mr.get_data()[mask_out.get_data()==1]
        mr_in_mask,mr_out_mask = make_in_out_mask(mask_bin=mask_bin,mr_folder=mr_folder,masked_in=masked_in_data,masked_out=masked_out_data,img_dir=mr_images)

        # Glass brain, masked, and histogram data
        make_stat_image("%s/masked.nii" %(mr_folder),png_img_file="%s/mr_masked.png" %(mr_images))
        make_glassbrain_image("%s/masked.nii" %(mr_folder),png_img_file="%s/glassbrain.png" %(mr_images))
        metrics = central_tendency(masked_in_data)

        # Header metrics
        mr_metrics = header_metrics(mr_original)
        histogram_data_in = get_histogram_data(masked_in_data)
        histogram_data_out = get_histogram_data(masked_out_data)

        # Counting voxels (should eventually be in/out brain ratios)
        count_in,count_out = count_voxels(masked_in=masked_in_data,masked_out=masked_out_data)
        high_out,low_out = outliers(masked_in_data,n_std=outlier_sds)
    
        # estimate thresholded or not. If the original image is the same shape as the mask, use it
        if mr_original.shape == mask.shape:
            threshold_flag,percent_nonzero = is_thresholded(mr_original,mask,threshold=nonzero_thresh)
        else: # this will return biased high values because we have resampled with this standard!
            threshold_flag,percent_nonzero = is_thresholded(mr,mask,threshold=nonzero_thresh)
      
        # Add everything to table, prepare single page template
        results.loc[m] = [count_in,count_out,metrics["std"],metrics["mean"],metrics["var"],metrics["med"],low_out,high_out,percent_nonzero,threshold_flag]

        if calculate_mean_image == True:
            template = get_template("qa_single_statmap_mean")
        else:
            template = get_template("qa_single_statmap")
  
        # Things to fill into individual template
        if m != 0: last_page = m-1;
        else: last_page = len(mr_paths)-1;     
        if m != len(mr_paths)-1: next_page = m+1; 
        else: next_page = 0  
        histogram_in_counts = ",".join([str(x) for x in histogram_data_in["counts"]])
        all_histograms.append(histogram_in_counts)
        image_names.append(image_name)
        histogram_out_counts = ",".join([str(x) for x in histogram_data_out["counts"]]) 
        histogram_bins =  '"%s"' % '","'.join([str(np.round(x,2)) for x in histogram_data_in["bins"]])
        substitutions = {"NUMBER_IMAGES":len(mr_paths),
			"IMAGE_NAME":  image_name,
			"NONZERO_VOXELS": "%0.3f" % (percent_nonzero),
			"THRESHOLD_FLAG": "%s" % (threshold_flag),
			"NONZERO_THRESH": "%s" % (nonzero_thresh),
			"TOTAL_VOXELS":total_voxels,
			"MEAN_SCORE":"%0.2f" % metrics["mean"],
			"MEDIAN_SCORE":"%0.2f" % metrics["med"],
			"VARIANCE_SCORE":"%0.2f" % metrics["var"],
			"OUTLIERS_HIGH": high_out,
			"OUTLIERS_LOW":low_out,
			"OUTLIERS_STANDARD_DEVIATION":outlier_sds,
			"STANDARD_DEVIATION_SCORE":"%0.2f" % metrics["std"],
			"STATMAP_HISTOGRAM":histogram_in_counts,
			"NEXT_PAGE":"../%s/%s.html" %(next_page,next_page),
			"LAST_PAGE":"../%s/%s.html" %(last_page,last_page),
                        "OVERLAY_IMAGE":"%s/masked.nii" %(mr_folder),
                        "INVESTIGATOR":investigator
                      }
        template = add_string(substitutions,template)
        if calculate_mean_image == True:
            template = add_string({"MEAN_IMAGE_HISTOGRAM":histogram_mean_counts},template)
        save_template(template,"%s/%s.html" %(mr_folder,m))

    # Individual pages done, now make summary pages, first the histograms
    template = get_template("qa_histograms")
    if calculate_mean_image == True:
        statmap_histograms = ['<div class="span2 statbox purple" onTablet="span2" onDesktop="span2">\n<div class="boxchart">%s</div><div class="number" style="font-size:30px">%s</div><div class="title">images</div><div class="footer"></div></div>' %(histogram_mean_counts,len(mr_paths))]
    else:
        statmap_histograms = [] 
       
    m = 0
    for mean in results["mean_resamp"]:
        if calculate_mean_image == True:
            if mean >= mean_intensity:    
                statmap_histograms.append('<div class="span2 statbox blue" onTablet="span2"\n onDesktop="span2"><div class="boxchart">%s</div><div class="number" style="font-size:30px"><i class="icon-arrow-up"></i></div><div class="title">%s</div><div class="footer"><a href="%s/%s.html"> detail</a></div></div>' %(all_histograms[m],m,m,m))
            else:
                statmap_histograms.append('<div class="span2 statbox red" onTablet="span2"\n onDesktop="span2"><div class="boxchart">%s</div><div class="number" style="font-size:30px"><i class="icon-arrow-down"></i></div><div class="title">%s</div><div class="footer"><a href="%s/%s.html"> detail</a></div></div>' %(all_histograms[m],m,m,m))
        else:
            statmap_histograms.append('<div class="span2 statbox red" onTablet="span2"\n onDesktop="span2"><div class="boxchart">%s</div><div class="number" style="font-size:30px"></div><div class="title">%s</div><div class="footer"><a href="%s/%s.html"> detail</a></div></div>' %(all_histograms[m],m,m,m))
        m+=1
    template = add_string({"STATMAP_HISTOGRAMS":"\n".join(statmap_histograms),
                           "NUMBER_IMAGES":len(mr_paths),
                           "INVESTIGATOR":investigator},template)
    save_template(template,"%s/histograms.html" %(html_dir)) 

    # Summary table page and alerts
    template_summary = get_template("qa_summary_table")
    template_alerts = get_template("qa_alerts")
    statmap_table = []; alerts_passing = []; alerts_outliers = []; alerts_thresh = []; count=0;
    for res in results.iterrows():

        # SUMMARY ITEMS ----

        # If the image has too many zeros:
        if res[1]["threshold_flag"] == True:
            alerts_thresh.append('<div class="task high"><div class="desc"><div class="title">Thresholded Map</div><div>Image ID %s has been flagged as being thresholded! Nonzero voxels in mask: %s.</div></div><div class="time"><div class="date">%s</div></div></div>' %(count,res[1]["nonzero_percent_voxels_in_mask"],time.strftime("%c")))

        # If the image has outliers or is thresholded:
        total_outliers = res[1]["n_outliers_low_%ssd" %(outlier_sds)] + res[1]["n_outliers_high_%ssd" %(outlier_sds)]
        flagged = (total_outliers > 0) | res[1]["threshold_flag"]

        if flagged == True:
            statmap_table.append('<tr><td>%s</td><td class="center">%s</td><td class="center">%0.2f</td><td class="center">%0.2f</td><td class="center">%0.2f</td><td class="center">%0.2f</td><td class="center">%0.2f</td><td class="center">%0.2f</td><td class="center"><a class="btn btn-danger" href="%s/%s.html"><i class="icon-flag zoom-in"></i></a></td></tr>' %(image_names[count],count,res[1]["mean_resamp"],res[1]["median_resamp"],res[1]["variance_resamp"],res[1]["standard_deviation_resamp"],res[1]["n_outliers_low_%ssd" %(outlier_sds)],res[1]["n_outliers_high_%ssd" %(outlier_sds)],count,count))
            if res[1]["n_outliers_high_%ssd" %(outlier_sds)] > 0: 
                alerts_outliers.append('<div class="task medium"><div class="desc"><div class="title">Outlier High</div><div>Image ID %s has been flagged to have a high outlier</div></div><div class="time"><div class="date">%s</div><div></div></div></div>' %(count,time.strftime("%c")))
            if res[1]["n_outliers_low_%ssd" %(outlier_sds)] > 0: 
                alerts_outliers.append('<div class="task medium"><div class="desc"><div class="title">Outlier Low</div><div>Image ID %s has been flagged to have a high outlier</div></div><div class="time"><div class="date">%s</div><div></div></div></div>' %(count,time.strftime("%c")))

        # Image is passing!
        else: 
            statmap_table.append('<tr><td>%s</td><td class="center">%s</td><td class="center">%0.2f</td><td class="center">%0.2f</td><td class="center">%0.2f</td><td class="center">%0.2f</td><td class="center">%0.2f</td><td class="center">%0.2f</td><td class="center"><a class="btn btn-success" href="%s/%s.html"><i class="icon-check zoom-in"></i></a></td></tr>' %(image_names[count],count,res[1]["mean_resamp"],res[1]["median_resamp"],res[1]["variance_resamp"],res[1]["standard_deviation_resamp"],res[1]["n_outliers_low_%ssd" %(outlier_sds)],res[1]["n_outliers_high_%ssd" %(outlier_sds)],count,count))
            alerts_passing.append('<div class="task low"><div class="desc"><div class="title">%s</div><div>This map has no flags as determined by the standards of only this report.</div></div><div class="time"><div class="date">%s</div><div></div></div></div>' %(image_names[count],time.strftime("%c")))
        count+=1

    # ALERTS ITEMS ----

    # If no alerts of a given type were generated above, add a placeholder entry
    if len(alerts_thresh) == 0:
        alerts_thresh = ['<div class="task high last"><div class="desc"><div class="title">No Thresholded Maps</div><div>No images have been flagged as thresholded [percent nonzero voxels in mask <= %s]</div></div><div class="time"><div class="date">%s</div></div></div>' %(nonzero_thresh,time.strftime("%c"))]
        number_thresh = 0
    else:
        alerts_thresh[-1] = alerts_thresh[-1].replace("task high","task high last")
        number_thresh = len(alerts_thresh)

    if len(alerts_outliers) == 0:
        alerts_outliers = ['<div class="task medium last"><div class="desc"><div class="title">No Outliers</div><div>No images have been flagged for outliers %s standard deviations in either direction.</div></div><div class="time"><div class="date">%s</div></div></div>' %(outlier_sds,time.strftime("%c"))]
        number_outliers = 0
    else:
        alerts_outliers[-1] = alerts_outliers[-1].replace("task medium","task medium last")
        number_outliers = len(alerts_outliers)

    if len(alerts_passing) == 0:
        alerts_passing = ['<div class="task low last"><div class="desc"><div class="title">No Passing!</div><div>No images are passing! What did you do?!</div></div><div class="time"><div class="date">%s</div></div></div>' %(time.strftime("%c"))]
    
    # Alerts and summary template
    template_alerts = add_string({"ALERTS_PASSING":"\n".join(alerts_passing),
                                  "ALERTS_OUTLIERS":"\n".join(alerts_outliers),
                                  "NUMBER_IMAGES":len(mr_paths),
                                  "OUTLIERS_STANDARD_DEVIATIONS":outlier_sds,
                                  "ALERTS_THRESH":"\n".join(alerts_thresh),
                                  "INVESTIGATOR":investigator},template_alerts)
    template_summary = add_string({"STATMAP_TABLE":"\n".join(statmap_table),
                                   "NUMBER_IMAGES":len(mr_paths),
                                   "OUTLIERS_STANDARD_DEVIATIONS":outlier_sds,
                                   "INVESTIGATOR":investigator},template_summary)
    save_template(template_summary,"%s/summary.html" %(html_dir)) 
    save_template(template_alerts,"%s/alerts.html" %(html_dir)) 
    
    # Finally, save the index
    index_template = get_template("qa_index")
    image_gallery = ['<div id="image-%s" class="masonry-thumb"><a style="background:url(%s/img/glassbrain.png) width=200px" title="%s" href="%s/%s.html"><img class="grayscale" src="%s/img/glassbrain.png" alt="%s"></a></div>' %(m,m,image_names[m],m,m,m,image_names[m]) for m in range(0,len(mr_paths)) ]
    substitutions = {"GLASSBRAIN_GALLERY":"\n".join(image_gallery),
                     "NUMBER_OUTLIERS":number_outliers,
                     "NUMBER_THRESH":number_thresh,
                     "NUMBER_IMAGES":len(mr_paths),
                     "INVESTIGATOR":investigator
                    }
    index_template = add_string(substitutions,index_template)
    if calculate_mean_image == True:
        index_template = add_string({"MEAN_IMAGE_HISTOGRAM":histogram_mean_counts},index_template)
    save_template(index_template,"%s/index.html" %(html_dir))

    # Save results to file
    results.to_csv("%s/allMetrics.tsv" %(html_dir),sep="\t")
    if view==True:
        os.chdir(html_dir)
        run_webserver(PORT=8091)
Exemplo n.º 39
0
def tedana_workflow(data, tes, mask=None, mixm=None, ctab=None, manacc=None,
                    tedort=False, gscontrol=None, tedpca='mle',
                    source_tes=-1, combmode='t2s', verbose=False, stabilize=False,
                    out_dir='.', fixed_seed=42, maxit=500, maxrestart=10,
                    debug=False, quiet=False, png=False, png_cmap='coolwarm'):
    """
    Run the "canonical" TE-Dependent ANAlysis workflow.

    Parameters
    ----------
    data : :obj:`str` or :obj:`list` of :obj:`str`
        Either a single z-concatenated file (single-entry list or str) or a
        list of echo-specific files, in ascending order.
    tes : :obj:`list`
        List of echo times associated with data in milliseconds.
    mask : :obj:`str`, optional
        Binary mask of voxels to include in TE Dependent ANAlysis. Must be
        spatially aligned with `data`. If an explicit mask is not provided,
        then Nilearn's compute_epi_mask function will be used to derive a mask
        from the first echo's data.
    mixm : :obj:`str`, optional
        File containing mixing matrix. If not provided, ME-PCA and ME-ICA are
        done.
    ctab : :obj:`str`, optional
        File containing component table from which to extract pre-computed
        classifications.
    manacc : :obj:`list`, :obj:`str`, or None, optional
        List of manually accepted components. Can be a list of the components,
        a comma-separated string with component numbers, or None. Default is
        None.
    tedort : :obj:`bool`, optional
        Orthogonalize rejected components w.r.t. accepted ones prior to
        denoising. Default is False.
    gscontrol : {None, 't1c', 'gsr'} or :obj:`list`, optional
        Perform additional denoising to remove spatially diffuse noise. Default
        is None.
    tedpca : {'mle', 'kundu', 'kundu-stabilize'}, optional
        Method with which to select components in TEDPCA. Default is 'mle'.
    source_tes : :obj:`int`, optional
        Source TEs for models. 0 for all, -1 for optimal combination.
        Default is -1.
    combmode : {'t2s'}, optional
        Combination scheme for TEs: 't2s' (Posse 1999, default).
    verbose : :obj:`bool`, optional
        Generate intermediate and additional files. Default is False.
    png : :obj:`bool`, optional
        Generate simple plots and figures. Default is False.
    png_cmap : :obj:`str`, optional
        Name of a matplotlib colormap to be used when generating figures.
        Only used if ``png`` is True. Default is 'coolwarm'.
    out_dir : :obj:`str`, optional
        Output directory.

    Other Parameters
    ----------------
    fixed_seed : :obj:`int`, optional
        Value passed to ``mdp.numx_rand.seed()``.
        Set to a positive integer value for reproducible ICA results;
        otherwise, set to -1 for varying results across calls.
    maxit : :obj:`int`, optional
        Maximum number of iterations for ICA. Default is 500.
    maxrestart : :obj:`int`, optional
        Maximum number of attempts for ICA. If ICA fails to converge, the
        fixed seed will be updated and ICA will be run again. If convergence
        is achieved before maxrestart attempts, ICA will finish early.
        Default is 10.
    debug : :obj:`bool`, optional
        Whether to run in debugging mode or not. Default is False.
    quiet : :obj:`bool`, optional
        If True, suppresses logging/printing of messages. Default is False.

    Notes
    -----
    This workflow writes out several files. For a complete list of the files
    generated by this workflow, please visit
    https://tedana.readthedocs.io/en/latest/outputs.html
    """
    out_dir = op.abspath(out_dir)
    if not op.isdir(out_dir):
        os.mkdir(out_dir)

    if debug and not quiet:
        # ensure old logs aren't over-written
        basename = 'tedana_run'
        extension = 'txt'
        logname = op.join(out_dir, (basename + '.' + extension))
        logex = op.join(out_dir, (basename + '*'))
        previouslogs = glob.glob(logex)
        previouslogs.sort(reverse=True)
        for f in previouslogs:
            previousparts = op.splitext(f)
            newname = previousparts[0] + '_old' + previousparts[1]
            os.rename(f, newname)

        # set logging format
        formatter = logging.Formatter(
                    '%(asctime)s\t%(name)-12s\t%(levelname)-8s\t%(message)s',
                    datefmt='%Y-%m-%dT%H:%M:%S')

        # set up logging file and open it for writing
        fh = logging.FileHandler(logname)
        fh.setFormatter(formatter)
        logging.basicConfig(level=logging.DEBUG,
                            handlers=[fh, logging.StreamHandler()])
    elif quiet:
        logging.basicConfig(level=logging.WARNING)
    else:
        logging.basicConfig(level=logging.INFO)

    LGR.info('Using output directory: {}'.format(out_dir))

    # ensure tes are in appropriate format
    tes = [float(te) for te in tes]
    n_echos = len(tes)

    # Coerce gscontrol to list
    if not isinstance(gscontrol, list):
        gscontrol = [gscontrol]

    # coerce data to samples x echos x time array
    if isinstance(data, str):
        data = [data]

    LGR.info('Loading input data: {}'.format(data))
    catd, ref_img = io.load_data(data, n_echos=n_echos)
    n_samp, n_echos, n_vols = catd.shape
    LGR.debug('Resulting data shape: {}'.format(catd.shape))

    if mixm is not None and op.isfile(mixm):
        mixm = op.abspath(mixm)
        # Allow users to re-run on same folder
        if mixm != op.join(out_dir, 'meica_mix.1D'):
            shutil.copyfile(mixm, op.join(out_dir, 'meica_mix.1D'))
            shutil.copyfile(mixm, op.join(out_dir, op.basename(mixm)))
    elif mixm is not None:
        raise IOError('Argument "mixm" must be an existing file.')

    if ctab is not None and op.isfile(ctab):
        ctab = op.abspath(ctab)
        # Allow users to re-run on same folder
        if ctab != op.join(out_dir, 'comp_table_ica.txt'):
            shutil.copyfile(ctab, op.join(out_dir, 'comp_table_ica.txt'))
            shutil.copyfile(ctab, op.join(out_dir, op.basename(ctab)))
    elif ctab is not None:
        raise IOError('Argument "ctab" must be an existing file.')

    if isinstance(manacc, str):
        manacc = [int(comp) for comp in manacc.split(',')]

    if ctab and not mixm:
        LGR.warning('Argument "ctab" requires argument "mixm".')
        ctab = None
    elif ctab and (manacc is None):
        LGR.warning('Argument "ctab" requires argument "manacc".')
        ctab = None
    elif manacc is not None and not mixm:
        LGR.warning('Argument "manacc" requires argument "mixm".')
        manacc = None

    if mask is None:
        LGR.info('Computing EPI mask from first echo')
        first_echo_img = io.new_nii_like(ref_img, catd[:, 0, :])
        mask = compute_epi_mask(first_echo_img)
    else:
        # TODO: add affine check
        LGR.info('Using user-defined mask')

    mask, masksum = utils.make_adaptive_mask(catd, mask=mask, getsum=True)
    LGR.debug('Retaining {}/{} samples'.format(mask.sum(), n_samp))
    if verbose:
        io.filewrite(masksum, op.join(out_dir, 'adaptive_mask.nii'), ref_img)

    os.chdir(out_dir)

    LGR.info('Computing T2* map')
    t2s, s0, t2ss, s0s, t2sG, s0G = decay.fit_decay(catd, tes, mask, masksum)

    # set a hard cap for the T2* map
    # anything that is 10x higher than the 99.5 %ile will be reset to 99.5 %ile
    cap_t2s = stats.scoreatpercentile(t2s.flatten(), 99.5,
                                      interpolation_method='lower')
    LGR.debug('Setting cap on T2* map at {:.5f}'.format(cap_t2s * 10))
    t2s[t2s > cap_t2s * 10] = cap_t2s
    io.filewrite(t2s, op.join(out_dir, 't2sv.nii'), ref_img)
    io.filewrite(s0, op.join(out_dir, 's0v.nii'), ref_img)

    if verbose:
        io.filewrite(t2ss, op.join(out_dir, 't2ss.nii'), ref_img)
        io.filewrite(s0s, op.join(out_dir, 's0vs.nii'), ref_img)
        io.filewrite(t2sG, op.join(out_dir, 't2svG.nii'), ref_img)
        io.filewrite(s0G, op.join(out_dir, 's0vG.nii'), ref_img)

    # optimally combine data
    data_oc = combine.make_optcom(catd, tes, mask, t2s=t2sG, combmode=combmode)

    # regress out global signal if requested via gscontrol
    if 'gsr' in gscontrol:
        catd, data_oc = gsc.gscontrol_raw(catd, data_oc, n_echos, ref_img)

    if mixm is None:
        # Identify and remove thermal noise from data
        dd, n_components = decomposition.tedpca(catd, data_oc, combmode, mask,
                                                t2s, t2sG, ref_img,
                                                tes=tes, algorithm=tedpca,
                                                source_tes=source_tes,
                                                kdaw=10., rdaw=1.,
                                                out_dir=out_dir, verbose=verbose)
        mmix_orig = decomposition.tedica(dd, n_components, fixed_seed,
                                         maxit, maxrestart)

        if verbose:
            np.savetxt(op.join(out_dir, '__meica_mix.1D'), mmix_orig)
            if source_tes == -1:
                io.filewrite(utils.unmask(dd, mask),
                             op.join(out_dir, 'ts_OC_whitened.nii'), ref_img)

        LGR.info('Making second component selection guess from ICA results')
        # Estimate betas and compute selection metrics for mixing matrix
        # generated from dimensionally reduced data using full data (i.e., data
        # with thermal noise)
        comptable, metric_maps, betas, mmix = model.dependence_metrics(
                    catd, data_oc, mmix_orig, mask, t2s, tes,
                    ref_img, reindex=True, label='meica_', out_dir=out_dir,
                    algorithm='kundu_v2', verbose=verbose)
        np.savetxt(op.join(out_dir, 'meica_mix.1D'), mmix)

        comptable = model.kundu_metrics(comptable, metric_maps)
        comptable = selection.kundu_selection_v2(comptable, n_echos, n_vols)
    else:
        LGR.info('Using supplied mixing matrix from ICA')
        mmix_orig = np.loadtxt(op.join(out_dir, 'meica_mix.1D'))
        comptable, metric_maps, betas, mmix = model.dependence_metrics(
                    catd, data_oc, mmix_orig, mask, t2s, tes,
                    ref_img, label='meica_', out_dir=out_dir,
                    algorithm='kundu_v2', verbose=verbose)
        if ctab is None:
            comptable = model.kundu_metrics(comptable, metric_maps)
            comptable = selection.kundu_selection_v2(comptable, n_echos, n_vols)
        else:
            comptable = pd.read_csv(ctab, sep='\t', index_col='component')
            comptable = selection.manual_selection(comptable, acc=manacc)

    comptable.to_csv(op.join(out_dir, 'comp_table_ica.txt'), sep='\t',
                     index=True, index_label='component', float_format='%.6f')

    if comptable[comptable.classification == 'accepted'].shape[0] == 0:
        LGR.warning('No BOLD components detected! Please check data and '
                    'results!')

    mmix_orig = mmix.copy()
    if tedort:
        acc_idx = comptable.loc[
            ~comptable.classification.str.contains('rejected')].index.values
        rej_idx = comptable.loc[
            comptable.classification.str.contains('rejected')].index.values
        acc_ts = mmix[:, acc_idx]
        rej_ts = mmix[:, rej_idx]
        betas = np.linalg.lstsq(acc_ts, rej_ts, rcond=None)[0]
        pred_rej_ts = np.dot(acc_ts, betas)
        resid = rej_ts - pred_rej_ts
        mmix[:, rej_idx] = resid
        np.savetxt(op.join(out_dir, 'meica_mix_orth.1D'), mmix)

    io.writeresults(data_oc, mask=mask, comptable=comptable, mmix=mmix,
                    n_vols=n_vols, ref_img=ref_img)

    if 't1c' in gscontrol:
        LGR.info('Performing T1c global signal regression to remove spatially '
                 'diffuse noise')
        gsc.gscontrol_mmix(data_oc, mmix, mask, comptable, ref_img)

    if verbose:
        io.writeresults_echoes(catd, mmix, mask, comptable, ref_img)

    if png:
        LGR.info('Making figures folder with static component maps and '
                 'timecourse plots.')
        # make figure folder first
        if not op.isdir(op.join(out_dir, 'figures')):
            os.mkdir(op.join(out_dir, 'figures'))

        viz.write_comp_figs(data_oc, mask=mask, comptable=comptable,
                            mmix=mmix_orig, ref_img=ref_img,
                            out_dir=op.join(out_dir, 'figures'),
                            png_cmap=png_cmap)

        LGR.info('Making Kappa vs Rho scatter plot')
        viz.write_kappa_scatter(comptable=comptable,
                                out_dir=op.join(out_dir, 'figures'))

        LGR.info('Making overall summary figure')
        viz.write_summary_fig(comptable=comptable,
                              out_dir=op.join(out_dir, 'figures'))

    LGR.info('Workflow completed')
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
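
# A minimal sketch of how the workflow above might be invoked. The file names
# and echo times below are placeholders, not values from the original example;
# only keyword arguments that appear in the signature above are used.
echo_files = ['sub-01_echo-1_bold.nii.gz',
              'sub-01_echo-2_bold.nii.gz',
              'sub-01_echo-3_bold.nii.gz']
echo_times = [14.5, 38.5, 62.5]  # milliseconds, in ascending order

tedana_workflow(echo_files, echo_times,
                mask=None,            # mask is derived from the first echo
                tedpca='mle',
                png=True,             # write static figures to out_dir/figures
                out_dir='tedana_out')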
Exemplo n.º 40
0
          cmap=pl.cm.gray)

# Third subplot: axial view
pl.subplot(1, 3, 3)
pl.axis('off')
pl.title('Axial')
pl.imshow(np.rot90(mean_img[:, :, 32]), interpolation='nearest',
          cmap=pl.cm.gray)
pl.subplots_adjust(left=.02, bottom=.02, right=.98, top=.95,
                   hspace=.02, wspace=.02)

# Extracting a brain mask ###################################################

# Simple computation of a mask from the fMRI data
from nilearn.masking import compute_epi_mask
mask_img = compute_epi_mask(nifti_img)
mask_data = mask_img.get_data().astype(bool)

# We create a new figure
pl.figure(figsize=(3, 4))
# We plot the axial view of the mask to compare with the axial
# view of the raw data displayed previously
pl.axis('off')
pl.imshow(np.rot90(mask_data[:, :, 32]), interpolation='nearest')
pl.subplots_adjust(left=.02, bottom=.02, right=.98, top=.95)

# Applying the mask #########################################################

from nilearn.masking import apply_mask
masked_data = apply_mask(nifti_img, mask_img)
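
# masked_data is a 2D array of shape (n_timepoints, n_voxels): one row of
# in-mask voxel values per volume. A small sanity-check sketch; it assumes
# `pl` is the pylab module used earlier in this example.
print(masked_data.shape)

pl.figure(figsize=(7, 3))
pl.plot(masked_data.mean(axis=1))  # mean in-mask signal for each volume
pl.xlabel('Time [scan number]')
pl.ylabel('Mean signal within mask')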
Exemplo n.º 41
0
    def preprocess_files(self, func_files, anat_files=None, verbose=1):
        """
        Fit an event-related GLM to each functional run and save one beta
        image per condition; runs whose beta images already exist are skipped.
        Returns the list of beta image file paths.
        """
        import pandas as pd
        import nibabel
        import nipy.modalities.fmri.design_matrix as dm
        from nilearn.image import index_img
        from nilearn.masking import compute_epi_mask
        from nipy.modalities.fmri.glm import FMRILinearModel
        from nipy.modalities.fmri.experimental_paradigm import (
            EventRelatedParadigm)

        def get_beta_filepath(func_file, cond):
            return func_file.replace('_bold.nii.gz', '_beta-%s.nii.gz' % cond)

        beta_files = []
        for fi, func_file in enumerate(func_files):
            # Don't re-do preprocessing.
            # beta_mask = func_file.replace('_bold.nii.gz', '_beta*.nii.gz')
            cond_file = func_file.replace('_bold.nii.gz', '_events.tsv')
            cond_data = pd.read_csv(cond_file, sep='\t')

            # Get condition info, to search if betas have been done.
            conditions = cond_data['trial_type'].tolist()
            all_conds = np.unique(conditions)
            all_beta_files = [get_beta_filepath(func_file, cond)
                              for cond in all_conds]
            # All betas are done.
            if np.all([op.exists(f) for f in all_beta_files]):
                beta_files += all_beta_files
                continue

            if verbose >= 0:
                print('Preprocessing file %d of %d' % (
                    fi + 1, len(func_files)))

            # Need to do regression.
            tr = cond_data['duration'].as_matrix().mean()
            onsets = cond_data['onset'].tolist()

            img = nibabel.load(func_file)
            n_scans = img.shape[3]
            frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)

            # Create the design matrix
            paradigm = EventRelatedParadigm(conditions, onsets)
            design_mat = dm.make_dmtx(frametimes, paradigm,
                                      drift_model='cosine',
                                      hfcut=n_scans, hrf_model='canonical')

            # Do the GLM
            mask_img = compute_epi_mask(img)
            fmri_glm = FMRILinearModel(img, design_mat.matrix, mask=mask_img)
            fmri_glm.fit(do_scaling=True, model='ar1')

            # Pull out the betas
            beta_hat = fmri_glm.glms[0].get_beta()  # Least-squares estimates
            mask = fmri_glm.mask.get_data() > 0

            # output beta images
            dim = design_mat.matrix.shape[1]
            beta_map = np.tile(mask.astype(np.float)[..., np.newaxis], dim)
            beta_map[mask] = beta_hat.T
            beta_image = nibabel.Nifti1Image(beta_map, fmri_glm.affine)
            beta_image.get_header()['descrip'] = ("Parameter estimates of "
                                                  "the localizer dataset")

            # Save beta images
            for ci, cond in enumerate(np.unique(conditions)):
                beta_cond_img = index_img(beta_image, ci)
                beta_filepath = get_beta_filepath(func_file, cond)
                nibabel.save(beta_cond_img, beta_filepath)
                beta_files.append(beta_filepath)

        return beta_files
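
# Hypothetical, standalone follow-up (not part of the class above): calling
# the method and inspecting one of the returned beta images. `analyzer` stands
# in for an instance of the enclosing class, and the file name is a placeholder.
import nibabel

beta_files = analyzer.preprocess_files(['sub-01_task-localizer_bold.nii.gz'])
beta_img = nibabel.load(beta_files[0])  # one 3D beta map per condition
print(beta_img.shape)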
Exemplo n.º 42
0
def test_find_cuts_empty_mask_no_crash():
    img = nibabel.Nifti1Image(np.ones((2, 2, 2)), np.eye(4))
    mask_img = compute_epi_mask(img)
    cut_coords = assert_warns(UserWarning, find_xyz_cut_coords, img,
                              mask_img=mask_img)
    np.testing.assert_array_equal(cut_coords, [.5, .5, .5])