Example #1
def roi_centers(big_roi_fn, subdivision_rois_fns, ref_vol_img):
    M = ref_vol_img.affine[:3, :3]
    abc = ref_vol_img.affine[:3, 3]
    big_roi = load_img(big_roi_fn)
    subdivision_rois = [load_img(f) for f in subdivision_rois_fns]
    big_coords = np.argwhere(big_roi.get_fdata() != 0)
    big_roi_center = M.dot(np.mean(big_coords, 0)) + abc
    big_roi_max_bounds = M.dot(np.max(big_coords, 0)) + abc
    big_roi_min_bounds = M.dot(np.min(big_coords, 0)) + abc
    big_roi_extent = big_roi_max_bounds - big_roi_min_bounds
    print(
        f"Big roi extends from {big_roi_min_bounds} to {big_roi_max_bounds}\nExtent is {big_roi_extent} and center is {big_roi_center}"
    )
    fig, ax = plt.subplots(1)
    for fn, roi in zip(subdivision_rois_fns, subdivision_rois):
        coords = np.argwhere(roi.get_fdata() != 0)
        roi_center = M.dot(np.mean(coords, 0)) + abc
        roi_center_proportion = (big_roi_max_bounds -
                                 roi_center) / big_roi_extent
        ax.scatter(roi_center_proportion[0], 1 - roi_center_proportion[2])
        ax.set_xlabel("Proportion of LGN extent (L-R)")
        ax.set_ylabel("Proportion of LGN extent (Ventral - Dorsal)")
        print(fn, roi_center, roi_center_proportion, sep='\n')
    plt.show()
    plt.close('all')
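A hedged usage sketch for roi_centers follows; the file names are placeholders, and the imports are the ones the function body assumes to exist at module level (np, plt, load_img).

# Hypothetical usage sketch; all paths below are placeholders.
import numpy as np
import matplotlib.pyplot as plt
from nilearn.image import load_img

ref_img = load_img("sub-01_T1w.nii.gz")  # reference volume supplying the affine
roi_centers("lgn_full_roi.nii.gz",
            ["lgn_subdiv1.nii.gz", "lgn_subdiv2.nii.gz"],
            ref_img)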
Example #2
 def send_atlases_to_single_subject_space(self, original_atlases):
    """Send atlas(es) in single subject space using previously calculated spm-like deformation field ("iy_*")
           
    Parameters
    ----------
    original_atlases : list with abolute path(s) of the nifti file(s) of the atlas(es) to be registered
    
    
    The function return number of atlases * number of deformation fields nii file named [atlas]_[subject]
    
    """    
    nrm = Normalize12()
    nrm.inputs.apply_to_files = original_atlases
    def_fields = sorted(glob("{}/iy*".format(self.volume_dir)))
    t1_dir = str(Path(self.volume_dir).parents[0])
    for n, def_field in enumerate(def_fields):
        subject_name = Path(def_field).stem.split("iy_")[1]
        t1_file = check_for_multiple_match_ask_input("{}/*{}*".format(t1_dir, subject_name))
        if t1_file is None:
            continue
        print("working on subject {}, {}% treated".format(subject_name, round(((n + 1)/len(def_fields))*100, 2)))
        nrm.inputs.deformation_file = def_field
        nrm.inputs.jobtype = "write"
        nrm.inputs.write_interp = 0
        nrm.run()
        for atlas in original_atlases:
            atlas_dir = "/".join(Path(atlas).parts[0:len(Path(atlas).parts) -1])
            atlas_name = Path(atlas).stem.split("[.]")[0]
            res_atlas = resample_to_img(load_img("{}/w{}.nii".format(atlas_dir, atlas_name)), 
                            load_img(t1_file), interpolation = "nearest")
            res_atlas.to_filename("{}/{}_{}.nii".format(self.atlas_dir, atlas_name, subject_name))
Example #3
    def _generate_report(self):
        """ Generates the visual report """
        from niworkflows.viz.utils import compose_view, plot_registration
        NIWORKFLOWS_LOG.info('Generating visual report')

        anat = load_img(self._anat_file)
        contour_nii = load_img(
            self._contour) if self._contour is not None else None

        if self._mask_file:
            anat = unmask(apply_mask(anat, self._mask_file), self._mask_file)
            mask_nii = load_img(self._mask_file)
        else:
            mask_nii = threshold_img(anat, 1e-3)

        n_cuts = 7
        if not self._mask_file and contour_nii:
            cuts = cuts_from_bbox(contour_nii, cuts=n_cuts)
        else:
            cuts = cuts_from_bbox(mask_nii, cuts=n_cuts)

        # Call composer
        compose_view(plot_registration(anat,
                                       'fixed-image',
                                       estimate_brightness=True,
                                       cuts=cuts,
                                       contour=contour_nii,
                                       compress=self.inputs.compress_report),
                     [],
                     out_file=self._out_report)
Example #4
    def _run_interface(self, runtime):
        from niworkflows.viz.utils import plot_registration, cuts_from_bbox, compose_view
        from nilearn.image import load_img

        rootdir = Path(self.inputs.subjects_dir) / self.inputs.subject_id
        _anat_file = str(rootdir / 'mri' / 'brain.mgz')
        _contour_file = str(rootdir / 'mri' / 'ribbon.mgz')

        anat = load_img(_anat_file)
        contour_nii = load_img(_contour_file)

        n_cuts = 7
        cuts = cuts_from_bbox(contour_nii, cuts=n_cuts)

        self._results['out_report'] = str(
            Path(runtime.cwd) / self.inputs.out_report)

        # Call composer
        compose_view(plot_registration(anat,
                                       'fixed-image',
                                       estimate_brightness=True,
                                       cuts=cuts,
                                       contour=contour_nii,
                                       compress=self.inputs.compress_report),
                     [],
                     out_file=self._results['out_report'])
        return runtime
Example #5
    def _generate_report(self):
        """Generate a reportlet."""
        NIWORKFLOWS_LOG.info('Generating visual report')

        refnii = load_img(self.inputs.reference)
        fmapnii = load_img(self.inputs.fieldmap)
        contour_nii = load_img(self.inputs.mask) if isdefined(
            self.inputs.mask) else None
        mask_nii = threshold_img(refnii, 1e-3)
        cuts = cuts_from_bbox(contour_nii or mask_nii, cuts=self._n_cuts)
        fmapdata = fmapnii.get_fdata()
        vmax = max(fmapdata.max(), abs(fmapdata.min()))

        # Call composer
        compose_view(plot_registration(refnii,
                                       'fixed-image',
                                       estimate_brightness=True,
                                       cuts=cuts,
                                       label='reference',
                                       contour=contour_nii,
                                       compress=False),
                     plot_registration(fmapnii,
                                       'moving-image',
                                       estimate_brightness=True,
                                       cuts=cuts,
                                       label='fieldmap (Hz)',
                                       contour=contour_nii,
                                       compress=False,
                                       plot_params={
                                           'cmap': coolwarm_transparent(),
                                           'vmax': vmax,
                                           'vmin': -vmax
                                       }),
                     out_file=self._out_report)
Example #6
    def _generate_report(self):
        """ Generates the visual report """
        from niworkflows.viz.utils import plot_registration
        NIWORKFLOWS_LOG.info('Generating visual report')

        anat = load_img(self._anat_file)
        contour_nii = load_img(self._contour) if self._contour is not None else None

        if self._mask_file:
            anat = unmask(apply_mask(anat, self._mask_file), self._mask_file)
            mask_nii = load_img(self._mask_file)
        else:
            mask_nii = threshold_img(anat, 1e-3)

        n_cuts = 7
        if not self._mask_file and contour_nii:
            cuts = cuts_from_bbox(contour_nii, cuts=n_cuts)
        else:
            cuts = cuts_from_bbox(mask_nii, cuts=n_cuts)

        # Call composer
        compose_view(
            plot_registration(anat, 'fixed-image',
                              estimate_brightness=True,
                              cuts=cuts,
                              contour=contour_nii,
                              compress=self.inputs.compress_report),
            [],
            out_file=self._out_report
        )
Example #7
def get_individuals_paths_01(path_fmri=dataset_path+"/datasets/01/fMRI/", task=1, run=1, resolution_factor = 5, number_individuals=10):
    
    fmri_individuals = []
    file_individuals = sorted([f for f in listdir(path_fmri) if isdir(join(path_fmri, f))])

    target_shape = image.load_img(path_fmri + file_individuals[0] + '/3_nw_mepi_rest_with_cross.nii.gz').shape
    target_shape = (int(target_shape[0]/resolution_factor), 
                    int(target_shape[1]/resolution_factor), 
                    int(target_shape[2]/resolution_factor))
    
    for i in range(number_individuals):
        
        individual = file_individuals[i]

        fmri_file = '/3_nw_mepi_rest_with_cross.nii.gz'

        individual_path = path_fmri + individual + fmri_file
        
        img = image.load_img(individual_path)
        
        #scale affine accordingly
        off_set = img.affine[:,3]
        new_affine = img.affine*resolution_factor
        new_affine[:,3] = off_set
        
        fmri_image = image.resample_img(img, 
                                        target_affine=new_affine,
                                        target_shape=target_shape,
                                        interpolation='nearest')

        fmri_individuals += [fmri_image]

    return fmri_individuals
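To make the affine trick above explicit, here is a minimal self-contained sketch on a synthetic image (the factor, shapes, and names are illustrative, not from the original source):

# Scaling the rotation/zoom part of the affine while keeping the translation
# column produces a coarser grid over the same field of view.
import numpy as np
import nibabel as nib
from nilearn import image

img = nib.Nifti1Image(np.random.rand(20, 20, 20), np.eye(4))
factor = 2
new_affine = img.affine * factor     # scales the voxel-to-world mapping
new_affine[:, 3] = img.affine[:, 3]  # restore translation (and the bottom-right 1)
small = image.resample_img(img, target_affine=new_affine,
                           target_shape=(10, 10, 10), interpolation='nearest')
print(small.shape)  # (10, 10, 10)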
Example #8
def load_data():
    test_imgs, test_masks = get_test_paths()
    slcs = get_slices(test_masks)

    fMRIs = np.zeros((slcs, IMG_HEIGHT, IMG_WIDTH, 2))
    masks = np.zeros((slcs, IMG_HEIGHT, IMG_WIDTH, 1))

    idx = 0

    for img, msk in zip(test_imgs, test_masks):
        flair = image.load_img(img[0]).get_data()
        t1 = image.load_img(img[1]).get_data()
        mask = image.load_img(msk).get_data()

        flair = partition(flair)
        t1 = partition(t1)
        mask = partition(mask)

        mask[mask == 2.0] = 0.0

        for i in range(flair.shape[2]):
            flair_sect = normalize(flair[:, :, i])
            t1_sect = normalize(t1[:, :, i])
            fMRI_sect = np.stack((t1_sect, flair_sect), axis=-1)
            mask_sect = mask[:, :, i:i + 1]

            fMRI_sect, mask_sect = pad_img(fMRI_sect, mask_sect)

            fMRIs[idx] = fMRI_sect
            masks[idx] = mask_sect

            idx += 1

    return fMRIs, masks
Example #9
    def make_pca_mask(pca_map, mask):
        from nilearn import image
        from nipype.utils.filemanip import split_filename
        import os.path as op

        _, fn, ext = split_filename(mask)

        pca_map = image.load_img(pca_map)
        mask = image.load_img(mask)

        pca_map = image.resample_to_img(pca_map, mask, interpolation='nearest')

        new_mask = image.math_img('pca_map * (mask > 0)',
                                  pca_map=pca_map,
                                  mask=mask)

        tmp = new_mask.get_data()
        tmp[tmp != 0] -= tmp[tmp != 0].min() - 1e-4
        tmp[tmp != 0] /= tmp[tmp != 0].max()

        new_mask = image.new_img_like(new_mask, tmp)

        new_mask.to_filename(op.abspath('{}_map{}'.format(fn, ext)))

        return new_mask.get_filename()
Example #10
def get_euler_stats(in_file, method, min_clust_size=1, vmin=-5, vmax=5,
                    steps=100, con='_t_'):

    # Load Data (scipy.ndimage provides the labelling / hole-filling used below)
    from scipy.ndimage import label, binary_fill_holes
    img = load_img(in_file)
    data = img.get_data()

    # Prepare mask
    if method == 'spm':
        img_mask = load_img('templates/spm_TPM_1.5mm_mask.nii.gz')
    else:
        img_mask = load_img(
            'templates/mni_icbm152_nlin_asym_09c_1.0mm_mask.nii.gz')
    mask = resample_to_img(img_mask, in_file).get_data() >= 0.5

    # Mask Data
    data *= mask

    # Prepare variables
    if '_F_' in con:
        vmin = 0
        vmax = 9
    X = np.linspace(vmin, vmax, steps)
    euler_chars = []
    cluster_count = []
    max_extent = []

    for x in X:

        # Threshold stat image
        if x < 0:
            mask_bin = 1. * (data < x)
        else:
            mask_bin = 1. * (data >= x)

        # Fill holes in the binary mask
        mask_filled = 1. * binary_fill_holes(mask_bin)

        # Extract cluster info
        labels, ncluster = label(mask_bin)

        # Compute euler characteristics
        _, holes = label(mask_filled - mask_bin)
        euler_chars.append(ncluster - holes)

        # Count clusters in volume
        sum_cluster = np.sum(
            [np.sum(labels == i) >= min_clust_size for i in np.unique(labels)])
        cluster_count.append(sum_cluster)

        # Save size of biggest cluster in volume
        label_ids = np.unique(labels)
        if len(label_ids) == 1:
            max_extent.append(0)
        else:
            max_size = np.max([np.sum(labels == n) for n in label_ids[1:]])
            max_extent.append(max_size)

    return euler_chars, cluster_count, max_extent, X
Example #11
def load_train_data(image_path, load_size=286, fine_size=256, is_testing=False):
    img = nilimage.load_img(image_path[0])
    img_A = img.get_data()
    img = nilimage.load_img(image_path[1])
    img_B = img.get_data()
    img_AB = np.concatenate((img_A, img_B), axis=3)
    # img_AB shape: (fine_size, fine_size, input_c_dim + output_c_dim)
    return img_AB
Example #12
def symmetric_overlap(img1, img2):
    mask1 = load_img(img1).get_fdata() > 0
    mask2 = load_img(img2).get_fdata() > 0

    total1 = np.sum(mask1)
    total2 = np.sum(mask2)
    overlap = np.sum(mask1 & mask2)
    return overlap / np.sqrt(total1 * total2)
Example #13
def symmetric_overlap(img1, img2):
    mask1 = load_img(img1).get_data() > 0
    mask2 = load_img(img2).get_data() > 0

    total1 = np.sum(mask1)
    total2 = np.sum(mask2)
    overlap = np.sum(mask1 & mask2)
    return overlap / np.sqrt(total1 * total2)
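A small self-check of the metric on synthetic masks (illustrative only): two 400-voxel boxes sharing 200 voxels give 200 / sqrt(400 * 400) = 0.5.

import numpy as np
import nibabel as nib

a = np.zeros((10, 10, 10)); a[2:6] = 1   # 400 voxels
b = np.zeros((10, 10, 10)); b[4:8] = 1   # 400 voxels, 200 shared with a
img1 = nib.Nifti1Image(a, np.eye(4))
img2 = nib.Nifti1Image(b, np.eye(4))
print(symmetric_overlap(img1, img2))     # 0.5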
Example #14
 def _get_plotting_images(self):
     input_dwi = load_img(self.inputs.in_file)
     outputs = self._list_outputs()
     ref_name = outputs.get('out_file')
     denoised_nii = load_img(ref_name)
     noise_name = outputs['noise_image']
     noisenii = load_img(noise_name)
     return input_dwi, denoised_nii, noisenii
Example #15
    def _generate_report(self):
        """Generate the visual report."""
        from nilearn.image import threshold_img, load_img
        from nilearn.masking import apply_mask, unmask
        from niworkflows.viz.utils import plot_registration

        NIWORKFLOWS_LOG.info("Generating visual report")

        fixed_image_nii = load_img(self._fixed_image)
        moving_image_nii = load_img(self._moving_image)
        contour_nii = load_img(
            self._contour) if self._contour is not None else None

        if self._fixed_image_mask:
            fixed_image_nii = unmask(
                apply_mask(fixed_image_nii, self._fixed_image_mask),
                self._fixed_image_mask,
            )
            # since the moving image is already in the fixed image space we
            # should apply the same mask
            moving_image_nii = unmask(
                apply_mask(moving_image_nii, self._fixed_image_mask),
                self._fixed_image_mask,
            )
            mask_nii = load_img(self._fixed_image_mask)
        else:
            mask_nii = threshold_img(fixed_image_nii, 1e-3)

        n_cuts = 7
        if not self._fixed_image_mask and contour_nii:
            cuts = cuts_from_bbox(contour_nii, cuts=n_cuts)
        else:
            cuts = cuts_from_bbox(mask_nii, cuts=n_cuts)

        # Call composer
        compose_view(
            plot_registration(
                fixed_image_nii,
                "fixed-image",
                estimate_brightness=True,
                cuts=cuts,
                label=self._fixed_image_label,
                contour=contour_nii,
                compress=self.inputs.compress_report,
                dismiss_affine=self._dismiss_affine,
            ),
            plot_registration(
                moving_image_nii,
                "moving-image",
                estimate_brightness=True,
                cuts=cuts,
                label=self._moving_image_label,
                contour=contour_nii,
                compress=self.inputs.compress_report,
                dismiss_affine=self._dismiss_affine,
            ),
            out_file=self._out_report,
        )
Example #16
def image_processing(file_path):
    data_path=os.listdir(file_path)
    flair_dataset=[]
    mask_dataset=[]
    for i in data_path:
        img_path=os.path.join(file_path, i,'pre')
        mask_path=os.path.join(file_path,i)

        for name in glob.glob(img_path+'/FLAIR*'):
            flair_img=image.load_img(name)
            flair_data=flair_img.get_data()
            flair_data=np.transpose(flair_data, (1,0,2))
            flair_resized = cv2.resize(flair_data, dsize=(img_row,img_col), interpolation=cv2.INTER_CUBIC)
            flair_dataset.append(flair_resized)
        #perform some image augmentation (use same parameters for mask for deterministic augmentation)
        #rotate
            M1 = cv2.getRotationMatrix2D((img_row/2,img_col/2),90,1)
            flair_rotate= cv2.warpAffine(flair_resized,M1,(img_row,img_col))
            flair_dataset.append(flair_rotate)
        #shearing
            pts1 = np.float32([[50,50],[100,100],[50,200]])
            pts2 = np.float32([[10,100],[100,100],[100,210]])
            M2 = cv2.getAffineTransform(pts1,pts2)
            flair_shear= cv2.warpAffine(flair_resized,M2,(img_row,img_col))
            flair_dataset.append(flair_shear)
        #zoom
            pts3 = np.float32([[45,48],[124,30],[50,120],[126,126]])
            pts4= np.float32([[0,0],[128,0],[0,128],[128,128]])
            M3 = cv2.getPerspectiveTransform(pts3,pts4)
            flair_zoom = cv2.warpPerspective(flair_resized,M3,(img_row,img_col))
            flair_dataset.append(flair_zoom)

        # perform same transformation on mask files
        for name in glob.glob(mask_path+'/wmh*'):
            mask_img=image.load_img(name)
            mask_data=mask_img.get_data()
            mask_data=np.transpose(mask_data, (1,0,2)) #transpose so orientation matches nilearn plot
            mask_resized=cv2.resize(mask_data, dsize=(img_row,img_col), interpolation=cv2.INTER_CUBIC)
            ret, mask_binary=cv2.threshold(mask_resized,0.6,1,cv2.THRESH_BINARY)
            mask_dataset.append(mask_binary)
            #need to run binary threshold again after augmentation
            mask_rotate= cv2.warpAffine(mask_binary,M1,(img_row,img_col))
            ret, mask_rotate=cv2.threshold(mask_rotate,0.6,1,cv2.THRESH_BINARY)
            mask_dataset.append(mask_rotate)
                
            mask_shear= cv2.warpAffine(mask_binary,M2,(img_row,img_col))
            ret, mask_shear=cv2.threshold(mask_shear,0.6,1,cv2.THRESH_BINARY)
            mask_dataset.append(mask_shear)
                
            mask_zoom= cv2.warpPerspective(mask_binary,M3,(img_row,img_col))
            ret, mask_zoom=cv2.threshold(mask_zoom,0.6,1,cv2.THRESH_BINARY)
            mask_dataset.append(mask_zoom)
                

    flair_array=np.array(flair_dataset)
    mask_array=np.array(mask_dataset)
    return flair_array, mask_array
Example #17
def read_data(class_names,class_labels):
	fold1_test  = list()  
	ts_lbl      = list()  
	fold2_train = list()  
	tr_lbl      = list() 
	fold3_valid = list()  
	val_lbl     = list()  

	for pos,sel in enumerate(class_names):
		print(pos,sel)
		images_test  = sorted(glob.glob("../data/Testing/"+sel+"/*.nii"))  #Testing 
		images_train = sorted(glob.glob("../data/Training/"+sel+"/*.nii")) #Training
		images_valid = sorted(glob.glob("../data/Testing/"+sel+"/*.nii"))  #Validation

	#Testing database:-------------------------------------------------------------
		for volume in images_test:
			img = image.load_img(volume)   
			img = img.get_data()    # convert to array
			fold1_test.append(np.asarray(img, dtype = np.float32) / 1.)   # Input Data is already normalized between 0~1
			ts_lbl.append(class_labels[pos])

	#Training database: -----------------------------------------------------------
		for volume in images_train:
			img = image.load_img(volume)
			img = img.get_data()    # convert to array
			fold2_train.append(np.asarray(img, dtype = np.float32) / 1.)  # Input Data is already normalized between 0~1
			tr_lbl.append(class_labels[pos])
	

	#Validation database: -----------------------------------------------------------
		for volume in images_valid:
			img = image.load_img(volume)
			img = img.get_data()    # convert to array
			fold3_valid.append(np.asarray(img, dtype = np.float32) / 1.)  # Input Data is already normalized between 0~1
			val_lbl.append(class_labels[pos])
    
	x_train = np.asarray(fold2_train)   
	y_test  = np.asarray(fold1_test) 
	y_valid = np.asarray(fold3_valid) 

	tr_lbl  = np.asarray(tr_lbl)
	ts_lbl  = np.asarray(ts_lbl)
	val_lbl = np.asarray(val_lbl)
   
	# in case of 'channels_first'
	x_train = x_train.reshape(x_train.shape[0], 1, Height, Width, Depth)
	y_test  = y_test.reshape(y_test.shape[0], 1, Height, Width, Depth)
	y_valid = y_valid.reshape(y_valid.shape[0], 1, Height, Width, Depth)
	
	tr_onehot  = to_categorical(tr_lbl)  # Converts a class vector (integers) to binary class matrix representation
	ts_onehot  = to_categorical(ts_lbl)
	val_onehot = to_categorical(val_lbl)
	
	return x_train, y_test, y_valid, tr_onehot, ts_onehot, val_onehot
Example #18
def test_fs_beta_series(sub_metadata, preproc_file, sub_events, confounds_file,
                        brainmask_file, use_nibabel):
    if use_nibabel:
        bold_file = nib.load(str(preproc_file))
        mask_file = nib.load(str(brainmask_file))
    else:
        bold_file = str(preproc_file)
        mask_file = str(brainmask_file)

    selected_confounds = ['white_matter', 'csf']
    hrf_model = 'fir'
    fir_delays = [0, 1, 2, 3, 4]
    with open(str(sub_metadata), 'r') as md:
        bold_metadata = json.load(md)

    beta_series = LSSBetaSeries(bold_file=bold_file,
                                bold_metadata=bold_metadata,
                                mask_file=mask_file,
                                events_file=str(sub_events),
                                confounds_file=str(confounds_file),
                                selected_confounds=selected_confounds,
                                signal_scaling=0,
                                hrf_model=hrf_model,
                                fir_delays=fir_delays,
                                return_tstat=False,
                                smoothing_kernel=None,
                                high_pass=0.008)
    res = beta_series.run()

    events_df = pd.read_csv(str(sub_events), sep='\t')
    trial_types = events_df['trial_type'].unique()

    # check for the correct number of beta maps
    assert len(trial_types) * len(fir_delays) == len(res.outputs.beta_maps)

    input_img_dim = load_img(bold_file).shape[:3]
    for beta_map in res.outputs.beta_maps:
        trial_type = re.search(r'desc-([A-Za-z0-9]+)Delay[0-9]+Vol_',
                               beta_map).groups()[0]
        # check if correct trial type is made
        assert trial_type in trial_types
        # check if output is actually a file
        assert os.path.isfile(beta_map)
        # check if image dimensions are correct
        assert input_img_dim == load_img(beta_map).shape[:3]
        # check if number of trials are correct
        assert (events_df['trial_type'] == trial_type
                ).sum() == load_img(beta_map).shape[-1]
        # clean up
        os.remove(beta_map)

    # check residual image
    assert load_img(bold_file).shape == load_img(res.outputs.residual).shape
    os.remove(res.outputs.residual)
Example #19
def image_for_term_wrapper(keys):

    url = "http://neurosynth.org/api/v2/"

    pt_fwd_imgs = {}
    pt_rev_imgs = {}
    for nk in keys:
        fwd, rev = get_image_from_term(url, {'search': nk})
        pt_fwd_imgs[nk] = nli.load_img(fwd)
        pt_rev_imgs[nk] = nli.load_img(rev)
    return pt_fwd_imgs, pt_rev_imgs
Example #20
def _append_results(results, model, iteration, dim):
    """Gather results from a model which has attributes.

    Parameters
    ----------
    results : dict
        Should contain columns with empty array list for appending
        all the cross validation results for each iteration in
        cross_val_score
        {'atlas': [], 'classifier': [], 'measure': [], 'scores': []}
    model : object, instance of LearnBrainRegions

    Return
    ------
        results : dictionary
    """
    for atlas in model.models_:
        for measure in ['correlation', 'partial correlation', 'tangent']:
            for classifier in ['svc_l1', 'svc_l2', 'ridge']:
                results['iter_shuffle_split'].append(iteration)
                results['atlas'].append(atlas)
                results['measure'].append(measure)
                results['classifier'].append(classifier)
                results['scoring'].append('roc_auc')
                results['dataset'].append('COBRE')
                results['compcor_10'].append('yes')
                results['motion_regress'].append('yes')
                results['smoothing_fwhm'].append(6.)
                results['connectome_regress'].append('no')
                results['region_extraction'].append('yes')
                results['min_region_size_in_mm3'].append(1500)
                results['multi_pca_reduction'].append('yes')
                results['reduction_n_components'].append(100)
                results['covariance_estimator'].append('LedoitWolf')
                score = model.scores_[atlas][measure][classifier]
                results['scores'].append(score)

                print("Loading rois_of {0} to regions".format(atlas))
                results['dimensionality'].append(dim)
                if atlas == 'ica':
                    rois_img = load_img(model.rois_[atlas])
                    results['n_regions'].append(rois_img.shape[3])
                elif atlas == 'dictlearn':
                    rois_img = load_img(model.rois_[atlas])
                    results['n_regions'].append(rois_img.shape[3])
                elif atlas == 'kmeans':
                    rois_img = load_img(model.rois_[atlas])
                    n_regions = np.unique(rois_img.get_data())
                    n_regions = set(n_regions)
                    n_regions.remove(0)
                    results['n_regions'].append(len(n_regions))
                elif atlas == 'ward':
                    results['n_regions'].append('no')
    return results
Example #21
    def _generate_report(self):
        """Generate a reportlet."""
        NIWORKFLOWS_LOG.info('Generating visual report')

        movnii = refnii = load_img(self.inputs.reference)
        fmapnii = load_img(self.inputs.fieldmap)

        if isdefined(self.inputs.moving):
            movnii = load_img(self.inputs.moving)

        contour_nii = mask_nii = None
        if isdefined(self.inputs.mask):
            contour_nii = load_img(self.inputs.mask)
            maskdata = contour_nii.get_fdata() > 0
        else:
            mask_nii = threshold_img(refnii, 1e-3)
            maskdata = mask_nii.get_fdata() > 0
        cuts = cuts_from_bbox(contour_nii or mask_nii, cuts=self._n_cuts)
        fmapdata = fmapnii.get_fdata()
        vmax = max(abs(np.percentile(fmapdata[maskdata], 99.8)),
                   abs(np.percentile(fmapdata[maskdata], 0.2)))

        fmap_overlay = [{
            'overlay': fmapnii,
            'overlay_params': {
                'cmap': coolwarm_transparent(max_alpha=self.inputs.max_alpha),
                'vmax': vmax,
                'vmin': -vmax,
            }
        }] * 2

        if self.inputs.show != 'both':
            fmap_overlay[not self.inputs.show] = {}

        # Call composer
        compose_view(plot_registration(movnii,
                                       'moving-image',
                                       estimate_brightness=True,
                                       cuts=cuts,
                                       label=self.inputs.moving_label,
                                       contour=contour_nii,
                                       compress=False,
                                       **fmap_overlay[1]),
                     plot_registration(refnii,
                                       'fixed-image',
                                       estimate_brightness=True,
                                       cuts=cuts,
                                       label=self.inputs.reference_label,
                                       contour=contour_nii,
                                       compress=False,
                                       **fmap_overlay[0]),
                     out_file=self._out_report)
Example #22
def calc_flow(mask_path, phase_path, VENC):
    print('\nmask_path:', mask_path)
    mask = load_img(mask_path)
    assert mask.shape[2] == 1, 'Mask is 3D and this will not work'
    phase = load_img(phase_path)
    phase_data = nilearn.masking.apply_mask(phase,
                                            mask,
                                            dtype='f',
                                            smoothing_fwhm=None,
                                            ensure_finite=True)
    assert phase.header['pixdim'][1] > 0.1 and phase.header['pixdim'][
        1] < 1.2, 'Unclear if pixdim is in unit mm'
    # calculate the in-plane area of a voxel
    vox_area = phase.header['pixdim'][1] * phase.header['pixdim'][
        2] / 100  # cm^2
    print('vox_area = %f' % vox_area)
    # Is the phase for one phase or multiple?
    if phase_data.ndim > 1:
        print('multiple cardiac phases (%i, to be precise) are present!' %
              phase_data.shape[0])
        # calculate the area of the mask, noting that total voxels in the masked data will be the mask voxels from a single image times the number of phases
        area = phase_data.size * vox_area / phase_data.shape[
            0]  # cm^2, CURRENTLY THIS FUNCTION WORKS ON A STATIC MASK
        print('area of mask = %f, mask vox count = %i' %
              (area, phase_data.size / phase_data.shape[0]))
        assert np.max(phase.get_data()) - np.min(
            phase.get_data()) > 8100, 'Wrong phase image scale, not -4096:4096'
        avg_vel = np.mean(
            phase_data,
            1) * VENC / 4096  # images scaled -4096 to 4096, units cm/s
        flow = np.sum(phase_data, 1) * VENC / 4096 * vox_area  # ml/s
    else:
        print('one cardiac phase')
        assert phase_data.shape[
            0] == 1, "Something weird in the data dimensions"
        area = phase_data.size * vox_area  # cm^2
        print('area of mask = %f, mask vox count = %i' %
              (area, phase_data.size / phase_data.shape[0]))
        assert np.max(phase.get_data()) - np.min(
            phase.get_data()) > 8100, 'Wrong phase image scale, not -4096:4096'
        avg_vel = np.mean(
            phase_data
        ) * VENC / 4096  # images scaled -4096 to 4096, units cm/s
        flow = np.sum(phase_data) * VENC / 4096 * vox_area  # ml/s
    raw_vel = phase_data * VENC / 4096  # cm/s
    return {
        'fname': str(phase_path),
        'avg_vel': avg_vel,
        'area': area,
        'flow': flow,
        'raw_vel': raw_vel
    }
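A quick numeric check of the scaling convention used above (illustrative values only): the phase image spans -4096..4096 and maps linearly onto -VENC..VENC.

import numpy as np
phase_vals = np.array([2048.0, -1024.0])  # raw phase units in [-4096, 4096]
VENC = 100.0                              # cm/s
vel = phase_vals * VENC / 4096            # -> [50., -25.] cm/s
vox_area = 0.01                           # cm^2, e.g. a 1 mm x 1 mm voxel
flow = vel.sum() * vox_area               # cm/s * cm^2 = cm^3/s = ml/s
print(vel, flow)                          # [ 50. -25.] 0.25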
Example #23
 def reload_img(self, filepath, vol_ind=0, fourD=False):
     """Reloads img, repeating original loading as part of NetworkZoo"""
     
     r_img = None
     if fourD and (vol_ind is not None):
         r_img = image.index_img(filepath, vol_ind)
     elif not fourD and (vol_ind > 0):
         img = image.load_img(filepath)
         r_img = image.new_img_like(img, img.get_fdata(caching='unchanged')==vol_ind, 
                                    copy_header=True)
     else:
         r_img = image.load_img(filepath)
     return r_img
Example #24
 def _load_files(self):
     """Initializes fn. by loading all images"""
     if self.in_files:
         self.in_imgs = [
             i if isinstance(i, (Nifti1Image,
                                 Nifti1Pair)) else image.load_img(i)
             for i in self.in_files
         ]
     if self.map_files:
         self.map_imgs = [
             i if isinstance(i, (Nifti1Image,
                                 Nifti1Pair)) else image.load_img(i)
             for i in self.map_files
         ]
Example #25
def retrieving_mask(savingpath, use_data):

    # load either the image alone, or both the image and the array coding it
    if not use_data:
        img = image.load_img(savingpath)
        return img

    else:
        img = image.load_img(savingpath)
        data = img.get_data()
        unique, counts = np.unique(data, return_counts=True)
        print('Score percentile:')
        print(dict(zip(unique, counts)))
        return data, img
Example #26
def load_masked_random_runs(run3_data,
                            run4_data,
                            union_masks):
    # TODO: make z-scoring optional
    random1_data = [
        zscore(np.nan_to_num(apply_mask(load_img(run3_file), mask_img=unionmask).T), axis=1, ddof=1)
        for run3_file, unionmask
        in zip(run3_data, union_masks)
    ]
    random2_data = [
        zscore(np.nan_to_num(apply_mask(load_img(run4_file), mask_img=unionmask).T), axis=1, ddof=1)
        for run4_file, unionmask
        in zip(run4_data, union_masks)]
    return random1_data, random2_data
Example #27
def get_highest_activation(root, nvox=1000):
    stats_file = op.join(root + ".feat", "stats", "zfstat1.nii.gz")
    epi_file = op.join(root + ".nii.gz")
    stats = image.load_img(stats_file)
    masker = input_data.NiftiMasker()
    stats_data = masker.fit_transform(stats)
    top_indices = np.argpartition(stats_data[0,:], -nvox)[-nvox:]
    target_mask = np.zeros_like(stats_data)
    target_mask[0, top_indices] = 1
    target_mask = masker.inverse_transform(target_mask)
    masker = input_data.NiftiMasker(mask_img=target_mask, standardize="psc")
    ts = masker.fit_transform(image.load_img(epi_file))
    df = pd.DataFrame(ts).melt().rename(columns={"variable": "voxel", "value": "psc"})
    df["time"] = [0.874 * (float(i)%ts.shape[0]) for i in df.index]
    return df
Example #28
    def switch_subject(self, name):
        """
        Detects that the combobox corresponding to subject_run selection has changed and updates the current_image
        variables appropriately before wrap-calling the plotupdate functions
        @param name: The subject_run name extracted from the dataframe (e.g. sub_014_ASL_1)
        """
        if name == "Select a subject and run":
            self.reset_images_to_default_black()
        else:
            self.current_subject = name

            # Get the images and if an error occurs, check whether the files actually exist. If they do not exist,
            # inform the user and reset the images to be black volumes
            try:
                self.current_cbf_img = image.load_img(self.subjects_runs_dict[name]["CBF"]).get_fdata()
                self.current_cbf_max = np.nanpercentile(self.current_cbf_img, 99)
                self.current_cbf_img = self.current_cbf_img.clip(0, np.nanmax(self.current_cbf_img))
                self.current_t1_img = image.load_img(self.subjects_runs_dict[name]["T1"]).get_fdata()
                self.current_t1_max = np.nanpercentile(self.current_t1_img, 99)

                self.signal_manager_updateconstrasts.emit(self.current_cbf_max, np.nanmax(self.current_cbf_img))
                print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
                print(f"Mean of current image {np.nanmean(self.current_cbf_img)}")
                print(f"Median of current image {np.nanmedian(self.current_cbf_img)}")
                print(f"Minimum of current image {np.nanmin(self.current_cbf_img)}")
                print(f"Maximum of current image {np.nanmax(self.current_cbf_img)}")
                print(f"Anticipated maximum value image {self.current_cbf_max}")

            except ValueError:
                self.check_if_files_exist(name=name)
                self.reset_images_to_default_black()
            except KeyError:
                # Try to update the subject runs dict just in case the user has recovered the files
                self.get_filenames()
                try:
                    self.current_cbf_img = image.load_img(self.subjects_runs_dict[name]["CBF"]).get_fdata()
                    self.current_cbf_max = np.nanpercentile(self.current_cbf_img, 99)
                    self.current_cbf_img = self.current_cbf_img.clip(0, np.nanmax(self.current_cbf_img))
                    self.current_t1_img = image.load_img(self.subjects_runs_dict[name]["T1"]).get_fdata()
                    self.current_t1_max = np.nanpercentile(self.current_t1_img, 99)
                except KeyError:
                    QMessageBox().warning(self.parent_cw, self.parent_cw.plot_errs["MRIPlotDiscrep"][0],
                                          self.parent_cw.plot_errs["MRIPlotDiscrep"][1] + f"{name}\n" +
                                          self.parent_cw.plot_errs["MRIPlotDiscrep"][2], QMessageBox.Ok)
                    self.reset_images_to_default_black()

        # Regardless of outcome, tell the canvases to update
        self.plotupdate_allslices()
Example #29
def BetaSeriesToNilearn(subPath, Selecting = None, critereonList = None, Beta = True):
    """
    Convenience function for importing betaseries beta estimates from spm first-level model.
    Returns data as nilearn nifti object. 
    
    args:
        subjPath: subject model path
        
        Selecting: if None, select all beta maps in subject directory. Other options 
                   are Include or Exclude
                   
        critereonList: if Selecting, a list of labels to include or exclude in import
        
        Beta = if True selects beta maps, if False selects spmT files. 
        
    returns: 
        image = 4D image nilearn nifti image
        beta_names = list of condition names
        
    """
    
    beta_names = []
    volumes = []
    
    stat = 'beta_*.hdr' if Beta else 'spmT_*.hdr'
    regex = r'(?<=Sn\(.\) ).*(?=\*)' if Beta else r'(?<=: )(.*)'
    
    for beta in glob.glob(subPath + stat):
        beta_image_header = str(load_img(beta).header.values()[28])
        try:
            beta_name = re.findall(regex,beta_image_header)[0]

            if Selecting:
                if Selecting == 'Exclude' and beta_name not in critereonList:
                    beta_names.append(beta_name)
                    volumes.append(beta)
                elif Selecting == 'Include' and beta_name in critereonList:
                    beta_names.append(beta_name)
                    volumes.append(beta)
            else:
                beta_names.append(beta_name)
                volumes.append(beta)

        except:
            continue
            
    image = load_img(volumes)
    return image, beta_names
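A hypothetical call (the model path and condition labels are placeholders; the directory should contain beta_*.hdr files and end with a path separator, since the glob above concatenates strings):

img4d, beta_names = BetaSeriesToNilearn('/data/sub-01/model/',
                                        Selecting='Include',
                                        critereonList=['faces', 'houses'],
                                        Beta=True)
print(img4d.shape, beta_names)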
Example #30
def _read_fmri_texture(sub, path, task):
    """ Reads BOLD signal data on Texture dataset """
    fmri_path = (('{path}{sub}/fmri/cra{sub}_td{task}.nii')
                 .format(path=path, task=task+1, sub=sub))
    fmri = load_img(fmri_path)

    return fmri
Example #31
    def _check_output(self):
        # read the groups file
        subj_files   = self._get_subject_files()
        n_timepoints = len(subj_files)

        # load the loadings file
        loads = self._load_loadings()

        # check shapes
        if n_timepoints != loads.shape[0]:
            raise AttributeError('Shape mismatch between list of subjects {} '
                                 'and loadings data shape {}.'.format(n_timepoints,
                                                                      loads.shape))

        # load components image file to check shape match
        # this doesn't make much sense to be here (because it does not use the components image
        # but it checks if the ICA output is consistent).
        compsf = fetch_one_file(self.ica_dir, self._comps_fname)
        comps_img = niimg.load_img(compsf)

        n_ics = loads.shape[1]
        if comps_img.get_data().shape[-1] != n_ics:
            raise AttributeError('Shape mismatch between loadings matrix {} '
                                 'and components data shape {}.'.format(loads.shape,
                                                                        comps_img.get_data().shape))
Example #32
def plot_tbss(img, mean_FA_skeleton, start, end, row_l=6, step=1, title='',
    axis='z', pngfile=None):
    ''' Inspired by plot_two_maps. Plots a TBSS contrast map over the
    skeleton of a mean FA map '''

    # Dilate tbss map (io, PIL, matplotlib and plotting imports added so
    # that the function is self-contained)
    import io
    import numpy as np
    from skimage.morphology import cube, dilation
    from matplotlib import cm
    from PIL import Image
    from nilearn import image, plotting
    d = np.array(image.load_img(img).dataobj)
    dil_tbss = dilation(d, cube(2))
    dil_tbss_img = image.new_img_like(img, dil_tbss)

    slice_nb = int(abs(((end - start) / float(step))))
    images = []

    for line in range(int(slice_nb/float(row_l) + 1)):
        opt = {'title':{True:title,
                        False:None}[line==0],
               'colorbar':False,
               'black_bg':True,
               'display_mode':axis,
               'threshold':0.2,
               'cmap': cm.Greens,
               'cut_coords':range(start + line * row_l * step,
                                       start + (line+1) * row_l * step,
                                       step)}
        method = 'plot_stat_map'
        opt.update({'stat_map_img': mean_FA_skeleton})

        t = getattr(plotting, method).__call__(**opt)


        try:
            # Add overlay
            t.add_overlay(dil_tbss_img, cmap=cm.hot, threshold=0.95, colorbar=True)
        except TypeError:
            print(img, 'probably empty tbss map')
            pass

        # Converting to PIL and appending it to the list
        buf = io.BytesIO()
        t.savefig(buf)
        buf.seek(0)
        im = Image.open(buf)
        images.append(im)

    # Joining the images
    imsize = images[0].size
    out = Image.new('RGBA', size=(imsize[0], len(images)*imsize[1]))
    for i, im in enumerate(images):
        box = (0, i * imsize[1], imsize[0], (i+1) * imsize[1])
        out.paste(im, box)

    if pngfile is None:
        import tempfile
        pngfile = tempfile.mkstemp(suffix='.png')[1]
    print('Saving to...', pngfile, '(%s)' % title)

    out.save(pngfile)
Example #33
def test_full_img(model, test_subject, patch_size, use_gpu=False):
    test_subject = dataloader.load_brain(test_subject)
    test_img_shape = test_subject['T1'].shape
    test_batches = dataloader.cut_image(test_subject['T1'],
                                        patch_size=patch_size)
    test_pred = []
    for i, batch in enumerate(test_batches):
        print("[Unet] Test batch : {}".format(i), end='\r', flush=True)
        batch = torch.from_numpy(np.array([batch], dtype=np.float32))
        if use_gpu:
            batch = batch.cuda()
        with torch.no_grad():
            test_pred.append(
                np.argmax(np.array(model(batch).data.cpu()), axis=1)[0])
    stitched = dataloader.stitch_image(test_pred,
                                       test_img_shape,
                                       patch_size=patch_size)
    pred_img = image.new_img_like(test_subject['labels_file'], stitched)
    true_img = image.new_img_like(test_subject['labels_file'],
                                  test_subject['labels'])
    anat_img = image.load_img(test_subject['T1_file'])
    iou = metrics.intersection_over_union(true_img.get_data() == 3,
                                          pred_img.get_data() == 3)

    return {
        'pred_img': pred_img,
        'true_img': true_img,
        'anat_img': anat_img,
        'iou': iou,
        'subject': test_subject
    }
Example #34
    def combine_brains(self, slices=None):
        """Method to combine all brains in the loaded filename dictionary into a big data object with the individual
        brain data in the 4th dimension.

        :param slices: {int} number of slices to load from each brain (testing purpose). The average from ±1 slice
            is loaded for every slice.
        :return: data of all files loaded into the attribute :py:attr:`data`
        """
        print("Loading %i files..." % len(self.names))
        self.img = load_img(self.filenames)
        if slices:
            step = int(float(self.img.shape[2]) / float(slices + 1))
            newshape = list(self.img.shape)
            newshape[2] = slices
            imgarr = np.empty(shape=tuple(newshape))
            for s, img in enumerate(self.img.dataobj.T):
                for i in range(1, slices + 1):
                    imgarr[..., i - 1,
                           s] = np.mean(img.T[...,
                                              (i * step - 1):(i * step + 1)],
                                        axis=2)
            self.img = Nifti1Image(
                imgarr.reshape((self.img.shape[0], self.img.shape[1], slices,
                                len(self.filenames))), self.img.affine)
        self.img.uncache()
        print("\tAll files loaded!")
Example #35
def filter_ics(comps_img, mask, zscore=2., mode='+-'):
    """
    Generator for masking and thresholding each IC spatial map.

    Parameters
    ----------
    comps_img: img-like
        The 'raw' ICC maps image.

    mask: img-like
        If not None, this mask will be applied at the end of the process.

    zscore: float
        The z-score threshold value; each ICC map is z-scored before
        thresholding.

    mode: str
        Choices: '+' for positive threshold,
                 '+-' for positive and negative threshold and
                 '-' for negative threshold.

    Yields
    ------
    icc_filt: nibabel.Nifti1Image
        Each thresholded and masked ICC.
    """
    # store the average value of the blob in a list
    mask = niimg.load_img(mask)
    for i, icimg in enumerate(iter_img(comps_img)):
        yield filter_icc(icimg, mask=mask, thr=zscore, zscore=True, mode=mode)
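A sketch of consuming the generator (paths are placeholders; filter_icc is assumed to come from the same source module):

for i, ic_img in enumerate(filter_ics("melodic_IC.nii.gz", "brain_mask.nii.gz",
                                      zscore=2., mode='+-')):
    ic_img.to_filename("ic_{:02d}_thr.nii.gz".format(i))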
Example #36
def coords_to_peaks_img(coords, mask_img):
    mask_img = image.load_img(mask_img)
    voxels = coords_to_voxels(coords, mask_img)
    peaks = np.zeros(mask_img.shape)
    np.add.at(peaks, tuple(voxels.T), 1.0)
    peaks_img = image.new_img_like(mask_img, peaks)
    return peaks_img
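A sketch of marking two peaks from millimetre coordinates (the mask path is a placeholder, and coords_to_voxels is assumed to be available from the same source module):

import numpy as np
peaks = coords_to_peaks_img(np.array([[-42., -16., 52.],
                                      [46., -14., 50.]]),   # mm coordinates
                            mask_img="mni_brain_mask.nii.gz")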
Example #37
    def _checktype(self, value):

        if value is None or isinstance(value, IMAGES):
            return value
        if isinstance(value, str) and os.path.isfile(value):  # basestring is Python 2 only
            return image.load_img(value)
        raise TypeError('Invalid type: {}'.format(value))
Example #38
    def __init__(self, img_file, txt_file, start_from_one=True):
        self._img_file       = img_file
        self._txt_file       = txt_file
        self._start_from_one = start_from_one

        self.network_names = self._network_names()
        self._img          = niimg.load_img(self._img_file)
        self._self_check()
Example #39
def _read_fmri_gauthier(sub, run, path):
    """ Reads BOLD signal data on Gauthier dataset """
    run = '_run00' + str(run + 1)
    fmri_path = (('{path}/BOLD/task001{run}/rabold.nii').format(
        path=path, sub=sub, run=run))
    fmri = load_img(fmri_path)

    return fmri
Example #40
    def _run_interface(self, runtime):
        t1_img = nli.load_img(self.inputs.in_file)
        t1_data = t1_img.get_data()
        epi_data = nli.load_img(self.inputs.ref_file).get_data()

        # We assume the image is already masked
        mask = t1_data > 0

        t1_min, t1_max = np.unique(t1_data)[[1, -1]]
        epi_min, epi_max = np.unique(epi_data)[[1, -1]]
        scale_factor = (epi_max - epi_min) / (t1_max - t1_min)

        inv_data = mask * ((t1_max - t1_data) * scale_factor + epi_min)

        out_file = fname_presuffix(self.inputs.in_file, suffix='_inv', newpath=runtime.cwd)
        nli.new_img_like(t1_img, inv_data, copy_header=True).to_filename(out_file)
        self._results['out_file'] = out_file
        return runtime
Example #41
    def _filter_ic_imgs(self, ic_file):
        if self.zscore > 0:
            do_zscore = True
        else:
            do_zscore = False

        mask = niimg.load_img(self.mask_file)
        return [filter_icc(icimg, mask=mask, thr=self.zscore, zscore=do_zscore, mode=self.mode)
                for icimg in iter_img(ic_file)]
Example #42
    def _generate_report(self):
        """ Generates the visual report """
        from niworkflows.viz.utils import plot_registration
        NIWORKFLOWS_LOG.info('Generating visual report')

        fixed_image_nii = load_img(self._fixed_image)
        moving_image_nii = load_img(self._moving_image)
        contour_nii = load_img(self._contour) if self._contour is not None else None

        if self._fixed_image_mask:
            fixed_image_nii = unmask(apply_mask(fixed_image_nii,
                                                self._fixed_image_mask),
                                     self._fixed_image_mask)
            # since the moving image is already in the fixed image space we
            # should apply the same mask
            moving_image_nii = unmask(apply_mask(moving_image_nii,
                                                 self._fixed_image_mask),
                                      self._fixed_image_mask)
            mask_nii = load_img(self._fixed_image_mask)
        else:
            mask_nii = threshold_img(fixed_image_nii, 1e-3)

        n_cuts = 7
        if not self._fixed_image_mask and contour_nii:
            cuts = cuts_from_bbox(contour_nii, cuts=n_cuts)
        else:
            cuts = cuts_from_bbox(mask_nii, cuts=n_cuts)

        # Call composer
        compose_view(
            plot_registration(fixed_image_nii, 'fixed-image',
                              estimate_brightness=True,
                              cuts=cuts,
                              label=self._fixed_image_label,
                              contour=contour_nii,
                              compress=self.inputs.compress_report),
            plot_registration(moving_image_nii, 'moving-image',
                              estimate_brightness=True,
                              cuts=cuts,
                              label=self._moving_image_label,
                              contour=contour_nii,
                              compress=self.inputs.compress_report),
            out_file=self._out_report
        )
Example #43
def _read_fmri_mrt(sub, run, path):
    """ Reads BOLD signal data on Mirror-Reversed Text dataset """
    run = 'run-0' + str(run + 1)
    task = 'task-livingnonlivingdecisionwithplainormirrorreversedtext'
    fmri_path = (('{path}/ses-test/func/r{sub}_ses-test_{task}'
                  '_{run}_bold.nii')
                 .format(path=path, sub=sub, task=task, run=run))
    fmri = load_img(fmri_path)

    return fmri
Example #44
def _gen_reference(fixed_image, moving_image, out_file=None):
    import numpy
    from nilearn.image import resample_img, load_img

    if out_file is None:
        out_file = genfname(fixed_image, suffix='reference')
    new_zooms = load_img(moving_image).header.get_zooms()[:3]
    # Avoid small differences in reported resolution to cause changes to
    # FOV. See https://github.com/poldracklab/fmriprep/issues/512
    new_zooms_round = numpy.round(new_zooms, 3)
    resample_img(fixed_image, target_affine=numpy.diag(new_zooms_round),
                 interpolation='nearest').to_filename(out_file)
    return out_file
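A hedged sketch of calling it (file names are placeholders; out_file is passed explicitly so the module-level genfname helper is not needed):

ref_file = _gen_reference("fixed_T1w.nii.gz", "moving_bold.nii.gz",
                          out_file="reference.nii.gz")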
Example #45
def read_data_haxby(subject, tr=2.5, masker=False):
    haxby_dataset = fetch_haxby(subjects=[subject])

    # Load fmri data
    fmri_filename = haxby_dataset.func[0]
    fmri = load_img(fmri_filename)
    # mask = haxby_dataset.mask_vt[0]
    masker = NiftiMasker(mask_strategy='epi', standardize=True, detrend=True,
                         high_pass=0.01, t_r=tr, smoothing_fwhm=5)
    fmri = masker.fit_transform(fmri)
    fmri = fmri.reshape(12, -1, fmri.shape[-1])

    # Load stimuli data
    classes = np.array(['rest', 'face', 'house', 'bottle', 'cat', 'chair',
                        'scissors', 'shoe', 'scrambledpix'])
    labels = np.recfromcsv(
        haxby_dataset.session_target[0], delimiter=" ")['labels'].reshape(
            12, -1)
    stimuli, onsets, conditions = (np.zeros((
        12, len(labels[0]), len(classes))), [], [])
    stimuli[:, 0, 0] = 1
    for session in range(12):
        onsets.append([])
        conditions.append([])
        for scan in range(1, len(fmri[session])):
            if (labels[session][scan - 1] == 'rest' and
                labels[session][scan] != 'rest'):
                label = labels[session][scan]
                stimuli[session, scan, np.where(classes == label)[0][0]] = 1
                conditions[session].append(label)
                onsets[session].append(scan * tr)
            else:
                stimuli[session, scan, 0] = 1

    if subject == 5:
        fmri = np.vstack((fmri[:8], fmri[9:]))
        stimuli = np.vstack((stimuli[:8], stimuli[9:]))
        onsets = np.vstack((onsets[:8], onsets[9:]))
        conditions = np.vstack((conditions[:8], conditions[9:]))

    if masker:
        return fmri, stimuli, onsets, conditions, masker

    return fmri, stimuli, onsets, conditions
Example #46
def _gen_reference(fixed_image, moving_image, out_file=None):
    import os.path as op
    import numpy
    from nilearn.image import resample_img, load_img

    if out_file is None:
        fname, ext = op.splitext(op.basename(fixed_image))
        if ext == '.gz':
            fname, ext2 = op.splitext(fname)
            ext = ext2 + ext
        out_file = op.abspath('%s_wm%s' % (fname, ext))

    new_zooms = load_img(moving_image).header.get_zooms()

    new_ref_im = resample_img(fixed_image, target_affine=numpy.diag(new_zooms),
                              interpolation='nearest')

    new_ref_im.to_filename(out_file)

    return out_file
Example #47
def data_info(img):
    """Tool to report the image data shape, affine, voxel size

    Parameters
    ----------
    img : Nifti like image/object

    Returns
    -------
    shape, affine, vox_size
    """
    img = load_img(img)
    img_data = _safe_get_data(img)

    if len(img.shape) > 3:
        shape = img.shape[:3]
    else:
        shape = img.shape

    affine = img.affine  # get_affine() was removed from nibabel
    vox_size = np.prod(np.diag(abs(affine[:3])))

    return shape, affine, vox_size
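A minimal usage sketch (the path is a placeholder):

shape, affine, vox_size = data_info("sub-01_bold.nii.gz")
print(shape, vox_size)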
Example #48
##############################################################################
# **Binarization** and **Intersection** with Ventral Temporal (VT) mask - We
# now want to restrict our investigation to the VT area. The corresponding
# spatial mask is provided in haxby_dataset.mask_vt. We want to compute the
# intersection of this provided mask with our self-computed mask.

# self-computed mask
bin_p_values = (log_p_values != 0)
# VT mask
mask_vt_filename = haxby_dataset.mask_vt[0]

# The first step is to load the VT mask and, at the same time, convert its
# numeric data to boolean type
from nilearn.image import load_img

vt = load_img(mask_vt_filename).get_data().astype(bool)

# We can then use a logical "and" operation - numpy.logical_and - to keep only
# voxels that have been selected in both masks. In neuroimaging jargon, this
# is called an "AND conjunction". We use already imported numpy as np
bin_p_values_and_vt = np.logical_and(bin_p_values, vt)

# Visualizing the mask intersection results using plotting function `plot_roi`,
# a function which can be used for visualizing target specific voxels.
from nilearn.plotting import plot_roi, show

# First, we create a new Nifti image from the binarized and intersected mask
# (second argument) and use it in the visualization. The boolean values must
# be converted to int at the same time, otherwise an error will be raised
bin_p_values_and_vt_img = new_img_like(fmri_img,
                                       bin_p_values_and_vt.astype(int))
Example #49
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt

from nilearn import datasets, plotting

msdl = datasets.fetch_atlas_msdl()

maps = msdl.maps
labels = msdl.labels
networks = msdl.networks

from nilearn.image import load_img, index_img, iter_img

maps_img = load_img(maps)
n_maps = maps_img.shape[3]

list_of_nodes = [[0, 1], [2], [3, 4, 5, 6], [7], [8], [9, 10, 11, 12],
                 [13], [14, 15, 16], [17, 18], [19, 20, 21], [22, 23, 24],
                 [25, 26], [27, 28, 29, 30, 31], [32], [33], [34, 35, 36],
                 [37, 38]]
list_of_imgs = []
list_of_titles = []

for node_ in list_of_nodes:
    cur_img = index_img(maps_img, node_)
    list_of_imgs.append(cur_img)
    title_ = []
    for i in node_:
        title_.append(msdl.labels[i])
Example #50
0
 def _load_timecourses(self):
     """ Return the timecourses array loaded from the timecourses image file."""
     # load the timecourses file
     tcsf = fetch_one_file(self.ica_dir, self._tcs_fname)
     tcs = niimg.load_img(tcsf).get_fdata()
     return tcs
Example #51
0
localizer_tmap_filename = localizer_dataset.tmaps[0]
localizer_anat_filename = localizer_dataset.anats[0]

###############################################################################
# Now, the localizer t-map image can be resampled to the MNI template image.
from nilearn.image import resample_to_img

resampled_localizer_tmap = resample_to_img(localizer_tmap_filename, template)

###############################################################################
# Let's check the shape and affine have been correctly updated.

# First load the original t-map in memory:
from nilearn.image import load_img
tmap_img = load_img(localizer_dataset.tmaps[0])

original_shape = tmap_img.shape
original_affine = tmap_img.affine

resampled_shape = resampled_localizer_tmap.shape
resampled_affine = resampled_localizer_tmap.affine

template_img = load_img(template)
template_shape = template_img.shape
template_affine = template_img.affine
print("""Shape comparison:
- Original t-map image shape : {0}
- Resampled t-map image shape: {1}
- Template image shape       : {2}
""".format(original_shape, resampled_shape, template_shape))
Example #52
0
# Restrict to faces and houses
from nilearn.image import index_img
condition_mask = np.logical_or(y == b'face', y == b'house')

fmri_img = index_img(fmri_filename, condition_mask)
y, session = y[condition_mask], session[condition_mask]

#########################################################################
# Prepare masks
#
# - mask_img is the original mask
# - process_mask_img is a subset of mask_img, it contains the voxels that
#   should be processed (we only keep one axial slice, picked_slice = 29
#   below, and the back of the brain to speed up computation)

mask_img = load_img(haxby_dataset.mask)

# .astype() makes a copy.
process_mask = mask_img.get_fdata().astype(int)
picked_slice = 29
process_mask[..., (picked_slice + 1):] = 0
process_mask[..., :picked_slice] = 0
process_mask[:, 30:] = 0
process_mask_img = new_img_like(mask_img, process_mask)

#########################################################################
# Searchlight computation

# Make processing parallel
# /!\ As each thread will print its progress, n_jobs > 1 could mess up the
#     information output.
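# The snippet is cut off before the estimator itself; a minimal sketch of how
# it would continue, assuming nilearn's SearchLight API and single-threaded
# execution (n_jobs=1):
from nilearn.decoding import SearchLight
searchlight = SearchLight(mask_img,
                          process_mask_img=process_mask_img,
                          radius=5.6, n_jobs=1, verbose=1)
searchlight.fit(fmri_img, y)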
Example #53
0
motor_images = fetch_neurovault_motor_task()
stat_img = motor_images.images[0]

###############################################################################
# Now, the localizer t-map image can be resampled to the MNI template image.
from nilearn.image import resample_to_img

resampled_stat_img = resample_to_img(stat_img, template)

###############################################################################
# Let's check the shape and affine have been correctly updated.

# First load the original t-map in memory:
from nilearn.image import load_img
tmap_img = load_img(stat_img)

original_shape = tmap_img.shape
original_affine = tmap_img.affine

resampled_shape = resampled_stat_img.shape
resampled_affine = resampled_stat_img.affine

template_img = load_img(template)
template_shape = template_img.shape
template_affine = template_img.affine
print("""Shape comparison:
- Original t-map image shape : {0}
- Resampled t-map image shape: {1}
- Template image shape       : {2}
""".format(original_shape, resampled_shape, template_shape))

Example #54
0
###############################################################################
# Visualizing one volume in a 4D file
# -----------------------------------
#
# We can download resting-state networks from the Smith 2009 study on
# the correspondence between rest and task
rsn = datasets.fetch_atlas_smith_2009()['rsn10']
print(rsn)

###############################################################################
# It is a 4D nifti file. We load it into memory to print its shape.
from nilearn import image
print(image.load_img(rsn).shape)

###############################################################################
# We can retrieve the first volume (note that Python indexing starts at 0):
first_rsn = image.index_img(rsn, 0)
print(first_rsn.shape)

###############################################################################
# first_rsn is a 3D image.
#
# We can then plot it
plotting.plot_stat_map(first_rsn)


###############################################################################
# Looping on all volumes in a 4D file
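# The example breaks off here; the loop it announces would look like this
# sketch, iterating over the ten RSN volumes with nilearn's iter_img:
for volume in image.iter_img(rsn):
    plotting.plot_stat_map(volume, threshold=3, display_mode='z',
                           cut_coords=1, colorbar=False)
plotting.show()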
Example #55
0
 def load_mask(self):
     """ Return the mask image. """
     mask_file = fetch_one_file(self.ica_dir, self._mask_fname)
     return niimg.load_img(mask_file)
Example #56
0
def spatial_maps_goodness_of_fit(rsn_imgs, spatial_maps, mask_file, rsn_thr=4.0):
    """ Goodness-of-fit values described as in Zhou et al., 2010, Brain.

    Parameters
    ----------
    rsn_imgs: list of niimg-like or 4D niimg-like
        The RSN maps. They should be thresholded beforehand if `rsn_thr` is lower than or equal to 0.

    spatial_maps: list of niimg-like or 4D niimg-like
        The spatial maps to compare against the RSN templates, e.g., ICA component maps.

    mask_file: niimg-like
        An extra mask to apply to the thresholded RSN masks.
        This is used to exclude values outside of the RSN blobs.
        It is recommended to use a brain mask for this.

    rsn_thr: float, optional
        The threshold to apply to `rsn_imgs` to create the RSN masks.
        If rsn_thr <= 0, no thresholding will be applied.

    Returns
    -------
    gofs: np.ndarray
        A matrix of shape MxN, where M is len(rsn_imgs) and N is len(spatial_maps).
        It contains the goodness-of-fit values.

    Notes
    -----
    "These ICN templates were thresholded at a z-score 4.0 to be visually comparable to the
    consistent ICNs published by Damoiseaux et al. (2006). A minor modification of previous
    goodness-of-fit methods (Seeley et al., 2007b, 2009) was included here for template
    matching, with goodness-of-fit scores calculated by multiplying
    (i) the average z-score difference between voxels falling within the template and
    voxels falling outside the template; and
    (ii) the difference in the percentage of positive z-score voxels inside and outside the template.

    This goodness-of-fit algorithm proves less vulnerable to inter-subject variability in shape,
    size, location and strength of each ICN" than the one published in Seeley et al., 2007b.
    This latter method only uses the value in (i).

    Extracted from Zhou et al., 2010, Brain.
    """
    rsn_img = niimg.load_img(rsn_imgs)
    spm_img = niimg.load_img(spatial_maps)

    n_rsns = rsn_img.shape[-1]
    n_ics  =  spm_img.shape[-1]
    gofs   = np.zeros((n_rsns, n_ics), dtype=float)

    # threshold the RSN templates
    if rsn_thr > 0:
        thr_rsns = (spatial_map(rsn, thr=rsn_thr, mode='+-')
                    for rsn in niimg.iter_img(rsn_img))
    else:
        thr_rsns = rsn_img

    # for each RSN template and IC image
    iter_rsn_ic = itertools.product(enumerate(niimg.iter_img(thr_rsns)),
                                    enumerate(niimg.iter_img( spm_img)))

    for (rsn_idx, rsn), (ic_idx, ic) in iter_rsn_ic:

        ref_vol = rsn.get_fdata()
        rsn_vol = np.zeros(rsn.shape, dtype=int)

        #rsn_in  = niimg.math_img('np.abs(img) > 0', img=rsn)
        rsn_in = rsn_vol.copy()
        rsn_in[np.abs(ref_vol) > 0] = 1

        #rsn_out = niimg.math_img('img == 0', img=rsn)
        rsn_out = rsn_vol.copy()
        rsn_out[ref_vol == 0] = 1

        if mask_file is not None:
            # rsn_out = niimg.math_img('mask * img', mask=rsn_brain_mask, img=rsn_out)
            rsn_brain_mask = niimg.resample_to_img(mask_file, rsn,
                                                   interpolation='nearest')
            rsn_out = rsn_brain_mask.get_fdata() * rsn_out

        # convert the mask arrays to image in order to resample
        rsn_in  = niimg.new_img_like(rsn, rsn_in)
        rsn_out = niimg.new_img_like(rsn, rsn_out)

        rsn_in  = niimg.resample_to_img(rsn_in,  ic, interpolation='nearest')
        rsn_out = niimg.resample_to_img(rsn_out, ic, interpolation='nearest')

        # apply the mask
        #zscore_in  = niimg.math_img('mask * img', mask=rsn_in,  img=ic).get_fdata()
        zscore_in = rsn_in.get_fdata() * ic.get_fdata()

        #zscore_out = niimg.math_img('mask * img', mask=rsn_out, img=ic).get_fdata()
        zscore_out = rsn_out.get_fdata() * ic.get_fdata()

        #gof_term1
        # calculate the average z-score difference between voxels falling
        # within the template and voxels falling outside the template
        gof_term1 = zscore_in.mean() - zscore_out.mean()

        #gof_term2
        # the difference in the percentage of positive z-score voxels inside and outside the template.
        n_pos_zscore_in  = np.sum(zscore_in  > 0)
        n_pos_zscore_out = np.sum(zscore_out > 0)
        n_pos_zscore_tot = n_pos_zscore_in + n_pos_zscore_out

        if n_pos_zscore_tot != 0:
            n_pos_zscore_pcnt = 100 / n_pos_zscore_tot
            gof_term2 = (n_pos_zscore_in - n_pos_zscore_out) * n_pos_zscore_pcnt
        else:
            gof_term2 = 0

        # global gof
        gof = gof_term1 * gof_term2

        # add the result
        gofs[rsn_idx][ic_idx] = gof

    return gofs
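# A minimal usage sketch (hypothetical file names): compare a 4D file of RSN
# templates against a 4D file of ICA maps within a brain mask, then pick the
# best-fitting IC for each template.
gofs = spatial_maps_goodness_of_fit('rsn_templates_4d.nii.gz',
                                    'ica_maps_4d.nii.gz',
                                    mask_file='brain_mask.nii.gz',
                                    rsn_thr=4.0)
best_ic_per_rsn = gofs.argmax(axis=1)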
Example #57
0
l, n = find_region_names_using_cut_coords(dmn_coords, atlas_img,
                                          labels=labels)

# 'l' holds the new labels generated from the given atlas labels and
# coordinates
new_labels = l

# 'n' holds the names of the brain regions matched to those labels
region_names_involved = n

######################################################################
# Let's visualize
from nilearn.image import load_img
from nilearn import plotting
from nilearn.image import new_img_like

atlas_img = load_img(atlas_img)
affine = atlas_img.affine  # get_affine() was removed in recent nibabel
atlas_data = atlas_img.get_fdata()

for i, this_label in enumerate(new_labels):
    this_data = (atlas_data == this_label)
    this_data = this_data.astype(int)
    this_img = new_img_like(atlas_img, this_data, affine)
    plotting.plot_roi(this_img, cut_coords=dmn_coords[i],
                      title=region_names_involved[i])

plotting.show()
Example #58
0
def spatial_maps_pairwise_similarity(imgs1, imgs2, mask_file, distance='correlation'):
    """ Similarity values of each image in `imgs1` to each image in `imgs2`, both masked by `mask_file`.
    These values are based on distance metrics, specified by `distance` argument.
    The resulting similarity value is the complementary value of the distance,
    i.e., '1 - <distance value>'.
    The images in `imgs1` will be resampled to `imgs2` if their affine matrices don't match.

    Parameters
    ----------
    imgs1: list of niimg-like or 4D niimg-like

    imgs2: list of niimg-like or 4D niimg-like

    mask_file: niimg-like

    distance: str
        Valid values for `distance` are:
        From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', 'manhattan'].
        From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev', 'correlation', 'dice', 'hamming',
                                      'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski',
                                      'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
                                      'sqeuclidean', 'yule']
        See the documentation for scipy.spatial.distance for details on these metrics.

    Returns
    -------
    corrs: np.ndarray
        A matrix of shape MxN, where M is len(imgs1) and N is len(imgs2).
        It contains the similarity values.
    """
    img1_ = niimg.load_img(imgs1)
    img2_ = niimg.load_img(imgs2)

    n_imgs1 = img1_.shape[-1]
    n_imgs2 = img2_.shape[-1]
    corrs = np.zeros((n_imgs1, n_imgs2), dtype=float)

    mask_trnsf = niimg.resample_to_img(mask_file, niimg.index_img(img2_, 0),
                                       interpolation='nearest',
                                       copy=True)

    for idx1, img1 in enumerate(niimg.iter_img(img1_)):
        img1_resamp = niimg.resample_to_img(img1, niimg.index_img(img2_, 0), copy=True)
        img1_masked = nimask.apply_mask(img1_resamp, mask_trnsf)

        for idx2, img2 in enumerate(niimg.iter_img(img2_)):
            img2_masked = nimask.apply_mask(img2, mask_trnsf)
            dist = pairwise_distances(img1_masked.reshape(1, -1),
                                      img2_masked.reshape(1, -1),
                                      metric=distance)

            # since this is a scalar value
            dist = dist[0][0]

            # since this is a distance, not a similarity value
            corr = 1 - dist

            # store it
            corrs[idx1, idx2] = corr

    return corrs
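# A minimal usage sketch (hypothetical file names): correlate two sets of ICA
# maps within a brain mask; entry [i, j] is 1 minus the correlation distance
# between map i of the first set and map j of the second.
sims = spatial_maps_pairwise_similarity('ica_run1_4d.nii.gz',
                                        'ica_run2_4d.nii.gz',
                                        mask_file='brain_mask.nii.gz',
                                        distance='correlation')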
Example #59
0
# MPI variables
comm = MPI.COMM_WORLD
rank = comm.rank
size = comm.size

# Generate random data
if rank == 0:
    np.random.seed(0)
    data1_rand = np.random.rand(91,109,91,16)
    data2_rand = np.random.rand(91,109,91,16)
    classical = np.random.rand(2600)
    jazz = np.random.rand(2600)
    d1_reshape = np.reshape(data1_rand,(91*109*91,16))
    d2_reshape = np.reshape(data2_rand,(91*109*91,16))
    a1 = load_img('a1plus_2mm.nii.gz')
    a1_vec = np.reshape(a1.get_fdata(),(91*109*91))
    a1_idx = np.nonzero(a1_vec)
    for i in range(8):
        d1_reshape[a1_idx[0],i] += classical
        d1_reshape[a1_idx[0],i+8] += jazz
        d2_reshape[a1_idx[0],i] += classical
        d2_reshape[a1_idx[0],i+8] += jazz
    data1 = np.reshape(d1_reshape,(91,109,91,16))
    data2 = np.reshape(d2_reshape,(91,109,91,16))

    # Flatten data, then zscore data, then reshape data back into MNI coordinate space
    data1 = stats.zscore(np.reshape(data1,(91*109*91,16)))
    data1 = np.reshape(data1,(91,109,91,16))
    data2 = stats.zscore(np.reshape(data2,(91*109*91,16)))
    data2 = np.reshape(data2,(91,109,91,16))
Example #60
0
def plot_multi_slices(img, cut_dir="z", n_cuts=20, n_cols=4, figsize=(2.5, 3),
                      title="", title_fontsize=32, plot_func=None, **kwargs):
    """ Create a plot of `n_cuts` of `img` organized distributed in `n_cols`.
    Parameters
    ----------
    img: niimg-like

    cut_dir: str
        Sectional direction; possible values are "x", "y" or "z".

    n_cuts: int
        Number of cuts in the plot.

    n_cols: int
        Maximum number of image columns in the plot.

    figsize: 2-tuple of int
        (w, h) size in inches of the figure.

    title: str
        The superior title of the figure.

    title_fontsize: int
        The size of the title font.

    plot_func: function
       Function to plot each slice.
       Default: nilearn.plotting.plot_stat_map

    kwargs: keyword arguments
        Input arguments for plot_func.

    Returns
    -------
    fig: matplotlib.figure
    """
    import math
    import logging  # used below to report cut coordinates that fail to plot

    from matplotlib import pyplot as plt
    from matplotlib import gridspec
    import nilearn.plotting as niplot
    import nilearn.image as niimg

    # there is another version without grouper, but it is less efficient.
    def grouper(iterable, n, fillvalue=None):
        "Collect data into fixed-length chunks or blocks"
        # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
        import itertools
        args = [iter(iterable)] * n
        return itertools.zip_longest(fillvalue=fillvalue, *args)

    if plot_func is None:
        plot_func = niplot.plot_stat_map

    _img = niimg.load_img(img)

    n_rows = 1
    if n_cuts > n_cols:
        n_rows = math.ceil(n_cuts/n_cols)

    cuts = niplot.find_cut_slices(_img, n_cuts=n_cuts, direction=cut_dir)

    figsize = figsize[0] * n_cols, figsize[1] * n_rows
    fig = plt.figure(figsize=figsize, facecolor='black')
    gs  = gridspec.GridSpec(n_rows, 1)

    if title:
        fig.suptitle(title, fontsize=title_fontsize, color='gray')

    # for the superior plot
    put_colorbar = True

    # make the plots
    plots = []
    for i, cut_chunks in enumerate(grouper(cuts, n_cols)):
        ax = plt.subplot(gs[i])

        cut_chunks = [cut for cut in cut_chunks if cut is not None]

        try:
            p = plot_func(_img,
                          display_mode=cut_dir,
                          cut_coords=cut_chunks,
                          colorbar=put_colorbar,
                          figure=fig,
                          axes=ax,
                          **kwargs)
        except IndexError:
            logging.warning('Could not plot for coords {}.'.format(cut_chunks))
        else:
            # only keep displays that were actually created; appending in the
            # `finally` block would re-append (or crash on) a failed plot
            plots.append(p)
        finally:
            put_colorbar = False

    for p in plots:
        p.close()

    return fig
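# A minimal usage sketch (hypothetical file name): 20 axial cuts of a z-map in
# 4 columns; extra keyword arguments are forwarded to plot_stat_map.
fig = plot_multi_slices('group_zmap.nii.gz', cut_dir='z', n_cuts=20, n_cols=4,
                        title='Group z-map', threshold=2.3)
fig.savefig('group_zmap_slices.png', facecolor='black')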