Code example #1
def remove_vols(dwifile, bvalfile, bvecfile, remove_Is, output_prefix):
    import numpy as np
    import nibabel as nib
    # load data
    dwidata = nib.load(dwifile)
    data = dwidata.get_data()
    bvals = np.genfromtxt(bvalfile)
    bvecs = np.genfromtxt(bvecfile)
    # bvecs = bvecs.T

    # create output data
    shape = (dwidata.shape[0], dwidata.shape[1], dwidata.shape[2], data.shape[3]-len(remove_Is))
    print(shape)
    writedata = np.empty(shape)
    write_bvals = []
    write_bvecs = []
    g_all_i = 0
    for g_i in range(data.shape[3]):
        if not (g_i in remove_Is):
            writedata[:,:,:,g_all_i] = data[:,:,:,g_i]
            g_all_i = g_all_i + 1
            write_bvals.append(bvals[g_i])
            write_bvecs.append(bvecs[g_i])

    # write output data (experiment_dir is assumed to be defined at module level)
    outfile = experiment_dir + '/' + output_prefix + '/' + output_prefix + 'subvolumes.nii.gz'
    nib.save(nib.nifti1.Nifti1Image(writedata, dwidata.get_affine()), outfile)
    write_bvecs = np.array(write_bvecs).T
    bvalfile = experiment_dir + '/' + output_prefix + '/' + output_prefix + 'subvolumes.bval'
    np.savetxt(bvalfile, write_bvals, fmt='%1.10f')
    bvecfile = experiment_dir + '/' + output_prefix + '/' + output_prefix + 'subvolumes.bvec'
    np.savetxt(bvecfile, write_bvecs, fmt='%1.10f')

    return outfile, bvalfile, bvecfile
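
A minimal usage sketch; all paths and the module-level experiment_dir are hypothetical, and remove_Is lists the volume indices to drop:

experiment_dir = '/data/experiment'  # assumed global read by remove_vols
out_dwi, out_bval, out_bvec = remove_vols(
    '/data/experiment/sub01/dwi.nii.gz',
    '/data/experiment/sub01/dwi.bval',
    '/data/experiment/sub01/dwi.bvec',
    remove_Is=[0, 5, 12],
    output_prefix='sub01')
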
Code example #2
File: tensors.py  Project: DimitriPapadopoulos/nipype
    def _run_interface(self, runtime):
        from dipy.reconst import dti
        from dipy.io.utils import nifti1_symmat
        gtab = self._get_gradient_table()

        img = nb.load(self.inputs.in_file)
        data = img.get_data()
        affine = img.affine
        mask = None
        if isdefined(self.inputs.mask_file):
            mask = nb.load(self.inputs.mask_file).get_data()

        # Fit it
        tenmodel = dti.TensorModel(gtab)
        ten_fit = tenmodel.fit(data, mask)
        lower_triangular = ten_fit.lower_triangular()
        img = nifti1_symmat(lower_triangular, affine)
        out_file = self._gen_filename('dti')
        nb.save(img, out_file)
        IFLOGGER.info('DTI parameters image saved as {i}'.format(i=out_file))

        #FA MD RD and AD
        for metric in ["fa", "md", "rd", "ad"]:
            data = getattr(ten_fit, metric).astype("float32")
            out_name = self._gen_filename(metric)
            nb.Nifti1Image(data, affine).to_filename(out_name)
            IFLOGGER.info('DTI {metric} image saved as {i}'.format(i=out_name, metric=metric))

        return runtime
Code example #3
File: modularity.py  Project: Lx37/dmgraphanalysis
def export_lol_mask_file(rada_lol_file,Pajek_net_file,coords_file,mask_file):

    import numpy as np
    
    import nibabel as nib
    import os
    from dmgraphanalysis.utils_cor import return_mod_mask_corres,read_lol_file,read_Pajek_corres_nodes

    print('Loading Pajek_net_file for reading node_corres')
    node_corres = read_Pajek_corres_nodes(Pajek_net_file)
    print(node_corres.shape)

    print('Loading coords')
    coords = np.array(np.loadtxt(coords_file), dtype=int)
    print(coords.shape)

    print('Loading mask parameters')
    mask = nib.load(mask_file)
    data_mask_shape = mask.get_data().shape
    mask_header = mask.get_header().copy()
    mask_affine = np.copy(mask.get_affine())

    print('Loading community belonging file ' + rada_lol_file)
    community_vect = read_lol_file(rada_lol_file)
    print(community_vect)

    print('Transforming to nii file')
    lol_mask_data = return_mod_mask_corres(community_vect, node_corres, coords, data_mask_shape)
    print(lol_mask_data.shape)

    print('Saving nii file')
    lol_mask_file = os.path.abspath("lol_mask_data.nii")
    nib.save(nib.Nifti1Image(np.array(lol_mask_data, dtype=int), mask_affine, mask_header), lol_mask_file)

    return lol_mask_file
Code example #4
File: mask.py  Project: sathayas/fMRIConnectome
def mask_brain(FeatDir, fMask=""):
    '''
    This function creates a brain mask based on the subject's normalized
    T1_brain image. It can also incorporate an external mask.

    Input parameters:
          FeatDir:    The .feat directory where segmentation results reside.
          fMask:      The file name for the binary mask image. If not provided,
                      then it will be omitted.
    
    Returns:
          NONE:
    
    Output:
          It produces a mask image called mask_brain.nii.gz under the
          reg directory of the FeatDir.
    '''
    # directory and file names
    RegDir = os.path.join(FeatDir, 'reg')
    sBase = image_base(FeatDir)
    fT1 = sBase + '.nii.gz'
    fT1_bin = sBase + '_bin.nii.gz'
    ffmri = os.path.join(RegDir, 'func2standard_r.nii.gz')
    fIntersect = os.path.join(RegDir, 'mask_brain.nii.gz')

    # threshold the T1 image
    com_fslmaths = 'fslmaths ' + fT1
    com_fslmaths += ' -bin ' + fT1_bin
    res = os.system(com_fslmaths)

    # reslicing the binarized T1 image to the fMRI space
    fT1_bin_r = reslice_to_fMRI(fT1_bin, ffmri)

    # if an external mask is provided
    if fMask!="":
        # first, copy the mask to the reg directory
        MaskDir, fMaskImg = os.path.split(os.path.abspath(fMask))
        fMaskCopy = os.path.join(RegDir, fMaskImg)
        com_cp = 'cp ' + os.path.join(MaskDir, fMaskImg) + ' ' + RegDir
        res = os.system(com_cp)

        # then reslice the mask copy to the voxel size of fMRI data
        fMaskCopy_r = reslice_to_fMRI(fMaskCopy, ffmri)

        # multiplying the resliced external mask and the brain mask
        # first, load mask images
        img_mask = nib.load(fMaskCopy_r)
        X_mask = img_mask.get_data()
        img_brain = nib.load(fT1_bin_r)
        X_brain = img_brain.get_data()
        # then multiply masks
        X_prod = X_mask * X_brain
        # then saving the new mask image
        maskimg = nib.Nifti1Image(X_prod, img_mask.get_affine())
        nib.save(maskimg, fIntersect)

    # if an external mask is not provided
    else:
        com_cp = 'cp ' + fT1_bin_r + ' ' + fIntersect
        res = os.system(com_cp)
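
A usage sketch, assuming a standard FSL .feat directory and an optional external mask (both paths hypothetical):

# without an external mask: the binarized, resliced T1 becomes the brain mask
mask_brain('/studies/sub01.feat')
# with an external mask: the resliced mask is multiplied with the T1 brain mask
mask_brain('/studies/sub01.feat', fMask='/studies/rois/gm_mask.nii.gz')
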
Code example #5
File: tensors.py  Project: DimitriPapadopoulos/nipype
    def _run_interface(self, runtime):
        from dipy.reconst import dti

        # Load the 4D image files
        img = nb.load(self.inputs.in_file)
        data = img.get_data()
        affine = img.get_affine()

        # Load the gradient strengths and directions
        gtab = self._get_gradient_table()

        # Mask the data so that tensors are not fit for
        # unnecessary voxels
        mask = data[..., 0] > 50

        # Fit the tensors to the data
        tenmodel = dti.TensorModel(gtab)
        tenfit = tenmodel.fit(data, mask)

        # Calculate the mode of each voxel's tensor
        mode_data = tenfit.mode

        # Write as a 3D Nifti image with the original affine
        img = nb.Nifti1Image(mode_data, affine)
        out_file = self._gen_filename('mode')
        nb.save(img, out_file)
        IFLOGGER.info('Tensor mode image saved as {i}'.format(i=out_file))
        return runtime
Code example #6
File: tensors.py  Project: jamespooley/nipype
    def _run_interface(self, runtime):
        from dipy.reconst import dki
        from dipy.io.utils import nifti1_symmat
        gtab = self._get_gradient_table()

        img = nb.load(self.inputs.in_file)
        data = img.get_data()
        affine = img.affine
        mask = None
        if isdefined(self.inputs.mask_file):
            mask = nb.load(self.inputs.mask_file).get_data()

        # Fit the DKI model
        kurtosis_model = dki.DiffusionKurtosisModel(gtab)
        kurtosis_fit = kurtosis_model.fit(data, mask)
        lower_triangular = kurtosis_fit.lower_triangular()
        img = nifti1_symmat(lower_triangular, affine)
        out_file = self._gen_filename('dki')
        nb.save(img, out_file)
        IFLOGGER.info('DKI parameters image saved as {i}'.format(i=out_file))

        # FA, MD, RD, and AD
        for metric in ['fa', 'md', 'rd', 'ad', 'mk', 'ak', 'rk']:
            data = getattr(kurtosis_fit, metric).astype('float32')
            out_name = self._gen_filename(metric)
            nb.Nifti1Image(data, affine).to_filename(out_name)
            IFLOGGER.info('DKI {metric} image saved as {i}'.format(i=out_name,
                                                                   metric=metric))

        return runtime
Code example #7
File: mask.py  Project: bergtholdt/nipy
def intersect_masks(input_masks, output_filename=None, threshold=0.5, cc=True):
    """
    Given a list of input mask images, generate the output image which
    is the threshold-level intersection of the inputs

    Parameters
    ----------
    input_masks: list of strings or ndarrays
        paths of the input images (nsubj is set as len(input_masks)),
        or individual masks.
    output_filename, string:
        Path of the output image, if None no file is saved.
    threshold: float within [0, 1[, optional
        gives the level of the intersection.
        threshold=1 corresponds to keeping the intersection of all
        masks, whereas threshold=0 is the union of all masks.
    cc: bool, optional
        If true, extract the main connected component
        
    Returns
    -------
    grp_mask, boolean array of shape the image shape
    """  
    grp_mask = None
    if threshold > 1:
        raise ValueError('The threshold should be < 1')
    if threshold < 0:
        raise ValueError('The threshold should be > 0')
    threshold = min(threshold, 1 - 1.e-7)

    for this_mask in input_masks:
        if isinstance(this_mask, str):
            # We have a filename
            this_mask = load(this_mask).get_data()
        if grp_mask is None:
            grp_mask = this_mask.copy().astype(int)
        else:
            grp_mask += this_mask
    
    grp_mask = grp_mask > (threshold * len(list(input_masks)))
    
    if np.any(grp_mask > 0) and cc:
        grp_mask = largest_cc(grp_mask)
    
    if output_filename is not None:
        if isinstance(input_masks[0], str):
            nim = load(input_masks[0]) 
            header = nim.get_header()
            affine = nim.get_affine()
        else:
            header = dict()
            affine = np.eye(4)
        header['descrip'] = 'mask image'
        output_image = nifti1.Nifti1Image(grp_mask.astype(np.uint8),
                                            affine=affine,
                                            header=header,
                                         )
        save(output_image, output_filename)

    return grp_mask > 0
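
A usage sketch with hypothetical mask files; threshold=0.5 keeps voxels present in at least half of the input masks:

grp_mask = intersect_masks(
    ['mask_sub01.nii.gz', 'mask_sub02.nii.gz', 'mask_sub03.nii.gz'],
    output_filename='group_mask.nii.gz', threshold=0.5, cc=True)
print(grp_mask.shape)  # boolean array with the shape of the input images
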
Code example #8
    def coefs_to_niftii(self, coefs, nii_name='coefs.nii.gz'):
        """

        """

        print(coefs)
        if self.n_features != len(coefs):
            print("Num features {} does not match coef size {}. Did you downsize?".format(
                self.n_features, len(coefs)))
            raise ValueError

        restored_coefs = self.restore_coefs(coefs)
        print("restoring... num non-zero",np.count_nonzero(restored_coefs) )

        try:
            restored_coefs = restored_coefs.reshape(np.array(self.nii_shape))
            print("Restored shape:", restored_coefs.shape)
        except IndexError:
            raise IndexError("Coefs don't match niftii shape")



        # 'tt29.nii' is a sample image assumed to exist next to this script
        sample_img = nib.load('tt29.nii')
        affine = sample_img.affine.copy()
        header = sample_img.header.copy()

        header.set_data_dtype(np.float64)
        assert(header.get_data_dtype()==np.float64)

        print("Saving coefs as datatype", header.get_data_dtype())
        i = nib.Nifti1Image(restored_coefs.astype(np.float64), affine, header)
        nib.save(i, nii_name)

        return restored_coefs
Code example #9
File: discrete_domain.py  Project: bergtholdt/nipy
    def to_image(self, path=None, data=None):
        """Write itself as a binary image, and returns it
        
        Parameters
        ==========
        path: string, path of the output image, if any
        data: array of shape self.size,
              data to put in the nonzero region of the image
        """
        if data is None:
            wdata = np.zeros(self.shape, np.int8)
        else:
            wdata = np.zeros(self.shape, data.dtype)
        wdata[self.ijk[:, 0], self.ijk[:, 1], self.ijk[:, 2]] = 1
        if data is not None:
            if data.size != self.size:
                raise ValueError('incorrect data size')
            wdata[wdata > 0] = data

        nim = Nifti1Image(wdata, self.affine)
        nim.get_header()['descrip'] = 'mask image representing domain %s' \
            % self.id
        if path is not None:
            save(nim, path)
        return nim
Code example #10
File: tracks.py  Project: Guokr1991/nipype
	def _run_interface(self, runtime):
		tracks, header = trk.read(self.inputs.in_file)
		if not isdefined(self.inputs.data_dims):
			data_dims = header['dim']
		else:
			data_dims = self.inputs.data_dims

		if not isdefined(self.inputs.voxel_dims):
			voxel_size = header['voxel_size']
		else:
			voxel_size = self.inputs.voxel_dims

		affine = header['vox_to_ras']

		streams = ((ii[0]) for ii in tracks)
		data = density_map(streams, data_dims, voxel_size)
		if data.max() < 2**15:
			data = data.astype('int16')

		img = nb.Nifti1Image(data,affine)
		out_file = op.abspath(self.inputs.out_filename)
		nb.save(img, out_file)
		iflogger.info('Track density map saved as {i}'.format(i=out_file))
		iflogger.info('Data Dimensions {d}'.format(d=data_dims))
		iflogger.info('Voxel Dimensions {v}'.format(v=voxel_size))
		return runtime
Code example #11
def generate_warping_field(fname_dest, warp_x, warp_y, fname_warp='warping_field.nii.gz', verbose=1):
    """
    Generate an ITK warping field
    :param fname_dest:
    :param warp_x:
    :param warp_y:
    :param fname_warp:
    :param verbose:
    :return:
    """
    sct.printv('\nGenerate warping field...', verbose)

    # Get image dimensions
    # sct.printv('Get destination dimension', verbose)
    nx, ny, nz, nt, px, py, pz, pt = Image(fname_dest).dim
    # sct.printv('  matrix size: '+str(nx)+' x '+str(ny)+' x '+str(nz), verbose)
    # sct.printv('  voxel size:  '+str(px)+'mm x '+str(py)+'mm x '+str(pz)+'mm', verbose)

    # initialize
    data_warp = np.zeros((nx, ny, nz, 1, 3))

    # fill matrix
    data_warp[:, :, :, 0, 0] = -warp_x  # need to invert due to ITK conventions
    data_warp[:, :, :, 0, 1] = -warp_y  # need to invert due to ITK conventions

    # save warping field
    im_dest = load(fname_dest)
    hdr_dest = im_dest.get_header()
    hdr_warp = hdr_dest.copy()
    hdr_warp.set_intent('vector', (), '')
    hdr_warp.set_data_dtype('float32')
    img = Nifti1Image(data_warp, None, hdr_warp)
    save(img, fname_warp)
    sct.printv(' --> '+fname_warp, verbose)
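
A usage sketch; the destination image is hypothetical, and the displacement fields must match its grid:

import numpy as np
nx, ny, nz = 64, 64, 20  # assumed dimensions of dest.nii.gz
warp_x = np.zeros((nx, ny, nz))
warp_y = 2.0 * np.ones((nx, ny, nz))  # displace every voxel by 2 units along y
generate_warping_field('dest.nii.gz', warp_x, warp_y, fname_warp='warp_xy.nii.gz')
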
Code example #12
File: contrast.py  Project: cmaumet/nidmresults
    def export(self, nidm_version, export_dir):
        """
        Create prov graph.
        """
        if self.expl_mean_sq_file is None:
            # Create Contrast Explained Mean Square Map as fstat<num>.nii.gz
            # multiplied by sigmasquareds.nii.gz and save it in export_dir
            fstat_img = nib.load(self.stat_file)
            fstat = fstat_img.get_data()

            sigma_sq_img = nib.load(self.sigma_sq_file)
            sigma_sq = sigma_sq_img.get_data()

            expl_mean_sq = nib.Nifti1Image(
                fstat*sigma_sq, fstat_img.get_qform())

            self.filename = ("ContrastExplainedMeanSquareMap" +
                             self.num + ".nii.gz")
            self.expl_mean_sq_file = os.path.join(
                export_dir, self.filename)
            nib.save(expl_mean_sq, self.expl_mean_sq_file)

        self.file = NIDMFile(self.id, self.expl_mean_sq_file,
                             filename=self.filename,
                             sha=self.sha, fmt=self.fmt)

        # Contrast Explained Mean Square Map entity
        path, filename = os.path.split(self.expl_mean_sq_file)
        self.add_attributes((
            (PROV['type'], self.type),
            (NIDM_IN_COORDINATE_SPACE, self.coord_space.id),
            (PROV['label'], self.label)))
Code example #13
def save_vol(vol, output_filename=None, output_dir=None, basename=None,
             concat=False, **kwargs):
    """
    Saves a single volume to disk.

    """

    if output_filename is not None:
        nibabel.save(vol, output_filename)

        return output_filename
    else:
        if output_dir is None:
            raise ValueError(
                'One of output_filename and output_dir must be provided')

    if basename is not None:
        if isinstance(basename, list):
            basename = basename[:1]
        else:
            basename = [basename]

    # delegate to legacy save_vols
    return save_vols([vol], output_dir, basenames=basename,
                      concat=False, **kwargs)[0]
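
A usage sketch with an in-memory image and hypothetical output paths:

import numpy as np
import nibabel
vol = nibabel.Nifti1Image(np.zeros((4, 4, 4)), np.eye(4))
save_vol(vol, output_filename='/tmp/vol.nii.gz')         # direct filename
save_vol(vol, output_dir='/tmp', basename='vol.nii.gz')  # delegates to save_vols
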
Code example #14
File: _utils.py  Project: salma1601/process-asl
def fill_nan(in_file, fill_value=0.):
    """Replace nan values with a given value

    Parameters
    ----------
    in_file : str
        Path to image file

    fill_value : float, optional
        Value replacing nan

    Returns
    -------
    out_file : str
        Path to the filled file
    """
    img = nibabel.load(in_file)
    data = img.get_data()
    if np.any(np.isnan(data)):
        data[np.isnan(data)] = fill_value
    img = nibabel.Nifti1Image(data, img.get_affine(), img.get_header())
    out_file, _ = os.path.splitext(in_file)
    out_file += '_no_nan.nii'
    nibabel.save(img, out_file)
    return out_file
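
A usage sketch; the input path is hypothetical, and the output file gains a _no_nan.nii suffix:

out_file = fill_nan('/tmp/perfusion.nii', fill_value=0.)
print(out_file)  # /tmp/perfusion_no_nan.nii
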
Code example #15
def test_get_vox_dims():
    # setup
    affine = np.eye(4)
    np.fill_diagonal(affine, [-3, 3, 3])

    output_dir = os.path.join(OUTPUT_DIR, inspect.stack()[0][3])
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # 3D vol
    vol = create_random_image(affine=affine)
    np.testing.assert_array_equal(get_vox_dims(vol), [3, 3, 3])

    # 3D image file
    saved_img_filename = os.path.join(output_dir, "vol.nii.gz")
    nibabel.save(vol, saved_img_filename)
    np.testing.assert_array_equal(get_vox_dims(saved_img_filename), [3, 3, 3])

    # 4D niimg
    film = create_random_image(n_scans=10, affine=affine)
    np.testing.assert_array_equal(get_vox_dims(film), [3, 3, 3])

    # 4D image file
    film = create_random_image(n_scans=10, affine=affine)
    saved_img_filename = os.path.join(output_dir, "4D.nii.gz")
    nibabel.save(film, saved_img_filename)
    np.testing.assert_array_equal(get_vox_dims(saved_img_filename), [3, 3, 3])
Code example #16
def testset_predict(sess,clf,mean,std,masks,fold,img):
    print "Predicting subject :",sess
    flag = 0
    metric_all = np.array([])
    for sub in sess:
        metric = np.ones(20)
        test_set = read_test_sample(sub)
        TE = test_set
        #2. split x and y
        X_cv = TE[:,masks]
        y_cv = TE[:,-4]
        coor = TE[:,-3:]
        #3. Standardize
        X_true = (X_cv - mean)/std
        y_true = y_cv
        
        y_pred = clf.predict(X_true)
        
        labels = [0, 1, 2, 3, 4]
        for i, l in enumerate(labels):
            P = y_pred== l
            T = y_true== l
            di = dice(P,T)
            if np.isnan(di):
                metric[i] = 1
            else:
                metric[i] = di
        
        #4. Compute metrics
        precision = precision_score(y_true, y_pred,average=None)
        recall = recall_score(y_true, y_pred,average=None)
        f1 = f1_score(y_true,y_pred,average=None)
        
        if len(precision) == len(labels):
            metric[5:10] = precision
        if len(recall) == len(labels):
            metric[10:15] = recall
        if len(f1) == len(labels):
            metric[15:20] = f1
        
        #print metric
        if flag==0:
            metric_all = metric
            flag = 1
        else:
            metric_all = np.vstack((metric_all,metric))

        #construct the predicted image.
        tmp = np.zeros_like(img.get_data())

        for j,c in enumerate(coor):
            tmp[tuple(c)] = y_pred[j]
        img._data = tmp
        nib.save(img,"pred_img_flod_%d/pred_img_%s.nii.gz"%(fold,sub))
        print('Saving pred_img_%s done!' % sub)

    print(metric_all)
    filename = "test_metrics_sub_level_fold_%d.txt" % fold
    np.savetxt(filename, metric_all, fmt='%1.8e', delimiter=',', newline='\n')
    print("metric scores for the test set at subject level have been saved in %s" % filename)
Code example #17
File: genericHelpers.py  Project: pliu007/deepmedic
def saveImageToANewNiiWithHeaderFromOtherGivenExactFilePaths(labelImageCreatedByPredictions,
                                          fullFilenameToSaveWith,
                                          fullFilenameOfOriginalImageToCopyHeader,
                                          npDtype = np.dtype(np.float32),
                                          myLogger=None) :

    fullFilenameToSaveWith = os.path.abspath(fullFilenameToSaveWith) # Cleans the .././/...
    img_proxy_for_orig_image = nib.load(fullFilenameOfOriginalImageToCopyHeader)
    hdr_for_orig_image = img_proxy_for_orig_image.header
    
    affine_trans_to_ras = img_proxy_for_orig_image.affine
    
    newLabelImg = nib.Nifti1Image(labelImageCreatedByPredictions, affine_trans_to_ras) #Nifti Constructor. data is the image itself, dimensions x,y,z,time. The second argument is the affine RAS transf.
    newLabelImg.set_data_dtype(npDtype)

    dimensionsOfTheGivenArrayImageToSave = len(labelImageCreatedByPredictions.shape)
    newZooms = list(hdr_for_orig_image.get_zooms()[:dimensionsOfTheGivenArrayImageToSave])
    if len(newZooms) < dimensionsOfTheGivenArrayImageToSave:  # Eg if original image was 3D, but I need to save a multichannel image.
        newZooms = newZooms + [1.0] * (dimensionsOfTheGivenArrayImageToSave - len(newZooms))
    newLabelImg.header.set_zooms(newZooms)

    if not fullFilenameToSaveWith.endswith(".nii.gz"):
        fullFilenameToSaveWith = fullFilenameToSaveWith + ".nii.gz"
    nib.save(newLabelImg, fullFilenameToSaveWith)

    if myLogger is not None:
        myLogger.print3("Image saved at: " + str(fullFilenameToSaveWith))
    else:
        print("Image saved at: " + str(fullFilenameToSaveWith))
Code example #18
	def _run_interface(self, runtime):
		import nibabel as nib
		import numpy as np
		from dipy.denoise.nlmeans import nlmeans
		from nipype.utils.filemanip import split_filename

		fname = self.inputs.in_file
		img = nib.load(fname)
		data = img.get_data()
		affine = img.get_affine()
		mask = data[..., 0] > 80
		a = data.shape

		denoised_data = np.ndarray(shape=data.shape)
		for image in range(a[3]):
		    print(str(image + 1) + '/' + str(a[3]))
		    dat = data[...,image]
		    sigma = np.std(dat[~mask]) # Calculating the standard deviation of the noise
		    den = nlmeans(dat, sigma=sigma, mask=mask)
		    denoised_data[:,:,:,image] = den

		_, base, _ = split_filename(fname)
		nib.save(nib.Nifti1Image(denoised_data, affine), base + '_denoised.nii')

		return runtime
Code example #19
	def _run_interface(self, runtime):
		from nilearn.input_data import NiftiMasker, NiftiLabelsMasker
		from nipype.utils.filemanip import split_filename
		import nibabel as nib
		import os

		functional_filename = self.inputs.in_file
		atlas_filename = self.inputs.atlas_filename
		mask_filename = self.inputs.mask_filename

		# Extracting the ROI signals
		masker = NiftiLabelsMasker(labels_img=atlas_filename,
                           background_label = 0,
                           standardize=True,
                           detrend = True,
                           verbose = 1
                           )
		time_series = masker.fit_transform(functional_filename)

		# Removing the ROI signal from the time series
		nifti_masker = NiftiMasker(mask_img=mask_filename)
		masked_data = nifti_masker.fit_transform(functional_filename, confounds=time_series[...,0])
		masked_img = nifti_masker.inverse_transform(masked_data)

		# Saving the result to disk
		outputs = self._outputs().get()
		fname = self.inputs.in_file
		_, base, _ = split_filename(fname)
		nib.save(masked_img, os.path.abspath(base + '_regressed.nii.gz'))
		return runtime
Code example #20
File: utils.py  Project: MartinPerez/unicog
def sphere(file_roi, vol, mm_x=0, mm_y=0, mm_z=0, radius=4, dtype=np.uint8):
    img = nibabel.load(vol)
    header = img.get_header()
    voxels_size = header.get_zooms()
    coord_center_voxel = mm_to_voxel(vol, [[mm_x, mm_y, mm_z]])
    shape_x, shape_y, shape_z = img.shape[0], img.shape[1], img.shape[2]
    roi = np.zeros((shape_x, shape_y, shape_z))

    radius = radius / voxels_size[0]
    if radius > 1:
        radius -= 1

    first_x = coord_center_voxel[0][0] - radius
    first_y = coord_center_voxel[0][1] - radius
    first_z = coord_center_voxel[0][2] - radius

    elem = morphology.ball(radius)
    
    # check voxel size is isotropic
    # check radius is at least one voxel
    for x in range(elem.shape[0]):
        for y in range(elem.shape[1]):
            for z in range(elem.shape[2]):
                x_ = int(first_x) + x
                y_ = int(first_y) + y
                z_ = int(first_z) + z
                roi[x_, y_, z_] = elem[x, y, z]

    roi_img = nibabel.Nifti1Image(roi, img.get_affine(), img.get_header())
    nibabel.save(roi_img, file_roi)
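
A usage sketch with a hypothetical reference volume; the module-level helpers (mm_to_voxel, morphology) are assumed available, and the sphere is centered at the given mm coordinates:

import numpy as np
sphere('roi_sphere.nii.gz', 'anat.nii.gz', mm_x=-42, mm_y=16, mm_z=28, radius=6, dtype=np.uint8)
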
Code example #21
	def _run_interface(self, runtime):
		import dipy.reconst.dti as dti
		import dipy.denoise.noise_estimate as ne
		from dipy.core.gradients import gradient_table
		from nipype.utils.filemanip import split_filename

		import nibabel as nib

		fname = self.inputs.in_file
		img = nib.load(fname)
		data = img.get_data()
		affine = img.get_affine()

		bvals = self.inputs.bval
		bvecs = self.inputs.bvec

		gtab = gradient_table(bvals, bvecs)
		sigma = ne.estimate_sigma(data)
		tenmodel = dti.TensorModel(gtab, fit_method='RESTORE', sigma=sigma)
		dtifit = tenmodel.fit(data)
		fa = dtifit.fa

		_, base, _ = split_filename(fname)
		nib.save(nib.Nifti1Image(fa, affine), base + '_FA.nii')

		return runtime
Code example #22
def siemens_ph_wrap(in_file, out_file=None, irange=8192, orange=4096.00):
    import nibabel as nb
    import os.path as op
    import numpy as np
    import math

    if out_file is None:
        fname, fext = op.splitext(op.basename(in_file))
        if fext == '.gz':
            fname, _ = op.splitext(fname)
        out_file = op.abspath('./%s_wrapped.nii.gz' % fname)

    im = nb.load(in_file)
    imdata = im.get_data().astype(np.float32)
    imdata = (imdata - np.median(imdata)).astype(np.int16)

    imax = int(irange * 0.5)
    imin = int(irange * -0.5) + 1

    imdata[imdata > imax] = imdata[imdata > imax] - irange
    imdata[imdata < imin] = imdata[imdata < imin] + irange

    imdata = imdata.astype(np.float32) / (1.0 * irange)
    imdata = imdata * orange

    hdr = im.get_header().copy()
    hdr.set_data_dtype(np.int16)
    hdr['datatype'] = 4

    nii = nb.Nifti1Image(imdata.astype(np.int16), im.get_affine(), hdr)
    nb.save(nii, out_file)
    return out_file
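
A usage sketch rewrapping a hypothetical Siemens phase image into the default output range:

wrapped = siemens_ph_wrap('phase.nii.gz', irange=8192, orange=4096.0)
print(wrapped)  # .../phase_wrapped.nii.gz
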
Code example #23
File: test_reconst_dti.py  Project: MarcCote/dipy
def reconst_flow_core(flow, extra_args=[]):
    with TemporaryDirectory() as out_dir:
        data_path, bval_path, bvec_path = get_data('small_25')
        vol_img = nib.load(data_path)
        volume = vol_img.get_data()
        mask = np.ones_like(volume[:, :, :, 0])
        mask_img = nib.Nifti1Image(mask.astype(np.uint8), vol_img.affine)
        mask_path = join(out_dir, 'tmp_mask.nii.gz')
        nib.save(mask_img, mask_path)

        dti_flow = flow()

        args = [data_path, bval_path, bvec_path, mask_path]
        args.extend(extra_args)

        dti_flow.run(*args, out_dir=out_dir)

        fa_path = dti_flow.last_generated_outputs['out_fa']
        fa_data = nib.load(fa_path).get_data()
        assert_equal(fa_data.shape, volume.shape[:-1])

        tensor_path = dti_flow.last_generated_outputs['out_tensor']
        tensor_data = nib.load(tensor_path)
        assert_equal(tensor_data.shape[-1], 6)
        assert_equal(tensor_data.shape[:-1], volume.shape[:-1])

        ga_path = dti_flow.last_generated_outputs['out_ga']
        ga_data = nib.load(ga_path).get_data()
        assert_equal(ga_data.shape, volume.shape[:-1])

        rgb_path = dti_flow.last_generated_outputs['out_rgb']
        rgb_data = nib.load(rgb_path)
        assert_equal(rgb_data.shape[-1], 3)
        assert_equal(rgb_data.shape[:-1], volume.shape[:-1])

        md_path = dti_flow.last_generated_outputs['out_md']
        md_data = nib.load(md_path).get_data()
        assert_equal(md_data.shape, volume.shape[:-1])

        ad_path = dti_flow.last_generated_outputs['out_ad']
        ad_data = nib.load(ad_path).get_data()
        assert_equal(ad_data.shape, volume.shape[:-1])

        rd_path = dti_flow.last_generated_outputs['out_rd']
        rd_data = nib.load(rd_path).get_data()
        assert_equal(rd_data.shape, volume.shape[:-1])

        mode_path = dti_flow.last_generated_outputs['out_mode']
        mode_data = nib.load(mode_path).get_data()
        assert_equal(mode_data.shape, volume.shape[:-1])

        evecs_path = dti_flow.last_generated_outputs['out_evec']
        evecs_data = nib.load(evecs_path).get_data()
        assert_equal(evecs_data.shape[-2:], tuple((3, 3)))
        assert_equal(evecs_data.shape[:-2], volume.shape[:-1])

        evals_path = dti_flow.last_generated_outputs['out_eval']
        evals_data = nib.load(evals_path).get_data()
        assert_equal(evals_data.shape[-1], 3)
        assert_equal(evals_data.shape[:-1], volume.shape[:-1])
Code example #24
def labels_to_rd(labels_ct_native_nii, rd_nii, correct_cta, output_dir):
    """ Register the rd to the ct space.
    """

    # Output autocompletion
    labels_rescale_file = os.path.join(output_dir, "BrainMask_to_rd.nii.gz")

    # Load images
    labels_ct_native_im = nibabel.load(labels_ct_native_nii)
    labels_data = labels_ct_native_im.get_data()
    rd_im = nibabel.load(rd_nii)
    rd_data = rd_im.get_data()
    cta = labels_ct_native_im.get_affine()
    rda = rd_im.get_affine()

    # Correct the rda affine matrix
    print(cta)
    #cta[1, 3] += 21
    cta[2, 2] = correct_cta
    print(cta)


    # Inverse affine transformation
    icta = inverse_affine(cta)
    t = numpy.dot(icta, rda)
    numpy.savetxt(os.path.join(output_dir,"t_icta_rda.txt"), t)
    # Matricial dot product
    labels_rescale = numpy.zeros(rd_data.shape)
    dot_image = numpy.zeros(rd_data.shape + (3, ))
    x = numpy.linspace(0, rd_data.shape[0] - 1, rd_data.shape[0])
    y = numpy.linspace(0, rd_data.shape[1] - 1, rd_data.shape[1])
    z = numpy.linspace(0, rd_data.shape[2] - 1, rd_data.shape[2])
    xg, yg, zg = numpy.meshgrid(y, x, z)
    print('dot image shape: ', dot_image.shape)
    print('yg shape: ', yg.shape)
    print('xg shape: ', xg.shape)
    print('zg shape: ', zg.shape)
    dot_image[..., 0] = yg
    dot_image[..., 1] = xg
    dot_image[..., 2] = zg
    dot_image = threed_dot(t, dot_image)

    cnt = 0
    print(rd_data.size)
    for x in range(rd_data.shape[0]):
        for y in range(rd_data.shape[1]):
            for z in range(rd_data.shape[2]):
            #for z in range(cut_brain_index, rd_data.shape[2]):
                if cnt % 100000 == 0:
                    print(cnt)
                cnt += 1
                voxel_labels = dot_image[x, y, z]
                if (voxel_labels > 0).all() and (voxel_labels < (numpy.asarray(labels_data.shape) - 1)).all():
                    voxel_labels = numpy.round(voxel_labels).astype(int)
                    labels_rescale[x, y, z] = labels_data[voxel_labels[0], voxel_labels[1], voxel_labels[2]]

    labels_rescale_im = nibabel.Nifti1Image(labels_rescale, rda)
    nibabel.save(labels_rescale_im, labels_rescale_file)

    return labels_rescale_file
Code example #25
File: misc.py  Project: B-Rich/nipype
    def _run_interface(self, runtime):
        nii1 = nb.load(self.inputs.volume1)
        nii2 = nb.load(self.inputs.volume2)

        origdata1 = np.logical_not(np.logical_or(nii1.get_data() == 0, np.isnan(nii1.get_data())))
        origdata2 = np.logical_not(np.logical_or(nii2.get_data() == 0, np.isnan(nii2.get_data())))

        if isdefined(self.inputs.mask_volume):
            maskdata = nb.load(self.inputs.mask_volume).get_data()
            maskdata = np.logical_not(np.logical_or(maskdata == 0, np.isnan(maskdata)))
            origdata1 = np.logical_and(maskdata, origdata1)
            origdata2 = np.logical_and(maskdata, origdata2)

        for method in ("dice", "jaccard"):
            setattr(self, "_" + method, self._bool_vec_dissimilarity(origdata1, origdata2, method=method))

        self._volume = int(origdata1.sum() - origdata2.sum())

        both_data = np.zeros(origdata1.shape)
        both_data[origdata1] = 1
        both_data[origdata2] += 2

        nb.save(nb.Nifti1Image(both_data, nii1.get_affine(), nii1.get_header()), self.inputs.out_file)

        return runtime
Code example #26
def data_for_neuronal_network(seg_src_path, seg_dest_path):

    ih.createPathIfNotExists(seg_dest_path)

    seg_list = os.listdir(seg_src_path) # seg_list: list of all files in segmentation directory

    print ("List of input files:")
    print (seg_list)

    file_num = 0

    for f in seg_list:
        print("file number: " + str(file_num))
        print("read file...", f)

        path = seg_src_path + "/" + f
        print(path)

        image = nib.load(path)
        headers = image.header
        # print(headers)

        matrix = image.affine
       # matrix[np.where(matrix > 1)] = 1
       # matrix[np.where(matrix < 1)] = 0
        print(matrix)

        # manipulations.....
        #numbers

        dest_file = seg_dest_path + "/" + f
        nib.save(image, dest_file)
Code example #27
def reorient_image(input_axes, in_file, output_dir):
    """ Rectify the orientation of an image.
    """
    # get the transformation to the RAS space
    rotation = swap_affine(input_axes)
    det = numpy.linalg.det(rotation)
    if det != 1:
        raise Exception("Determinant must be equal to "
                        "one got: {0}.".format(det))

    # load image
    image = nibabel.load(in_file)

    # get affine transform (qform or sform)
    affine = image.get_affine()

    # apply transformation
    transformation = numpy.dot(rotation, affine)
    image.set_qform(transformation)
    image.set_sform(transformation)

    # save result
    reoriented_file = os.path.join(output_dir, "im_reorient.nii.gz")
    nibabel.save(image, reoriented_file)

    return reoriented_file
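
A usage sketch; the axis code and paths are hypothetical, and the module-level swap_affine helper is assumed available:

reoriented = reorient_image('RAS', '/tmp/anat.nii.gz', '/tmp')
print(reoriented)  # /tmp/im_reorient.nii.gz
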
Code example #28
File: fixtures.py  Project: shoshber/nipype
def nifti_image_files(outdir, filelist, shape):
    for f in filelist:
        hdr = nb.Nifti1Header()
        hdr.set_data_shape(shape)
        img = np.random.random(shape)
        nb.save(nb.Nifti1Image(img, np.eye(4), hdr),
                 os.path.join(outdir, f))
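
A usage sketch writing two random test images into a temporary directory:

import tempfile
outdir = tempfile.mkdtemp()
nifti_image_files(outdir, ['a.nii.gz', 'b.nii.gz'], shape=(8, 8, 8))
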
Code example #29
File: base.py  Project: sealhuang/nipytools
def save2nifti(data, affine, file_name):
    """
    Save data to a nifti file.

    """
    img = nib.Nifti1Image(data, affine)
    nib.save(img, file_name)
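
A usage sketch saving a random volume with an identity affine:

import numpy as np
save2nifti(np.random.rand(16, 16, 16).astype(np.float32), np.eye(4), 'random_vol.nii.gz')
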
Code example #30
 def save(self, path, result):
     """
     When we aggregate a result we store new single subject results
     """
     
     name_ = result.name
     radius_ = int(result._radius)
     map_ = result._map

     print(map_.get_affine())
     
     # If map is obtained via folding we average across maps
     if len(map_.get_data().shape) > 3: 
         mean_map = map_.get_data().mean(axis=3)
         mean_img = ni.Nifti1Image(mean_map, affine=map_.get_affine())
         fname = "%s_radius_%s_searchlight_mean_map.nii.gz" % (name_, str(radius_))    
         ni.save(mean_img, os.path.join(path,fname))
         return_map = mean_map
     else:
         return_map = map_.get_data()
             
     fname = "%s_radius_%s_searchlight_total_map.nii.gz" % (name_, str(radius_))
     ni.save(map_, os.path.join(path,fname))
     
     result._mean_map = return_map
Code example #31
File: register.py  Project: CaseyWeiner/m2g
    def tissue2dwi_align(self):
        """alignment of ventricle and CC ROI's from MNI space --> dwi and CC and CSF from T1w space --> dwi
        A function to generate and perform dwi space alignment of avoidance/waypoint masks for tractography.
        First creates ventricle and CC ROI. Then creates transforms from stock MNI template to dwi space.
        NOTE: for this to work, must first have called both t1w2dwi_align and atlas2t1w2dwi_align.
        Raises
        ------
        ValueError
            Raised if FSL atlas for ventricle reference not found
        """

        # Create MNI-space ventricle mask
        print("Creating MNI-space ventricle ROI...")
        if not os.path.isfile(self.mni_atlas):
            raise ValueError("FSL atlas for ventricle reference not found!")
        cmd = f"fslmaths {self.mni_vent_loc} -thr 0.1 -bin {self.mni_vent_loc}"
        gen_utils.run(cmd)

        cmd = f"fslmaths {self.corpuscallosum} -bin {self.corpuscallosum}"
        gen_utils.run(cmd)

        cmd = f"fslmaths {self.corpuscallosum} -sub {self.mni_vent_loc} -bin {self.corpuscallosum}"
        gen_utils.run(cmd)

        # Create a transform from the atlas onto T1w. This will be used to transform the ventricles to dwi space.
        reg_utils.align(
            self.mni_atlas,
            self.input_mni,
            xfm=self.xfm_roi2mni_init,
            init=None,
            bins=None,
            dof=6,
            cost="mutualinfo",
            searchrad=True,
            interp="spline",
            out=None,
        )

        # Create transform to align roi to mni and T1w using flirt
        reg_utils.applyxfm(self.input_mni, self.mni_vent_loc,
                           self.xfm_roi2mni_init, self.vent_mask_mni)

        if self.simple is False:
            # Apply warp resulting from the inverse MNI->T1w created earlier
            reg_utils.apply_warp(
                self.t1w_brain,
                self.vent_mask_mni,
                self.vent_mask_t1w,
                warp=self.mni2t1w_warp,
                interp="nn",
                sup=True,
            )

            # Apply warp resulting from the inverse MNI->T1w created earlier
            reg_utils.apply_warp(
                self.t1w_brain,
                self.corpuscallosum,
                self.corpuscallosum_mask_t1w,
                warp=self.mni2t1w_warp,
                interp="nn",
                sup=True,
            )

        # Applyxfm tissue maps to dwi space
        reg_utils.applyxfm(
            self.nodif_B0,
            self.vent_mask_t1w,
            self.t1wtissue2dwi_xfm,
            self.vent_mask_dwi,
        )
        reg_mri_pngs(self.vent_mask_dwi, self.nodif_B0, self.qa_reg)
        reg_utils.applyxfm(
            self.nodif_B0,
            self.corpuscallosum_mask_t1w,
            self.t1wtissue2dwi_xfm,
            self.corpuscallosum_dwi,
        )
        reg_mri_pngs(self.corpuscallosum_dwi, self.nodif_B0, self.qa_reg)
        reg_utils.applyxfm(self.nodif_B0, self.csf_mask,
                           self.t1wtissue2dwi_xfm, self.csf_mask_dwi)
        reg_mri_pngs(self.csf_mask_dwi, self.nodif_B0, self.qa_reg)
        reg_utils.applyxfm(self.nodif_B0, self.gm_mask, self.t1wtissue2dwi_xfm,
                           self.gm_in_dwi)
        reg_mri_pngs(self.gm_in_dwi, self.nodif_B0, self.qa_reg)
        reg_utils.applyxfm(self.nodif_B0, self.wm_mask, self.t1wtissue2dwi_xfm,
                           self.wm_in_dwi)
        reg_mri_pngs(self.wm_in_dwi, self.nodif_B0, self.qa_reg)

        # Threshold WM to binary in dwi space
        thr_img = nib.load(self.wm_in_dwi)
        thr_img.get_data()[thr_img.get_data() < 0.15] = 0
        nib.save(thr_img, self.wm_in_dwi_bin)

        # Threshold GM to binary in dwi space
        thr_img = nib.load(self.gm_in_dwi)
        thr_img.get_data()[thr_img.get_data() < 0.15] = 0
        nib.save(thr_img, self.gm_in_dwi_bin)

        # Threshold CSF to binary in dwi space
        thr_img = nib.load(self.csf_mask_dwi)
        thr_img.get_data()[thr_img.get_data() < 0.99] = 0
        nib.save(thr_img, self.csf_mask_dwi)

        # Threshold WM to binary in dwi space
        self.t_img = load_img(self.wm_in_dwi_bin)
        self.mask = math_img("img > 0", img=self.t_img)
        self.mask.to_filename(self.wm_in_dwi_bin)

        # Threshold GM to binary in dwi space
        self.t_img = load_img(self.gm_in_dwi_bin)
        self.mask = math_img("img > 0", img=self.t_img)
        self.mask.to_filename(self.gm_in_dwi_bin)

        # Threshold CSF to binary in dwi space
        self.t_img = load_img(self.csf_mask_dwi)
        self.mask = math_img("img > 0", img=self.t_img)
        self.mask.to_filename(self.csf_mask_dwi_bin)

        # Create ventricular CSF mask
        print("Creating ventricular CSF mask...")
        cmd = f"fslmaths {self.vent_mask_dwi} -kernel sphere 10 -ero -bin {self.vent_mask_dwi}"
        gen_utils.run(cmd)
        print("Creating Corpus Callosum mask...")
        cmd = f"fslmaths {self.corpuscallosum_dwi} -mas {self.wm_in_dwi_bin} -bin {self.corpuscallosum_dwi}"
        gen_utils.run(cmd)
        cmd = f"fslmaths {self.csf_mask_dwi} -add {self.vent_mask_dwi} -bin {self.vent_csf_in_dwi}"
        gen_utils.run(cmd)

        # Create gm-wm interface image
        cmd = (
            f"fslmaths {self.gm_in_dwi_bin} -mul {self.wm_in_dwi_bin} -add {self.corpuscallosum_dwi} "
            f"-sub {self.vent_csf_in_dwi} -mas {self.nodif_B0_mask} -bin {self.wm_gm_int_in_dwi}"
        )
        gen_utils.run(cmd)
Code example #32
import numpy as np
import nibabel as nib
import os
import glob

np_dir = '/data/hejy/MedicalZooPytorch_2cls/datasets/MICCAI_2020_ribfrac/generated/train_vol_512x512x96_0.6'
nii_out_dir = '/data/hejy/MedicalZooPytorch_2cls/datasets/MICCAI_2020_ribfrac/generated/train_vol_512x512x96_0.6_nii'

if not os.path.exists(nii_out_dir):
    os.makedirs(nii_out_dir)
for i, np_path in enumerate(glob.glob(np_dir+'/*seg.npy')):
    if i % 100 == 0:
        print(i)
    
    nii_save_name = os.path.join(nii_out_dir, os.path.basename(np_path).split('.')[0] + '.nii.gz')
    np_file = np.load(np_path)
    if len(np_file.shape) == 3:
        np_file = np_file.squeeze(0)
    else:
        _, h, w, d = np_file.shape
        np_file_new = np.zeros((h,w,d))
        index_ = np.where(np_file!=0)
        np_file_new[(index_[1], index_[2], index_[3])] = index_[0]
        np_file = np_file_new
       

    new_image = nib.Nifti1Image(np_file, np.eye(4))
    nib.save(new_image, nii_save_name)
Code example #33
k_data = np.zeros((91, 109, 91, 7))
z_data = np.zeros((91, 109, 91, 7))
thresh = np.zeros((91, 109, 91))
maxval_per_maxK = np.zeros((91, 109, 91, 7))
lengths = [10, 15, 20, 25, 30, 35, 40]

for i in range(len(lengths)):
    data = nib.load(datadir + str(lengths[i]) +
                    '_sec_events.nii.gz').get_data()
    z = nib.load(datadir + str(lengths[i]) + '_sec_events_Z.nii.gz').get_data()
    k_data[:, :, :, i] = data
    z_data[:, :, :, i] = z

max_K = np.argmax(k_data, axis=3)
max_K[np.sum(k_data, axis=3) == 0] = 0

for i in range(91):
    for j in range(109):
        for k in range(91):
            thresh[i, j, k] = z_data[i, j, k, max_K[i, j, k]]

max_K[thresh < 0] = 0

# save final map as nifti
maxval = np.max(max_K)
minval = np.min(max_K)
img = nib.Nifti1Image(max_K, affine=nii_template.affine)
img.header['cal_min'] = minval
img.header['cal_max'] = maxval
nib.save(img, datadir + 'best_event_length_map.nii.gz')
Code example #34
def mask(in_path, out_path, mask_path):
    in_volume = np.flipud(nib.load(in_path).get_data())
    mask_volume = binary_fill_holes(np.flipud(nib.load(mask_path).get_data()))
    out_volume = np.multiply(in_volume, mask_volume).astype(in_volume.dtype)
    nib.save(nib.Nifti1Image(out_volume, np.eye(4)), out_path)
    return
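
A usage sketch with hypothetical input, output, and mask paths; the function needs numpy, nibabel, and scipy.ndimage.binary_fill_holes in scope, and the mask is hole-filled before multiplication:

mask('dwi_b0.nii.gz', 'dwi_b0_masked.nii.gz', 'brain_mask.nii.gz')
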
Code example #35
                    for qq in range(0, imgsize[3]):
                        myvoxel_zeroed[idx_sort[qq]] = myvoxel[qq]
                    # Pass voxel through SARDU-net
                    myvoxel_up = net(Tensor(myvoxel_zeroed))
                    myvoxel_up = myvoxel_up.detach().numpy()
                    # Bring voxel back to original signal space
                    myvoxel_up = myvoxel_up * (smax - smin) + smin
                    # Store voxel
                    sout_data[xx, yy, zz, :] = myvoxel_up

    # Save the predicted NIFTI file as output
    print('')
    print('     ... saving output file as 4D NIFTI ...')
    buffer_header = sin_obj.header
    if (nbit == 64):
        buffer_header.set_data_dtype(
            'float64'
        )  # Make sure we save output files as float64, even if input is not
    elif (nbit == 32):
        buffer_header.set_data_dtype(
            'float32'
        )  # Make sure we save output files as float32, even if input is not
    sout_obj = nib.Nifti1Image(sout_data, sin_obj.affine, buffer_header)
    nib.save(sout_obj, args.nifti_out)

    print('')
    print('     Done!')
    print('')

    sys.exit(0)
Code example #36
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO)

    assert_inputs_exist(parser,
                        [args.input, args.bvals, args.bvecs, args.frf_file])
    assert_outputs_exist(parser, args, args.out_fODF)

    # Loading data
    full_frf = np.loadtxt(args.frf_file)
    vol = nib.load(args.input)
    data = vol.get_fdata(dtype=np.float32)
    bvals, bvecs = read_bvals_bvecs(args.bvals, args.bvecs)

    # Checking mask
    if args.mask is None:
        mask = None
    else:
        mask = get_data_as_mask(nib.load(args.mask), dtype=bool)
        if mask.shape != data.shape[:-1]:
            raise ValueError("Mask is not the same shape as data.")

    sh_order = args.sh_order

    # Checking data and sh_order
    check_b0_threshold(args.force_b0_threshold, bvals.min())
    if data.shape[-1] < (sh_order + 1) * (sh_order + 2) / 2:
        logging.warning(
            'We recommend having at least {} unique DWI volumes, but you '
            'currently have {} volumes. Try lowering the parameter sh_order '
            'in case of non convergence.'.format(
                (sh_order + 1) * (sh_order + 2) / 2, data.shape[-1]))

    # Checking bvals, bvecs values and loading gtab
    if not is_normalized_bvecs(bvecs):
        logging.warning('Your b-vectors do not seem normalized...')
        bvecs = normalize_bvecs(bvecs)
    gtab = gradient_table(bvals, bvecs, b0_threshold=bvals.min())

    # Checking full_frf and separating it
    if full_frf.shape[0] != 4:
        raise ValueError('FRF file did not contain 4 elements. '
                         'Invalid or deprecated FRF format')
    frf = full_frf[0:3]
    mean_b0_val = full_frf[3]

    # Loading the sphere
    reg_sphere = get_sphere('symmetric362')

    # Computing CSD
    csd_model = ConstrainedSphericalDeconvModel(gtab, (frf, mean_b0_val),
                                                reg_sphere=reg_sphere,
                                                sh_order=sh_order)

    # Computing CSD fit
    csd_fit = fit_from_model(csd_model,
                             data,
                             mask=mask,
                             nbr_processes=args.nbr_processes)

    # Saving results
    shm_coeff = csd_fit.shm_coeff
    if args.sh_basis == 'tournier07':
        shm_coeff = convert_sh_basis(shm_coeff,
                                     reg_sphere,
                                     mask=mask,
                                     nbr_processes=args.nbr_processes)
    nib.save(nib.Nifti1Image(shm_coeff.astype(np.float32), vol.affine),
             args.out_fODF)
Code example #37
end = time.time()
time_elapsed = end - start

print("\nDone (in %2.2f seconds)!" % (time_elapsed))

# save the normalised voxel-wise mean
if apply_norm:

    # save the volume containing the normalised voxel-wise mean
    print('Saving the normalised voxel-wise mean volume at %s...' %
          (out_path), end='')
    sys.stdout.flush()
    nifti_to_save = nib.Nifti1Image(norm_voxelwise_mean,
                                    affine=data_header.get_sform(),
                                    header=data_header)
    nib.save(nifti_to_save, os.path.join(out_path,
                                         'norm_voxelwise_mean.nii.gz'))
    print("Done.")

# save the volume containing the voxel-wise mean
print('Saving the voxel-wise mean volume at %s...' % (out_path), end='')
sys.stdout.flush()
nifti_to_save = nib.Nifti1Image(voxelwise_mean,
                                affine=data_header.get_sform(),
                                header=data_header)
nib.save(nifti_to_save, os.path.join(out_path, 'voxelwise_mean.nii.gz'))
print("Done.")

# at this point, the variable average_subject contains the actual saved volume
# for this reason, don't load it from the .nii.gz but use the variable instead
# compute the voxel-wise standard deviation
print("Computing the voxel-wise standard deviation...")
Code example #38
from monai.data.synthetic import create_test_image_3d
from monai.handlers.utils import stopping_fn_from_metric
from monai.networks.utils import predict_segmentation

monai.config.print_config()
logging.basicConfig(stream=sys.stdout, level=logging.INFO)

# Create a temporary directory and 40 random image, mask pairs
tempdir = tempfile.mkdtemp()
print(
    'generating synthetic data to {} (this may take a while)'.format(tempdir))
for i in range(40):
    im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1)

    n = nib.Nifti1Image(im, np.eye(4))
    nib.save(n, os.path.join(tempdir, 'im%i.nii.gz' % i))

    n = nib.Nifti1Image(seg, np.eye(4))
    nib.save(n, os.path.join(tempdir, 'seg%i.nii.gz' % i))

images = sorted(glob(os.path.join(tempdir, 'im*.nii.gz')))
segs = sorted(glob(os.path.join(tempdir, 'seg*.nii.gz')))

# Define transforms for image and segmentation
train_imtrans = transforms.Compose(
    [Rescale(), AddChannel(),
     UniformRandomPatch((96, 96, 96))])
train_segtrans = transforms.Compose(
    [AddChannel(), UniformRandomPatch((96, 96, 96))])
val_imtrans = transforms.Compose(
    [Rescale(), AddChannel(), Resize((96, 96, 96))])
Code example #39
File: io.py  Project: ekonyagin/miccai2020paper1600
def save_nii(path_to_file, img):
    """Saves ``img`` of type `numpy.ndarray` in nifty format."""
    nb.save(nb.Nifti1Image(img, np.eye(4)), path_to_file)
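
A usage sketch saving an all-zero volume under a hypothetical name:

import numpy as np
save_nii('pred_seg.nii.gz', np.zeros((32, 32, 32), dtype=np.float32))
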
Code example #40
File: register.py  Project: CaseyWeiner/m2g
    def atlas2t1w2dwi_align(self, atlas, dsn=True):
        """alignment from atlas to t1w to dwi. A function to perform atlas alignmet. Tries nonlinear registration first, and if that fails, does a liner
        registration instead.
        Note: for this to work, must first have called t1w2dwi_align.
        Parameters
        ----------
        atlas : str
            path to atlas file you want to use
        dsn : bool, optional
            is your space for tractography native-dsn, by default True
        Returns
        -------
        str
            path to aligned atlas file
        """

        self.atlas = atlas
        self.atlas_name = gen_utils.get_filename(self.atlas)
        self.aligned_atlas_t1mni = (
            f"{self.reg_a}/{self.atlas_name}_aligned_atlas_t1w_mni.nii.gz")
        self.aligned_atlas_skull = (
            f"{self.reg_a}/{self.atlas_name}_aligned_atlas_skull.nii.gz")
        self.dwi_aligned_atlas = (
            f"{self.reg_anat}/{self.atlas_name}_aligned_atlas.nii.gz")

        reg_utils.align(
            self.atlas,
            self.t1_aligned_mni,
            init=None,
            xfm=None,
            out=self.aligned_atlas_t1mni,
            dof=12,
            searchrad=True,
            interp="nearestneighbour",
            cost="mutualinfo",
        )

        if (self.simple is False) and (dsn is False):
            try:
                # Apply warp resulting from the inverse of T1w-->MNI created earlier
                reg_utils.apply_warp(
                    self.t1w_brain,
                    self.aligned_atlas_t1mni,
                    self.aligned_atlas_skull,
                    warp=self.mni2t1w_warp,
                    interp="nn",
                    sup=True,
                )

                # Apply transform to dwi space
                reg_utils.align(
                    self.aligned_atlas_skull,
                    self.nodif_B0,
                    init=self.t1wtissue2dwi_xfm,
                    xfm=None,
                    out=self.dwi_aligned_atlas,
                    dof=6,
                    searchrad=True,
                    interp="nearestneighbour",
                    cost="mutualinfo",
                )
            except Exception:
                print(
                    "Warning: Atlas is not in correct dimensions, or input is low quality,\nusing linear template registration."
                )
                # Create transform to align atlas to T1w using flirt
                reg_utils.align(
                    self.atlas,
                    self.t1w_brain,
                    xfm=self.xfm_atlas2t1w_init,
                    init=None,
                    bins=None,
                    dof=6,
                    cost="mutualinfo",
                    searchrad=True,
                    interp="spline",
                    out=None,
                    sch=None,
                )
                reg_utils.align(
                    self.atlas,
                    self.t1_aligned_mni,
                    xfm=self.xfm_atlas2t1w,
                    out=None,
                    dof=6,
                    searchrad=True,
                    bins=None,
                    interp="spline",
                    cost="mutualinfo",
                    init=self.xfm_atlas2t1w_init,
                )

                # Combine our linear transform from t1w to template with our transform from dwi to t1w space to get a transform from atlas ->(-> t1w ->)-> dwi
                reg_utils.combine_xfms(self.xfm_atlas2t1w,
                                       self.t1wtissue2dwi_xfm,
                                       self.temp2dwi_xfm)

                # Apply linear transformation from template to dwi space
                reg_utils.applyxfm(self.nodif_B0, self.atlas,
                                   self.temp2dwi_xfm, self.dwi_aligned_atlas)
        elif dsn is False:
            # Create transform to align atlas to T1w using flirt
            reg_utils.align(
                self.atlas,
                self.t1w_brain,
                xfm=self.xfm_atlas2t1w_init,
                init=None,
                bins=None,
                dof=6,
                cost="mutualinfo",
                searchrad=None,
                interp="spline",
                out=None,
                sch=None,
            )
            reg_utils.align(
                self.atlas,
                self.t1w_brain,
                xfm=self.xfm_atlas2t1w,
                out=None,
                dof=6,
                searchrad=True,
                bins=None,
                interp="spline",
                cost="mutualinfo",
                init=self.xfm_atlas2t1w_init,
            )

            # Combine our linear transform from t1w to template with our transform from dwi to t1w space to get a transform from atlas ->(-> t1w ->)-> dwi
            reg_utils.combine_xfms(self.xfm_atlas2t1w, self.t1wtissue2dwi_xfm,
                                   self.temp2dwi_xfm)

            # Apply linear transformation from template to dwi space
            reg_utils.applyxfm(self.nodif_B0, self.atlas, self.temp2dwi_xfm,
                               self.dwi_aligned_atlas)
        else:
            pass

        # Set intensities to int
        if dsn is False:
            self.atlas_img = nib.load(self.dwi_aligned_atlas)
        else:
            self.atlas_img = nib.load(self.aligned_atlas_t1mni)
        self.atlas_data = np.around(self.atlas_img.get_data()).astype("int16")
        node_num = len(np.unique(self.atlas_data))
        self.atlas_data[self.atlas_data > node_num] = 0

        t_img = load_img(self.wm_gm_int_in_dwi)
        mask = math_img("img > 0", img=t_img)
        mask.to_filename(self.wm_gm_int_in_dwi_bin)

        if dsn is False:
            nib.save(
                nib.Nifti1Image(
                    self.atlas_data.astype(np.int32),
                    affine=self.atlas_img.affine,
                    header=self.atlas_img.header,
                ),
                self.dwi_aligned_atlas,
            )
            reg_mri_pngs(self.dwi_aligned_atlas, self.nodif_B0, self.qa_reg)
            return self.dwi_aligned_atlas
        else:
            nib.save(
                nib.Nifti1Image(
                    self.atlas_data.astype(np.int32),
                    affine=self.atlas_img.affine,
                    header=self.atlas_img.header,
                ),
                self.aligned_atlas_t1mni,
            )
            reg_mri_pngs(self.aligned_atlas_t1mni, self.t1_aligned_mni,
                         self.qa_reg)
            return self.aligned_atlas_t1mni
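
A note on the transform chaining above: `reg_utils.combine_xfms` concatenates two FLIRT affines. If it wraps FSL's `convert_xfm` (an assumption about the helper, not confirmed by this snippet), a minimal stand-alone sketch would be:

import subprocess

def combine_xfms_sketch(xfm1, xfm2, xfm_out):
    # Hypothetical helper: writes xfm_out = xfm2 applied after xfm1,
    # assuming FSL's convert_xfm is on the PATH.
    subprocess.check_call(
        ["convert_xfm", "-omat", xfm_out, "-concat", xfm2, xfm1])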
Code example #41
File: fg_preproc.py  Project: dunsmoorlab/gPPI
def group_std_masks():
    from nilearn.image import get_data, new_img_like, resample_img
    import nibabel as nib
    
    saa = {}
    
    thr = {'mOFC':    [1014, 2014],
           'dACC':    [1002, 2002],
           'lh_amyg': [18],
           'rh_amyg': [54],
           'lh_hpc':  [17],
           'rh_hpc':  [53]}

    masks = {}
    for roi in thr: masks[roi] = {}


    for sub in all_sub_args:
        subj = bids_meta(sub)
        print(sub)
        # os.system('flirt -in %s -out %s -ref %s -applyxfm -init %s -interp nearestneighbour'%(subj.faa,subj.saa,std_2009_brain,subj.ref2std))

        saa[sub] = get_data(subj.faa)

        for roi in thr:
            if len(thr[roi]) == 2:
                l = np.zeros(saa[sub].shape)
                l[np.where(saa[sub] == thr[roi][0])] = 1

                r = np.zeros(saa[sub].shape)
                r[np.where(saa[sub] == thr[roi][1])] = 1

                m = l+r
            
            else:
                m = np.zeros(saa[sub].shape)
                m[np.where(saa[sub] == thr[roi][0])] = 1

            masks[roi][sub] = m

    for roi in masks:
        masks[roi] = np.array([masks[roi][i] for i in masks[roi]])

        # convert the sum across the 48 subjects into a proportion per voxel
        masks[roi] = np.sum(masks[roi], axis=0) / 48
        
        masks[roi] = new_img_like(subj.faa,masks[roi],copy_header=False)
        # masks[roi].header['pixdim'] = [1.,3., 3., 3., 0., 0., 0., 0.,]
        # masks[roi] = new_img_like(std_2009_brain,masks[roi],copy_header=False)
        std = nib.load(std_2009_brain)
        masks[roi] = resample_img(masks[roi],target_affine=std.affine,target_shape=std.shape,interpolation='nearest')
        
        # nib.save(masks[roi],os.path.join(group_masks,'%s_3mm_raw.nii.gz'%(roi)))
        # nib.save(masks[roi],os.path.join(group_masks,'%s_1mm_raw.nii.gz'%(roi)))
        nib.save(masks[roi],os.path.join(group_masks,'%s_1mm.nii.gz'%(roi)))
        
        os.system('fslmaths %s -bin %s'%(os.path.join(group_masks,'%s_1mm.nii.gz'%(roi)), os.path.join(group_masks,'%s_group_mask.nii.gz'%(roi))))

    for hemi in ['l','r']:
        amyg = get_data(os.path.join(group_masks,'%sh_amyg_1mm.nii.gz'%(hemi)))
        hpc  = get_data(os.path.join(group_masks,'%sh_hpc_1mm.nii.gz'%(hemi)))

        a_bin = np.where(amyg > hpc, 1, 0)
        h_bin = np.where(hpc > amyg, 1, 0)

        a_out = new_img_like(std_2009_brain,a_bin,copy_header=False)
        h_out = new_img_like(std_2009_brain,h_bin,copy_header=False)

        nib.save(a_out,os.path.join(group_masks,'%sh_amyg_group_mask.nii.gz'%(hemi)))
        nib.save(h_out,os.path.join(group_masks,'%sh_hpc_group_mask.nii.gz'%(hemi)))
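
The trailing `fslmaths -bin` call shells out just to binarize the proportion map; a pure-nibabel sketch of the same step (function name hypothetical):

import nibabel as nib
import numpy as np

def binarize_mask(in_path, out_path):
    # Any voxel labeled in at least one subject becomes 1, as with fslmaths -bin
    img = nib.load(in_path)
    data = (np.asanyarray(img.dataobj) > 0).astype(np.uint8)
    nib.save(nib.Nifti1Image(data, img.affine), out_path)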
Code example #42
File: frmMatNITF.py  Project: shaoweinuaa/easyfmri
    def btnConvert_click(self):
        msgBox = QMessageBox()

        # OutFile
        OutFile = ui.txtOutFile.text()
        if not len(OutFile):
            msgBox.setText("Please enter out file!")
            msgBox.setIcon(QMessageBox.Critical)
            msgBox.setStandardButtons(QMessageBox.Ok)
            msgBox.exec_()
            return False

        ThresholdType = ui.cbThType.currentData()
        MinTh = None
        MaxTh = None

        if (ThresholdType == "min") or (ThresholdType == "ext"):
            try:
                MinTh = np.double(ui.txtThMin.text())
            except:
                msgBox.setText("Min Threshold must be a number")
                msgBox.setIcon(QMessageBox.Critical)
                msgBox.setStandardButtons(QMessageBox.Ok)
                msgBox.exec_()
                return False
            print("Min Threshold is valid")

        if (ThresholdType == "max") or (ThresholdType == "ext"):
            try:
                MaxTh = np.double(ui.txtThMax.text())
            except:
                msgBox.setText("Max Threshold must be a number")
                msgBox.setIcon(QMessageBox.Critical)
                msgBox.setStandardButtons(QMessageBox.Ok)
                msgBox.exec_()
                return False
            print("Max Threshold is valid")

        # AFNI output (optional)
        AFNI = ui.txtAFNI.text()
        if not len(AFNI):
            AFNI = None
        else:
            CopyFile = ui.txtFAFNI.text()
            if not len(CopyFile):
                msgBox.setText("Please select 3dcopy command!")
                msgBox.setIcon(QMessageBox.Critical)
                msgBox.setStandardButtons(QMessageBox.Ok)
                msgBox.exec_()
                return
            if not os.path.isfile(CopyFile):
                msgBox.setText("Please select 3dcopy command!")
                msgBox.setIcon(QMessageBox.Critical)
                msgBox.setStandardButtons(QMessageBox.Ok)
                msgBox.exec_()
                return

            RefitFile = ui.txtFSUMA.text()
            if not len(RefitFile):
                msgBox.setText("Please select 3drefit command!")
                msgBox.setIcon(QMessageBox.Critical)
                msgBox.setStandardButtons(QMessageBox.Ok)
                msgBox.exec_()
                return
            if not os.path.isfile(RefitFile):
                msgBox.setText("Please select 3drefit command!")
                msgBox.setIcon(QMessageBox.Critical)
                msgBox.setStandardButtons(QMessageBox.Ok)
                msgBox.exec_()
                return

        # InFile
        InFile = ui.txtInFile.text()
        if not len(InFile):
            msgBox.setText("Please enter input file!")
            msgBox.setIcon(QMessageBox.Critical)
            msgBox.setStandardButtons(QMessageBox.Ok)
            msgBox.exec_()
            return
        if not os.path.isfile(InFile):
            msgBox.setText("Input file not found!")
            msgBox.setIcon(QMessageBox.Critical)
            msgBox.setStandardButtons(QMessageBox.Ok)
            msgBox.exec_()
            return

        if not len(ui.txtMatrix.currentText()):
            msgBox.setText("Matrix name not found!")
            msgBox.setIcon(QMessageBox.Critical)
            msgBox.setStandardButtons(QMessageBox.Ok)
            msgBox.exec_()
            return

        if not len(ui.txtCoord.currentText()):
            msgBox.setText("Coordinate name not found!")
            msgBox.setIcon(QMessageBox.Critical)
            msgBox.setStandardButtons(QMessageBox.Ok)
            msgBox.exec_()
            return

        if not len(ui.txtTime.text()):
            msgBox.setText("Time points not found!")
            msgBox.setIcon(QMessageBox.Critical)
            msgBox.setStandardButtons(QMessageBox.Ok)
            msgBox.exec_()
            return

        AffineFile = ui.txtSSSpace.currentText()
        if not len(AffineFile):
            msgBox.setText("Please enter affine reference!")
            msgBox.setIcon(QMessageBox.Critical)
            msgBox.setStandardButtons(QMessageBox.Ok)
            msgBox.exec_()
            return
        if not os.path.isfile(AffineFile):
            msgBox.setText("Affine reference not found!")
            msgBox.setIcon(QMessageBox.Critical)
            msgBox.setStandardButtons(QMessageBox.Ok)
            msgBox.exec_()
            return
        AffineHDR = nb.load(AffineFile)
        Affine = AffineHDR.affine

        Time = strRange(ui.txtTime.text())
        if Time is None:
            msgBox.setText("Time points is wrong!")
            msgBox.setIcon(QMessageBox.Critical)
            msgBox.setStandardButtons(QMessageBox.Ok)
            msgBox.exec_()
            return

        try:
            InData = io.loadmat(InFile)
            Mat = InData[ui.txtMatrix.currentText()]
            Coord = InData[ui.txtCoord.currentText()]
            imgShape = InData["imgShape"][0]

        except:
            print("Cannot load data!")
            msgBox.setText("Cannot load data!")
            msgBox.setIcon(QMessageBox.Critical)
            msgBox.setStandardButtons(QMessageBox.Ok)
            msgBox.exec_()
            return

        ImgData = None
        # Filter Mat
        for t in Time:
            vox = Mat[t, :]
            if ui.cbScale.isChecked():
                vox = preprocessing.scale(vox)
                print("Time Point: " + str(t) + " is normalized.")

            if MinTh is not None:
                vox[np.where(vox < MinTh)] = 0
                print("Time Point: " + str(t) +
                      ", Lower band thresholding is done!")

            if MaxTh is not None:
                vox[np.where(vox > MaxTh)] = 0
                print("Time Point: " + str(t) +
                      ", Upper band thresholding is done!")

            img = np.zeros([imgShape[0], imgShape[1], imgShape[2], 1])
            for coIndx, _ in enumerate(Coord[0]):
                img[Coord[0][coIndx], Coord[1][coIndx], Coord[2][coIndx],
                    0] = vox[coIndx]
            ImgData = img if ImgData is None else np.concatenate(
                (ImgData, img), axis=3)
            print("Time point: " + str(t) + " is done.")

        print("Saving image ...")
        NIF = nb.Nifti1Image(ImgData, Affine)
        nb.save(NIF, OutFile)
        if AFNI is not None:
            cmd = CopyFile + " " + OutFile + " " + AFNI
            print("Running: " + cmd)
            os.system(cmd)
            cmd = RefitFile + "  -view tlrc -space MNI " + AFNI + " " + AFNI + "+tlrc."
            print("Running: " + cmd)
            os.system(cmd)
        print("DONE!")
        msgBox.setText("Image file is generated.")
        msgBox.setIcon(QMessageBox.Information)
        msgBox.setStandardButtons(QMessageBox.Ok)
        msgBox.exec_()
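
Aside: the per-coordinate loop that scatters `vox` back into a 3D grid can be replaced by a single fancy-indexing assignment; a sketch assuming `coord` holds three rows of x/y/z voxel indices:

import numpy as np

def scatter_to_volume(vox, coord, img_shape):
    # vox: 1D array of voxel values; coord[0], coord[1], coord[2]: index arrays
    vol = np.zeros(tuple(img_shape) + (1,))
    vol[coord[0], coord[1], coord[2], 0] = vox
    return vol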
Code example #43
    subj_dst_dir = os.path.join(DST_DIR, subj)
    if not os.path.exists(subj_dst_dir):
        os.makedirs(subj_dst_dir)

    # crop the new CT
    # WIP code:
    #x_pad[np.ix_((x_pad > thresh).any(1),
    #(x_pad > thresh).any(0))]

    # Save CT
    x = np.array(new_img)
    x = np.rollaxis(x, 0, 3)
    # ensure air is constant
    x[np.where(x < -1024)] = -1024

    new_nii_obj = nib.Nifti1Image(x, affine=orig.affine, header=orig.header)
    new_filename = os.path.join(subj_dst_dir, subj + "_CT.nii.gz")
    nib.save(new_nii_obj, new_filename)

    # Save mask
    x = np.array(new_mask)
    x = np.rollaxis(x, 0, 3)
    # ensure binary mask
    # This shouldn't be a problem, but just in case
    x[np.where(x < 0.5)] = 0
    x[np.where(x >= 0.5)] = 1

    new_nii_obj = nib.Nifti1Image(x, affine=orig.affine, header=orig.header)
    new_filename = os.path.join(subj_dst_dir, subj + "_mask.nii.gz")
    nib.save(new_nii_obj, new_filename)
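
The commented-out WIP crop above seems aimed at trimming empty slices; one way to crop a CT volume to its above-threshold bounding box (a sketch under that assumption, not the author's final code):

import numpy as np

def crop_to_bbox(x, thresh=-1024):
    # Keep only the slab of slices containing at least one voxel above thresh
    mask = x > thresh
    keep = [np.where(mask.any(axis=tuple(a for a in range(3) if a != ax)))[0]
            for ax in range(3)]
    return x[keep[0].min():keep[0].max() + 1,
             keep[1].min():keep[1].max() + 1,
             keep[2].min():keep[2].max() + 1]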
Code example #44
def main(*args):

    t1_overall = time.time()

    # Change to blm directory
    os.chdir(os.path.dirname(os.path.realpath(__file__)))

    batchNo = args[0]

    if len(args) == 1 or (not args[1]):
        # Load in inputs
        with open(os.path.join(os.getcwd(), '..', 'blmm_config.yml'),
                  'r') as stream:
            inputs = yaml.load(stream, Loader=yaml.FullLoader)
    else:
        if type(args[1]) is str:
            # In this case inputs file is first argument
            with open(os.path.join(args[1]), 'r') as stream:
                inputs = yaml.load(stream, Loader=yaml.FullLoader)
        else:
            # In this case inputs structure is first argument.
            inputs = args[1]

    if 'MAXMEM' in inputs:
        MAXMEM = eval(inputs['MAXMEM'])
    else:
        MAXMEM = 2**32

    OutDir = inputs['outdir']

    # Get number of ffx parameters
    c1 = blmm_eval(inputs['contrasts'][0]['c' + str(1)]['vector'])
    c1 = np.array(c1)
    n_p = c1.shape[0]
    del c1

    # Y volumes
    with open(inputs['Y_files']) as a:

        Y_files = []
        for line in a.readlines():
            Y_files.append(line.replace('\n', ''))

    # Load in one nifti to check NIFTI size
    try:
        Y0 = blmm_load(Y_files[0])
    except Exception as error:
        raise ValueError('The NIFTI "' + Y_files[0] + '" does not exist')

    d0 = Y0.get_data()

    # Get the maximum memory a NIFTI could take in storage.
    NIFTIsize = sys.getsizeof(np.zeros(d0.shape, dtype='uint64'))

    # Similar to blksize in SwE, we divide by 8 times the size of a nifti
    # to work out how many blocks we use.
    blksize = int(np.floor(MAXMEM / 8 / NIFTIsize / n_p))

    # Reduce X to X for this block.
    X = blmm_load(inputs['X'])
    X = X[(blksize * (batchNo - 1)):min((blksize * batchNo), len(Y_files))]

    # Number of random effects factors.
    n_f = len(inputs['Z'])

    # Read in each factor
    for i in range(0, n_f):

        Zi_factor = blmm_load(inputs['Z'][i]['f' + str(i + 1)]['factor'])
        Zi_design = blmm_load(inputs['Z'][i]['f' + str(i + 1)]['design'])

        # Number of levels for factor i
        l_i = np.amax(Zi_factor)

        # Number of parameters for factor i
        q_i = Zi_design.shape[1]

        # One hot encode the factor vector
        Zi_factor = pd.get_dummies(pd.DataFrame(Zi_factor)[0]).values

        # Reduce to block.
        Zi_design = Zi_design[(blksize *
                               (batchNo - 1)):min((blksize *
                                                   batchNo), len(Y_files))]
        Zi_factor = Zi_factor[(blksize *
                               (batchNo - 1)):min((blksize *
                                                   batchNo), len(Y_files))]

        # Repeat Zi_factor for each parameter
        Zi = np.repeat(Zi_factor, q_i, axis=1).astype(np.float64)

        # Fill the one values with the design
        Zi[Zi == 1] = Zi_design.reshape(Zi[Zi == 1].shape)

        # Concatenate Z's Horizontally
        if i == 0:

            Z = Zi

        else:

            Z = np.hstack((Z, Zi))

    # Mask volumes (if they are given)
    if 'data_mask_files' in inputs:

        with open(inputs['data_mask_files']) as a:

            M_files = []
            for line in a.readlines():
                M_files.append(line.replace('\n', ''))

        # If we have a mask for each Y, reduce the list to just for this block
        if len(M_files) == len(Y_files):

            # In this case we have a mask per Y volume
            M_files = M_files[(blksize *
                               (batchNo - 1)):min((blksize *
                                                   batchNo), len(M_files))]

        else:

            if len(M_files) > len(Y_files):

                raise ValueError('Too many data_masks specified!')

            else:

                raise ValueError('Too few data_masks specified!')

    # Otherwise we have no masks
    else:

        # There is not a mask for each Y as there are no masks at all!
        M_files = []

    # Mask threshold for Y (if given)
    if 'data_mask_thresh' in inputs:
        M_t = float(inputs['data_mask_thresh'])
    else:
        M_t = None

    # Mask volumes (if they are given)
    if 'analysis_mask' in inputs:
        M_a = blmm_load(inputs['analysis_mask']).get_data()
    else:
        M_a = None

    # Reduce Y_files to only Y files for this block
    Y_files = Y_files[(blksize *
                       (batchNo - 1)):min((blksize * batchNo), len(Y_files))]

    # Verify input
    verifyInput(Y_files, M_files, Y0)

    # Obtain Y, mask for Y and nmap. This mask is just for voxels
    # with no studies present.
    Y, Mask, nmap, M, Mmap = obtainY(Y_files, M_files, M_t, M_a)

    # Work out voxel specific designs
    MX = blkMX(X, Y, M)
    MZ = blkMX(Z, Y, M)  # MIGHT NEED TO THINK ABOUT SPARSITY HERE LATER

    # Get X transpose Y, Z transpose Y and Y transpose Y.
    XtY = blkXtY(X, Y, Mask)
    YtY = blkYtY(Y, Mask)
    ZtY = blkXtY(Z, Y, Mask)  # REVISIT ONCE SPARSED

    # In a spatially varying design XtX has dimensions n_voxels
    # by n_parameters by n_parameters. We reshape to n_voxels by
    # n_parameters^2 so that we can save as a csv.
    XtX = blkXtX(MX)
    XtX = XtX.reshape([XtX.shape[0], XtX.shape[1] * XtX.shape[2]])

    # In a spatially varying design ZtX has dimensions n_voxels
    # by n_q by n_parameters. We reshape to n_voxels by
    # n_parameters^2 so that we can save as a csv.
    ZtX = blkZtX(MZ, MX)
    ZtX = ZtX.reshape([ZtX.shape[0], ZtX.shape[1] * ZtX.shape[2]])

    # ======================================================================
    # NEED TO THINK ABOUT SPARSE HERE
    # ======================================================================
    #
    # In a spatially varying design ZtZ has dimensions n_voxels
    # by n_q by n_q. We reshape to n_voxels by n_q^2 so that we
    # can save as a csv.
    ZtZ = blkXtX(MZ)
    ZtZ = ZtZ.reshape([ZtZ.shape[0], ZtZ.shape[1] * ZtZ.shape[2]])

    # ======================================================================

    # Pandas reads and writes files much more quickly with nrows <<
    # number of columns
    XtY = XtY.transpose()
    ZtY = ZtY.transpose()

    if (len(args) == 1) or (type(args[1]) is str):

        # Record product matrices
        np.save(os.path.join(OutDir, "tmp", "XtX" + str(batchNo)), XtX)
        np.save(os.path.join(OutDir, "tmp", "XtY" + str(batchNo)), XtY)
        np.save(os.path.join(OutDir, "tmp", "YtY" + str(batchNo)), YtY)
        np.save(os.path.join(OutDir, "tmp", "ZtY" + str(batchNo)), ZtY)
        np.save(os.path.join(OutDir, "tmp", "ZtX" + str(batchNo)), ZtX)
        np.save(os.path.join(OutDir, "tmp", "ZtZ" + str(batchNo)), ZtZ)

        # Get map of number of scans at voxel.
        nmap = nib.Nifti1Image(nmap, Y0.affine, header=Y0.header)
        nib.save(
            nmap,
            os.path.join(OutDir, 'tmp',
                         'blmm_vox_n_batch' + str(batchNo) + '.nii'))
        # Get map of number of scans at voxel.
        Mmap = nib.Nifti1Image(Mmap, Y0.affine, header=Y0.header)
        nib.save(
            Mmap,
            os.path.join(OutDir, 'tmp',
                         'blmm_vox_uniqueM_batch' + str(batchNo) + '.nii'))
    else:
        # Return XtX, XtY, YtY, nB
        return (XtX, XtY, YtY, nmap)
    w.resetwarnings()

    t2_overall = time.time()
    print('TIME: ', t2_overall - t1_overall)
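
Why saving per-batch product matrices works: X'X and X'Y are sums over rows, so block-wise products accumulated across batches equal the full-data products. A toy check (all names illustrative):

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))
Y = rng.normal(size=(100, 1))

blocks = [slice(0, 40), slice(40, 100)]
XtX_blocked = sum(X[b].T @ X[b] for b in blocks)
XtY_blocked = sum(X[b].T @ Y[b] for b in blocks)

assert np.allclose(XtX_blocked, X.T @ X)
assert np.allclose(XtY_blocked, X.T @ Y)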
Code example #45
#!/usr/bin/python

import nibabel as nib
import scipy.signal
import os

path_main = '/home/andreabertana/Projects/HCP/results/MNI/mc/'
subj = os.listdir('./Projects/HCP/MNI/')
for s in subj:
    path = path_main + s + '_mc.nii.gz'
    print(path)
    print("Loading data")
    fmri = nib.load(path)
    data = fmri  #.data.astype('float32')
    #fmri= unload()

    # Detrend. Axis 3 is time. bp makes detrend fit a separate linear trend
    # per segment (original note: 12 parts of 121 vols each, 1452/121 = 12,
    # one per block of the Haxby design).
    print("Detrending")
    detrended = scipy.signal.detrend(data.get_data(),
                                     axis=3,
                                     type='linear',
                                     bp=[405])
    filename = ('/home/andreabertana/Projects/HCP/results/MNI/detrend/' + s +
                '_detrend.nii.gz')
    print('Store detrended data for later re-use')
    affine = data.get_affine()
    # Use Nifti1Image (not AnalyzeImage) so the format matches the .nii.gz name
    img = nib.Nifti1Image(detrended, affine=affine)
    nib.save(img, filename)
print "Done."
print "All Done"
Code example #46
def save_array_as_nifty_volume(data, filename):
    import numpy as np
    import nibabel
    # numpy data shape [D, H, W]
    # nifti image shape [W, H, D]
    data = np.transpose(data, [2, 1, 0])
    seg = nibabel.Nifti1Image(data, np.eye(4))
    nibabel.save(seg, filename)
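
A quick round-trip check of the helper (hypothetical usage, assuming the function above is in scope):

import numpy as np
import nibabel

arr = np.zeros((4, 5, 6), dtype=np.float32)  # [D, H, W]
save_array_as_nifty_volume(arr, 'example.nii.gz')
assert nibabel.load('example.nii.gz').get_fdata().shape == (6, 5, 4)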
Code example #47
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.INFO)

    required_args = [args.input_dwi, args.bvals, args.bvecs, args.reverse_b0]

    optional_args = [args.output_b0s]

    assert_inputs_exist(parser, required_args)
    assert_outputs_exist(parser, args, [], optional_args)

    if os.path.splitext(args.output_prefix)[1] != '':
        parser.error('The prefix must not contain any extension.')

    bvals, bvecs = read_bvals_bvecs(args.bvals, args.bvecs)
    bvals_min = bvals.min()

    # TODO refactor this
    b0_threshold = args.b0_thr
    if bvals_min < 0 or bvals_min > b0_threshold:
        raise ValueError('The minimal b-value is less than 0 or greater '
                         'than {0}. This is highly suspicious. Please check '
                         'your data to ensure everything is correct. '
                         'Value found: {1}'.format(b0_threshold, bvals_min))

    gtab = gradient_table(bvals, bvecs, b0_threshold=b0_threshold)

    acqparams = create_acqparams(gtab, args.readout, args.encoding_direction)

    if not os.path.exists(args.output_directory):
        os.makedirs(args.output_directory)

    acqparams_path = os.path.join(args.output_directory, 'acqparams.txt')
    np.savetxt(acqparams_path, acqparams, fmt='%1.4f', delimiter=' ')

    rev_b0_img = nib.load(args.reverse_b0)
    rev_b0 = rev_b0_img.get_data()

    dwi_image = nib.load(args.input_dwi)
    dwi = dwi_image.get_data()
    b0s = dwi[..., gtab.b0s_mask]

    b0_idx = np.where(gtab.b0s_mask)[0]
    fused_b0s = np.zeros(b0s.shape[:-1] + (len(b0_idx) + 1, ))
    fused_b0s[..., 0:-1] = b0s
    fused_b0s[..., -1] = rev_b0
    fused_b0s_path = os.path.join(args.output_directory, args.output_b0s)
    nib.save(nib.Nifti1Image(fused_b0s, rev_b0_img.affine), fused_b0s_path)

    output_path = os.path.join(args.output_directory, args.output_prefix)
    fout_path = os.path.join(args.output_directory, "correction_field")
    iout_path = os.path.join(args.output_directory, "corrected_b0s")
    topup = 'topup --imain={0} --datain={1}'\
            ' --config={2} --verbose --out={3}'\
            ' --fout={4} --iout={5} --subsamp=1 \n'\
        .format(fused_b0s_path, acqparams_path, args.config, output_path,
                fout_path, iout_path)

    if args.output_script:
        with open("topup.sh", 'w') as f:
            f.write(topup)
    else:
        print(topup)
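
For orientation, each row of the acqparams.txt written by `create_acqparams` encodes a phase-encoding vector plus the total readout time, one row per b0 volume fed to topup. A hand-built equivalent for three forward (AP) b0s and one reversed (PA) b0 (values illustrative):

import numpy as np

acqparams = np.array([[0, 1, 0, 0.062]] * 3 + [[0, -1, 0, 0.062]])
np.savetxt('acqparams.txt', acqparams, fmt='%1.4f', delimiter=' ')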
Code example #48
    # Plot outlier regions to a brain map in the patient's anatomical space
    print "Plotting accuracy outlier ROIs to a brain map..."
    ROI_atlas = nibabel.load(
        '/usr/share/fsl/5.0/data/atlases/HarvardOxford/HarvardOxford-cortl-maxprob-thr25-2mm.nii.gz'
    )
    orig_ROI_data = ROI_atlas.get_data()
    affine = ROI_atlas.get_affine()
    acc_roi_data = np.zeros(np.shape(orig_ROI_data))
    for i in range(1, roin):
        ids = np.where(orig_ROI_data == i)
        acc_roi_data[ids[0], ids[1], ids[2]] = diff_acc[i]
    acc_roi_data[np.where(acc_roi_data > 0)] = 1  # remove positive values
    acc_img = nibabel.Nifti1Image(-acc_roi_data,
                                  affine)  # change sign to positive
    nibabel.save(
        acc_img,
        patient_results + 'subj' + test_subj + '_diff_accuracy.nii.gz')

    # Transform to native anatomical space
    print "Transforming to native space..."
    flirt = fsl.FLIRT(bins=800, cost='mutualinfo', reference=anat)
    flirt.inputs.in_file = patient_results + 'subj' + test_subj + '_diff_accuracy.nii.gz'
    flirt.inputs.out_file = patient_results + 'subj' + test_subj + '_diff_accuracy_transformed.nii.gz'
    flirt.run()

    # Make box & whiskers plots for each Harvard-Oxford region
    plt.figure(figsize=(10, 20))
    plt.boxplot(reproducibility.T, vert=0, showmeans=True)
    ticks, labels = plt.yticks(np.arange(1,
                                         np.shape(cort_labels)[0] + 1),
                               cort_labels,
Code example #49
File: denoise_ascm.py  Project: tomwright01/dipy
plt.savefig('denoised_ascm.png', bbox_inches='tight')

print("The ascm result saved in denoised_ascm.png")
"""
.. figure:: denoised_ascm.png
   :align: center

   **Showing the axial slice without (left) and with (middle) ASCM denoising**.
"""
"""
From the above figure we can see that the residual is quite uniform in nature,
which indicates that ASCM denoises the data while preserving the sharpness of
the features.
"""

nib.save(nib.Nifti1Image(den_final, affine), 'denoised_ascm.nii.gz')

print("Saving the entire denoised output in denoised_ascm.nii.gz")
"""
For comparison purposes we also plot the outputs of the ``non_local_means``
(both with the larger as well as with the smaller patch radius) with the ASCM
output.
"""

fig, ax = plt.subplots(1, 4)
ax[0].imshow(original, cmap='gray', origin='lower')
ax[0].set_title('Original')
ax[1].imshow(den_small[..., axial_middle].T,
             cmap='gray',
             origin='lower',
             interpolation='none')
Code example #50
    # Create random dictionary
    np.random.seed(12345)

    input_image = nibabel.load(args.input)
    data = input_image.get_data().astype(float)

    maxval = 0
    if args.normalize:
        maxval = np.max(data)
        data /= maxval

    if not args.rician:
        data += np.random.normal(args.mean, args.std, data.shape)
    else:
        data += rice.rvs(args.b,
                         loc=args.mean,
                         scale=args.std,
                         size=data.shape)

    if args.normalize:
        data *= maxval

    nibabel.save(nibabel.Nifti1Image(data, input_image.affine), args.output)
    """A Rice continuous random variable. (from scipy code)
    Notes
    -----
    The probability density function for `rice` is::
        rice.pdf(x, b) = x * exp(-(x**2+b**2)/2) * I[0](x*b)
    for ``x > 0``, ``b > 0``.
  """
Code example #51
def execute_spm_auditory_glm(data, reg_motion=False):
    reg_motion = reg_motion and 'realignment_parameters' in data

    tr = 7.
    n_scans = 96
    _duration = 6
    epoch_duration = _duration * tr
    conditions = ['rest', 'active'] * 8
    duration = epoch_duration * np.ones(len(conditions))
    onset = np.linspace(0, (len(conditions) - 1) * epoch_duration,
                        len(conditions))
    paradigm = BlockParadigm(con_id=conditions, onset=onset, duration=duration)
    hfcut = 2 * 2 * epoch_duration

    # construct design matrix
    frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
    drift_model = 'Cosine'
    hrf_model = 'Canonical With Derivative'

    add_reg_names = None
    add_regs = None
    if reg_motion:
        add_reg_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
        add_regs = data['realignment_parameters'][0]
        if isinstance(add_regs, str):
            add_regs = np.loadtxt(add_regs)

    design_matrix = make_dmtx(frametimes,
                              paradigm, hrf_model=hrf_model,
                              drift_model=drift_model, hfcut=hfcut,
                              add_reg_names=add_reg_names,
                              add_regs=add_regs)

    # plot and save design matrix
    ax = design_matrix.show()
    ax.set_position([.05, .25, .9, .65])
    ax.set_title('Design matrix')
    dmat_outfile = os.path.join(data['output_dir'],
                                'design_matrix.png')
    pl.savefig(dmat_outfile, bbox_inches="tight", dpi=200)
    pl.close()

    # specify contrasts
    contrasts = {}
    n_columns = len(design_matrix.names)
    for i in range(paradigm.n_conditions):
        contrasts['%s' % design_matrix.names[2 * i]] = np.eye(n_columns)[2 * i]

    # more interesting contrasts
    contrasts['active-rest'] = contrasts['active'] - contrasts['rest']

    # fit GLM
    print('\r\nFitting a GLM (this takes time)...')
    fmri_glm = FMRILinearModel(load_4D_img(data['func'][0]),
                               design_matrix.matrix,
                               mask='compute')

    fmri_glm.fit(do_scaling=True, model='ar1')

    # save computed mask
    mask_path = os.path.join(data['output_dir'], "mask.nii.gz")
    print "Saving mask image %s..." % mask_path
    nibabel.save(fmri_glm.mask, mask_path)

    # compute bg unto which activation will be projected
    anat_img = load_vol(data['anat'])

    anat = anat_img.get_data()

    if anat.ndim == 4:
        anat = anat[..., 0]

    anat_affine = anat_img.get_affine()

    print "Computing contrasts..."
    z_maps = {}
    for contrast_id, contrast_val in contrasts.iteritems():
        print "\tcontrast id: %s" % contrast_id
        z_map, t_map, eff_map, var_map = fmri_glm.contrast(
            contrasts[contrast_id],
            con_id=contrast_id,
            output_z=True,
            output_stat=True,
            output_effects=True,
            output_variance=True,
            )

        # store stat maps to disk
        for dtype, out_map in zip(['z', 't', 'effects', 'variance'],
                                  [z_map, t_map, eff_map, var_map]):
            map_dir = os.path.join(
                data['output_dir'], '%s_maps' % dtype)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(
                map_dir, '%s.nii.gz' % contrast_id)
            nibabel.save(out_map, map_path)

            # collect zmaps for contrasts we're interested in
            if contrast_id == 'active-rest' and dtype == "z":
                z_maps[contrast_id] = map_path

            print "\t\t%s map: %s" % (dtype, map_path)

        print

    # do stats report
    stats_report_filename = os.path.join(data['reports_output_dir'],
                                         "report_stats.html")
    contrasts = dict((contrast_id, contrasts[contrast_id])
                     for contrast_id in z_maps.keys())
    generate_subject_stats_report(
        stats_report_filename,
        contrasts,
        z_maps,
        fmri_glm.mask,
        design_matrices=[design_matrix],
        subject_id=data['subject_id'],
        anat=anat,
        anat_affine=anat_affine,
        cluster_th=50,  # we're only interested in this 'large' clusters

        # additional ``kwargs`` for more informative report
        paradigm=paradigm.__dict__,
        TR=tr,
        n_scans=n_scans,
        hfcut=hfcut,
        frametimes=frametimes,
        drift_model=drift_model,
        hrf_model=hrf_model,
        )

    ProgressReport().finish_dir(data['output_dir'])

    print "\r\nStatistic report written to %s\r\n" % stats_report_filename
Code example #52
File: test_api.py  Project: weiwei-wch/pyAFQ
def test_AFQ_data_waypoint():
    """
    Test with some actual data again, this time for track segmentation
    """
    tmpdir = nbtmp.InTemporaryDirectory()
    afd.organize_stanford_data(path=tmpdir.name)
    dmriprep_path = op.join(tmpdir.name, 'stanford_hardi',
                            'derivatives', 'dmriprep')
    bundle_names = ["SLF", "ARC", "CST", "FP"]
    tracking_params = dict(odf_model="DTI")
    segmentation_params = dict(filter_by_endpoints=False,
                               seg_algo="AFQ",
                               return_idx=True)

    clean_params = dict(return_idx=True)

    myafq = api.AFQ(dmriprep_path=dmriprep_path,
                    sub_prefix='sub',
                    bundle_names=bundle_names,
                    scalars=["dti_fa", "dti_md"],
                    tracking_params=tracking_params,
                    segmentation_params=segmentation_params,
                    clean_params=clean_params)

    # Replace the mapping and streamlines with precomputed:
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    streamlines = dts.Streamlines(
        dtu.transform_tracking_output(
            [s for s in streamlines if s.shape[0] > 100],
            np.linalg.inv(myafq.dwi_affine[0])))

    sl_file = op.join(
        myafq.data_frame.results_dir[0],
        'sub-01_sess-01_dwi_space-RASMM_model-DTI_desc-det_tractography.trk')
    sft = StatefulTractogram(streamlines, myafq.data_frame.dwi_file[0],
                             Space.VOX)
    save_tractogram(sft, sl_file, bbox_valid_check=False)

    mapping_file = op.join(
        myafq.data_frame.results_dir[0],
        'sub-01_sess-01_dwi_mapping_from-DWI_to_MNI_xfm.nii.gz')
    nib.save(mapping, mapping_file)
    reg_prealign_file = op.join(
        myafq.data_frame.results_dir[0],
        'sub-01_sess-01_dwi_prealign_from-DWI_to-MNI_xfm.npy')
    np.save(reg_prealign_file, np.eye(4))

    tgram = load_tractogram(myafq.bundles[0], myafq.dwi_img[0])

    bundles = aus.tgram_to_bundles(tgram, myafq.bundle_dict, myafq.dwi_img[0])
    npt.assert_(len(bundles['CST_R']) > 0)

    # Test ROI exporting:
    myafq.export_rois()
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'ROIs',
        'sub-01_sess-01_dwi_desc-ROI-CST_R-1-include.json'))

    # Test bundles exporting:
    myafq.export_bundles()
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'bundles',
        'sub-01_sess-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-CST_L_tractography.trk'))  # noqa

    # Test creation of file with bundle indices:
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'sub-01_sess-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-clean_tractography_idx.json'))  # noqa

    tract_profiles = pd.read_csv(myafq.tract_profiles[0])
    assert tract_profiles.shape == (800, 5)

    # Before we run the CLI, we'll remove the bundles and ROI folders, to see
    # that the CLI generates them
    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0],
                          'bundles'))

    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0],
                          'ROIs'))

    # Test the CLI:
    print("Running the CLI:")

    # Bare bones config only points to the files
    config = dict(files=dict(dmriprep_path=dmriprep_path))

    config_file = op.join(tmpdir.name, "afq_config.toml")
    with open(config_file, 'w') as ff:
        toml.dump(config, ff)

    cmd = "pyAFQ " + config_file
    out = os.system(cmd)
    assert out == 0
    # The combined tract profiles should already exist from the CLI Run:
    from_file = pd.read_csv(op.join(myafq.afq_dir, 'tract_profiles.csv'))
    # And should be identical to what we would get by rerunning this:
    combined_profiles = myafq.combine_profiles()
    assert combined_profiles.shape == (800, 7)
    assert_frame_equal(combined_profiles, from_file)

    # Make sure the CLI did indeed generate these:
    myafq.export_rois()
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'ROIs',
        'sub-01_sess-01_dwi_desc-ROI-CST_R-1-include.json'))

    myafq.export_bundles()
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'bundles',
        'sub-01_sess-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-CST_L_tractography.trk'))  # noqa
Code example #53
def main(tempdir):
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # create a temporary directory and 40 random image, mask pairs
    print(f"generating synthetic data to {tempdir} (this may take a while)")
    for i in range(40):
        im, seg = create_test_image_3d(128,
                                       128,
                                       128,
                                       num_seg_classes=1,
                                       channel_dim=-1)
        n = nib.Nifti1Image(im, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"img{i:d}.nii.gz"))
        n = nib.Nifti1Image(seg, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))

    images = sorted(glob(os.path.join(tempdir, "img*.nii.gz")))
    segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz")))
    train_files = [{
        "image": img,
        "label": seg
    } for img, seg in zip(images[:20], segs[:20])]
    val_files = [{
        "image": img,
        "label": seg
    } for img, seg in zip(images[-20:], segs[-20:])]

    # define transforms for image and segmentation
    train_transforms = Compose([
        LoadNiftid(keys=["image", "label"]),
        AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
        ScaleIntensityd(keys="image"),
        RandCropByPosNegLabeld(keys=["image", "label"],
                               label_key="label",
                               spatial_size=[96, 96, 96],
                               pos=1,
                               neg=1,
                               num_samples=4),
        RandRotate90d(keys=["image", "label"], prob=0.5, spatial_axes=[0, 2]),
        ToTensord(keys=["image", "label"]),
    ])
    val_transforms = Compose([
        LoadNiftid(keys=["image", "label"]),
        AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
        ScaleIntensityd(keys="image"),
        ToTensord(keys=["image", "label"]),
    ])

    # create a training data loader
    train_ds = monai.data.CacheDataset(data=train_files,
                                       transform=train_transforms,
                                       cache_rate=0.5)
    # use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training
    train_loader = monai.data.DataLoader(train_ds,
                                         batch_size=2,
                                         shuffle=True,
                                         num_workers=4)
    # create a validation data loader
    val_ds = monai.data.CacheDataset(data=val_files,
                                     transform=val_transforms,
                                     cache_rate=1.0)
    val_loader = monai.data.DataLoader(val_ds, batch_size=1, num_workers=4)

    # create UNet, DiceLoss and Adam optimizer
    device = torch.device("cuda:0")
    net = monai.networks.nets.UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)
    loss = monai.losses.DiceLoss(sigmoid=True)
    opt = torch.optim.Adam(net.parameters(), 1e-3)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(opt, step_size=2, gamma=0.1)

    val_post_transforms = Compose([
        Activationsd(keys="pred", sigmoid=True),
        AsDiscreted(keys="pred", threshold_values=True),
        KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
    ])
    val_handlers = [
        StatsHandler(output_transform=lambda x: None),
        TensorBoardStatsHandler(log_dir="./runs/",
                                output_transform=lambda x: None),
        TensorBoardImageHandler(
            log_dir="./runs/",
            batch_transform=lambda x: (x["image"], x["label"]),
            output_transform=lambda x: x["pred"],
        ),
        CheckpointSaver(save_dir="./runs/",
                        save_dict={"net": net},
                        save_key_metric=True),
    ]

    evaluator = SupervisedEvaluator(
        device=device,
        val_data_loader=val_loader,
        network=net,
        inferer=SlidingWindowInferer(roi_size=(96, 96, 96),
                                     sw_batch_size=4,
                                     overlap=0.5),
        post_transform=val_post_transforms,
        key_val_metric={
            "val_mean_dice":
            MeanDice(include_background=True,
                     output_transform=lambda x: (x["pred"], x["label"]))
        },
        additional_metrics={
            "val_acc":
            Accuracy(output_transform=lambda x: (x["pred"], x["label"]))
        },
        val_handlers=val_handlers,
        # if no FP16 support in GPU or PyTorch version < 1.6, will not enable AMP evaluation
        amp=monai.config.get_torch_version_tuple() >= (1, 6),
    )

    train_post_transforms = Compose([
        Activationsd(keys="pred", sigmoid=True),
        AsDiscreted(keys="pred", threshold_values=True),
        KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
    ])
    train_handlers = [
        LrScheduleHandler(lr_scheduler=lr_scheduler, print_lr=True),
        ValidationHandler(validator=evaluator, interval=2, epoch_level=True),
        StatsHandler(tag_name="train_loss",
                     output_transform=lambda x: x["loss"]),
        TensorBoardStatsHandler(log_dir="./runs/",
                                tag_name="train_loss",
                                output_transform=lambda x: x["loss"]),
        CheckpointSaver(save_dir="./runs/",
                        save_dict={
                            "net": net,
                            "opt": opt
                        },
                        save_interval=2,
                        epoch_level=True),
    ]

    trainer = SupervisedTrainer(
        device=device,
        max_epochs=5,
        train_data_loader=train_loader,
        network=net,
        optimizer=opt,
        loss_function=loss,
        inferer=SimpleInferer(),
        post_transform=train_post_transforms,
        key_train_metric={
            "train_acc":
            Accuracy(output_transform=lambda x: (x["pred"], x["label"]))
        },
        train_handlers=train_handlers,
        # if no FP16 support in GPU or PyTorch version < 1.6, will not enable AMP training
        amp=monai.config.get_torch_version_tuple() >= (1, 6),
    )
    trainer.run()
Code example #54
    try:
        from nilearn.decomposition.canica import CanICA
        t0 = time.time()
        canica = CanICA(n_components=n_components,
                        mask=mask_img,
                        smoothing_fwhm=6.,
                        memory="nilearn_cache",
                        memory_level=1,
                        threshold=None,
                        random_state=1,
                        n_jobs=-1)
        canica.fit(dataset.func)
        print('Canica: %f' % (time.time() - t0))
        canica_components = masking.unmask(canica.components_, mask_img)
        nibabel.save(
            nibabel.Nifti1Image(canica_components, mask_img.get_affine()),
            join(path, 'canica.nii.gz'))
    except ImportError:
        import warnings
        warnings.warn('nilearn must be installed to run CanICA')

canica_dmn = nibabel.load(join(path, 'canica.nii.gz')).get_data()[..., 4]

### Melodic ICA ############################################################
# To have MELODIC results, please use my melodic branch of nilearn

melodic_dmn = nibabel.load(join(path, 'melodic.nii.gz')).get_data()[..., 3]

### FastICA ##################################################################

# Concatenate all the subjects
Code example #55
data_df = pd.DataFrame(data, columns=col_names, index=gf.index, dtype=float)
df = pd.concat([gf, data_df], axis=1)

# selecting just specific groups
groups = gf.group.tolist()
idx = [i for i, v in enumerate(groups) if v in test_groups]

res = np.zeros([gv.shape[0], 3])  # ttest and pval
res[:] = np.nan
gv_idx = np.nonzero(gv)[0]
for v in range(nvoxels):
    good_subjs = np.nonzero(~np.isnan(df['v%d' % v]))[0]
    keep = np.intersect1d(good_subjs, idx)
    est = smf.ols(formula='v%d ~ group + age + gender' % v,
                  data=df.iloc[keep]).fit()
    # find the coefficient with group term
    sx = [x for x, i in enumerate(est.tvalues.keys()) if i.find('group') == 0]
    if len(sx) > 0:
        sx = sx[0]
        # store the t-value for the group term
        beta = est.tvalues[sx]
        res[gv_idx[v], 0] = beta
        res[gv_idx[v], 1] = 1 - est.pvalues[sx]
    else:
        res[gv_idx[v], 0] = 0
        res[gv_idx[v], 1] = 0
fname = '%s/%sAgeGender_IC%02d.nii' % (res_dir, test_fname, comp)
print('Saving results to %s' % fname)
res = res.reshape(mask.get_data().shape[:3] + tuple([-1]))
nb.save(nb.Nifti1Image(res, img.get_affine()), fname)
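
The `enumerate(est.tvalues.keys())` dance above locates the group term by name; since statsmodels formula results index their statistics by term name, the same lookup can be written directly (a sketch, assuming a term name starting with 'group'):

def group_term_stats(est):
    # est: a fitted results object from smf.ols(...).fit()
    names = [n for n in est.tvalues.index if n.startswith('group')]
    if not names:
        return 0.0, 0.0
    return est.tvalues[names[0]], 1 - est.pvalues[names[0]]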
Code example #56
def execute_spm_multimodal_fmri_glm(data, reg_motion=False):
    reg_motion = reg_motion and 'realignment_parameters' in data

    # experimental paradigm meta-params
    stats_start_time = time.ctime()
    tr = 2.
    drift_model = 'Cosine'
    hrf_model = 'Canonical With Derivative'
    hfcut = 128.

    # make design matrices
    design_matrices = []
    for x in range(2):
        n_scans = data['func'][x].shape[-1]

        timing = scipy.io.loadmat(data['trials_ses%i' % (x + 1)],
                                  squeeze_me=True, struct_as_record=False)

        faces_onsets = timing['onsets'][0].ravel()
        scrambled_onsets = timing['onsets'][1].ravel()
        onsets = np.hstack((faces_onsets, scrambled_onsets))
        onsets *= tr  # because onsets were reported in 'scans' units
        conditions = ['faces'] * len(faces_onsets) + ['scrambled'] * len(
            scrambled_onsets)
        paradigm = EventRelatedParadigm(conditions, onsets)
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)

        add_reg_names = None
        add_regs = None
        if reg_motion:
            add_reg_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
            add_regs = data['realignment_parameters'][x]
            if isinstance(add_regs, str):
                add_regs = np.loadtxt(add_regs)
        design_matrix = make_dmtx(
            frametimes,
            paradigm, hrf_model=hrf_model,
            drift_model=drift_model, hfcut=hfcut,
            add_reg_names=add_reg_names,
            add_regs=add_regs
            )

        design_matrices.append(design_matrix)

    # specify contrasts
    contrasts = {}
    n_columns = len(design_matrix.names)
    for i in range(paradigm.n_conditions):
        contrasts['%s' % design_matrix.names[2 * i]] = np.eye(n_columns)[2 * i]

    # more interesting contrasts
    contrasts['faces-scrambled'] = contrasts['faces'] - contrasts['scrambled']
    contrasts['scrambled-faces'] = contrasts['scrambled'] - contrasts['faces']
    contrasts['effects_of_interest'] = contrasts[
        'faces'] + contrasts['scrambled']

    # we have the same contrasts over sessions, so let's replicate
    contrasts = dict((contrast_id, [contrast_val] * 2)
                     for contrast_id, contrast_val in contrasts.items())

    # fit GLM
    print('\r\nFitting a GLM (this takes time)...')
    fmri_glm = FMRILinearModel([load_4D_img(sess_func)
                                for sess_func in data['func']],
                               [dmat.matrix for dmat in design_matrices],
                               mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')

    # save computed mask
    mask_path = os.path.join(data['output_dir'], "mask.nii.gz")
    print "Saving mask image %s" % mask_path
    nibabel.save(fmri_glm.mask, mask_path)

    # compute bg unto which activation will be projected
    anat_img = load_vol(data['anat'])

    anat = anat_img.get_data()

    if anat.ndim == 4:
        anat = anat[..., 0]

    anat_affine = anat_img.get_affine()

    print "Computing contrasts .."
    z_maps = {}
    for contrast_id, contrast_val in contrasts.iteritems():
        print "\tcontrast id: %s" % contrast_id
        z_map, t_map, eff_map, var_map = fmri_glm.contrast(
            contrast_val,
            con_id=contrast_id,
            output_z=True,
            output_stat=True,
            output_effects=True,
            output_variance=True,
            )

        # store stat maps to disk
        for dtype, out_map in zip(['z', 't', 'effects', 'variance'],
                                  [z_map, t_map, eff_map, var_map]):
            map_dir = os.path.join(
                data['output_dir'], '%s_maps' % dtype)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(
                map_dir, '%s.nii.gz' % contrast_id)
            nibabel.save(out_map, map_path)

            # collect zmaps for contrasts we're interested in
            if dtype == 'z':
                z_maps[contrast_id] = map_path

            print "\t\t%s map: %s" % (dtype, map_path)

    # do stats report
    data['stats_report_filename'] = os.path.join(data['reports_output_dir'],
                                                 "report_stats.html")
    contrasts = dict((contrast_id, contrasts[contrast_id])
                     for contrast_id in z_maps.keys())
    generate_subject_stats_report(
        data['stats_report_filename'],
        contrasts,
        z_maps,
        fmri_glm.mask,
        anat=anat,
        anat_affine=anat_affine,
        design_matrices=design_matrices,
        subject_id=data['subject_id'],
        cluster_th=15,  # we're only interested in this 'large' clusters
        start_time=stats_start_time,

        # additional ``kwargs`` for more informative report
        paradigm=paradigm.__dict__,
        TR=tr,
        n_scans=n_scans,
        hfcut=hfcut,
        frametimes=frametimes,
        drift_model=drift_model,
        hrf_model=hrf_model,
        )

    ProgressReport().finish_dir(data['reports_output_dir'])

    print "\r\nStatistic report written to %s\r\n" % data[
        'stats_report_filename']

    return data
Code example #57
    def __getitem__(self, index):

        data = self.testing[index]

        modalities = ['T1c', 'T1w', 'FLR', 'ENT', 'T2w']
        return_obj = []
        bbox = None
        missing_modalities = []
        if not self.targets:
            for modality in modalities:
                if modality == 'ENT' and data[modality] is not None:
                    ent_map = nib.load(data[modality]).get_fdata()
                    try:
                        bbox = bbox_3d_new(ent_map, self.size)
                    except Exception:
                        print(data['id'])
                        raise ValueError(
                            "Not able to generate bounding box, check mask dimensions"
                        )
            for modality in modalities:
                if modality != 'ENT':
                    path = data[modality]
                    if path is not None:
                        if self.normalize:
                            nifti_img = nib.load(data[modality])
                            min_ = nifti_img.get_fdata().min()
                            max_ = nifti_img.get_fdata().max()
                            if min_ == 0 and max_ == 0:  # all-zero volume
                                image = nib.load(data[modality])
                            else:
                                if self.normalize == 'fcm' and modality == 'T1c':
                                    self.tissue_mask = norm_main(
                                        nifti_img,
                                        contrast=modality,
                                        method=self.normalize)
                                    image = norm_main(
                                        nifti_img,
                                        contrast=modality,
                                        method='zscore').get_fdata()
                                elif self.normalize == 'fcm' and modality != 'T1c':
                                    image = norm_main(
                                        nifti_img,
                                        contrast=modality,
                                        method=self.normalize,
                                        fcm=self.tissue_mask).get_fdata()
                                else:
                                    image = norm_main(
                                        nifti_img,
                                        contrast=modality,
                                        method=self.normalize).get_fdata()

                        elif not self.normalize:
                            image = nib.load(data[modality]).get_fdata()
                            min_ = image.min()
                            max_ = image.max()

                        if min_ == 0 and max_ == 0:  # all-zero: treat as missing
                            missing_modalities.append(modality)
                            zeros = (self.size, self.size, self.size)
                            image_numpy = np.zeros(zeros)
                            return_obj.append(
                                torch.as_tensor(
                                    np.ascontiguousarray(image_numpy)))

                        else:
                            if bbox is not None:
                                image_numpy = np.array(image[bbox])
                                if image_numpy.shape != (64, 64, 64):
                                    image_numpy = pad_image(
                                        image_numpy, self.size)
                            else:
                                image_numpy = np.array(image)

                            return_obj.append(
                                torch.as_tensor(
                                    np.ascontiguousarray(image_numpy)))

                        if self.montage:
                            returnMontage(image_numpy, modality)
                        if self.save:
                            ni_img = nib.Nifti1Image(image_numpy, np.eye(4))
                            nib.save(
                                ni_img, '{}_bounding_box_{}.nii.gz'.format(
                                    data['id'], modality))

                    else:
                        missing_modalities.append(modality)
                        zeros = (self.size, self.size, self.size)
                        image_numpy = np.zeros(zeros)
                        return_obj.append(
                            torch.as_tensor(np.ascontiguousarray(image_numpy)))

        if not self.montage:

            if self.targets:
                return np.array(([x['group'] for x in self.testing
                                  ], [x['event'] for x in self.testing],
                                 [x['surv'] for x in self.testing]))

            else:
                time, event = tuple((torch.tensor(np.array(data['group'])),
                                     torch.tensor(np.array(data['event']))))
                id_ = data['id']

                if self.only_modality:
                    torch_images = (return_obj[0][None])
                    if torch_images.size() != torch.Size(
                        [1, self.size, self.size, self.size]):
                        return id_

                else:
                    torch_images = torch.cat(
                        tuple((return_obj[0][None], return_obj[1][None],
                               return_obj[2][None], return_obj[3][None])))
                    if torch_images.size() != torch.Size(
                        [4, self.size, self.size, self.size]):
                        print(torch_images.size())
                        return id_

                return torch_images.float()
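
`pad_image` is defined elsewhere in that project; a minimal center-pad to a cubic shape consistent with how it is called here might look like this (an assumption, not the project's actual helper):

import numpy as np

def pad_image(img, size):
    # Assumes every dimension of img is <= size
    out = np.zeros((size, size, size), dtype=img.dtype)
    off = [(size - s) // 2 for s in img.shape]
    out[off[0]:off[0] + img.shape[0],
        off[1]:off[1] + img.shape[1],
        off[2]:off[2] + img.shape[2]] = img
    return out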
Code example #58
File: VisualizeResults.py  Project: yjruby/3D-ESPNet
def segmentVoxel(imgLoc, model):
    '''
    Segment the image and store the results
    :param imgLoc: location of the flair image. Other modalities locations can be computed using this one.
    :param model: Segmentation Network
    :return: dice scores
    '''
    t1_loc = imgLoc.replace('flair', 't1')
    t1ce_loc = imgLoc.replace('flair', 't1ce')
    t2_loc = imgLoc.replace('flair', 't2')
    label_loc = imgLoc.replace('flair', 'seg')

    # get the 4 modalities
    img_flair = nib.load(imgLoc).get_data()
    gth_dummy = np.copy(img_flair)
    img_t1 = nib.load(t1_loc).get_data()
    img_t1ce = nib.load(t1ce_loc).get_data()
    img_t2 = nib.load(t2_loc).get_data()

    # Crop and min-max normalize them
    wi_st, wi_en, hi_st, hi_en, ch_st, ch_en = cropVolumes(img_flair, img_t1, img_t1ce, img_t2)
    img_flair = norm(img_flair[wi_st:wi_en, hi_st:hi_en, ch_st:ch_en])
    img_t1 = norm(img_t1[wi_st:wi_en, hi_st:hi_en, ch_st:ch_en])
    img_t1ce = norm(img_t1ce[wi_st:wi_en, hi_st:hi_en, ch_st:ch_en])
    img_t2 = norm(img_t2[wi_st:wi_en, hi_st:hi_en, ch_st:ch_en])


    # convert to Tensor
    resize = (1, img_flair.shape[0], img_flair.shape[1], img_flair.shape[2])
    img_flair = img_flair.reshape(resize)
    img_t1 = img_t1.reshape(resize)
    img_t1ce = img_t1ce.reshape(resize)
    img_t2 = img_t2.reshape(resize)

    tensor_flair = torch.from_numpy(img_flair)
    tensor_t1 = torch.from_numpy(img_t1)
    inputB = torch.from_numpy(img_t1ce)
    tensor_t2 = torch.from_numpy(img_t2)
    del img_flair, img_t1, img_t1ce, img_t2

    # concat the tensors and then feed them to the model
    tensor_concat = torch.cat([tensor_flair, tensor_t1, inputB, tensor_t2], 0)
    tensor_concat = torch.unsqueeze(tensor_concat, 0)
    tensor_concat = tensor_concat.cuda()

    #convert to variable.
    # If you are using PyTorch version > 0.3, then you don't need this
    tensor_concat_var = torch.autograd.Variable(tensor_concat, volatile=True)

    # Average ensembling at multiple resolutions.
    # We found experimentally that ensembling at the original resolution and at 144x144x144 gives the best results.
    test_resolutions = [None, (144, 144, 144)]
    output = None
    for res in test_resolutions:
        if output is not None:
            # test at the scaled resolution and combine them
            output = output + model(tensor_concat_var, inp_res=res)
        else:
            # test at the original resolution
            output = model(tensor_concat_var, inp_res=res)
    output = output / len(test_resolutions)
    del tensor_concat, tensor_concat_var

    # convert the output to segmentation mask and move to CPU
    output_ = output[0].max(0)[1].data.byte().cpu().numpy()


    output_numpy = np.zeros_like(gth_dummy)
    output_numpy[wi_st:wi_en, hi_st:hi_en, ch_st:ch_en] = output_

    # rename the label 3 back to label 4
    output_numpy[output_numpy == 3] = 4

    # Reload the FLAIR image so its header and affine can be reused
    img1 = nib.load(imgLoc)
    img1_new = nib.Nifti1Image(output_numpy, img1.affine, img1.header)
    name = imgLoc.replace('_flair', '').split('/')[-1]
    out_dir = './predictions'
    if not os.path.isdir(out_dir):
        os.mkdir(out_dir)
    file_name = out_dir + os.sep + name
    nib.save(img1_new, file_name)
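
A hedged usage sketch for the function above: loop over BraTS-style FLAIR files and segment each one. The glob pattern, the checkpoint name, and the pre-built net are illustrative assumptions, not the repository's actual entry point (which constructs the model from command-line arguments).

import glob

import torch

# Assumed: 'net' is an already-constructed 3D-ESPNet instance and
# 'espnet_weights.pth' a matching checkpoint; both names are illustrative.
net.load_state_dict(torch.load('espnet_weights.pth'))
net = net.cuda().eval()

for flair_path in glob.glob('./BRATS/*/*_flair.nii.gz'):
    segmentVoxel(flair_path, net)  # predictions are written to ./predictions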
Code example #59
0
def run_command(context, n_gif=0, thr_increment=None, resume_training=False):
    """Run main command.

    This function is central in the ivadomed project as training / testing / evaluation commands are run via this
    function. All the process parameters are defined in the config.

    Args:
        context (dict): Dictionary containing all parameters that are needed for a given process. See
            :doc:`configuration_file` for more details.
        n_gif (int): Generates a GIF during training if larger than zero, one frame per epoch for a given slice. The
            parameter indicates the number of 2D slices used to generate GIFs, one GIF per slice. A GIF shows
            predictions of a given slice from the validation sub-dataset. They are saved within the log directory.
        thr_increment (float): A threshold analysis is performed at the end of the training using the trained model and
            the training + validation sub-dataset to find the optimal binarization threshold. The specified value
            indicates the increment between 0 and 1 used during the ROC analysis (e.g. 0.1).
        resume_training (bool): Load a saved model ("checkpoint.pth.tar" in the log_directory) to resume
            training. This training state is saved every time a new best model is saved in the log
            directory.

    Returns:
        Float or pandas DataFrame:
        If "train" command: Returns floats: the best dice and loss scores for both training and validation.
        If "test" command: Returns a pandas DataFrame of metrics computed for each subject of the testing
            sub-dataset, together with the prediction metrics computed before evaluation.
        If "segment" command: No return value.
    """
    command = copy.deepcopy(context["command"])
    log_directory = copy.deepcopy(context["log_directory"])
    if not os.path.isdir(log_directory):
        print('Creating log directory: {}'.format(log_directory))
        os.makedirs(log_directory)
    else:
        print('Log directory already exists: {}'.format(log_directory))

    # Define device
    cuda_available, device = imed_utils.define_device(context['gpu'])

    # Get subject lists. "segment" command uses all participants of BIDS path, hence no need to split
    if command != "segment":
        train_lst, valid_lst, test_lst = imed_loader_utils.get_subdatasets_subjects_list(
            context["split_dataset"],
            context['loader_parameters']['bids_path'], log_directory,
            context["loader_parameters"]['subject_selection'])

    # Loader params
    loader_params = copy.deepcopy(context["loader_parameters"])
    if command == "train":
        loader_params["contrast_params"]["contrast_lst"] = loader_params[
            "contrast_params"]["training_validation"]
    else:
        loader_params["contrast_params"]["contrast_lst"] = loader_params[
            "contrast_params"]["testing"]
    if "FiLMedUnet" in context and context["FiLMedUnet"]["applied"]:
        loader_params.update(
            {"metadata_type": context["FiLMedUnet"]["metadata"]})

    # Load metadata necessary to balance the loader
    if context['training_parameters']['balance_samples']['applied'] and \
            context['training_parameters']['balance_samples']['type'] != 'gt':
        loader_params.update({
            "metadata_type":
            context['training_parameters']['balance_samples']['type']
        })

    # Get transforms for each subdataset
    transform_train_params, transform_valid_params, transform_test_params = \
        imed_transforms.get_subdatasets_transforms(context["transformation"])

    # MODEL PARAMETERS
    model_params = copy.deepcopy(context["default_model"])
    model_params["folder_name"] = copy.deepcopy(context["model_name"])
    model_context_list = [
        model_name for model_name in MODEL_LIST
        if model_name in context and context[model_name]["applied"]
    ]
    if len(model_context_list) == 1:
        model_params["name"] = model_context_list[0]
        model_params.update(context[model_context_list[0]])
    elif 'Modified3DUNet' in model_context_list and 'FiLMedUnet' in model_context_list and len(
            model_context_list) == 2:
        model_params["name"] = 'Modified3DUNet'
        for i in range(len(model_context_list)):
            model_params.update(context[model_context_list[i]])
    elif len(model_context_list) > 1:
        print(
            'ERROR: Several models are selected in the configuration file: {}. '
            'Please select only one (i.e. only one where: "applied": true).'.
            format(model_context_list))
        exit()

    model_params['is_2d'] = False if "Modified3DUNet" in model_params[
        'name'] else model_params['is_2d']
    # Get in_channel from contrast_lst
    if loader_params["multichannel"]:
        model_params["in_channel"] = len(
            loader_params["contrast_params"]["contrast_lst"])
    else:
        model_params["in_channel"] = 1
    # Get out_channel from target_suffix
    model_params["out_channel"] = len(loader_params["target_suffix"])
    # If multi-class output, then add background class
    if model_params["out_channel"] > 1:
        model_params.update({"out_channel": model_params["out_channel"] + 1})
    # Display the selected model parameters as a sanity check
    imed_utils.display_selected_model_spec(params=model_params)
    # Update loader params
    if 'object_detection_params' in context:
        object_detection_params = context['object_detection_params']
        object_detection_params.update({
            "gpu":
            context['gpu'],
            "log_directory":
            context['log_directory']
        })
        loader_params.update(
            {"object_detection_params": object_detection_params})

    loader_params.update({"model_params": model_params})

    # TESTING PARAMS
    # Aleatoric uncertainty
    if context['uncertainty'][
            'aleatoric'] and context['uncertainty']['n_it'] > 0:
        transformation_dict = transform_train_params
    else:
        transformation_dict = transform_test_params
    undo_transforms = imed_transforms.UndoCompose(
        imed_transforms.Compose(transformation_dict, requires_undo=True))
    testing_params = copy.deepcopy(context["training_parameters"])
    testing_params.update({'uncertainty': context["uncertainty"]})
    testing_params.update({
        'target_suffix': loader_params["target_suffix"],
        'undo_transforms': undo_transforms,
        'slice_axis': loader_params['slice_axis']
    })
    if command == "train":
        imed_utils.display_selected_transfoms(transform_train_params,
                                              dataset_type=["training"])
        imed_utils.display_selected_transfoms(transform_valid_params,
                                              dataset_type=["validation"])
    elif command == "test":
        imed_utils.display_selected_transfoms(transformation_dict,
                                              dataset_type=["testing"])

    # Check if multiple raters
    if any([
            isinstance(class_suffix, list)
            for class_suffix in loader_params["target_suffix"]
    ]):
        print(
            "\nAnnotations from multiple raters will be used during model training, one annotation from one rater "
            "randomly selected at each iteration.\n")
        if command != "train":
            print(
                "\nERROR: Please provide only one annotation per class in 'target_suffix' when not training a model.\n"
            )
            exit()

    if command == 'train':
        # LOAD DATASET
        # Get Validation dataset
        ds_valid = imed_loader.load_dataset(**{
            **loader_params,
            **{
                'data_list': valid_lst,
                'transforms_params': transform_valid_params,
                'dataset_type': 'validation'
            }
        },
                                            device=device,
                                            cuda_available=cuda_available)
        # Get Training dataset
        ds_train = imed_loader.load_dataset(**{
            **loader_params,
            **{
                'data_list': train_lst,
                'transforms_params': transform_train_params,
                'dataset_type': 'training'
            }
        },
                                            device=device,
                                            cuda_available=cuda_available)

        metric_fns = imed_metrics.get_metric_fns(ds_train.task)

        # If FiLM, normalize data
        if 'film_layers' in model_params and any(model_params['film_layers']):
            # Normalize metadata before sending to the FiLM network
            results = imed_film.get_film_metadata_models(
                ds_train=ds_train,
                metadata_type=model_params['metadata'],
                debugging=context["debugging"])
            ds_train, train_onehotencoder, metadata_clustering_models = results
            ds_valid = imed_film.normalize_metadata(
                ds_valid, metadata_clustering_models, context["debugging"],
                model_params['metadata'])
            model_params.update({
                "film_onehotencoder":
                train_onehotencoder,
                "n_metadata":
                len([ll for l in train_onehotencoder.categories_ for ll in l])
            })
            joblib.dump(metadata_clustering_models,
                        "./" + log_directory + "/clustering_models.joblib")
            joblib.dump(train_onehotencoder,
                        "./" + log_directory + "/one_hot_encoder.joblib")

        # Model directory
        path_model = os.path.join(log_directory, context["model_name"])
        if not os.path.isdir(path_model):
            print('Creating model directory: {}'.format(path_model))
            os.makedirs(path_model)
            if 'film_layers' in model_params and any(
                    model_params['film_layers']):
                joblib.dump(train_onehotencoder,
                            os.path.join(path_model, "one_hot_encoder.joblib"))
                if 'metadata_dict' in ds_train[0]['input_metadata'][0]:
                    metadata_dict = ds_train[0]['input_metadata'][0][
                        'metadata_dict']
                    joblib.dump(
                        metadata_dict,
                        os.path.join(path_model, "metadata_dict.joblib"))

        else:
            print('Model directory already exists: {}'.format(path_model))

        save_config_file(context, log_directory)

        # RUN TRAINING
        best_training_dice, best_training_loss, best_validation_dice, best_validation_loss = imed_training.train(
            model_params=model_params,
            dataset_train=ds_train,
            dataset_val=ds_valid,
            training_params=context["training_parameters"],
            log_directory=log_directory,
            device=device,
            cuda_available=cuda_available,
            metric_fns=metric_fns,
            n_gif=n_gif,
            resume_training=resume_training,
            debugging=context["debugging"])

    if thr_increment:
        # LOAD DATASET
        if command != 'train':  # if command == train, ds_valid is already loaded
            # Get Validation dataset
            ds_valid = imed_loader.load_dataset(**{
                **loader_params,
                **{
                    'data_list': valid_lst,
                    'transforms_params': transform_valid_params,
                    'dataset_type': 'validation'
                }
            },
                                                device=device,
                                                cuda_available=cuda_available)
        # Get Training dataset with no Data Augmentation
        ds_train = imed_loader.load_dataset(**{
            **loader_params,
            **{
                'data_list': train_lst,
                'transforms_params': transform_valid_params,
                'dataset_type': 'training'
            }
        },
                                            device=device,
                                            cuda_available=cuda_available)

        # Choice of optimisation metric
        metric = "recall_specificity" if model_params[
            "name"] in imed_utils.CLASSIFIER_LIST else "dice"
        # Model path
        model_path = os.path.join(log_directory, "best_model.pt")
        # Run analysis
        thr = imed_testing.threshold_analysis(model_path=model_path,
                                              ds_lst=[ds_train, ds_valid],
                                              model_params=model_params,
                                              testing_params=testing_params,
                                              metric=metric,
                                              increment=thr_increment,
                                              fname_out=os.path.join(
                                                  log_directory, "roc.png"),
                                              cuda_available=cuda_available)

        # Update threshold in config file
        context["postprocessing"]["binarize_prediction"] = {"thr": thr}
        save_config_file(context, log_directory)

    if command == 'train':
        return best_training_dice, best_training_loss, best_validation_dice, best_validation_loss

    if command == 'test':
        # LOAD DATASET
        ds_test = imed_loader.load_dataset(**{
            **loader_params,
            **{
                'data_list': test_lst,
                'transforms_params': transformation_dict,
                'dataset_type': 'testing',
                'requires_undo': True
            }
        },
                                           device=device,
                                           cuda_available=cuda_available)

        metric_fns = imed_metrics.get_metric_fns(ds_test.task)

        if 'film_layers' in model_params and any(model_params['film_layers']):
            clustering_path = os.path.join(log_directory,
                                           "clustering_models.joblib")
            metadata_clustering_models = joblib.load(clustering_path)
            ohe_path = os.path.join(log_directory, "one_hot_encoder.joblib")
            one_hot_encoder = joblib.load(ohe_path)
            ds_test = imed_film.normalize_metadata(ds_test,
                                                   metadata_clustering_models,
                                                   context["debugging"],
                                                   model_params['metadata'])
            model_params.update({
                "film_onehotencoder":
                one_hot_encoder,
                "n_metadata":
                len([ll for l in one_hot_encoder.categories_ for ll in l])
            })

        # RUN INFERENCE
        pred_metrics = imed_testing.test(
            model_params=model_params,
            dataset_test=ds_test,
            testing_params=testing_params,
            log_directory=log_directory,
            device=device,
            cuda_available=cuda_available,
            metric_fns=metric_fns,
            postprocessing=context['postprocessing'])

        # RUN EVALUATION
        df_results = imed_evaluation.evaluate(
            bids_path=loader_params['bids_path'],
            log_directory=log_directory,
            target_suffix=loader_params["target_suffix"],
            eval_params=context["evaluation_parameters"])
        return df_results, pred_metrics

    if command == 'segment':
        bids_ds = bids.BIDS(context["loader_parameters"]["bids_path"])
        df = bids_ds.participants.content
        subj_lst = df['participant_id'].tolist()
        bids_subjects = [
            s for s in bids_ds.get_subjects()
            if s.record["subject_id"] in subj_lst
        ]

        # Add postprocessing to packaged model
        path_model = os.path.join(context['log_directory'],
                                  context['model_name'])
        path_model_config = os.path.join(path_model,
                                         context['model_name'] + ".json")
        model_config = imed_config_manager.load_json(path_model_config)
        model_config['postprocessing'] = context['postprocessing']
        with open(path_model_config, 'w') as fp:
            json.dump(model_config, fp, indent=4)

        options = None
        for subject in bids_subjects:
            if context['loader_parameters']['multichannel']:
                fname_img = []
                provided_contrasts = []
                contrasts = context['loader_parameters']['contrast_params'][
                    'testing']
                # Keep contrast order
                for c in contrasts:
                    for s in bids_subjects:
                        if subject.record['subject_id'] == s.record[
                                'subject_id'] and s.record['modality'] == c:
                            provided_contrasts.append(c)
                            fname_img.append(s.record['absolute_path'])
                            bids_subjects.remove(s)
                if len(fname_img) != len(contrasts):
                    logger.warning(
                        "Missing contrast for subject {}. {} were provided but {} are required. Skipping "
                        "subject.".format(subject.record['subject_id'],
                                          provided_contrasts, contrasts))
                    continue
            else:
                fname_img = [subject.record['absolute_path']]

            if 'film_layers' in model_params and any(
                    model_params['film_layers']) and model_params['metadata']:
                subj_id = subject.record['subject_id']
                metadata = df[df['participant_id'] == subj_id][
                    model_params['metadata']].values[0]
                options = {'metadata': metadata}
            pred_list, target_list = imed_inference.segment_volume(
                path_model,
                fname_images=fname_img,
                gpu_number=context['gpu'],
                options=options)
            pred_path = os.path.join(context['log_directory'], "pred_masks")
            if not os.path.exists(pred_path):
                os.makedirs(pred_path)

            for pred, target in zip(pred_list, target_list):
                filename = subject.record['subject_id'] + "_" + subject.record['modality'] + target + "_pred" + \
                           ".nii.gz"
                nib.save(pred, os.path.join(pred_path, filename))
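
For orientation, a minimal sketch of the kind of context dictionary this function consumes. The keys are the ones read above; the values are illustrative placeholders only, and a real ivadomed configuration file contains many more entries:

context = {
    "command": "train",
    "log_directory": "./logs/my_experiment",
    "gpu": 0,
    "model_name": "my_model",
    "debugging": False,
    "split_dataset": {"train_fraction": 0.6, "test_fraction": 0.2},
    "loader_parameters": {
        "bids_path": "./data",
        "target_suffix": ["_seg-manual"],
        "contrast_params": {"training_validation": ["T2w"],
                            "testing": ["T2w"]},
        "multichannel": False,
        "slice_axis": "axial",
        "subject_selection": {},
    },
    "default_model": {"name": "Unet", "is_2d": True},
    "training_parameters": {"balance_samples": {"applied": False,
                                                "type": "gt"}},
    "transformation": {},
    "uncertainty": {"aleatoric": False, "n_it": 0},
    "postprocessing": {},
    "evaluation_parameters": {},
}

best_scores = run_command(context)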
Code example #60
0
File: lesions.py Project: neurospin/scripts
                    #print(taffine)
                    break
                buf = np.zeros(tshape, dtype=ttype)
                # create a 4D buffer to store all tissue sub-types of one lesion in a single file
                i = 0
                for tissu in tissues:
                    if tissu in masks[lesi]:
                        print(tissu)
                        print(lesi)
                        d = masks[lesi][tissu].get_data()
                        if tissu == 'edema':
                            d[d != 0] = 1
                        elif tissu == 'enh':
                            d[d != 0] = 2
                        elif tissu == 'necrosis':
                            d[d != 0] = 3
                        print("---> lesion ", lesi, "-", tissu, ": ",
                              np.unique(d))
                        buf[:, :, :, i] = d
                        i += 1

                flesion = '{}_{}_mask_lesion-{}'.format(s, model,
                                                        lesi) + '.nii.gz'
                f = os.path.join(path, s, model, flesion)
                print(f)
                ni.save(ni.Nifti1Image(buf, affine=taffine), f)
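
The tissue-to-label encoding above (edema -> 1, enh -> 2, necrosis -> 3) can also be factored into a small helper; a minimal sketch, assuming the same convention of marking every non-zero voxel with the tissue's fixed code:

import numpy as np

# fixed label code per tissue sub-type, mirroring the chain above
LABELS = {'edema': 1, 'enh': 2, 'necrosis': 3}

def encode_tissue(mask_data, tissue):
    """Return a copy of mask_data with every non-zero voxel set to the tissue's label."""
    out = np.zeros_like(mask_data)
    out[mask_data != 0] = LABELS[tissue]
    return out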