Example #1
def intersect(a, b, new):
    # need to automate expansion of primes

    head, tail = split(new)

    a_Load = load(a)
    b_Load = load(b)

    a_Data = a_Load.get_data()
    b_Data = b_Load.get_data()

    # Encode each mask with a distinct prime: voxels in mask a become 3 and
    # voxels in mask b become 7, while background voxels become 1, so the
    # voxelwise product is 3 (a only), 7 (b only) or 21 (intersection).
    a_Data[np.where(a_Data != 0)] = 3
    b_Data[np.where(b_Data != 0)] = 7

    a_Data[np.where(a_Data == 0)] = 1
    b_Data[np.where(b_Data == 0)] = 1

    c_Data = a_Data * b_Data

    # Save one binary volume per unique non-background product value
    unq = np.unique(c_Data)
    for u in unq:
        if u != 1:
            out = bin(c_Data, u, 1)
            aff = a_Load.affine
            header = a_Load.header
            label = '{}_{}'.format(tail, int(u))

            saveNifti(out, aff, header, head, label)
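The prime-product trick is easiest to see on a toy pair of masks. A minimal sketch, assuming only numpy:

import numpy as np

a = np.array([1, 1, 0, 0])      # mask a
b = np.array([1, 0, 1, 0])      # mask b
a_enc = np.where(a != 0, 3, 1)  # inside a -> 3, background -> 1
b_enc = np.where(b != 0, 7, 1)  # inside b -> 7, background -> 1
print(a_enc * b_enc)            # [21  3  7  1]: 21 = 3*7 marks the intersection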
Example #2
def atlas_from_freesurfer_masks(sub,
                                mask_dir,
                                roi_ids,
                                rh_constant=200,
                                overwrite=False):
    '''
    Create atlas from individual binary masks produced by freesurfer's 'mri_label2vol' tool

    Args:
        sub (int):
            subject number
        mask_dir (str):
            path to subject's folder with freesurfer output
        roi_ids (dict):
            {target_ROI: Left Hemisphere ROI index}
        rh_constant (int):
            Right Hemisphere ROI index := Left Hemisphere ROI index + rh_constant
        overwrite (bool):
            If True, delete the old atlas and create a new one

    Returns:
        atlas (Nifti1Image):
            T1w Atlas with voxel values indicative of ROI identity (as defined in 'roi_ids')
    '''
    atlas_path = os.path.join(
        mask_dir, "sub-" + str(sub).zfill(2) + "_Mask_T1w_Glasser.nii.gz")

    if os.path.isfile(atlas_path) and not overwrite:
        atlas = nifti1.load(atlas_path)
    else:
        # create atlas
        hemi_dict = {'L': 'lh.L_', 'R': 'rh.R_'}
        hemi_add = {'L': 0, 'R': rh_constant}
        mask_array = []
        mask_array_tmp = []
        for roi in roi_ids.keys():
            for hemi in ['L', 'R']:
                mask_raw = nifti1.load(
                    os.path.join(mask_dir,
                                 hemi_dict[hemi] + roi + '_ROI.fbr.nii.gz'))
                if len(mask_array) == 0:
                    mask_array_tmp = mask_raw.get_fdata()
                    # convert the binary mask by assigning the roi_id to masked voxels
                    mask_array_tmp[mask_array_tmp == 1] = roi_ids[roi][0] + hemi_add[hemi]
                    mask_array = mask_array_tmp.copy()
                    freesurfer_affine = mask_raw.affine.copy()
                else:
                    mask_array_tmp = mask_raw.get_fdata()
                    mask_array[mask_array_tmp == 1] = roi_ids[roi][0] + hemi_add[hemi]
        atlas = nifti1.Nifti1Image(mask_array, freesurfer_affine)
        nifti1.save(atlas, atlas_path)
    return atlas
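A hypothetical call might look as follows (paths and ROI indices are placeholders; as the indexing above assumes, each roi_ids value is a sequence whose first element is the left-hemisphere index):

roi_ids = {'V1': [1], 'MT': [23]}  # hypothetical Glasser-style indices
atlas = atlas_from_freesurfer_masks(sub=1,
                                    mask_dir='/data/derivatives/freesurfer/sub-01/mri',
                                    roi_ids=roi_ids,
                                    rh_constant=200,
                                    overwrite=True)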
Example #3
def nodeParc(inFiles, outFile):

    print('Running nodeParc')
    head, tail = split(outFile)

    m = load(inFiles[0])
    aff, header = m.affine, m.header
    complete = np.ones(m.get_data().shape, dtype=int)

    labLUT, numLUT = [], []

    f = next_prime(2)

    for a in inFiles:

        a_head, a_tail = split(a)
        a_Load = load(a)
        a_Data = a_Load.get_data()

        o = bin(a_Data, 1, f)
        o[np.where(o == 0)] = 1

        complete = complete * o

        numLUT.append(f)
        labLUT.append(a_tail)

        print('f         {} a_tail  {}'.format(f, a_tail))

        f = next_prime(f)

    complete[np.where(complete == 1)] = 0
    saveNifti(complete, aff, header, head, tail)

    print('{} Values: {}'.format(len(numLUT), numLUT))
    print('{} Values: {}'.format(len(np.unique(complete)),
                                 np.unique(complete)))

    csvPath = join(head, tail)
    with open(csvPath, 'w', newline='\n') as csvfile:
        spamwriter = csv.writer(csvfile,
                                delimiter=' ',
                                quotechar='|',
                                quoting=csv.QUOTE_MINIMAL)
        for i in range(len(labLUT)):
            spamwriter.writerow([numLUT[i], labLUT[i]])

    return True
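For context, a sketch of how nodeParc might be invoked (paths are placeholders; load, bin, saveNifti and next_prime are helpers from this module):

# Each ROI file is encoded with the next prime, so overlapping voxels
# end up with composite values whose prime factors identify the ROIs.
nodeParc(['/rois/lh_V1.nii.gz', '/rois/rh_V1.nii.gz'], '/out/parcellation.nii.gz')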
Example #4
def load_EPI(epi_file, only_volume=True):
    EPI_nni = nifti1.load(epi_file)
    EPI = EPI_nni.get_data()
    if only_volume:
        EPI = EPI[..., 0]
    # pixdim[1:4] holds the spatial voxel sizes (pixdim[4] would be the TR)
    EPI_voxel_sizes = EPI_nni.get_header()['pixdim'][1:4]
    return EPI, EPI_voxel_sizes
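A minimal usage sketch (the path is a placeholder):

EPI, EPI_voxel_sizes = load_EPI('/data/sub-01_bold.nii.gz')
print(EPI.shape, EPI_voxel_sizes)  # first 3-D volume and its voxel sizes in mm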
Example #6
def apply_roi_mask_4d(image, mask, brain_mask_path=None, method='nilearn'):
    '''
    Apply mask to a 4d image and store resulting vectors in a measurements array (later input for pyrsa.dataset())

    '''

    # Apply brain mask
    if isinstance(brain_mask_path, str):
        brain_mask = nifti1.load(brain_mask_path)
        image_array = apply_brain_mask(image, brain_mask)
    else:
        image_array = image.get_fdata()

    if method == 'nilearn':
        image_tmp = nifti1.Nifti1Image(image_array, mask.affine.copy())
        masked = masking.apply_mask(image_tmp,
                                    mask,
                                    dtype='f',
                                    smoothing_fwhm=None,
                                    ensure_finite=True)
    elif method == 'custom':
        # Alternative to nilearn's implementation (allows for plotting of masked 3d image)
        mask_array = mask.get_fdata()
        masked = image_array[mask_array.astype(bool)]
        masked = masked.T
    elif method == '3d':
        mask_array = mask.get_fdata()
        image_masked = np.multiply(mask_array, image_array)
        masked = nifti1.Nifti1Image(image_masked, mask.affine.copy())
    else:
        raise ValueError("method must be 'nilearn', 'custom' or '3d'")

    return masked
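A hypothetical call, assuming a 4-D BOLD image and a binary ROI mask (paths are placeholders):

image = nifti1.load('/data/sub-01_task-perception_bold.nii.gz')
mask = nifti1.load('/data/masks/sub-01_V1.nii.gz')
measurements = apply_roi_mask_4d(image, mask, method='nilearn')
# measurements: array of shape (n_volumes, n_voxels_in_mask)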
Example #7
def train_autoencoder(self, network_parameters, learning_rate):
     # network_parameters = Autoencoder(folder, 10, 2).parameters()
     optimiser = optim.SGD(network_parameters,
                           lr=learning_rate,
                           momentum=0.5)
     loss_function = self.loss_function(
     )  # Self function is used to allow for effective class override
     iterator = 0
     dataset_size = str(len(self.data))
     for data in self.data:
         if os.path.splitext(data)[1] == '.hdr':  # splitext keeps the leading dot
             continue
         data = nifti1.load(data)
         data = numpy.array(data.dataobj)
         data = data.astype(numpy.float32)
         original_data = torch.from_numpy(data)
         original_data = original_data.reshape(8388608)  # flatten the volume to a 2**23-element vector
         encoded_data = self.encoder(original_data)
         encoded_decoded_data = self.decoder(encoded_data)
         loss = loss_function(encoded_decoded_data, original_data)
         loss.backward()
         optimiser.step()
         optimiser.zero_grad()
         iterator += 1
         print('Training iteration: [' + str(iterator) + ']/[' +
               dataset_size + ']. Current error: ' + str(loss.data.item()))
Example #9
def buildParcellation(roiDir, outPath):

    head, tail = split(outPath)
    rois = [
        join(roiDir, f) for f in listdir(roiDir) if isfile(join(roiDir, f))
    ]

    m = load(rois[0])
    aff = m.affine
    header = m.header

    m_Data = m.get_data()

    complete = np.zeros(m_Data.shape, dtype=int)  # zeros, not np.empty: np.empty starts with garbage values
    labLUT = []
    numLUT = []
    f = 1
    for a in rois:

        a_head, a_tail = split(a)
        print(a)
        a_Load = load(a)
        a_Data = a_Load.get_data()
        o = bin(a_Data, 1, f)
        print(np.unique(o))
        complete += o

        f += 1

        #numLUT.append(f)
        labLUT.append(a_tail)

    numLUT = np.unique(complete)
    print('{} Values: {}'.format(len(numLUT), numLUT))

    saveNifti(complete, aff, header, head, tail)

    csvPath = join(head, tail)
    with open(csvPath, 'w', newline='\n') as csvfile:
        spamwriter = csv.writer(csvfile,
                                delimiter=' ',
                                quotechar='|',
                                quoting=csv.QUOTE_MINIMAL)
        for i in range(len(labLUT)):
            spamwriter.writerow([numLUT[i], labLUT[i]])
Example #10
File: io.py Project: iZehan/spatial-pbs
def open_image(filePath):
    try:
        image = nib.load(filePath)
        imageData = image.get_data()
    except Exception:
        print("Failed to open image:", filePath)
        raise
    imageData = numpy.squeeze(imageData)
    return imageData
Example #12
def load_MNI_templates(mni_file,
                       mni_brain_file=None,
                       mni_brain_mask_file=None):
    if not mni_brain_file:
        mni_brain_file = mni_file.replace('.nii', '_brain.nii')
    if not mni_brain_mask_file:
        mni_brain_mask_file = mni_file.replace('.nii', '_brain_mask.nii')
    MNI_nni = nifti1.load(mni_file)
    MNI = MNI_nni.get_data()

    MNI_brain_nii = nifti1.load(mni_brain_file)
    MNI_brain = MNI_brain_nii.get_data()

    MNI_brain_mask_nii = nifti1.load(mni_brain_mask_file)
    MNI_brain_mask = MNI_brain_mask_nii.get_data()

    voxel_sizes = MNI_nni.get_header()['pixdim'][1:4]

    return MNI, MNI_brain, MNI_brain_mask, voxel_sizes
Example #13
def calc_nvols(fmriniis):
    if isinstance(fmriniis, list):
        niiFile = fmriniis[0]
    else:
        niiFile = fmriniis
    if os.path.exists(niiFile):
        n = nii.load(niiFile)
        dims = n.shape  # get_shape() was removed from nibabel; shape is the attribute
        t = dims[3]
    else:
        t = False
    return t
Example #14
def select_volume(filename, index):
    """Return the 3D volume with timestep index from a 4D (3D + time) file
    """
    from nibabel import nifti1 as nifti
    assert isinstance(index, int)
    image = nifti.load(filename)
    fourD = image.get_data()
    print(len(fourD[..., :]))
    header = image.get_header()
    affine = header.get_best_affine()
    threeD = fourD[..., index]
    newVolume = nifti.Nifti1Image(threeD, affine=affine, header=header)
    newVolume.update_header()
    return newVolume, header
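A brief usage sketch (the filename is a placeholder):

vol, hdr = select_volume('/data/sub-01_bold.nii.gz', 0)  # extract the first timepoint
print(vol.shape)  # 3-D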
Example #15
def load(filename):
    vol = nii.load(filename)

    if np.linalg.det(vol.affine) > 0.0:  # positive determinant: flip x so all volumes share one handedness
        imgdata = vol.get_data()[::-1, ...]
        flipmat = np.array([[-1.0, 0.0, 0.0, imgdata.shape[0] - 1.0],
                            [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0],
                            [0.0, 0.0, 0.0, 1.0]])
        affine = vol.affine.dot(flipmat)
    else:
        imgdata = vol.get_data()
        affine = vol.affine

    rvol = nii.Nifti1Image(imgdata, affine)
    rvol.set_qform(affine)

    return rvol
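The flip branch can be sanity-checked with a toy affine; a minimal sketch (the identity affine and the 3-voxel x-dimension are both assumptions):

import numpy as np

aff = np.eye(4)  # det > 0, so the flip branch would be taken
flipmat = np.array([[-1.0, 0.0, 0.0, 2.0],  # 2.0 = (3 voxels along x) - 1
                    [0.0, 1.0, 0.0, 0.0],
                    [0.0, 0.0, 1.0, 0.0],
                    [0.0, 0.0, 0.0, 1.0]])
# voxel i in the flipped array maps to the world position of original voxel 2 - i
print(aff.dot(flipmat).dot([0, 0, 0, 1]))  # [2. 0. 0. 1.]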
Example #16
def map_coords(coordinates, nifti_file):
    """
    Apply the RAS+ transform in a nifti file to a set of coordinates in voxel space
    :param coordinates: Array-like with shape (3,N) or (4,N)
    :param nifti_file: path to the NIfTI file whose affine defines the transform
    :return: RAS+ coordinates, shape (3,N)
    """
    transform = nifti1.load(nifti_file).affine

    coordinates = np.asarray(coordinates, dtype=float)
    if coordinates.shape[0] == 3:
        # make the coordinates homogeneous by appending a row of ones
        coordinates = np.vstack([coordinates, np.ones(coordinates.shape[1])])

    assert coordinates.shape[0] == 4

    ras_coords = transform.dot(coordinates)
    return ras_coords[:3, :]
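A quick check against a synthetic 2 mm isotropic affine (the temporary file and toy values are illustrative only):

import os
import tempfile

import numpy as np
from nibabel import nifti1

img = nifti1.Nifti1Image(np.zeros((4, 4, 4)), np.diag([2.0, 2.0, 2.0, 1.0]))
path = os.path.join(tempfile.mkdtemp(), 'toy.nii.gz')
nifti1.save(img, path)
vox = np.array([[0, 1], [0, 0], [0, 0]])
print(map_coords(vox, path))  # x doubles under the 2 mm affine: [[0. 2.], [0. 0.], [0. 0.]]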
Example #17
def img_transform(img_file_name, save_dir):

    img_hdr = nitool.load(img_file_name)
    img_data = img_hdr.get_data()
    img_shape = img_data.shape

    # Piecewise linear intensity remapping over three input ranges:
    #   t > 100             -> t + 3000
    #   -1000 <= t <= -100  -> t + 1000
    #   -99 <= t <= 100     -> (t + 99) * 11 + 911
    t = img_data.flatten()
    t1 = np.zeros(t.size)
    t1[np.where(t > 100)] = t[np.where(t > 100)] + 3000
    t1[np.where(np.logical_and(t >= -1000, t <= -100))] = \
        t[np.where(np.logical_and(t >= -1000, t <= -100))] + 1000
    t1[np.where(np.logical_and(t >= -99, t <= 100))] = \
        (t[np.where(np.logical_and(t >= -99, t <= 100))] + 99) * 11 + 911
    trans_img = t1.reshape(img_shape)

    trans_pair = nitool.Nifti1Pair(trans_img, img_hdr.affine)
    save_name = os.path.join(save_dir, 'itrans' + '.nii.gz')
    nitool.save(trans_pair, save_name)

    return save_name
Example #18
def process_file(seed_id, nifti_path, conn=None, threshold=0.5, insert_chunks=500, table='predef_roi_raw_data_table'):
	"""Processes a NIFTI file and loads the voxel data into the database.

	@param seed_id: the seed id for this image
	@param nifti_path: the path to the file
	@param conn: a database connection to use, if None a local connection will be opened and closed
	@param threshold: omit voxels below this threshold
	@param insert_chunks: number of records to insert at a time
	@param table: the table to insert into
	"""
	if not os.path.isfile(nifti_path):
		raise Exception('Path does not name a file: ' + nifti_path)
	local_conn = conn is None
	if local_conn:
		conn = _connect()

	try:
		# load the image and get voxels of interest
		img = nifti1.load(nifti_path)
		data = img.get_data()
		coords = numpy.transpose((data > threshold).nonzero())

		# remove old data
		cur = conn.cursor()
		cur.execute('DELETE FROM `' + table + '` WHERE predef_roi_seed_id=%s', (seed_id,))

		insert = 'INSERT DELAYED INTO `' + table + '` (predef_roi_seed_id, x_loc, y_loc, z_loc, intensity) ' \
			'VALUES (%s, %s, %s, %s, %s)'

		# multi-insert in groups of insert_chunks (or remaining in last set)
#		db_val_gen = ((seed_id, x, y, z, data[z,y,x]) for z, y, x in coords)
		db_val_gen = ((seed_id, x, y, z, data[x,y,z]) for x, y, z in coords)
		for vals in _grouper(insert_chunks, db_val_gen):
			if vals[-1] is None:
				vals = [v for v in vals if v is not None]  # drop the None padding from the last chunk
			cur.executemany(insert, vals)
	finally:
		if local_conn:
			conn.close()
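The _grouper helper used above is not shown; a standard itertools-based sketch that matches the call signature and the None padding filtered in the last chunk (an assumption about the original helper, not its verified source):

from itertools import zip_longest

def _grouper(n, iterable):
	# Collect the iterable into chunks of length n; the last chunk is padded with None
	args = [iter(iterable)] * n
	return zip_longest(*args, fillvalue=None)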
Example #19
def loadNiftiDTI(basedir, basename='dti', reorient=False):
    from nibabel import nifti1
    import numpy as np
    import dtimp

    # ====== MAIN FUNCTION START ===========================
    # PRE-LOAD THE FIRST EIGENVALUE VOLUME TO GET HEADER PARAMS
    L = nifti1.load('{}/{}_L1.nii.gz'.format(basedir, basename))
    s, m, n = L.get_data().shape

    # LOAD AND BUILD EIGENVALUES VOLUME
    evl = [L.get_data()]
    evl.append(
        nifti1.load('{}/{}_L2.nii.gz'.format(basedir, basename)).get_data())
    evl.append(
        nifti1.load('{}/{}_L3.nii.gz'.format(basedir, basename)).get_data())
    evl = np.array(evl)
    evl[evl < 0] = 0

    # LOAD AND BUILD EIGENVECTORS VOLUME
    evt = [nifti1.load('{}/{}_V1.nii.gz'.format(basedir, basename)).get_data()]
    evt.append(
        nifti1.load('{}/{}_V2.nii.gz'.format(basedir, basename)).get_data())
    evt.append(
        nifti1.load('{}/{}_V3.nii.gz'.format(basedir, basename)).get_data())
    evt = np.array(evt).transpose(0, 4, 1, 2, 3)

    T = np.diag(np.ones(4))
    if reorient:
        # GET QFORM AFFINE MATRIX (see Nifti and nibabel specifications)
        T = L.get_header().get_qform()

        # COMPUTE ROTATION MATRIX TO ALIGN SAGITTAL PLANE
        R = align_sagittal_plane(T)
        evl, evt, T = dtimp.rotateDTI(evl, evt, R)

    return (evl, evt, T)
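A hypothetical call, assuming FSL-dtifit-style outputs (dti_L1.nii.gz through dti_V3.nii.gz) under a placeholder directory:

evl, evt, T = loadNiftiDTI('/data/dti', basename='dti', reorient=False)
print(evl.shape, evt.shape)  # (3, X, Y, Z) eigenvalues and (3, 3, X, Y, Z) eigenvectors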
Example #20
File: io.py Project: iZehan/spatial-pbs
def get_affine(filePath):
    image = nib.load(filePath)
    return image.get_affine()
Example #21
File: io.py Project: iZehan/spatial-pbs
def get_voxel_size(filePath):
    header = nib.load(filePath).get_header()
    return abs(header.get_base_affine()[0, 0]), abs(header.get_base_affine()[1, 1]), abs(header.get_base_affine()[2, 2])
Example #23
    # with open(ses_dir + os.sep + "TabDat_Effects-of-interest_footer.txt", newline='') as inputfile:
    #     for row in csv.reader(inputfile):
    #         footer.append(row)
    # stat_thresh = float(footer[4][0].split(' ')[1])

    # Load mask dictionaries
    mask_dir = os.path.join(ds_dir, "derivatives", "freesurfer",
                            "sub-" + str(sub).zfill(2), freesurfer_mri)
    mask_dict = mask_utils.load_dict(
        os.path.join(
            mask_dir,
            "sub-" + str(sub).zfill(2) + "_mask_dict_EPI_disjoint.npy"))

    # Load image
    img_path = os.path.join(ses_dir, image_type + ".nii")
    stat_img = nifti1.load(img_path)
    stat_img_array = stat_img.get_fdata()

    # Apply mask and statistical threshold
    for roi_h in mask_dict.keys():
        mask = mask_dict[roi_h]
        mask_array = mask.get_fdata()
        stat_img_array_masked = np.multiply(mask_array, stat_img_array)
        # stat_img_array_thresholded = (stat_img_array_masked>=stat_thresh).astype(int)

        # Save number of significant voxels in ROI
        sig_dict.update({roi_h: np.sum(stat_img_array_masked)})

        # Plotting
        if plot_img:
            stat_img_thresholded = nifti1.Nifti1Image(stat_img_array_masked,
Example #24
    if want_subplots:
        plotnum = numplots
    else:
        plotnum = 1
        
    f,pltarr = plt.subplots(plotnum,sharex=True)
    
    plt.suptitle('Intensity histogram of T2 image across 51 miccai08 subjects')
    for n,i in enumerate(subject_list):
        print(i)
        mode_image_file = os.path.join(data_dir,i,
                                       i+'_T2_masked_roi_restore_std.nii.gz')
        seg_mask_file = os.path.join(data_dir,i,
                                     'segmented',
                                     i+'_T1_brain_roi_pveseg.nii.gz')

        mode_image = niftitool.load(mode_image_file)
        seg_mask = niftitool.load(seg_mask_file)
        
        if isinstance(pltarr,np.ndarray):
            plotter = pltarr[n]
            plt.setp(plotter.get_yticklabels(),visible=False)
        else:
            plotter = pltarr
            
        plt.setp(plotter.get_yticklabels(),visible=False)
        segHist(mode_image.get_data(),seg_mask.get_data(),plotter)
        if n==numplots-1:
            break
        
    plt.xlabel('Tissue Intensity')
Example #25
File: io.py Project: iZehan/spatial-pbs
def get_header(filePath):
    image = nib.load(filePath)
    return image.get_header()
Example #27
    glm_dir = os.path.join(spm_dir, "sub-" + str(sub).zfill(2))

    perm_range = get_perm_range(glm_dir)

    for snr in snr_range:
        for perm in perm_range:
            if processing_mode in ['datasets', 'both']:
                signal_4d = None
                signal_path = os.path.join(
                    glm_dir, "sub-" + str(sub).zfill(2) + "_" + task + "_" +
                    stimulus_set + "_data_perm_mixed_" + str(perm).zfill(4) +
                    "_snr_" + str(snr) + "_signal.nii.gz")
                descriptors_path = os.path.join(
                    glm_dir, "sub-" + str(sub).zfill(2) + "_" + task + "_" +
                    stimulus_set + "_signal.csv")
                signal_4d = nifti1.load(signal_path)

            if processing_mode in ['residuals', 'both']:
                noise_4d = None
                noise_path = os.path.join(
                    glm_dir, "sub-" + str(sub).zfill(2) + "_" + task + "_" +
                    stimulus_set + "_data_perm_mixed_" + str(perm).zfill(4) +
                    "_snr_" + str(snr) + "_noise.nii.gz")
                noise_4d = nifti1.load(noise_path)

            # Generate and save pyrsa dataset and pooled residuals for each ROI
            for roi_h in mask_dict.keys():
                measurements = None
                residuals = None

                if processing_mode in ['datasets', 'both']:
Example #28
def mrtrixLUTParc(inFile, LUTfile, outFile):

    head, tail = split(outFile)

    print('Running mrtrixLUTParc')
    if '.nii.gz' not in inFile:
        inFile = '{}.nii.gz'.format(inFile)

    p = load(inFile)
    aff, header = p.affine, p.header
    pData = p.get_data()
    complete = np.zeros(pData.shape, dtype=int)
    fillLUT, numLUT, factors, label = [], [], [], []

    labDict = {}
    with open(LUTfile, 'rt') as csvfile:
        spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
        for row in spamreader:
            labDict[row[0]] = row[1]

    i = 1
    unq = np.unique(pData)
    unq = unq[unq != 0]  # drop the background label

    row = []
    row2 = []

    for u in unq:
        print("%15f" % u)
    for u in unq:

        out = bin(pData, u, i)
        complete += out

        #label = '{}_{}'.format(tail,int(u))

        #saveNifti(out, aff, header, head, label)

        facts = prime_factors(u)
        facts = [str(int(f)) for f in facts]

        label = []
        for f in facts:
            label.append(labDict[f])

        if len(facts) > 1:
            facts = " ".join(facts)
            label = "_".join(label)

        else:
            facts = "{}".format(int(facts[0]))
            label = "{}".format(label[0])
        label = label.replace('.brain.bin.nii.gz', '')
        #print("{}   {}      {}".format(i, u, facts))
        #labels = [labDict['{}'.format(int(f))] for f in facts]

        #factors.append(facts)
        #fillLUT.append(i)
        #numLUT.append(int(u))
        rowW = "{} {} {}".format(i, int(u), facts)
        rowW2 = "{} {}".format(i, label)
        print(rowW)
        print(rowW2)
        row.append(rowW)
        row2.append(rowW2)

        i += 1

    #label = '{}_{}'.format(head, )
    saveNifti(complete, aff, header, head, tail)

    print(row)
    print(row2)

    csvPath = join(head, tail)
    with open(csvPath, 'w', newline='\n') as csvfile:
        spamwriter = csv.writer(csvfile)  #, delimiter=' ',
        #quotechar='|', quoting=csv.QUOTE_MINIMAL)
        #for j in range(len(fillLUT)):
        #    spamwriter.writerow([fillLUT[j], numLUT[j], factors[j]])
        for r in range(len(row)):
            spamwriter.writerow([row[r]])

    csvPath = join(head, "{}_2".format(tail))
    with open(csvPath, 'w', newline='\n') as csvfile:
        spamwriter = csv.writer(csvfile)  #, delimiter=' ',
        # quotechar='|', quoting=csv.QUOTE_MINIMAL)
        #for j in range(len(fillLUT)):
        #    spamwriter.writerow([fillLUT[j], numLUT[j], factors[j]])
        for r in range(len(row2)):
            spamwriter.writerow([row2[r]])

    return True
Example #29
def mrtrixLUTParc3(inFile, LUTfile, outFile, noPrimes=False):

    head, tail = split(outFile)

    print('Running mrtrixLUTParc3')
    if '.nii.gz' not in inFile:
        inFile = '{}.nii.gz'.format(inFile)

    p = load(inFile)
    aff, header = p.affine, p.header
    pData = p.get_data()
    complete = np.zeros(pData.shape, dtype=int)
    #fillLUT, numLUT, factors, label     = [], [], [], []

    temp_fill = []
    temp_prime = []
    temp_labs = []
    temp_factors_fill = []

    with open(LUTfile, 'rt') as csvfile:
        spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
        for row in spamreader:
            temp_fill.append(row[0])
            temp_prime.append(row[1])
            temp_labs.append(row[2])
            if not noPrimes:
                temp_factors_fill.append(row[3])

        prime2fill = dict(
            zip(
                np.array(temp_prime).astype(int),
                np.array(temp_fill).astype(int)))
        prime2lab = dict(zip(np.array(temp_prime).astype(int), temp_labs))
        if not noPrimes:
            prime2factors_fill = dict(
                zip(np.array(temp_prime).astype(int), temp_factors_fill))

    #i = 1
    unq = np.unique(pData).astype(int)
    unq = unq[unq != 0]

    row = []
    row2 = []

    for u in unq:
        print("%15f" % u)
    for u in unq:
        if u in prime2fill.keys():

            out = bin(pData, u, prime2fill[u])
            complete += out

            #fill2fill = [ f.split('_') for f in prime2factors_fill[u]]
            if not noPrimes:
                rowW = " ".join([
                    str(prime2fill[u]),
                    str(u),
                    str(prime2factors_fill[u]), prime2lab[u]
                ])
            else:
                rowW = " ".join([str(prime2fill[u]), str(u), prime2lab[u]])

            print(rowW)
            row.append(rowW)

            #i += 1
        else:
            print('not in dict')
            print(u)

    saveNifti(complete, aff, header, head, tail)

    csvPath = join(head, tail.replace('.nii.gz', '.txt'))
    with open(csvPath, 'w', newline='\n') as csvfile:
        spamwriter = csv.writer(csvfile)

        for r in range(len(row)):
            spamwriter.writerow([row[r]])

    return True
Example #30
data_folder = sys.argv[1]

gm_filename = r'c1reg.nii'
wm_filename = r'c2reg.nii'
csf_filename = r'c3reg.nii'

sublist_filename = sys.argv[2]

with open(sublist_filename, 'rt') as sublistfile:
    lines = sublistfile.readlines()
sublist = [k.rstrip() for k in lines]

#%%

for i in sublist:
    print(i)
    gm_file = nitool.load(os.path.join(data_folder, i, gm_filename))
    gm_img = gm_file.get_data()

    wm_file = nitool.load(os.path.join(data_folder, i, wm_filename))
    wm_img = wm_file.get_data()

    csf_file = nitool.load(os.path.join(data_folder, i, csf_filename))
    csf_img = csf_file.get_data()

    tiv_img = gm_img + wm_img + csf_img
    tiv_file = nitool.Nifti1Image(tiv_img, gm_file.affine)
    tiv_filename = os.path.join(data_folder, i, 'tivmask_image.nii.gz')
    print(tiv_filename)
    nitool.save(tiv_file, tiv_filename)
Example #31
def nodeParc2(inFiles, outFile, LUT):

    print('Running nodeParc2')
    head, tail = split(outFile)

    m = load(inFiles[0])
    aff, header = m.affine, m.header
    complete = np.ones(m.get_data().shape, dtype=int)

    labLUT, numLUT = [], []

    rowsOut = []

    with open(LUT, 'rt') as csvfile:
        spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
        for row in spamreader:
            if row:
                for path in inFiles:
                    if row[2] in path:

                        a = path
                        a_head, a_tail = split(a)
                        a_Load = load(a)
                        a_Data = a_Load.get_data()

                        o = bin(a_Data, 1, int(row[1]))  # the LUT stores the fill value as a string
                        o[np.where(o == 0)] = 1

                        complete = complete * o

                        numLUT.append(row[1])
                        labLUT.append(row[2])
                        rowsOut.append('{} {}'.format(row[1], row[2]))  # otherwise the LUT csv below is empty

                        print('{}      {}     {}'.format(
                            row[1], row[2], a_tail))

    complete[np.where(complete == 1)] = 0
    saveNifti(complete, aff, header, head, tail)
    nc = np.unique(complete)
    print('{} Values: {}'.format(len(numLUT), numLUT))
    print('{} Values: {}'.format(len(nc), nc))

    csvPath = join(head, tail)
    with open(csvPath, 'w', newline='\n') as csvfile:
        spamwriter = csv.writer(csvfile,
                                delimiter=' ',
                                quotechar='|',
                                quoting=csv.QUOTE_MINIMAL)
        for r in rowsOut:
            spamwriter.writerow([r])

    csvPath = "{}_cumulative".format(LUT)
    with open(csvPath, 'a', newline='\n') as csvfile:
        spamwriter = csv.writer(csvfile,
                                delimiter=' ',
                                quotechar='|',
                                quoting=csv.QUOTE_MINIMAL)
        for r in nc:
            spamwriter.writerow([r])

    return True
Example #32
File: io.py Project: iZehan/spatial-pbs
def get_data_shape(filePath):
    return nib.load(filePath).get_header().get_data_shape()[:3]
Example #33
def apply_roi_mask_SPM(glm_dir,
                       img_num,
                       mask,
                       target='betas',
                       method='nilearn'):
    '''
    Load 3-D image for prespecified beta-coefficient or residual and apply mask to get a pattern vector
    This function assumes that beta and residual images are in the same directory and follow the SPM naming convention
    
    Args:
        glm_dir (str):
            Path to SPM outputs
        img_num (int):
            Stimulus number
        mask (Nifti1Image):
            To be applied mask (must have same dimensions as the GLM betas image)
        target (str):
            SPM output to load: 'betas' or 'residuals'
        method (str):
            Method for applying the mask

    Returns:
        beta_vector (1-D array OR Nifti Image):
            Vector of Beta-coefficients for each voxel in mask OR Nifti Image after masking
    '''
    # Check args
    if target not in ['betas', 'residuals']:
        print(
            target,
            "is not a valid SPM output descriptor. Please use 'betas' or 'residuals'"
        )
    if method not in ['nilearn', 'custom', '3d']:
        print(
            method,
            "is not a valid method descriptor. Please use 'nilearn', 'custom', or '3d'"
        )

    # Load image
    if target == 'betas':
        betas_path = os.path.join(glm_dir,
                                  "beta_" + str(img_num).zfill(4) + ".nii")
        image = nifti1.load(betas_path)
    elif target == 'residuals':
        residuals_path = os.path.join(glm_dir,
                                      "Res_" + str(img_num).zfill(4) + ".nii")
        image = nifti1.load(residuals_path)

    # Apply mask
    if method == 'nilearn':
        masked = masking.apply_mask(image,
                                    mask,
                                    dtype='f',
                                    smoothing_fwhm=None,
                                    ensure_finite=True)
    elif method == 'custom':
        # Alternative to nilearn's implementation (allows for plotting of masked 3d image)
        mask_array = mask.get_fdata()
        image_array = image.get_fdata()
        masked = image_array[mask_array.astype(bool)]
    elif method == '3d':
        mask_array = mask.get_fdata()
        image_array = image.get_fdata()
        image_masked = np.multiply(mask_array, image_array)
        masked = nifti1.Nifti1Image(image_masked, mask.affine.copy())

    return masked
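A hypothetical call (paths are placeholders):

mask = nifti1.load('/data/masks/sub-01_V1.nii.gz')
beta_vector = apply_roi_mask_SPM('/data/glm/sub-01', 1, mask,
                                 target='betas', method='nilearn')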
Example #34
def load_vol(fname):
    return nii.load(fname)
Example #37
fwhm             = np.array(voxel_dimensions['EPI']) # For mask smoothing prior to downsampling
mask_threshold   = 'adaptive' # Options: 'adaptive', a natural number (absolute threshold) or a real number between 0 and 1
mask_merging     = True # Merge several small ROIs
rh_constant      = 200 #right hemisphere constant (roi_id in rh := roi_id in lh + rh_constant)
if mask_merging:
    merge_list   = [tuple(['PeEc', 'TF']), tuple('PHA%d' % i for i in range(1,4)), tuple('VMV%d' % i for i in range(1,4)), tuple(['MT', 'MST'])]
    merged_names = ['IT','PHA', 'VMV', 'MT+MST']

##############################################################################

for sub in range(1, n_subs+1):     
    
    # Set paths
    mask_dir = os.path.join(ds_dir, "derivatives", "freesurfer","sub-" + str(sub).zfill(2), freesurfer_mri)
    glm_dir_example = os.path.join(spm_dir, "sub-"+str(sub).zfill(2), 'ses-perceptionTest' + '01', "run-01")
    betas_example = nifti1.load(glm_dir_example + os.sep + "beta_0001.nii")
    roi_ids = mask_utils.get_roi_ids_glasser(txt_dir, target_ROIs)
    
    # Create T1w atlas from freesurfer single ROI nifti files
    atlas_o = mask_utils.atlas_from_freesurfer_masks(sub, mask_dir, roi_ids, rh_constant = rh_constant, overwrite = True)
    atlas, roi_ids = mask_utils.merge_masks(atlas_o, roi_ids, merge_list, merged_names, rh_constant = rh_constant)  
    
    # Create mask dictionaries (T1w -> smooth -> resample -> threshold -> make disjoint) and get voxel IDs for the final one
    mask_dict_o, mask_dict_s, mask_dict_r, mask_dict_t = mask_utils.create_mask_dicts(atlas, betas_example, roi_ids, fwhm = fwhm, interpolation='nearest',
                                                 threshold = mask_threshold, voxel_dimensions = voxel_dimensions, rh_constant = rh_constant)
    mask_dict_d = mask_utils.remove_mask_overlap(mask_dict_r, mask_dict_t)
    voxel_ids_dict = mask_utils.get_voxel_ids_from_dict(mask_dict_d)
    
    # Save the final dictionary to mask directory
    mask_utils.save_dict(mask_dict_d, mask_dir, "sub-" + str(sub).zfill(2) + "_mask_dict_EPI_disjoint")
    mask_utils.save_dict(voxel_ids_dict, mask_dir, "sub-" + str(sub).zfill(2) + "_voxel_IDs_dict_EPI_disjoint")
Example #39
File: miraw.py Project: jadrian/mipy
def readNifti(infile, dtype=None, memorder=None):
   """Reads a NIfTI file into a numpy array.
   
   Arguments:
     infile:    Filename of a NIfTI file, or a list of strings.  The list
                indicates a sequence of files to be concatenated together,
                in the order given, in the I dimension (the 4th dimension).
     dtype:     A numpy data type to cast to.  If None, the data type remains
                whatever the NIfTI header specifies.
     memorder:  'F' or 'C' --- the order for storage in memory.  If None, the
                order remains whatever the NIfTI library read it as (most
                likely 'F', the standard layout order for NIfTI).
   
   Returns (vol, cfg, header):
     vol:    numpy ndarray.
     cfg:    dict storing information that you would find in a size_info file.
     header: the NIfTI header of infile (or of the first-listed file, with the
             fourth dimension size changed).
   
   Note that if the NIfTI header encodes a dimension flip or exchange, this
   function DOES NOT apply it to the image before returning.  You'll want to
   check that with niftiGetXform() and perhaps fix it with applyNiftiXform().
     If you ultimately want a raw volume with a non-standard dimension order,
   you should apply that AFTER you apply the NIfTI transform, since
   applyNiftiXform() assumes that the dimension order is precisely as
   represented in the original raw data from the NIfTI file.
     Here's the recommended procedure: read, apply the transform, and then
   remap to the desired dimension order:
       (vol, cfg, header) = readNifti(fname)
       (vol, xform, vox_sz) = applyNiftiXform(vol, niftiGetXform(header), cfg)
       vol = applyDimOrder(vol, dimorder)
     This procedure is what niftiToRaw() does.
   """
   if isinstance(infile, str):
     # Read this one file.
     nii = nifti1.load(infile)
     raw = nii.get_data()
     header = nii.header
   elif isinstance(infile, list):
     # Read a list of files: first read in file 0...
     nii = nifti1.load(infile[0])
     raw = nii.get_data()
     header = nii.header
     raw.resize(raw.shape + (1,)*(4-raw.ndim))
     # ... then concatenate on each other one.
     for i in range(1, len(infile)):
       nii = nifti1.load(infile[i])
       newraw = nii.get_data()
       newraw.resize(newraw.shape + (1,)*(4-newraw.ndim))
       raw = np.concatenate((raw, newraw), axis=3)
     header.set_data_shape(raw.shape)
   else:
     raise ValueError('"%s" is not a valid infile argument.' % repr(infile))
   
   curr_dtype = raw.dtype
   if np.isfortran(raw):
     curr_memorder = "F"
   else:
     curr_memorder = "C"
   
   if dtype is None:
     dtype = curr_dtype
   if memorder is None:
     memorder = curr_memorder
   
   # Create the size_info config dict.
   cfg = {}
   cfg['voxel_size_(mm)']             = header['pixdim'][1:4].tolist()
   cfg['full_image_size_(voxels)']    = raw.shape[:3]
   cfg['low_end_crop_(voxels)']       = [0,0,0]
   cfg['cropped_image_size_(voxels)'] = cfg['full_image_size_(voxels)']
   if raw.ndim > 3:
     cfg['num_dwis']                  = raw.shape[3]
   else:
     cfg['num_dwis']                  = 1
   cfg['dimension_order']             = _miraw_helpers.DIM_DEFAULT_ORDER
   
   return (raw.astype(dtype, order=memorder), cfg, header)
Example #40
def load_T1(t1_file):
    T1_nni = nifti1.load(t1_file)
    T1 = T1_nni.get_data()
    T1_voxel_sizes = T1_nni.get_header()['pixdim'][1:4]
    return T1, T1_voxel_sizes