Example #1
def fx(sl, dataset, roi_ids, results):
    """this requires the searchlight conditional attribute 'roi_feature_ids'
    to be enabled"""

    import numpy as np
    from mvpa2.datasets import Dataset

    resmap = None
    probmap = None
    for resblock in results:
        for res in resblock:
            if resmap is None:
                # prepare the result container
                resmap = np.zeros((len(res), dataset.nfeatures), dtype=res.samples.dtype)
                observ_counter = np.zeros(dataset.nfeatures, dtype=int)
            # project the result onto all features -- love broadcasting!
            resmap[:, res.a.roi_feature_ids] += res.samples
            # increment observation counter for all relevant features
            observ_counter[res.a.roi_feature_ids] += 1
    # once all results have been added up, average them by the number
    # of observations
    observ_mask = observ_counter > 0
    resmap[:, observ_mask] /= observ_counter[observ_mask]
    # wrap the accumulated map into a dataset; the per-feature observation
    # counts are kept as a feature attribute
    result_ds = Dataset(resmap,
                        fa={'observations': observ_counter})
    if 'mapper' in dataset.a:
        import copy
        result_ds.a['mapper'] = copy.copy(dataset.a.mapper)
    return result_ds
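As the docstring notes, this aggregator only works if each searchlight result carries its ROI's feature ids. A minimal sketch of wiring it into a searchlight (classifier and radius are arbitrary choices, 'dataset' is a placeholder):

from mvpa2.suite import (sphere_searchlight, CrossValidation,
                         NFoldPartitioner, LinearCSVMC)

cv = CrossValidation(LinearCSVMC(), NFoldPartitioner())
# enable 'roi_feature_ids' so every result knows its ROI members, and
# install fx as the aggregator for all searchlight results
sl = sphere_searchlight(cv, radius=3, results_fx=fx,
                        enable_ca=['roi_feature_ids'])
res = sl(dataset)  # 'dataset' is any preprocessed fMRI dataset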
Example #2
def run_bootstrap(ds, sts, niter=1000):
    """
       dataset: Input fmri dataset with targets and chunks, should be preprocessed
       Q:       The loading matrix from statis
       niter:   number of iteration for bootstrap
       This function iteratively samples random chunks from dataset with
       replacement and computes each statis solution.
        The factor scores from each statis are then projected into original compromise
       matrix space using Q
       OUTPUT: FBoot collects all projected factor scores
    """
    ntables = sts.ntables
    rows, nfactors = ds.shape
    nrows = rows/ntables
    boot = np.zeros((nrows,nfactors,niter))
    X = ds.a['X'].value
    X = X.samples

    ignore = 0 # Hack: for some reasone inter_table_Rv_analysis chokes...
    ident = np.unique(sts.subtable_idx)
    for i in range(niter):
        idx = ident[np.random.random_integers(0,ntables-1,size=(ntables,))]
        Y = None
        Y_idx = None
        fselect = np.zeros((nrows,nfactors,ntables))

        for k, j in enumerate(idx):
            Y_t = X[:, sts.subtable_idx == j]
            # record which columns of the resampled matrix belong to table k
            if Y_idx is None:
                Y_idx = np.ones(Y_t.shape[1]) * k
            else:
                Y_idx = np.hstack((Y_idx, np.ones(Y_t.shape[1]) * k))

            if Y is None:
                Y = Y_t
            else:
                Y = np.hstack((Y, Y_t))
            fselect[:, :, k] = ds.samples[ds.chunks == np.unique(ds.chunks)[j], :]

        (A,alpha,C,G) = inter_table_Rv_analysis(Y,Y_idx)
        if G is None:
            ignore += 1
            continue
        #print "shape alpha:", alpha.shape
        #print "shape fselect", fselect.shape
        #print alpha
        #print fselect
        boot[:,:,i] = np.sum(fselect*alpha.flatten(),2)

        if i % 100 == 0:
            sys.stdout.write("iter: %s/%s\r" % (i, niter))
            sys.stdout.flush()

    print("completed %s/%s bootstraps." % (niter - ignore, niter))
    boot = Dataset(boot)
    boot.sa['targets'] = ds.targets[:nrows]
    return boot
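Because the returned Dataset stacks one nrows x nfactors slice per iteration along the third axis, bootstrap confidence intervals follow directly from percentiles over that axis. A minimal sketch, assuming ds and sts are prepared as the docstring describes:

import numpy as np

boot = run_bootstrap(ds, sts, niter=1000)
# 95% bootstrap interval per observation and factor, over the iteration axis
ci_lo, ci_hi = np.percentile(boot.samples, [2.5, 97.5], axis=2)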
Example #3
def packing(dataset, radius=4, nifti=False, randoffset=False):
    """Return a hexagonal close sphere-packing grid for a PyMVPA fMRI dataset.

    Keyword arguments:
    radius     -- radius in voxels of the spheres to pack (default 4)
    nifti      -- write out the seed voxel mask as a NIfTI image
    randoffset -- apply a random jitter to the seed voxel grid

    """
    import numpy as np
    from mvpa2.suite import Dataset

    if randoffset:
        ro = np.random.random(3)
    else:
        ro = np.zeros(3)

    minco = dataset.fa.voxel_indices.min(0)
    maxco = dataset.fa.voxel_indices.max(0)
    rect = np.ones(dataset.a.voxel_dim)

    # layer spacing for hexagonal close packing: sqrt(6)/3 times the sphere
    # diameter
    fac = np.sqrt(6) * 2 * radius / 3
    for iz, z in enumerate(np.arange(minco[2], maxco[2], fac)):
        for iy, y in enumerate(np.arange(minco[1], maxco[1], fac)):
            for x in np.arange(minco[0], maxco[0], 2 * radius):
                # shift every other row/layer to interlock the spheres
                hx = x + np.remainder(iy, 2) * radius + ro[0] * radius
                hy = y + np.remainder(iz, 2) * fac + ro[1] * radius
                hz = z + ro[2] * radius
                if hz <= maxco[2]:
                    rect[int(hx), int(hy), int(hz)] += 1

    # map the seed volume into feature space; seed voxels carry the value 2
    maskedrect = dataset.a.mapper.forward1(rect)
    roiIndex = np.flatnonzero(maskedrect == 2)
    print('number of seed voxels: ' + str(len(roiIndex)))

    maskedrectds = Dataset([maskedrect])
    maskedrectds.a = dataset.a.copy()
    maskedrectds.fa = dataset.fa.copy()

    if nifti:
        from nibabel import Nifti1Image
        Nifti1Image(maskedrectds.O.squeeze(),
                    dataset.a.imghdr.get_best_affine()
                    ).to_filename('sparse' + str(int(radius)) + '.nii.gz')

    return roiIndex, maskedrectds
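The returned feature ids can serve directly as searchlight centers, yielding a sparse searchlight over non-overlapping spheres. A minimal sketch; the filenames and the measure are placeholders:

from mvpa2.suite import fmri_dataset, sphere_searchlight

ds = fmri_dataset('bold.nii.gz', mask='brain_mask.nii.gz')  # placeholder files
seed_ids, seed_ds = packing(ds, radius=4)
# evaluate a measure only at the packed seed voxels
sl = sphere_searchlight(my_measure, radius=4, center_ids=seed_ids)  # my_measure: any dataset measure
res = sl(ds)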
Example #4
    def _forward_dataset(self, ds):
        mapped = None
        X = None

        # subtables are defined by a chunks attribute on samples ('v'ertical
        # stacking) or on features ('h'orizontal stacking)
        if self._stack == 'v':
            chunks = np.unique(ds.sa[self._chunks_attr].value)
        else:
            chunks = np.unique(ds.fa[self._chunks_attr].value)
        
        for ch, chunk in enumerate(chunks):
            if self._stack == 'v':
                table = ds[ds.sa[self._chunks_attr].value == chunk, :]
            if self._stack == 'h':
                table = ds[:, ds.fa[self._chunks_attr].value == chunk]

            # re-apply the centering/normalization parameters estimated
            # during training
            table = self.center_and_norm_table(
                table,
                col_mean=self._subtable_stats[ch]['col_mean'],
                col_norm=self._subtable_stats[ch]['col_norm'],
                table_norm=self._subtable_stats[ch]['table_norm'])[0]


            if self._stack == 'v':
                # assume features align across subtables; project with the
                # average of the per-subtable loading matrices
                Q_ = None
                for subtab in np.unique(self.subtable_idx):
                    if Q_ is None:
                        Q_ = self.Q[self.subtable_idx == subtab, :]
                    else:
                        Q_ = Q_ + self.Q[self.subtable_idx == subtab, :]
                Q_ = Q_ / len(np.unique(self.subtable_idx))
                part = Dataset(np.dot(table.samples, Q_))
                part.sa = table.sa
            if self._stack == 'h':
                # assume the same number of features as in X; project with
                # the loading block belonging to this subtable
                part = Dataset(np.dot(table, self.Q[self.subtable_idx == chunk, :]))
                part.sa = table.sa
            part.sa['chunks'] = [chunk] * part.shape[0]

            if mapped is None:
                mapped = part.copy()
                X = table
            else:
                mapped.append(part.copy())
                X.append(table,'h')
        mapped.a['X'] = X

        if self.keep_dims == 'all':
            self.keep_dims = list(range(mapped.shape[1]))
        return mapped[:, self.keep_dims]
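The "average Q" step in the vertical branch is easy to miss; the following standalone NumPy sketch (made-up dimensions, hypothetical names) shows what it computes when all subtables share one feature order:

import numpy as np

nfeat, nfactors, ntables = 10, 3, 4
subtable_idx = np.repeat(np.arange(ntables), nfeat)  # feature -> subtable map
Q = np.random.randn(nfeat * ntables, nfactors)       # stacked loading blocks

# average the per-subtable blocks of Q into one nfeat x nfactors matrix
Q_avg = np.mean([Q[subtable_idx == t, :] for t in np.unique(subtable_idx)],
                axis=0)
assert Q_avg.shape == (nfeat, nfactors)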
Example #5
    def _call(self, ds):
        # 1 - correlation distance == correlation similarity
        data_dsm = 1 - pdist(ds.samples, metric='correlation')
        data_dsm = np.arctanh(data_dsm)  # Fisher z transformation
        data_dsm = data_dsm[self.labels != 0]
        labels = self.labels[self.labels != 0]
        data_dsm = zscore(data_dsm)
        # difference between similarities of same-type trial pairs across
        # runs (labeled 1) and different-type trial pairs across runs
        # (labeled 2)
        sim = np.mean(data_dsm[labels == 1]) - np.mean(data_dsm[labels == 2])
        return Dataset([sim])
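On its own this _call needs a host class that stores the pair labels; a plausible wrapper, with the class name and constructor being assumptions rather than part of the source, could be:

import numpy as np
from scipy.spatial.distance import pdist
from scipy.stats import zscore
from mvpa2.measures.base import Measure
from mvpa2.datasets import Dataset

class CrossRunSimilarity(Measure):  # hypothetical name
    is_trained = True  # stateless measure, nothing to train

    def __init__(self, labels, **kwargs):
        Measure.__init__(self, **kwargs)
        # one label per trial pair: 1 = same condition across runs,
        # 2 = different condition across runs, 0 = pair is ignored
        self.labels = np.asarray(labels)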
Example #6
def load_eeg_dataset(path, filename, attrib, TR, eliminated_vols=None, **kwargs):
    """
    
    **kwargs:
    
       - type: 
              * 'psd': Power Spectrum Density using matplotlib.specgram
                       additional parameters to be included NFFT and noverlap
              * 'fft': Power Spectrum and Phase using scipy.fft
              * 'time': EEG timecourse in every TR. 
    """

    type = "time"

    for arg in kwargs:
        if arg == "type":
            type = kwargs[arg]

    print "type = " + type
    # load eeg data
    [data, eeg_info] = load_eeg_data(path, filename, TR, eliminatedVols=eliminated_vols)

    channel_ids = eeg_info["channel_ids"]
    dt = eeg_info["dt"]

    kwargs["dt"] = dt

    if (type == "psd") or (type == "fft"):
        [samples, freq] = spectrum_eeg(data, **kwargs)

        data = samples.reshape(samples.shape[0], samples.shape[1], -1)

    # mvpa analysis: attributes and dataset
    attr = SampleAttributes(attrib)
    print "Building dataset..."
    ds = Dataset.from_channeltimeseries(data, channelids=channel_ids, targets=attr.targets, chunks=attr.chunks)

    if data_type in ("psd", "fft"):
        ds.a["frequencies"] = freq
        ds.a["shape"] = samples.shape

    ds.a["timepoints"] = np.arange(0, TR, dt)

    del data

    if "samples" in locals():
        del samples

    return ds
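A call for the spectral branch might look like this; the paths and parameter values are placeholders (NFFT and noverlap are the matplotlib.specgram options the docstring refers to):

ds = load_eeg_dataset('/data/eeg', 'subj01_eeg.txt',
                      '/data/eeg/attributes.txt', TR=2.0,
                      type='psd', NFFT=256, noverlap=128)
print(ds.shape)
print(ds.a.frequencies)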
Example #7
for i in range(nTrials):  # enclosing loop over trials; the original snippet starts mid-loop
    for j in channelSelected:
        # window of nSamples seconds starting onSet seconds after the marker
        start = int(markers[i][1] + onSet / dt)
        stop = start + int(nSamples / dt)
        nVec = np.array(data.T[j][start:stop])
        if nVec.shape[0] != int(nSamples / dt):
            # pad trials that run past the end of the recording
            nVec.resize(int(nSamples / dt))
            print("trial %s, channel %s resized" % (i, j))
        dataRes[j][i] = nVec

# Select data from the chosen channels only
dataResSel = np.take(dataRes, channelSelected, axis=0)
ch_infoSel = np.take(ch_info, channelSelected)

# reshape data to feed the Dataset constructor
dataResSel = np.reshape(dataResSel, (nTrials, -1, int(nSamples / dt)))

# Drop the last few trials
volToEliminate = 5
dataResSel = dataResSel[: nTrials - volToEliminate]


# mvpa analysis: attributes and dataset
attr = SampleAttributes("/home/robbis/fmri_datasets/monks/monks_attrib_pymvpa.txt")

ds = Dataset.from_channeltimeseries(
    dataResSel, t0=0, dt=dt, channelids=ch_infoSel, targets=attr.targets, chunks=attr.chunks
)


del dataRes, dataResSel
Example #8
    def _call(self, ds):
        # neural dissimilarity between all trial pairs ('euclidean' would be
        # an alternative metric)
        data_dsm = pdist(ds.samples, metric='correlation')
        # rank correlation between the neural DSM and the model DSM stored
        # in self.labels
        corr = spearmanr(data_dsm, self.labels, axis=None).correlation
        return Dataset([corr])
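With self.labels holding a model dissimilarity vector, this measure performs a basic representational similarity analysis. A sketch of building such a model DSM from a dataset's targets; the wrapper class name is hypothetical, in the spirit of the one sketched for Example #5:

import numpy as np
from scipy.spatial.distance import pdist

# binary model DSM: 0 where two trials share a target, 1 otherwise
codes = np.unique(ds.sa.targets, return_inverse=True)[1].astype(float)
model_dsm = (pdist(codes[:, None]) > 0).astype(float)

# measure = DsmCorrelationMeasure(labels=model_dsm)  # hypothetical wrapper
# corr_ds = measure(ds)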