Example #1
def test_fmridataset():
    # full-blown fmri dataset testing
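    # NOTE: names such as pymvpa_dataroot, SampleAttributes, fmri_dataset and
    # the assert_*/ok_ helpers come from the surrounding PyMVPA test module
    # (typically via `from mvpa2.testing import *` and
    # `from mvpa2 import pymvpa_dataroot`).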
    import nibabel
    maskimg = nibabel.load(os.path.join(pymvpa_dataroot, 'mask.nii.gz'))
    data = maskimg.get_data().copy()
    data[data > 0] = np.arange(1, np.sum(data) + 1)
    maskimg = nibabel.Nifti1Image(data, None, maskimg.get_header())
    attr = SampleAttributes(os.path.join(pymvpa_dataroot, 'attributes.txt'))
    ds = fmri_dataset(samples=os.path.join(pymvpa_dataroot, 'bold.nii.gz'),
                      targets=attr.targets,
                      chunks=attr.chunks,
                      mask=maskimg,
                      sprefix='subj1',
                      add_fa={'myintmask': maskimg})
    # content
    assert_equal(len(ds), 1452)
    assert_equal(ds.nfeatures, 530)
    assert_array_equal(sorted(ds.sa.keys()),
                       ['chunks', 'targets', 'time_coords', 'time_indices'])
    assert_array_equal(sorted(ds.fa.keys()), ['myintmask', 'subj1_indices'])
    assert_array_equal(
        sorted(ds.a.keys()),
        ['imghdr', 'imgtype', 'mapper', 'subj1_dim', 'subj1_eldim'])
    # vol extent
    assert_equal(ds.a.subj1_dim, (40, 20, 1))
    # check time
    assert_equal(ds.sa.time_coords[-1], 3627.5)
    # non-zero mask values
    assert_array_equal(ds.fa.myintmask, np.arange(1, ds.nfeatures + 1))
    # we know that imgtype must be:
    ok_(ds.a.imgtype is nibabel.Nifti1Image)
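The mapper attribute asserted above is what lets PyMVPA project the flattened feature vectors back into voxel space. A minimal sketch of that round trip, reusing ds from the test; map2nifti is PyMVPA's standard helper for this, while the expected output shape and the output filename are assumptions:

from mvpa2.datasets.mri import map2nifti

# reverse-map the samples through ds.a.mapper into a NIfTI image;
# for the dataset above this should give the 40x20x1 volume grid with
# one frame per sample (1452 frames)
img = map2nifti(ds)
print(img.shape)
img.to_filename('bold_roundtrip.nii.gz')  # hypothetical output filename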
Example #2
def load_eeg_dataset(path, filename, attrib, TR, eliminated_vols=None, **kwargs):
    """Load EEG data and build a PyMVPA dataset from it.

    **kwargs:

       - type:
              * 'psd': power spectral density via matplotlib.specgram;
                       NFFT and noverlap may be passed as additional parameters
              * 'fft': power spectrum and phase via scipy.fft
              * 'time': raw EEG timecourse within every TR (default)
    """
    
    # which representation to build; defaults to the raw EEG timecourse
    type = kwargs.get('type', 'time')

    print('type = ' + type)
    #load eeg data
    [data, eeg_info] = load_eeg_data(path, filename, TR, eliminatedVols = eliminated_vols)
    
    channel_ids = eeg_info['channel_ids']
    dt = eeg_info['dt']
    
    kwargs['dt'] = dt
    
    if (type == 'psd') or (type == 'fft'):
        [samples, freq] = spectrum_eeg(data, **kwargs)

        data = samples.reshape(samples.shape[0], samples.shape[1], -1)
    
    #mvpa analysis: attributes and dataset
    attr = SampleAttributes(attrib)
    print('Building dataset...')
    ds = Dataset.from_channeltimeseries(data, 
                                        channelids = channel_ids,
                                        targets = attr.targets,
                                        chunks = attr.chunks)
    
    if (type == 'psd') or (type == 'fft'):
        ds.a['frequencies'] = freq
        ds.a['shape'] = samples.shape
    
    ds.a['timepoints'] = np.arange(0, TR, dt)
        
    del data
    
    if 'samples' in locals():
        del samples
    
    return ds
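A hedged usage sketch for the loader above: the paths, file names, TR and spectral parameters are purely illustrative, and load_eeg_data and spectrum_eeg must be importable from the same analysis package for this to run.

# hypothetical inputs: data directory, raw EEG export, attributes file, TR in seconds
ds_psd = load_eeg_dataset('/data/eeg_study/', 'subj01_eeg.vhdr',
                          '/data/eeg_study/subj01_attributes.txt', TR=2.0,
                          type='psd', NFFT=256, noverlap=128)
print(ds_psd.shape)          # samples x features
print(ds_psd.a.frequencies)  # frequency axis stored for 'psd'/'fft' data
print(ds_psd.a.timepoints)   # within-TR time axis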
Example #3
def process_common_dsattr_opts(ds, args):
    """Goes through an argument namespace and processes attribute options"""
    # legacy support
    if args.add_sa_attr is not None:
        from mvpa2.misc.io.base import SampleAttributes
        smpl_attrs = SampleAttributes(args.add_sa_attr)
        for a in ('targets', 'chunks'):
            verbose(
                2, "Add sample attribute '%s' from sample attributes file" % a)
            ds.sa[a] = getattr(smpl_attrs, a)
    # loop over all attribute configurations that we know
    attr_cfgs = (  # var, dst_collection, loader
        ('--add-sa', args.add_sa, ds.sa, _load_from_cmdline),
        ('--add-fa', args.add_fa, ds.fa, _load_from_cmdline),
        ('--add-sa-txt', args.add_sa_txt, ds.sa, _load_from_txt),
        ('--add-fa-txt', args.add_fa_txt, ds.fa, _load_from_txt),
        ('--add-sa-npy', args.add_sa_npy, ds.sa, _load_from_npy),
        ('--add-fa-npy', args.add_fa_npy, ds.fa, _load_from_npy),
    )
    for varid, srcvar, dst_collection, loader in attr_cfgs:
        if srcvar is not None:
            for spec in srcvar:
                attr_name = spec[0]
                if len(spec) < 2:
                    raise argparse.ArgumentTypeError(
                        "%s option needs at least two values " % varid +
                        "(attribute name and source filename; got: %s)" % spec)
                if dst_collection is ds.sa:
                    verbose(
                        2, "Add sample attribute '%s' from '%s'" %
                        (attr_name, spec[1]))
                else:
                    verbose(
                        2, "Add feature attribute '%s' from '%s'" %
                        (attr_name, spec[1]))
                attr = loader(spec[1:])
                try:
                    dst_collection[attr_name] = attr
                except ValueError as e:
                    # try making the exception more readable
                    e_str = str(e)
                    if e_str.startswith('Collectable'):
                        raise ValueError('attribute %s' % e_str[12:])
                    else:
                        raise e
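The legacy --add-sa-attr branch above relies on PyMVPA's SampleAttributes reader, the same loader used throughout the other examples on this page. A minimal sketch of what it returns; the file path is hypothetical, and the file is expected to hold one target/chunk pair per sample (one line per volume):

from mvpa2.misc.io.base import SampleAttributes

attrs = SampleAttributes('attributes.txt')  # hypothetical path
print(attrs.targets[:5])  # per-sample target labels
print(attrs.chunks[:5])   # per-sample chunk (run) identifiers
print(len(attrs.targets) == len(attrs.chunks))  # one entry of each per sample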
Example #4
def test_openfmri_dataset():
    skip_if_no_external('nibabel')

    of = ofm.OpenFMRIDataset(pathjoin(pymvpa_dataroot, 'haxby2001'))
    assert_equal(of.get_model_descriptions(), {1: 'visual object categories'})
    sub_ids = of.get_subj_ids()
    assert_equal(sub_ids, [1, 'phantom'])
    assert_equal(of.get_scan_properties(), {'TR': '2.5'})
    tasks = of.get_task_descriptions()
    assert_equal(tasks, {1: 'object viewing'})
    task = tasks.keys()[0]
    run_ids = of.get_bold_run_ids(sub_ids[0], task)
    assert_equal(run_ids, range(1, 13))
    task_runs = of.get_task_bold_run_ids(task)
    assert_equal(task_runs, {1: range(1, 13)})

    # test access anatomy image
    assert_equal(
        of.get_anatomy_image(1, fname='lowres001.nii.gz').shape, (6, 10, 10))
    # try to get an image that isn't there
    assert_raises(IOError, of.get_bold_run_image, 1, 1, 1)
    # defined model contrasts
    contrast_spec = of.get_model_contrasts(1)
    # one dict per task
    assert_equal(len(contrast_spec), 1)
    assert_true(1 in contrast_spec)
    # one defined contrast
    assert_equal(len(contrast_spec[1]), 1)
    # check one
    assert_array_equal(contrast_spec[1]['face_v_house'],
                       [-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0])

    orig_attrs = SampleAttributes(
        pathjoin(pymvpa_dataroot, 'attributes_literal.txt'))
    for subj, runs in task_runs.iteritems():
        for run in runs:
            # load single run
            ds = of.get_bold_run_dataset(subj,
                                         task,
                                         run,
                                         flavor='1slice',
                                         mask=pathjoin(pymvpa_dataroot,
                                                       'mask.nii.gz'),
                                         add_sa='bold_moest.txt')
            # basic shape
            assert_equal(len(ds), 121)
            assert_equal(ds.nfeatures, 530)
            # functional mapper
            assert_equal(ds.O.shape, (121, 40, 20, 1))
            # additional attributes present
            moest = of.get_bold_run_motion_estimates(subj, task, run)
            for i in range(6):
                moest_attr = 'bold_moest.txt_%i' % (i, )
                assert_true(moest_attr in ds.sa)
                assert_array_equal(moest[:, i], ds.sa[moest_attr].value)

            # check conversion of model into sample attribute
            events = of.get_bold_run_model(subj, task, run)
            for i, ev in enumerate(events):
                # we only have one trial per condition in the demo dataset
                assert_equal(ev['conset_idx'], 0)
                # proper enumeration and events sorted by time
                assert_equal(ev['onset_idx'], i)
            onsets = [e['onset'] for e in events]
            sorted_onsets = sorted(onsets)
            assert_array_equal(sorted_onsets, onsets)

            targets = events2sample_attr(events,
                                         ds.sa.time_coords,
                                         noinfolabel='rest')
            assert_array_equal(
                orig_attrs['targets'][(run - 1) * 121:run * len(ds)], targets)
            assert_equal(ds.sa['subj'][0], subj)

            # check that we can get the same result from the model dataset
            # (make it exercise the preproc interface too)

            def preproc_img(img):
                return img

            modelds = of.get_model_bold_dataset(1,
                                                subj,
                                                flavor='1slice',
                                                preproc_img=preproc_img,
                                                modelfx=assign_conditionlabels,
                                                mask=pathjoin(
                                                    pymvpa_dataroot,
                                                    'mask.nii.gz'),
                                                add_sa='bold_moest.txt')
            modelds = modelds[modelds.sa.run == run]
            targets = np.array(targets, dtype='object')
            targets[targets == 'rest'] = None
            assert_array_equal(targets, modelds.sa.targets)
    # more basic access
    motion = of.get_task_bold_attributes(1, 'bold_moest.txt', np.loadtxt)
    assert_equal(len(motion), 12)  # one per run
    # one per subject, per volume, 6 estimates
    assert_equal([(len(m), ) + m[1].shape for m in motion], [(1, 121, 6)] * 12)
Example #5
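# NOTE: this excerpt starts mid-script; fsel, fselector, clf, paths, con and
# subList are presumably defined earlier in the original analysis code.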
sbfs = fs.base.SensitivityBasedFeatureSelection(fsel, fselector,
                                                enable_ca=['sensitivities'])
from mvpa2.clfs.meta import FeatureSelectionClassifier, MappedClassifier
fclf = FeatureSelectionClassifier(clf, sbfs)

from mvpa2.measures.base import CrossValidation
from mvpa2.misc import errorfx
from mvpa2.generators.partition import NFoldPartitioner

cv = CrossValidation(fclf,
                     NFoldPartitioner(attr='chunks'),
                     errorfx=errorfx.mean_match_accuracy)

import numpy as np
from mvpa2.misc.io.base import SampleAttributes
cv_attr = SampleAttributes(os.path.join(paths[3], (con + "_attribute_labels.txt")))

from mvpa2.measures import rsa
dsm = rsa.PDist(square=True)
# searchlight
# import searchlightutils as sl
# from mvpa2.measures.searchlight import sphere_searchlight
# cvSL = sphere_searchlight(cv, radius=r)
# lres = sl.run_cv_sl(cvSL, fds[lidx].copy(deep=False))

lresults = []
presults = []
l2presults = []
p2lresults = []
rsaresults = []
for sub in subList.keys():
Example #6
    def test_voxel_selection(self):
        '''Compare surface and volume based searchlight'''
        '''
        Tests to see whether results are identical for surface-based
        searchlight (just one plane; Euclidean distance) and volume-based
        searchlight.

        Note that the current value is a float; if it were int, it would
        specify the number of voxels in each searchlight'''

        radius = 10.
        '''Define input filenames'''
        epi_fn = os.path.join(pymvpa_dataroot, 'bold.nii.gz')
        maskfn = os.path.join(pymvpa_dataroot, 'mask.nii.gz')
        '''
        Use the EPI datafile to define a surface.
        The surface has as many nodes as there are voxels
        and is parallel to the volume 'slice'
        '''
        vg = volgeom.from_any(maskfn, mask_volume=True)

        aff = vg.affine
        nx, ny, nz = vg.shape[:3]
        '''Plane goes in x and y direction, so we take these vectors
        from the affine transformation matrix of the volume'''
        plane = surf.generate_plane(aff[:3, 3], aff[:3, 0], aff[:3, 1], nx, ny)
        '''
        Simulate pial and white matter as just above and below
        the central plane
        '''
        normal_vec = aff[:3, 2]
        outer = plane + normal_vec
        inner = plane + -normal_vec
        '''
        Combine volume and surface information
        '''
        vsm = volsurf.VolSurfMaximalMapping(vg, outer, inner)
        '''
        Run voxel selection with specified radius (in mm), using
        Euclidean distance measure
        '''
        surf_voxsel = surf_voxel_selection.voxel_selection(vsm,
                                                           radius,
                                                           distance_metric='e')
        '''Define the measure'''

        # run_slow=True would give an actual cross-validation with meaningful
        # accuracies. Because this is a unit-test only the number of voxels
        # in each searchlight is tested.
        run_slow = False

        if run_slow:
            meas = CrossValidation(GNB(),
                                   OddEvenPartitioner(),
                                   errorfx=lambda p, t: np.mean(p == t))
            postproc = mean_sample
        else:
            meas = _Voxel_Count_Measure()
            postproc = lambda x: x
        '''
        Surface analysis: define the query engine, cross validation,
        and searchlight
        '''
        surf_qe = SurfaceVerticesQueryEngine(surf_voxsel)
        surf_sl = Searchlight(meas, queryengine=surf_qe, postproc=postproc)
        '''
        new (Sep 2012): also test 'simple' queryengine wrapper function
        '''

        surf_qe2 = disc_surface_queryengine(radius,
                                            maskfn,
                                            inner,
                                            outer,
                                            plane,
                                            volume_mask=True,
                                            distance_metric='euclidean')
        surf_sl2 = Searchlight(meas, queryengine=surf_qe2, postproc=postproc)
        '''
        Same for the volume analysis
        '''
        element_sizes = tuple(map(abs, (aff[0, 0], aff[1, 1], aff[2, 2])))
        sph = Sphere(radius, element_sizes=element_sizes)
        kwa = {'voxel_indices': sph}

        vol_qe = IndexQueryEngine(**kwa)
        vol_sl = Searchlight(meas, queryengine=vol_qe, postproc=postproc)
        '''The following steps are similar to start_easy.py'''
        attr = SampleAttributes(
            os.path.join(pymvpa_dataroot, 'attributes_literal.txt'))

        mask = surf_voxsel.get_mask()

        dataset = fmri_dataset(samples=os.path.join(pymvpa_dataroot,
                                                    'bold.nii.gz'),
                               targets=attr.targets,
                               chunks=attr.chunks,
                               mask=mask)

        if run_slow:
            # do chunkswise linear detrending on dataset

            poly_detrend(dataset, polyord=1, chunks_attr='chunks')

            # zscore dataset relative to baseline ('rest') mean
            zscore(dataset,
                   chunks_attr='chunks',
                   param_est=('targets', ['rest']))

        # select class face and house for this demo analysis
        # would work with full datasets (just a little slower)
        dataset = dataset[np.array(
            [l in ['face', 'house'] for l in dataset.sa.targets],
            dtype='bool')]
        '''Apply searchlight to datasets'''
        surf_dset = surf_sl(dataset)
        surf_dset2 = surf_sl2(dataset)
        vol_dset = vol_sl(dataset)

        surf_data = surf_dset.samples
        surf_data2 = surf_dset2.samples
        vol_data = vol_dset.samples

        assert_array_equal(surf_data, surf_data2)
        assert_array_equal(surf_data, vol_data)
Example #7
#!/usr/bin/env python

import numpy as np
from mvpa2.misc.io.base import SampleAttributes
from mvpa2.datasets.mri import fmri_dataset
from mvpa2.mappers.detrend import poly_detrend
from mvpa2.mappers.zscore import zscore
from mvpa2.clfs.svm import LinearCSVMC
from mvpa2.generators.partition import NFoldPartitioner
from mvpa2.measures.base import CrossValidation
from mvpa2.measures.searchlight import sphere_searchlight
from mvpa2.mappers.fx import mean_sample

sa = SampleAttributes('attributes.txt')
ds = fmri_dataset(samples='func.feat/filtered_func_data.nii.gz',
                  targets=sa.targets,
                  chunks=sa.chunks)
poly_detrend(ds, polyord=1, chunks_attr='chunks')
zscore(ds, param_est=('targets', [0]))
ds = ds[ds.sa.targets != 0]
clf = LinearCSVMC()
cvte = CrossValidation(clf, NFoldPartitioner(),
                       enable_ca=['stats'])
cv_results = cvte(ds)
sl = sphere_searchlight(cvte, radius=3, postproc=mean_sample())
sl_results = sl(ds)
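A minimal follow-up sketch, assuming the corrected script above ran through: one common way to inspect the cross-validation output, using the per-fold errors in cv_results and the confusion matrix collected because of enable_ca=['stats'].

# mean error across the folds produced by NFoldPartitioner
print(np.mean(cv_results))
# summary confusion matrix accumulated during cross-validation
print(cvte.ca.stats.as_string(description=True))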