Example #1
    def test_preallocate_output(self, nblocks):
        ds = datasets['3dsmall'].copy()[:, :25] # smaller copy
        ds.fa['voxel_indices'] = ds.fa.myspace
        ds.fa['feature_id'] = np.arange(ds.nfeatures)

        def measure(ds):
            # return more than one sample
            return np.repeat(ds.fa.feature_id, 10, axis=0)

        nprocs = [1, 2] if externals.exists('pprocess') else [1]
        enable_ca = ['roi_sizes', 'raw_results', 'roi_feature_ids']
        for nproc in nprocs:
            sl = sphere_searchlight(measure,
                                    radius=0,
                                    center_ids=np.arange(ds.nfeatures),
                                    nproc=nproc,
                                    enable_ca=enable_ca,
                                    nblocks=nblocks)
            sl_inplace = sphere_searchlight(measure,
                                            radius=0,
                                            preallocate_output=True,
                                            center_ids=np.arange(ds.nfeatures),
                                            nproc=nproc,
                                            enable_ca=enable_ca,
                                            nblocks=nblocks)
            out = sl(ds)
            out_inplace = sl_inplace(ds)

            for c in enable_ca:
                assert_array_equal(sl.ca[c].value, sl_inplace.ca[c].value)
            assert_array_equal(out.samples, out_inplace.samples)
            assert_array_equal(out.fa.center_ids, out_inplace.fa.center_ids)
Example #2
    def test_partial_searchlight_with_full_report(self):
        ds = self.dataset.copy()
        center_ids = np.zeros(ds.nfeatures, dtype='bool')
        center_ids[[3, 50]] = True
        ds.fa['center_ids'] = center_ids
        # compute N-1 cross-validation for each sphere
        cv = CrossValidation(sample_clf_lin, NFoldPartitioner())
        # construct diameter 1 (or just radius 0) searchlight
        # one time give center ids as a list, the other one takes it from the
        # dataset itself
        sls = (sphere_searchlight(cv, radius=0, center_ids=[3, 50]),
               sphere_searchlight(cv, radius=0, center_ids='center_ids'))
        for sl in sls:
            # run searchlight
            results = sl(ds)
            # only two spheres but error for all CV-folds
            self.assertEqual(results.shape, (len(self.dataset.UC), 2))
        # test if we graciously puke if center_ids are out of bounds
        dataset0 = ds[:, :50]  # so we have no 50th feature
        self.assertRaises(IndexError, sls[0], dataset0)
        # but it should be fine on the one that gets the ids from the dataset
        # itself
        results = sl(dataset0)
        assert_equal(results.nfeatures, 1)
        # check whether roi_seeds are correct
        sl = sphere_searchlight(lambda x: np.vstack((x.fa.roi_seed, x.samples)),
                                radius=1, add_center_fa=True, center_ids=[12])
        res = sl(ds)
        assert_array_equal(res.samples[1:, res.samples[0].astype('bool')].squeeze(),
                           ds.samples[:, 12])
Example #3
def test_PDistTargetSimilaritySearchlight():
    # Test ability to use PDistTargetSimilarity in a searchlight
    from mvpa2.testing.datasets import datasets
    from mvpa2.mappers.fx import mean_group_sample
    from mvpa2.mappers.shape import TransposeMapper
    from mvpa2.measures.searchlight import sphere_searchlight
    ds = datasets['3dsmall'][:, :3]
    ds.fa['voxel_indices'] = ds.fa.myspace
    # use chunks values (4 of them) for targets
    ds.sa['targets'] = ds.sa.chunks
    ds = mean_group_sample(['chunks'])(ds)
    tdsm = np.arange(6)
    # We can run on full dataset
    tdcm1 = PDistTargetSimilarity(tdsm)
    a1 = tdcm1(ds)
    assert_array_equal(a1.fa.metrics, ['rho', 'p'])

    tdcm1_rho = PDistTargetSimilarity(tdsm, corrcoef_only=True)
    sl_rho = sphere_searchlight(tdcm1_rho)(ds)
    assert_array_equal(sl_rho.shape, (1, ds.nfeatures))

    # now with both but we need to transpose datasets
    tdcm1_both = PDistTargetSimilarity(tdsm, postproc=TransposeMapper())
    sl_both = sphere_searchlight(tdcm1_both)(ds)
    assert_array_equal(sl_both.shape, (2, ds.nfeatures))
    assert_array_equal(sl_both.sa.metrics, ['rho', 'p'])
    # rho must be exactly the same
    assert_array_equal(sl_both.samples[0], sl_rho.samples[0])
    # just because we are here and we can
    # Actually here for some reason assert_array_lequal gave me trouble
    assert_true(np.all(sl_both.samples[1] <= 1.0))
    assert_true(np.all(0 <= sl_both.samples[1]))
Example #4
def test_chained_crossvalidation_searchlight():
    from mvpa2.clfs.gnb import GNB
    from mvpa2.clfs.meta import MappedClassifier
    from mvpa2.generators.partition import NFoldPartitioner
    from mvpa2.mappers.base import ChainMapper
    from mvpa2.mappers.base import Mapper
    from mvpa2.measures.base import CrossValidation
    from mvpa2.measures.searchlight import sphere_searchlight
    from mvpa2.testing.datasets import datasets

    dataset = datasets['3dlarge'].copy()
    dataset.fa['voxel_indices'] = dataset.fa.myspace
    sample_clf = GNB()              # fast and deterministic

    class ZScoreFeaturesMapper(Mapper):
        """Very basic mapper which would take care about standardizing
        all features within each sample separately
        """
        def _forward_data(self, data):
            return (data - np.mean(data, axis=1)[:, None]) / np.std(data, axis=1)[:, None]

    # only do partial to save time
    sl_kwargs = dict(radius=2, center_ids=[3, 50])
    clf_mapped = MappedClassifier(sample_clf, ZScoreFeaturesMapper())
    cv = CrossValidation(clf_mapped, NFoldPartitioner())
    sl = sphere_searchlight(cv, **sl_kwargs)
    results_mapped = sl(dataset)

    cv_chained = ChainMapper([ZScoreFeaturesMapper(auto_train=True),
                              CrossValidation(sample_clf, NFoldPartitioner())])
    sl_chained = sphere_searchlight(cv_chained, **sl_kwargs)
    results_chained = sl_chained(dataset)

    assert_array_equal(results_mapped, results_chained)
Example #5
    def test_nblocks(self):
        skip_if_no_external('pprocess')
        # just a basic test to see that we are getting the same
        # results with different nblocks
        ds = datasets['3dsmall'].copy(deep=True)[:, :13]
        ds.fa['voxel_indices'] = ds.fa.myspace
        cv = CrossValidation(GNB(), OddEvenPartitioner())
        res1 = sphere_searchlight(cv, radius=1, nproc=2)(ds)
        res2 = sphere_searchlight(cv, radius=1, nproc=2, nblocks=5)(ds)
        assert_array_equal(res1, res2)
Example #6
    def test_usecase_concordancesl(self):
        import numpy as np
        from mvpa2.base.dataset import vstack
        from mvpa2.mappers.fx import mean_sample

        # Take our sample 3d dataset
        ds1 = datasets['3dsmall'].copy(deep=True)
        ds1.fa['voxel_indices'] = ds1.fa.myspace
        ds1.sa['subject'] = [1]  # not really necessary -- but let's for clarity
        ds1 = mean_sample()(ds1) # so we get just a single representative sample

        def corr12(ds):
            corr = np.corrcoef(ds.samples)
            assert(corr.shape == (2, 2)) # for paranoid ones
            return corr[0, 1]

        for nsc, thr, thr_mean in (
            (0, 1.0, 1.0),
            (0.1, 0.3, 0.8)):   # just a bit of noise
            ds2 = ds1.copy(deep=True)    # make a copy for the 2nd subject
            ds2.sa['subject'] = [2]
            ds2.samples += nsc * np.random.normal(size=ds1.shape)

            # make sure that both have the same voxel indices
            assert(np.all(ds1.fa.voxel_indices == ds2.fa.voxel_indices))
            ds_both = vstack((ds1, ds2))  # join 2 images into a single dataset
                                          # with .sa.subject distinguishing both

            sl = sphere_searchlight(corr12, radius=2)
            slmap = sl(ds_both)
            ok_(np.all(slmap.samples >= thr))
            ok_(np.mean(slmap.samples) >= thr_mean)
Example #7
    def test_partial_searchlight_with_full_report(self):
        ds = self.dataset.copy()
        center_ids = np.zeros(ds.nfeatures, dtype='bool')
        center_ids[[3, 50]] = True
        ds.fa['center_ids'] = center_ids
        # compute N-1 cross-validation for each sphere
        cv = CrossValidation(GNB(), NFoldPartitioner())
        # construct diameter 1 (or just radius 0) searchlight
        # one time give center ids as a list, the other one takes it from the
        # dataset itself
        sls = (
            sphere_searchlight(cv, radius=0, center_ids=[3, 50]),
            sphere_searchlight(None, radius=0, center_ids=[3, 50]),
            sphere_searchlight(cv, radius=0, center_ids='center_ids'),
        )
        for sl in sls:
            # assure that we could set cv post constructor
            if sl.datameasure is None:
                sl.datameasure = cv
            # run searchlight
            results = sl(ds)
            # only two spheres but error for all CV-folds
            self.assertEqual(results.shape, (len(self.dataset.UC), 2))
            # Test if results hold if we "set" a "new" datameasure
            sl.datameasure = CrossValidation(GNB(), NFoldPartitioner())
            results2 = sl(ds)
            assert_array_almost_equal(results, results2)

        # test if we graciously puke if center_ids are out of bounds
        dataset0 = ds[:, :50]  # so we have no 50th feature
        self.assertRaises(IndexError, sls[0], dataset0)
        # but it should be fine on the one that gets the ids from the dataset
        # itself
        results = sl(dataset0)
        assert_equal(results.nfeatures, 1)
        # check whether roi_seeds are correct
        sl = sphere_searchlight(lambda x: np.vstack((x.fa.roi_seed, x.samples)),
                                radius=1,
                                add_center_fa=True,
                                center_ids=[12])
        res = sl(ds)
        assert_array_equal(
            res.samples[1:, res.samples[0].astype('bool')].squeeze(),
            ds.samples[:, 12])
Example #8
def test_searchlight_errors_per_trial():
    # To make sure that searchlight can return error/accuracy per trial
    from mvpa2.clfs.gnb import GNB
    from mvpa2.generators.partition import OddEvenPartitioner
    from mvpa2.measures.base import CrossValidation
    from mvpa2.measures.searchlight import sphere_searchlight
    from mvpa2.measures.gnbsearchlight import sphere_gnbsearchlight
    from mvpa2.testing.datasets import datasets
    from mvpa2.misc.errorfx import prediction_target_matches

    dataset = datasets['3dsmall'].copy()
    # randomly permute samples so we break any random correspondence
    # to strengthen tests below
    sample_idx = np.arange(len(dataset))
    dataset = dataset[np.random.permutation(sample_idx)]

    dataset.sa.targets = ['L%d' % l for l in dataset.sa.targets]
    dataset.fa['voxel_indices'] = dataset.fa.myspace
    sample_clf = GNB()              # fast and deterministic

    part = OddEvenPartitioner()
    # only do partial to save time
    cv = CrossValidation(sample_clf, part, errorfx=None) #prediction_target_matches)
    # Just to compare error
    cv_error = CrossValidation(sample_clf, part)

    # Large searchlight radius so we get entire ROI, 2 centers just to make sure
    # that all stacking works correctly
    sl = sphere_searchlight(cv, radius=10, center_ids=[0, 1])
    results = sl(dataset)

    sl_gnb = sphere_gnbsearchlight(sample_clf, part, radius=10, errorfx=None,
                                   center_ids=[0, 1])
    results_gnbsl = sl_gnb(dataset)

    # inspect both results
    # verify that partitioning was done correctly
    partitions = list(part.generate(dataset))
    for res in (results, results_gnbsl):
        assert('targets' in res.sa.keys())  # should carry targets
        assert('cvfolds' in res.sa.keys())  # should carry cvfolds
        for ipart in xrange(len(partitions)):
            assert_array_equal(dataset[partitions[ipart].sa.partitions == 2].targets,
                               res.sa.targets[res.sa.cvfolds == ipart])

    assert_datasets_equal(results, results_gnbsl)

    # one "accuracy" per each trial
    assert_equal(results.shape, (len(dataset), 2))
    # with accuracies the same in both searchlights since the same
    # features were to be selected in both cases due to the large radii
    errors_dataset = cv(dataset)
    assert_array_equal(errors_dataset.samples[:, 0], results.samples[:, 0])
    assert_array_equal(errors_dataset.samples[:, 0], results.samples[:, 1])
    # and the error matching (up to precision) the one we get with the default error function
    assert_array_almost_equal(np.mean(results.targets[:, None] != results.samples, axis=0)[0],
                              np.mean(cv_error(dataset)))
Example #9
def test_searchlight_cross_decoding(path, subjects, conf_file, type, **kwargs):

    conf = read_configuration(path, conf_file, type)

    for arg in kwargs:
        conf[arg] = kwargs[arg]
    # 'radius' is required below for the searchlight and the output filename
    radius = kwargs['radius']

    debug.active += ["SLC"]

    ds_merged = get_merged_ds(path, subjects, conf_file, type, **kwargs)

    clf = LinearCSVMC(C=1, probability=1, enable_ca=['probabilities'])
    cv = CrossValidation(clf, NFoldPartitioner(attr='task'))

    maps = []

    for ds in ds_merged:

        ds.targets[ds.targets == 'point'] = 'face'
        ds.targets[ds.targets == 'saccade'] = 'place'

        sl = sphere_searchlight(cv, radius, space='voxel_indices')

        sl_map = sl(ds)

        sl_map.samples *= -1
        sl_map.samples += 1

        nif = map2nifti(sl_map, imghdr=ds.a.imghdr)

        maps.append(nif)

    datetime = get_time()
    analysis = 'cross_searchlight'
    mask = conf['mask_area']
    task = type

    new_dir = datetime + '_' + analysis + '_' + mask + '_' + task
    command = 'mkdir ' + os.path.join(path, '0_results', new_dir)
    os.system(command)

    parent_dir = os.path.join(path, '0_results', new_dir)

    for name, img in zip(subjects, maps):
        command = 'mkdir ' + os.path.join(parent_dir, name)
        os.system(command)

        results_dir = os.path.join(parent_dir, name)
        fname = name + '_radius_' + str(radius) + '_searchlight_map.nii.gz'
        img.to_filename(os.path.join(results_dir, fname))

    return maps
Example #10
    def test_regression_with_additional_sa(self):
        regr = regrswh[:][0]
        ds = datasets['3dsmall'].copy()
        ds.fa['voxel_indices'] = ds.fa.myspace

        # Create a new sample attribute which will be used along with
        # every searchlight
        ds.sa['beh'] = np.random.normal(size=(ds.nsamples, 2))

        # and now for fun -- let's create custom linear regression
        # targets out of some random feature and beh linearly combined
        rfeature = np.random.randint(ds.nfeatures)
        ds.sa.targets = np.dot(
            np.hstack((ds.sa.beh,
                       ds.samples[:, rfeature:rfeature + 1])),
            np.array([0.3, 0.2, 0.3]))

        class CrossValidationWithBeh(CrossValidation):
            """An adapter for regular CV which would hstack
               sa.beh to the searchlighting ds"""
            def _call(self, ds):
                return CrossValidation._call(
                    self,
                    Dataset(np.hstack((ds, ds.sa.beh)),
                            sa=ds.sa))
        cvbeh = CrossValidationWithBeh(regr, OddEvenPartitioner(),
                                       errorfx=corr_error)
        # regular cv
        cv = CrossValidation(regr, OddEvenPartitioner(),
                             errorfx=corr_error)

        slbeh = sphere_searchlight(cvbeh, radius=1)
        slmapbeh = slbeh(ds)
        sl = sphere_searchlight(cv, radius=1)
        slmap = sl(ds)

        assert_equal(slmap.shape, (2, ds.nfeatures))
        # SL which had access to beh should have got for sure better
        # results especially in the vicinity of the chosen feature...
        features = sl.queryengine.query_byid(rfeature)
        assert_array_lequal(slmapbeh.samples[:, features],
                            slmap.samples[:, features])
Example #11
    def test_swaroop_case(self, preallocate_output):
        """Test hdf5 backend to pass results on Swaroop's usecase
        """
        skip_if_no_external('h5py')
        from mvpa2.measures.base import Measure

        class sw_measure(Measure):
            def __init__(self):
                Measure.__init__(self, auto_train=True)

            def _call(self, dataset):
                # For performance measures -- increase to 50-200
                # np.sum here is just to get some meaningful value in
                # them
                #return np.ones(shape=(2, 2))*np.sum(dataset)
                return Dataset(
                    np.array([{
                        'd': np.ones(shape=(5, 5)) * np.sum(dataset)
                    }],
                             dtype=object))

        results = []
        ds = datasets['3dsmall'].copy(deep=True)
        ds.fa['voxel_indices'] = ds.fa.myspace

        our_custom_prefix = tempfile.mktemp()
        for backend in ['native'] + \
                (externals.exists('h5py') and ['hdf5'] or []):
            sl = sphere_searchlight(sw_measure(),
                                    radius=1,
                                    tmp_prefix=our_custom_prefix,
                                    results_backend=backend,
                                    preallocate_output=preallocate_output)
            t0 = time.time()
            results.append(np.asanyarray(sl(ds)))
            # print "Done for backend %s in %d sec" % (backend, time.time() - t0)
        # because of swaroop's ad-hoc (who only could recommend such
        # a construct?) use case, and absent fancy working assert_objectarray_equal
        # let's compare manually
        #assert_objectarray_equal(*results)
        if not externals.exists('h5py'):
            self.assertRaises(RuntimeError,
                              sphere_searchlight,
                              sw_measure(),
                              results_backend='hdf5')
            raise SkipTest('h5py required for test of backend="hdf5"')
        assert_equal(results[0].shape, results[1].shape)
        results = [r.flatten() for r in results]
        for x, y in zip(*results):
            assert_equal(x.keys(), y.keys())
            assert_array_equal(x['d'], y['d'])
        # verify that no junk is left behind
        tempfiles = glob.glob(our_custom_prefix + '*')
        assert_equal(len(tempfiles), 0)
Example #12
def main(subject, study_dir, mask, stat, res_name, items='ac',
         suffix='_stim_fix2', feature_mask=None, radius=3, n_perm=1000,
         n_proc=None):

    from mvpa2.datasets.mri import map2nifti
    from mvpa2.mappers.zscore import zscore
    from mvpa2.measures.searchlight import sphere_searchlight
    from nireact import mvpa

    # lookup subject directory
    sp = su.SubjPath(subject, study_dir)

    # load task information
    vols = task.disp_vols(sp.path('behav', 'log'))

    # unpack the items option to get item type code
    item_names = 'abc'
    item_comp = [item_names.index(name) + 1 for name in items]

    # get post runs, A and C items only
    include = ((vols.run >= 5) & np.isin(vols.item_type, item_comp) &
               (vols.correct == 1))
    post = vols.loc[include, :]

    # load beta series
    ds = mvpa.load_disp_beta(sp, suffix, mask, feature_mask, verbose=1)

    # define measure and contrasts to write out
    contrasts = ['pos', 'block_inter', 'inter_block']
    m = mvpa.TriadVector(post, item_comp, stat, contrasts, n_perm)

    # zscore
    ds.sa['run'] = vols.run.values
    zscore(ds, chunks_attr='run')

    # searchlight
    print('Running searchlight...')
    sl = sphere_searchlight(m, radius=radius, nproc=n_proc)
    sl_map = sl(ds[include])

    # save results
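    # the last sample of the searchlight output apparently carries the
    # included-voxel map (it is written out as 'included' below)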
    nifti_include = map2nifti(ds, sl_map[-1])
    for i, contrast in enumerate(contrasts):
        res_dir = sp.path('rsa', f'{res_name}_{contrast}')
        if not os.path.exists(res_dir):
            os.makedirs(res_dir)

        nifti = map2nifti(ds, sl_map[i])
        nifti.to_filename(su.impath(res_dir, 'zstat'))

        nifti_include.to_filename(su.impath(res_dir, 'included'))
Example #13
def main(subject,
         study_dir,
         mask,
         feature_mask,
         res_dir,
         radius=3,
         n_proc=None):
    from mvpa2.measures.searchlight import sphere_searchlight
    from mvpa2.datasets.mri import map2nifti

    # load functional data
    subject_dir = os.path.join(study_dir, f'tesser_{subject}')
    ds = mvpa.load_struct_timeseries(study_dir,
                                     subject,
                                     mask,
                                     feature_mask=feature_mask,
                                     verbose=1,
                                     zscore_run=True)

    # load events data, split by object within structure learning block
    vols = rsa.load_vol_info(study_dir, subject)
    events = vols.query('sequence_type > 0').copy()
    n_item = events['trial_type'].nunique()
    events['trial_type'] = (events['trial_type'] +
                            (events['sequence_type'] - 1) * n_item)

    # set up BRSA model
    model = brsa.GBRSA()
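    # presumably 21 unique items x 2 sequence types (cf. trial_type recoding above)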
    n_ev = 21 * 2
    n_vol = ds.shape[0]
    mat, nuisance, scan_onsets = rsa.create_brsa_matrix(
        subject_dir, events, n_vol)
    m = mvpa.ItemBRSA(model, n_ev, mat, nuisance, scan_onsets)

    # run searchlight
    sl = sphere_searchlight(m, radius=radius, nproc=n_proc)
    sl_map = sl(ds)

    # save included voxels map
    if not os.path.exists(res_dir):
        os.makedirs(res_dir)
    nifti_include = map2nifti(ds, sl_map[-1])
    include_file = os.path.join(res_dir, 'included.nii.gz')
    nifti_include.to_filename(include_file)

    # save pairwise correlations as a timeseries
    filepath = os.path.join(res_dir, 'stat.nii.gz')
    nifti = map2nifti(ds, sl_map[:-1])
    nifti.to_filename(filepath)
Example #14
    def test_agreement_surface_volume(self):
        '''test agreement between volume-based and surface-based
        searchlights when using euclidean measure'''

        # import runner
        def sum_ds(ds):
            return np.sum(ds)

        radius = 3

        # make a small dataset with a mask
        sh = (10, 10, 10)
        msk = np.zeros(sh)
        for i in xrange(0, sh[0], 2):
            msk[i, :, :] = 1
        vg = volgeom.VolGeom(sh, np.identity(4), mask=msk)

        # make an image
        nt = 6
        img = vg.get_masked_nifti_image(nt)
        ds = fmri_dataset(img, mask=msk)

        # run the searchlight
        sl = sphere_searchlight(sum_ds, radius=radius)
        m = sl(ds)

        # now use surface-based searchlight
        v = volsurf.from_volume(ds)
        source_surf = v.intermediate_surface
        node_msk = np.logical_not(np.isnan(source_surf.vertices[:, 0]))

        # check that the mask matches with what we used earlier
        assert_array_equal(msk.ravel() + 0., node_msk.ravel() + 0.)

        source_surf_nodes = np.nonzero(node_msk)[0]

        sel = surf_voxel_selection.voxel_selection(
            v,
            float(radius),
            source_surf=source_surf,
            source_surf_nodes=source_surf_nodes,
            distance_metric='euclidean')

        qe = queryengine.SurfaceVerticesQueryEngine(sel)
        sl = Searchlight(sum_ds, queryengine=qe)
        r = sl(ds)

        # check whether they give the same results
        assert_array_equal(r.samples, m.samples)
Example #15
    def searchlight(self, ds, cvte):

        sl = sphere_searchlight(cvte,
                                radius=self._radius,
                                space='voxel_indices')
        sl_map = sl(ds)
        # turn the error map into an accuracy map: acc = 1 - err
        sl_map.samples *= -1
        sl_map.samples += 1

        map_ = map2nifti(sl_map, imghdr=ds.a.imghdr)
        map_ = ni.Nifti1Image(map_.get_data(), affine=ds.a.imgaffine)

        self.maps.append(map_)

        return map_
Example #16
def run_searchlight(ds, metric='correlation', radius=2, center_ids=None, n_cpu=None):

    # map metric names to measure callables; an unknown value is assumed
    # to already be a measure and is passed through unchanged
    measures = {
        'cca_u': cca_uncentered,
        'cca': cca,
        '1_to_many_cca': cca_one_to_all,
        'all_cca': all_cca,
        'cca_validate': cca_validate,
        'cca_validate_max': cca_validate_max,
        'correlation': pearsons_average,
        'all_pearsons': all_pearsons_averages,
        'pvalues': pvalues,
        'tvalues': tvalues,
        'timepoint_double_corr': timepoint_double_corr,
        'scene_based_double_corr': scene_based_double_corr,
        'event_svm_cv': event_svm_cross_validation,
        'event_svm_cv_cm': event_svm_cross_validation_confusion_matrix,
        'cluster_scenes': cluster_scenes,
        'cluster_scenes_track_indices': cluster_scenes_track_indices,
        'cluster_scenes_return_indices': cluster_scenes_return_indices,
    }
    measure = measures.get(metric, metric)

    sl = sphere_searchlight(measure, radius=radius, center_ids=center_ids, nproc=n_cpu)

    searched_ds = sl(ds)
    searched_ds.fa = ds.fa
    searched_ds.a = ds.a

    return searched_ds
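# A hypothetical invocation (assuming `ds` is a dataset whose .fa carries the
# voxel_indices needed by the sphere queryengine):
#   searched = run_searchlight(ds, metric='correlation', radius=3, n_cpu=2)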
Example #17
    def test_gnbsearchlight_3partitions_and_splitter(self):
        ds = self.dataset[:, :20]
        # custom partitioner which provides 3 partitions
        part = CustomPartitioner([([2], [3], [1])])
        gnb_sl = sphere_gnbsearchlight(GNB(), part)
        res_gnb_sl = gnb_sl(ds)

        # compare results to full blown searchlight
        sl = sphere_searchlight(CrossValidation(GNB(), part))
        res_sl = sl(ds)

        assert_datasets_equal(res_gnb_sl, res_sl)

        # and theoretically for this simple single cross-validation we could
        # just use Splitter
        splitter = Splitter('chunks', [2, 3])
        # we have to pass an explicit None for the partitioner since it can
        # no longer be omitted as a kwarg here
        gnb_sl_ = sphere_gnbsearchlight(GNB(), None, splitter=splitter)
        res_gnb_sl_ = gnb_sl_(ds)
        assert_datasets_equal(res_gnb_sl, res_gnb_sl_)
Example #18
    def test_chi_square_searchlight(self):
        # only do partial to save time

        # Can't yet do this since test_searchlight isn't yet "under nose"
        #skip_if_no_external('scipy')
        if not externals.exists('scipy'):
            return

        from mvpa2.misc.stats import chisquare

        cv = CrossValidation(sample_clf_lin,
                             NFoldPartitioner(),
                             enable_ca=['stats'])

        def getconfusion(data):
            cv(data)
            return chisquare(cv.ca.stats.matrix)[0]

        sl = sphere_searchlight(getconfusion, radius=0, center_ids=[3, 50])

        # run searchlight
        results = sl(self.dataset)
        self.assertTrue(results.nfeatures == 2)
Example #19
File: rsa_fmri.py  Project: hanke/PyMVPA
plot_mtx(res, mtds.sa.targets, 'ROI pattern correlation distances')

"""
Inspecting the figure we can see that there is not much structure in the matrix,
except for the face and house conditions being slightly more dissimilar than
the others.

Now, let's take a look at the variation of similarity structure through the
brain. We can plug the PDist() measure into a searchlight to quickly scan the
brain and harvest this information.
"""

# same as above, but done in a searchlight fashion
from mvpa2.measures.searchlight import sphere_searchlight
dsm = rsa.PDist(square=False)
sl = sphere_searchlight(dsm, 2)
slres = sl(mtds)

"""
The result is a compact distance matrix in vector form for each searchlight
location. We can now try to score each matrix. Let's find the distance matrix
with the largest overall distances across all stimulation conditions, i.e.
the location in the brain where brain response patterns are most dissimilar.
"""

# score each searchlight sphere result wrt global pattern dissimilarity
distinctiveness = np.sum(np.abs(slres), axis=0)
print 'Most dissimilar patterns around', \
        mtds.fa.voxel_indices[distinctiveness.argmax()]
# take a look at this dissimilarity structure
from scipy.spatial.distance import squareform
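# The excerpt stops at this import; a minimal sketch of the likely continuation
# (assuming the plot_mtx helper defined earlier in rsa_fmri.py): fold the
# condensed distance vector of the most distinct location back into a square
# matrix and plot it
plot_mtx(squareform(slres.samples[:, distinctiveness.argmax()]),
         mtds.sa.targets,
         'Most distinct distance matrix')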
Example #20
def test_confusion_as_node():
    from mvpa2.misc.data_generators import normal_feature_dataset
    from mvpa2.clfs.gnb import GNB
    from mvpa2.clfs.transerror import Confusion
    ds = normal_feature_dataset(snr=2.0, perlabel=42, nchunks=3,
                                nonbogus_features=[0,1], nfeatures=2)
    clf = GNB()
    cv = CrossValidation(
        clf, NFoldPartitioner(),
        errorfx=None,
        postproc=Confusion(labels=ds.UT),
        enable_ca=['stats'])
    res = cv(ds)
    # needs to be identical to CA
    assert_array_equal(res.samples, cv.ca.stats.matrix)
    assert_array_equal(res.sa.predictions, ds.UT)
    assert_array_equal(res.fa.targets, ds.UT)

    skip_if_no_external('scipy')

    from mvpa2.clfs.transerror import BayesConfusionHypothesis
    from mvpa2.base.node import ChainNode
    # same again, but this time with Bayesian hypothesis testing at the end
    cv = CrossValidation(
        clf, NFoldPartitioner(),
        errorfx=None,
        postproc=ChainNode([Confusion(labels=ds.UT),
                            BayesConfusionHypothesis()]))
    res = cv(ds)
    # only two possible hypothesis with two classes
    assert_equals(len(res), 2)
    # the first hypothesis is that we can't discriminate anything
    assert_equal(len(res.sa.hypothesis[0]), 1)
    assert_equal(len(res.sa.hypothesis[0][0]), 2)
    # and the hypothesis is actually less likely than the other one
    # (both classes can be distinguished)
    assert(np.e**res.samples[0,0] < np.e**res.samples[1,0])

    # Let's see how well it would work within the searchlight when we also
    # would like to store the hypotheses per each voxel
    # Somewhat an ad-hoc solution for the answer posted on the ML
    #
    # run 1d searchlight of radii 0, for that just provide a .fa with coordinates
    ds.fa['voxel_indices'] = [[0], [1]]
    # and a custom Node which would collect .sa.hypothesis to place together along
    # with the posterior probabilities
    from mvpa2.base.node import Node
    from mvpa2.measures.searchlight import sphere_searchlight

    class KeepBothPosteriorAndHypothesis(Node):
        def _call(self, ds):
            out = np.zeros(1, dtype=object)
            out[0] = (ds.samples, ds.sa.hypothesis)
            return out
    cv.postproc.append(KeepBothPosteriorAndHypothesis())
    sl = sphere_searchlight(cv, radius=0, nproc=1)
    res = sl(ds)

    assert_equal(res.shape, (1, 2))
    assert_equal(len(res.samples[0,0]), 2)
    assert_equal(res.samples[0,0][0].shape, (2, 2))   # posteriors per 1st SL
    assert_equal(len(res.samples[0,0][1]), 2)   # 2 of hypotheses
Example #21
    def test_spatial_searchlight(self, common_variance=True, do_roi=False):
        """Tests both generic and GNBSearchlight
        Test of GNBSearchlight anyways requires a ground-truth
        comparison to the generic version, so we are doing sweepargs here
        """
        # compute N-1 cross-validation for each sphere
        # YOH: unfortunately sample_clf_lin is not guaranteed
        #      to provide exactly the same results due to inherent
        #      iterative process.  Therefore lets use something quick
        #      and pure Python
        gnb = GNB(common_variance=common_variance)
        cv = CrossValidation(gnb, NFoldPartitioner())

        ds = datasets['3dsmall'].copy()
        ds.fa['voxel_indices'] = ds.fa.myspace

        skwargs = dict(radius=1, enable_ca=['roi_sizes', 'raw_results'])

        if do_roi:
            # select some random set of features
            nroi = rnd.randint(1, ds.nfeatures)
            # and lets compute the full one as well once again so we have a reference
            # which will be excluded itself from comparisons but values will be compared
            # for selected roi_id
            sl_all = sphere_gnbsearchlight(gnb, NFoldPartitioner(cvtype=1),
                                           **skwargs)
            result_all = sl_all(ds)
            # select random features
            roi_ids = rnd.permutation(range(ds.nfeatures))[:nroi]
            skwargs['center_ids'] = roi_ids
        else:
            nroi = ds.nfeatures

        sls = [sphere_searchlight(cv, **skwargs),
               #GNBSearchlight(gnb, NFoldPartitioner(cvtype=1))
               sphere_gnbsearchlight(gnb, NFoldPartitioner(cvtype=1),
                                     indexsum='fancy', **skwargs)
               ]

        if externals.exists('scipy'):
            sls += [ sphere_gnbsearchlight(gnb, NFoldPartitioner(cvtype=1),
                                           indexsum='sparse', **skwargs)]

        # Just test nproc whenever common_variance is True
        if externals.exists('pprocess') and common_variance:
            sls += [sphere_searchlight(cv, nproc=2, **skwargs)]

        all_results = []
        for sl in sls:
            # run searchlight
            results = sl(ds)
            all_results.append(results)
            #print `sl`
            # check for correct number of spheres
            self.assertTrue(results.nfeatures == nroi)
            # and measures (one per xfold)
            self.assertTrue(len(results) == len(ds.UC))

            # check for chance-level performance across all spheres
            # makes sense only if number of features was big enough
            # to get some stable estimate of mean
            if not do_roi or nroi > 20:
                self.assertTrue(0.4 < results.samples.mean() < 0.6)

            mean_errors = results.samples.mean(axis=0)
            # that we do get different errors ;)
            self.assertTrue(len(np.unique(mean_errors)) > 3)

            # check reasonable sphere sizes
            self.assertTrue(len(sl.ca.roi_sizes) == nroi)
            if do_roi:
                # for roi we should relax conditions a bit
                self.assertTrue(max(sl.ca.roi_sizes) <= 7)
                self.assertTrue(min(sl.ca.roi_sizes) >= 4)
            else:
                self.assertTrue(max(sl.ca.roi_sizes) == 7)
                self.assertTrue(min(sl.ca.roi_sizes) == 4)

            # check base-class state
            self.assertEqual(sl.ca.raw_results.nfeatures, nroi)

            # Test if we got results correctly for 'selected' roi ids
            if do_roi:
                assert_array_equal(result_all[:, roi_ids], results)

        if len(all_results) > 1:
            # if we had multiple searchlights, we can check whether they all
            # gave the same result (they should have)
            aresults = np.array([a.samples for a in all_results])
            dresults = np.abs(aresults - aresults.mean(axis=0))
            dmax = np.max(dresults)
            self.assertTrue(dmax <= 1e-13)
Example #22
    def test_partial_searchlight_with_confusion_matrix(self):
        ds = self.dataset
        from mvpa2.clfs.stats import MCNullDist
        from mvpa2.mappers.fx import mean_sample, sum_sample

        # compute N-1 cross-validation for each sphere
        cm = ConfusionMatrix(labels=ds.UT)
        cv = CrossValidation(
            sample_clf_lin,
            NFoldPartitioner(),
            # we have to assure that matrix does not get flatted by
            # first vstack in cv and then hstack in searchlight --
            # thus 2 leading dimensions
            # TODO: RF? make searchlight/crossval smarter?
            errorfx=lambda *a: cm(*a)[None, None, :])
        # construct diameter 2 (or just radius 1) searchlight
        sl = sphere_searchlight(cv, radius=1, center_ids=[3, 5, 50])

        # our regular searchlight -- to compare results
        cv_gross = CrossValidation(sample_clf_lin, NFoldPartitioner())
        sl_gross = sphere_searchlight(cv_gross,
                                      radius=1,
                                      center_ids=[3, 5, 50])

        # run searchlights
        res = sl(ds)
        res_gross = sl_gross(ds)

        # only two spheres but error for all CV-folds and complete confusion matrix
        assert_equal(res.shape, (len(ds.UC), 3, len(ds.UT), len(ds.UT)))
        assert_equal(res_gross.shape, (len(ds.UC), 3))

        # briefly inspect the confusion matrices
        mat = res.samples
        # since input dataset is probably balanced (otherwise adjust
        # to be per label): sum within columns (thus axis=-2) should
        # be identical to per-class/chunk number of samples
        samples_per_classchunk = len(ds) / (len(ds.UT) * len(ds.UC))
        ok_(np.all(np.sum(mat, axis=-2) == samples_per_classchunk))
        # and if we compute accuracies manually -- they should
        # correspond to the one from sl_gross
        assert_array_almost_equal(
            res_gross.samples,
            # from accuracies to errors
            1 - (mat[..., 0, 0] + mat[..., 1, 1]).astype(float) /
            (2 * samples_per_classchunk))

        # and now for those who remained sited -- lets perform H0 MC
        # testing of this searchlight... just a silly one with minimal
        # number of permutations
        no_permutations = 10
        permutator = AttributePermutator('targets', count=no_permutations)

        # once again -- need explicit leading dimension to avoid
        # vstacking during cross-validation
        cv.postproc = lambda x: sum_sample()(x)[None, :]

        sl = sphere_searchlight(cv,
                                radius=1,
                                center_ids=[3, 5, 50],
                                null_dist=MCNullDist(
                                    permutator,
                                    tail='right',
                                    enable_ca=['dist_samples']))
        res_perm = sl(ds)
        # XXX all of the res_perm, sl.ca.null_prob and
        #     sl.null_dist.ca.dist_samples carry a degenerate leading
        #     dimension which was probably due to introduced new axis
        #     above within cv.postproc
        assert_equal(res_perm.shape, (1, 3, 2, 2))
        assert_equal(sl.null_dist.ca.dist_samples.shape,
                     res_perm.shape + (no_permutations, ))
        assert_equal(sl.ca.null_prob.shape, res_perm.shape)
        # just to make sure ;)
        ok_(np.all(sl.ca.null_prob.samples >= 0))
        ok_(np.all(sl.ca.null_prob.samples <= 1))

        # we should have got sums of hits across the splits
        assert_array_equal(np.sum(mat, axis=0), res_perm.samples[0])
Example #23
def test_gnbsearchlight_permutations():
    import mvpa2
    from mvpa2.base.node import ChainNode
    from mvpa2.clfs.gnb import GNB
    from mvpa2.generators.base import Repeater
    from mvpa2.generators.partition import NFoldPartitioner, OddEvenPartitioner
    #import mvpa2.generators.permutation
    #reload(mvpa2.generators.permutation)
    from mvpa2.generators.permutation import AttributePermutator
    from mvpa2.testing.datasets import datasets
    from mvpa2.measures.base import CrossValidation
    from mvpa2.measures.gnbsearchlight import sphere_gnbsearchlight
    from mvpa2.measures.searchlight import sphere_searchlight
    from mvpa2.mappers.fx import mean_sample
    from mvpa2.misc.errorfx import mean_mismatch_error
    from mvpa2.clfs.stats import MCNullDist
    from mvpa2.testing.tools import assert_raises, ok_, assert_array_less

    # mvpa2.debug.active = ['APERM', 'SLC'] #, 'REPM']
    # mvpa2.debug.metrics += ['pid']
    count = 10
    nproc = 1 + int(mvpa2.externals.exists('pprocess'))
    ds = datasets['3dsmall'].copy()
    ds.fa['voxel_indices'] = ds.fa.myspace

    slkwargs = dict(radius=3, space='voxel_indices', enable_ca=['roi_sizes'],
                    center_ids=[1, 10, 70, 100])

    mvpa2.seed(mvpa2._random_seed)
    clf  = GNB()
    splt = NFoldPartitioner(cvtype=2, attr='chunks')

    repeater   = Repeater(count=count)
    permutator = AttributePermutator('targets', limit={'partitions': 1}, count=1)
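    # permute 'targets' only within the training partition (partitions == 1),
    # generating a single permutation per invocation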

    null_sl = sphere_gnbsearchlight(clf, ChainNode([splt, permutator], space=splt.get_space()),
                                    postproc=mean_sample(), errorfx=mean_mismatch_error,
                                    **slkwargs)

    distr_est = MCNullDist(repeater, tail='left', measure=null_sl,
                           enable_ca=['dist_samples'])
    sl = sphere_gnbsearchlight(clf, splt,
                               reuse_neighbors=True,
                               null_dist=distr_est, postproc=mean_sample(),
                               errorfx=mean_mismatch_error,
                               **slkwargs)
    if __debug__:                         # assert is done only without -O mode
        assert_raises(NotImplementedError, sl, ds)

    # "ad-hoc searchlights can't handle yet varying targets across partitions"
    if False:
        # after above limitation is removed -- enable
        sl_map = sl(ds)
        sl_null_prob = sl.ca.null_prob.samples.copy()

    mvpa2.seed(mvpa2._random_seed)
    ### 'normal' Searchlight
    clf  = GNB()
    splt = NFoldPartitioner(cvtype=2, attr='chunks')
    repeater   = Repeater(count=count)
    permutator = AttributePermutator('targets', limit={'partitions': 1}, count=1)
    # rng=np.random.RandomState(0)) # to trigger failure since the same np.random state
    # would be reused across all pprocesses
    null_cv = CrossValidation(clf, ChainNode([splt, permutator], space=splt.get_space()),
                              postproc=mean_sample())
    null_sl_normal = sphere_searchlight(null_cv, nproc=nproc, **slkwargs)
    distr_est_normal = MCNullDist(repeater, tail='left', measure=null_sl_normal,
                                  enable_ca=['dist_samples'])

    cv = CrossValidation(clf, splt, errorfx=mean_mismatch_error,
                         enable_ca=['stats'], postproc=mean_sample())
    sl = sphere_searchlight(cv, nproc=nproc, null_dist=distr_est_normal, **slkwargs)
    sl_map_normal = sl(ds)
    sl_null_prob_normal = sl.ca.null_prob.samples.copy()

    # For every feature -- we should get some variance in the estimates. In
    # case of failure they are all really close to each other (up to
    # numerical precision), so the variance will be close to 0
    assert_array_less(-np.var(distr_est_normal.ca.dist_samples.samples[0],
                              axis=1), -1e-5)
    for s in distr_est_normal.ca.dist_samples.samples[0]:
        ok_(len(np.unique(s)) > 1)
Example #35
def runsub(sub, thisContrast, r, dstype='raw', roi='grayMatter', filterLen=49, filterOrd=3, write=False):

    if dstype == 'raw':
        outdir = 'PyMVPA'
        print "working with raw data"
        thisSub = {sub: subList[sub]}
        dsdict = lmvpa.loadsubdata(paths, thisSub, m=roi)
        thisDS = dsdict[sub]
        mc_params = lmvpa.loadmotionparams(paths, thisSub)
        beta_events = lmvpa.loadevents(paths, thisSub)
        # Savitzky-Golay filtering
        sg.sg_filter(thisDS, filterLen, filterOrd)
        # Gallant group z-scores before regression.

        # zscore w.r.t. rest trials
        # zscore(thisDS, param_est=('targets', ['rest']), chunks_attr='chunks')
        # z-score the entire set. If done chunk-wise, there is no double-dipping (since we leave a chunk out at a time).
        zscore(thisDS, chunks_attr='chunks')
        print "beta extraction"
        ## BETA EXTRACTION ##
        rds, events = lmvpa.amendtimings(thisDS.copy(), beta_events[sub])
        evds = er.fit_event_hrf_model(rds, events, time_attr='time_coords',
                                      condition_attr=('trial_type', 'chunks'),
                                      design_kwargs={'add_regs': mc_params[sub], 'hrf_model': 'canonical'},
                                      return_model=True)

        fds = lmvpa.replacetargets(evds, contrasts, thisContrast)
        fds = fds[fds.targets != '0']
    else:
        outdir = os.path.join('LSS', dstype)
        print "loading betas"
        fds = lmvpa.loadsubbetas(paths, sub, btype=dstype, m=roi)
        fds.sa['targets'] = fds.sa[thisContrast]
        zscore(fds, chunks_attr='chunks')

    fds = lmvpa.sortds(fds)
    print "searchlights"
    ## initialize classifier
    clf = svm.LinearNuSVMC()
    cv = CrossValidation(clf, NFoldPartitioner())
    from mvpa2.measures.searchlight import sphere_searchlight
    cvSL = sphere_searchlight(cv, radius=r)


    # now I have betas per chunk. could just correlate the betas, or correlate the predictions for corresponding runs
    lidx = fds.chunks < fds.sa['chunks'].unique[len(fds.sa['chunks'].unique) / 2]
    pidx = fds.chunks >= fds.sa['chunks'].unique[len(fds.sa['chunks'].unique) / 2]

    lres = sl.run_cv_sl(cvSL, fds[lidx].copy(deep=False))
    pres = sl.run_cv_sl(cvSL, fds[pidx].copy(deep=False))

    if write:
        from mvpa2.base import dataset
        map2nifti(fds, dataset.vstack([lres, pres])).\
            to_filename(os.path.join(
                        paths[0], 'Maps', outdir,
                        sub + '_' + roi + '_' + thisContrast + '_cvsl.nii.gz'))

    del lres, pres, cvSL

    cvSL = sphere_searchlight(cv, radius=r)
    crossSet = fds.copy()
    crossSet.chunks[lidx] = 1
    crossSet.chunks[pidx] = 2
    cres = sl.run_cv_sl(cvSL, crossSet.copy(deep=False))
    if write:
        map2nifti(fds, cres[0]).to_filename(
            os.path.join(paths[0], 'Maps', outdir,
                         sub + '_' + roi + '_' + (thisContrast) + '_P2L.nii.gz'))
        map2nifti(fds, cres[1]).to_filename(
            os.path.join(paths[0], 'Maps', outdir,
                         sub + '_' + roi + '_' + (thisContrast) + '_L2P.nii.gz'))
Example #36
def do_searchlight(glm_dataset, radius, output_basename, with_null_prob=False):
    clf = LinearCSVMC(space='condition')
    #		clf = RbfCSVMC(C=5.0)
    splt = NFoldPartitioner()
    cv = CrossValidation(clf,
                         splt,
                         errorfx=mean_match_accuracy,
                         enable_ca=['stats'],
                         postproc=mean_sample())
    distr_est = []
    if with_null_prob:
        permutator = AttributePermutator('condition',
                                         count=100,
                                         limit='chunks')
        distr_est = MCNullDist(permutator,
                               tail='left',
                               enable_ca=['dist_samples'])
        """
		repeater   = Repeater(count=100)
		permutator = AttributePermutator('condition', limit={'partitions': 1}, count=1) 
		null_cv = CrossValidation(clf, ChainNode([splt, permutator],space=splt.get_space()),
					  postproc=mean_sample())
		null_sl = sphere_searchlight(null_cv, radius=radius, space='voxel_indices',
					     enable_ca=['roi_sizes'])
		distr_est = MCNullDist(repeater,tail='left', measure=null_sl,
				       enable_ca=['dist_samples'])
		"""
        sl = sphere_searchlight(cv,
                                radius=radius,
                                space='voxel_indices',
                                null_dist=distr_est,
                                enable_ca=['roi_sizes', 'roi_feature_ids'])
    else:

        sl = sphere_searchlight(cv,
                                radius=radius,
                                space='voxel_indices',
                                enable_ca=['roi_sizes', 'roi_feature_ids'])
    #ds = glm_dataset.copy(deep=False,
    #		       sa=['condition','chunks'],
    #		       fa=['voxel_indices'],
    #		       a=['mapper'])
    #debug.active += ["SLC"]
    sl_map = sl(glm_dataset)
    accresults = map2nifti(sl_map, imghdr=glm_dataset.a.imghdr)
    accresults.to_filename('{}-acc.nii.gz'.format(output_basename))
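    # flip the accuracy map into an error map in place: err = 1 - acc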
    sl_map.samples *= -1
    sl_map.samples += 1
    niftiresults = map2nifti(sl_map, imghdr=glm_dataset.a.imghdr)
    niftiresults.to_filename('{}-err.nii.gz'.format(output_basename))
    #TODO: save p value map
    if with_null_prob:
        nullt_results = map2nifti(sl_map,
                                  data=sl.ca.null_t,
                                  imghdr=glm_dataset.a.imghdr)
        nullt_results.to_filename('{}-t.nii.gz'.format(output_basename))
        nullprob_results = map2nifti(sl_map,
                                     data=sl.ca.null_prob,
                                     imghdr=glm_dataset.a.imghdr)
        nullprob_results.to_filename('{}-prob.nii.gz'.format(output_basename))
        cdf_results = map2nifti(sl_map,
                                data=distr_est.cdf(sl_map.samples),
                                imghdr=glm_dataset.a.imghdr)
        cdf_results.to_filename('{}-cdf.nii.gz'.format(output_basename))
Example #37
    def test_custom_results_fx_logic(self):
        # results_fx was introduced for the blow-up-the-memory-Swaroop
        # case, where keeping all intermediate results of the dark-magic SL
        # hyperalignment is not feasible.  So it is desirable to split the
        # searchlight computation into more blocks while composing the
        # target result "on-the-fly" from the results available so far.
        #
        # The implementation relies on generators feeding the
        # results_fx with fresh results whenever those become
        # available.
        #
        # This test/example's "measure" creates files which should be
        # handled by the results_fx function and removed in this case,
        # to check that we indeed get the desired high number of blocks
        # while nproc stays limited.
        skip_if_no_external('pprocess')

        tfile = tempfile.mktemp('mvpa', 'test-sl')

        ds = datasets['3dsmall'].copy()[:, :71] # smaller copy
        ds.fa['voxel_indices'] = ds.fa.myspace
        ds.fa['feature_id'] = np.arange(ds.nfeatures)

        nproc = 3 # it is not about computing -- so we can
                  # start more processes than we have CPUs, just to test
        nblocks = nproc * 7
        # figure out max number of features to be given to any proc_block
        # yoh: not sure why I had to +1 here... but now it became more robust and
        # still seems to be doing what was demanded so be it
        max_block = int(ceil(ds.nfeatures / float(nblocks)) + 1)

        def print_(s, *args):
            """For local debugging"""
            #print s, args
            pass

        def results_fx(sl=None, dataset=None, roi_ids=None, results=None):
            """It will "process" the results by removing those files
               generated inside the measure
            """
            res = []
            print_("READY")
            for x in results:
                ok_(isinstance(x, list))
                res.append(x)
                print_("R: ", x)
                for r in x:
                    # Can happen if we requested those .ca's enabled
                    # -- then automagically _proc_block would wrap
                    # results in a dataset... Originally detected by
                    # running with MVPA_DEBUG=.* which triggered
                    # enabling all ca's
                    if is_datasetlike(r):
                        r = np.asscalar(r.samples)
                    os.unlink(r)         # remove generated file
                print_("WAITING")
            return hstack(sum(res, []))

        def measure(ds):
            """The "measure" will check if a run with the same "index" from
               the previous block has been processed by now
            """
            f = '%s+%03d' % (tfile, ds.fa.feature_id[0] % (max_block * nproc))
            print_("FID:%d f:%s" % (ds.fa.feature_id[0], f))

            # allow up to a few seconds for the file to
            # disappear -- i.e. for its result from the previous "block"
            # to be processed
            t0 = time.time()
            while os.path.exists(f) and time.time() - t0 < 4.:
                time.sleep(0.5) # so it does take time to compute the measure
            if os.path.exists(f):
                print_("ERROR: ", f)
                raise AssertionError("File %s must have been processed by now"
                                     % f)
            open(f, 'w').write('XXX')   # signal that we have computed this measure
            print_("RES: %s" % f)
            return f

        sl = sphere_searchlight(measure,
                                radius=0,
                                nproc=nproc,
                                nblocks=nblocks,
                                results_fx=results_fx,
                                center_ids=np.arange(ds.nfeatures)
                                )

        assert_equal(len(glob.glob(tfile + '*')), 0) # so no junk around
        try:
            res = sl(ds)
        finally:
            # remove those generated left-over files
            for f in glob.glob(tfile + '*'):
                os.unlink(f)
Example #38
def test_confusion_as_node():
    from mvpa2.misc.data_generators import normal_feature_dataset
    from mvpa2.clfs.gnb import GNB
    from mvpa2.clfs.transerror import Confusion
    ds = normal_feature_dataset(snr=2.0, perlabel=42, nchunks=3,
                                nonbogus_features=[0,1], nfeatures=2)
    clf = GNB()
    cv = CrossValidation(
        clf, NFoldPartitioner(),
        errorfx=None,
        postproc=Confusion(labels=ds.UT),
        enable_ca=['stats'])
    res = cv(ds)
    # needs to be identical to CA
    assert_array_equal(res.samples, cv.ca.stats.matrix)
    assert_array_equal(res.sa.predictions, ds.UT)
    assert_array_equal(res.fa.targets, ds.UT)

    skip_if_no_external('scipy')

    from mvpa2.clfs.transerror import BayesConfusionHypothesis
    from mvpa2.base.node import ChainNode
    # same again, but this time with Bayesian hypothesis testing at the end
    cv = CrossValidation(
        clf, NFoldPartitioner(),
        errorfx=None,
        postproc=ChainNode([Confusion(labels=ds.UT),
                            BayesConfusionHypothesis()]))
    res = cv(ds)
    # only two possible hypothesis with two classes
    assert_equals(len(res), 2)
    # the first hypothesis is that we can't discriminate anything
    assert_equal(len(res.sa.hypothesis[0]), 1)
    assert_equal(len(res.sa.hypothesis[0][0]), 2)
    # and this hypothesis is actually less likely than the other one
    # (that both classes can be distinguished)
    assert(np.e**res.samples[0,0] < np.e**res.samples[1,0])

    # Let's see how well it would work within the searchlight when we also
    # would like to store the hypotheses per each voxel
    # A somewhat ad-hoc solution for an answer posted on the mailing list
    #
    # run a 1d searchlight of radius 0; for that, just provide a .fa with coordinates
    ds.fa['voxel_indices'] = [[0], [1]]
    # and a custom Node which collects .sa.hypothesis to store alongside
    # the posterior probabilities
    from mvpa2.base.node import Node
    from mvpa2.measures.searchlight import sphere_searchlight

    class KeepBothPosteriorAndHypothesis(Node):
        def _call(self, ds):
            out = np.zeros(1, dtype=object)
            out[0] = (ds.samples, ds.sa.hypothesis)
            return out
    cv.postproc.append(KeepBothPosteriorAndHypothesis())
    sl = sphere_searchlight(cv, radius=0, nproc=1)
    res = sl(ds)

    assert_equal(res.shape, (1, 2))
    assert_equal(len(res.samples[0,0]), 2)
    assert_equal(res.samples[0,0][0].shape, (2, 2))   # posteriors per 1st SL
    assert_equal(len(res.samples[0,0][1]), 2)   # 2 of hypotheses
Example #39
# basic ROI RSA -- dissimilarity matrix for the entire ROI
from mvpa2.measures import rsa
dsm = rsa.PDist(square=True)
res = dsm(mtds)
plot_mtx(res, mtds.sa.targets, 'ROI pattern correlation distances')
Inspecting the figure, we can see that there is not much structure in the matrix, except for the face and house conditions being slightly more dissimilar than the others.

"""
Now, let’s take a look at the variation of similarity structure through the brain. We can plug the PDist() measure into a searchlight to quickly scan the brain and harvest this information.
"""

# same as above, but done in a searchlight fashion
from mvpa2.measures.searchlight import sphere_searchlight
dsm = rsa.PDist(square=False)
sl = sphere_searchlight(dsm, 2)
slres = sl(mtds)

"""
The result is a compact distance matrix in vector form for each searchlight location. We can now try to score each matrix. Let’s find the distance matrix with the largest overall distances across all stimulation conditions, i.e. the location in the brain where brain response patterns are most dissimilar.
"""

# score each searchlight sphere result wrt global pattern dissimilarity
distinctiveness = np.sum(np.abs(slres), axis=0)
print 'Most dissimilar patterns around', \
        mtds.fa.voxel_indices[distinctiveness.argmax()]
# take a look at this dissimilarity structure
from scipy.spatial.distance import squareform
plot_mtx(squareform(slres.samples[:, distinctiveness.argmax()]),
         mtds.sa.targets,
         'Maximum distinctive searchlight pattern correlation distances')
Example #40
plot_args = {
    'background' : os.path.join(sessionPath,'/home/brain/host/pymvpaniifiles/anat.nii.gz'),
    'background_mask' : os.path.join(sessionPath,'/home/brain/host/pymvpaniifiles/mask_brain.nii.gz'),
    'overlay_mask' : os.path.join(sessionPath,'analyze/structural/lc2ms_deskulled.hdr'),
    'do_stretch_colors' : False,
    'cmap_bg' : 'gray',
    'cmap_overlay' : 'autumn', # pl.cm.autumn
    'interactive' : cfg.getboolean('examples', 'interactive', True),
    }

for radius in [3]:
    # tell which one we are doing
    print 'Running searchlight with radius: %i ...' % (radius)

    sl = sphere_searchlight(foldwiseCvedAnovaSelectedSMLR, radius=radius, space='voxel_indices',
                            center_ids=center_ids,
                            postproc=mean_sample())

    ds = dataset.copy(deep=False,
                      sa=['targets', 'chunks'],
                      fa=['voxel_indices'],
                      a=['mapper'])

    sl_map = sl(ds)
    sl_map.samples *= -1
    sl_map.samples += 1

    niftiresults = map2nifti(sl_map, imghdr=dataset.a.imghdr)
    niftiresults.to_filename(os.path.join(sessionPath, 'analyze/functional/%s-grey-searchlight.nii' % classificationName))
    print 'Best performing sphere error:', np.min(sl_map.samples)
Example #41
    def test_custom_results_fx_logic(self):
        # results_fx was introduced for the blow-up-the-memory-Swaroop
        # case, where keeping all intermediate results of the dark-magic SL
        # hyperalignment is not feasible.  So it is desirable to split the
        # searchlight computation into more blocks while composing the
        # target result "on-the-fly" from the results available so far.
        #
        # The implementation relies on generators feeding the
        # results_fx with fresh results whenever those become
        # available.
        #
        # This test/example's "measure" creates files which should be
        # handled by the results_fx function and removed in this case,
        # to check that we indeed get the desired high number of blocks
        # while nproc stays limited.
        skip_if_no_external('pprocess')

        tfile = tempfile.mktemp('mvpa', 'test-sl')

        ds = datasets['3dsmall'].copy()[:, :25]  # smaller copy
        ds.fa['voxel_indices'] = ds.fa.myspace
        ds.fa['feature_id'] = np.arange(ds.nfeatures)

        nproc = 3  # it is not about computing -- so we can
        # start more processes than we have CPUs, just to test
        nblocks = nproc * 7
        # figure out max number of features to be given to any proc_block
        # yoh: not sure why I had to +1 here... but now it became more robust and
        # still seems to be doing what was demanded so be it
        max_block = int(ceil(ds.nfeatures / float(nblocks)) + 1)

        def print_(s, *args):
            """For local debugging"""
            #print s, args
            pass

        def results_fx(sl=None, dataset=None, roi_ids=None, results=None):
            """It will "process" the results by removing those files
               generated inside the measure
            """
            res = []
            print_("READY")
            for x in results:
                ok_(isinstance(x, list))
                res.append(x)
                print_("R: ", x)
                for r in x:
                    # Can happen if we requested those .ca's enabled
                    # -- then automagically _proc_block would wrap
                    # results in a dataset... Originally detected by
                    # running with MVPA_DEBUG=.* which triggered
                    # enabling all ca's
                    if is_datasetlike(r):
                        r = np.asscalar(r.samples)
                    os.unlink(r)  # remove generated file
                print_("WAITING")

            results_ds = hstack(sum(res, []))

            # store the center ids as a feature attribute since we use
            # them for testing
            results_ds.fa['center_ids'] = roi_ids
            return results_ds

        def results_postproc_fx(results):
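            # tag each result dataset with a derived feature attribute so the
            # test below can verify that results_postproc_fx was applied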
            for ds in results:
                ds.fa['test_postproc'] = np.atleast_1d(ds.a.roi_center_ids**2)
            return results

        def measure(ds):
            """The "measure" will check if a run with the same "index" from
               the previous block has been processed by now
            """
            f = '%s+%03d' % (tfile, ds.fa.feature_id[0] % (max_block * nproc))
            print_("FID:%d f:%s" % (ds.fa.feature_id[0], f))

            # allow up to a few seconds for the file to
            # disappear -- i.e. for its result from the previous "block"
            # to be processed
            t0 = time.time()
            while os.path.exists(f) and time.time() - t0 < 4.:
                time.sleep(0.5)  # so it does take time to compute the measure
            if os.path.exists(f):
                print_("ERROR: ", f)
                raise AssertionError(
                    "File %s must have been processed by now" % f)
            open(f, 'w').write(
                'XXX')  # signal that we have computed this measure
            print_("RES: %s" % f)
            return f

        sl = sphere_searchlight(measure,
                                radius=0,
                                nproc=nproc,
                                nblocks=nblocks,
                                results_postproc_fx=results_postproc_fx,
                                results_fx=results_fx,
                                center_ids=np.arange(ds.nfeatures))

        assert_equal(len(glob.glob(tfile + '*')), 0)  # so no junk around
        try:
            res = sl(ds)
            assert_equal(res.nfeatures, ds.nfeatures)
            # verify that we did have results_postproc_fx called
            assert_array_equal(res.fa.test_postproc,
                               np.power(res.fa.center_ids, 2))
        finally:
            # remove those generated left-over files
            for f in glob.glob(tfile + '*'):
                os.unlink(f)
Example #42
def main(subject,
         study_dir,
         mask,
         feature_mask,
         models,
         category,
         res_name,
         suffix='_stim_fix2',
         radius=3,
         n_perm=1000,
         n_proc=None):
    from mvpa2.mappers.zscore import zscore
    from mvpa2.mappers.fx import mean_group_sample
    from mvpa2.measures.searchlight import sphere_searchlight
    from mvpa2.datasets.mri import map2nifti
    from wikisim import mvpa

    # load subject data
    sp = su.SubjPath(subject, study_dir)
    vols = task.prex_vols(sp.path('behav', 'log'))

    # load fmri data
    ds = mvpa.load_prex_beta(sp,
                             suffix,
                             mask,
                             feature_mask=feature_mask,
                             verbose=1)

    # zscore
    ds.sa['run'] = vols.run.values
    zscore(ds, chunks_attr='run')

    # average over item presentations
    ds.sa['itemno'] = vols.itemno.to_numpy()
    m = mean_group_sample(['itemno'])
    dsm = ds.get_mapped(m)

    # get items of interest
    if category == 'face':
        cond = [1, 2]
    elif category == 'scene':
        cond = [3, 4]
    else:
        raise ValueError(f'Invalid category code: {category}')
    include = vols.groupby('itemno').first()['cond'].isin(cond)

    # get models of interest
    model_dir = os.path.join(study_dir, 'batch', 'models3')
    model_names = models.split('-')
    model_rdms_dict = model.load_category_rdms(model_dir, category,
                                               model_names)
    model_rdms = [model_rdms_dict[name] for name in model_names]

    # set up searchlight
    m = mvpa.ItemPartialRSA(model_rdms, n_perm)
    sl = sphere_searchlight(m, radius=radius, nproc=n_proc)
    sl_map = sl(dsm[include])

    nifti_include = map2nifti(ds, sl_map[-1])
    for i, name in enumerate(model_names):
        # save zstat map
        res_dir = sp.path('rsa', f'{res_name}_{name}')
        if not os.path.exists(res_dir):
            os.makedirs(res_dir)
        filepath = os.path.join(res_dir, 'zstat.nii.gz')
        nifti = map2nifti(ds, sl_map[i])
        nifti.to_filename(filepath)

        # save mask of included voxels
        include_file = os.path.join(res_dir, 'included.nii.gz')
        nifti_include.to_filename(include_file)
Example #43
def runsub(sub, thisContrast, r, dstype="raw", roi="grayMatter", filterLen=49, filterOrd=3, write=False):

    if dstype == "raw":
        outdir = "PyMVPA"
        print "working with raw data"
        thisSub = {sub: subList[sub]}
        dsdict = lmvpa.loadsubdata(paths, thisSub, m=roi)
        thisDS = dsdict[sub]
        mc_params = lmvpa.loadmotionparams(paths, thisSub)
        beta_events = lmvpa.loadevents(paths, thisSub)
        # Savitzky-Golay filtering
        sg.sg_filter(thisDS, filterLen, filterOrd)
        # Gallant group z-scores before regression.

        # zscore w.r.t. rest trials
        # zscore(thisDS, param_est=('targets', ['rest']), chunks_attr='chunks')
        # z-score the entire set. If done chunk-wise, there is no double-dipping (since we leave a chunk out at a time).
        zscore(thisDS, chunks_attr="chunks")
        print "beta extraction"
        ## BETA EXTRACTION ##
        rds, events = lmvpa.amendtimings(thisDS.copy(), beta_events[sub])
        evds = er.fit_event_hrf_model(
            rds,
            events,
            time_attr="time_coords",
            condition_attr=("trial_type", "chunks"),
            design_kwargs={"add_regs": mc_params[sub], "hrf_model": "canonical"},
            return_model=True,
        )

        fds = lmvpa.replacetargets(evds, contrasts, thisContrast)
        fds = fds[fds.targets != "0"]
    else:
        outdir = os.path.join("LSS", dstype)
        print "loading betas"
        fds = lmvpa.loadsubbetas(paths, sub, btype=dstype, m=roi)
        fds.sa["targets"] = fds.sa[thisContrast]
        zscore(fds, chunks_attr="chunks")

    fds = lmvpa.sortds(fds)
    print "searchlights"
    ## initialize classifier
    clf = svm.LinearNuSVMC()
    cv = CrossValidation(clf, NFoldPartitioner())
    from mvpa2.measures.searchlight import sphere_searchlight

    cvSL = sphere_searchlight(cv, radius=r)

    # now I have betas per chunk. could just correlate the betas, or correlate the predictions for corresponding runs
    lidx = fds.chunks < fds.sa["chunks"].unique[len(fds.sa["chunks"].unique) / 2]
    pidx = fds.chunks >= fds.sa["chunks"].unique[len(fds.sa["chunks"].unique) / 2]

    lres = sl.run_cv_sl(cvSL, fds[lidx].copy(deep=False))
    pres = sl.run_cv_sl(cvSL, fds[pidx].copy(deep=False))

    if write:
        from mvpa2.base import dataset

        map2nifti(fds, dataset.vstack([lres, pres])).to_filename(
            os.path.join(paths[0], "Maps", outdir, sub + "_" + roi + "_" + thisContrast + "_cvsl.nii.gz")
        )

    del lres, pres, cvSL

    cvSL = sphere_searchlight(cv, radius=r)
    crossSet = fds.copy()
    crossSet.chunks[lidx] = 1
    crossSet.chunks[pidx] = 2
    cres = sl.run_cv_sl(cvSL, crossSet.copy(deep=False))
    if write:
        map2nifti(fds, cres[0]).to_filename(
            os.path.join(paths[0], "Maps", outdir, sub + "_" + roi + "_" + (thisContrast) + "_P2L.nii.gz")
        )
        map2nifti(fds, cres[1]).to_filename(
            os.path.join(paths[0], "Maps", outdir, sub + "_" + roi + "_" + (thisContrast) + "_L2P.nii.gz")
        )
Example #44
plot_args = {
    'do_stretch_colors':
    False,
    'cmap_bg':
    'gray',
    'cmap_overlay':
    'autumn',  # pl.cm.autumn
    'interactive':
    cfg.getboolean('examples', 'interactive', True),
}

for radius in [3]:
    # tell which one we are doing
    print 'Running searchlight with radius: %i ...' % (radius)

    sl = sphere_searchlight(foldwiseCvedAnovaSelectedSMLR,
                            radius=radius,
                            space='voxel_indices',
                            center_ids=center_ids,
                            postproc=mean_sample())

    ds = dataset.copy(deep=False,
                      sa=['targets', 'chunks'],
                      fa=['voxel_indices'],
                      a=['mapper'])

    sl_map = sl(ds)
    sl_map.samples *= -1
    sl_map.samples += 1

    niftiresults = map2nifti(sl_map, imghdr=dataset.a.imghdr)
    niftiresults.to_filename(
        os.path.join(
            sessionPath,
            'analyze/functional/%s-grey-searchlight.nii' % classificationName))
Example #45
def test_multiclass_pairs_svm_searchlight():
    import mvpa2
    from mvpa2.measures.searchlight import sphere_searchlight
    import mvpa2.clfs.meta
    #reload(mvpa2.clfs.meta)
    from mvpa2.clfs.meta import MulticlassClassifier

    from mvpa2.datasets import Dataset
    from mvpa2.clfs.svm import LinearCSVMC
    #import mvpa2.testing.datasets
    #reload(mvpa2.testing.datasets)
    from mvpa2.testing.datasets import datasets
    from mvpa2.generators.partition import NFoldPartitioner, OddEvenPartitioner
    from mvpa2.measures.base import CrossValidation

    from mvpa2.testing import ok_, assert_equal, assert_array_equal
    from mvpa2.sandbox.multiclass import get_pairwise_accuracies

    # Some parameters used in the test below
    nproc = 1 + int(mvpa2.externals.exists('pprocess'))
    ntargets = 4                                # number of targets
    npairs = ntargets*(ntargets-1)/2
    center_ids = [35, 55, 1]
    ds = datasets['3dsmall'].copy()

    # redefine C,T so we have a multiclass task
    nsamples = len(ds)
    ds.sa.targets = range(ntargets) * (nsamples//ntargets)
    ds.sa.chunks = np.arange(nsamples) // ntargets
    # and add some obvious signal where it is due
    ds.samples[:, 55] += 15*ds.sa.targets   # for all 4 targets
    ds.samples[:, 35] += 15*(ds.sa.targets % 2) # so we have conflicting labels:
    # feature 35 only separates 2 groups of targets, which conflicts with the 4-class task

    mclf = MulticlassClassifier(LinearCSVMC(),
                                pass_attr=['sa.chunks', 'ca.raw_predictions_ds'],
                                enable_ca=['raw_predictions_ds'])

    label_pairs = mclf._get_binary_pairs(ds)

    def place_sa_as_samples(ds):
        # add a degenerate dimension for the hstacking in the searchlight
        ds.samples = ds.sa.raw_predictions_ds[:, None]
        ds.sa.pop('raw_predictions_ds')   # no need to drag the copy
        return ds

    mcv = CrossValidation(mclf, OddEvenPartitioner(), errorfx=None,
                          postproc=place_sa_as_samples)
    sl = sphere_searchlight(mcv, nproc=nproc, radius=2, space='myspace',
                            center_ids=center_ids)
    slmap = sl(ds)


    ok_('chunks' in slmap.sa)
    ok_('cvfolds' in slmap.sa)
    ok_('targets' in slmap.sa)
    # so for each SL we got all pairwise tests
    assert_equal(slmap.shape, (nsamples, len(center_ids), npairs))
    assert_array_equal(np.unique(slmap.sa.cvfolds), [0, 1])

    # Verify that we got right labels in each 'pair'
    # all searchlights should have the same set of labels for a given
    # pair of targets
    label_pairs_ = np.apply_along_axis(
        np.unique, 0,
        ## reshape slmap so we have only simple pairs in the columns
        np.reshape(slmap, (-1, npairs))).T

    # need to prep that list of pairs obtained from MulticlassClassifier;
    # since it is 1-vs-1, they should all be pairs of single-element lists,
    # so this should work
    assert_equal(len(label_pairs_), npairs)
    assert_array_equal(np.squeeze(np.array(label_pairs)), label_pairs_)
    assert_equal(label_pairs_.shape, (npairs, 2))   # for this particular case


    out    = get_pairwise_accuracies(slmap)
    out123 = get_pairwise_accuracies(slmap, select=[1, 2, 3])

    assert_array_equal(np.unique(out123.T), np.arange(1, 4))   # so at least we got the correct targets
    # test that we extracted correct accuracies
    # First 3 in out.T should have category 0, so skip them and compare otherwise
    assert_array_equal(out.samples[3:], out123.samples)

    ok_(np.all(out.samples[:, 1] == 1.), "This was with super-strong result")
Example #46
    def test_spatial_searchlight(self, lrn_sllrn_SL_partitioner, do_roi=False,
                                 results_backend='native'):
        """Tests both generic and ad-hoc searchlights (e.g. GNBSearchlight)
        Testing an ad-hoc searchlight anyway requires a ground-truth
        comparison to the generic version, so we are doing sweepargs here
        """
        lrn, sllrn, SL, partitioner, correction = lrn_sllrn_SL_partitioner
        ## if results_backend == 'hdf5' and not common_variance:
        ##     # no need for full combination of all possible arguments here
        ##     return

        if __debug__ and 'ENFORCE_CA_ENABLED' in debug.active \
           and isinstance(lrn, ChainMapper):
            raise SkipTest("Known to fail while trying to enable "
                           "training_stats for the ChainMapper (M1NN here)")


        # e.g. for M1NN we need plain kNN(1) for m1nnsl, but to imitate m1nn
        #      "learner" we must use a chainmapper atm
        if sllrn is None:
            sllrn = lrn
        ds = datasets['3dsmall'].copy()
        # Let's test multiclass here, so boost # of labels
        ds[6:18].T += 2
        ds.fa['voxel_indices'] = ds.fa.myspace

        # To ensure that users do not run into incorrect operation due to overflows
        ds.samples += 5000
        ds.samples *= 1000
        ds.samples = ds.samples.astype(np.int16)

        # compute N-1 cross-validation for each sphere
        # YOH: unfortunately sample_clf_lin is not guaranteed
        #      to provide exactly the same results due to inherent
        #      iterative process.  Therefore lets use something quick
        #      and pure Python
        cv = CrossValidation(lrn, partitioner)

        skwargs = dict(radius=1, enable_ca=['roi_sizes', 'raw_results',
                                            'roi_feature_ids'])

        if do_roi:
            # select some random set of features
            nroi = rnd.randint(1, ds.nfeatures)
            # and let's compute the full one as well, so we have a reference;
            # it is itself excluded from comparisons, but its values are
            # compared for the selected roi_ids
            sl_all = SL(sllrn, partitioner, **skwargs)
            result_all = sl_all(ds)
            # select random features
            roi_ids = rnd.permutation(range(ds.nfeatures))[:nroi]
            skwargs['center_ids'] = roi_ids
        else:
            nroi = ds.nfeatures
            roi_ids = np.arange(nroi)
            result_all = None

        if results_backend == 'hdf5':
            skip_if_no_external('h5py')

        sls = [sphere_searchlight(cv, results_backend=results_backend,
                                  **skwargs),
               #GNBSearchlight(gnb, NFoldPartitioner(cvtype=1))
               SL(sllrn, partitioner, indexsum='fancy', **skwargs)
               ]

        if externals.exists('scipy'):
            sls += [ SL(sllrn, partitioner, indexsum='sparse', **skwargs)]

        # Test nproc just once
        if externals.exists('pprocess') and not self._tested_pprocess:
            sls += [sphere_searchlight(cv, nproc=2, **skwargs)]
            self._tested_pprocess = True

        # Provide the dataset and all those searchlights for testing
        #self._test_searchlights(ds, sls, roi_ids, result_all)
        #nroi = len(roi_ids)
        #do_roi = nroi != ds.nfeatures
        all_results = []
        for sl in sls:
            # run searchlight
            mvpa2.seed()                # reseed rng again for m1nnsl
            results = sl(ds)
            all_results.append(results)
            #print `sl`
            # check for correct number of spheres
            self.assertTrue(results.nfeatures == nroi)
            # and measures (one per xfold)
            if partitioner.cvtype == 1:
                self.assertTrue(len(results) == len(ds.UC))
            elif partitioner.cvtype == 0.5:
                # here we had 4 unique chunks, so 6 combinations
                # even though 20 max was specified for NFold
                self.assertTrue(len(results) == 6)
            else:
                raise RuntimeError("Unknown yet type of partitioner to check")
            # check for chance-level performance across all spheres
            # makes sense only if number of features was big enough
            # to get some stable estimate of mean
            if not do_roi or nroi > 20:
                # correction here is for M1NN class which has wider distribution
                self.assertTrue(
                    0.68 - correction < results.samples.mean() < 0.84 + correction,
                    msg="Out of range mean result: "
                    "lrn: %s  sllrn: %s  NROI: %d  MEAN: %.3f"
                    % (lrn, sllrn, nroi, results.samples.mean(),))

            mean_errors = results.samples.mean(axis=0)
            # that we do get different errors ;)
            self.assertTrue(len(np.unique(mean_errors)) > 3)

            # check reasonable sphere sizes
            self.assertTrue(len(sl.ca.roi_sizes) == nroi)
            self.assertTrue(len(sl.ca.roi_feature_ids) == nroi)
            for i, fids in enumerate(sl.ca.roi_feature_ids):
                self.assertTrue(len(fids) == sl.ca.roi_sizes[i])
            if do_roi:
                # for roi we should relax conditions a bit
                self.assertTrue(max(sl.ca.roi_sizes) <= 7)
                self.assertTrue(min(sl.ca.roi_sizes) >= 4)
            else:
                self.assertTrue(max(sl.ca.roi_sizes) == 7)
                self.assertTrue(min(sl.ca.roi_sizes) == 4)

            # check base-class state
            self.assertEqual(sl.ca.raw_results.nfeatures, nroi)

            # Test if we got results correctly for 'selected' roi ids
            if do_roi:
                assert_array_equal(result_all[:, roi_ids], results)

        if len(all_results) > 1:
            # if we had multiple searchlights, we can check whether they all
            # gave the same result (they should have)
            aresults = np.array([a.samples for a in all_results])
            dresults = np.abs(aresults - aresults.mean(axis=0))
            dmax = np.max(dresults)
            self.assertTrue(dmax <= 1e-13)

        # Test the searchlight's reuse of neighbors
        for indexsum in ['fancy'] + (
            externals.exists('scipy') and ['sparse'] or []):
            sl = SL(sllrn, partitioner, indexsum=indexsum,
                    reuse_neighbors=True, **skwargs)
            mvpa2.seed()
            result1 = sl(ds)
            mvpa2.seed()
            result2 = sl(ds)                # must be faster
            assert_array_equal(result1, result2)
Example #47
npart = NFoldPartitioner()
# modify partitioning behavior
cross_decode_chain = ChainNode([npart,
                      CrossDecodingFilter((('cs_deval'),('cs_val'),('csm')),
                               npart.get_space(),
                               'trial_type')
                     ], space='filtered_partitions')

#SVM classifier
clf = LinearCSVMC()
# cross-validate using NFoldPartitioner -- which makes cross-validation folds by chunk/run
cv = CrossValidation(clf, cross_decode_chain, errorfx=lambda p, t: np.mean(p == t), enable_ca=['stats'])

# implement a full-brain searchlight with spheres of radius 3
svm_sl = sphere_searchlight(cv, radius=3, space='voxel_indices',
                            postproc=mean_sample())

# ---------------------------- Run

#searchlight
# enable progress bar
# if __debug__:
#    debug.active += ["SLC"]

start_time = time.time()
print 'starting searchlight', time.time() - start_time

res_sl = svm_sl(fds)

sl_time = time.time() - start_time
print 'finished searchlight', sl_time
Example #48
# get ids of features that have a nonzero value
#center_ids = dataset.fa.wholeBrain.nonzero()[0]
center_ids = dataset.fa.voxel_indices.nonzero()[0]

#cv = CrossValidatedTransferError(TransferError(svmReg, CorrErrorFx()), NFoldSplitter())
cv = CrossValidation(svmReg, NFoldPartitioner(), errorfx=corr_error, enable_ca=['training_stats', 'stats'])

# cross-validated mean transfer using an N-fold dataset splitter

for radius in [int(slRad)]:
    # tell which one we are doing
    print "Running searchlight with radius: %i ..." % (radius)

    sl = sphere_searchlight(cv, radius=radius, space='voxel_indices',
    #                        center_ids=center_ids,
                            postproc=mean_sample())

    ds = dataset.copy(deep=False,
                      sa=['targets', 'chunks'],
                      fa=['voxel_indices'],
                      a=['mapper'])

    sl_map = sl(ds)
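    # corr_error yields 1 - correlation, so 1 - err recovers the
    # correlation score saved below as the "accuracy" map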
    sl_acc = sl_map.copy(deep=True)
    sl_acc.samples = 1.0 - sl_acc.samples

    # save results
    niftiresults = map2nifti(sl_acc, imghdr=dataset.a.imghdr)
    resultspath = os.path.join('.')
    niftiresults.to_filename('accuracy_' + conName + '.nii.gz')
Example #49
from mvpa2.testing.datasets import datasets
from mvpa2.mappers.fx import mean_sample

"""For the sake of simplicity, let's use a small artificial dataset."""

# Let's just use our tiny 4D dataset from the testing battery
dataset = datasets['3dlarge']

"""Now it only takes three lines for a searchlight analysis."""

# setup measure to be computed in each sphere (cross-validated
# generalization error on odd/even splits)
cv = CrossValidation(LinearCSVMC(), OddEvenPartitioner())

# setup searchlight with 2 voxels radius and measure configured above
sl = sphere_searchlight(cv, radius=2, space='myspace',
                        postproc=mean_sample())

# run searchlight on dataset
sl_map = sl(dataset)

print 'Best performing sphere error:', np.min(sl_map.samples)

"""
If this analysis is done on an fMRI dataset using `NiftiDataset`, the resulting
searchlight map (`sl_map`) can be mapped back into the original dataspace
and viewed as a brain overlay. :ref:`Another example <example_searchlight>`
shows a typical application of this algorithm.

.. Mention the fact that it also is a special `SensitivityAnalyzer`
"""
Example #50
    def test_spatial_searchlight(self,
                                 lrn_sllrn_SL_partitioner,
                                 do_roi=False,
                                 results_backend='native'):
        """Tests both generic and ad-hoc searchlights (e.g. GNBSearchlight)
        Testing an ad-hoc searchlight anyway requires a ground-truth
        comparison to the generic version, so we are doing sweepargs here
        """
        lrn, sllrn, SL, partitioner, correction = lrn_sllrn_SL_partitioner
        ## if results_backend == 'hdf5' and not common_variance:
        ##     # no need for full combination of all possible arguments here
        ##     return

        if __debug__ and 'ENFORCE_CA_ENABLED' in debug.active \
           and isinstance(lrn, ChainMapper):
            raise SkipTest("Known to fail while trying to enable "
                           "training_stats for the ChainMapper (M1NN here)")

        # e.g. for M1NN we need plain kNN(1) for m1nnsl, but to imitate m1nn
        #      "learner" we must use a chainmapper atm
        if sllrn is None:
            sllrn = lrn
        ds = datasets['3dsmall'].copy()
        # Let's test multiclass here, so boost # of labels
        ds[6:18].T += 2
        ds.fa['voxel_indices'] = ds.fa.myspace

        # To ensure that users do not run into incorrect operation due to overflows
        ds.samples += 5000
        ds.samples *= 1000
        ds.samples = ds.samples.astype(np.int16)

        # compute N-1 cross-validation for each sphere
        # YOH: unfortunately sample_clf_lin is not guaranteed
        #      to provide exactly the same results due to inherent
        #      iterative process.  Therefore lets use something quick
        #      and pure Python
        cv = CrossValidation(lrn, partitioner)

        skwargs = dict(
            radius=1,
            enable_ca=['roi_sizes', 'raw_results', 'roi_feature_ids'])

        if do_roi:
            # select some random set of features
            nroi = rnd.randint(1, ds.nfeatures)
            # and let's compute the full one as well, so we have a reference;
            # it is itself excluded from comparisons, but its values are
            # compared for the selected roi_ids
            sl_all = SL(sllrn, partitioner, **skwargs)
            result_all = sl_all(ds)
            # select random features
            roi_ids = rnd.permutation(range(ds.nfeatures))[:nroi]
            skwargs['center_ids'] = roi_ids
        else:
            nroi = ds.nfeatures
            roi_ids = np.arange(nroi)
            result_all = None

        if results_backend == 'hdf5':
            skip_if_no_external('h5py')

        sls = [
            sphere_searchlight(cv, results_backend=results_backend, **skwargs),
            #GNBSearchlight(gnb, NFoldPartitioner(cvtype=1))
            SL(sllrn, partitioner, indexsum='fancy', **skwargs)
        ]

        if externals.exists('scipy'):
            sls += [SL(sllrn, partitioner, indexsum='sparse', **skwargs)]

        # Test nproc just once
        if externals.exists('pprocess') and not self._tested_pprocess:
            sls += [sphere_searchlight(cv, nproc=2, **skwargs)]
            self._tested_pprocess = True

        # Provide the dataset and all those searchlights for testing
        #self._test_searchlights(ds, sls, roi_ids, result_all)
        #nroi = len(roi_ids)
        #do_roi = nroi != ds.nfeatures
        all_results = []
        for sl in sls:
            # run searchlight
            mvpa2.seed()  # reseed rng again for m1nnsl
            results = sl(ds)
            all_results.append(results)
            #print `sl`
            # check for correct number of spheres
            self.assertTrue(results.nfeatures == nroi)
            # and measures (one per xfold)
            if partitioner.cvtype == 1:
                self.assertTrue(len(results) == len(ds.UC))
            elif partitioner.cvtype == 0.5:
                # here we had 4 unique chunks, so 6 combinations
                # even though 20 max was specified for NFold
                self.assertTrue(len(results) == 6)
            else:
                raise RuntimeError("Unknown yet type of partitioner to check")
            # check for chance-level performance across all spheres
            # makes sense only if number of features was big enough
            # to get some stable estimate of mean
            if not do_roi or nroi > 20:
                # correction here is for M1NN class which has wider distribution
                self.assertTrue(0.67 - correction < results.samples.mean() <
                                0.85 + correction,
                                msg="Out of range mean result: "
                                "lrn: %s  sllrn: %s  NROI: %d  MEAN: %.3f" % (
                                    lrn,
                                    sllrn,
                                    nroi,
                                    results.samples.mean(),
                                ))

            mean_errors = results.samples.mean(axis=0)
            # that we do get different errors ;)
            self.assertTrue(len(np.unique(mean_errors)) > 3)

            # check reasonable sphere sizes
            self.assertTrue(len(sl.ca.roi_sizes) == nroi)
            self.assertTrue(len(sl.ca.roi_feature_ids) == nroi)
            for i, fids in enumerate(sl.ca.roi_feature_ids):
                self.assertTrue(len(fids) == sl.ca.roi_sizes[i])
            if do_roi:
                # for roi we should relax conditions a bit
                self.assertTrue(max(sl.ca.roi_sizes) <= 7)
                self.assertTrue(min(sl.ca.roi_sizes) >= 4)
            else:
                self.assertTrue(max(sl.ca.roi_sizes) == 7)
                self.assertTrue(min(sl.ca.roi_sizes) == 4)

            # check base-class state
            self.assertEqual(sl.ca.raw_results.nfeatures, nroi)

            # Test if we got results correctly for 'selected' roi ids
            if do_roi:
                assert_array_equal(result_all[:, roi_ids], results)

        if len(all_results) > 1:
            # if we had multiple searchlights, we can check whether they all
            # gave the same result (they should have)
            aresults = np.array([a.samples for a in all_results])
            dresults = np.abs(aresults - aresults.mean(axis=0))
            dmax = np.max(dresults)
            self.assertTrue(dmax <= 1e-13)

        # Test the searchlight's reuse of neighbors
        for indexsum in ['fancy'] + (externals.exists('scipy') and ['sparse']
                                     or []):
            sl = SL(sllrn,
                    partitioner,
                    indexsum=indexsum,
                    reuse_neighbors=True,
                    **skwargs)
            mvpa2.seed()
            result1 = sl(ds)
            mvpa2.seed()
            result2 = sl(ds)  # must be faster
            assert_array_equal(result1, result2)