Example #1
    def test_chi_square_searchlight(self):
        # only do partial to save time

        # Can't yet do this since test_searchlight isn't yet "under nose"
        #skip_if_no_external('scipy')
        if not externals.exists('scipy'):
            return

        from mvpa.misc.stats import chisquare

        transerror = TransferError(sample_clf_lin)
        cv = CrossValidatedTransferError(
                transerror,
                NFoldSplitter(cvtype=1),
                enable_ca=['confusion'])


        def getconfusion(data):
            cv(data)
            return chisquare(cv.ca.confusion.matrix)[0]

        sl = sphere_searchlight(getconfusion, radius=0,
                                center_ids=[3, 50])

        # run searchlight
        results = sl(self.dataset)
        self.failUnless(results.nfeatures == 2)
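
Since chisquare() returns a (statistic, p-value) pair and the measure above
keeps only index 0 (the statistic), mapping p-values instead is a one-line
change. A minimal sketch, assuming index 1 holds the p-value; the helper
name getpvalue is hypothetical:

        def getpvalue(data):
            cv(data)
            # assumption: [1] is the p-value companion of the statistic
            return chisquare(cv.ca.confusion.matrix)[1]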
Example #2
    def test_partial_searchlight_with_full_report(self):
        # compute N-1 cross-validation for each sphere
        cv = CrossValidation(sample_clf_lin, NFoldPartitioner())
        # construct a diameter 1 (or just radius 0) searchlight
        sl = sphere_searchlight(cv, radius=0,
                                center_ids=[3, 50])

        # run searchlight
        results = sl(self.dataset)

        # only two spheres but error for all CV-folds
        self.failUnlessEqual(results.shape, (len(self.dataset.UC), 2))

        # test if we graciously puke if center_ids are out of bounds
        dataset0 = self.dataset[:, :50] # so we have no 50th feature
        self.failUnlessRaises(IndexError, sl, dataset0)
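
If the IndexError is undesirable in regular use, the requested centers can
be filtered against the dataset's feature count before constructing the
searchlight. A minimal sketch reusing cv and dataset0 from the test above
(valid_ids and sl0 are hypothetical names):

        # keep only centers that exist in the smaller dataset
        valid_ids = [i for i in [3, 50] if i < dataset0.nfeatures]
        sl0 = sphere_searchlight(cv, radius=0, center_ids=valid_ids)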
Example #3
    def _run_core(self):
        """
        Core routine: run a cross-validated searchlight analysis on the
        input dataset and save the resulting map.

        All inputs (samples_file, attributes_file, mask_file, radius)
        are taken from ``self.inputs``.
        """
        attr = SampleAttributes(self.inputs.attributes_file)

        dataset = fmri_dataset(
            samples=self.inputs.samples_file,
            labels=attr.labels,
            chunks=attr.chunks,
            mask=self.inputs.mask_file)

        if 'rest' in dataset.uniquelabels:
            dataset = dataset[dataset.sa.labels != 'rest']

        # zscore dataset per chunk (the baseline 'rest' samples were
        # removed above, so each chunk is normalized by its own mean)
        zscore(dataset, chunks_attr='chunks', dtype='float32')

        # choose classifier
        clf = LinearCSVMC()

        # setup measure to be computed by Searchlight
        # cross-validated mean transfer using an N-fold dataset splitter
        cv = CrossValidatedTransferError(TransferError(clf),
                                         NFoldSplitter())


        # set up the searchlight: spheres of the requested radius defined
        # over the 'voxel_indices' feature space, computed with two
        # parallel processes, with fold-wise errors averaged into a single
        # value per sphere by the mean_sample() mapper
        sl = sphere_searchlight(cv, radius=self.inputs.radius,
                                space='voxel_indices',
                                nproc=2, mapper=mean_sample())

        # strip the dataset down to the attributes the searchlight needs
        # (deep=False shares the sample data to save memory)
        ds = dataset.copy(deep=False,
                          sa=['labels', 'chunks'], fa=['voxel_indices'],
                          a=[])

        sl_map = sl(ds)
        # project the searchlight map back into the original dataspace
        orig_sl_map = dataset.map2nifti(sl_map)

        orig_sl_map.save(self._get_output_filename())
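
The saved map can be inspected outside this interface by loading it back,
e.g. with nibabel (a sketch; nibabel and the filename are assumptions here,
as the example only shows the NiftiImage-style save() call):

import nibabel as nb

img = nb.load('searchlight_map.nii.gz')  # illustrative filename
print 'map shape:', img.get_data().shape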
Example #4
    def test_partial_searchlight_with_full_report(self):
        # compute N-1 cross-validation for each sphere
        transerror = TransferError(sample_clf_lin)
        cv = CrossValidatedTransferError(
                transerror,
                NFoldSplitter(cvtype=1))
        # construct a diameter 1 (or just radius 0) searchlight
        sl = sphere_searchlight(cv, radius=0,
                                center_ids=[3, 50])

        # run searchlight
        results = sl(self.dataset)

        # only two spheres but error for all CV-folds
        self.failUnlessEqual(results.shape, (len(self.dataset.UC), 2))

        # test if we graciously puke if center_ids are out of bounds
        dataset0 = self.dataset[:, :50] # so we have no 50th feature
        self.failUnlessRaises(IndexError, sl, dataset0)
Example #5
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.measures.searchlight import sphere_searchlight
from mvpa.testing.datasets import datasets
from mvpa.mappers.fx import mean_sample
# imports the snippet below also needs (module paths assume the same
# pre-2.0 'mvpa' namespace as above)
from mvpa.clfs.svm import LinearCSVMC
from mvpa.clfs.transerror import TransferError
from mvpa.datasets.splitters import OddEvenSplitter

import numpy as np
"""For the sake of simplicity, let's use a small artificial dataset."""

# Let's just use our tiny 4D dataset from the testing battery
dataset = datasets['3dlarge']
"""Now it only takes three lines for a searchlight analysis."""

# setup measure to be computed in each sphere (cross-validated
# generalization error on odd/even splits)
cv = CrossValidatedTransferError(TransferError(LinearCSVMC()),
                                 OddEvenSplitter())

# setup searchlight with 2 voxels radius and measure configured above
sl = sphere_searchlight(cv, radius=2, space='myspace', postproc=mean_sample())

# run searchlight on dataset
sl_map = sl(dataset)

print 'Best performing sphere error:', np.min(sl_map.samples)
"""
If this analysis is done on an fMRI dataset using `NiftiDataset`, the resulting
searchlight map (`sl_map`) can be mapped back into the original dataspace
and viewed as a brain overlay. :ref:`Another example <example_searchlight>`
shows a typical application of this algorithm.

.. Mention the fact that it also is a special `SensitivityAnalyzer`
"""
Example #6
"""For the sake of simplicity, let's use a small artificial dataset."""

# Let's just use our tiny 4D dataset from the testing battery
dataset = datasets['3dlarge']

"""Now it only takes three lines for a searchlight analysis."""

# setup measure to be computed in each sphere (cross-validated
# generalization error on odd/even splits)
cv = CrossValidatedTransferError(
         TransferError(LinearCSVMC()),
         OddEvenSplitter())

# setup searchlight with 2 voxels radius and measure configured above
sl = sphere_searchlight(cv, radius=2, space='myspace',
                        postproc=mean_sample())

# run searchlight on dataset
sl_map = sl(dataset)

print 'Best performing sphere error:', np.min(sl_map.samples)

"""
If this analysis is done on an fMRI dataset using `NiftiDataset`, the resulting
searchlight map (`sl_map`) can be mapped back into the original dataspace
and viewed as a brain overlay. :ref:`Another example <example_searchlight>`
shows a typical application of this algorithm.

.. Mention the fact that it also is a special `SensitivityAnalyzer`
"""
Example #7
    def test_spatial_searchlight(self, common_variance):
        """Tests both generic and GNBSearchlight
        Test of GNBSearchlight anyways requires a ground-truth
        comparison to the generic version, so we are doing sweepargs here
        """
        # XXX after running it about a million times we now know that
        #     gnb_searchlight is totally broken
        raise SkipTest
        # compute N-1 cross-validation for each sphere
        # YOH: unfortunately sample_clf_lin is not guaranteed
        #      to provide exactly the same results due to its inherently
        #      iterative process.  Therefore let's use something quick
        #      and pure Python
        gnb = GNB(common_variance=common_variance)
        cv = CrossValidation(gnb, NFoldPartitioner())

        # shared searchlight arguments: unit-radius spheres, plus
        # conditional attributes exposing per-sphere sizes and the raw
        # per-fold results
        skwargs = dict(radius=1, enable_ca=['roi_sizes', 'raw_results'])
        sls = [sphere_searchlight(cv, **skwargs),
               #GNBSearchlight(gnb, NFoldSplitter(cvtype=1))
               sphere_gnbsearchlight(gnb, NFoldSplitter(cvtype=1),
                                     indexsum='fancy', **skwargs)
               ]

        if externals.exists('scipy'):
            sls += [ sphere_gnbsearchlight(gnb, NFoldSplitter(cvtype=1),
                                           indexsum='sparse', **skwargs)]

        # Just test nproc whenever common_variance is True
        if externals.exists('pprocess') and common_variance:
            sls += [sphere_searchlight(cv, nproc=2, **skwargs)]

        all_results = []
        ds = datasets['3dsmall'].copy()
        # expose the coordinates under the default 'voxel_indices' space
        # name that the searchlights expect
        ds.fa['voxel_indices'] = ds.fa.myspace
        for sl in sls:
            # run searchlight
            results = sl(ds)
            all_results.append(results)

            # check for correct number of spheres
            self.failUnless(results.nfeatures == 106)
            # and measures (one per xfold)
            self.failUnless(len(results) == len(ds.UC))

            # check for chance-level performance across all spheres
            self.failUnless(0.4 < results.samples.mean() < 0.6)

            mean_errors = results.samples.mean(axis=0)
            # check that we do get a spread of different errors
            self.failUnless(len(np.unique(mean_errors)) > 3)

            # check for reasonable sphere sizes
            self.failUnless(len(sl.ca.roi_sizes) == 106)
            self.failUnless(max(sl.ca.roi_sizes) == 7)
            self.failUnless(min(sl.ca.roi_sizes) == 4)

            # check base-class state
            self.failUnlessEqual(sl.ca.raw_results.nfeatures, 106)

        if len(all_results) > 1:
            # if we had multiple searchlights, we can check whether they
            # all gave the same result (they should have)
            aresults = np.array([a.samples for a in all_results])
            dresults = np.abs(aresults - aresults.mean(axis=0))
            dmax = np.max(dresults)
            self.failUnless(dmax <= 1e-13)
Example #8
    def test_spatial_searchlight(self, common_variance):
        """Tests both generic and GNBSearchlight
        Test of GNBSearchlight anyways requires a ground-truth
        comparison to the generic version, so we are doing sweepargs here
        """
        # compute N-1 cross-validation for each sphere
        # YOH: unfortunately sample_clf_lin is not guaranteed
        #      to provide exactly the same results due to its inherently
        #      iterative process.  Therefore let's use something quick
        #      and pure Python
        gnb = GNB(common_variance=common_variance)
        transerror = TransferError(gnb)
        cv = CrossValidatedTransferError(
                transerror,
                NFoldSplitter(cvtype=1))

        skwargs = dict(radius=1, enable_ca=['roi_sizes', 'raw_results'])
        sls = [sphere_searchlight(cv, **skwargs),
               #GNBSearchlight(gnb, NFoldSplitter(cvtype=1))
               sphere_gnbsearchlight(gnb, NFoldSplitter(cvtype=1),
                                     indexsum='fancy', **skwargs)
               ]

        if externals.exists('scipy'):
            sls += [ sphere_gnbsearchlight(gnb, NFoldSplitter(cvtype=1),
                                           indexsum='sparse', **skwargs)]

        # Just test nproc whenever common_variance is True
        if externals.exists('pprocess') and common_variance:
            sls += [sphere_searchlight(cv, nproc=2, **skwargs)]

        all_results = []
        ds = datasets['3dsmall'].copy()
        ds.fa['voxel_indices'] = ds.fa.myspace
        for sl in sls:
            # run searchlight
            results = sl(ds)
            all_results.append(results)

            # check for correct number of spheres
            self.failUnless(results.nfeatures == 106)
            # and measures (one per xfold)
            self.failUnless(len(results) == len(ds.UC))

            # check for chance-level performance across all spheres
            self.failUnless(0.4 < results.samples.mean() < 0.6)

            mean_errors = results.samples.mean(axis=0)
            # check that we do get a spread of different errors
            self.failUnless(len(np.unique(mean_errors)) > 3)

            # check for reasonable sphere sizes
            self.failUnless(len(sl.ca.roi_sizes) == 106)
            self.failUnless(max(sl.ca.roi_sizes) == 7)
            self.failUnless(min(sl.ca.roi_sizes) == 4)

            # check base-class state
            self.failUnlessEqual(sl.ca.raw_results.nfeatures, 106)

        if len(all_results) > 1:
            # if we had multiple searchlights, we can check whether they
            # all gave the same result (they should have)
            aresults = np.array([a.samples for a in all_results])
            dresults = np.abs(aresults - aresults.mean(axis=0))
            dmax = np.max(dresults)
            self.failUnless(dmax <= 1e-13)