Example #1
    def test_spatial_searchlight(self, common_variance):
        """Tests both generic and GNBSearchlight
        Test of GNBSearchlight anyways requires a ground-truth
        comparison to the generic version, so we are doing sweepargs here
        """
        # XXX after running it 1 million times we now know that gnb_searchlight
        # is totally broken
        raise SkipTest
        # compute N-1 cross-validation for each sphere
        # YOH: unfortunately sample_clf_lin is not guaranteed
        #      to provide exactly the same results due to its inherent
        #      iterative process.  Therefore let's use something quick
        #      and pure Python
        gnb = GNB(common_variance=common_variance)
        cv = CrossValidation(gnb, NFoldPartitioner())

        skwargs = dict(radius=1, enable_ca=['roi_sizes', 'raw_results'])
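        # indexsum selects how GNBSearchlight sums per-ROI contributions:
        # 'fancy' uses numpy fancy indexing, while 'sparse' (added below
        # when scipy is available) uses a scipy sparse-matrix product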
        sls = [sphere_searchlight(cv, **skwargs),
               #GNBSearchlight(gnb, NFoldSplitter(cvtype=1))
               sphere_gnbsearchlight(gnb, NFoldSplitter(cvtype=1),
                                     indexsum='fancy', **skwargs)
               ]

        if externals.exists('scipy'):
            sls += [ sphere_gnbsearchlight(gnb, NFoldSplitter(cvtype=1),
                                           indexsum='sparse', **skwargs)]

        # Just test nproc whenever common_variance is True
        if externals.exists('pprocess') and common_variance:
            sls += [sphere_searchlight(cv, nproc=2, **skwargs)]

        all_results = []
        ds = datasets['3dsmall'].copy()
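        # sphere_searchlight's default query engine looks up feature
        # coordinates in the 'voxel_indices' feature attribute, so expose
        # the dataset's 'myspace' coordinates under that name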
        ds.fa['voxel_indices'] = ds.fa.myspace
        for sl in sls:
            # run searchlight
            results = sl(ds)
            all_results.append(results)

            # check for correct number of spheres
            self.failUnless(results.nfeatures == 106)
            # and measures (one per xfold)
            self.failUnless(len(results) == len(ds.UC))

            # check for chance-level performance across all spheres
            self.failUnless(0.4 < results.samples.mean() < 0.6)

            mean_errors = results.samples.mean(axis=0)
            # check that we do actually get different errors across spheres ;)
            self.failUnless(len(np.unique(mean_errors)) > 3)

            # check for reasonable sphere sizes
            self.failUnless(len(sl.ca.roi_sizes) == 106)
            self.failUnless(max(sl.ca.roi_sizes) == 7)
            self.failUnless(min(sl.ca.roi_sizes) == 4)

            # check base-class state
            self.failUnlessEqual(sl.ca.raw_results.nfeatures, 106)

        if len(all_results) > 1:
            # if we had multiple searchlights, we can check whether they
            # all gave the same result (they should have)
            aresults = np.array([a.samples for a in all_results])
            dresults = np.abs(aresults - aresults.mean(axis=0))
            dmax = np.max(dresults)
            self.failUnless(dmax <= 1e-13)
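Both examples come from PyMVPA's searchlight unit tests and rely on the test suite's imports and fixture datasets (datasets['3dsmall'], sweepargs, and friends), so they do not run standalone. For orientation, here is a minimal standalone sketch of the same pattern using the mvpa2-era API from Example #1; the import paths and the synthetic dataset are assumptions on my part and may need adjusting for your PyMVPA version.

# Minimal standalone sketch of the pattern above (mvpa2-era API); the
# import paths and synthetic dataset are illustrative, not from the test.
import numpy as np
from mvpa2.clfs.gnb import GNB
from mvpa2.generators.partition import NFoldPartitioner
from mvpa2.measures.base import CrossValidation
from mvpa2.measures.searchlight import sphere_searchlight
from mvpa2.misc.data_generators import normal_feature_dataset

# small synthetic dataset; lay features out on a 1-D integer grid so the
# default 'voxel_indices'-based sphere query engine has coordinates
ds = normal_feature_dataset(perlabel=10, nlabels=2, nfeatures=20, nchunks=5)
ds.fa['voxel_indices'] = np.array([[i, 0, 0] for i in range(ds.nfeatures)])

cv = CrossValidation(GNB(), NFoldPartitioner())
sl = sphere_searchlight(cv, radius=1, enable_ca=['roi_sizes'])
res = sl(ds)  # one error per (fold, sphere): shape (nchunks, nfeatures)
print(res.shape, np.asarray(sl.ca.roi_sizes)[:5])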
Example #2
    def test_spatial_searchlight(self, common_variance):
        """Tests both generic and GNBSearchlight
        Test of GNBSearchlight anyways requires a ground-truth
        comparison to the generic version, so we are doing sweepargs here
        """
        # compute N-1 cross-validation for each sphere
        # YOH: unfortunately sample_clf_lin is not guaranteed
        #      to provide exactly the same results due to its inherent
        #      iterative process.  Therefore let's use something quick
        #      and pure Python
        gnb = GNB(common_variance=common_variance)
        transerror = TransferError(gnb)
        cv = CrossValidatedTransferError(
                transerror,
                NFoldSplitter(cvtype=1))

        skwargs = dict(radius=1, enable_ca=['roi_sizes', 'raw_results'])
        sls = [sphere_searchlight(cv, **skwargs),
               #GNBSearchlight(gnb, NFoldSplitter(cvtype=1))
               sphere_gnbsearchlight(gnb, NFoldSplitter(cvtype=1),
                                     indexsum='fancy', **skwargs)
               ]

        if externals.exists('scipy'):
            sls += [ sphere_gnbsearchlight(gnb, NFoldSplitter(cvtype=1),
                                           indexsum='sparse', **skwargs)]

        # Just test nproc whenever common_variance is True
        if externals.exists('pprocess') and common_variance:
            sls += [sphere_searchlight(cv, nproc=2, **skwargs)]

        all_results = []
        ds = datasets['3dsmall'].copy()
        ds.fa['voxel_indices'] = ds.fa.myspace
        for sl in sls:
            # run searchlight
            results = sl(ds)
            all_results.append(results)

            # check for correct number of spheres
            self.failUnless(results.nfeatures == 106)
            # and measures (one per xfold)
            self.failUnless(len(results) == len(ds.UC))

            # check for chance-level performance across all spheres
            self.failUnless(0.4 < results.samples.mean() < 0.6)

            mean_errors = results.samples.mean(axis=0)
            # check that we do actually get different errors across spheres ;)
            self.failUnless(len(np.unique(mean_errors)) > 3)

            # check for reasonable sphere sizes
            self.failUnless(len(sl.ca.roi_sizes) == 106)
            self.failUnless(max(sl.ca.roi_sizes) == 7)
            self.failUnless(min(sl.ca.roi_sizes) == 4)

            # check base-class state
            self.failUnlessEqual(sl.ca.raw_results.nfeatures, 106)

        if len(all_results) > 1:
            # if we had multiple searchlights, we can check whether they
            # all gave the same result (they should have)
            aresults = np.array([a.samples for a in all_results])
            dresults = np.abs(aresults - aresults.mean(axis=0))
            dmax = np.max(dresults)
            self.failUnless(dmax <= 1e-13)
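Example #2 is the same test written against the older PyMVPA API: the cross-validation measure is spelled CrossValidatedTransferError(TransferError(gnb), NFoldSplitter(cvtype=1)) rather than CrossValidation(gnb, NFoldPartitioner()); everything downstream is identical. The closing block of both tests is a generic cross-implementation consistency check that is useful on its own; below is a self-contained sketch of it (the helper name and the fake data are illustrative).

# Self-contained sketch of the final consistency check: stack result
# arrays from several implementations and require that none deviates
# from their mean by more than a tight tolerance.
import numpy as np

def assert_consistent(result_arrays, tol=1e-13):  # hypothetical helper
    aresults = np.array(result_arrays)            # (n_impl, nfolds, nspheres)
    dresults = np.abs(aresults - aresults.mean(axis=0))
    assert dresults.max() <= tol, "implementations disagree"

# usage with two (here trivially identical) fake result arrays
a = np.random.rand(6, 106)
assert_consistent([a, a.copy()])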