Example #1
    def test_surface_voxel_query_engine(self):
        vol_shape = (10, 10, 10, 1)
        vol_affine = np.identity(4)
        vol_affine[0, 0] = vol_affine[1, 1] = vol_affine[2, 2] = 5
        vg = volgeom.VolGeom(vol_shape, vol_affine)

        # make the surfaces
        sphere_density = 10

        outer = surf.generate_sphere(sphere_density) * 25. + 15
        inner = surf.generate_sphere(sphere_density) * 20. + 15

        vs = volsurf.VolSurfMaximalMapping(vg, inner, outer)

        radius = 10

        for fallback, expected_nfeatures in ((True, 1000), (False, 183)):
            voxsel = surf_voxel_selection.voxel_selection(vs, radius)
            qe = SurfaceVoxelsQueryEngine(voxsel,
                                          fallback_euclidean_distance=fallback)

            m = _Voxel_Count_Measure()

            sl = Searchlight(m, queryengine=qe)

            data = np.random.normal(size=vol_shape)
            img = nb.Nifti1Image(data, vol_affine)
            ds = fmri_dataset(img)

            sl_map = sl(ds)

            counts = sl_map.samples

            assert_true(np.all(np.logical_and(5 <= counts, counts <= 18)))
            assert_equal(sl_map.nfeatures, expected_nfeatures)
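
A minimal sketch of the module-level _Voxel_Count_Measure this example assumes (consistent with its no-argument construction above, and with the dtype-taking inline variant defined in test_niml_dset_voxsel below):

from mvpa2.measures.base import Measure

class _Voxel_Count_Measure(Measure):
    # counts the number of features (voxels) in each searchlight disc
    is_trained = True

    def _call(self, dset):
        return dset.nfeatures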
 def _get_seed_means(self, measure, queryengine, dataset, seed_indices):
     # Computing seed data as mean timeseries in each SL
     seed_data = Searchlight(measure, queryengine=queryengine,
                             nproc=self.params.nproc, roi_ids=seed_indices)
     seed_data = seed_data(dataset)
     zscore(seed_data, chunks_attr=None)
     return seed_data
 def _seed_means(self, measure, queryengine, ds, seed_indices):
     # Seed data is the mean timeseries for each searchlight
     seed_data = Searchlight(measure, queryengine=queryengine,
                             nproc=self.nproc,
                             roi_ids=np.concatenate(seed_indices).copy())
     if isinstance(ds, np.ndarray):
         ds = Dataset(ds)
     seed_data = seed_data(ds)
     zscore(seed_data.samples, chunks_attr=None)
     return seed_data
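
These two helpers leave the choice of measure open. One plausible choice (an assumption, not taken from the original) is PyMVPA's mean_feature mapper, which averages across features so that each searchlight collapses to its mean timeseries:

from mvpa2.mappers.fx import mean_feature

# assumption: a feature-averaging mapper serves as the searchlight measure,
# yielding one mean timeseries per seed searchlight
measure = mean_feature()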
    def test_niml_dset_voxsel(self):
        if not externals.exists('nibabel'):
            return

        # This is actually a bit of an integration test.
        # It tests storing and retrieving searchlight results.
        # Imports are inline here so that they do not clutter the module
        # header and the other unit tests stay more modular.
        # XXX put this in a separate file?
        from mvpa2.misc.surfing import volgeom, surf_voxel_selection, queryengine
        from mvpa2.measures.searchlight import Searchlight
        from mvpa2.support.nibabel import surf
        from mvpa2.measures.base import Measure
        from mvpa2.datasets.mri import fmri_dataset

        class _Voxel_Count_Measure(Measure):
            # used to check voxel selection results
            is_trained = True

            def __init__(self, dtype, **kwargs):
                Measure.__init__(self, **kwargs)
                self.dtype = dtype

            def _call(self, dset):
                return self.dtype(dset.nfeatures)

        sh = (20, 20, 20)
        vg = volgeom.VolGeom(sh, np.identity(4))

        density = 20

        outer = surf.generate_sphere(density) * 10. + 5
        inner = surf.generate_sphere(density) * 5. + 5

        intermediate = outer * .5 + inner * .5
        xyz = intermediate.vertices

        radius = 50

        sel = surf_voxel_selection.run_voxel_selection(radius, vg, inner,
                                                       outer)
        qe = queryengine.SurfaceVerticesQueryEngine(sel)

        for dtype in (int, float):
            sl = Searchlight(_Voxel_Count_Measure(dtype), queryengine=qe)

            ds = fmri_dataset(vg.get_empty_nifti_image(1))
            r = sl(ds)

            fd, fn = tempfile.mkstemp('.niml.dset', 'dset')
            os.close(fd)
            niml_dset.write(fn, r)
            rr = niml_dset.read(fn)

            os.remove(fn)

            assert_array_equal(r.samples, rr.samples)
Example #5
 def test_1d_multispace_searchlight(self):
     ds = Dataset([np.arange(6)])
     ds.fa['coord1'] = np.repeat(np.arange(3), 2)
     # add a second space to the dataset
     ds.fa['coord2'] = np.tile(np.arange(2), 3)
     measure = lambda x: "+".join([str(v) for v in x.samples[0]])
     # simply select each feature once
     res = Searchlight(measure,
                       IndexQueryEngine(coord1=Sphere(0), coord2=Sphere(0)),
                       nproc=1)(ds)
     assert_array_equal(res.samples, [['0', '1', '2', '3', '4', '5']])
     res = Searchlight(measure,
                       IndexQueryEngine(coord1=Sphere(0), coord2=Sphere(1)),
                       nproc=1)(ds)
     assert_array_equal(res.samples,
                        [['0+1', '0+1', '2+3', '2+3', '4+5', '4+5']])
     res = Searchlight(measure,
                       IndexQueryEngine(coord1=Sphere(1), coord2=Sphere(0)),
                       nproc=1)(ds)
     assert_array_equal(res.samples,
                        [['0+2', '1+3', '0+2+4', '1+3+5', '2+4', '3+5']])
Example #6
    def test_agreement_surface_volume(self):
        '''Test agreement between volume-based and surface-based
        searchlights when using the Euclidean distance metric'''

        # import runner
        def sum_ds(ds):
            return np.sum(ds)

        radius = 3

        # make a small dataset with a mask
        sh = (10, 10, 10)
        msk = np.zeros(sh)
        for i in xrange(0, sh[0], 2):
            msk[i, :, :] = 1
        vg = volgeom.VolGeom(sh, np.identity(4), mask=msk)

        # make an image
        nt = 6
        img = vg.get_masked_nifti_image(nt)
        ds = fmri_dataset(img, mask=msk)

        # run the searchlight
        sl = sphere_searchlight(sum_ds, radius=radius)
        m = sl(ds)

        # now use surface-based searchlight
        v = volsurf.from_volume(ds)
        source_surf = v.intermediate_surface
        node_msk = np.logical_not(np.isnan(source_surf.vertices[:, 0]))

        # check that the mask matches with what we used earlier
        assert_array_equal(msk.ravel() + 0., node_msk.ravel() + 0.)

        source_surf_nodes = np.nonzero(node_msk)[0]

        sel = surf_voxel_selection.voxel_selection(
            v,
            float(radius),
            source_surf=source_surf,
            source_surf_nodes=source_surf_nodes,
            distance_metric='euclidean')

        qe = queryengine.SurfaceVerticesQueryEngine(sel)
        sl = Searchlight(sum_ds, queryengine=qe)
        r = sl(ds)

        # check whether they give the same results
        assert_array_equal(r.samples, m.samples)
    def test_surface_voxel_query_engine(self):
        vol_shape = (10, 10, 10, 1)
        vol_affine = np.identity(4)
        vol_affine[0, 0] = vol_affine[1, 1] = vol_affine[2, 2] = 5
        vg = volgeom.VolGeom(vol_shape, vol_affine)

        # make the surfaces
        sphere_density = 10

        outer = surf.generate_sphere(sphere_density) * 25. + 15
        inner = surf.generate_sphere(sphere_density) * 20. + 15

        vs = volsurf.VolSurfMaximalMapping(vg, inner, outer)

        radius = 10

        for fallback, expected_nfeatures in ((True, 1000), (False, 183)):
            voxsel = surf_voxel_selection.voxel_selection(vs, radius)
            qe = SurfaceVoxelsQueryEngine(voxsel,
                                          fallback_euclidean_distance=fallback)

            # test i/o and ensure that the loaded instance is trained
            if externals.exists('h5py'):
                fd, qefn = tempfile.mkstemp('qe.hdf5', 'test')
                os.close(fd)
                h5save(qefn, qe)
                qe = h5load(qefn)
                os.remove(qefn)

            m = _Voxel_Count_Measure()

            sl = Searchlight(m, queryengine=qe)

            data = np.random.normal(size=vol_shape)
            img = nb.Nifti1Image(data, vol_affine)
            ds = fmri_dataset(img)

            sl_map = sl(ds)

            counts = sl_map.samples

            assert_true(np.all(np.logical_and(5 <= counts, counts <= 18)))
            assert_equal(sl_map.nfeatures, expected_nfeatures)
    def test_add_center_fa(self):
        # just a smoke test pretty much
        ds = datasets['3dsmall'].copy()

        # check that we do not mark anything as center whenever there is none
        def check_no_center(ds):
            assert (not np.any(ds.fa.center))
            return 1.0

        # or just a single center in our case
        def check_center(ds):
            assert (np.sum(ds.fa.center) == 1)
            return 1.0

        for n, check in [(HollowSphere(1, 0), check_no_center),
                         (Sphere(0), check_center), (Sphere(1), check_center)]:
            Searchlight(check,
                        IndexQueryEngine(myspace=n),
                        add_center_fa='center')(ds)
            # and no changes to original ds data, etc
            assert_array_equal(datasets['3dsmall'].fa.keys(), ds.fa.keys())
            assert_array_equal(datasets['3dsmall'].samples, ds.samples)
Example #9
def compute_seed_means(measure, queryengine, ds, roi_ids):
    """
    Parameters:
    ----------
    measure: a PyMVPA measure passed to the Searchlight.
    queryengine: a trained PyMVPA SurfaceQueryEngine.
    ds: a single PyMVPA dataset (samples: timeseries, features: vertices)
    roi_ids: the vertex indices where each searchlight will be centered.
    
    Returns:
    --------
    seed_data: dataset with the mean timeseries for a searchlight centered on each ROI_id.

    """

    # Seed data is the mean timeseries for each searchlight
    seed_data = Searchlight(measure, queryengine=queryengine,
                            nproc=1, roi_ids=roi_ids.copy())
    if isinstance(ds, np.ndarray):
        ds = Dataset(ds)
    seed_data = seed_data(ds)
    zscore(seed_data.samples, chunks_attr=None)
    return seed_data
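
A hypothetical end-to-end call of compute_seed_means; surface, radius, and roi_ids below are illustrative, and SurfaceQueryEngine (built directly from a surface and a radius) is assumed to be available in this PyMVPA version:

import numpy as np
from mvpa2.suite import SurfaceQueryEngine
from mvpa2.mappers.fx import mean_feature

# `surface` and `ds` (a time-by-vertex dataset) are assumed to be given
qe = SurfaceQueryEngine(surface, radius=10.0)
qe.train(ds)  # training binds the engine to the dataset's feature space
seed_means = compute_seed_means(mean_feature(), qe, ds,
                                roi_ids=np.arange(100))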
Example #10
    def test_surf_voxel_selection(self):
        vol_shape = (10, 10, 10)
        vol_affine = np.identity(4)
        vol_affine[0, 0] = vol_affine[1, 1] = vol_affine[2, 2] = 5

        vg = volgeom.VolGeom(vol_shape, vol_affine)

        density = 10

        outer = surf.generate_sphere(density) * 25. + 15
        inner = surf.generate_sphere(density) * 20. + 15

        vs = volsurf.VolSurfMaximalMapping(vg, outer, inner)

        nv = outer.nvertices

        # select under a variety of parameters
        # parameters are distance metric (dijkstra or euclidean),
        # radius, and number of searchlight centers
        params = [('d', 1., 10), ('d', 1., 50), ('d', 1., 100), ('d', 2., 100),
                  ('e', 2., 100), ('d', 2., 100), ('d', 20, 100),
                  ('euclidean', 5, None), ('dijkstra', 10, None)]

        # function that indicates for which parameters the full test is run
        test_full = lambda x: len(x[0]) > 1 or x[2] == 100

        expected_labs = ['grey_matter_position', 'center_distances']

        voxcount = []
        tested_double_features = False
        for param in params:
            distance_metric, radius, ncenters = param
            srcs = range(0, nv, nv // (ncenters or nv))
            sel = surf_voxel_selection.voxel_selection(
                vs,
                radius,
                source_surf_nodes=srcs,
                distance_metric=distance_metric)

            # see how many voxels were selected
            vg = sel.volgeom
            datalin = np.zeros((vg.nvoxels, 1))

            mp = sel
            for k, idxs in mp.iteritems():
                if idxs is not None:
                    datalin[idxs] = 1

            voxcount.append(np.sum(datalin))

            if test_full(param):
                assert_equal(np.sum(datalin), np.sum(sel.get_mask()))

                assert_true(len('%s%r' % (sel, sel)) > 0)

                # see if voxels containing inner and outer
                # nodes were selected
                for sf in [inner, outer]:
                    for k, idxs in mp.iteritems():
                        xyz = np.reshape(sf.vertices[k, :], (1, 3))
                        linidx = vg.xyz2lin(xyz)

                        # only required if xyz is actually within the volume
                        assert_equal(linidx in idxs, vg.contains_lin(linidx))

                # check that it has all the attributes
                labs = sel.aux_keys()

                assert_true(all([lab in labs for lab in expected_labs]))

                if externals.exists('h5py'):
                    # some I/O testing
                    fd, fn = tempfile.mkstemp('.h5py', 'test')
                    os.close(fd)
                    h5save(fn, sel)

                    sel2 = h5load(fn)
                    os.remove(fn)

                    assert_equal(sel, sel2)
                else:
                    sel2 = sel

                # check that mask is OK even after I/O
                assert_array_equal(sel.get_mask(), sel2.get_mask())

                # test I/O with surfaces
                # XXX the @tempfile decorator only supports a single filename
                #     hence this method does not use it
                fd, outerfn = tempfile.mkstemp('outer.asc', 'test')
                os.close(fd)
                fd, innerfn = tempfile.mkstemp('inner.asc', 'test')
                os.close(fd)
                fd, volfn = tempfile.mkstemp('vol.nii', 'test')
                os.close(fd)

                surf.write(outerfn, outer, overwrite=True)
                surf.write(innerfn, inner, overwrite=True)

                img = sel.volgeom.get_empty_nifti_image()
                img.to_filename(volfn)

                sel3 = surf_voxel_selection.run_voxel_selection(
                    radius,
                    volfn,
                    innerfn,
                    outerfn,
                    source_surf_nodes=srcs,
                    distance_metric=distance_metric)

                outer4 = surf.read(outerfn)
                inner4 = surf.read(innerfn)
                vsm4 = vs = volsurf.VolSurfMaximalMapping(vg, inner4, outer4)

                # check that two ways of voxel selection match
                sel4 = surf_voxel_selection.voxel_selection(
                    vsm4,
                    radius,
                    source_surf_nodes=srcs,
                    distance_metric=distance_metric)

                assert_equal(sel3, sel4)

                os.remove(outerfn)
                os.remove(innerfn)
                os.remove(volfn)

                # compare sel3 with other selection results
                # NOTE: precisely which voxels are selected can differ quite
                #       a bit between sel and sel3, as writing the surfaces
                #       introduces rounding errors and the sphere is very
                #       symmetric, so different neighboring nodes may be
                #       chosen when selecting a certain number of voxels.
                sel3cmp_difference_ratio = [(sel, .2), (sel4, 0.)]
                for selcmp, ratio in sel3cmp_difference_ratio:
                    nunion = ndiff = 0

                    for k in selcmp.keys():
                        p = set(sel3.get(k))
                        q = set(selcmp.get(k))
                        nunion += len(p.union(q))
                        ndiff += len(p.symmetric_difference(q))

                    assert_true(float(ndiff) / float(nunion) <= ratio)

                # check searchlight call
                # as of late Aug 2012, this is with the fancy query engine
                # as implemented by Yarik

                mask = sel.get_mask()
                keys = None if ncenters is None else sel.keys()

                dset_data = np.reshape(np.arange(vg.nvoxels), vg.shape)
                dset_img = nb.Nifti1Image(dset_data, vg.affine)
                dset = fmri_dataset(samples=dset_img, mask=mask)

                qe = queryengine.SurfaceVerticesQueryEngine(
                    sel,
                    # you can optionally add additional
                    # information about each near-disk voxel
                    add_fa=['center_distances', 'grey_matter_position'])

                # test i/o, ensuring that the loaded instance is still trained
                if externals.exists('h5py'):
                    fd, qefn = tempfile.mkstemp('qe.hdf5', 'test')
                    os.close(fd)
                    h5save(qefn, qe)
                    qe = h5load(qefn)
                    os.remove(qefn)

                assert_false('ERROR' in repr(qe))  # check that repr works
                voxelcounter = _Voxel_Count_Measure()
                searchlight = Searchlight(
                    voxelcounter,
                    queryengine=qe,
                    roi_ids=keys,
                    nproc=1,
                    enable_ca=['roi_feature_ids', 'roi_center_ids'])
                sl_dset = searchlight(dset)

                selected_count = sl_dset.samples[0, :]
                mp = sel
                for i, k in enumerate(sel.keys()):
                    # check that number of selected voxels matches
                    assert_equal(selected_count[i], len(mp[k]))

                assert_equal(searchlight.ca.roi_center_ids, sel.keys())

                assert_array_equal(sl_dset.fa['center_ids'], qe.ids)

                # check nearest node is *really* the nearest node

                allvx = sel.get_targets()
                intermediate = outer * .5 + inner * .5

                for vx in allvx:
                    nearest = sel.target2nearest_source(vx)

                    xyz = intermediate.vertices[nearest, :]
                    sqsum = np.sum((xyz - intermediate.vertices)**2, 1)

                    idx = np.argmin(sqsum)
                    assert_equal(idx, nearest)

                if not tested_double_features:  # test only once
                    # if there are multiple features for the same voxel,
                    # we should get them all
                    dset1 = dset.copy()
                    dset1.fa['dset'] = [1]
                    dset2 = dset.copy()
                    dset2.fa['dset'] = [2]
                    dset_ = hstack((dset1, dset2), 'drop_nonunique')
                    dset_.sa = dset1.sa
                    # dset_.a.imghdr = dset1.a.imghdr
                    assert_true('imghdr' in dset_.a.keys())
                    assert_equal(dset_.a['imghdr'].value,
                                 dset1.a['imghdr'].value)
                    roi_feature_ids = searchlight.ca.roi_feature_ids
                    sl_dset_ = searchlight(dset_)
                    # and we should get twice the counts
                    assert_array_equal(sl_dset_.samples, sl_dset.samples * 2)

                    # compare old and new roi_feature_ids
                    assert (len(roi_feature_ids) == len(
                        searchlight.ca.roi_feature_ids))
                    nfeatures = dset.nfeatures
                    for old, new in zip(roi_feature_ids,
                                        searchlight.ca.roi_feature_ids):
                        # each new set of ids should consist of the old ones
                        # plus (old + nfeatures), since we hstack'ed
                        # two datasets
                        assert_array_equal(
                            np.hstack([(x, x + nfeatures) for x in old]), new)
                    tested_double_features = True

        # check that the number of selected voxels is as expected
        expected_voxcount = [22, 93, 183, 183, 183, 183, 183, 183, 183]

        assert_equal(voxcount, expected_voxcount)
    def test_voxel_selection(self):
        '''Compare surface-based and volume-based searchlight'''
        '''
        Tests whether results are identical for a surface-based
        searchlight (just one plane; Euclidean distance) and a volume-based
        searchlight.

        Note that the radius below is a float; if it were an int, it would
        specify the number of voxels in each searchlight'''

        radius = 10.
        '''Define input filenames'''
        epi_fn = os.path.join(pymvpa_dataroot, 'bold.nii.gz')
        maskfn = os.path.join(pymvpa_dataroot, 'mask.nii.gz')
        '''
        Use the EPI datafile to define a surface.
        The surface has as many nodes as there are voxels
        and is parallel to the volume 'slice'
        '''
        vg = volgeom.from_any(maskfn, mask_volume=True)

        aff = vg.affine
        nx, ny, nz = vg.shape[:3]
        '''The plane extends in the x and y directions, so we take these
        vectors from the affine transformation matrix of the volume'''
        plane = surf.generate_plane(aff[:3, 3], aff[:3, 0], aff[:3, 1], nx, ny)
        '''
        Simulate pial and white matter as just above and below
        the central plane
        '''
        normal_vec = aff[:3, 2]
        outer = plane + normal_vec
        inner = plane + -normal_vec
        '''
        Combine volume and surface information
        '''
        vsm = volsurf.VolSurfMaximalMapping(vg, outer, inner)
        '''
        Run voxel selection with specified radius (in mm), using
        Euclidean distance measure
        '''
        surf_voxsel = surf_voxel_selection.voxel_selection(vsm,
                                                           radius,
                                                           distance_metric='e')
        '''Define the measure'''

        # run_slow=True would give an actual cross-validation with meaningful
        # accuracies. Because this is a unit-test only the number of voxels
        # in each searchlight is tested.
        run_slow = False

        if run_slow:
            meas = CrossValidation(GNB(),
                                   OddEvenPartitioner(),
                                   errorfx=lambda p, t: np.mean(p == t))
            postproc = mean_sample
        else:
            meas = _Voxel_Count_Measure()
            postproc = lambda x: x
        '''
        Surface analysis: define the query engine, cross validation,
        and searchlight
        '''
        surf_qe = SurfaceVerticesQueryEngine(surf_voxsel)
        surf_sl = Searchlight(meas, queryengine=surf_qe, postproc=postproc)
        '''
        new (Sep 2012): also test 'simple' queryengine wrapper function
        '''

        surf_qe2 = disc_surface_queryengine(radius,
                                            maskfn,
                                            inner,
                                            outer,
                                            plane,
                                            volume_mask=True,
                                            distance_metric='euclidean')
        surf_sl2 = Searchlight(meas, queryengine=surf_qe2, postproc=postproc)
        '''
        Same for the volume analysis
        '''
        element_sizes = tuple(map(abs, (aff[0, 0], aff[1, 1], aff[2, 2])))
        sph = Sphere(radius, element_sizes=element_sizes)
        kwa = {'voxel_indices': sph}

        vol_qe = IndexQueryEngine(**kwa)
        vol_sl = Searchlight(meas, queryengine=vol_qe, postproc=postproc)
        '''The following steps are similar to start_easy.py'''
        attr = SampleAttributes(
            os.path.join(pymvpa_dataroot, 'attributes_literal.txt'))

        mask = surf_voxsel.get_mask()

        dataset = fmri_dataset(samples=os.path.join(pymvpa_dataroot,
                                                    'bold.nii.gz'),
                               targets=attr.targets,
                               chunks=attr.chunks,
                               mask=mask)

        if run_slow:
            # do chunkswise linear detrending on dataset

            poly_detrend(dataset, polyord=1, chunks_attr='chunks')

            # zscore dataset relative to baseline ('rest') mean
            zscore(dataset,
                   chunks_attr='chunks',
                   param_est=('targets', ['rest']))

        # select class face and house for this demo analysis
        # would work with full datasets (just a little slower)
        dataset = dataset[np.array(
            [l in ['face', 'house'] for l in dataset.sa.targets],
            dtype='bool')]
        '''Apply searchlight to datasets'''
        surf_dset = surf_sl(dataset)
        surf_dset2 = surf_sl2(dataset)
        vol_dset = vol_sl(dataset)

        surf_data = surf_dset.samples
        surf_data2 = surf_dset2.samples
        vol_data = vol_dset.samples

        assert_array_equal(surf_data, surf_data2)
        assert_array_equal(surf_data, vol_data)
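
As the docstring above notes, an int radius selects a fixed number of voxels per searchlight instead of a fixed extent in mm. A minimal sketch of that variant, reusing the vsm mapping built above (the voxel count is illustrative):

# fixed-size variant: grow each searchlight by Euclidean distance until it
# contains (approximately) 100 voxels, rather than up to a 10 mm radius
nvoxels_per_searchlight = 100
surf_voxsel_fixed = surf_voxel_selection.voxel_selection(
    vsm, nvoxels_per_searchlight, distance_metric='e')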
    def test_voxel_selection_alternative_calls(self):
        # Tests a multitude of different searchlight calls
        # that all should yield exactly the same results.
        #
        # Calls differ by whether the arguments are filenames
        # or data objects, whether values are specified explicitly
        # or set to the default implicitly (using None),
        # and by different calls to run the voxel selection.
        #
        # This method does not test for mask functionality.

        # define the volume
        vol_shape = (10, 10, 10, 3)
        vol_affine = np.identity(4)
        vol_affine[0, 0] = vol_affine[1, 1] = vol_affine[2, 2] = 5

        # four versions: array, nifti image, file name, fmri dataset
        volarr = np.ones(vol_shape)
        volimg = nb.Nifti1Image(volarr, vol_affine)
        # Older NumPy versions (e.g. 1.6.1 on precise on travis) have a
        # detected problem leading to segfaults while operating
        # on memmapped volumes being forwarded to pprocess.
        # Thus just make it a compressed volume for those cases
        suf = '.gz' \
            if externals.exists('pprocess') and externals.versions['numpy'] < '1.6.2' \
            else ''
        fd, volfn = tempfile.mkstemp('vol.nii' + suf, 'test')
        os.close(fd)
        volimg.to_filename(volfn)
        volds = fmri_dataset(volfn)

        fd, volfngz = tempfile.mkstemp('vol.nii.gz', 'test')
        os.close(fd)
        volimg.to_filename(volfngz)
        voldsgz = fmri_dataset(volfngz)

        # make the surfaces
        sphere_density = 10

        # two versions: Surface and file name
        outer = surf.generate_sphere(sphere_density) * 25. + 15
        inner = surf.generate_sphere(sphere_density) * 20. + 15
        intermediate = inner * .5 + outer * .5
        nv = outer.nvertices

        fd, outerfn = tempfile.mkstemp('outer.asc', 'test')
        os.close(fd)
        fd, innerfn = tempfile.mkstemp('inner.asc', 'test')
        os.close(fd)
        fd, intermediatefn = tempfile.mkstemp('intermediate.asc', 'test')
        os.close(fd)

        for s, fn in zip([outer, inner, intermediate],
                         [outerfn, innerfn, intermediatefn]):
            surf.write(fn, s, overwrite=True)

        # searchlight radius (in mm)
        radius = 10.

        # dataset used to run searchlight on
        ds = fmri_dataset(volfn)

        # simple voxel counter (run for each searchlight position)
        m = _Voxel_Count_Measure()

        # number of voxels expected in each searchlight
        r_expected = np.array([[
            18, 9, 10, 9, 9, 9, 9, 10, 9, 9, 9, 9, 11, 11, 11, 11, 10, 10, 10,
            9, 10, 11, 9, 10, 10, 8, 7, 8, 8, 8, 9, 10, 12, 12, 11, 7, 7, 8, 5,
            9, 11, 11, 12, 12, 9, 5, 8, 7, 7, 12, 12, 13, 12, 12, 7, 7, 8, 5,
            9, 12, 12, 13, 11, 9, 5, 8, 7, 7, 11, 12, 12, 11, 12, 10, 10, 11,
            9, 11, 12, 12, 12, 12, 16, 13, 16, 16, 16, 17, 15, 17, 17, 17, 16,
            16, 16, 18, 16, 16, 16, 16, 18, 16
        ]])

        params = dict(intermediate_=(intermediate, intermediatefn, None),
                      center_nodes_=(None, range(nv)),
                      volume_=(volimg, volfn, volds, volfngz, voldsgz),
                      surf_src_=('filename', 'surf'),
                      volume_mask_=(None, True, 0, 2),
                      call_method_=("qe", "rvs", "gam"))

        combis = _cartprod(params)  # compute all possible combinations
        combistep = 17  # 173
        # a fine prime step size to speed things up:
        # if this value becomes too big then not all
        # cases are covered; the unit test checks itself
        # whether all values occur at least once

        tested_params = dict()

        def val2str(x):
            return '%r:%r' % (type(x), x)

        for i in xrange(0, len(combis), combistep):
            combi = combis[i]

            intermediate_ = combi['intermediate_']
            center_nodes_ = combi['center_nodes_']
            volume_ = combi['volume_']
            surf_src_ = combi['surf_src_']
            volume_mask_ = combi['volume_mask_']
            call_method_ = combi['call_method_']

            # keep track of which values were used -
            # so that this unit test tests itself

            for k in combi.keys():
                if k not in tested_params:
                    tested_params[k] = set()
                tested_params[k].add(val2str(combi[k]))

            if surf_src_ == 'filename':
                s_i, s_m, s_o = innerfn, intermediatefn, outerfn
            elif surf_src_ == 'surf':
                s_i, s_m, s_o = inner, intermediate, outer
            else:
                raise ValueError('this should not happen')

            if call_method_ == "qe":
                # use the fancy query engine wrapper
                qe = disc_surface_queryengine(radius,
                                              volume_,
                                              s_i,
                                              s_o,
                                              s_m,
                                              source_surf_nodes=center_nodes_,
                                              volume_mask=volume_mask_)
                sl = Searchlight(m, queryengine=qe)
                r = sl(ds).samples

            elif call_method_ == 'rvs':
                # use query-engine but build the
                # ingredients by hand
                vg = volgeom.from_any(volume_, volume_mask_)
                vs = volsurf.VolSurfMaximalMapping(vg, s_i, s_o)
                sel = surf_voxel_selection.voxel_selection(
                    vs,
                    radius,
                    source_surf=s_m,
                    source_surf_nodes=center_nodes_)
                qe = SurfaceVerticesQueryEngine(sel)
                sl = Searchlight(m, queryengine=qe)
                r = sl(ds).samples

            elif call_method_ == 'gam':
                # build everything from the ground up
                vg = volgeom.from_any(volume_, volume_mask_)
                vs = volsurf.VolSurfMaximalMapping(vg, s_i, s_o)
                sel = surf_voxel_selection.voxel_selection(
                    vs,
                    radius,
                    source_surf=s_m,
                    source_surf_nodes=center_nodes_)
                mp = sel

                ks = sel.keys()
                nk = len(ks)
                r = np.zeros((1, nk))
                for i, k in enumerate(ks):
                    r[0, i] = len(mp[k])

            # check if result is as expected
            assert_array_equal(r_expected, r)

        # clean up
        all_fns = [volfn, volfngz, outerfn, innerfn, intermediatefn]
        map(os.remove, all_fns)

        for k, vs in params.iteritems():
            if k not in tested_params:
                raise ValueError("Missing key: %r" % k)
            for v in vs:
                vstr = val2str(v)
                if vstr not in tested_params[k]:
                    raise ValueError("Missing value %r for %s" % (vstr, k))
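
The _cartprod helper used above is not shown in this snippet; from its use (a dict mapping parameter names to option tuples in, a list with one dict per combination out), a minimal sketch could be:

import itertools

def _cartprod(d):
    # expand {'a': (1, 2), 'b': (3,)} into [{'a': 1, 'b': 3}, {'a': 2, 'b': 3}]
    keys = list(d.keys())
    return [dict(zip(keys, combo))
            for combo in itertools.product(*[d[k] for k in keys])]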
Example #13
def run(args):
    if os.path.isfile(args.payload) and args.payload.endswith('.py'):
        measure = script2obj(args.payload)
    elif args.payload == 'cv':
        if args.cv_learner is None or args.cv_partitioner is None:
            raise ValueError(
                'cross-validation payload requires --learner and --partitioner'
            )
        # get CV instance
        measure = get_crossvalidation_instance(
            args.cv_learner, args.cv_partitioner, args.cv_errorfx,
            args.cv_sampling_repetitions, args.cv_learner_space,
            args.cv_balance_training, args.cv_permutations,
            args.cv_avg_datafold_results, args.cv_prob_tail)
    else:
        raise RuntimeError("this should not happen")
    ds = arg2ds(args.data)
    if args.ds_preproc_fx is not None:
        ds = args.ds_preproc_fx(ds)
    # setup neighborhood
    # XXX add big switch to allow for setting up surface-based neighborhoods
    from mvpa2.misc.neighborhood import IndexQueryEngine
    qe = IndexQueryEngine(**dict(args.neighbors))
    # determine ROIs
    rids = None  # all by default
    aggregate_fx = args.aggregate_fx
    if args.roi_attr is not None:
        # first figure out which roi features should be processed
        if len(args.roi_attr) == 1 and args.roi_attr[0] in ds.fa.keys():
            # name of an attribute -> pull non-zeroes
            rids = ds.fa[args.roi_attr[0]].value.nonzero()[0]
        else:
            # an expression?
            from .cmd_select import _eval_attr_expr
            rids = _eval_attr_expr(args.roi_attr, ds.fa).nonzero()[0]

    seed_ids = None
    if args.scatter_rois is not None:
        # scatter_neighborhoods among available ids if it was requested
        from mvpa2.misc.neighborhood import scatter_neighborhoods
        attr, nb = args.scatter_rois
        coords = ds.fa[attr].value
        if rids is not None:
            # select only those which were chosen by ROI
            coords = coords[rids]
        _, seed_ids = scatter_neighborhoods(nb, coords)
        if aggregate_fx is None:
            # no custom one given -> use default "fill in" function
            aggregate_fx = _fill_in_scattered_results
            if args.enable_ca is None:
                args.enable_ca = ['roi_feature_ids']
            elif 'roi_feature_ids' not in args.enable_ca:
                args.enable_ca += ['roi_feature_ids']

    if seed_ids is None:
        roi_ids = rids
    else:
        if rids is not None:
            # we had to sub-select by scattering among available rids,
            # so we need to get back the original ids
            roi_ids = rids[seed_ids]
        else:
            # scattering happened on the entire feature-set
            roi_ids = seed_ids

    verbose(3, 'Attempting %i ROI analyses'
               % (ds.nfeatures if roi_ids is None else len(roi_ids)))

    from mvpa2.measures.searchlight import Searchlight

    sl = Searchlight(measure,
                     queryengine=qe,
                     roi_ids=roi_ids,
                     nproc=args.nproc,
                     results_backend=args.multiproc_backend,
                     results_fx=aggregate_fx,
                     enable_ca=args.enable_ca,
                     disable_ca=args.disable_ca)
    # XXX support me too!
    #                 add_center_fa
    #                 tmp_prefix
    #                 nblocks
    #                 null_dist
    # run
    res = sl(ds)
    if (seed_ids is not None) and ('mapper' in res.a):
        # strip the last mapper link in the chain, which would be the seed ID selection
        res.a['mapper'] = res.a.mapper[:-1]
    # XXX create more output
    # and store
    ds2hdf5(res, args.output, compression=args.hdf5_compression)
    return res
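
This handler backs PyMVPA's searchlight command-line interface. A hypothetical minimal programmatic call, with an argparse-style namespace standing in for parsed flags (attribute names mirror the args.* accesses above; the values are illustrative guesses, not documented defaults):

from argparse import Namespace
from mvpa2.misc.neighborhood import Sphere

args = Namespace(
    payload='my_measure.py',        # hypothetical script defining the measure
    data=['bold_ds.hdf5'],          # hypothetical input dataset(s)
    ds_preproc_fx=None,
    neighbors=[('voxel_indices', Sphere(3))],
    roi_attr=None, scatter_rois=None, aggregate_fx=None,
    enable_ca=None, disable_ca=None,
    nproc=1, multiproc_backend='native',
    output='sl_result.hdf5', hdf5_compression='gzip')
res = run(args)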