def test_queryengine_io(self, fn): skip_if_no_external("h5py") from mvpa2.base.hdf5 import h5save, h5load vol_shape = (10, 10, 10, 1) vol_affine = np.identity(4) vg = volgeom.VolGeom(vol_shape, vol_affine) # generate some surfaces, # and add some noise to them sphere_density = 10 outer = surf.generate_sphere(sphere_density) * 5 + 8 inner = surf.generate_sphere(sphere_density) * 3 + 8 radius = 5.0 add_fa = ["center_distances", "grey_matter_position"] qe = disc_surface_queryengine(radius, vg, inner, outer, add_fa=add_fa) ds = fmri_dataset(vg.get_masked_nifti_image()) # the following is not really a strong requirement. XXX remove? assert_raises(ValueError, lambda: qe[qe.ids[0]]) # check that after training it behaves well qe.train(ds) i = qe.ids[0] try: m = qe[i] except ValueError, e: raise AssertionError( "Failed to query %r from %r after training on %r. " "Exception was: %r" % (i, qe, ds, e) )
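# Module-level imports assumed by the test methods below -- a sketch
# reconstructed from how the names are used, not a verbatim copy of the
# original test module's header. The helpers _Voxel_Count_Measure and
# _cartprod are sketched after the tests that use them.
import os
import tempfile

import numpy as np
import nibabel as nb

from mvpa2 import pymvpa_dataroot
from mvpa2.base import externals
from mvpa2.testing.tools import (assert_array_equal, assert_equal,
                                 assert_raises, assert_true,
                                 skip_if_no_external)
from mvpa2.datasets.mri import fmri_dataset
from mvpa2.misc.io import SampleAttributes
from mvpa2.misc.neighborhood import IndexQueryEngine, Sphere
from mvpa2.measures.base import CrossValidation, Measure
from mvpa2.measures.searchlight import Searchlight
from mvpa2.clfs.gnb import GNB
from mvpa2.generators.partition import OddEvenPartitioner
from mvpa2.mappers.fx import mean_sample
from mvpa2.mappers.detrend import poly_detrend
from mvpa2.mappers.zscore import zscore
from mvpa2.support.nibabel import surf
from mvpa2.misc.surfing import volgeom, volsurf, surf_voxel_selection
from mvpa2.misc.surfing.queryengine import (SurfaceVerticesQueryEngine,
                                            disc_surface_queryengine)
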
def test_queryengine_io(self, fn):
    skip_if_no_external('h5py')
    from mvpa2.base.hdf5 import h5save, h5load

    vol_shape = (10, 10, 10, 1)
    vol_affine = np.identity(4)
    vg = volgeom.VolGeom(vol_shape, vol_affine)

    # generate some surfaces
    sphere_density = 10
    outer = surf.generate_sphere(sphere_density) * 5 + 8
    inner = surf.generate_sphere(sphere_density) * 3 + 8

    radius = 5.
    add_fa = ['center_distances', 'grey_matter_position']
    qe = disc_surface_queryengine(radius, vg, inner, outer,
                                  add_fa=add_fa)
    ds = fmri_dataset(vg.get_masked_nifti_image())

    # the following is not really a strong requirement. XXX remove?
    assert_raises(ValueError, lambda: qe[qe.ids[0]])

    # check that after training it behaves well
    qe.train(ds)
    i = qe.ids[0]
    try:
        m = qe[i]
    except ValueError as e:
        raise AssertionError(
            'Failed to query %r from %r after training on %r. '
            'Exception was: %r' % (i, qe, ds, e))

def test_surface_minimal_voxel_selection(self):
    # Tests 'minimal' voxel selection.
    # It assumes that 'maximal' voxel selection works (which is tested
    # in other unit tests)
    vol_shape = (10, 10, 10, 1)
    vol_affine = np.identity(4)
    vg = volgeom.VolGeom(vol_shape, vol_affine)

    # generate some surfaces,
    # and add some noise to them
    sphere_density = 10
    nvertices = sphere_density ** 2 + 2
    noise = np.random.uniform(size=(nvertices, 3))
    outer = surf.generate_sphere(sphere_density) * 5 + 8 + noise
    inner = surf.generate_sphere(sphere_density) * 3 + 8 + noise

    # the floats specify fixed radii in mm; the int specifies a fixed
    # number of voxels in each searchlight
    radii = [5., 20., 10]

    # Note: a little outside margin is necessary
    # as otherwise there are nodes in the minimal case
    # that have no voxels associated with them

    for radius in radii:
        for output_modality in ('surface', 'volume'):
            for i, nvm in enumerate(('minimal', 'maximal')):
                qe = disc_surface_queryengine(
                    radius, vg, inner, outer,
                    node_voxel_mapping=nvm,
                    output_modality=output_modality)
                voxsel = qe.voxsel

                if i == 0:
                    # store the 'minimal' selection for comparison
                    keys_ = voxsel.keys()
                    voxsel_ = voxsel
                else:
                    keys = voxsel.keys()

                    # both mappings must cover the same set of nodes
                    assert_equal(keys, keys_)
                    assert_true(len(keys) * .90 < len(keys_))

                    for k in keys_:
                        x = set(voxsel_[k])  # minimal
                        y = set(voxsel[k])   # maximal

                        d = set.symmetric_difference(x, y)
                        r = float(len(d)) / 2 / len(x)

                        if type(radius) is float:
                            # with a fixed radius, the minimal selection
                            # must be a subset of the maximal one
                            assert_equal(x - y, set())

                        # decent agreement in any case
                        # between the two sets
                        assert_true(r < .6)

def test_surface_minimal_lowres_voxel_selection(self, fn):
    vol_shape = (4, 10, 10, 1)
    vol_affine = np.identity(4)
    vg = volgeom.VolGeom(vol_shape, vol_affine)

    # make surfaces that are far away from all voxels
    # in the volume
    sphere_density = 10
    radius = 10
    outer = surf.generate_plane((0, 0, 4), (0, .4, 0), (0, 0, .4),
                                14, 14)
    inner = outer + 2

    source = surf.generate_plane((0, 0, 4), (0, .8, 0), (0, 0, .8),
                                 7, 7) + 1

    for i, nvm in enumerate(('minimal', 'minimal_lowres')):
        qe = disc_surface_queryengine(radius, vg,
                                      inner, outer, source,
                                      node_voxel_mapping=nvm)

        voxsel = qe.voxsel
        if i == 0:
            voxsel0 = voxsel
        else:
            assert_equal(voxsel.keys(), voxsel0.keys())
            for k in voxsel.keys():
                p = voxsel[k]
                q = voxsel0[k]

                # require reasonable agreement: the symmetric difference
                # must be smaller than 80% of the combined size of the
                # two selections
                delta = set.symmetric_difference(set(p), set(q))
                assert_true(len(delta) < .8 * (len(p) + len(q)))

    if externals.exists('h5py'):
        from mvpa2.base.hdf5 import h5save, h5load
        h5save(fn, voxsel)
        voxsel_copy = h5load(fn)
        assert_equal(voxsel.keys(), voxsel_copy.keys())

        for id in qe.ids:
            assert_array_equal(voxsel.get(id), voxsel_copy.get(id))
def test_voxel_selection(self): """Compare surface and volume based searchlight""" """ Tests to see whether results are identical for surface-based searchlight (just one plane; Euclidean distnace) and volume-based searchlight. Note that the current value is a float; if it were int, it would specify the number of voxels in each searchlight""" radius = 10.0 """Define input filenames""" epi_fn = pathjoin(pymvpa_dataroot, "bold.nii.gz") maskfn = pathjoin(pymvpa_dataroot, "mask.nii.gz") """ Use the EPI datafile to define a surface. The surface has as many nodes as there are voxels and is parallel to the volume 'slice' """ vg = volgeom.from_any(maskfn, mask_volume=True) aff = vg.affine nx, ny, nz = vg.shape[:3] """Plane goes in x and y direction, so we take these vectors from the affine transformation matrix of the volume""" plane = surf.generate_plane(aff[:3, 3], aff[:3, 0], aff[:3, 1], nx, ny) """ Simulate pial and white matter as just above and below the central plane """ normal_vec = aff[:3, 2] outer = plane + normal_vec inner = plane + -normal_vec """ Combine volume and surface information """ vsm = volsurf.VolSurfMaximalMapping(vg, outer, inner) """ Run voxel selection with specified radius (in mm), using Euclidean distance measure """ surf_voxsel = surf_voxel_selection.voxel_selection(vsm, radius, distance_metric="e") """Define the measure""" # run_slow=True would give an actual cross-validation with meaningful # accuracies. Because this is a unit-test only the number of voxels # in each searchlight is tested. run_slow = False if run_slow: meas = CrossValidation(GNB(), OddEvenPartitioner(), errorfx=lambda p, t: np.mean(p == t)) postproc = mean_sample else: meas = _Voxel_Count_Measure() postproc = lambda x: x """ Surface analysis: define the query engine, cross validation, and searchlight """ surf_qe = SurfaceVerticesQueryEngine(surf_voxsel) surf_sl = Searchlight(meas, queryengine=surf_qe, postproc=postproc) """ new (Sep 2012): also test 'simple' queryengine wrapper function """ surf_qe2 = disc_surface_queryengine( radius, maskfn, inner, outer, plane, volume_mask=True, distance_metric="euclidean" ) surf_sl2 = Searchlight(meas, queryengine=surf_qe2, postproc=postproc) """ Same for the volume analysis """ element_sizes = tuple(map(abs, (aff[0, 0], aff[1, 1], aff[2, 2]))) sph = Sphere(radius, element_sizes=element_sizes) kwa = {"voxel_indices": sph} vol_qe = IndexQueryEngine(**kwa) vol_sl = Searchlight(meas, queryengine=vol_qe, postproc=postproc) """The following steps are similar to start_easy.py""" attr = SampleAttributes(pathjoin(pymvpa_dataroot, "attributes_literal.txt")) mask = surf_voxsel.get_mask() dataset = fmri_dataset( samples=pathjoin(pymvpa_dataroot, "bold.nii.gz"), targets=attr.targets, chunks=attr.chunks, mask=mask ) if run_slow: # do chunkswise linear detrending on dataset poly_detrend(dataset, polyord=1, chunks_attr="chunks") # zscore dataset relative to baseline ('rest') mean zscore(dataset, chunks_attr="chunks", param_est=("targets", ["rest"])) # select class face and house for this demo analysis # would work with full datasets (just a little slower) dataset = dataset[np.array([l in ["face", "house"] for l in dataset.sa.targets], dtype="bool")] """Apply searchlight to datasets""" surf_dset = surf_sl(dataset) surf_dset2 = surf_sl2(dataset) vol_dset = vol_sl(dataset) surf_data = surf_dset.samples surf_data2 = surf_dset2.samples vol_data = vol_dset.samples assert_array_equal(surf_data, surf_data2) assert_array_equal(surf_data, vol_data)

def test_voxel_selection(self):
    '''Compare surface and volume based searchlight'''
    '''
    Tests to see whether results are identical for surface-based
    searchlight (just one plane; Euclidean distance) and volume-based
    searchlight.

    Note that the current value is a float; if it were int, it would
    specify the number of voxels in each searchlight'''
    radius = 10.

    '''Define input filenames'''
    epi_fn = os.path.join(pymvpa_dataroot, 'bold.nii.gz')
    maskfn = os.path.join(pymvpa_dataroot, 'mask.nii.gz')

    '''
    Use the EPI datafile to define a surface.
    The surface has as many nodes as there are voxels
    and is parallel to the volume 'slice'
    '''
    vg = volgeom.from_any(maskfn, mask_volume=True)

    aff = vg.affine
    nx, ny, nz = vg.shape[:3]

    '''Plane goes in x and y direction, so we take these vectors
    from the affine transformation matrix of the volume'''
    plane = surf.generate_plane(aff[:3, 3], aff[:3, 0],
                                aff[:3, 1], nx, ny)

    '''
    Simulate pial and white matter as just above and below
    the central plane
    '''
    normal_vec = aff[:3, 2]
    outer = plane + normal_vec
    inner = plane + -normal_vec

    '''
    Combine volume and surface information
    '''
    vsm = volsurf.VolSurfMaximalMapping(vg, outer, inner)

    '''
    Run voxel selection with specified radius (in mm), using
    Euclidean distance measure
    '''
    surf_voxsel = surf_voxel_selection.voxel_selection(
        vsm, radius, distance_metric='e')

    '''Define the measure'''

    # run_slow=True would give an actual cross-validation with meaningful
    # accuracies. Because this is a unit test, only the number of voxels
    # in each searchlight is tested.
    run_slow = False

    if run_slow:
        meas = CrossValidation(GNB(), OddEvenPartitioner(),
                               errorfx=lambda p, t: np.mean(p == t))
        postproc = mean_sample
    else:
        meas = _Voxel_Count_Measure()
        postproc = lambda x: x

    '''
    Surface analysis: define the query engine, cross validation,
    and searchlight
    '''
    surf_qe = SurfaceVerticesQueryEngine(surf_voxsel)
    surf_sl = Searchlight(meas, queryengine=surf_qe, postproc=postproc)

    '''
    new (Sep 2012): also test 'simple' queryengine wrapper function
    '''
    surf_qe2 = disc_surface_queryengine(radius, maskfn, inner, outer,
                                        plane, volume_mask=True,
                                        distance_metric='euclidean')
    surf_sl2 = Searchlight(meas, queryengine=surf_qe2, postproc=postproc)

    '''
    Same for the volume analysis
    '''
    element_sizes = tuple(map(abs, (aff[0, 0], aff[1, 1], aff[2, 2])))
    sph = Sphere(radius, element_sizes=element_sizes)
    kwa = {'voxel_indices': sph}

    vol_qe = IndexQueryEngine(**kwa)
    vol_sl = Searchlight(meas, queryengine=vol_qe, postproc=postproc)

    '''The following steps are similar to start_easy.py'''
    attr = SampleAttributes(
        os.path.join(pymvpa_dataroot, 'attributes_literal.txt'))

    mask = surf_voxsel.get_mask()

    dataset = fmri_dataset(
        samples=os.path.join(pymvpa_dataroot, 'bold.nii.gz'),
        targets=attr.targets, chunks=attr.chunks, mask=mask)

    if run_slow:
        # do chunkswise linear detrending on dataset
        poly_detrend(dataset, polyord=1, chunks_attr='chunks')

        # zscore dataset relative to baseline ('rest') mean
        zscore(dataset, chunks_attr='chunks',
               param_est=('targets', ['rest']))

    # select class face and house for this demo analysis
    # would work with full datasets (just a little slower)
    dataset = dataset[np.array(
        [l in ['face', 'house'] for l in dataset.sa.targets],
        dtype='bool')]

    '''Apply searchlight to datasets'''
    surf_dset = surf_sl(dataset)
    surf_dset2 = surf_sl2(dataset)
    vol_dset = vol_sl(dataset)

    surf_data = surf_dset.samples
    surf_data2 = surf_dset2.samples
    vol_data = vol_dset.samples

    assert_array_equal(surf_data, surf_data2)
    assert_array_equal(surf_data, vol_data)
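
# The two tests above and below rely on a helper measure,
# _Voxel_Count_Measure, defined elsewhere in the test module. Below is a
# minimal sketch consistent with how it is called here (no constructor
# arguments; one value per searchlight); the dtype parameter and the
# is_trained shortcut are assumptions, not necessarily the verbatim
# original.
class _Voxel_Count_Measure(Measure):
    is_trained = True  # nothing to train, so report readiness directly

    def __init__(self, dtype=float, **kwargs):
        Measure.__init__(self, **kwargs)
        self.dtype = dtype

    def _call(self, dset):
        # return the number of features (voxels) in this searchlight
        return self.dtype(dset.nfeatures)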

def test_voxel_selection_alternative_calls(self):
    # Tests a multitude of different searchlight calls
    # that all should yield exactly the same results.
    #
    # Calls differ by whether the arguments are filenames
    # or data objects, whether values are specified explicitly
    # or set to the default implicitly (using None),
    # and by different calls to run the voxel selection.
    #
    # This method does not test for mask functionality.

    # define the volume
    vol_shape = (10, 10, 10, 3)
    vol_affine = np.identity(4)
    vol_affine[0, 0] = vol_affine[1, 1] = vol_affine[2, 2] = 5

    # four versions: array, nifti image, file name, fmri dataset
    volarr = np.ones(vol_shape)
    volimg = nb.Nifti1Image(volarr, vol_affine)

    # There is a detected problem with elderly NumPy's (e.g. 1.6.1
    # on precise on travis) leading to segfaults while operating
    # on memmapped volumes being forwarded to pprocess.
    # Thus just making it compressed volume for those cases
    suf = '.gz' \
        if externals.exists('pprocess') and externals.versions['numpy'] < '1.6.2' \
        else ''
    fd, volfn = tempfile.mkstemp('vol.nii' + suf, 'test')
    os.close(fd)
    volimg.to_filename(volfn)
    volds = fmri_dataset(volfn)

    fd, volfngz = tempfile.mkstemp('vol.nii.gz', 'test')
    os.close(fd)
    volimg.to_filename(volfngz)
    voldsgz = fmri_dataset(volfngz)

    # make the surfaces
    sphere_density = 10

    # two versions: Surface and file name
    outer = surf.generate_sphere(sphere_density) * 25. + 15
    inner = surf.generate_sphere(sphere_density) * 20. + 15
    intermediate = inner * .5 + outer * .5
    nv = outer.nvertices

    fd, outerfn = tempfile.mkstemp('outer.asc', 'test')
    os.close(fd)
    fd, innerfn = tempfile.mkstemp('inner.asc', 'test')
    os.close(fd)
    fd, intermediatefn = tempfile.mkstemp('intermediate.asc', 'test')
    os.close(fd)

    for s, fn in zip([outer, inner, intermediate],
                     [outerfn, innerfn, intermediatefn]):
        surf.write(fn, s, overwrite=True)

    # searchlight radius (in mm)
    radius = 10.

    # dataset used to run searchlight on
    ds = fmri_dataset(volfn)

    # simple voxel counter (run for each searchlight position)
    m = _Voxel_Count_Measure()

    # number of voxels expected in each searchlight
    r_expected = np.array([[
        18, 9, 10, 9, 9, 9, 9, 10, 9, 9,
        9, 9, 11, 11, 11, 11, 10, 10, 10, 9,
        10, 11, 9, 10, 10, 8, 7, 8, 8, 8,
        9, 10, 12, 12, 11, 7, 7, 8, 5, 9,
        11, 11, 12, 12, 9, 5, 8, 7, 7, 12,
        12, 13, 12, 12, 7, 7, 8, 5, 9, 12,
        12, 13, 11, 9, 5, 8, 7, 7, 11, 12,
        12, 11, 12, 10, 10, 11, 9, 11, 12, 12,
        12, 12, 16, 13, 16, 16, 16, 17, 15, 17,
        17, 17, 16, 16, 16, 18, 16, 16, 16, 16,
        18, 16]])

    params = dict(intermediate_=(intermediate, intermediatefn, None),
                  center_nodes_=(None, range(nv)),
                  volume_=(volimg, volfn, volds, volfngz, voldsgz),
                  surf_src_=('filename', 'surf'),
                  volume_mask_=(None, True, 0, 2),
                  call_method_=('qe', 'rvs', 'gam'))

    combis = _cartprod(params)  # compute all possible combinations
    combistep = 17  # 173
    # some fine prime number to speed things up
    # if this value becomes too big then not all
    # cases are covered
    # the unit test tests itself whether all values
    # occur at least once

    tested_params = dict()

    def val2str(x):
        return '%r:%r' % (type(x), x)

    for i in xrange(0, len(combis), combistep):
        combi = combis[i]

        intermediate_ = combi['intermediate_']
        center_nodes_ = combi['center_nodes_']
        volume_ = combi['volume_']
        surf_src_ = combi['surf_src_']
        volume_mask_ = combi['volume_mask_']
        call_method_ = combi['call_method_']

        # keep track of which values were used -
        # so that this unit test tests itself
        for k in combi.keys():
            if not k in tested_params:
                tested_params[k] = set()
            tested_params[k].add(val2str(combi[k]))

        # use either the Surface objects or their filenames
        if surf_src_ == 'surf':
            s_i, s_m, s_o = inner, intermediate, outer
        elif surf_src_ == 'filename':
            s_i, s_m, s_o = innerfn, intermediatefn, outerfn
        else:
            raise ValueError('this should not happen')

        if call_method_ == 'qe':
            # use the fancy query engine wrapper
            qe = disc_surface_queryengine(
                radius, volume_, s_i, s_o, s_m,
                source_surf_nodes=center_nodes_,
                volume_mask=volume_mask_)
            sl = Searchlight(m, queryengine=qe)
            r = sl(ds).samples

        elif call_method_ == 'rvs':
            # use query-engine but build the
            # ingredients by hand
            vg = volgeom.from_any(volume_, volume_mask_)
            vs = volsurf.VolSurfMaximalMapping(vg, s_i, s_o)
            sel = surf_voxel_selection.voxel_selection(
                vs, radius, source_surf=s_m,
                source_surf_nodes=center_nodes_)
            qe = SurfaceVerticesQueryEngine(sel)
            sl = Searchlight(m, queryengine=qe)
            r = sl(ds).samples

        elif call_method_ == 'gam':
            # build everything from the ground up
            vg = volgeom.from_any(volume_, volume_mask_)
            vs = volsurf.VolSurfMaximalMapping(vg, s_i, s_o)
            sel = surf_voxel_selection.voxel_selection(
                vs, radius, source_surf=s_m,
                source_surf_nodes=center_nodes_)

            mp = sel

            ks = sel.keys()
            nk = len(ks)
            r = np.zeros((1, nk))
            for j, k in enumerate(ks):
                r[0, j] = len(mp[k])

        # check if result is as expected
        assert_array_equal(r_expected, r)

    # clean up
    all_fns = [volfn, volfngz, outerfn, innerfn, intermediatefn]
    map(os.remove, all_fns)

    for k, vs in params.iteritems():
        if not k in tested_params:
            raise ValueError('Missing key: %r' % k)
        for v in vs:
            vstr = val2str(v)
            if not vstr in tested_params[k]:
                raise ValueError('Missing value %s for %r' % (vstr, k))
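
# _cartprod is the other helper assumed above: it expands a dict mapping
# parameter names to value sequences into a list of dicts, one per
# combination (a Cartesian product). Below is a minimal sketch; the
# original helper may enumerate combinations in a different order, which
# the coverage self-check at the end of the test tolerates.
import itertools

def _cartprod(d):
    keys = list(d.keys())
    return [dict(zip(keys, values))
            for values in itertools.product(*(d[k] for k in keys))]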