Example #1
    def test_niml_dset_voxsel(self, fn):
        if not externals.exists('nibabel'):
            return

        # This is actually a bit of an integration test.
        # It tests storing and retrieving searchlight results.
        # Imports are inline here so that they do not mess up the header
        # and keep the other unit tests more modular
        # XXX put this in a separate file?
        from mvpa2.misc.surfing import volgeom, surf_voxel_selection, queryengine
        from mvpa2.measures.searchlight import Searchlight
        from mvpa2.support.nibabel import surf
        from mvpa2.measures.base import Measure
        from mvpa2.datasets.mri import fmri_dataset

        class _Voxel_Count_Measure(Measure):
            # used to check voxel selection results
            is_trained = True

            def __init__(self, dtype, **kwargs):
                Measure.__init__(self, **kwargs)
                self.dtype = dtype

            def _call(self, dset):
                return self.dtype(dset.nfeatures)

        sh = (20, 20, 20)
        vg = volgeom.VolGeom(sh, np.identity(4))

        density = 20

        outer = surf.generate_sphere(density) * 10. + 5
        inner = surf.generate_sphere(density) * 5. + 5

        intermediate = outer * .5 + inner * .5
        xyz = intermediate.vertices

        radius = 50

        sel = surf_voxel_selection.run_voxel_selection(radius, vg, inner, outer)
        qe = queryengine.SurfaceVerticesQueryEngine(sel)

        for dtype in (int, float):
            sl = Searchlight(_Voxel_Count_Measure(dtype), queryengine=qe)

            ds = fmri_dataset(vg.get_empty_nifti_image(1))
            r = sl(ds)

            niml.write(fn, r)
            rr = niml.read(fn)

            os.remove(fn)

            assert_array_equal(r.samples, rr.samples)
Example #2
    def test_surface_dset_niml_io_with_unicode(self, fn):
        ds = dataset_wizard(np.arange(20).reshape((4, 5)), targets=1, chunks=1)
        ds.sa['unicode'] = [u'u1', u'uu2', u'uuu3', u'uuuu4']
        ds.sa['str'] = ['s1', 'ss2', 'sss3', 'ssss4']
        ds.fa['node_indices'] = np.arange(5)

        # ensure sample attributes are of String type (not array)
        niml_dict = afni_niml_dset.dset2rawniml(niml.to_niml(ds))
        expected_dtypes = dict(PYMVPA_SA_unicode='String',
                               PYMVPA_SA_str='String',
                               PYMVPA_SA_targets='1*int32')

        def assert_has_expected_datatype(name, expected_dtype, niml):
            """helper function"""
            nodes = niml_dict['nodes']

            for node in nodes:
                if node['name'] == name:
                    assert_equal(node['ni_type'], expected_dtype)
                    return

            raise ValueError('not found: %s' % name)

        for name, expected_dtype in expected_dtypes.iteritems():
            assert_has_expected_datatype(name, expected_dtype, niml)

        # test NIML I/O
        niml.write(fn, ds)

        # remove extra fields added when reading the file
        ds2 = niml.from_any(fn)
        ds2.a.pop('history')
        ds2.a.pop('filename')
        ds2.sa.pop('labels')
        ds2.sa.pop('stats')

        # NIML does not support int64, only int32;
        # compare equality of values in samples by setting the
        # datatype the same as in the input (int32 or int64 depending
        # on the platform)
        ds2.samples = np.asarray(ds2.samples, dtype=ds.samples.dtype)
        assert_datasets_equal(ds, ds2)
Example #3
    def test_surface_dset_niml_io_with_unicode(self, fn):
        ds = dataset_wizard(np.arange(20).reshape((4, 5)), targets=1, chunks=1)
        ds.sa['unicode'] = ['u1', 'uu2', 'uuu3', 'uuuu4']
        ds.sa['str'] = ['s1', 'ss2', 'sss3', 'ssss4']
        ds.fa['node_indices'] = np.arange(5)

        # ensure sample attributes are of String type (not array)
        niml_dict = afni_niml_dset.dset2rawniml(niml.to_niml(ds))
        expected_dtypes = dict(PYMVPA_SA_unicode='String',
                               PYMVPA_SA_str='String',
                               PYMVPA_SA_targets='1*int32')

        def assert_has_expected_datatype(name, expected_dtype, niml):
            """helper function"""
            nodes = niml_dict['nodes']

            for node in nodes:
                if node['name'] == name:
                    assert_equal(node['ni_type'], expected_dtype)
                    return

            raise ValueError('not found: %s' % name)

        for name, expected_dtype in expected_dtypes.items():
            assert_has_expected_datatype(name, expected_dtype, niml)

        # test NIML I/O
        niml.write(fn, ds)

        # remove extra fields added when reading the file
        ds2 = niml.from_any(fn)
        ds2.a.pop('history')
        ds2.a.pop('filename')
        ds2.sa.pop('labels')
        ds2.sa.pop('stats')

        # NIML does not support int64, only int32;
        # compare equality of values in samples by setting the
        # datatype the same as in the input (int32 or int64 depending
        # on the platform)
        ds2.samples = np.asarray(ds2.samples, dtype=ds.samples.dtype)
        assert_datasets_equal(ds, ds2)
Example #4
    def test_afni_niml_dset_with_2d_strings(self, fn):
        # test for 2D arrays with strings. These are possibly SUMA-incompatible
        # but should still be handled properly for i/o.
        # Addresses https://github.com/PyMVPA/PyMVPA/issues/163 (#163)
        samples = np.asarray([[1, 2, 3], [4, 5, 6]])
        labels = np.asarray(list(map(list, ['abcd', 'efgh'])))
        idxs = np.asarray([np.arange(10, 14), np.arange(20, 24)])

        ds = Dataset(samples, sa=dict(labels=labels, idxs=idxs))

        for fmt in ('binary', 'text', 'base64'):
            niml.write(fn, ds, fmt)

            ds_ = niml.read(fn)

            assert_array_equal(ds.samples, ds_.samples)

            for sa_key in ds.sa.keys():
                v = ds.sa[sa_key].value
                v_ = ds_.sa[sa_key].value
                assert_array_equal(v, v_)
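All of the examples above exercise the same basic pattern: build a Dataset, store it with niml.write, read it back with niml.read (or niml.from_any), and compare the samples. Because the snippets are test-method bodies that cannot run on their own, here is a minimal, self-contained sketch of that round trip. The import paths (mvpa2.datasets.niml, mvpa2.datasets.base.dataset_wizard) are assumed from the PyMVPA test suite and may need adjusting; the dataset shape and file name are illustrative only.

# Minimal sketch of the NIML write/read round trip used in the examples above.
# Import paths are assumptions based on the PyMVPA test suite; adjust if your
# installation exposes niml or dataset_wizard elsewhere.
import os
import tempfile

import numpy as np
from numpy.testing import assert_array_equal

from mvpa2.datasets.base import dataset_wizard
from mvpa2.datasets import niml

# build a small surface-style dataset: 2 samples x 5 features, plus the
# per-feature node indices that NIML/SUMA datasets carry
ds = dataset_wizard(np.arange(10).reshape((2, 5)), targets=1, chunks=1)
ds.fa['node_indices'] = np.arange(5)

fn = os.path.join(tempfile.mkdtemp(), 'tiny.niml.dset')
try:
    niml.write(fn, ds)       # store as an AFNI/SUMA NIML dataset
    ds2 = niml.read(fn)      # read it back

    # NIML does not support int64, only int32, so cast the read-back samples
    # to the input dtype before comparing values
    assert_array_equal(ds.samples,
                       np.asarray(ds2.samples, dtype=ds.samples.dtype))
finally:
    if os.path.exists(fn):
        os.remove(fn)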