def test_surface_dset_niml_io_with_unicode(self, fn):
    """Check that unicode and str sample attributes survive NIML dataset I/O.

    ``fn`` is a temporary file name supplied by the test harness.
    """
    ds = dataset_wizard(np.arange(20).reshape((4, 5)), targets=1, chunks=1)
    ds.sa['unicode'] = [u'u1', u'uu2', u'uuu3', u'uuuu4']
    ds.sa['str'] = ['s1', 'ss2', 'sss3', 'ssss4']
    ds.fa['node_indices'] = np.arange(5)

    # ensure sample attributes are of String type (not array)
    niml_dict = afni_niml_dset.dset2rawniml(niml.to_niml(ds))
    expected_dtypes = dict(PYMVPA_SA_unicode='String',
                           PYMVPA_SA_str='String',
                           PYMVPA_SA_targets='1*int32')

    def assert_has_expected_datatype(name, expected_dtype, niml):
        """helper function"""
        # NOTE(review): the ``niml`` parameter shadows the module and is
        # unused -- the closure variable ``niml_dict`` is inspected instead.
        nodes = niml_dict['nodes']
        for node in nodes:
            if node['name'] == name:
                assert_equal(node['ni_type'], expected_dtype)
                return
        raise ValueError('not found: %s', name)

    # BUG FIX: dict.iteritems() does not exist on Python 3; use .items()
    # (consistent with the other copy of this test in the file).
    for name, expected_dtype in expected_dtypes.items():
        assert_has_expected_datatype(name, expected_dtype, niml)

    # test NIML I/O
    niml.write(fn, ds)

    # remove extra fields added when reading the file
    ds2 = niml.from_any(fn)
    ds2.a.pop('history')
    ds2.a.pop('filename')
    ds2.sa.pop('labels')
    ds2.sa.pop('stats')

    # NIML does not support int64, only int32;
    # compare equality of values in samples by setting the
    # datatype the same as in the input (int32 or int64 depending
    # on the platform)
    ds2.samples = np.asarray(ds2.samples, dtype=ds.samples.dtype)
    assert_datasets_equal(ds, ds2)
def test_niml(self):
    """Round-trip a raw NIML dict through Dataset conversion and file I/O."""
    d = dict(
        data=np.random.normal(size=(10, 2)),
        node_indices=np.arange(10),
        stats=["none", "Tstat(2)"],
        labels=["foo", "bar"],
    )
    a = niml.from_niml(d)
    b = niml.to_niml(a)

    # BUG FIX: mkstemp() returns an *open* OS-level descriptor; close it so
    # it is not leaked (the writer reopens the path by name anyway).
    fd, fn = tempfile.mkstemp(".niml.dset", "dset")
    os.close(fd)
    afni_niml_dset.write(fn, b)
    bb = afni_niml_dset.read(fn)
    cc = niml.from_niml(bb)
    os.remove(fn)

    for dset in (a, cc):
        assert_equal(list(dset.sa["labels"]), d["labels"])
        assert_equal(list(dset.sa["stats"]), d["stats"])
        assert_array_equal(np.asarray(dset.fa["node_indices"]).ravel(),
                           d["node_indices"])
        eps_dec = 4
        assert_array_almost_equal(dset.samples, d["data"].transpose(),
                                  eps_dec)

    # some more tests to ensure that the order of elements is ok
    # (row first or column first)
    d = np.arange(10).reshape((5, -1)) + 0.5
    ds = Dataset(d)

    # BUG FIX: the original read ``fn = _, fn = tempfile.mkstemp(...)`` --
    # a chained-assignment typo; also close the descriptor here and remove
    # the temporary file afterwards so the test leaves nothing behind.
    fd, fn = tempfile.mkstemp(".niml.dset", "dset")
    os.close(fd)
    writers = [niml.write, afni_niml_dset.write]
    for i, writer in enumerate(writers):
        for form in ("text", "binary", "base64"):
            if i == 0:
                writer(fn, ds, form=form)
            else:
                writer(fn, dict(data=d.transpose()), form=form)
            x = afni_niml_dset.read(fn)
            assert_array_equal(x["data"], d.transpose())
    os.remove(fn)
def test_niml(self, fn):
    """Round-trip a raw NIML dict through Dataset conversion and disk I/O.

    ``fn`` is a temporary file name provided by the test harness.
    """
    source = dict(data=np.random.normal(size=(10, 2)),
                  node_indices=np.arange(10),
                  stats=['none', 'Tstat(2)'],
                  labels=['foo', 'bar'])
    converted = niml.from_niml(source)
    afni_niml_dset.write(fn, niml.to_niml(converted))

    def _read_via_raw(path):
        # go through the low-level AFNI reader, then convert
        return niml.from_niml(afni_niml_dset.read(path))

    for reader in (niml.from_any, _read_via_raw):
        loaded = reader(fn)
        for dset in (converted, loaded):
            assert_equal(list(dset.sa['labels']), source['labels'])
            assert_equal(list(dset.sa['stats']), source['stats'])
            assert_array_equal(
                np.asarray(dset.fa['node_indices']).ravel(),
                source['node_indices'])
            # samples are stored transposed relative to the raw dict
            assert_array_almost_equal(dset.samples,
                                      source['data'].transpose(), 4)

    # some more tests to ensure that the order of elements is ok
    # (row first or column first)
    arr = np.arange(10).reshape((5, -1)) + .5
    ds = Dataset(arr)
    for idx, writer in enumerate([niml.write, afni_niml_dset.write]):
        for form in ('text', 'binary', 'base64'):
            if idx:
                writer(fn, dict(data=arr.transpose()), form=form)
            else:
                writer(fn, ds, form=form)
            reloaded = afni_niml_dset.read(fn)
            assert_array_equal(reloaded['data'], arr.transpose())
def test_surface_dset_niml_io_with_unicode(self, fn):
    """Verify that str/unicode sample attributes survive NIML round-trips.

    ``fn`` is a temporary file name supplied by the test harness.
    """
    ds = dataset_wizard(np.arange(20).reshape((4, 5)), targets=1, chunks=1)
    ds.sa['unicode'] = ['u1', 'uu2', 'uuu3', 'uuuu4']
    ds.sa['str'] = ['s1', 'ss2', 'sss3', 'ssss4']
    ds.fa['node_indices'] = np.arange(5)

    # text attributes must serialize as NIML 'String', not as arrays
    raw = afni_niml_dset.dset2rawniml(niml.to_niml(ds))
    wanted = {'PYMVPA_SA_unicode': 'String',
              'PYMVPA_SA_str': 'String',
              'PYMVPA_SA_targets': '1*int32'}

    def _assert_node_dtype(name, expected_dtype, niml):
        """helper function: locate node ``name`` and check its ni_type"""
        for node in raw['nodes']:
            if node['name'] == name:
                assert_equal(node['ni_type'], expected_dtype)
                return
        raise ValueError('not found: %s', name)

    for attr_name, dtype in wanted.items():
        _assert_node_dtype(attr_name, dtype, niml)

    # exercise NIML write followed by read
    niml.write(fn, ds)
    ds2 = niml.from_any(fn)

    # drop the bookkeeping attributes the reader adds before comparing
    for extra in ('history', 'filename'):
        ds2.a.pop(extra)
    for extra in ('labels', 'stats'):
        ds2.sa.pop(extra)

    # NIML stores int32 only; cast the read-back samples to the input's
    # dtype so the comparison is independent of the platform's int width
    ds2.samples = np.asarray(ds2.samples, dtype=ds.samples.dtype)
    assert_datasets_equal(ds, ds2)
def test_niml(self, fn):
    """Round-trip a NIML dict through Dataset conversion and file I/O.

    ``fn`` is a temporary file name provided by the test harness.
    """
    raw = dict(data=np.random.normal(size=(10, 2)),
               node_indices=np.arange(10),
               stats=['none', 'Tstat(2)'],
               labels=['foo', 'bar'])
    dataset = niml.from_niml(raw)
    as_niml = niml.to_niml(dataset)
    afni_niml_dset.write(fn, as_niml)

    # read back through both the high-level and the low-level path
    readers = (niml.from_any,
               lambda p: niml.from_niml(afni_niml_dset.read(p)))
    for reader in readers:
        read_back = reader(fn)
        for dset in (dataset, read_back):
            assert_equal(list(dset.sa['labels']), raw['labels'])
            assert_equal(list(dset.sa['stats']), raw['stats'])
            node_idx = np.asarray(dset.fa['node_indices']).ravel()
            assert_array_equal(node_idx, raw['node_indices'])
            # samples are the transpose of the raw dict's 'data' field
            decimals = 4
            assert_array_almost_equal(dset.samples,
                                      raw['data'].transpose(), decimals)

    # some more tests to ensure that the order of elements is ok
    # (row first or column first)
    values = np.arange(10).reshape((5, -1)) + .5
    ds = Dataset(values)
    expected = values.transpose()
    for writer_no, writer in enumerate([niml.write, afni_niml_dset.write]):
        for form in ('text', 'binary', 'base64'):
            if writer_no == 0:
                writer(fn, ds, form=form)
            else:
                writer(fn, dict(data=values.transpose()), form=form)
            assert_array_equal(afni_niml_dset.read(fn)['data'], expected)