Example #1
    def test_niml_dset(self):
        d = dict(data=np.random.normal(size=(10, 2)),
                 node_indices=np.arange(10),
                 stats=['none', 'Tstat(2)'],
                 labels=['foo', 'bar'])
        a = niml_dset.from_niml_dset(d)
        b = niml_dset.to_niml_dset(a)

        _, fn = tempfile.mkstemp('.niml.dset', 'dset')

        afni_niml_dset.write(fn, b)
        bb = afni_niml_dset.read(fn)
        cc = niml_dset.from_niml_dset(bb)

        os.remove(fn)

        for dset in (a, cc):
            assert_equal(list(dset.sa['labels']), d['labels'])
            assert_equal(list(dset.sa['stats']), d['stats'])
            assert_array_equal(np.asarray(dset.fa['node_indices']).ravel(),
                               d['node_indices'])

            eps_dec = 4
            assert_array_almost_equal(dset.samples, d['data'].transpose(),
                                                                    eps_dec)

        # some more tests to ensure that the order of elements is ok
        # (row first or column first)

        d = np.arange(10).reshape((5, -1)) + .5
        ds = Dataset(d)

        _, fn = tempfile.mkstemp('.niml.dset', 'dset')
        writers = [niml_dset.write, afni_niml_dset.write]
        for i, writer in enumerate(writers):
            for form in ('text', 'binary', 'base64'):
                if i == 0:
                    writer(fn, ds, form=form)
                else:
                    writer(fn, dict(data=d.transpose()), form=form)

                x = afni_niml_dset.read(fn)
                assert_array_equal(x['data'], d.transpose())
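
For reference, a minimal round-trip sketch of the same pattern outside the test harness. The import locations are assumptions (the examples on this page omit their imports); adjust them to wherever niml_dset and afni_niml_dset live in your installation:

import os
import tempfile

import numpy as np

# assumed import locations; not shown in the examples on this page
from mvpa2.datasets import niml_dset
from mvpa2.support.nibabel import afni_niml_dset

d = dict(data=np.random.normal(size=(10, 2)),
         node_indices=np.arange(10),
         stats=['none', 'Tstat(2)'],
         labels=['foo', 'bar'])

ds = niml_dset.from_niml_dset(d)     # NIML dict -> Dataset
raw = niml_dset.to_niml_dset(ds)     # Dataset -> NIML dict

_, fn = tempfile.mkstemp('.niml.dset', 'dset')
afni_niml_dset.write(fn, raw)        # write the NIML dict to disk
restored = niml_dset.from_niml_dset(afni_niml_dset.read(fn))
os.remove(fn)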
    def test_niml(self):
        d = dict(
            data=np.random.normal(size=(10, 2)),
            node_indices=np.arange(10),
            stats=["none", "Tstat(2)"],
            labels=["foo", "bar"],
        )
        a = niml.from_niml(d)
        b = niml.to_niml(a)

        _, fn = tempfile.mkstemp(".niml.dset", "dset")

        afni_niml_dset.write(fn, b)
        bb = afni_niml_dset.read(fn)
        cc = niml.from_niml(bb)

        os.remove(fn)

        for dset in (a, cc):
            assert_equal(list(dset.sa["labels"]), d["labels"])
            assert_equal(list(dset.sa["stats"]), d["stats"])
            assert_array_equal(np.asarray(dset.fa["node_indices"]).ravel(), d["node_indices"])

            eps_dec = 4
            assert_array_almost_equal(dset.samples, d["data"].transpose(), eps_dec)

        # some more tests to ensure that the order of elements is ok
        # (row first or column first)

        d = np.arange(10).reshape((5, -1)) + 0.5
        ds = Dataset(d)

        _, fn = tempfile.mkstemp(".niml.dset", "dset")
        writers = [niml.write, afni_niml_dset.write]
        for i, writer in enumerate(writers):
            for form in ("text", "binary", "base64"):
                if i == 0:
                    writer(fn, ds, form=form)
                else:
                    writer(fn, dict(data=d.transpose()), form=form)

                x = afni_niml_dset.read(fn)
                assert_array_equal(x["data"], d.transpose())
Example #5
def afni_niml_zscore_makefull(fnin, fnout, pad_to_ico_ld=None, pad_to_node=None):
    dset = afni_niml_dset.read(fnin)

    dset_z = afni_niml_zscore_data(dset)

    if pad_to_ico_ld or pad_to_node:
        dset_z_full = afni_niml_dset.sparse2full(dset_z, pad_to_ico_ld=pad_to_ico_ld, pad_to_node=pad_to_node)
    else:
        dset_z_full = dset_z

    afni_niml_dset.write(fnout, dset_z_full)
    return fnout
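
A hedged usage sketch for the helper above. The file names are placeholders, the import location is an assumption, and afni_niml_zscore_data is assumed to be defined alongside this function, as in the example:

# assumed import location for the I/O module used above
from mvpa2.support.nibabel import afni_niml_dset

# z-score the data in 'lh_data.niml.dset' (placeholder name); the actual
# normalization is done by afni_niml_zscore_data, which is not shown here.
# pad_to_ico_ld=64 pads the result to a full ld=64 icosahedral surface
# via sparse2full, as in the function body above.
afni_niml_zscore_makefull('lh_data.niml.dset', 'lh_data_z.niml.dset',
                          pad_to_ico_ld=64)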
    def test_afni_niml_dset(self):
        sz = (100, 45)  # dataset size
        rng = self._get_rng()  # generate random data

        expected_vals = {
            (0, 0): -2.13856,
            (sz[0] - 1, sz[1] - 1): -1.92434,
            (sz[0], sz[1] - 1): None,
            (sz[0] - 1, sz[1]): None,
            sz: None
        }

        # test for different formats in which the data is stored
        fmts = ['text', 'binary', 'base64']

        # also test for different datatypes
        tps = [np.int32, np.int64, np.float32, np.float64]

        # generated random data
        data = rng.normal(size=sz)

        # set labels for samples, and set node indices
        labels = [
            'lab_%d' % round(rng.uniform() * 1000) for _ in range(sz[1])
        ]
        node_indices = np.argsort(rng.uniform(size=(sz[0], )))
        node_indices = np.reshape(node_indices, (sz[0], 1))

        eps = .00001

        # test I/O
        _, fn = tempfile.mkstemp('data.niml.dset', 'test')

        # depending on the mode we do different tests (but on the same data)
        modes = ['normal', 'skipio', 'sparse2full']

        for fmt in fmts:
            for tp in tps:
                for mode in modes:
                    # make a dataset
                    dset = dict(data=np.asarray(data, tp),
                                labels=labels,
                                node_indices=node_indices)
                    dset_keys = dset.keys()

                    if mode == 'skipio':
                        # try conversion to/from raw NIML
                        # do not write to disk
                        r = afni_niml_dset.dset2rawniml(dset)
                        s = afni_niml.rawniml2string(r)
                        r2 = afni_niml.string2rawniml(s)
                        dset2 = afni_niml_dset.rawniml2dset(r2)[0]

                    else:
                        # write and read from disk
                        afni_niml_dset.write(fn, dset, fmt)
                        dset2 = afni_niml_dset.read(fn)
                        os.remove(fn)

                    # data in dset and dset2 should be identical
                    for k in dset_keys:
                        # general idea is to test whether v is equal to v2
                        v = dset[k]
                        v2 = dset2[k]

                        if k == 'data':
                            if mode == 'sparse2full':
                                # test the sparse2full feature
                                # this changes the order of the data over columns
                                # so we skip testing whether dset2 is equal to dset
                                nfull = 2 * sz[0]

                                dset3 = afni_niml_dset.sparse2full(
                                    dset2, pad_to_node=nfull)

                                assert_equal(dset3['data'].shape[0], nfull)

                                idxs = dset['node_indices'][:, 0]
                                idxs3 = dset3['node_indices'][:, 0]
                                vbig = np.zeros((nfull, sz[1]))
                                vbig[idxs, :] = v[np.arange(sz[0]), :]
                                v = vbig
                                v2 = dset3['data'][idxs3, :]
                            else:
                                # check that data is as expected
                                for pos, val in expected_vals.items():
                                    if val is None:
                                        assert_raises(IndexError,
                                                      lambda x: x[pos], v2)
                                    else:
                                        val2 = np.asarray(val, tp)
                                        assert_true(abs(v2[pos] - val2) < eps)
                        if type(v) is list:
                            assert_equal(v, v2)
                        else:
                            eps_dec = 4
                            if mode != 'sparse2full' or k == 'data':
                                assert_array_almost_equal(v, v2, eps_dec)
    def test_afni_niml_dset(self, fn):
        sz = (100, 45) # dataset size
        rng = self._get_rng() # generate random data

        expected_vals = {(0, 0): -2.13856, (sz[0] - 1, sz[1] - 1): -1.92434,
                         (sz[0], sz[1] - 1): None, (sz[0] - 1, sz[1]): None,
                         sz: None}

        # test for different formats in which the data is stored
        fmts = ['text', 'binary', 'base64']

        # also test for different datatypes
        tps = [np.int32, np.int64, np.float32, np.float64]

        # generated random data
        data = rng.normal(size=sz)

        # set labels for samples, and set node indices
        labels = ['lab_%d' % round(rng.uniform() * 1000)
                  for _ in range(sz[1])]
        node_indices = np.argsort(rng.uniform(size=(sz[0],)))
        node_indices = np.reshape(node_indices, (sz[0], 1))


        eps = .00001

        # test I/O
        # depending on the mode we do different tests (but on the same data)
        modes = ['normal', 'skipio', 'sparse2full']

        for fmt in fmts:
            for tp in tps:
                for mode in modes:
                    # make a dataset
                    dset = dict(data=np.asarray(data, tp),
                                labels=labels,
                                node_indices=node_indices)
                    dset_keys = dset.keys()

                    if mode == 'skipio':
                        # try conversion to/from raw NIML
                        # do not write to disk
                        r = afni_niml_dset.dset2rawniml(dset)
                        s = afni_niml.rawniml2string(r)
                        r2 = afni_niml.string2rawniml(s)
                        dset2 = afni_niml_dset.rawniml2dset(r2)[0]

                    else:
                        # write and read from disk
                        afni_niml_dset.write(fn, dset, fmt)
                        dset2 = afni_niml_dset.read(fn)
                        os.remove(fn)

                    # data in dset and dset2 should be identical
                    for k in dset_keys:
                        # general idea is to test whether v is equal to v2
                        v = dset[k]
                        v2 = dset2[k]

                        if k == 'data':
                            if mode == 'sparse2full':
                                # test the sparse2full feature
                                # this changes the order of the data over columns
                                # so we skip testing whether dset2 is equal to dset
                                nfull = 2 * sz[0]

                                dset3 = afni_niml_dset.sparse2full(dset2,
                                                            pad_to_node=nfull)

                                assert_equal(dset3['data'].shape[0], nfull)

                                idxs = dset['node_indices'][:, 0]
                                idxs3 = dset3['node_indices'][:, 0]
                                vbig = np.zeros((nfull, sz[1]))
                                vbig[idxs, :] = v[np.arange(sz[0]), :]
                                v = vbig
                                v2 = dset3['data'][idxs3, :]
                            else:
                                # check that data is as expected
                                for pos, val in expected_vals.items():
                                    if val is None:
                                        assert_raises(IndexError, lambda x: x[pos], v2)
                                    else:
                                        val2 = np.asarray(val, tp)
                                        assert_true(abs(v2[pos] - val2) < eps)
                        if type(v) is list:
                            assert_equal(v, v2)
                        else:
                            eps_dec = 4
                            if mode != 'sparse2full' or k == 'data':
                                assert_array_almost_equal(v, v2, eps_dec)
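
The sparse2full branch in the tests above pads a dataset that covers only some surface nodes out to a full set of pad_to_node nodes. A minimal sketch under the same assumptions (import location assumed; a real dataset dict may carry more keys than shown, and the tests suggest rows for missing nodes are zero-filled):

import numpy as np

# assumed import location, as in the sketches above
from mvpa2.support.nibabel import afni_niml_dset

# a sparse dataset covering 3 nodes of a 10-node surface
sparse = dict(data=np.random.normal(size=(3, 2)),
              node_indices=np.reshape(np.array([1, 4, 7]), (3, 1)))

full = afni_niml_dset.sparse2full(sparse, pad_to_node=10)
print(full['data'].shape)    # (10, 2): one row per node after padding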