Ejemplo n.º 1
0
def rawniml2annot(p):
    '''Converts raw NIML to annotation format'''

    # A list of raw NIML elements is converted element-wise.
    if type(p) is list:
        return map(rawniml2annot, p)

    annot = dset.rawniml2dset(p)

    # Attach any label table found among the child nodes.
    for child in p['nodes']:
        if child.get('dset_type', None) == 'LabelTableObject':
            annot[r'AFNI_labeltable'] = dset.rawniml2dset(child)

    return annot
Ejemplo n.º 2
0
def rawniml2annot(p):
    '''Converts raw NIML to annotation format'''

    if type(p) is not list:
        # Single element: convert it, then merge in a label table
        # child node if one is present.
        result = dset.rawniml2dset(p)
        for nd in p['nodes']:
            if nd.get('dset_type', None) == 'LabelTableObject':
                result[r'AFNI_labeltable'] = dset.rawniml2dset(nd)
        return result

    # Lists are handled by converting each element recursively.
    return map(rawniml2annot, p)
Ejemplo n.º 3
0
def rawniml2annot(p):
    '''Convert a raw NIML element to an annotation dictionary.

    Lists are converted element-wise. For a single element the base
    dataset is converted with dset.rawniml2dset, the AFNI label table
    (if present among the child nodes) is merged in, and a 'key2row'
    lookup table (label key -> row index) is added.

    Raises
    ------
    ValueError
        If keys listed in a 'UNIQUE_VALS_*' attribute node are not all
        present in the label table keys.
    '''
    if type(p) is list:
        return map(rawniml2annot, p)

    r = dset.rawniml2dset(p)

    # BUG FIX: unique_keys was previously unbound when no
    # 'UNIQUE_VALS_*' attribute node is present, raising NameError at
    # the consistency check below. Start with an empty set instead.
    unique_keys = set()

    for node in p['nodes']:
        name = node.get('name', None)
        if name == 'AFNI_labeltable':
            r.update(_rawniml_labeltable2annot(node))
        elif name == 'AFNI_atr' and node['atr_name'].startswith(
                'UNIQUE_VALS_'):
            data = node['data']
            unique_keys = set(data[i, 0] for i in xrange(len(data)))

    # Map each label key to its row index. The loop variable is renamed
    # from 'r' -- the original shadowed the result dict inside the
    # generator expression.
    keys = r['keys']
    r['key2row'] = dict((keys[row], row) for row in xrange(len(keys)))

    # Every key seen in the data must appear in the label table.
    if len(unique_keys - set(keys)) > 0:
        raise ValueError('key mismatch')

    # clean up: 'stats' is not part of the annotation format
    r.pop('stats')

    return r
Ejemplo n.º 4
0
def _rawniml_labeltable2annot(p):
    # Convert a raw NIML label table element into a plain annotation
    # dict with 'rgba', 'keys' and 'names' entries.
    converted = dset.rawniml2dset(p)
    table = converted['data']

    # Columns 0-3 hold the R, G, B, A channels; 4 the keys; 5 the names.
    n = len(table[0])
    result = dict()
    result['rgba'] = [tuple(table[c][row] for c in (0, 1, 2, 3))
                      for row in xrange(n)]
    result['keys'] = table[4]
    result['names'] = table[5]

    return result
Ejemplo n.º 5
0
def _rawniml_labeltable2annot(p):
    '''Turn a raw NIML label table element into an annotation dict.'''
    parsed = dset.rawniml2dset(p)
    cols = parsed['data']

    # cols[0..3] are the R, G, B, A channels, one value per row.
    rgba = []
    for row in xrange(len(cols[0])):
        rgba.append((cols[0][row],
                     cols[1][row],
                     cols[2][row],
                     cols[3][row]))

    r = dict()
    r['rgba'] = rgba
    r['keys'] = cols[4]
    r['names'] = cols[5]
    return r
Ejemplo n.º 6
0
def rawniml2annot(p):
    '''Convert a raw NIML element (or list of them) to annotation format.

    For a single element the base dataset is built with
    dset.rawniml2dset, the AFNI label table child node (if any) is
    merged in, and a 'key2row' mapping from label key to row index is
    derived from the 'keys' entry.

    Raises
    ------
    ValueError
        If a 'UNIQUE_VALS_*' attribute node contains keys missing from
        the label table.
    '''
    if type(p) is list:
        return map(rawniml2annot, p)

    r = dset.rawniml2dset(p)

    # Fix: initialize unique_keys so the check below does not raise
    # NameError when no 'UNIQUE_VALS_*' attribute node is present.
    unique_keys = set()

    for node in p['nodes']:
        name = node.get('name', None)
        if name == 'AFNI_labeltable':
            r.update(_rawniml_labeltable2annot(node))
        elif name == 'AFNI_atr' and node['atr_name'].startswith('UNIQUE_VALS_'):
            data = node['data']
            unique_keys = set(data[i, 0] for i in xrange(len(data)))

    # Build the key -> row index lookup. The loop variable is renamed
    # from 'r' (the original shadowed the result dict in the genexp).
    keys = r['keys']
    r['key2row'] = dict((keys[row], row) for row in xrange(len(keys)))

    if len(unique_keys - set(keys)) > 0:
        raise ValueError('key mismatch')

    # clean up: 'stats' is not part of the annotation format
    r.pop('stats')

    return r
Ejemplo n.º 7
0
    def test_afni_niml_dset(self):
        """Round-trip test for AFNI NIML surface dataset I/O.

        Generates a random (100, 45) dataset and verifies that data,
        labels and node indices survive (1) in-memory conversion to
        raw NIML and back ('skipio' mode), (2) writing to disk and
        reading back ('normal' mode), and (3) padding a sparse dataset
        to a full one ('sparse2full' mode) -- for every combination of
        storage format and numeric dtype.
        """
        sz = (100, 45)  # dataset size
        rng = self._get_rng()  # generate random data

        # Spot-check table: in-range positions map to an expected
        # sample value; out-of-range positions must raise IndexError.
        # NOTE(review): the concrete floats assume a deterministic seed
        # from _get_rng() -- confirm the seed is fixed.
        expected_vals = {
            (0, 0): -2.13856,
            (sz[0] - 1, sz[1] - 1): -1.92434,
            (sz[0], sz[1] - 1): None,
            (sz[0] - 1, sz[1]): None,
            sz: None
        }

        # test for different formats in which the data is stored
        fmts = ['text', 'binary', 'base64']

        # also test for different datatypes
        tps = [np.int32, np.int64, np.float32, np.float64]

        # generated random data
        data = rng.normal(size=sz)

        # set labels for samples, and set node indices
        labels = [
            'lab_%d' % round(rng.uniform() * 1000) for _ in xrange(sz[1])
        ]
        node_indices = np.argsort(rng.uniform(size=(sz[0], )))
        node_indices = np.reshape(node_indices, (sz[0], 1))

        # absolute tolerance for float spot-checks
        eps = .00001

        # test I/O
        # NOTE(review): mkstemp returns an open OS-level file descriptor
        # that is discarded here -- a descriptor leak; consider closing it.
        _, fn = tempfile.mkstemp('data.niml.dset', 'test')

        # depending on the mode we do different tests (but on the same data)
        modes = ['normal', 'skipio', 'sparse2full']

        for fmt in fmts:
            for tp in tps:
                for mode in modes:
                    # make a dataset
                    dset = dict(data=np.asarray(data, tp),
                                labels=labels,
                                node_indices=node_indices)
                    dset_keys = dset.keys()

                    if mode == 'skipio':
                        # try conversion to/from raw NIML
                        # do not write to disk
                        r = afni_niml_dset.dset2rawniml(dset)
                        s = afni_niml.rawniml2string(r)
                        r2 = afni_niml.string2rawniml(s)
                        dset2 = afni_niml_dset.rawniml2dset(r2)[0]

                    else:
                        # write and read from disk
                        afni_niml_dset.write(fn, dset, fmt)
                        dset2 = afni_niml_dset.read(fn)
                        os.remove(fn)

                    # data in dset and dset2 should be identical
                    for k in dset_keys:
                        # general idea is to test whether v is equal to v2
                        v = dset[k]
                        v2 = dset2[k]

                        if k == 'data':
                            if mode == 'sparse2full':
                                # test the sparse2full feature
                                # this changes the order of the data over columns
                                # so we skip testing whether dset2 is equal to dset
                                nfull = 2 * sz[0]

                                dset3 = afni_niml_dset.sparse2full(
                                    dset2, pad_to_node=nfull)

                                assert_equal(dset3['data'].shape[0], nfull)

                                # rebuild the padded array independently
                                # and compare against sparse2full's output
                                idxs = dset['node_indices'][:, 0]
                                idxs3 = dset3['node_indices'][:, 0]
                                vbig = np.zeros((nfull, sz[1]))
                                vbig[idxs, :] = v[np.arange(sz[0]), :]
                                v = vbig
                                v2 = dset3['data'][idxs3, :]
                            else:
                                # check that data is as expected
                                for pos, val in expected_vals.iteritems():
                                    if val is None:
                                        # out-of-bounds access must fail
                                        assert_raises(IndexError,
                                                      lambda x: x[pos], v2)
                                    else:
                                        val2 = np.asarray(val, tp)
                                        assert_true(abs(v2[pos] - val2) < eps)
                        if type(v) is list:
                            assert_equal(v, v2)
                        else:
                            # arrays are compared to 4 decimal places
                            eps_dec = 4
                            if mode != 'sparse2full' or k == 'data':
                                assert_array_almost_equal(v, v2, eps_dec)
Ejemplo n.º 8
0
    def test_afni_niml_dset(self, fn):
        """Round-trip test for AFNI NIML surface dataset I/O.

        `fn` is a temporary file path (presumably injected by a
        with_tempfile-style decorator -- TODO confirm). A random
        (100, 45) dataset is built and checked to survive in-memory
        raw-NIML conversion ('skipio'), disk write/read ('normal') and
        sparse-to-full padding ('sparse2full'), across all storage
        formats and dtypes.
        """
        sz = (100, 45) # dataset size
        rng = self._get_rng() # generate random data

        # Spot-check table: in-range positions map to expected samples;
        # out-of-range positions (value None) must raise IndexError.
        # NOTE(review): the concrete floats assume a deterministic seed
        # from _get_rng() -- confirm the seed is fixed.
        expected_vals = {(0, 0):-2.13856 , (sz[0] - 1, sz[1] - 1):-1.92434,
                         (sz[0], sz[1] - 1):None, (sz[0] - 1, sz[1]):None,
                         sz:None}

        # test for different formats in which the data is stored
        fmts = ['text', 'binary', 'base64']

        # also test for different datatypes
        tps = [np.int32, np.int64, np.float32, np.float64]

        # generated random data
        data = rng.normal(size=sz)

        # set labels for samples, and set node indices
        labels = ['lab_%d' % round(rng.uniform() * 1000)
                        for _ in xrange(sz[1])]
        node_indices = np.argsort(rng.uniform(size=(sz[0],)))
        node_indices = np.reshape(node_indices, (sz[0], 1))


        # absolute tolerance for float spot-checks
        eps = .00001

        # test I/O
        # depending on the mode we do different tests (but on the same data)
        modes = ['normal', 'skipio', 'sparse2full']

        for fmt in fmts:
            for tp in tps:
                for mode in modes:
                    # make a dataset
                    dset = dict(data=np.asarray(data, tp),
                                labels=labels,
                                node_indices=node_indices)
                    dset_keys = dset.keys()

                    if mode == 'skipio':
                        # try conversion to/from raw NIML
                        # do not write to disk
                        r = afni_niml_dset.dset2rawniml(dset)
                        s = afni_niml.rawniml2string(r)
                        r2 = afni_niml.string2rawniml(s)
                        dset2 = afni_niml_dset.rawniml2dset(r2)[0]

                    else:
                        # write and read from disk
                        afni_niml_dset.write(fn, dset, fmt)
                        dset2 = afni_niml_dset.read(fn)
                        os.remove(fn)

                    # data in dset and dset2 should be identical
                    for k in dset_keys:
                        # general idea is to test whether v is equal to v2
                        v = dset[k]
                        v2 = dset2[k]

                        if k == 'data':
                            if mode == 'sparse2full':
                                # test the sparse2full feature
                                # this changes the order of the data over columns
                                # so we skip testing whether dset2 is equal to dset
                                nfull = 2 * sz[0]

                                dset3 = afni_niml_dset.sparse2full(dset2,
                                                            pad_to_node=nfull)

                                assert_equal(dset3['data'].shape[0], nfull)

                                # rebuild the padded array independently
                                # and compare against sparse2full's output
                                idxs = dset['node_indices'][:, 0]
                                idxs3 = dset3['node_indices'][:, 0]
                                vbig = np.zeros((nfull, sz[1]))
                                vbig[idxs, :] = v[np.arange(sz[0]), :]
                                v = vbig
                                v2 = dset3['data'][idxs3, :]
                            else:
                                # check that data is as expected
                                for pos, val in expected_vals.iteritems():
                                    if val is None:
                                        # out-of-bounds access must fail
                                        assert_raises(IndexError, lambda x:x[pos], v2)
                                    else:
                                        val2 = np.asarray(val, tp)
                                        assert_true(abs(v2[pos] - val2) < eps)
                        if type(v) is list:
                            assert_equal(v, v2)
                        else:
                            # arrays are compared to 4 decimal places
                            eps_dec = 4
                            if mode != 'sparse2full' or k == 'data':
                                assert_array_almost_equal(v, v2, eps_dec)