Example #1
0
def pytest_generate_tests(metafunc):
    """Parametrize the ``dumps``/``loads`` fixtures over every API flavor.

    Each fixture is exercised through the module-level functions, the
    Encoder/Decoder classes, and (where applicable) the binary and text
    streaming variants.
    """
    names = metafunc.fixturenames

    def class_dumps(o, **opts):
        # Class-based serialization: build an Encoder, then call it.
        return rj.Encoder(**opts)(o)

    def class_loads(j, **opts):
        # Class-based deserialization: build a Decoder, then call it.
        return rj.Decoder(**opts)(j)

    def as_bytes(j):
        # Accept either str or bytes input for the binary-stream variants.
        return j.encode('utf-8') if isinstance(j, str) else j

    if 'dumps' in names and 'loads' in names:
        metafunc.parametrize('dumps,loads',
                             ((rj.dumps, rj.loads),
                              (class_dumps, class_loads)),
                             ids=('func[string]', 'class[string]'))
    elif 'dumps' in names:
        variants = (rj.dumps,
                    binary_streaming_dumps,
                    text_streaming_dumps,
                    class_dumps,
                    binary_streaming_encoder,
                    text_streaming_encoder)
        metafunc.parametrize('dumps', variants,
                             ids=('func[string]', 'func[bytestream]',
                                  'func[textstream]', 'class[string]',
                                  'class[binarystream]', 'class[textstream]'))
    elif 'loads' in names:
        variants = (
            rj.loads,
            lambda j, **opts: rj.load(io.BytesIO(as_bytes(j)), **opts),
            lambda j, **opts: rj.load(io.StringIO(j), **opts),
            class_loads,
            lambda j, **opts: class_loads(io.BytesIO(as_bytes(j)), **opts),
            lambda j, **opts: class_loads(io.StringIO(j), **opts),
        )
        metafunc.parametrize('loads', variants,
                             ids=('func[string]', 'func[bytestream]',
                                  'func[textstream]', 'class[string]',
                                  'class[bytestream]', 'class[textstream]'))
Example #2
0
def pytest_generate_tests(metafunc):
    """Run each test once with the functional API and once with the classes."""

    def class_dumps(o, **opts):
        return rj.Encoder(**opts)(o)

    def class_loads(j, **opts):
        return rj.Decoder(**opts)(j)

    names = metafunc.fixturenames
    if 'dumps' in names and 'loads' in names:
        metafunc.parametrize('dumps,loads',
                             ((rj.dumps, rj.loads),
                              (class_dumps, class_loads)))
    elif 'dumps' in names:
        metafunc.parametrize('dumps', (rj.dumps, class_dumps))
    elif 'loads' in names:
        metafunc.parametrize('loads', (rj.loads, class_loads))
Example #3
0
def test_decoder_attrs():
    """Decoder constructor arguments must be exposed as attributes."""
    modes = {
        'number_mode': rj.NM_NAN,
        'datetime_mode': rj.DM_ISO8601,
        'uuid_mode': rj.UM_CANONICAL,
        'parse_mode': rj.PM_COMMENTS,
    }
    decoder = rj.Decoder(**modes)
    for attr, expected in modes.items():
        assert getattr(decoder, attr) == expected
Example #4
0
def test_negative_infinity_c():
    """-inf round-trips through the classes, both as float and as Decimal."""
    neg_inf = float("-infinity")

    # Default number mode accepts infinities.
    assert rj.Decoder()(rj.Encoder()(neg_inf)) == neg_inf

    # With number_mode=None infinities are rejected at encode time.
    with pytest.raises(ValueError):
        rj.Encoder(number_mode=None)(neg_inf)

    dec_inf = Decimal(neg_inf)
    assert dec_inf.is_infinite()

    # NM_DECIMAL alone still rejects non-finite values.
    with pytest.raises(ValueError):
        rj.Encoder(number_mode=rj.NM_DECIMAL)(dec_inf)

    # Adding NM_NAN allows the Decimal infinity to round-trip.
    mode = rj.NM_DECIMAL | rj.NM_NAN
    restored = rj.Decoder(number_mode=mode)(rj.Encoder(number_mode=mode)(dec_inf))
    assert restored == neg_inf
    assert restored.is_infinite()
Example #5
0
def test_nan_c():
    """NaN round-trips through the classes, both as float and as Decimal."""
    not_a_number = float("nan")
    restored = rj.Decoder()(rj.Encoder()(not_a_number))

    assert math.isnan(not_a_number)
    assert math.isnan(restored)

    # With number_mode=None NaN is rejected at encode time.
    with pytest.raises(ValueError):
        rj.Encoder(number_mode=None)(not_a_number)

    dec_nan = Decimal(not_a_number)
    assert dec_nan.is_nan()

    # NM_DECIMAL alone still rejects non-finite values.
    with pytest.raises(ValueError):
        rj.Encoder(number_mode=rj.NM_DECIMAL)(dec_nan)

    # Adding NM_NAN allows the Decimal NaN to round-trip.
    mode = rj.NM_DECIMAL | rj.NM_NAN
    assert rj.Decoder(number_mode=mode)(rj.Encoder(number_mode=mode)(dec_nan)).is_nan()
Example #6
0
        if 'data' in benchmark['stats']:
            benchmark['stats'].pop('data')


def pytest_addoption(parser):
    """Register the ``--compare-other-engines`` command line flag."""
    parser.addoption(
        '--compare-other-engines',
        action='store_true',
        help='compare against other JSON engines',
    )


# Benchmark contenders: each pairs a name with a (dumps, loads) callable pair.
contenders = []

import rapidjson as rj

# Module-level functions vs. reusable Encoder/Decoder instances.
contenders.append(Contender('rapidjson_f', rj.dumps, rj.loads))
contenders.append(Contender('rapidjson_c', rj.Encoder(), rj.Decoder()))
# Same two flavors restricted to native number handling (NM_NATIVE).
contenders.append(
    Contender('rapidjson_nn_f', partial(rj.dumps, number_mode=rj.NM_NATIVE),
              partial(rj.loads, number_mode=rj.NM_NATIVE)))
contenders.append(
    Contender('rapidjson_nn_c', rj.Encoder(number_mode=rj.NM_NATIVE),
              rj.Decoder(number_mode=rj.NM_NATIVE)))

# Contenders for the numbers-specific benchmark: default (wide) vs. native
# number mode.
numbers_contenders = [
    Contender('Wide numbers', rj.dumps, rj.loads),
    Contender('Native numbers', partial(rj.dumps, number_mode=rj.NM_NATIVE),
              partial(rj.loads, number_mode=rj.NM_NATIVE)),
]

string_contenders = [
    Contender('rapidjson utf8', partial(rj.dumps, ensure_ascii=False),
Example #7
0
def pytest_addoption(parser):
    """Add an opt-in flag for benchmarking competing JSON libraries."""
    flag_options = dict(action='store_true',
                        help='compare against other JSON engines')
    parser.addoption('--compare-other-engines', **flag_options)


# Benchmark contenders: each pairs a name with a (dumps, loads) callable pair.
contenders = []

import rapidjson as rj

# Module-level functions vs. reusable Encoder/Decoder instances.
contenders.append(Contender('rapidjson_f',
                            rj.dumps,
                            rj.loads))
contenders.append(Contender('rapidjson_c',
                            rj.Encoder(),
                            rj.Decoder()))
# Same two flavors restricted to native number handling (NM_NATIVE).
contenders.append(Contender('rapidjson_nn_f',
                            partial(rj.dumps, number_mode=rj.NM_NATIVE),
                            partial(rj.loads, number_mode=rj.NM_NATIVE)))
contenders.append(Contender('rapidjson_nn_c',
                            rj.Encoder(number_mode=rj.NM_NATIVE),
                            rj.Decoder(number_mode=rj.NM_NATIVE)))

# Contenders for the numbers-specific benchmark: default (wide) vs. native
# number mode.
numbers_contenders = [
    Contender('Wide numbers', rj.dumps, rj.loads),
    Contender('Native numbers',
              partial(rj.dumps, number_mode=rj.NM_NATIVE),
              partial(rj.loads, number_mode=rj.NM_NATIVE)),
]

string_contenders = [
Example #8
0
def test_explicit_defaults_decoder():
    """Explicitly passing None for every mode must still decode plain JSON."""
    decoder = rj.Decoder(number_mode=None, datetime_mode=None,
                         uuid_mode=None, parse_mode=None)
    assert decoder('"foo"') == "foo"
 def read(self) -> Union[Dict, TreeNode]:
     """
     Deserialize the tree from ``self._path()`` using ``self.filetype``.

     I return the best representation the source format supports
     pickle: TreeNode
     else  : Dict[inode -> properties]
     """
     fn = self._path()
     if self.filetype == FileType.PICKLE:
         # Pickle restores the TreeNode object graph directly.
         with open(fn, "rb") as f:
             self.treenode = pickle.load(f)
             return self.treenode
     elif self.filetype == FileType.CSV:
         self.id_dict = {}
         with open(fn, "r") as f:
             r = csv.DictReader(f)
             for line in r:
                 # type conversion
                 # DictReader yields every value as str; coerce the
                 # non-str Node fields back to int.
                 for field in [k for k,v in Node._field_types.items() if v != str]:
                     line[field] = int(line[field])
                 self.id_dict[int(line['id'])] = Node(**line)
             return self.id_dict
     elif self.filetype == FileType.MSGPACK:
         # TODO: This will fail with larger files - have to adjust max_xxx_len
         with open(fn, "rb") as f:
             self.id_dict = {}
             for item in msgpack.unpack(f, raw=False):
                 self.id_dict[item['id']] = Node(**item)
         return self.id_dict
     elif self.filetype == FileType.JSON:
         # Stdlib and ujson share the same load-callable interface.
         return self._json_read(fn, json.load)
     elif self.filetype == FileType.UJSON:
         return self._json_read(fn, ujson.load)
     elif self.filetype == FileType.SIMPLEJSON:
         # NOTE: simplejson includes key names when serializing NamedTuples
         with open(fn, "r") as f:
             self.id_dict = {}
             if self.json_dict_list:
                 # File holds a list of per-node dicts.
                 for item in simplejson.load(f):
                     self.id_dict[item['id']] = Node(**item)
             else:
                 # File holds a mapping; only the values are nodes.
                 for v in simplejson.load(f).values():
                     self.id_dict[v['id']] = Node(**v)
         return self.id_dict
     elif self.filetype == FileType.CBOR2:
         with open(fn, "rb") as f:
             self.id_dict = {}
             for item in cbor2.load(f):
                 self.id_dict[item['id']] = Node(**item)
         return self.id_dict
     elif self.filetype == FileType.CBOR:
         with open(fn, "rb") as f:
             self.id_dict = {}
             for item in cbor.load(f):
                 self.id_dict[item['id']] = Node(**item)
         return self.id_dict
     elif self.filetype == FileType.RAPIDJSON:
         self.id_dict = {}
         with open(fn, "r") as f:
             # The Decoder instance is called on the open file object itself.
             d = rapidjson.Decoder(number_mode=rapidjson.NM_NATIVE)(f)
             if self.json_dict_list:
                 for item in d:
                     # safer cause key names are included, but slower
                     self.id_dict[item['id']] = Node(**item)
             else:
                 # list(self.id_dict.values()) - produces a list of lists
                 for item in d:
                     self.id_dict[item[0]] = Node._make(item)
         return self.id_dict
     elif self.filetype == FileType.BSON:
         self.id_dict = {}
         with open(fn, "rb") as f:
             for doc in decode_file_iter(f):
                 self.id_dict[doc['id']] = Node(**doc)
         return self.id_dict
Example #10
0
class CustomJSONDecoder(JSONDecoder):
    # Replace JSONDecoder.decode with a pre-configured rapidjson Decoder.
    # NOTE(review): a Decoder instance is a plain callable, not a function,
    # so attribute access does not bind it as a method — inst.decode(s)
    # invokes the Decoder with only the JSON text, which appears intended.
    decode = rapidjson.Decoder(
        number_mode=rapidjson.NM_NATIVE,
        datetime_mode=rapidjson.DM_ISO8601,
        uuid_mode=rapidjson.UM_CANONICAL,
    )
Example #11
0
import rapidjson

# Dump datetime in ISO format
# treat "native" datetime objects as UTC
DATETIME_MODE = rapidjson.DM_ISO8601 | rapidjson.DM_NAIVE_IS_UTC

# Module-level serializers sharing the datetime configuration above.
dumps = rapidjson.Encoder(datetime_mode=DATETIME_MODE)
# Variant that leaves non-ASCII characters unescaped in the output.
dumps_notascii = rapidjson.Encoder(datetime_mode=DATETIME_MODE,
                                   ensure_ascii=False)
loads = rapidjson.Decoder(datetime_mode=DATETIME_MODE)
import rapidjson as json
import sqlite3
import msgpack
from ..loci import Superlocus
from ..loci import Locus
import sys
import collections
import itertools
import numpy as np
from ._locus_line_creator import _create_locus_lines

# Module-level rapidjson Decoder (default modes), reused by manage_index.
decoder = json.Decoder()


def manage_index(data, dumps, source):
    index, data = data
    dump_index, gene_counter, gene_max = data
    orig_gene_counter = gene_counter
    conn = sqlite3.connect(dumps[dump_index])
    cursor = conn.cursor()
    batch = []
    try:
        stranded_loci = cursor.execute("SELECT json FROM loci WHERE counter=?",
                                       (str(index), )).fetchone()
    except ValueError:
        raise ValueError((index, type(index)))
    loci = []
    sublocus_dump = decoder(
        msgpack.loads(cursor.execute(
            "SELECT json FROM subloci WHERE counter=?",
            (str(index), )).fetchone()[0],