Example #1
def test_chunks():
    c = convert(chunks(np.ndarray), a, chunksize=2)
    assert isinstance(c, chunks(np.ndarray))
    assert len(list(c)) == 2
    assert eq(list(c)[1], [3, 4])

    assert eq(convert(np.ndarray, c), a[:])
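These tests lean on module-level fixtures that the listing omits. A minimal sketch of definitions consistent with the assertions above, assuming the odo library; the names `a` and `eq` and their exact values are inferred, not taken from the source:

import numpy as np
from odo import convert, chunks  # assumed imports; the tests' module provides these

a = np.array([1, 2, 3, 4])       # chunksize=2 then yields [1, 2] and [3, 4]

def eq(left, right):
    # Loose elementwise equality helper, as the assertions appear to use.
    return bool((np.asarray(left) == np.asarray(right)).all())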
Example #2
def test_chunks_numpy_pandas():
    x = np.array([('Alice', 100), ('Bob', 200)],
                 dtype=[('name', 'S7'), ('amount', 'i4')])
    n = chunks(np.ndarray)([x, x])

    pan = convert(chunks(pd.DataFrame), n)
    num = convert(chunks(np.ndarray), pan)

    assert isinstance(pan, chunks(pd.DataFrame))
    assert all(isinstance(chunk, pd.DataFrame) for chunk in pan)

    assert isinstance(num, chunks(np.ndarray))
    assert all(isinstance(chunk, np.ndarray) for chunk in num)
Example #3
def pre_compute(expr,
                data,
                comfortable_memory=None,
                chunksize=2**18,
                **kwargs):
    comfortable_memory = comfortable_memory or min(1e9, available_memory() / 4)

    kwargs = dict()

    # Chunk if the file is large
    if os.path.getsize(data.path) > comfortable_memory:
        kwargs['chunksize'] = chunksize
    else:
        chunksize = None

    # Insert projection into read_csv
    oexpr = optimize(expr, data)
    leaf = oexpr._leaves()[0]
    pth = list(path(oexpr, leaf))
    if len(pth) >= 2 and isinstance(pth[-2], (Projection, Field)):
        kwargs['usecols'] = pth[-2].fields

    if chunksize:
        return into(chunks(pd.DataFrame), data, dshape=leaf.dshape, **kwargs)
    else:
        return into(pd.DataFrame, data, dshape=leaf.dshape, **kwargs)
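The `usecols` pushdown relies on standard pandas behavior: columns not named are never parsed. For reference, a small illustration with a hypothetical file:

import pandas as pd

# Only 'name' and 'amount' are parsed from disk; other columns are skipped.
df = pd.read_csv('accounts.csv', usecols=['name', 'amount'])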
Example #4
def pre_compute(expr,
                data,
                comfortable_memory=None,
                chunksize=2**18,
                **kwargs):
    comfortable_memory = comfortable_memory or min(1e9, available_memory() / 4)

    kwargs = dict()

    # Chunk if the file is large
    if os.path.getsize(data.path) > comfortable_memory:
        kwargs['chunksize'] = chunksize
    else:
        chunksize = None

    # Insert projection into read_csv
    oexpr = optimize(expr, data)
    leaf = oexpr._leaves()[0]
    pth = list(path(oexpr, leaf))
    if len(pth) >= 2 and isinstance(pth[-2], (Projection, Field)):
        # NOTE: FIXME: We pass the column names through `str` to workaround a
        # PY2 Pandas bug with strings / unicode objects.
        kwargs['usecols'] = list(map(str, pth[-2].fields))

    if chunksize:
        return into(chunks(pd.DataFrame), data, dshape=leaf.dshape, **kwargs)
    else:
        return into(pd.DataFrame, data, dshape=leaf.dshape, **kwargs)
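Callers therefore get back either one in-memory frame or a container of frame-sized pieces. A hedged sketch of how a caller might consume the result; `expr` and `csv_` are hypothetical stand-ins for a Blaze expression and CSV resource:

result = pre_compute(expr, csv_, comfortable_memory=10)
if isinstance(result, chunks(pd.DataFrame)):
    nrows = sum(len(chunk) for chunk in result)  # stream piece by piece
else:
    nrows = len(result)                          # single DataFrame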
Example #5
def test_empty_iterator_to_chunks_ndarray():
    ds = dshape('var * {x: int}')
    result = convert(chunks(np.ndarray), iter([]), dshape=ds)
    data = convert(np.ndarray, result)
    assert isinstance(data, np.ndarray)
    assert len(data) == 0
    assert data.dtype.names == ('x',)
Example #6
def test_iterator_and_numpy_chunks():
    c = iterator_to_numpy_chunks([1, 2, 3], chunksize=2)
    assert isinstance(c, chunks(np.ndarray))
    assert all(isinstance(chunk, np.ndarray) for chunk in c)

    c = iterator_to_numpy_chunks([1, 2, 3], chunksize=2)
    L = convert(list, c)
    assert L == [1, 2, 3]
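`iterator_to_numpy_chunks` itself is not shown in the listing. A simplified sketch of what such a helper plausibly does, assuming `toolz.partition_all`; the library's real implementation may differ:

import numpy as np
from toolz import partition_all

def iterator_to_numpy_chunks_sketch(seq, chunksize=1024):
    # Partition the stream into fixed-size pieces; wrapping a callable
    # (rather than a one-shot generator) keeps the chunks re-iterable
    # for re-iterable inputs.
    return chunks(np.ndarray)(
        lambda: (np.asarray(part) for part in partition_all(chunksize, seq)))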
Example #7
def test_pandas_and_chunks_pandas():
    df = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [1., 2., 3., 4.]})

    c = dataframe_to_chunks_dataframe(df, chunksize=2)
    assert isinstance(c, chunks(pd.DataFrame))
    assert len(list(c)) == 2

    df2 = chunks_dataframe_to_dataframe(c)
    tm.assert_frame_equal(df, df2)
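The two helpers round-trip a frame through row blocks. A minimal sketch of the slicing direction, assuming odo's chunks container accepts a callable:

def dataframe_to_chunks_dataframe_sketch(df, chunksize=2**20):
    # Positional row slices; each piece is itself a DataFrame.
    return chunks(pd.DataFrame)(
        lambda: (df.iloc[i:i + chunksize]
                 for i in range(0, df.shape[0], chunksize)))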
Example #8
def test_chunks():
    with file(x) as (fn, f, dset):
        c = convert(chunks(np.ndarray), dset)
        assert eq(convert(np.ndarray, c), x)
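`file` is a fixture from the surrounding test module. A plausible sketch for the ndarray-based tests, assuming an h5py-backed temporary dataset; every detail here is a guess:

import os
import tempfile
from contextlib import contextmanager

import h5py

@contextmanager
def file(data):
    # Hypothetical fixture: temporary HDF5 file holding one resizable dataset.
    fn = tempfile.mktemp('.hdf5')
    f = h5py.File(fn, 'a')
    dset = f.create_dataset('data', data=data, chunks=True,
                            maxshape=(None,) + data.shape[1:])
    try:
        yield fn, f, dset
    finally:
        f.close()
        os.remove(fn)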
Example #9
def test_append_chunks():
    b = carray(x)
    append(b, chunks(np.ndarray)([x, x]))
    assert len(b) == len(x) * 3
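`carray` is presumably bcolz's compressed, chunked array container, and the append extends it in place one chunk at a time. Roughly, in plain bcolz terms:

import bcolz
import numpy as np

x = np.arange(10)
b = bcolz.carray(x)   # chunked, compressed container
b.append(x)           # what append(b, ...) amounts to per chunk
b.append(x)
assert len(b) == 3 * len(x)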
Example #10
def test_numpy_to_chunks_numpy():
    x = np.arange(100)
    c = numpy_to_chunks_numpy(x, chunksize=10)
    assert isinstance(c, chunks(np.ndarray))
    assert len(list(c)) == 10
    assert eq(list(c)[0], x[:10])
Example #11
def test_append_chunks():
    with file(df) as (fn, f, dset):
        append(dset, chunks(pd.DataFrame)([df, df]))

        assert discover(dset).shape[0] == len(df) * 3
Example #12
def test_chunks():
    with file(df) as (fn, f, dset):
        c = convert(chunks(pd.DataFrame), dset)
        assert eq(convert(np.ndarray, c), df)
Example #13
def test_convert_chunks():
    with file(df) as (fn, f, dset):
        c = convert(chunks(pd.DataFrame), dset, chunksize=len(df) // 2)
        assert len(list(c)) == 2
        assert eq(convert(pd.DataFrame, c), df)
Example #14
def test_chunks_of_lists_and_iterators():
    L = [1, 2], [3, 4]
    cl = chunks(list)(L)
    assert convert(list, cl) == [1, 2, 3, 4]
    assert list(convert(Iterator, cl)) == [1, 2, 3, 4]
    assert len(list(convert(chunks(Iterator), cl))) == 2
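`chunks(T)` acts as a parametrized container type: `chunks(list)` is a class whose instances hold a sequence (or a callable producing one) of `T`-typed pieces. A minimal sketch of such a constructor; the library's real one is more elaborate:

class Chunks(object):
    # Lazy sequence of homogeneously typed pieces.
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        # A callable lets the pieces be produced afresh on each iteration.
        return iter(self.data() if callable(self.data) else self.data)

_chunk_types = {}

def chunks(cls):
    # Memoized, so chunks(list) is chunks(list) and isinstance checks hold.
    if cls not in _chunk_types:
        _chunk_types[cls] = type('chunks(%s)' % cls.__name__, (Chunks,),
                                 {'container': cls})
    return _chunk_types[cls]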
Example #15
def test_pre_compute_on_large_csv_gives_chunked_reader():
    csv = CSV(example('iris.csv'))
    s = symbol('s', discover(csv))
    assert isinstance(pre_compute(s.species, csv, comfortable_memory=10),
                      (chunks(pd.DataFrame), pd.io.parsers.TextFileReader))
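`TextFileReader` is what pandas returns when `read_csv` is handed a `chunksize`, so the test accepts either pandas' own chunked reader or odo's container. For reference:

import pandas as pd

reader = pd.read_csv('iris.csv', chunksize=10)  # assuming the file exists locally
assert isinstance(reader, pd.io.parsers.TextFileReader)
assert isinstance(next(iter(reader)), pd.DataFrame)  # each piece is a DataFrame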
Example #16
def test_empty_iterator_to_chunks_dataframe():
    ds = dshape('var * {x: int}')
    result = convert(chunks(pd.DataFrame), iter([]), dshape=ds)
    data = convert(pd.DataFrame, result)
    assert isinstance(data, pd.DataFrame)
    assert list(data.columns) == ['x']
Example #17
def test_append_chunks():
    with file(x) as (fn, f, dset):
        append(dset, chunks(np.ndarray)([x, x]))

        assert len(dset) == len(x) * 3
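Appending in place like this requires the dataset to have been created resizable (a `None` entry in `maxshape`). Roughly what the append must do per chunk, assuming h5py:

# Hypothetical inner loop of append(dset, chunks(np.ndarray)([x, x])):
for chunk in (x, x):
    n = dset.shape[0]
    dset.resize((n + len(chunk),) + dset.shape[1:])
    dset[n:] = chunk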