Code example #1
File: test_s3.py Project: wikiped/dask
def test_parquet(s3):
    dd = pytest.importorskip('dask.dataframe')
    pytest.importorskip('fastparquet')
    from dask.dataframe.io.parquet import to_parquet, read_parquet

    import pandas as pd
    import numpy as np

    url = 's3://%s/test.parquet' % test_bucket_name

    data = pd.DataFrame({'i32': np.arange(1000, dtype=np.int32),
                         'i64': np.arange(1000, dtype=np.int64),
                         'f': np.arange(1000, dtype=np.float64),
                         'bhello': np.random.choice(
                             ['hello', 'you', 'people'],
                             size=1000).astype("O")},
                        index=pd.Index(np.arange(1000), name='foo'))
    df = dd.from_pandas(data, chunksize=500)
    to_parquet(url, df, object_encoding='utf8')

    files = [f.split('/')[-1] for f in s3.ls(url)]
    assert '_metadata' in files
    assert 'part.0.parquet' in files

    df2 = read_parquet(url, index='foo')
    assert len(df2.divisions) > 1

    pd.util.testing.assert_frame_equal(data, df2.compute())
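These S3 tests rely on an `s3` fixture and a module-level `test_bucket_name` that are not shown above. A minimal sketch of the kind of fixture they assume, using moto to mock S3 and s3fs to expose it as a filesystem; the bucket name and fixture details are assumptions rather than code from wikiped/dask, and older moto versions expose the mock as `mock_s3` while newer ones use `mock_aws`:

import pytest
import moto
import s3fs

test_bucket_name = 'test'  # hypothetical bucket name


@pytest.fixture()
def s3():
    # Start an in-process mock of the S3 API, create the test bucket,
    # and hand the s3fs filesystem object to the test.
    with moto.mock_s3():
        fs = s3fs.S3FileSystem(anon=False)
        fs.mkdir(test_bucket_name)
        yield fs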
Code example #2
def test_local():
    with tmpdir() as tmp:
        tmp = str(tmp)
        data = pd.DataFrame({
            'i32': np.arange(1000, dtype=np.int32),
            'i64': np.arange(1000, dtype=np.int64),
            'f': np.arange(1000, dtype=np.float64),
            'bhello': np.random.choice(['hello', 'you', 'people'],
                                       size=1000).astype("O")
        })
        df = dd.from_pandas(data, chunksize=500)

        to_parquet(tmp, df, write_index=False)

        files = os.listdir(tmp)
        assert '_metadata' in files
        assert 'part.0.parquet' in files

        df2 = read_parquet(tmp, index=False)

        assert len(df2.divisions) > 1

        out, out2 = df.compute(), df2.compute().reset_index()

        for column in df.columns:
            assert (out[column] == out2[column]).all()
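This local variant leaves its module-level imports implicit. A sketch of the imports it would need to run standalone; the exact import paths, in particular the `tmpdir` helper, are assumptions about dask's test utilities at the time:

import os
import numpy as np
import pandas as pd
import dask.dataframe as dd
from dask.dataframe.io.parquet import to_parquet, read_parquet
from dask.utils import tmpdir  # context manager yielding a temporary directory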
Code example #3
def test_parquet(s3):
    dd = pytest.importorskip('dask.dataframe')
    pytest.importorskip('fastparquet')
    from dask.dataframe.io.parquet import to_parquet, read_parquet

    import pandas as pd
    import numpy as np

    url = 's3://%s/test.parquet' % test_bucket_name

    data = pd.DataFrame(
        {'i32': np.arange(1000, dtype=np.int32),
         'i64': np.arange(1000, dtype=np.int64),
         'f': np.arange(1000, dtype=np.float64),
         'bhello': np.random.choice(['hello', 'you', 'people'],
                                    size=1000).astype("O")},
        index=pd.Index(np.arange(1000), name='foo'))
    df = dd.from_pandas(data, chunksize=500)
    to_parquet(url, df, object_encoding='utf8')

    files = [f.split('/')[-1] for f in s3.ls(url)]
    assert '_metadata' in files
    assert 'part.0.parquet' in files

    df2 = read_parquet(url, index='foo')
    assert len(df2.divisions) > 1

    pd.util.testing.assert_frame_equal(data, df2.compute())
Code example #4
File: test_parquet.py Project: gameduell/dask
def test_roundtrip(df, write_kwargs, read_kwargs):
    with tmpdir() as tmp:
        tmp = str(tmp)
        if df.index.name is None:
            df.index.name = 'index'
        ddf = dd.from_pandas(df, npartitions=2)

        to_parquet(tmp, ddf, **write_kwargs)
        ddf2 = read_parquet(tmp, index=df.index.name, **read_kwargs)
        assert_eq(ddf, ddf2)
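test_roundtrip takes its df, write_kwargs and read_kwargs as parameters rather than fixtures, so in the original test module it is presumably driven by a pytest.mark.parametrize decorator. A hedged sketch of what that decorator could look like; the concrete parameter sets below are illustrative, not the ones from gameduell/dask:

import pandas as pd
import pytest


@pytest.mark.parametrize('df,write_kwargs,read_kwargs', [
    (pd.DataFrame({'x': [3, 2, 1]}), {}, {}),
    (pd.DataFrame({'x': ['c', 'a', 'b']}), {'object_encoding': 'utf8'}, {}),
])
def test_roundtrip(df, write_kwargs, read_kwargs):
    ...  # body as in the example above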
Code example #5
def test_roundtrip(df, write_kwargs, read_kwargs):
    with tmpdir() as tmp:
        tmp = str(tmp)
        if df.index.name is None:
            df.index.name = 'index'
        ddf = dd.from_pandas(df, npartitions=2)

        to_parquet(tmp, ddf, **write_kwargs)
        ddf2 = read_parquet(tmp, index=df.index.name, **read_kwargs)
        assert_eq(ddf, ddf2)
Code example #6
def test_categorical():
    with tmpdir() as tmp:
        df = pd.DataFrame({'x': ['a', 'b', 'c'] * 100}, dtype='category')
        ddf = dd.from_pandas(df, npartitions=3)
        to_parquet(tmp, ddf)

        ddf2 = read_parquet(tmp, categories=['x'])

        assert ddf2.compute().x.cat.categories.tolist() == ['a', 'b', 'c']
        ddf2.loc[:1000].compute()
        df.index.name = 'index'  # defaults to 'index' in this case
        assert assert_eq(df, ddf2)
Code example #7
File: test_parquet.py Project: gameduell/dask
def test_categorical():
    with tmpdir() as tmp:
        df = pd.DataFrame({'x': ['a', 'b', 'c'] * 100},
                          dtype='category')
        ddf = dd.from_pandas(df, npartitions=3)
        to_parquet(tmp, ddf)

        ddf2 = read_parquet(tmp, categories=['x'])

        assert ddf2.x.cat.categories.tolist() == ['a', 'b', 'c']
        ddf2.loc[:1000].compute()
        df.index.name = 'index'  # defaults to 'index' in this case
        assert assert_eq(df, ddf2)
Code example #8
File: test_parquet.py Project: gameduell/dask
def test_ordering():
    with tmpdir() as tmp:
        tmp = str(tmp)
        df = pd.DataFrame({'a': [1, 2, 3],
                           'b': [10, 20, 30],
                           'c': [100, 200, 300]},
                          index=pd.Index([-1, -2, -3], name='myindex'),
                          columns=['c', 'a', 'b'])
        ddf = dd.from_pandas(df, npartitions=2)
        to_parquet(tmp, ddf)

        pf = fastparquet.ParquetFile(tmp)
        assert pf.columns == ['myindex', 'c', 'a', 'b']

        ddf2 = read_parquet(tmp, index='myindex')
        assert_eq(ddf, ddf2)
Code example #9
def test_index():
    with tmpdir() as tmp:
        tmp = str(tmp)

        df = pd.DataFrame({'x': [6, 2, 3, 4, 5],
                           'y': [1.0, 2.0, 1.0, 2.0, 1.0]},
                          index=pd.Index([10, 20, 30, 40, 50], name='myindex'))

        ddf = dd.from_pandas(df, npartitions=3)
        to_parquet(tmp, ddf)

        ddf2 = read_parquet(tmp)
        assert_eq(ddf, ddf2)
Code example #10
def test_ordering():
    with tmpdir() as tmp:
        tmp = str(tmp)
        df = pd.DataFrame({'a': [1, 2, 3],
                           'b': [10, 20, 30],
                           'c': [100, 200, 300]},
                          index=pd.Index([-1, -2, -3], name='myindex'),
                          columns=['c', 'a', 'b'])
        ddf = dd.from_pandas(df, npartitions=2)
        to_parquet(tmp, ddf)

        pf = fastparquet.ParquetFile(tmp)
        assert pf.columns == ['myindex', 'c', 'a', 'b']

        ddf2 = read_parquet(tmp, index='myindex')
        assert_eq(ddf, ddf2)
Code example #11
File: test_s3.py Project: wikiped/dask
def test_parquet_wstoragepars(s3):
    dd = pytest.importorskip('dask.dataframe')
    pytest.importorskip('fastparquet')
    from dask.dataframe.io.parquet import to_parquet, read_parquet

    import pandas as pd
    import numpy as np

    url = 's3://%s/test.parquet' % test_bucket_name

    data = pd.DataFrame({'i32': np.array([0, 5, 2, 5])})
    df = dd.from_pandas(data, chunksize=500)
    to_parquet(url, df, write_index=False)

    read_parquet(url, storage_options={'default_fill_cache': False})
    assert s3.current().default_fill_cache is False
    read_parquet(url, storage_options={'default_fill_cache': True})
    assert s3.current().default_fill_cache is True

    read_parquet(url, storage_options={'default_block_size': 2**20})
    assert s3.current().default_block_size == 2**20
    with s3.current().open(url + '/_metadata') as f:
        assert f.blocksize == 2**20
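Outside of tests, the same storage_options dict is how s3fs settings such as credentials are passed through read_parquet and to_parquet. A small illustration; the bucket, path and credential values are placeholders:

from dask.dataframe.io.parquet import read_parquet

df = read_parquet('s3://my-bucket/table.parquet',
                  storage_options={'key': 'MY_ACCESS_KEY_ID',
                                   'secret': 'MY_SECRET_ACCESS_KEY'})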
Code example #12
def test_parquet_wstoragepars(s3):
    dd = pytest.importorskip('dask.dataframe')
    pytest.importorskip('fastparquet')
    from dask.dataframe.io.parquet import to_parquet, read_parquet

    import pandas as pd
    import numpy as np

    url = 's3://%s/test.parquet' % test_bucket_name

    data = pd.DataFrame({'i32': np.array([0, 5, 2, 5])})
    df = dd.from_pandas(data, chunksize=500)
    to_parquet(url, df, write_index=False)

    read_parquet(url, storage_options={'default_fill_cache': False})
    assert s3.current().default_fill_cache is False
    read_parquet(url, storage_options={'default_fill_cache': True})
    assert s3.current().default_fill_cache is True

    read_parquet(url, storage_options={'default_block_size': 2**20})
    assert s3.current().default_block_size == 2**20
    with s3.current().open(url + '/_metadata') as f:
        assert f.blocksize == 2**20
Code example #13
File: test_parquet.py Project: gameduell/dask
def fn(tmpdir):
    ddf = dd.from_pandas(df, npartitions=3)
    to_parquet(str(tmpdir), ddf)

    return str(tmpdir)
Code example #14
def fn(tmpdir):
    ddf = dd.from_pandas(df, npartitions=3)
    to_parquet(str(tmpdir), ddf)

    return str(tmpdir)
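As written, `fn` reads like a pytest fixture that writes a module-level `df` to parquet and hands the resulting directory to the tests. A sketch of how it would typically be declared and consumed; the decorator and the consuming test are assumptions, not shown in the source, and `df`, `dd`, `to_parquet` and `read_parquet` are taken from the surrounding test module:

import pytest


@pytest.fixture
def fn(tmpdir):
    ddf = dd.from_pandas(df, npartitions=3)
    to_parquet(str(tmpdir), ddf)
    return str(tmpdir)


def test_read_back(fn):
    # hypothetical consumer: `fn` is the directory the fixture just wrote
    ddf2 = read_parquet(fn)
    assert len(ddf2.compute()) == len(df)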
Code example #15
# Example partition columns: Country, Year

tableName = fileName + '.table/' + fileName + '.parquet/'
parquetDataPath = parquetfilePath + tableName

print('Source = ' + csvfilePath)
print('Output = ' + parquetDataPath)

########## Parquet Converter ################################

fileArray = os.listdir(csvfilePath)

# Browse the CSV files
for filename in fileArray:
    if filename.endswith('.csv'):
        print('file = ' + filename)
        # Create the parquet folder if needed
        if not os.path.isdir(parquetDataPath):
            os.makedirs(parquetDataPath)
            print('Create = ' + parquetDataPath)
        print('Read file = ' + csvfilePath + filename)
        # Read the CSV into pandas
        df = pd.read_csv(csvfilePath + filename, sep=',', quotechar='"')
        print('to parquet = ' + parquetDataPath + filename[:-4] + '.parquet')
        # Wrap the pandas frame in a single-partition dask dataframe
        df = dd.from_pandas(df, npartitions=1)
        # Write to parquet, partitioned on the configured columns
        ddp.to_parquet(df, parquetDataPath, write_index=False, partition_on=parquetPartition)

##############################################################
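The fragment above presupposes a configuration block earlier in the script that defines its paths, the table name, the partition columns, and the `ddp` alias. A hedged sketch of what such a block could look like; every value below is a placeholder and the `ddp` import is a guess, since the fragment never shows it:

import os
import pandas as pd
import dask.dataframe as dd
import dask.dataframe as ddp  # guess: alias used for to_parquet in the fragment

csvfilePath = '/data/csv/'              # folder containing the source CSV files
parquetfilePath = '/data/parquet/'      # root folder for the converted tables
fileName = 'mydata'                     # logical table name
parquetPartition = ['Country', 'Year']  # partition columns, per the comment above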