def test_filter():
    """Keep only the even values and check that exactly three survive."""
    evens = Context().parallelize(
        [1, 2, 2, 4, 1, 3, 5, 9],
        3,
    ).filter(lambda v: v % 2 == 0)
    print(evens.collect())
    print(evens.count())
    assert evens.count() == 3
def test_s3_textFile_loop():
    """Write an RDD to S3 as a text file, read it back, and verify equality."""
    random.seed()
    # int() is required: the ':d' presentation type raises ValueError for floats.
    fn = f'{S3_TEST_PATH}/pysparkling_test_{int(random.random() * 999999.0):d}.txt'
    rdd = Context().parallelize(f'Line {n}' for n in range(200))
    rdd.saveAsTextFile(fn)
    rdd_check = Context().textFile(fn)
    assert (rdd.count() == rdd_check.count()
            and all(e1 == e2
                    for e1, e2 in zip(rdd.collect(), rdd_check.collect())))
def test_gs_textFile_loop():
    """Write an RDD to Google Cloud Storage as text and verify the read-back."""
    random.seed()
    # int() is required: '{:d}' raises ValueError when given a float.
    fn = '{}/pysparkling_test_{:d}.txt'.format(
        GS_TEST_PATH, int(random.random() * 999999.0))
    rdd = Context().parallelize('Line {0}'.format(n) for n in range(200))
    rdd.saveAsTextFile(fn)
    rdd_check = Context().textFile(fn)
    assert (rdd.count() == rdd_check.count()
            and all(e1 == e2
                    for e1, e2 in zip(rdd.collect(), rdd_check.collect())))
def test_hdfs_textFile_loop():
    """Round-trip a small RDD through an HDFS text file."""
    random.seed()
    # int() is required: the ':d' presentation type rejects floats.
    fn = f'{HDFS_TEST_PATH}/pysparkling_test_{int(random.random() * 999999.0):d}.txt'
    print(f'HDFS test file: {fn}')
    rdd = Context().parallelize(f'Hello World {x}' for x in range(10))
    rdd.saveAsTextFile(fn)
    read_rdd = Context().textFile(fn)
    print(rdd.collect())
    print(read_rdd.collect())
    assert (rdd.count() == read_rdd.count()
            and all(r1 == r2
                    for r1, r2 in zip(rdd.collect(), read_rdd.collect())))
def test_gs_textFile_loop():
    """Write an RDD to Google Cloud Storage as text and verify the read-back."""
    random.seed()
    # int() is required: '{:d}' raises ValueError when given a float.
    fn = '{}/pysparkling_test_{:d}.txt'.format(
        GS_TEST_PATH, int(random.random() * 999999.0))
    rdd = Context().parallelize('Line {0}'.format(n) for n in range(200))
    rdd.saveAsTextFile(fn)
    rdd_check = Context().textFile(fn)
    assert (
        rdd.count() == rdd_check.count()
        and all(e1 == e2 for e1, e2 in zip(rdd.collect(), rdd_check.collect()))
    )
def test_hdfs_textFile_loop():
    """Round-trip a small RDD through an HDFS text file."""
    random.seed()
    # int() is required: the ':d' presentation type rejects floats.
    fn = '{}/pysparkling_test_{:d}.txt'.format(
        HDFS_TEST_PATH, int(random.random() * 999999.0))
    print('HDFS test file: {0}'.format(fn))
    rdd = Context().parallelize('Hello World {0}'.format(x) for x in range(10))
    rdd.saveAsTextFile(fn)
    read_rdd = Context().textFile(fn)
    print(rdd.collect())
    print(read_rdd.collect())
    assert (rdd.count() == read_rdd.count()
            and all(r1 == r2
                    for r1, r2 in zip(rdd.collect(), read_rdd.collect())))
def test_s3_textFile_loop():
    """Round-trip an RDD through an S3 text file; skip without credentials."""
    if not AWS_ACCESS_KEY_ID or not S3_TEST_PATH:
        raise SkipTest
    random.seed()
    # All fields must be numbered explicitly: mixing automatic '{}' with
    # manual '{0}' numbering in one format string raises ValueError.
    fn = '{0}/pysparkling_test_{1}.txt'.format(
        S3_TEST_PATH, int(random.random() * 999999.0))
    rdd = Context().parallelize("Line {0}".format(n) for n in range(200))
    rdd.saveAsTextFile(fn)
    rdd_check = Context().textFile(fn)
    assert (rdd.count() == rdd_check.count()
            and all(e1 == e2
                    for e1, e2 in zip(rdd.collect(), rdd_check.collect())))
def test_gs_textFile_loop():
    """Save an RDD to Google Cloud Storage and verify a faithful read-back.

    Skipped unless GCS credentials and a test path are configured.
    """
    if not OAUTH2_CLIENT_ID or not GS_TEST_PATH:
        raise SkipTest
    random.seed()
    unique = int(random.random() * 999999.0)
    target = GS_TEST_PATH + '/pysparkling_test_{0}.txt'.format(unique)
    source = Context().parallelize("Line {0}".format(i) for i in range(200))
    source.saveAsTextFile(target)
    loaded = Context().textFile(target)
    assert (source.count() == loaded.count()
            and all(a == b
                    for a, b in zip(source.collect(), loaded.collect())))
def test_hdfs_textFile_loop():
    """Round-trip a small RDD through an HDFS text file."""
    random.seed()
    # int() is required: the ':d' presentation type rejects floats.
    fn = '{}/pysparkling_test_{:d}.txt'.format(
        HDFS_TEST_PATH, int(random.random() * 999999.0))
    print('HDFS test file: {0}'.format(fn))
    rdd = Context().parallelize('Hello World {0}'.format(x) for x in range(10))
    rdd.saveAsTextFile(fn)
    read_rdd = Context().textFile(fn)
    print(rdd.collect())
    print(read_rdd.collect())
    assert (
        rdd.count() == read_rdd.count()
        and all(r1 == r2 for r1, r2 in zip(rdd.collect(), read_rdd.collect()))
    )
def test_hdfs_textFile_loop():
    """Write an RDD to HDFS as text, read it back, and compare contents.

    Skipped unless an HDFS test path is configured.
    """
    if not HDFS_TEST_PATH:
        raise SkipTest
    random.seed()
    unique = int(random.random() * 999999.0)
    target = HDFS_TEST_PATH + '/pysparkling_test_{0}.txt'.format(unique)
    written = Context().parallelize('Hello World {0}'.format(i) for i in range(10))
    written.saveAsTextFile(target)
    restored = Context().textFile(target)
    assert (
        written.count() == restored.count()
        and all(a == b for a, b in zip(written.collect(), restored.collect()))
    )
def test_s3_textFile_loop():
    """Write an RDD to S3 and check the read-back matches.

    Skipped unless AWS credentials and an S3 test path are configured.
    """
    if not AWS_ACCESS_KEY_ID or not S3_TEST_PATH:
        raise SkipTest
    random.seed()
    unique = int(random.random() * 999999.0)
    destination = S3_TEST_PATH + '/pysparkling_test_{0}.txt'.format(unique)
    original = Context().parallelize("Line {0}".format(i) for i in range(200))
    original.saveAsTextFile(destination)
    reread = Context().textFile(destination)
    assert (
        original.count() == reread.count()
        and all(x == y for x, y in zip(original.collect(), reread.collect()))
    )
def test_gs_textFile_loop():
    """Round-trip an RDD through a GCS text file; skip without credentials."""
    if not OAUTH2_CLIENT_ID or not GS_TEST_PATH:
        raise SkipTest
    random.seed()
    # All fields must be numbered explicitly: mixing automatic '{}' with
    # manual '{0}' numbering in one format string raises ValueError.
    fn = '{0}/pysparkling_test_{1}.txt'.format(
        GS_TEST_PATH, int(random.random() * 999999.0)
    )
    rdd = Context().parallelize("Line {0}".format(n) for n in range(200))
    rdd.saveAsTextFile(fn)
    rdd_check = Context().textFile(fn)
    assert (
        rdd.count() == rdd_check.count()
        and all(e1 == e2 for e1, e2 in zip(rdd.collect(), rdd_check.collect()))
    )
def test_parallelize_empty_partitions_at_end():
    """3529 elements over 500 partitions: partition count and element count
    both hold even though the trailing partitions are empty."""
    data = Context().parallelize(range(3529), 500)
    print(data.getNumPartitions())
    data.foreachPartition(lambda chunk: print(sum(1 for _ in chunk)))
    assert data.getNumPartitions() == 500 and data.count() == 3529
# Report how many lines the test modules contain in total and how many of
# them begin with a plain 'import ' statement.
from pysparkling import Context

lines = Context().textFile("tests/*.py")
total_lines = lines.count()
import_lines = lines.filter(lambda l: l.startswith("import ")).count()
print(
    "In tests/*.py: all lines={0}, with import={1}".format(
        total_lines, import_lines
    )
)
# Count every line across the test modules, then the subset starting with
# 'import ', and print both totals.
from pysparkling import Context

rdd = Context().textFile('tests/*.py')
total = rdd.count()
with_import = rdd.filter(lambda line: line.startswith("import ")).count()
print(f'In tests/*.py: all lines={total}, with import={with_import}')
from __future__ import print_function

from pysparkling import Context

# Tally all lines in the test modules and those beginning with 'import '.
rdd = Context().textFile('tests/*.py')
all_lines = rdd.count()
import_count = rdd.filter(lambda l: l.startswith('import ')).count()
print('In tests/*.py: all lines={0}, with import={1}'.format(
    all_lines,
    import_count,
))
# Print a summary of line counts across the test modules: every line, and
# those that start with 'import '.
from pysparkling import Context

source = Context().textFile('tests/*.py')
line_total = source.count()
import_total = source.filter(lambda l: l.startswith('import ')).count()
print('In tests/*.py: all lines={0}, with import={1}'.format(
    line_total, import_total))
def test_distinct():
    """distinct() collapses duplicates: [1, 2, 2, 4, 1] has 3 unique values."""
    deduped = Context().parallelize([1, 2, 2, 4, 1]).distinct()
    assert deduped.count() == 3
def test_count_partitions():
    """count() stays correct when the data is split across two partitions."""
    partitioned = Context().parallelize([1, 2, 3], 2)
    print(partitioned.collect())
    partitioned.foreach(print)
    assert partitioned.count() == 3
def test_count():
    """count() reports the number of parallelized elements."""
    data = Context().parallelize([1, 2, 3])
    assert data.count() == 3