def write_orc(
    vineyard_socket,
    path,
    stream_id,
    storage_options,
    write_options,
    proc_num,
    proc_index,
):
    client = vineyard.connect(vineyard_socket)
    streams = client.get(stream_id)
    if len(streams) != proc_num or streams[proc_index] is None:
        raise ValueError(
            f"Fetch stream error with proc_num={proc_num},proc_index={proc_index}"
        )
    instream: DataframeStream = streams[proc_index]
    reader = instream.open_reader(client)

    writer = None
    path += f"_{proc_index}"
    with fsspec.open(path, "wb", **storage_options) as f:
        while True:
            try:
                batch = reader.next()
            except (StopIteration, vineyard.StreamDrainedException):
                break
            if writer is None:
                # build the ORC schema from the first record batch
                schema = {}
                for field in batch.schema:
                    schema[field.name] = orc_type(field.type)
                writer = pyorc.Writer(f, pyorc.Struct(**schema))
            writer.writerows(batch.to_pandas().itertuples(False, None))
        if writer is not None:
            writer.close()


def write_local_orc(vineyard_socket, stream_id, path, proc_num, proc_index):
    client = vineyard.connect(vineyard_socket)
    streams = client.get(stream_id)
    if len(streams) != proc_num or streams[proc_index] is None:
        raise ValueError(
            f'Fetch stream error with proc_num={proc_num},proc_index={proc_index}'
        )
    instream = streams[proc_index]
    reader = instream.open_reader(client)

    writer = None
    with open(path, 'wb') as f:
        while True:
            try:
                buf = reader.next()
            except vineyard.StreamDrainedException:
                break
            buf_reader = pa.ipc.open_stream(buf)
            if writer is None:
                # build the ORC schema from the stream's Arrow schema
                schema = {}
                for field in buf_reader.schema:
                    schema[field.name] = orc_type(field.type)
                writer = pyorc.Writer(f, pyorc.Struct(**schema))
            while True:
                try:
                    batch = buf_reader.read_next_batch()
                except StopIteration:
                    break
                df = batch.to_pandas()
                writer.writerows(df.itertuples(False, None))
        if writer is not None:
            writer.close()


def write_hdfs_orc(vineyard_socket, stream_id, path, proc_num, proc_index):
    client = vineyard.connect(vineyard_socket)
    streams = client.get(stream_id)
    if len(streams) != proc_num or streams[proc_index] is None:
        raise ValueError(
            f'Fetch stream error with proc_num={proc_num},proc_index={proc_index}'
        )
    instream = streams[proc_index]
    reader = instream.open_reader(client)

    host, port = urlparse(path).netloc.split(':')
    hdfs = HDFileSystem(host=host, port=int(port))
    path = urlparse(path).path

    writer = None
    with hdfs.open(path, 'wb') as f:
        while True:
            try:
                buf = reader.next()
            except vineyard.StreamDrainedException:
                break
            buf_reader = pa.ipc.open_stream(buf)
            if writer is None:
                # build the ORC schema from the stream's Arrow schema
                schema = {}
                for field in buf_reader.schema:
                    schema[field.name] = orc_type(field.type)
                writer = pyorc.Writer(f, pyorc.Struct(**schema))
            for batch in buf_reader:
                df = batch.to_pandas()
                writer.writerows(df.itertuples(False, None))
        if writer is not None:
            writer.close()


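# The writers above call an orc_type() helper that is not shown in this
# section. A minimal sketch of such a helper, assuming it maps a pyarrow
# DataType to the corresponding pyorc type and covers only the common
# primitive types (the actual helper may handle more):
import pyarrow as pa
import pyorc


def orc_type(field_type):
    # Map a pyarrow DataType to a pyorc type object (hypothetical sketch).
    if pa.types.is_boolean(field_type):
        return pyorc.Boolean()
    if pa.types.is_int32(field_type):
        return pyorc.Int()
    if pa.types.is_int64(field_type):
        return pyorc.BigInt()
    if pa.types.is_float32(field_type):
        return pyorc.Float()
    if pa.types.is_float64(field_type):
        return pyorc.Double()
    if pa.types.is_string(field_type):
        return pyorc.String()
    if pa.types.is_date32(field_type):
        return pyorc.Date()
    if pa.types.is_timestamp(field_type):
        return pyorc.Timestamp()
    raise ValueError(f"Unsupported arrow type for ORC: {field_type}")

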
def get_orc_schema(df):
    ordered_dict = OrderedDict(
        (col_name, get_orc_dtype_info(col_dtype))
        for col_name, col_dtype in df.dtypes.items()
    )
    schema = pyorc.Struct(**ordered_dict)
    return schema


def get_orc_schema(df, arrow_table_schema=None):
    if arrow_table_schema is None:
        ordered_dict = OrderedDict(
            (col_name, get_orc_dtype_info(col_dtype))
            for col_name, col_dtype in df.dtypes.items()
        )
    else:
        ordered_dict = OrderedDict(
            (field.name, get_arrow_dtype_info_for_pyorc(field.type))
            for field in arrow_table_schema
        )
    schema = pyorc.Struct(**ordered_dict)
    return schema


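# get_orc_dtype_info() is referenced but not defined in this section. A
# self-contained usage sketch of the pandas-dtype path, assuming a simple
# dtype-string-to-pyorc mapping (hypothetical, not the library's mapping):
from collections import OrderedDict
from io import BytesIO

import pandas as pd
import pyorc


def get_orc_dtype_info(col_dtype):
    # Assumed mapping from pandas dtype to pyorc type for illustration only.
    mapping = {
        "int64": pyorc.BigInt(),
        "float64": pyorc.Double(),
        "bool": pyorc.Boolean(),
        "object": pyorc.String(),
    }
    return mapping[str(col_dtype)]


df = pd.DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
schema = pyorc.Struct(
    **OrderedDict(
        (name, get_orc_dtype_info(dtype)) for name, dtype in df.dtypes.items()
    )
)
buff = BytesIO()
with pyorc.Writer(buff, schema) as writer:
    writer.writerows(df.itertuples(index=False, name=None))

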
def test_empty_statistics():
    buff = BytesIO()
    orc_schema = po.Struct(
        a=po.BigInt(),
        b=po.Double(),
        c=po.String(),
        d=po.Decimal(11, 2),
        e=po.Date(),
        f=po.Timestamp(),
        g=po.Boolean(),
        h=po.Binary(),
        i=po.BigInt(),
        # One column with a non-null value, else cudf/pyorc readers crash
    )
    data = tuple([None] * (len(orc_schema.fields) - 1) + [1])
    with po.Writer(buff, orc_schema) as writer:
        writer.write(data)

    got = cudf.io.orc.read_orc_statistics([buff])

    # Check for both file and stripe stats
    for stats in got:
        # Similar expected stats for the first 6 columns in this case
        for col_name in ascii_lowercase[:6]:
            assert stats[0][col_name].get("number_of_values") == 0
            assert stats[0][col_name].get("has_null") is True
            assert stats[0][col_name].get("minimum") is None
            assert stats[0][col_name].get("maximum") is None
        for col_name in ascii_lowercase[:3]:
            assert stats[0][col_name].get("sum") == 0
        # Sum for decimal column is a string
        assert stats[0]["d"].get("sum") == "0"

        assert stats[0]["g"].get("number_of_values") == 0
        assert stats[0]["g"].get("has_null") is True
        assert stats[0]["g"].get("true_count") == 0
        assert stats[0]["g"].get("false_count") == 0

        assert stats[0]["h"].get("number_of_values") == 0
        assert stats[0]["h"].get("has_null") is True
        assert stats[0]["h"].get("sum") == 0

        assert stats[0]["i"].get("number_of_values") == 1
        assert stats[0]["i"].get("has_null") is False
        assert stats[0]["i"].get("minimum") == 1
        assert stats[0]["i"].get("maximum") == 1
        assert stats[0]["i"].get("sum") == 1


def test_statistics_sum_overflow():
    maxint64 = np.iinfo(np.int64).max
    minint64 = np.iinfo(np.int64).min

    buff = BytesIO()
    with po.Writer(
        buff, po.Struct(a=po.BigInt(), b=po.BigInt(), c=po.BigInt())
    ) as writer:
        writer.write((maxint64, minint64, minint64))
        writer.write((1, -1, 1))

    file_stats, stripe_stats = cudf.io.orc.read_orc_statistics([buff])

    assert file_stats[0]["a"].get("sum") is None
    assert file_stats[0]["b"].get("sum") is None
    assert file_stats[0]["c"].get("sum") == minint64 + 1

    assert stripe_stats[0]["a"].get("sum") is None
    assert stripe_stats[0]["b"].get("sum") is None
    assert stripe_stats[0]["c"].get("sum") == minint64 + 1


def test_orc_read_skiprows(tmpdir):
    buff = BytesIO()
    df = pd.DataFrame(
        {"a": [1, 0, 1, 0, None, 1, 1, 1, 0, None, 0, 0, 1, 1, 1, 1]},
        dtype=pd.BooleanDtype(),
    )
    writer = pyorc.Writer(buff, pyorc.Struct(a=pyorc.Boolean()))
    tuples = list(
        map(
            lambda x: (None,) if x[0] is pd.NA else x,
            list(df.itertuples(index=False, name=None)),
        )
    )
    writer.writerows(tuples)
    writer.close()

    skiprows = 10

    expected = cudf.read_orc(buff)[skiprows::].reset_index(drop=True)
    got = cudf.read_orc(buff, skiprows=skiprows)

    assert_eq(expected, got)


def generate_list_struct_buff(size=28000):
    rd = random.Random(1)
    np.random.seed(seed=1)

    buff = BytesIO()

    schema = {
        "lvl3_list": po.Array(po.Array(po.Array(po.BigInt()))),
        "lvl1_list": po.Array(po.BigInt()),
        "lvl1_struct": po.Struct(**{"a": po.BigInt(), "b": po.BigInt()}),
        "lvl2_struct": po.Struct(
            **{
                "a": po.BigInt(),
                "lvl1_struct": po.Struct(
                    **{"c": po.BigInt(), "d": po.BigInt()}
                ),
            }
        ),
        "list_nests_struct": po.Array(
            po.Array(po.Struct(**{"a": po.BigInt(), "b": po.BigInt()}))
        ),
        "struct_nests_list": po.Struct(
            **{
                "struct": po.Struct(**{"a": po.BigInt(), "b": po.BigInt()}),
                "list": po.Array(po.BigInt()),
            }
        ),
    }

    schema = po.Struct(**schema)

    lvl3_list = [
        rd.choice(
            [
                None,
                [
                    [
                        [
                            rd.choice([None, np.random.randint(1, 3)])
                            for z in range(np.random.randint(1, 3))
                        ]
                        for z in range(np.random.randint(0, 3))
                    ]
                    for y in range(np.random.randint(0, 3))
                ],
            ]
        )
        for x in range(size)
    ]
    lvl1_list = [
        [
            rd.choice([None, np.random.randint(0, 3)])
            for y in range(np.random.randint(1, 4))
        ]
        for x in range(size)
    ]
    lvl1_struct = [
        rd.choice([None, (np.random.randint(0, 3), np.random.randint(0, 3))])
        for x in range(size)
    ]
    lvl2_struct = [
        rd.choice(
            [
                None,
                (
                    rd.choice([None, np.random.randint(0, 3)]),
                    (
                        rd.choice([None, np.random.randint(0, 3)]),
                        np.random.randint(0, 3),
                    ),
                ),
            ]
        )
        for x in range(size)
    ]
    list_nests_struct = [
        [
            [rd.choice(lvl1_struct), rd.choice(lvl1_struct)]
            for y in range(np.random.randint(1, 4))
        ]
        for x in range(size)
    ]
    struct_nests_list = [(lvl1_struct[x], lvl1_list[x]) for x in range(size)]

    df = pd.DataFrame(
        {
            "lvl3_list": lvl3_list,
            "lvl1_list": lvl1_list,
            "lvl1_struct": lvl1_struct,
            "lvl2_struct": lvl2_struct,
            "list_nests_struct": list_nests_struct,
            "struct_nests_list": struct_nests_list,
        }
    )

    writer = po.Writer(buff, schema, stripe_size=1024)
    tuples = list(
        map(
            lambda x: (None,) if x[0] is pd.NA else x,
            list(df.itertuples(index=False, name=None)),
        )
    )
    writer.writerows(tuples)
    writer.close()

    return buff


def gen_map_buff(size=10000):
    from string import ascii_letters as al

    rd = random.Random(1)
    np.random.seed(seed=1)

    buff = BytesIO()

    schema = {
        "lvl1_map": po.Map(key=po.String(), value=po.BigInt()),
        "lvl2_map": po.Map(key=po.String(), value=po.Array(po.BigInt())),
        "lvl2_struct_map": po.Map(
            key=po.String(),
            value=po.Struct(**{"a": po.BigInt(), "b": po.BigInt()}),
        ),
    }

    schema = po.Struct(**schema)

    lvl1_map = [
        rd.choice(
            [
                None,
                [
                    (
                        rd.choice(al),
                        rd.choice([None, np.random.randint(1, 1500)]),
                    )
                    for y in range(2)
                ],
            ]
        )
        for x in range(size)
    ]
    lvl2_map = [
        rd.choice(
            [
                None,
                [
                    (
                        rd.choice(al),
                        rd.choice(
                            [
                                None,
                                [
                                    rd.choice([None, np.random.randint(1, 1500)])
                                    for z in range(5)
                                ],
                            ]
                        ),
                    )
                    for y in range(2)
                ],
            ]
        )
        for x in range(size)
    ]
    lvl2_struct_map = [
        rd.choice(
            [
                None,
                [
                    (
                        rd.choice(al),
                        rd.choice(
                            [
                                None,
                                (
                                    rd.choice([None, np.random.randint(1, 1500)]),
                                    rd.choice([None, np.random.randint(1, 1500)]),
                                ),
                            ]
                        ),
                    )
                    for y in range(2)
                ],
            ]
        )
        for x in range(size)
    ]

    pdf = pd.DataFrame(
        {
            "lvl1_map": lvl1_map,
            "lvl2_map": lvl2_map,
            "lvl2_struct_map": lvl2_struct_map,
        }
    )

    writer = po.Writer(
        buff, schema, stripe_size=1024, compression=po.CompressionKind.NONE
    )
    tuples = list(
        map(
            lambda x: (None,) if x[0] is pd.NA else x,
            list(pdf.itertuples(index=False, name=None)),
        )
    )
    writer.writerows(tuples)
    writer.close()

    return buff