def test_mock_output_stream():
    # Make sure that the MockOutputStream and the BufferOutputStream record the
    # same size

    # 10 bytes
    val = b'dataabcdef'

    f1 = pa.MockOutputStream()
    f2 = pa.BufferOutputStream()

    K = 1000
    for i in range(K):
        f1.write(val)
        f2.write(val)

    assert f1.size() == len(f2.getvalue())

    # Do the same test with a pandas DataFrame
    val = pd.DataFrame({'a': [1, 2, 3]})
    record_batch = pa.RecordBatch.from_pandas(val)

    f1 = pa.MockOutputStream()
    f2 = pa.BufferOutputStream()

    stream_writer1 = pa.RecordBatchStreamWriter(f1, record_batch.schema)
    stream_writer2 = pa.RecordBatchStreamWriter(f2, record_batch.schema)

    stream_writer1.write_batch(record_batch)
    stream_writer2.write_batch(record_batch)
    stream_writer1.close()
    stream_writer2.close()

    assert f1.size() == len(f2.getvalue())
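The same two-pass pattern works without Plasma: measure with MockOutputStream, then allocate a buffer of exactly that size and write into it for real. A minimal sketch, assuming nothing beyond pyarrow and pandas:

import pandas as pd
import pyarrow as pa

df = pd.DataFrame({'a': [1, 2, 3]})
record_batch = pa.RecordBatch.from_pandas(df)

# Pass 1: "write" to a MockOutputStream, which only counts bytes.
mock_sink = pa.MockOutputStream()
with pa.RecordBatchStreamWriter(mock_sink, record_batch.schema) as writer:
    writer.write_batch(record_batch)
data_size = mock_sink.size()

# Pass 2: allocate exactly that many bytes and do the real write.
buf = pa.allocate_buffer(data_size)
stream = pa.FixedSizeBufferWriter(buf)
with pa.RecordBatchStreamWriter(stream, record_batch.schema) as writer:
    writer.write_batch(record_batch)

# Read the batch back out of the fixed-size buffer.
reader = pa.RecordBatchStreamReader(pa.BufferReader(buf))
assert reader.read_next_batch().to_pandas().equals(df)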
Example #2
def write_to_plasma(df, name):
    print("Connecting to Plasma store...")
    client = plasma.connect("/tmp/plasma")
    # Convert the Pandas DataFrame into a PyArrow RecordBatch
    print("Converting df to recordbatch...")
    record_batch = pa.RecordBatch.from_pandas(df)
    # Create the Plasma object from the PyArrow RecordBatch. Most of the work here
    # is done to determine the size of buffer to request from the object store.
    print("Determine size of buffer to request etc...")
    object_id = plasma.ObjectID(np.random.bytes(20))
    mock_sink = pa.MockOutputStream()
    stream_writer = pa.RecordBatchStreamWriter(mock_sink, record_batch.schema)
    stream_writer.write_batch(record_batch)
    stream_writer.close()
    data_size = mock_sink.size()
    buf = client.create(object_id, data_size)
    # Write the PyArrow RecordBatch to Plasma
    print("Write the recordbatch to Plasma...")
    stream = pa.FixedSizeBufferWriter(buf)
    stream_writer = pa.RecordBatchStreamWriter(stream, record_batch.schema)
    stream_writer.write_batch(record_batch)
    stream_writer.close()
    # Seal the Plasma object
    print("Sealing the plasma object in store")
    client.seal(object_id)
    # end the client
    print("Disconnecting from plasma store")
    client.disconnect()
    # Write the new object ID
    print("Storing the object_id to plasma_store")
    with open("plasma_state.pkl", "rb") as f:
        plasma_state = pickle.load(f)
    plasma_state[name] = object_id
    with open("plasma_state.pkl", "wb") as f:
        pickle.dump(plasma_state, f)
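The reader side of write_to_plasma is not shown above; a minimal hypothetical counterpart (the function name, socket path and pickle file simply mirror the assumptions made in write_to_plasma):

def read_from_plasma(name):
    # Hypothetical counterpart to write_to_plasma: look up the ObjectID
    # recorded under `name` and rebuild the DataFrame from the stored batch.
    with open("plasma_state.pkl", "rb") as f:
        plasma_state = pickle.load(f)
    object_id = plasma_state[name]

    client = plasma.connect("/tmp/plasma")
    [buf] = client.get_buffers([object_id])
    reader = pa.RecordBatchStreamReader(pa.BufferReader(buf))
    df = reader.read_next_batch().to_pandas()
    client.disconnect()
    return df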
Example #3
    def write_df(self, df, df_name):
        """
        Write a pandas dataframe to a running
        plasma store. Copied from:
        https://github.com/apache/arrow/blob/master/python/doc/source/plasma.rst
        """
        # Convert the Pandas DataFrame into a PyArrow RecordBatch
        record_batch = pa.RecordBatch.from_pandas(df)

        # Create the Plasma object from the PyArrow RecordBatch.
        # Most of the work here is done to determine the size of
        # buffer to request from the object store.
        object_id = plasma.ObjectID(np.random.bytes(20))

        # Update our little mapping from names to ObjectIDs
        self.object_names[df_name] = object_id

        # Figure out the size to allocate and create the object
        mock_sink = pa.MockOutputStream()
        stream_writer = pa.RecordBatchStreamWriter(mock_sink,
                                                   record_batch.schema)
        stream_writer.write_batch(record_batch)
        stream_writer.close()
        data_size = mock_sink.size()
        print(data_size)
        buf = self.client.create(object_id, data_size)

        # Write the PyArrow RecordBatch to Plasma
        stream = pa.FixedSizeBufferWriter(buf)
        stream_writer = pa.RecordBatchStreamWriter(stream, record_batch.schema)
        stream_writer.write_batch(record_batch)
        stream_writer.close()

        # Seal the Plasma object
        self.client.seal(object_id)
Example #4
def write_pandas_plasma():
  import pyarrow as pa
  import pyarrow.plasma as plasma
  import pandas as pd

  # Create a Pandas DataFrame
  d = {'one': pd.Series([1., 2., 3.], index=['a', 'b', 'c']),
       'two': pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd'])}
  df = pd.DataFrame(d)

  # Convert the Pandas DataFrame into a PyArrow RecordBatch
  record_batch = pa.RecordBatch.from_pandas(df)

  # Create the Plasma object from the PyArrow RecordBatch. Most of the work here
  # is done to determine the size of buffer to request from the object store.
  
  # Connect to a running Plasma store (socket path assumed to be /tmp/plasma)
  client = plasma.connect("/tmp/plasma")

  # object_id = plasma.ObjectID(np.random.bytes(20))
  object_id = plasma.ObjectID(b'az' * 10)
  mock_sink = pa.MockOutputStream()
  stream_writer = pa.RecordBatchStreamWriter(mock_sink, record_batch.schema)
  stream_writer.write_batch(record_batch)
  stream_writer.close()
  data_size = mock_sink.size()
  buf = client.create(object_id, data_size)

  # Write the PyArrow RecordBatch to Plasma
  stream = pa.FixedSizeBufferWriter(buf)
  stream_writer = pa.RecordBatchStreamWriter(stream, record_batch.schema)
  stream_writer.write_batch(record_batch)
  stream_writer.close()
  client.seal(object_id)
Example #5
    def put_dataframe(self, dataframe, object_id=None, overwrite=True):
        if object_id is None:
            object_id = PlasmaConnector.generate_object_id()

        # Delete object if exists
        if overwrite:
            self.client.delete([object_id])
        else:
            if self.client.contains(object_id):
                raise ValueError("object id already exists.")

        # Convert the Pandas DataFrame into a PyArrow RecordBatch
        record_batch = pa.RecordBatch.from_pandas(dataframe)

        # Create the Plasma object from the PyArrow RecordBatch. Most of the work here
        # is done to determine the size of buffer to request from the object store.
        mock_sink = pa.MockOutputStream()
        stream_writer = pa.RecordBatchStreamWriter(mock_sink,
                                                   record_batch.schema)
        stream_writer.write_batch(record_batch)
        stream_writer.close()

        data_size = mock_sink.size()
        buffer = self.client.create(object_id, data_size)

        # Write the PyArrow RecordBatch to Plasma
        stream = pa.FixedSizeBufferWriter(buffer)
        stream_writer = pa.RecordBatchStreamWriter(stream, record_batch.schema)
        stream_writer.write_batch(record_batch)
        stream_writer.close()

        # Seal the Plasma object (make it immutable and usable by other clients)
        self.client.seal(object_id)

        return object_id
Example #6
    def cache_data_frame(self, df, key, force_eviction=False):
        object_key = self.get_cache_key(key)
        object_id = plasma.ObjectID(object_key)
        if self.client.contains(object_id):
            string = 'DataWriter: Object exists in cache'
            if force_eviction:
                print('{} - evicting'.format(string))
                # delete() removes the sealed object so it can be re-created below
                self.client.delete([object_id])
            else:
                raise Exception(string)

        record_batch = pa.RecordBatch.from_pandas(df)

        # Work out how large our data frame is
        mock_sink = pa.MockOutputStream()
        stream_writer = pa.RecordBatchStreamWriter(mock_sink, record_batch.schema)
        stream_writer.write_batch(record_batch)
        stream_writer.close()
        data_size = mock_sink.size()
        print('DataWriter: Data size is {}'.format(format_size(data_size)))

        # Actually write the data frame to the cache
        buf = self.client.create(object_id, data_size)
        stream = pa.FixedSizeBufferWriter(buf)
        stream_writer = pa.RecordBatchStreamWriter(stream, record_batch.schema)
        stream_writer.write_batch(record_batch)
        stream_writer.close()

        # Make item available to other processes
        self.client.seal(object_id)
Example #7
    def test_buffer_lifetime(self):
        # ARROW-2195
        arr = pa.array([1, 12, 23, 3, 34], pa.int32())
        batch = pa.RecordBatch.from_arrays([arr], ['field1'])

        # Serialize RecordBatch into Plasma store
        sink = pa.MockOutputStream()
        writer = pa.RecordBatchStreamWriter(sink, batch.schema)
        writer.write_batch(batch)
        writer.close()

        object_id = random_object_id()
        data_buffer = self.plasma_client.create(object_id, sink.size())
        stream = pa.FixedSizeBufferWriter(data_buffer)
        writer = pa.RecordBatchStreamWriter(stream, batch.schema)
        writer.write_batch(batch)
        writer.close()
        self.plasma_client.seal(object_id)
        del data_buffer

        # Unserialize RecordBatch from Plasma store
        [data_buffer] = self.plasma_client2.get_buffers([object_id])
        reader = pa.RecordBatchStreamReader(data_buffer)
        read_batch = reader.read_next_batch()
        # Lose reference to returned buffer.  The RecordBatch must still
        # be backed by valid memory.
        del data_buffer, reader

        assert read_batch.equals(batch)
Example #8
    def test_store_pandas_dataframe(self):
        import pyarrow.plasma as plasma
        d = {
            'one': pd.Series([1., 2., 3.], index=['a', 'b', 'c']),
            'two': pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd'])
        }
        df = pd.DataFrame(d)

        # Write the DataFrame.
        record_batch = pa.RecordBatch.from_pandas(df)
        # Determine the size.
        s = pa.MockOutputStream()
        stream_writer = pa.RecordBatchStreamWriter(s, record_batch.schema)
        stream_writer.write_batch(record_batch)
        data_size = s.size()
        object_id = plasma.ObjectID(np.random.bytes(20))

        buf = self.plasma_client.create(object_id, data_size)
        stream = pa.FixedSizeBufferWriter(buf)
        stream_writer = pa.RecordBatchStreamWriter(stream, record_batch.schema)
        stream_writer.write_batch(record_batch)

        self.plasma_client.seal(object_id)

        # Read the DataFrame.
        [data] = self.plasma_client.get_buffers([object_id])
        reader = pa.RecordBatchStreamReader(pa.BufferReader(data))
        result = reader.read_next_batch().to_pandas()

        pd.testing.assert_frame_equal(df, result)
Example #9
def put_df(client, object_num, df):
    """Saves a data frame into the plasma store.

    Code adjusted from:
    https://github.com/apache/arrow/blob/master/python/examples/plasma/sorting/sort_df.py

    Args:
        client: Plasma store connection.
        object_num (int): Object number to be used to generate a corresponding object id.
        df (pandas): Data frame to be saved.
    """
    record_batch = pa.RecordBatch.from_pandas(df)

    # Get size of record batch and schema
    mock_sink = pa.MockOutputStream()
    stream_writer = pa.RecordBatchStreamWriter(mock_sink, record_batch.schema)
    stream_writer.write_batch(record_batch)
    data_size = mock_sink.size()

    # Generate an ID and allocate a buffer in the object store for the
    # serialized DataFrame
    object_id = get_object_id(object_num)
    buf = client.create(object_id, data_size)

    # Write the serialized DataFrame to the object store
    sink = pa.FixedSizeBufferWriter(buf)
    stream_writer = pa.RecordBatchStreamWriter(sink, record_batch.schema)
    stream_writer.write_batch(record_batch)

    # Seal the object
    client.seal(object_id)
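The sort_df.py example referenced above also reads data frames back out of the store; a minimal sketch of such a reader, assuming the same client and get_object_id helper used for writing:

def get_df(client, object_num):
    """Sketch of the read side matching put_df above."""
    object_id = get_object_id(object_num)
    [buf] = client.get_buffers([object_id])
    reader = pa.RecordBatchStreamReader(pa.BufferReader(buf))
    return reader.read_next_batch().to_pandas()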
Example #10
def transfer(df):
    """
    Function that makes the transfer to the worker

    df is passed in via the measure wrapper
    """

    client = plasma.connect("/tmp/sock/plasma.sock")

    # Convert the Pandas DataFrame into a PyArrow RecordBatch
    record_batch = pa.RecordBatch.from_pandas(df)

    # Create the Plasma object from the PyArrow RecordBatch. Most of the work here
    # is done to determine the size of buffer to request from the object store.
    object_id = plasma.ObjectID(np.random.bytes(20))
    mock_sink = pa.MockOutputStream()
    stream_writer = pa.RecordBatchStreamWriter(mock_sink, record_batch.schema)
    stream_writer.write_batch(record_batch)
    stream_writer.close()
    data_size = mock_sink.size()
    buf = client.create(object_id, data_size)

    # Write the PyArrow RecordBatch to Plasma
    stream = pa.FixedSizeBufferWriter(buf)
    stream_writer = pa.RecordBatchStreamWriter(stream, record_batch.schema)
    stream_writer.write_batch(record_batch)
    stream_writer.close()

    # Seal the Plasma object
    client.seal(object_id)

    object_id_str = re.search(r'ObjectID\((.*)\)', str(object_id))

    task = read.delay(object_id_str.group(1))
    task.wait()
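The regex above pulls the hex digest out of repr(object_id), and read.delay hands it to a worker task defined elsewhere. A minimal sketch of what that worker-side read could look like (the function name is hypothetical; bytes.fromhex turns the digest back into the 20 raw bytes an ObjectID needs):

def read_worker(object_id_hex):
    # Hypothetical body for the `read` task used above.
    client = plasma.connect("/tmp/sock/plasma.sock")
    object_id = plasma.ObjectID(bytes.fromhex(object_id_hex))
    [buf] = client.get_buffers([object_id])
    reader = pa.RecordBatchStreamReader(pa.BufferReader(buf))
    df = reader.read_next_batch().to_pandas()
    client.disconnect()
    return df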
Example #11
def write_batches(batches, as_table=False):
    with format_fixture._get_writer(pa.MockOutputStream(),
                                    schema) as writer:
        if as_table:
            table = pa.Table.from_batches(batches)
            writer.write_table(table)
        else:
            for batch in batches:
                writer.write_batch(batch)
        return writer.stats
Example #12
    def _make_flight_info(self, key, descriptor, table):
        location = pyarrow.flight.Location.for_grpc_tcp(self.host, self.port)
        endpoints = [pyarrow.flight.FlightEndpoint(repr(key), [location]), ]

        mock_sink = pyarrow.MockOutputStream()
        stream_writer = pyarrow.RecordBatchStreamWriter(
            mock_sink, table.schema)
        stream_writer.write_table(table)
        stream_writer.close()
        data_size = mock_sink.size()

        return pyarrow.flight.FlightInfo(table.schema,
                                         descriptor, endpoints,
                                         table.num_rows, data_size)
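Here MockOutputStream provides the total_bytes advertised in the FlightInfo. A Flight client can see that number (and fetch the data) without any Plasma involvement; a minimal sketch, where the URI and descriptor path are assumptions rather than part of the server code above:

import pyarrow.flight as flight

client = flight.connect("grpc+tcp://localhost:8815")
descriptor = flight.FlightDescriptor.for_path("example.parquet")
info = client.get_flight_info(descriptor)
print(info.total_records, info.total_bytes)  # total_bytes was computed with MockOutputStream

for endpoint in info.endpoints:
    reader = client.do_get(endpoint.ticket)
    table = reader.read_all()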
Example #13
    def _make_flight_info(self, key, descriptor: FlightDescriptor, table):
        """NOT USED NOW"""
        if self.tls_certificates:
            location = Location.for_grpc_tls(self.host, self.port)
        else:
            location = Location.for_grpc_tcp(self.host, self.port)
        endpoints = [
            FlightEndpoint(repr(key), [location]),
        ]

        mock_sink = pyarrow.MockOutputStream()
        stream_writer = pyarrow.RecordBatchStreamWriter(
            mock_sink, table.schema)
        stream_writer.write_table(table)
        stream_writer.close()
        data_size = mock_sink.size()

        return FlightInfo(table.schema, descriptor, endpoints, table.num_rows,
                          data_size)
Example #14
def stream_testing():
    # connect to plasma
    client = plasma.connect("/tmp/store", "", 0)

    # csv -> table -> record batch
    table = arrow_csv.read_csv('../data/million.csv')
    record_batch = table.to_batches()[0]

    # create an object id
    object_id = plasma.ObjectID(np.random.bytes(20))

    # record batch -> stream writer
    mock_sink = pa.MockOutputStream()
    stream_writer = pa.RecordBatchStreamWriter(mock_sink, record_batch.schema)
    stream_writer.write_batch(record_batch)
    stream_writer.close()

    # create buffer in plasma client
    data_size = mock_sink.size()
    buf = client.create(object_id, data_size)

    # stream writer -> write to plasma buffer
    stream = pa.FixedSizeBufferWriter(buf)
    stream_writer = pa.RecordBatchStreamWriter(stream, record_batch.schema)
    stream_writer.write_batch(record_batch)
    stream_writer.close()

    client.seal(object_id)

    # ----------------Reading Data back from plasma----------------------------

    # Get PlasmaBuffer from ObjectID
    [data] = client.get_buffers([object_id])
    buffer = pa.BufferReader(data)

    # Plasmabuffer -> record batch
    reader = pa.RecordBatchStreamReader(buffer)
    record_batch = reader.read_next_batch()

    # record batch -> python dictionary
    py_dict = record_batch.to_pydict()
Example #15
    def list_flights(self, context, criteria):
        for key, table in self.flights.items():
            if key[1] is not None:
                descriptor = \
                    pyarrow.flight.FlightDescriptor.for_command(key[1])
            else:
                descriptor = pyarrow.flight.FlightDescriptor.for_path(*key[2])

            endpoints = [
                pyarrow.flight.FlightEndpoint(
                    repr(key),
                    [pyarrow.flight.Location.for_grpc_tcp(self.host, self.port)]),
            ]

            mock_sink = pyarrow.MockOutputStream()
            stream_writer = pyarrow.RecordBatchStreamWriter(mock_sink, table.schema)
            stream_writer.write_table(table)
            stream_writer.close()
            data_size = mock_sink.size()

            yield pyarrow.flight.FlightInfo(table.schema,
                                            descriptor, endpoints,
                                            table.num_rows, data_size)
Example #16
def put_df_to_plasma(client, df, object_id):
    """
    Precondition: the Plasma Object Store has been opened.
    e.g. by: plasma_store -m 1000000000 -s /tmp/plasma
    """
    record_batch = pa.RecordBatch.from_pandas(df)
    # Get size of record batch and schema
    mock_output_stream = pa.MockOutputStream()
    stream_writer = pa.RecordBatchStreamWriter(mock_output_stream,
                                               record_batch.schema)
    stream_writer.write_batch(record_batch)
    data_size = mock_output_stream.size()

    # Allocate a buffer in the object store for the serialized DataFrame
    buf = client.create(object_id, data_size)

    # Write the serialized DataFrame to the object store
    stream = pa.FixedSizeBufferWriter(buf)
    stream_writer = pa.RecordBatchStreamWriter(stream, record_batch.schema)
    stream_writer.write_batch(record_batch)

    # Seal the object
    client.seal(object_id)
Example #17
def put_df(df):
    record_batch = pa.RecordBatch.from_pandas(df)

    # Get size of record batch and schema
    mock_sink = pa.MockOutputStream()
    stream_writer = pa.RecordBatchStreamWriter(mock_sink, record_batch.schema)
    stream_writer.write_batch(record_batch)
    data_size = mock_sink.size()

    # Generate an ID and allocate a buffer in the object store for the
    # serialized DataFrame
    object_id = plasma.ObjectID(np.random.bytes(20))
    buf = client.create(object_id, data_size)

    # Write the serialized DataFrame to the object store
    sink = pa.FixedSizeBufferWriter(buf)
    stream_writer = pa.RecordBatchStreamWriter(sink, record_batch.schema)
    stream_writer.write_batch(record_batch)

    # Seal the object
    client.seal(object_id)

    return object_id
Example #18
def put_df_to_object_store(df, client):
    """ Precondition: the Plasma Object Store has been opened.
    Returns the object ID.
    """
    record_batch = pa.RecordBatch.from_pandas(df)

    # The primary intent of using an Arrow stream is to pass it to other Arrow
    # facilities that will make use of it, such as the Arrow IPC routines:
    # https://arrow.apache.org/docs/python/generated/pyarrow.NativeFile.html#pyarrow.NativeFile

    # Get size of record batch and schema
    mock_output_stream = pa.MockOutputStream()
    # MockOutputStream is a helper class that tracks the size of allocations.
    # Writes to this stream do not copy or retain any data; they just bump a
    # size counter that can later be used to know exactly how much space needs
    # to be allocated for the actual write.
    stream_writer = pa.RecordBatchStreamWriter(mock_output_stream,
                                               record_batch.schema)
    stream_writer.write_batch(record_batch)
    data_size = mock_output_stream.size()

    # Generate an ID and allocate a buffer in the object store for the
    # serialized DataFrame
    object_id = plasma.ObjectID(b"0FF1CE00C0FFEE00BEEF")

    buf = client.create(object_id, data_size)

    # Write the serialized DataFrame to the object store
    # A FixedSizeBufferWriter is a stream that writes into an Arrow buffer.
    # https://arrow.apache.org/docs/python/generated/pyarrow.FixedSizeBufferWriter.html
    stream = pa.FixedSizeBufferWriter(buf)
    stream_writer = pa.RecordBatchStreamWriter(stream, record_batch.schema)
    stream_writer.write_batch(record_batch)

    # Seal the object
    client.seal(object_id)

    return object_id
Example #19
def put_df_to_plasma(df, client):
    """ Precondition: the Plasma Object Store has been opened.
        Returns the object ID.
    """
    record_batch = pa.RecordBatch.from_pandas(df)

    # Get size of record batch and schema
    mock_output_stream = pa.MockOutputStream()
    stream_writer = pa.RecordBatchStreamWriter(mock_output_stream,
                                               record_batch.schema)
    stream_writer.write_batch(record_batch)
    data_size = mock_output_stream.size()

    object_id = plasma.ObjectID.from_random()

    buf = client.create(object_id, data_size)
    stream = pa.FixedSizeBufferWriter(buf)
    stream_writer = pa.RecordBatchStreamWriter(stream, record_batch.schema)
    stream_writer.write_batch(record_batch)

    # Seal the object
    client.seal(object_id)

    return object_id
Example #20
def hello():
    channel = grpc.insecure_channel('untrusted:50051')
    stub = codeRunner_pb2_grpc.codeRunnerStub(channel)

    rand = random.choice([True, False])

    from pyarrow import csv
    fn = "IRAhandle_tweets_1.csv" if rand else "mimic.csv"
    table = csv.read_csv(fn)
    start = time.perf_counter()

    print("data loaded")

    batches = table.to_batches()
    print(1)
    client = plasma.connect("/tmp/plasma")

    print(2)

    code = """
import time
while True:
    print(7)
    time.sleep(0.5)
""" if False else """
import os
import pyarrow
import sys

authors = dataTable.column("author")
newData = []
for i in range(len(authors)):
    newData.append(1 if i == 0 or authors[i] != authors[i-1] else newData[-1]+1)
newColumn = dataTable.column(3).from_array("authorTweetCount", [newData])
newTable = dataTable.append_column(newColumn)
    """ if rand else """
import os
import pyarrow
import sys

ages = dataTable.column("age")
maxV = max(ages.to_pylist())
newData = []
for i in ages:
    newData.append(1 if i == maxV else 0)
newColumn = dataTable.column(3).from_array("oldest", [newData])
newTable = dataTable.append_column(newColumn)
    """

    tables = []

    for i in range(len(batches)):
        id_ = randString()

        strId = makeID(id_)

        mock_sink = pyarrow.MockOutputStream()  #find data size
        stream_writer = pyarrow.RecordBatchStreamWriter(
            mock_sink, batches[0].schema)
        stream_writer.write_batch(batches[i])
        stream_writer.close()
        data_size = mock_sink.size()
        buf = client.create(strId, data_size)

        stream = pyarrow.FixedSizeBufferWriter(buf)
        stream_writer = pyarrow.RecordBatchStreamWriter(
            stream, batches[0].schema)
        stream_writer.write_batch(batches[i])
        stream_writer.close()

        client.seal(strId)
        print("sent batch " + str(i + 1))

        codeToSend = codeRunner_pb2.code(toRun=code, id_=id_)

        newId = stub.runCode(codeToSend, timeout=1)
        newId = newId.id_

        [data] = client.get_buffers([makeID(newId)])
        outputBuf = pyarrow.py_buffer(data.to_pybytes())
        buffer_ = pyarrow.BufferReader(outputBuf)
        reader = pyarrow.RecordBatchStreamReader(buffer_)
        if i == 0:
            datatable = reader.read_all()
        else:
            datatable = pyarrow.concat_tables([
                datatable,
                datatable.from_batches(reader.read_all().to_batches())
            ])

    html = str(datatable.column("authorTweetCount" if rand else "oldest").data)
    print("data received after " + str(time.clock() - start))

    return html
Example #21
def write_batches():
    with stream_fixture._get_writer(pa.MockOutputStream(),
                                    schema) as writer:
        for batch in batches:
            writer.write_batch(batch)
        return writer.stats
Example #22
def put_df(df):
    id_num = df['rIDs'].values[0]
    id_num = str(id_num)
    if id_num == '23':
        id_num = 'X'
    elif id_num == '24':
        id_num = 'Y'
    elif id_num == '25':
        id_num = 'M'

    record_batch = pa.RecordBatch.from_pandas(df)
    record_batch_rows = record_batch.num_rows
    record_batch_rows_actual = record_batch_rows
    index = 0
    limit = 5714285
    check = False
    print(record_batch_rows_actual)
    i = 0
    while record_batch_rows > limit:

        check = True
        record_batch_rows = record_batch_rows - limit
        record_batch_slice = record_batch.slice(index, limit)
        index = index + limit

        # Get size of record batch and schema
        mock_sink = pa.MockOutputStream()
        stream_writer = pa.RecordBatchStreamWriter(mock_sink,
                                                   record_batch_slice.schema)
        stream_writer.write_batch(record_batch_slice)
        data_size = mock_sink.size()

        # Generate an ID and allocate a buffer in the object store for the
        # serialized DataFrame
        # ObjectID needs 20 raw bytes, so encode the random ASCII string
        object_id = plasma.ObjectID(''.join(
            random.choice(string.ascii_uppercase + string.digits)
            for _ in range(20)).encode())
        #print(id_num)
        buf = client.create(object_id, data_size)

        # Write the serialized DataFrame to the object store
        sink = pa.FixedSizeBufferWriter(buf)
        stream_writer = pa.RecordBatchStreamWriter(sink,
                                                   record_batch_slice.schema)
        stream_writer.write_batch(record_batch_slice)

        # Seal the object
        client.seal(object_id)

        f = open("/home/tahmad/bulk/apps/objIDsPy.txt", "a")
        # object_id.binary() returns bytes; decode before writing to the text file
        f.write('Chr' + id_num + '_' + str(i) + '\t' +
                object_id.binary().decode() + '\n')
        f.close()
        i = i + 1

    if check == True:
        record_batch = record_batch.slice(index, record_batch_rows)

    # Get size of record batch and schema
    mock_sink = pa.MockOutputStream()
    stream_writer = pa.RecordBatchStreamWriter(mock_sink, record_batch.schema)
    stream_writer.write_batch(record_batch)
    data_size = mock_sink.size()

    # Generate an ID and allocate a buffer in the object store for the
    # serialized DataFrame
    # ObjectID needs 20 raw bytes, so encode the random ASCII string
    object_id = plasma.ObjectID(''.join(
        random.choice(string.ascii_uppercase + string.digits)
        for _ in range(20)).encode())
    #print(id_num)
    buf = client.create(object_id, data_size)

    # Write the serialized DataFrame to the object store
    sink = pa.FixedSizeBufferWriter(buf)
    stream_writer = pa.RecordBatchStreamWriter(sink, record_batch.schema)
    stream_writer.write_batch(record_batch)

    # Seal the object
    client.seal(object_id)

    #get_df(object_id) #Loopback

    return object_id, id_num