def process_queue(pool_id=0, errors={}):
    rosetta = Rosetta()
    wdg = WriterDelegator(rosetta, push_to_queue=True)
    print('starting consumer')
    # flush and close right away so a 'close' message lands at the end of the
    # queue, after all the nodes and edges; the consumer stops when it reaches it
    wdg.flush()
    wdg.close()
    start_consuming(max_retries=-1)


def start_multiple_consumers(num_consumers, errors={}):
    # assumes `from multiprocessing import Pool` and `from functools import partial` at module top
    pool = Pool(processes=num_consumers)
    finished = []
    for r in range(num_consumers):
        p_q_partial = partial(process_queue, r, errors)
        error_call_back_partial = partial(write_error_to_file, r)
        success_call_back_partial = partial(write_termination, r)
        pp = pool.apply_async(p_q_partial, [],
                              callback=success_call_back_partial,
                              error_callback=error_call_back_partial)
        finished.append(pp)
    # wait for every consumer to finish before shutting the pool down
    for task in finished:
        task.wait()
    pool.close()
    pool.join()
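

# The helpers used in the __main__ block below (write_to_queue, check_queue,
# check_neo4j) are defined elsewhere in the project. As a rough illustration
# only: assuming the queue is RabbitMQ reached through pika (suggested by
# wdg.channel.queue_purge below), the message count that check_queue presumably
# compares against could be read with a passive queue_declare, e.g.:
def queue_message_count(queue_name='neo4j', host='localhost'):
    """Hypothetical sketch, not part of the original code: return the number of
    messages currently sitting in `queue_name`."""
    import pika
    connection = pika.BlockingConnection(pika.ConnectionParameters(host=host))
    channel = connection.channel()
    # passive=True only inspects the queue; it does not create or modify it
    frame = channel.queue_declare(queue=queue_name, passive=True)
    connection.close()
    return frame.method.message_count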


if __name__ == '__main__':
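    # End-to-end check of the queue writer/consumer path: set up a
    # WriterDelegator, purge anything left in the 'neo4j' queue, write
    # source_node_length source nodes (3 messages each), start the consumer(s),
    # then verify the expected graph landed in neo4j.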
    rosetta = Rosetta()
    wdg = WriterDelegator(rosetta, push_to_queue=True)
    wdg.flush()
    wdg.close()
    # clear out the queue
    wdg.channel.queue_purge('neo4j')
    # number of source nodes to write
    source_node_length = 100
    write_to_queue(source_node_length, wdg)
    # expect source_node_length * 3 messages in the queue
    assert check_queue(source_node_length * 3)
    errors = {}
    # start consumer(s)
    start_multiple_consumers(1, errors=errors)
    # process_queue(1, {})
    print('checking neo4j')
    check_neo4j(source_node_length)