Example #1
import ray
# Imports follow the Ray streaming examples of this era; the module paths
# are assumptions and may differ across Ray versions
from ray.streaming.config import Config
from ray.streaming.streaming import Conf, Environment


def test_word_count():
    ray.init()
    env = Environment(config=Conf(channel_type=Config.NATIVE_CHANNEL))
    env.read_text_file(__file__) \
        .set_parallelism(1) \
        .filter(lambda x: "word" in x) \
        .inspect(lambda x: print("result", x))
    env_handle = env.execute()
    ray.get(env_handle)  # Stay alive until execution finishes
    env.wait_finish()
    ray.shutdown()
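The pipeline is lazy: the chained calls only assemble the dataflow, and nothing runs until env.execute() deploys the operators as Ray actors and returns a handle that ray.get() blocks on. For reference, the filter and inspect stages amount to the following eager loop (a plain-Python sketch for intuition, not part of the Ray API):

def filter_and_inspect(path):
    # Reference-only equivalent of the streaming filter/inspect stages
    with open(path) as f:
        for line in f:
            if "word" in line:  # filter(lambda x: "word" in x)
                print("result", line)  # inspect(lambda x: print("result", x))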
Example #2
import argparse
import logging
import time

import ray
# Module paths follow the Ray streaming examples of this era; treat them
# as assumptions if your Ray version differs
from ray.streaming.config import Config
from ray.streaming.streaming import Conf, Environment

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

parser = argparse.ArgumentParser()
parser.add_argument("--input-file", required=True, help="the input text file")


# Splits a line of text into words
def splitter(line):
    return line.split()


# Keeps only words containing the letter "f" (the predicate is a
# reconstruction; the snippet was truncated before the condition)
def filter_fn(word):
    if "f" in word:
        return True
    return False


if __name__ == "__main__":

    args = parser.parse_args()

    ray.init(local_mode=False)

    # A Ray streaming environment with the default configuration
    env = Environment(config=Conf(channel_type=Config.NATIVE_CHANNEL))

    # Stream represents the output of the filter and
    # can be forked into other dataflows
    stream = env.read_text_file(args.input_file) \
        .shuffle() \
        .flat_map(splitter) \
        .set_parallelism(2) \
        .filter(filter_fn) \
        .set_parallelism(2) \
        .inspect(lambda x: print("result", x))  # Prints the stream contents to stdout
    start = time.time()
    env_handle = env.execute()
    ray.get(env_handle)  # Stay alive until execution finishes
    env.wait_finish()
    end = time.time()
    logger.info("Elapsed time: {} secs".format(end - start))
    logger.debug("Output stream id: {}".format(stream.id))
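Here shuffle() repartitions records across the downstream instances created by set_parallelism(2). As a rough mental model of the routing (illustrative only; Ray's channel implementation is internal, and NATIVE_CHANNEL selects the native transport), a shuffled record could be assigned like this:

import random

def shuffle_route(record, num_instances):
    # shuffle(): each record goes to a randomly chosen downstream instance,
    # balancing load without preserving any per-key locality
    return random.randrange(num_instances)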
Example #3
import argparse
import logging
import time

import ray
from ray.streaming.streaming import Environment  # module path is an assumption

logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
parser.add_argument("--input-file", required=True, help="the input text file")


# Reconstructed helpers (truncated from this snippet): a Record exposes a
# 'word' attribute for key_by("word") to hash on
class Record:
    def __init__(self, word, count=1):
        self.word = word
        self.count = count


# Splits a line into Record objects, one per word
def splitter(line):
    return [Record(word) for word in line.split()]


# Maps a Record into a (word, count) tuple so that sum(1) can add counts
def as_tuple(record):
    return (record.word, record.count)


if __name__ == "__main__":
    # Get program parameters
    args = parser.parse_args()
    input_file = str(args.input_file)

    ray.init()
    ray.register_custom_serializer(Record, use_dict=True)

    # A Ray streaming environment with the default configuration
    env = Environment()
    env.set_parallelism(2)  # Each operator will be executed by two actors

    # 'key_by("word")' physically partitions the stream of records
    # based on the hash value of the 'word' attribute (see Record class above)
    # 'map(as_tuple)' maps a record of type Record into a tuple
    # 'sum(1)' sums the 2nd element of the tuple, i.e. the word count
    stream = env.read_text_file(input_file) \
                .round_robin() \
                .flat_map(splitter) \
                .key_by("word") \
                .map(as_tuple) \
                .sum(1) \
                .inspect(print)  # Prints the stream contents to stdout
    start = time.time()
    env_handle = env.execute()  # Deploys and executes the dataflow
    ray.get(env_handle)  # Stay alive until execution finishes
    end = time.time()
    logger.info("Elapsed time: {} secs".format(end - start))
    logger.debug("Output stream id: {}".format(stream.id))
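Because sum(1) accumulates per key as records arrive, inspect(print) sees the running counts rather than a single final value. An eager single-process equivalent of the whole dataflow (a reference sketch for intuition, not the Ray API) would be:

from collections import defaultdict

def word_count_reference(path):
    # Reference-only equivalent: split, key by word, keep running sums
    counts = defaultdict(int)
    with open(path) as f:
        for line in f:
            for word in line.split():
                counts[word] += 1  # sum(1) over (word, 1) tuples
                print((word, counts[word]))  # running count, as inspect(print) sees it
    return counts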