def test_shuffle_hang():
    """Smoke-test that a large streaming shuffle completes without hanging."""
    try:
        shuffle.run(
            object_store_memory=1e9,
            num_partitions=200,
            partition_size=10e6,
        )
    finally:
        # Always tear down Ray so subsequent tests start from a clean state.
        ray.shutdown()
def test_shuffle_multi_node_no_streaming(ray_start_cluster):
    """Run the non-streaming shuffle across a small multi-node cluster.

    The ``ray_start_cluster`` fixture owns cluster setup/teardown; this test
    only adds worker nodes and drives the shuffle.
    """
    # Four nodes, each with 2 CPUs and a 1 GB object store.
    for _ in range(4):
        ray_start_cluster.add_node(num_cpus=2, object_store_memory=1e9)
    shuffle.run(
        ray_address="auto",
        num_partitions=200,
        partition_size=10e6,
        no_streaming=True,
    )
def test_streaming_shuffle(set_kill_interval):
    """Run the streaming shuffle while the fixture may be killing nodes.

    The ``set_kill_interval`` fixture yields a tuple of
    ``(lineage_reconstruction_enabled, kill_interval, ...)``; only the kill
    interval is needed here, so the other fields are discarded.
    """
    # Fix: the first element was previously bound to an unused local
    # (`lineage_reconstruction_enabled`); use `_` placeholders instead.
    _, kill_interval, _ = set_kill_interval
    try:
        # Create our own tracker so that it gets scheduled onto the head node.
        tracker = ShuffleStatusTracker.remote()
        ray.get(tracker.get_progress.remote())
        assert len(ray.nodes()) == 1, (
            "Tracker actor may have been scheduled to remote node "
            "and may get killed during the test")
        shuffle.run(
            ray_address="auto",
            no_streaming=False,
            num_partitions=200,
            partition_size=1e6,
            tracker=tracker,
        )
    except (RayTaskError, ObjectLostError):
        # Task/object failures are acceptable only when the fixture is
        # actively killing nodes; otherwise this is a real test failure.
        assert kill_interval is not None
def test_shuffle():
    """Run the shuffle benchmark with its default configuration."""
    try:
        shuffle.run()
    finally:
        # Tear down Ray regardless of the outcome so later tests are isolated.
        ray.shutdown()
def test_shuffle_no_streaming():
    """Run the shuffle benchmark with streaming disabled."""
    try:
        shuffle.run(no_streaming=True)
    finally:
        # Tear down Ray regardless of the outcome so later tests are isolated.
        ray.shutdown()