def ray_start_reconstruction(request):
    """Start a cluster of ``request.param`` nodes for reconstruction tests.

    A fixed 1 GB object-store budget is split evenly across the nodes, and
    a short reconstruction timeout (200 ms) is configured on every node.

    Yields:
        Tuple of (plasma_store_memory, num_nodes, cluster).
    """
    num_nodes = request.param
    plasma_store_memory = 10**9
    cluster = Cluster(
        initialize_head=True,
        head_node_args={
            "num_cpus": 1,
            "object_store_memory": plasma_store_memory // num_nodes,
            "redis_max_memory": 10**7,
            "redirect_output": True,
            "_internal_config": json.dumps({
                "initial_reconstruction_timeout_milliseconds": 200
            })
        })
    # The loop index was unused; `_` makes that explicit.
    for _ in range(num_nodes - 1):
        cluster.add_node(
            num_cpus=1,
            object_store_memory=plasma_store_memory // num_nodes,
            redirect_output=True,
            _internal_config=json.dumps({
                "initial_reconstruction_timeout_milliseconds": 200
            }))
    ray.init(redis_address=cluster.redis_address)
    yield plasma_store_memory, num_nodes, cluster
    # Clean up the Ray cluster.
    ray.shutdown()
    cluster.shutdown()
def ray_start_reconstruction(request):
    """Start a ``request.param``-node cluster for object reconstruction tests.

    Splits a 1 GB plasma-store budget evenly over the nodes and sets a 200 ms
    reconstruction timeout on each of them.

    Yields:
        Tuple of (plasma_store_memory, num_nodes, cluster).
    """
    num_nodes = request.param
    plasma_store_memory = 10**9
    cluster = Cluster(
        initialize_head=True,
        head_node_args={
            "num_cpus": 1,
            "object_store_memory": plasma_store_memory // num_nodes,
            "redis_max_memory": 10**7,
            "redirect_output": True,
            "_internal_config": json.dumps({
                "initial_reconstruction_timeout_milliseconds": 200
            })
        })
    # Loop counter is never read, so name it `_`.
    for _ in range(num_nodes - 1):
        cluster.add_node(
            num_cpus=1,
            object_store_memory=plasma_store_memory // num_nodes,
            redirect_output=True,
            _internal_config=json.dumps({
                "initial_reconstruction_timeout_milliseconds": 200
            }))
    ray.init(redis_address=cluster.redis_address)
    yield plasma_store_memory, num_nodes, cluster
    # Clean up the Ray cluster.
    ray.shutdown()
    cluster.shutdown()
def ray_start_empty_cluster():
    """Yield an empty Cluster object; shut everything down afterwards."""
    cluster = Cluster()
    yield cluster
    # The code after the yield runs as teardown.
    ray.shutdown()
    cluster.shutdown()
def start_connected_cluster():
    """Start a head node and connect the driver to it."""
    cluster = Cluster(initialize_head=True, connect=True)
    yield cluster
    # Everything below the yield is teardown.
    ray.shutdown()
    cluster.shutdown()
def ray_start_empty_cluster():
    """Provide a Cluster with no nodes; teardown runs after the yield."""
    empty_cluster = Cluster()
    yield empty_cluster
    # Teardown: disconnect the driver, then stop the cluster.
    ray.shutdown()
    empty_cluster.shutdown()
def cluster_start():
    """Start a connected head node with one CPU and a short heartbeat timeout."""
    head_args = {
        "resources": dict(CPU=1),
        "_internal_config": json.dumps({"num_heartbeats_timeout": 10}),
    }
    cluster = Cluster(initialize_head=True, connect=True, head_node_args=head_args)
    yield cluster
    # Teardown after the yield.
    ray.shutdown()
    cluster.shutdown()
def ray_start_two_nodes():
    """Start a two-node cluster (zero CPUs) with a long heartbeat timeout."""
    cluster = Cluster()
    # Both nodes share the same internal config.
    config = json.dumps({"num_heartbeats_timeout": 40})
    for _ in range(2):
        cluster.add_node(num_cpus=0, _internal_config=config)
    ray.init(redis_address=cluster.redis_address)
    yield cluster
    # The code after the yield runs as teardown.
    ray.shutdown()
    cluster.shutdown()
def ray_start_workers_separate_multinode(request):
    """Start ``num_nodes`` nodes with ``num_initial_workers`` CPUs each.

    ``request.param`` is a (num_nodes, num_initial_workers) pair.

    Yields:
        Tuple of (num_nodes, num_initial_workers).
    """
    # Idiomatic tuple unpacking instead of indexing request.param twice.
    num_nodes, num_initial_workers = request.param
    # Start the Ray processes.
    cluster = Cluster()
    for _ in range(num_nodes):
        cluster.add_node(num_cpus=num_initial_workers)
    ray.init(redis_address=cluster.redis_address)
    yield num_nodes, num_initial_workers
    # The code after the yield will run as teardown code.
    ray.shutdown()
    cluster.shutdown()
def start_connected_cluster():
    """Start a connected head node exposing a single CPU resource."""
    head_args = {
        "resources": dict(CPU=1),
        "_internal_config": json.dumps({"num_heartbeats_timeout": 10}),
    }
    cluster = Cluster(initialize_head=True, connect=True, head_node_args=head_args)
    yield cluster
    # Code after the yield runs as teardown.
    ray.shutdown()
    cluster.shutdown()
def start_connected_longer_cluster():
    """Creates a cluster with a longer timeout."""
    head_args = {
        "resources": dict(CPU=1),
        "_internal_config": json.dumps({"num_heartbeats_timeout": 20}),
    }
    cluster = Cluster(initialize_head=True, connect=True, head_node_args=head_args)
    yield cluster
    # Code after the yield runs as teardown.
    ray.shutdown()
    cluster.shutdown()
def ray_start_workers_separate_multinode(request):
    """Start a cluster sized by the (num_nodes, num_initial_workers) param.

    Yields:
        Tuple of (num_nodes, num_initial_workers).
    """
    # Unpack the pair in one statement rather than indexing twice.
    num_nodes, num_initial_workers = request.param
    # Start the Ray processes.
    cluster = Cluster()
    for _ in range(num_nodes):
        cluster.add_node(num_cpus=num_initial_workers)
    ray.init(redis_address=cluster.redis_address)
    yield num_nodes, num_initial_workers
    # The code after the yield will run as teardown code.
    ray.shutdown()
    cluster.shutdown()
def cluster_start():
    """Start a one-CPU head node with a short heartbeat timeout and connect."""
    cluster = Cluster(
        initialize_head=True,
        connect=True,
        head_node_args={
            "num_cpus": 1,
            "_internal_config": json.dumps({"num_heartbeats_timeout": 10}),
        })
    yield cluster
    # Teardown after the yield.
    ray.shutdown()
    cluster.shutdown()
def start_connected_longer_cluster():
    """Creates a cluster with a longer timeout."""
    cluster = Cluster(
        initialize_head=True,
        connect=True,
        head_node_args={
            "num_cpus": 1,
            "_internal_config": json.dumps({"num_heartbeats_timeout": 20}),
        })
    yield cluster
    # The code after the yield will run as teardown code.
    ray.shutdown()
    cluster.shutdown()
def start_connected_cluster():
    """Start a connected one-CPU head node; teardown after the yield."""
    connected = Cluster(
        initialize_head=True,
        connect=True,
        head_node_args={
            "num_cpus": 1,
            "_internal_config": json.dumps({"num_heartbeats_timeout": 10}),
        })
    yield connected
    # Teardown: disconnect the driver, then stop the cluster.
    ray.shutdown()
    connected.shutdown()
def start_connected_emptyhead_cluster():
    """Starts head with no resources."""
    head_args = {
        "resources": dict(CPU=0),
        "_internal_config": json.dumps({"num_heartbeats_timeout": 10}),
    }
    cluster = Cluster(initialize_head=True, connect=True, head_node_args=head_args)
    # Pytest doesn't play nicely with imports
    _register_all()
    yield cluster
    # Code after the yield runs as teardown.
    ray.shutdown()
    cluster.shutdown()
def ray_start_combination(request):
    """Start a cluster from a (num_nodes, num_workers_per_scheduler) param.

    Yields:
        Tuple of (num_nodes, num_workers_per_scheduler, cluster).
    """
    # Unpack the pair instead of indexing request.param twice.
    num_nodes, num_workers_per_scheduler = request.param
    # Start the Ray processes.
    cluster = Cluster(
        initialize_head=True,
        head_node_args={
            "num_cpus": 10,
            "redis_max_memory": 10**7
        })
    # The loop counter is unused; name it `_`.
    for _ in range(num_nodes - 1):
        cluster.add_node(num_cpus=10)
    ray.init(redis_address=cluster.redis_address)
    yield num_nodes, num_workers_per_scheduler, cluster
    # The code after the yield will run as teardown code.
    ray.shutdown()
    cluster.shutdown()
def start_connected_emptyhead_cluster():
    """Starts head with no resources."""
    cluster = Cluster(
        initialize_head=True,
        connect=True,
        head_node_args={
            "num_cpus": 0,
            "_internal_config": json.dumps({"num_heartbeats_timeout": 10}),
        })
    # Pytest doesn't play nicely with imports
    _register_all()
    yield cluster
    # Teardown after the yield.
    ray.shutdown()
    cluster.shutdown()
def ray_start_combination(request):
    """Start a cluster from a (num_nodes, num_workers_per_scheduler) param.

    Yields:
        Tuple of (num_nodes, num_workers_per_scheduler).
    """
    # Tuple unpacking replaces the two request.param[...] indexings.
    num_nodes, num_workers_per_scheduler = request.param
    # Start the Ray processes.
    cluster = Cluster(
        initialize_head=True,
        head_node_args={
            "num_cpus": 10,
            "redis_max_memory": 10**7
        })
    # Loop index never read — use `_`.
    for _ in range(num_nodes - 1):
        cluster.add_node(num_cpus=10)
    ray.init(redis_address=cluster.redis_address)
    yield num_nodes, num_workers_per_scheduler
    # The code after the yield will run as teardown code.
    ray.shutdown()
    cluster.shutdown()
def ray_start_cluster():
    """Start a 5-node cluster (head + 4 workers) with 8 CPUs per node.

    Yields:
        The running Cluster object.
    """
    node_args = {
        "num_cpus": 8,
        "_internal_config": json.dumps({
            "initial_reconstruction_timeout_milliseconds": 1000,
            "num_heartbeats_timeout": 10
        })
    }
    # Start with 4 worker nodes and 8 cores each.
    cluster = Cluster(
        initialize_head=True, connect=True, head_node_args=node_args)
    # add_node's return values were collected into a `workers` list that was
    # never used — dead code removed.
    for _ in range(4):
        cluster.add_node(**node_args)
    cluster.wait_for_nodes()
    yield cluster
    # Teardown: disconnect the driver, then stop the cluster.
    ray.shutdown()
    cluster.shutdown()
def ray_initialize_cluster():
    """Start 4 nodes with 8 CPUs each and connect the driver."""
    num_nodes = 4
    num_workers_per_scheduler = 8
    # All nodes share the same internal config.
    internal_config = json.dumps({
        "initial_reconstruction_timeout_milliseconds": 1000,
        "num_heartbeats_timeout": 10,
    })
    cluster = Cluster()
    for _ in range(num_nodes):
        cluster.add_node(
            num_cpus=num_workers_per_scheduler,
            _internal_config=internal_config)
    ray.init(redis_address=cluster.redis_address)
    yield None
    # Teardown after the yield.
    ray.shutdown()
    cluster.shutdown()
def ray_start_cluster():
    """Start a head node plus 4 workers, each exposing 8 CPU resources.

    Yields:
        The running Cluster object.
    """
    node_args = {
        "resources": dict(CPU=8),
        "_internal_config": json.dumps({
            "initial_reconstruction_timeout_milliseconds": 1000,
            "num_heartbeats_timeout": 10
        })
    }
    # Start with 4 worker nodes and 8 cores each.
    g = Cluster(initialize_head=True, connect=True, head_node_args=node_args)
    # The `workers` list previously built here was never read — removed.
    for _ in range(4):
        g.add_node(**node_args)
    g.wait_for_nodes()
    yield g
    # Teardown after the yield.
    ray.shutdown()
    g.shutdown()
def ray_initialize_cluster():
    """Bring up a 4-node, 8-CPU-per-node cluster and yield it."""
    num_nodes = 4
    num_workers_per_scheduler = 8
    shared_config = json.dumps({
        "initial_reconstruction_timeout_milliseconds": 1000,
        "num_heartbeats_timeout": 10,
    })
    cluster = Cluster()
    for _ in range(num_nodes):
        cluster.add_node(
            num_cpus=num_workers_per_scheduler,
            _internal_config=shared_config)
    ray.init(redis_address=cluster.redis_address)
    yield cluster
    # Teardown: driver first, then the cluster.
    ray.shutdown()
    cluster.shutdown()
def ray_start_cluster():
    """Start a 4-node cluster (head + 3 workers) with 4 CPUs per node.

    Yields:
        The running Cluster object.
    """
    node_args = {
        "num_cpus": 4,
        "_internal_config": json.dumps({
            "initial_reconstruction_timeout_milliseconds": 1000,
            "num_heartbeats_timeout": 10
        })
    }
    # Start with 3 worker nodes and 4 cores each.
    cluster = Cluster(
        initialize_head=True, connect=True, head_node_args=node_args)
    # The node handles were appended to an unused `workers` list — dropped.
    for _ in range(3):
        cluster.add_node(**node_args)
    cluster.wait_for_nodes()
    yield cluster
    # Teardown after the yield.
    ray.shutdown()
    cluster.shutdown()
def test_shutdown():
    """Shutting the cluster down must kill every process on every node."""
    cluster = Cluster(initialize_head=False)
    nodes = [cluster.add_node(), cluster.add_node()]
    cluster.shutdown()
    # Equivalent to `not any(...)` by De Morgan.
    assert all(not node.any_processes_alive() for node in nodes)
def test_shutdown():
    """After shutdown, no process on either node should remain alive."""
    g = Cluster(initialize_head=False)
    first = g.add_node()
    second = g.add_node()
    g.shutdown()
    for node in (first, second):
        assert not node.any_processes_alive()