Example #1
def test_pull_bundle_deadlock(ray_start_cluster):
    # Test https://github.com/ray-project/ray/issues/13689
    cluster = ray_start_cluster
    cluster.add_node(
        num_cpus=0,
        _system_config={
            "max_direct_call_object_size": int(1e7),
        },
    )
    ray.init(address=cluster.address)
    worker_node_1 = cluster.add_node(
        num_cpus=8,
        resources={"worker_node_1": 1},
    )
    cluster.add_node(
        num_cpus=8,
        resources={"worker_node_2": 1},
        object_store_memory=int(1e8 * 2 - 10),
    )
    cluster.wait_for_nodes()

    @ray.remote(num_cpus=0)
    def get_node_id():
        return ray.get_runtime_context().node_id

    worker_node_1_id = ray.get(
        get_node_id.options(resources={
            "worker_node_1": 0.1
        }).remote())
    worker_node_2_id = ray.get(
        get_node_id.options(resources={
            "worker_node_2": 0.1
        }).remote())

    object_a = ray.put(np.zeros(int(1e8), dtype=np.uint8))

    @ray.remote(scheduling_strategy=NodeAffinitySchedulingStrategy(
        worker_node_1_id, soft=True))
    def task_a_to_b(a):
        return np.zeros(int(1e8), dtype=np.uint8)

    object_b = task_a_to_b.remote(object_a)
    ray.wait([object_b], fetch_local=False)

    @ray.remote(scheduling_strategy=NodeAffinitySchedulingStrategy(
        worker_node_2_id, soft=False))
    def task_b_to_c(b):
        return "c"

    object_c = task_b_to_c.remote(object_b)
    # task_a_to_b will be re-executed on worker_node_2, so the pull manager there
    # will queue the object_a pull request after the existing object_b pull request.
    # Make sure the object_b pull request doesn't block the object_a pull request.
    cluster.remove_node(worker_node_1, allow_graceful=False)
    assert ray.get(object_c) == "c"
Example #2
def _get_or_create_actor_prefetcher() -> "ActorHandle":
    node_id = ray.get_runtime_context().node_id
    actor_name = f"dataset-block-prefetcher-{node_id}"
    return _BlockPretcher.options(
        scheduling_strategy=NodeAffinitySchedulingStrategy(node_id, soft=False),
        name=actor_name,
        namespace=PREFETCHER_ACTOR_NAMESPACE,
        get_if_exists=True,
    ).remote()
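Example #2 pins a per-node singleton actor to the node that creates it. Below is a minimal, self-contained sketch of the same pattern, assuming Ray 2.x on a local cluster; the _LocalCache actor and get_or_create_local_cache helper are illustrative names, not part of the snippet above.

import ray
from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy

ray.init()

@ray.remote(num_cpus=0)
class _LocalCache:
    def node(self):
        return ray.get_runtime_context().node_id.hex()

def get_or_create_local_cache():
    # Pin the actor to the current node; soft=False means it is never scheduled
    # elsewhere, and get_if_exists=True makes repeated calls return the same actor.
    node_id = ray.get_runtime_context().node_id.hex()
    return _LocalCache.options(
        name=f"local-cache-{node_id}",
        get_if_exists=True,
        scheduling_strategy=NodeAffinitySchedulingStrategy(node_id, soft=False),
    ).remote()

cache = get_or_create_local_cache()
assert ray.get(cache.node.remote()) == ray.get_runtime_context().node_id.hex()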
Example #3
    def _trigger_lineage_reconstruction(with_workflow):
        (tmp_path / "f2").unlink(missing_ok=True)
        (tmp_path / "num_executed").write_text("0")

        worker_node_1 = cluster.add_node(
            num_cpus=2, resources={"worker_1": 1}, storage=str(tmp_path)
        )
        worker_node_2 = cluster.add_node(
            num_cpus=2, resources={"worker_2": 1}, storage=str(tmp_path)
        )
        worker_node_id_1 = ray.get(
            get_node_id.options(num_cpus=0, resources={"worker_1": 1}).remote()
        )
        worker_node_id_2 = ray.get(
            get_node_id.options(num_cpus=0, resources={"worker_2": 1}).remote()
        )
        dag = f2.options(
            scheduling_strategy=NodeAffinitySchedulingStrategy(
                worker_node_id_2, soft=True
            )
        ).bind(
            f1.options(
                scheduling_strategy=NodeAffinitySchedulingStrategy(
                    worker_node_id_1, soft=True
                )
            ).bind()
        )

        with FileLock(lock_path):
            if with_workflow:
                ref = workflow.run_async(dag)
            else:
                ref = dag.execute()
            while not (tmp_path / "f2").exists():
                time.sleep(0.1)
            cluster.remove_node(worker_node_1, allow_graceful=False)
            cluster.remove_node(worker_node_2, allow_graceful=False)
        return ray.get(ref).sum()
Example #4
    def __init__(
        self,
        output_num_blocks: int,
        num_rounds: int,
        num_map_tasks_per_round: int,
        num_merge_tasks_per_round: int,
        merge_task_placement: List[str],
    ):
        self.num_rounds = num_rounds
        self.num_map_tasks_per_round = num_map_tasks_per_round
        self.num_merge_tasks_per_round = num_merge_tasks_per_round
        self.merge_task_placement = merge_task_placement
        self._merge_task_options = [
            {"scheduling_strategy": NodeAffinitySchedulingStrategy(node_id, soft=True)}
            for node_id in self.merge_task_placement
        ]

        self._compute_reducers_per_merge_task(output_num_blocks)
Example #5
File: http_state.py Project: parasj/ray
    def _start_proxies_if_needed(self) -> None:
        """Start a proxy on every node if it doesn't already exist."""
        for node_id, node_ip_address in self._get_target_nodes():
            if node_id in self._proxy_actors:
                continue

            name = format_actor_name(SERVE_PROXY_NAME, self._controller_name, node_id)
            try:
                proxy = ray.get_actor(name, namespace=SERVE_NAMESPACE)
            except ValueError:
                logger.info(
                    "Starting HTTP proxy with name '{}' on node '{}' "
                    "listening on '{}:{}'".format(
                        name, node_id, self._config.host, self._config.port
                    )
                )
                proxy = HTTPProxyActor.options(
                    num_cpus=self._config.num_cpus,
                    name=name,
                    namespace=SERVE_NAMESPACE,
                    lifetime="detached" if self._detached else None,
                    max_concurrency=ASYNC_CONCURRENCY,
                    max_restarts=-1,
                    max_task_retries=-1,
                    scheduling_strategy=NodeAffinitySchedulingStrategy(
                        node_id, soft=False
                    ),
                ).remote(
                    self._config.host,
                    self._config.port,
                    self._config.root_path,
                    controller_name=self._controller_name,
                    node_ip_address=node_ip_address,
                    http_middlewares=self._config.middlewares,
                )

            self._proxy_actors[node_id] = proxy
            self._proxy_actor_names[node_id] = name
Example #6
File: controller.py Project: parasj/ray
    def __init__(
        self,
        controller_name: str,
        checkpoint_path: str,
        detached: bool = False,
        dedicated_cpu: bool = False,
        http_proxy_port: int = 8000,
    ):
        try:
            self._controller = ray.get_actor(controller_name, namespace="serve")
        except ValueError:
            self._controller = None
        if self._controller is None:
            # Used for scheduling things to the head node explicitly.
            head_node_id = ray.get_runtime_context().node_id.hex()
            http_config = HTTPOptions()
            http_config.port = http_proxy_port
            self._controller = ServeController.options(
                num_cpus=1 if dedicated_cpu else 0,
                name=controller_name,
                lifetime="detached" if detached else None,
                max_restarts=-1,
                max_task_retries=-1,
                # Schedule the controller on the head node with a soft constraint. This
                # prefers it to run on the head node in most cases, but allows it to be
                # restarted on other nodes in an HA cluster.
                scheduling_strategy=NodeAffinitySchedulingStrategy(
                    head_node_id, soft=True
                ),
                namespace="serve",
                max_concurrency=CONTROLLER_MAX_CONCURRENCY,
            ).remote(
                controller_name,
                http_config=http_config,
                checkpoint_path=checkpoint_path,
                head_node_id=head_node_id,
                detached=detached,
            )
Example #7
    def __init__(
        self,
        output_num_blocks: int,
        num_rounds: int,
        num_map_tasks_per_round: int,
        merge_task_placement: List[str],
    ):
        self.num_rounds = num_rounds
        self.num_map_tasks_per_round = num_map_tasks_per_round
        self.num_merge_tasks_per_round = len(merge_task_placement)

        node_strategies = {
            node_id: {
                "scheduling_strategy":
                NodeAffinitySchedulingStrategy(node_id, soft=True)
            }
            for node_id in set(merge_task_placement)
        }
        self._merge_task_options = [
            node_strategies[node_id] for node_id in merge_task_placement
        ]

        self.merge_schedule = _MergeTaskSchedule(
            output_num_blocks, self.num_merge_tasks_per_round)
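Examples #4 and #7 both receive merge_task_placement as a pre-computed list of node IDs and build one soft node-affinity option dict per merge task. The sketch below shows one hedged way such a placement list could be assembled from the live cluster, assuming Ray is already initialized on a running cluster; the round-robin assignment is illustrative and not taken from the shuffle code itself.

import ray
from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy

ray.init()

# Collect the IDs of all alive nodes reported by the cluster.
alive_node_ids = [node["NodeID"] for node in ray.nodes() if node["Alive"]]

# Spread merge tasks round-robin over the alive nodes.
num_merge_tasks_per_round = 4
merge_task_placement = [
    alive_node_ids[i % len(alive_node_ids)]
    for i in range(num_merge_tasks_per_round)
]

# Same structure as self._merge_task_options above: one soft node-affinity
# strategy per merge task.
merge_task_options = [
    {"scheduling_strategy": NodeAffinitySchedulingStrategy(node_id, soft=True)}
    for node_id in merge_task_placement
]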
Example #8
def start(
    detached: bool = False,
    http_options: Optional[Union[dict, HTTPOptions]] = None,
    dedicated_cpu: bool = False,
    _checkpoint_path: str = DEFAULT_CHECKPOINT_PATH,
    _override_controller_namespace: Optional[str] = None,
    **kwargs,
) -> ServeControllerClient:
    """Initialize a serve instance.

    By default, the instance will be scoped to the lifetime of the returned
    Client object (or when the script exits). If detached is set to True, the
    instance will instead persist until serve.shutdown() is called. This is
    only relevant if connecting to a long-running Ray cluster (e.g., with
    ray.init(address="auto") or ray.init("ray://<remote_addr>")).

    Args:
        detached (bool): Whether or not the instance should be detached from this
          script. If set, the instance will live on the Ray cluster until it is
          explicitly stopped with serve.shutdown().
        http_options (Optional[Union[dict, serve.HTTPOptions]]): Configuration options
          for HTTP proxy. You can pass in a dictionary or HTTPOptions object
          with fields:

            - host(str, None): Host for HTTP servers to listen on. Defaults to
              "127.0.0.1". To expose Serve publicly, you probably want to set
              this to "0.0.0.0".
            - port(int): Port for HTTP server. Defaults to 8000.
            - root_path(str): Root path to mount the serve application
              (for example, "/serve"). All deployment routes will be prefixed
              with this path. Defaults to "".
            - middlewares(list): A list of Starlette middlewares that will be
              applied to the HTTP servers in the cluster. Defaults to [].
            - location(str, serve.config.DeploymentMode): The deployment
              location of HTTP servers:

                - "HeadOnly": start one HTTP server on the head node. Serve
                  assumes the head node is the node you executed serve.start
                  on. This is the default.
                - "EveryNode": start one HTTP server per node.
                - "NoServer" or None: disable HTTP server.
            - num_cpus (int): The number of CPU cores to reserve for each
              internal Serve HTTP proxy actor.  Defaults to 0.
        dedicated_cpu (bool): Whether to reserve a CPU core for the internal
          Serve controller actor.  Defaults to False.
    """
    usage_lib.record_library_usage("serve")

    http_deprecated_args = ["http_host", "http_port", "http_middlewares"]
    for key in http_deprecated_args:
        if key in kwargs:
            raise ValueError(
                f"{key} is deprecated, please use serve.start(http_options="
                f'{{"{key}": {kwargs[key]}}}) instead.')
    # Initialize ray if needed.
    ray.worker.global_worker.filter_logs_by_job = False
    if not ray.is_initialized():
        ray.init(namespace="serve")

    controller_namespace = get_controller_namespace(
        detached,
        _override_controller_namespace=_override_controller_namespace)

    try:
        client = get_global_client(
            _override_controller_namespace=_override_controller_namespace,
            _health_check_controller=True,
        )
        logger.info("Connecting to existing Serve instance in namespace "
                    f"'{controller_namespace}'.")

        _check_http_and_checkpoint_options(client, http_options,
                                           _checkpoint_path)
        return client
    except RayServeException:
        pass

    if detached:
        controller_name = SERVE_CONTROLLER_NAME
    else:
        controller_name = format_actor_name(get_random_letters(),
                                            SERVE_CONTROLLER_NAME)

    if isinstance(http_options, dict):
        http_options = HTTPOptions.parse_obj(http_options)
    if http_options is None:
        http_options = HTTPOptions()

    controller = ServeController.options(
        num_cpus=1 if dedicated_cpu else 0,
        name=controller_name,
        lifetime="detached" if detached else None,
        max_restarts=-1,
        max_task_retries=-1,
        # Schedule the controller on the head node with a soft constraint. This
        # prefers it to run on the head node in most cases, but allows it to be
        # restarted on other nodes in an HA cluster.
        scheduling_strategy=NodeAffinitySchedulingStrategy(
            ray.get_runtime_context().node_id, soft=True),
        namespace=controller_namespace,
        max_concurrency=CONTROLLER_MAX_CONCURRENCY,
    ).remote(
        controller_name,
        http_options,
        _checkpoint_path,
        detached=detached,
        _override_controller_namespace=_override_controller_namespace,
    )

    proxy_handles = ray.get(controller.get_http_proxies.remote())
    if len(proxy_handles) > 0:
        try:
            ray.get(
                [handle.ready.remote() for handle in proxy_handles.values()],
                timeout=HTTP_PROXY_TIMEOUT,
            )
        except ray.exceptions.GetTimeoutError:
            raise TimeoutError(
                f"HTTP proxies not available after {HTTP_PROXY_TIMEOUT}s.")

    client = ServeControllerClient(
        controller,
        controller_name,
        detached=detached,
        _override_controller_namespace=_override_controller_namespace,
    )
    set_global_client(client)
    logger.info(f"Started{' detached ' if detached else ' '}Serve instance in "
                f"namespace '{controller_namespace}'.")
    return client
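For reference, a hedged usage sketch of the start() function above, assuming the older Serve release this snippet targets (where serve.start() still accepted detached and http_options directly) and a long-running Ray cluster reachable via address="auto":

import ray
from ray import serve

ray.init(address="auto", namespace="serve")

# Start a detached Serve instance with one HTTP proxy per node.
client = serve.start(
    detached=True,
    http_options={"host": "0.0.0.0", "port": 8000, "location": "EveryNode"},
)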
Example #9
def test_demand_report_for_node_affinity_scheduling_strategy(shutdown_only):
    from ray.cluster_utils import AutoscalingCluster

    cluster = AutoscalingCluster(
        head_resources={"CPU": 0},
        worker_node_types={
            "cpu_node": {
                "resources": {
                    "CPU": 1,
                    "object_store_memory": 1024 * 1024 * 1024,
                },
                "node_config": {},
                "min_workers": 1,
                "max_workers": 1,
            },
        },
    )

    cluster.start()
    info = ray.init(address="auto")

    @ray.remote(num_cpus=1)
    def f(sleep_s):
        time.sleep(sleep_s)
        return ray.get_runtime_context().node_id

    worker_node_id = ray.get(f.remote(0))

    tasks = []
    tasks.append(f.remote(10000))
    # This is not reported since there is a feasible node.
    tasks.append(
        f.options(scheduling_strategy=NodeAffinitySchedulingStrategy(
            worker_node_id, soft=False)).remote(0))
    # This is reported since there is no feasible node and soft is True.
    tasks.append(
        f.options(
            num_gpus=1,
            scheduling_strategy=NodeAffinitySchedulingStrategy(
                ray.NodeID.from_random().hex(), soft=True),
        ).remote(0))

    global_state_accessor = make_global_state_accessor(info)

    def check_resource_demand():
        message = global_state_accessor.get_all_resource_usage()
        if message is None:
            return False

        resource_usage = gcs_utils.ResourceUsageBatchData.FromString(message)
        aggregate_resource_load = resource_usage.resource_load_by_shape.resource_demands

        if len(aggregate_resource_load) != 1:
            return False

        if aggregate_resource_load[0].num_infeasible_requests_queued != 1:
            return False

        if aggregate_resource_load[0].shape != {"CPU": 1.0, "GPU": 1.0}:
            return False

        return True

    wait_for_condition(check_resource_demand, 20)
    cluster.shutdown()
Example #10
def test_node_affinity_scheduling_strategy(ray_start_cluster,
                                           connect_to_client):
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=8, resources={"head": 1})
    ray.init(address=cluster.address)
    cluster.add_node(num_cpus=8, resources={"worker": 1})
    cluster.wait_for_nodes()

    with connect_to_client_or_not(connect_to_client):

        @ray.remote
        def get_node_id():
            return ray.get_runtime_context().node_id

        head_node_id = ray.get(
            get_node_id.options(num_cpus=0, resources={
                "head": 1
            }).remote())
        worker_node_id = ray.get(
            get_node_id.options(num_cpus=0, resources={
                "worker": 1
            }).remote())

        assert worker_node_id == ray.get(
            get_node_id.options(
                scheduling_strategy=NodeAffinitySchedulingStrategy(
                    worker_node_id, soft=False)).remote())
        assert head_node_id == ray.get(
            get_node_id.options(
                scheduling_strategy=NodeAffinitySchedulingStrategy(
                    head_node_id, soft=False)).remote())

        # Doesn't fail when the node doesn't exist since soft is true.
        ray.get(
            get_node_id.options(
                scheduling_strategy=NodeAffinitySchedulingStrategy(
                    ray.NodeID.from_random().hex(), soft=True)).remote())

        # Doesn't fail when the node is infeasible since soft is true.
        assert worker_node_id == ray.get(
            get_node_id.options(
                scheduling_strategy=NodeAffinitySchedulingStrategy(
                    head_node_id, soft=True),
                resources={
                    "worker": 1
                },
            ).remote())

        # Fail when the node doesn't exist.
        with pytest.raises(ray.exceptions.TaskUnschedulableError):
            ray.get(
                get_node_id.options(
                    scheduling_strategy=NodeAffinitySchedulingStrategy(
                        ray.NodeID.from_random().hex(), soft=False)).remote())

        # Fail when the node is infeasible.
        with pytest.raises(ray.exceptions.TaskUnschedulableError):
            ray.get(
                get_node_id.options(
                    scheduling_strategy=NodeAffinitySchedulingStrategy(
                        head_node_id, soft=False),
                    resources={
                        "not_exist": 1
                    },
                ).remote())

        crashed_worker_node = cluster.add_node(num_cpus=8,
                                               resources={"crashed_worker": 1})
        cluster.wait_for_nodes()
        crashed_worker_node_id = ray.get(
            get_node_id.options(num_cpus=0, resources={
                "crashed_worker": 1
            }).remote())

        @ray.remote(
            max_retries=-1,
            scheduling_strategy=NodeAffinitySchedulingStrategy(
                crashed_worker_node_id, soft=True),
        )
        def crashed_get_node_id():
            if ray.get_runtime_context().node_id == crashed_worker_node_id:
                internal_kv._internal_kv_put("crashed_get_node_id",
                                             "crashed_worker_node_id")
                while True:
                    time.sleep(1)
            else:
                return ray.get_runtime_context().node_id

        r = crashed_get_node_id.remote()
        while not internal_kv._internal_kv_exists("crashed_get_node_id"):
            time.sleep(0.1)
        cluster.remove_node(crashed_worker_node, allow_graceful=False)
        assert ray.get(r) in {head_node_id, worker_node_id}

        @ray.remote(num_cpus=1)
        class Actor:
            def get_node_id(self):
                return ray.get_runtime_context().node_id

        actor = Actor.options(
            scheduling_strategy=NodeAffinitySchedulingStrategy(
                worker_node_id, soft=False)).remote()
        assert worker_node_id == ray.get(actor.get_node_id.remote())

        actor = Actor.options(
            scheduling_strategy=NodeAffinitySchedulingStrategy(
                head_node_id, soft=False)).remote()
        assert head_node_id == ray.get(actor.get_node_id.remote())

        # Wait until the target node becomes available.
        worker_actor = Actor.options(resources={"worker": 1}).remote()
        assert worker_node_id == ray.get(worker_actor.get_node_id.remote())
        actor = Actor.options(
            scheduling_strategy=NodeAffinitySchedulingStrategy(worker_node_id,
                                                               soft=True),
            resources={
                "worker": 1
            },
        ).remote()
        del worker_actor
        assert worker_node_id == ray.get(actor.get_node_id.remote())

        # Doesn't fail when the node doesn't exist since soft is true.
        actor = Actor.options(
            scheduling_strategy=NodeAffinitySchedulingStrategy(
                ray.NodeID.from_random().hex(), soft=True)).remote()
        assert ray.get(actor.get_node_id.remote())

        # Doesn't fail when the node is infeasible since soft is true.
        actor = Actor.options(
            scheduling_strategy=NodeAffinitySchedulingStrategy(head_node_id,
                                                               soft=True),
            resources={
                "worker": 1
            },
        ).remote()
        assert worker_node_id == ray.get(actor.get_node_id.remote())

        # Fail when the node doesn't exist.
        with pytest.raises(ray.exceptions.ActorUnschedulableError):
            actor = Actor.options(
                scheduling_strategy=NodeAffinitySchedulingStrategy(
                    ray.NodeID.from_random().hex(), soft=False)).remote()
            ray.get(actor.get_node_id.remote())

        # Fail when the node is infeasible.
        with pytest.raises(ray.exceptions.ActorUnschedulableError):
            actor = Actor.options(
                scheduling_strategy=NodeAffinitySchedulingStrategy(
                    worker_node_id, soft=False),
                resources={
                    "not_exist": 1
                },
            ).remote()
            ray.get(actor.get_node_id.remote())
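To summarize the soft/hard distinction that Examples #9 and #10 exercise, here is a minimal, self-contained sketch assuming a fresh single-node Ray instance; where_am_i is an illustrative task name.

import ray
from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy

ray.init()

@ray.remote
def where_am_i():
    return ray.get_runtime_context().node_id.hex()

local_node_id = ray.get_runtime_context().node_id.hex()

# Hard affinity (soft=False): the task runs on the target node or fails if that
# node is dead or infeasible.
pinned = where_am_i.options(
    scheduling_strategy=NodeAffinitySchedulingStrategy(local_node_id, soft=False)
).remote()
assert ray.get(pinned) == local_node_id

# Soft affinity (soft=True): the target node is only a preference, so a random
# (nonexistent) node ID still lets the task fall back to any feasible node.
fallback = where_am_i.options(
    scheduling_strategy=NodeAffinitySchedulingStrategy(
        ray.NodeID.from_random().hex(), soft=True
    )
).remote()
assert ray.get(fallback) == local_node_id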