Exemplo n.º 1
0
def request_resources(num_cpus: Optional[int] = None,
                      bundles: Optional[List[dict]] = None) -> None:
    """Ask the autoscaler to provision capacity for the given request.

    Scaling starts immediately and skips the usual upscaling rate limits.
    The request is evaluated against resources that are already in use.

    As an illustration: after ``request_resources(num_cpus=100)`` with 45
    single-CPU tasks already running, the cluster grows until 100 tasks can
    execute concurrently — not until 145 can.

    The request is advisory only; the resulting cluster size may deviate
    somewhat from the target because of the internal bin packing algorithm
    and the configured maximum worker count.

    Args:
        num_cpus (int): Target number of available CPUs. The request
            persists until a later request_resources() call replaces it.
        bundles (List[ResourceDict]): Set of resource shapes the cluster
            must be able to fit. The request persists until a later
            request_resources() call replaces it.

    Examples:
        >>> # Request 1000 CPUs.
        >>> request_resources(num_cpus=1000)
        >>> # Request 64 CPUs and also fit a 1-GPU/4-CPU task.
        >>> request_resources(num_cpus=64, bundles=[{"GPU": 1, "CPU": 4}])
        >>> # Same as requesting num_cpus=3.
        >>> request_resources(bundles=[{"CPU": 1}, {"CPU": 1}, {"CPU": 1}])
    """
    # Forward the hint straight to the autoscaler command layer.
    return commands.request_resources(num_cpus, bundles)
Exemplo n.º 2
0
def request_resources(num_cpus: Optional[int] = None,
                      bundles: Optional[List[dict]] = None) -> None:
    """Command the autoscaler to scale to accommodate the specified requests.

    The cluster will immediately attempt to scale to accommodate the requested
    resources, bypassing normal upscaling speed constraints. This takes into
    account existing resource usage.

    For example, suppose you call ``request_resources(num_cpus=100)`` and
    there are 45 currently running tasks, each requiring 1 CPU. Then, enough
    nodes will be added so up to 100 tasks can run concurrently. It does
    **not** add enough nodes so that 145 tasks can run.

    This call is only a hint to the autoscaler. The actual resulting cluster
    size may be slightly larger or smaller than expected depending on the
    internal bin packing algorithm and max worker count restrictions.

    Args:
        num_cpus: Scale the cluster to ensure this number of CPUs are
            available. This request is persistent until another call to
            request_resources() is made to override.
        bundles (List[ResourceDict]): Scale the cluster to ensure this set of
            resource shapes can fit. This request is persistent until another
            call to request_resources() is made to override.

    Raises:
        TypeError: If ``num_cpus`` is not an int, ``bundles`` is not a list,
            an element of ``bundles`` is not a dict, or a bundle contains a
            non-str key or non-int value.

    Examples:
        >>> from ray.autoscaler.sdk import request_resources
        >>> # Request 1000 CPUs.
        >>> request_resources(num_cpus=1000) # doctest: +SKIP
        >>> # Request 64 CPUs and also fit a 1-GPU/4-CPU task.
        >>> request_resources( # doctest: +SKIP
        ...     num_cpus=64, bundles=[{"GPU": 1, "CPU": 4}])
        >>> # Same as requesting num_cpus=3.
        >>> request_resources( # doctest: +SKIP
        ...     bundles=[{"CPU": 1}, {"CPU": 1}, {"CPU": 1}])
    """
    if num_cpus is not None and not isinstance(num_cpus, int):
        raise TypeError("num_cpus should be of type int.")
    if bundles is not None:
        # Validate eagerly with guard clauses so a malformed request fails
        # here with a clear TypeError rather than deep inside the autoscaler.
        # Note: isinstance checks use the builtin list/dict, not the typing
        # aliases, which are not reliable isinstance targets.
        if not isinstance(bundles, list):
            raise TypeError("bundles should be of type List")
        for bundle in bundles:
            if not isinstance(bundle, dict):
                raise TypeError("each bundle should be a Dict.")
            for key, value in bundle.items():
                if not (isinstance(key, str) and isinstance(value, int)):
                    raise TypeError(
                        "each bundle key should be str and value as int.")

    return commands.request_resources(num_cpus, bundles)
Exemplo n.º 3
0
Arquivo: sdk.py Projeto: yynst2/ray
def request_resources(num_cpus: Optional[int] = None,
                      bundles: Optional[List[dict]] = None):
    """Remotely request some CPU or GPU resources from the autoscaler.

    This function is to be called e.g. on a node before submitting a bunch of
    ray.remote calls to ensure that resources rapidly become available.

    This function is EXPERIMENTAL.

    Args:
        num_cpus: The number of CPU cores to request.
        bundles: List of resource dicts (e.g., {"CPU": 1}). This
            only has an effect if you've configured `available_node_types`
            in your cluster config.
    """
    return commands.request_resources(num_cpus, bundles)
Exemplo n.º 4
0
def request_resources(num_cpus: Optional[int] = None,
                      bundles: Optional[List[dict]] = None) -> None:
    """Remotely request some CPU or GPU resources from the autoscaler.

    Typical usage is on a node, just before submitting a batch of
    ray.remote calls, so that the needed capacity becomes available quickly.

    Args:
        num_cpus (int): Grow the cluster until this many CPUs are
            available. The request stays in effect until the next
            request_resources() call.
        bundles (List[ResourceDict]): Grow the cluster until this set of
            resource shapes fits. The request stays in effect until the
            next request_resources() call.

    Examples:
        >>> # Request 1000 CPUs.
        >>> request_resources(num_cpus=1000)
        >>> # Request 64 CPUs and also fit a 1-GPU/4-CPU task.
        >>> request_resources(num_cpus=64, bundles=[{"GPU": 1, "CPU": 4}])
        >>> # Same as requesting num_cpus=3.
        >>> request_resources(bundles=[{"CPU": 1}, {"CPU": 1}, {"CPU": 1}])
    """
    # Delegate to the autoscaler command layer unchanged.
    return commands.request_resources(num_cpus, bundles)