Example #1
    def make_local(cls, spec=None, cluster_kwargs=None, client_kwargs=None):
        """
        Spin up a local dask cluster

        interesting cluster_kwargs:
            threads_per_worker
            n_workers

        Returns
        -------
        DaskJobExecutor
            the connected JobExecutor
        """
        if spec is None:
            spec = cluster_spec(**detect())
        if client_kwargs is None:
            client_kwargs = {}
        if client_kwargs.get('set_as_default') is None:
            client_kwargs['set_as_default'] = False

        if cluster_kwargs is None:
            cluster_kwargs = {}
        if cluster_kwargs.get('silence_logs') is None:
            cluster_kwargs['silence_logs'] = logging.WARN

        cluster = dd.SpecCluster(workers=spec, **(cluster_kwargs or {}))
        client = dd.Client(cluster, **(client_kwargs or {}))

        return cls(client=client, is_local=True, lt_resources=True)
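
A minimal usage sketch, assuming the enclosing class is DaskJobExecutor (as the Returns section indicates); the silence_logs value is illustrative and is forwarded to distributed.SpecCluster:

import logging

# With spec=None, the worker spec is auto-detected via cluster_spec(**detect())
# and the Client is not set as the default Dask scheduler.
executor = DaskJobExecutor.make_local(
    cluster_kwargs={'silence_logs': logging.ERROR},
)
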
Example #2
def local_cluster_url():
    """
    Shared dask cluster, can be used repeatedly by different executors.

    This allows, for example, numba caching across tests without
    sharing the executor.
    """
    cluster_port = find_unused_port()
    devices = detect()
    spec = cluster_spec(
        # Only use at most 2 CPUs and 1 GPU
        cpus=devices['cpus'][:2],
        cudas=devices['cudas'][:1],
        has_cupy=devices['has_cupy'])

    cluster_kwargs = {
        'silence_logs': logging.WARN,
        'scheduler': {
            'cls': Scheduler,
            'options': {
                'port': cluster_port
            },
        },
    }

    cluster = dd.SpecCluster(workers=spec, **(cluster_kwargs or {}))

    yield 'tcp://localhost:%d' % cluster_port

    cluster.close()
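
A sketch of how the yielded scheduler address could be consumed, for instance when this generator is registered as a pytest fixture (the fixture registration itself is not shown in the excerpt; the test name is illustrative):

import distributed as dd

def test_uses_shared_cluster(local_cluster_url):
    # Each test connects its own short-lived Client to the shared scheduler,
    # so the cluster (and any numba caches on its workers) is reused across tests.
    with dd.Client(address=local_cluster_url, set_as_default=False) as client:
        assert len(client.scheduler_info()['workers']) > 0
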
Example #3
    def make_local(cls, spec=None, cluster_kwargs=None, client_kwargs=None):
        """
        Spin up a local dask cluster

        interesting cluster_kwargs:
            threads_per_worker
            n_workers

        Returns
        -------
        DaskJobExecutor
            the connected JobExecutor
        """

        # Distributed doesn't adjust the event loop policy when being run
        # from within pytest as of version 2.21.0. For that reason we
        # adjust the policy ourselves here.
        adjust_event_loop_policy()

        if spec is None:
            from libertem.utils.devices import detect
            spec = cluster_spec(**detect())
        if client_kwargs is None:
            client_kwargs = {}
        if client_kwargs.get('set_as_default') is None:
            client_kwargs['set_as_default'] = False

        if cluster_kwargs is None:
            cluster_kwargs = {}
        if cluster_kwargs.get('silence_logs') is None:
            cluster_kwargs['silence_logs'] = logging.WARN

        cluster = dd.SpecCluster(workers=spec, **(cluster_kwargs or {}))
        client = dd.Client(cluster, **(client_kwargs or {}))

        client.wait_for_workers(len(spec))

        return cls(client=client, is_local=True, lt_resources=True)
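
Compared to Example #1, this variant also blocks until every worker from the spec has registered. A sketch of a roughly equivalent manual check against a connected client, for illustration only (the helper name, timeout and polling interval are not from the original):

import time

def wait_for_worker_count(client, expected, timeout=30.0, interval=0.1):
    # Poll scheduler_info() until at least `expected` workers have registered,
    # similar in spirit to client.wait_for_workers(expected).
    deadline = time.monotonic() + timeout
    n_workers = 0
    while time.monotonic() < deadline:
        n_workers = len(client.scheduler_info()['workers'])
        if n_workers >= expected:
            return n_workers
        time.sleep(interval)
    raise TimeoutError(f"only {n_workers} of {expected} workers came up")
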
Example #4
    def make_local(cls,
                   spec: Optional[dict] = None,
                   cluster_kwargs: Optional[dict] = None,
                   client_kwargs: Optional[dict] = None,
                   preload: Optional[Tuple[str]] = None):
        """
        Spin up a local dask cluster

        Parameters
        ----------
        spec
            Dask cluster spec, see
            http://distributed.dask.org/en/stable/api.html#distributed.SpecCluster
            for more info.
            :func:`libertem.utils.devices.detect` detects devices that can be used
            with LiberTEM, and :func:`cluster_spec` creates a :code:`spec`
            with customized parameters.
        cluster_kwargs
            Passed to :class:`distributed.SpecCluster`.
        client_kwargs
            Passed to :class:`distributed.Client`. Pass
            :code:`client_kwargs={'set_as_default': True}` to set the Client as the
            default Dask scheduler.
        preload: Optional[Tuple[str]]
            Passed to :func:`cluster_spec` if :code:`spec` is :code:`None`.

        Returns
        -------
        DaskJobExecutor
            the connected JobExecutor
        """

        # Distributed doesn't adjust the event loop policy when being run
        # from within pytest as of version 2.21.0. For that reason we
        # adjust the policy ourselves here.
        adjust_event_loop_policy()

        if spec is None:
            from libertem.utils.devices import detect
            spec = cluster_spec(**detect(), preload=preload)
        else:
            if preload is not None:
                raise ValueError(
                    "Passing both spec and preload is not supported. "
                    "Instead, include preloading specification in the spec")
        if client_kwargs is None:
            client_kwargs = {}
        if client_kwargs.get('set_as_default') is None:
            client_kwargs['set_as_default'] = False

        if cluster_kwargs is None:
            cluster_kwargs = {}
        if cluster_kwargs.get('silence_logs') is None:
            cluster_kwargs['silence_logs'] = logging.WARN

        with set_num_threads_env(n=1):
            cluster = dd.SpecCluster(workers=spec, **(cluster_kwargs or {}))
            client = dd.Client(cluster, **(client_kwargs or {}))
            client.wait_for_workers(len(spec))

        is_local = not client_kwargs['set_as_default']

        return cls(client=client, is_local=is_local, lt_resources=True)
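
A usage sketch for this variant, assuming the same DaskJobExecutor class as above: because passing both spec and preload raises a ValueError, the preload entries go into the spec via cluster_spec, and setting set_as_default=True also makes is_local False (the preload entry shown is purely illustrative):

from libertem.utils.devices import detect

devices = detect()
# Include preloading in the spec rather than passing preload= alongside spec=
spec = cluster_spec(**devices, preload=('numpy',))
executor = DaskJobExecutor.make_local(
    spec=spec,
    client_kwargs={'set_as_default': True},  # registers the Client as the default Dask scheduler
)
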