Example #1
def test_backend_context_manager():
    all_test_backends = ["test_backend_%d" % i for i in range(3)]
    for test_backend in all_test_backends:
        register_parallel_backend(test_backend, FakeParallelBackend)
    all_backends = ["multiprocessing", "threading"] + all_test_backends

    try:
        assert_equal(_active_backend_type(), MultiprocessingBackend)
        # Check that it is possible to switch parallel backends sequentially
        for test_backend in all_backends:
            yield check_backend_context_manager, test_backend

        # The default backend is restored
        assert_equal(_active_backend_type(), MultiprocessingBackend)

        # Check that context manager switching is thread safe:
        Parallel(n_jobs=2, backend="threading")(
            delayed(check_backend_context_manager)(b) for b in all_backends
        )

        # The default backend is again restored
        assert_equal(_active_backend_type(), MultiprocessingBackend)
    finally:
        for backend_name in list(BACKENDS.keys()):
            if backend_name.startswith("test_"):
                del BACKENDS[backend_name]
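The FakeParallelBackend and check_backend_context_manager helpers referenced by these tests live elsewhere in joblib's test suite and are not shown here. A minimal, hypothetical stand-in for FakeParallelBackend, assuming joblib's SequentialBackend base class, might look like this (the real helper may differ):

# Hypothetical stand-in: reports the requested n_jobs but still runs tasks
# sequentially, so backend switching can be exercised without real workers.
from joblib._parallel_backends import SequentialBackend

class FakeParallelBackend(SequentialBackend):
    """Pretend to run concurrently while actually running sequentially."""

    def configure(self, n_jobs=1, parallel=None, **backend_args):
        self.n_jobs = self.effective_n_jobs(n_jobs)
        self.parallel = parallel
        return self.n_jobs

    def effective_n_jobs(self, n_jobs=1):
        # Report the requested value; no worker processes are spawned.
        return max(1, n_jobs)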
Example #2
def test_backend_context_manager():
    all_test_backends = ['test_backend_%d' % i for i in range(5)]
    for test_backend in all_test_backends:
        register_parallel_backend(test_backend, MyParallelBackend)

    try:
        assert_equal(parallel.get_default_backend(), 'multiprocessing')
        # Check that it is possible to switch parallel backends sequentially
        for test_backend in all_test_backends:
            check_backend_context_manager(test_backend)

        # The default backend is restored
        assert_equal(parallel.get_default_backend(), 'multiprocessing')

        # Check that context manager switching is thread safe:
        Parallel(n_jobs=2,
                 backend='threading')(delayed(check_backend_context_manager)(b)
                                      for b in all_test_backends)

        # The default backend is again restored
        assert_equal(parallel.get_default_backend(), 'multiprocessing')
    finally:
        for backend_name in list(BACKENDS.keys()):
            if backend_name.startswith('test_'):
                del BACKENDS[backend_name]
Example #3
def test_register_parallel_backend():
    try:
        register_parallel_backend("test_backend", FakeParallelBackend)
        assert "test_backend" in BACKENDS
        assert BACKENDS["test_backend"] == FakeParallelBackend
    finally:
        del BACKENDS["test_backend"]
Example #4
def test_backend_context_manager():
    all_test_backends = ['test_backend_%d' % i for i in range(3)]
    for test_backend in all_test_backends:
        register_parallel_backend(test_backend, FakeParallelBackend)
    all_backends = ['multiprocessing', 'threading'] + all_test_backends

    try:
        assert _active_backend_type() == MultiprocessingBackend
        # Check that it is possible to switch parallel backends sequentially
        for test_backend in all_backends:
            # TODO: parametrize this block later
            # yield check_backend_context_manager, test_backend
            check_backend_context_manager(test_backend)

        # The default backend is restored
        assert _active_backend_type() == MultiprocessingBackend

        # Check that context manager switching is thread safe:
        Parallel(n_jobs=2,
                 backend='threading')(delayed(check_backend_context_manager)(b)
                                      for b in all_backends)

        # The default backend is again restored
        assert _active_backend_type() == MultiprocessingBackend
    finally:
        for backend_name in list(BACKENDS.keys()):
            if backend_name.startswith('test_'):
                del BACKENDS[backend_name]
Example #5
def test_backend_context_manager():
    all_test_backends = ['test_backend_%d' % i for i in range(3)]
    for test_backend in all_test_backends:
        register_parallel_backend(test_backend, FakeParallelBackend)
    all_backends = ['multiprocessing', 'threading'] + all_test_backends

    try:
        assert _active_backend_type() == MultiprocessingBackend
        # Check that it is possible to switch parallel backends sequentially
        for test_backend in all_backends:
            # TODO: parametrize this block later
            # yield check_backend_context_manager, test_backend
            check_backend_context_manager(test_backend)

        # The default backend is restored
        assert _active_backend_type() == MultiprocessingBackend

        # Check that context manager switching is thread safe:
        Parallel(n_jobs=2, backend='threading')(
            delayed(check_backend_context_manager)(b)
            for b in all_backends)

        # The default backend is again restored
        assert _active_backend_type() == MultiprocessingBackend
    finally:
        for backend_name in list(BACKENDS.keys()):
            if backend_name.startswith('test_'):
                del BACKENDS[backend_name]
Example #6
def test_register_parallel_backend():
    try:
        register_parallel_backend("test_backend", FakeParallelBackend)
        assert "test_backend" in BACKENDS
        assert BACKENDS["test_backend"] == FakeParallelBackend
    finally:
        del BACKENDS["test_backend"]
Example #7
def test_register_parallel_backend():
    try:
        register_parallel_backend("test_backend", MyParallelBackend)
        assert_true("test_backend" in BACKENDS)
        assert_equal(BACKENDS["test_backend"], MyParallelBackend)
    finally:
        del BACKENDS["test_backend"]
Example #8
def test_register_parallel_backend():
    try:
        register_parallel_backend("test_backend", FakeParallelBackend)
        assert_true("test_backend" in BACKENDS)
        assert_equal(BACKENDS["test_backend"], FakeParallelBackend)
    finally:
        del BACKENDS["test_backend"]
Example #9
def register_lithops():
    """ Register Lithops Backend to be called with parallel_backend("lithops"). """
    try:
        from lithops.util.joblib.lithops_backend import LithopsBackend
        register_parallel_backend("lithops", LithopsBackend)
    except ImportError:
        msg = ("To use the Lithops backend you must install lithops.")
        raise ImportError(msg)
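Once register_lithops() has run, the backend can be selected through joblib's parallel_backend context manager. A minimal usage sketch, assuming lithops is installed and configured (actual scaling behaviour depends on the Lithops setup):

from math import sqrt
from joblib import Parallel, delayed, parallel_backend

register_lithops()  # helper above; raises ImportError if lithops is missing

with parallel_backend("lithops"):
    # Tasks inside this block are dispatched through the Lithops backend.
    results = Parallel(n_jobs=4)(delayed(sqrt)(i) for i in range(16))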
Example #10
def test_overwrite_default_backend():
    assert_equal(_active_backend_type(), MultiprocessingBackend)
    try:
        register_parallel_backend("threading", BACKENDS["threading"], make_default=True)
        assert_equal(_active_backend_type(), ThreadingBackend)
    finally:
        # Restore the global default manually
        parallel.DEFAULT_BACKEND = "multiprocessing"
    assert_equal(_active_backend_type(), MultiprocessingBackend)
Example #11
def test_overwrite_default_backend():
    assert _active_backend_type() == DefaultBackend
    try:
        register_parallel_backend("threading", BACKENDS["threading"],
                                  make_default=True)
        assert _active_backend_type() == ThreadingBackend
    finally:
        # Restore the global default manually
        parallel.DEFAULT_BACKEND = DEFAULT_BACKEND
    assert _active_backend_type() == DefaultBackend
Example #12
def register_ray():
    """ Register Ray Backend to be called with parallel_backend("ray"). """
    try:
        from ray.util.joblib.ray_backend import RayBackend
        register_parallel_backend("ray", RayBackend)
    except ImportError:
        msg = ("To use the ray backend you must install ray."
               "Try running 'pip install ray'."
               "See https://ray.readthedocs.io/en/latest/installation.html"
               "for more information.")
        raise ImportError(msg)
Example #13
def test_overwrite_default_backend():
    assert_equal(_active_backend_type(), MultiprocessingBackend)
    try:
        register_parallel_backend("threading",
                                  BACKENDS["threading"],
                                  make_default=True)
        assert_equal(_active_backend_type(), ThreadingBackend)
    finally:
        # Restore the global default manually
        parallel.DEFAULT_BACKEND = 'multiprocessing'
    assert_equal(_active_backend_type(), MultiprocessingBackend)
Example #14
def register():
    try:
        import sklearn
        if LooseVersion(sklearn.__version__) < LooseVersion('0.21'):
            warnings.warn(
                "Your sklearn version is < 0.21, but joblib-spark only supports "
                "sklearn >= 0.21. You can upgrade sklearn to a version >= 0.21 to "
                "make sklearn use the spark backend.")
    except ImportError:
        pass
    register_parallel_backend('spark', SparkDistributedBackend)
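In the joblibspark package this registration is exposed through register_spark(), and it expects a working Spark environment. A hedged usage sketch, assuming pyspark and joblibspark are installed and a SparkSession can be created:

from math import sqrt
from joblib import Parallel, delayed, parallel_backend
from joblibspark import register_spark  # documented joblibspark entry point

register_spark()  # registers the 'spark' backend as shown above

with parallel_backend("spark", n_jobs=3):
    # Tasks are executed on the Spark cluster instead of local workers.
    results = Parallel()(delayed(sqrt)(i) for i in range(9))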
Example #15
def test_overwrite_default_backend():
    assert_equal(parallel.get_default_backend(), 'multiprocessing')
    try:
        register_parallel_backend("threading",
                                  BACKENDS["threading"],
                                  make_default=True)
        assert_equal(parallel.get_default_backend(), 'threading')
    finally:
        # Restore the global default manually
        parallel.DEFAULT_BACKEND = 'multiprocessing'
    assert_equal(parallel.get_default_backend(), 'multiprocessing')
Example #16
def register(name='ipyparallel', make_default=False):
    """Register the default ipyparallel Client as a joblib backend
    
    See joblib.parallel.register_parallel_backend for details.
    """
    return register_parallel_backend(name,
                                     IPythonParallelBackend,
                                     make_default=make_default)
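A usage sketch for the helper above, assuming an IPython parallel cluster is already running so the default Client can connect to its engines:

from joblib import Parallel, delayed, parallel_backend

register()  # registers the 'ipyparallel' backend defined above

with parallel_backend("ipyparallel"):
    # Work items are shipped to the running IPython engines.
    totals = Parallel(n_jobs=-1)(delayed(sum)(range(n)) for n in range(10))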
Example #17
def test_retrieval_context():
    import contextlib

    class MyBackend(ThreadingBackend):
        i = 0

        @contextlib.contextmanager
        def retrieval_context(self):
            self.i += 1
            yield

    register_parallel_backend("retrieval", MyBackend)

    def nested_call(n):
        return Parallel(n_jobs=2)(delayed(id)(i) for i in range(n))

    with parallel_backend("retrieval") as (ba, _):
        Parallel(n_jobs=2)(delayed(nested_call, check_pickle=False)(i)
                           for i in range(5))
        assert ba.i == 1
Example #18
def test_parameterized_backend_context_manager():
    register_parallel_backend('param_backend', ParameterizedParallelBackend)
    try:
        assert _active_backend_type() == MultiprocessingBackend

        with parallel_backend('param_backend', param=42, n_jobs=3):
            active_backend, active_n_jobs = parallel.get_active_backend()
            assert type(active_backend) == ParameterizedParallelBackend
            assert active_backend.param == 42
            assert active_n_jobs == 3
            p = Parallel()
            assert p.n_jobs == 3
            assert p._backend is active_backend
            results = p(delayed(sqrt)(i) for i in range(5))
        assert results == [sqrt(i) for i in range(5)]

        # The default backend is again restored
        assert _active_backend_type() == MultiprocessingBackend
    finally:
        del BACKENDS['param_backend']
Example #19
def test_parameterized_backend_context_manager():
    register_parallel_backend('param_backend', ParameterizedParallelBackend)
    try:
        assert _active_backend_type() == MultiprocessingBackend

        with parallel_backend('param_backend', param=42, n_jobs=3):
            active_backend, active_n_jobs = parallel.get_active_backend()
            assert type(active_backend) == ParameterizedParallelBackend
            assert active_backend.param == 42
            assert active_n_jobs == 3
            p = Parallel()
            assert p.n_jobs == 3
            assert p._backend is active_backend
            results = p(delayed(sqrt)(i) for i in range(5))
        assert results == [sqrt(i) for i in range(5)]

        # The default backend is again restored
        assert _active_backend_type() == MultiprocessingBackend
    finally:
        del BACKENDS['param_backend']
Example #20
def test_parameterized_backend_context_manager():
    register_parallel_backend("param_backend", ParameterizedParallelBackend)
    try:
        assert_equal(_active_backend_type(), MultiprocessingBackend)

        with parallel_backend("param_backend", param=42, n_jobs=3):
            active_backend, active_n_jobs = parallel.get_active_backend()
            assert_equal(type(active_backend), ParameterizedParallelBackend)
            assert_equal(active_backend.param, 42)
            assert_equal(active_n_jobs, 3)
            p = Parallel()
            assert_equal(p.n_jobs, 3)
            assert_true(p._backend is active_backend)
            results = p(delayed(sqrt)(i) for i in range(5))
        assert_equal(results, [sqrt(i) for i in range(5)])

        # The default backend is again restored
        assert_equal(_active_backend_type(), MultiprocessingBackend)
    finally:
        del BACKENDS["param_backend"]
Example #21
def test_parameterized_backend_context_manager():
    register_parallel_backend('param_backend', ParameterizedParallelBackend)
    try:
        assert_equal(_active_backend_type(), MultiprocessingBackend)

        with parallel_backend('param_backend', param=42, n_jobs=3):
            active_backend, active_n_jobs = parallel.get_active_backend()
            assert_equal(type(active_backend), ParameterizedParallelBackend)
            assert_equal(active_backend.param, 42)
            assert_equal(active_n_jobs, 3)
            p = Parallel()
            assert_equal(p.n_jobs, 3)
            assert_true(p._backend is active_backend)
            results = p(delayed(sqrt)(i) for i in range(5))
        assert_equal(results, [sqrt(i) for i in range(5)])

        # The default backend is again restored
        assert_equal(_active_backend_type(), MultiprocessingBackend)
    finally:
        del BACKENDS['param_backend']
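The ParameterizedParallelBackend helper exercised by these tests is defined elsewhere in the test module. A hypothetical stand-in, assuming joblib's SequentialBackend, only needs to record the extra constructor parameter:

from joblib._parallel_backends import SequentialBackend

class ParameterizedParallelBackend(SequentialBackend):
    """Sequential backend that keeps the extra 'param' passed at creation."""

    def __init__(self, param=None, **backend_args):
        super().__init__(**backend_args)
        if param is None:
            raise ValueError("param should not be None")
        self.param = param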
Example #22
    def register_joblib_backend(self, name='ipyparallel', make_default=False):
        """Register this View as a joblib parallel backend

        To make this the default backend, set make_default=True.

        Use with::

            p = Parallel(backend='ipyparallel')
            ...

        See joblib docs for details

        Requires joblib >= 0.10

        .. versionadded:: 5.1
        """
        from joblib.parallel import register_parallel_backend
        from ._joblib import IPythonParallelBackend
        register_parallel_backend(name,
            lambda: IPythonParallelBackend(view=self),
            make_default=make_default)
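A sketch of how this method is typically called, assuming an ipyparallel cluster is already up and reachable from the client:

import ipyparallel as ipp
from joblib import Parallel, delayed

rc = ipp.Client()               # connect to the running cluster
view = rc.load_balanced_view()  # any View can register itself
view.register_joblib_backend()  # registers 'ipyparallel' as shown above

results = Parallel(backend="ipyparallel", n_jobs=len(rc))(
    delayed(pow)(i, 2) for i in range(8))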
Example #23
    def register_joblib_backend(self, name='ipyparallel', make_default=False):
        """Register this View as a joblib parallel backend

        To make this the default backend, set make_default=True.

        Use with::

            p = Parallel(backend='ipyparallel')
            ...

        See joblib docs for details

        Requires joblib >= 0.10

        .. versionadded:: 5.1
        """
        from joblib.parallel import register_parallel_backend
        from ._joblib import IPythonParallelBackend
        register_parallel_backend(name,
                                  lambda: IPythonParallelBackend(view=self),
                                  make_default=make_default)
Example #24
def register_mars_backend():
    register_parallel_backend('mars', MarsDistributedBackend)
Example #25
def register():
    """
    Register the Flink backend with joblib, to be called with parallel_backend('flink').
    """

    register_parallel_backend('flink', FlinkBackend)
Example #26
def register(name='ipyparallel', make_default=False):
    """Register the default ipyparallel Client as a joblib backend
    
    See joblib.parallel.register_parallel_backend for details.
    """
    return register_parallel_backend(name, IPythonParallelBackend, make_default=make_default)
Example #27
class DistributedBackend(ParallelBackendBase, AutoBatchingMixin):
    MIN_IDEAL_BATCH_DURATION = 0.2
    MAX_IDEAL_BATCH_DURATION = 1.0

    def __init__(self, scheduler_host='127.0.0.1:8786', loop=None):
        self.executor = Executor(scheduler_host, loop=loop)

    def configure(self, n_jobs=1, parallel=None, **backend_args):
        return self.effective_n_jobs(n_jobs)

    def effective_n_jobs(self, n_jobs=1):
        return sum(self.executor.ncores().values())

    def apply_async(self, func, *args, **kwargs):
        callback = kwargs.pop('callback', None)
        kwargs['pure'] = False
        future = self.executor.submit(func, *args, **kwargs)

        @gen.coroutine
        def callback_wrapper():
            result = yield _wait([future])
            callback(result)  # gets called in separate thread

        self.executor.loop.add_callback(callback_wrapper)

        future.get = future.result  # monkey patch to achieve AsyncResult API
        return future


register_parallel_backend('distributed', DistributedBackend)
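This snippet predates the Executor-to-Client rename in distributed, so treat it as historical. A hedged usage sketch, assuming a scheduler is listening on the given address and relying on parallel_backend forwarding keyword arguments to DistributedBackend.__init__:

from math import sqrt
from joblib import Parallel, delayed, parallel_backend

with parallel_backend("distributed", scheduler_host="127.0.0.1:8786"):
    # Tasks are submitted to the dask-distributed scheduler given above.
    roots = Parallel(n_jobs=-1)(delayed(sqrt)(i) for i in range(32))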
Example #28
def register_lithops():
    """ Register Lithops Backend to be called with parallel_backend("lithops"). """
    register_parallel_backend("lithops", LithopsBackend)