# Example 1
    def ClusterConstructor(**kwargs):
        """Build an ``ipp.Cluster`` preconfigured for tests.

        Routes cluster logging to stdout at DEBUG level, fills in the
        test launcher classes / config as defaults, and registers a
        finalizer that stops the cluster when the test ends.
        """
        # verbose logging straight to the captured stdout
        logger = logging.getLogger(__file__)
        logger.setLevel(logging.DEBUG)
        logger.handlers = [logging.StreamHandler(sys.stdout)]
        kwargs['log'] = logger

        # defaults come from the enclosing fixture scope;
        # explicit kwargs always win over them
        kwargs.setdefault("controller", controller_launcher_class)
        kwargs.setdefault("engines", engine_launcher_class)
        cfg = kwargs.setdefault("config", cluster_config)
        cfg.EngineLauncher.engine_args = ['--log-level=10']
        cfg.ControllerLauncher.controller_args = ['--log-level=10']
        kwargs.setdefault("controller_args", ['--ping=250'])
        kwargs.setdefault("load_profile", False)

        cluster = ipp.Cluster(**kwargs)
        if not kwargs['load_profile']:
            # without profile loading, the config we passed must be used as-is
            assert cluster.config == cfg
        request.addfinalizer(cluster.stop_cluster_sync)
        return cluster
# Example 2
def test_ipcluster_clean(ipython_dir):
    """``ipcluster clean`` removes log, cluster, and connection files.

    Seeds the default profile with a log file, a cluster file, and a
    controller connection file, runs ``ipcluster clean`` as a
    subprocess, and verifies all three files are gone.
    """
    default_profile = ProfileDir.find_profile_dir_by_name(ipython_dir)
    # NOTE: removed unused local `default_profile_dir` (was never read)
    log_file = os.path.join(default_profile.log_dir, "test.log")
    with open(log_file, "w") as f:
        f.write("logsssss")
    cluster_file = os.path.join(default_profile.security_dir,
                                "cluster-abc.json")
    c = ipp.Cluster()
    with open(cluster_file, 'w') as f:
        json.dump(c.to_dict(), f)
    connection_file = os.path.join(default_profile.security_dir,
                                   "ipcontroller-client.json")
    with open(connection_file, 'w') as f:
        f.write("{}")
    check_call([sys.executable, "-m", "ipyparallel.cluster", "clean"])

    # every seeded file must have been removed by `clean`
    assert not os.path.exists(log_file)
    assert not os.path.exists(cluster_file)
    assert not os.path.exists(connection_file)
# Example 3
    def __init__(self, view=None, **kwargs):
        """Set up the backend, creating a client/cluster on demand.

        When *view* is None, connect to the default cluster — starting a
        new one if no cluster file exists — and build a load-balanced
        view over it. ``_cluster_owner`` / ``_client_owner`` record what
        this instance created so teardown can be scoped accordingly.
        """
        super().__init__(**kwargs)
        self._cluster_owner = False
        self._client_owner = False
        if view is None:
            # no view supplied: we own the client we are about to create
            self._client_owner = True
            try:
                # connect to the default, already-running cluster
                cluster = ipp.Cluster.from_file()
            except FileNotFoundError:
                # no cluster file: start a fresh cluster and own it
                # (other load errors intentionally propagate)
                cluster = self._cluster = ipp.Cluster()
                self._cluster_owner = True
                cluster.start_cluster_sync()
            else:
                # cluster is running; make sure it has engines, too
                if not cluster.engines:
                    cluster.start_engines_sync()
            client = cluster.connect_client_sync()
            client.wait_for_engines(cluster.n or 1)
            view = client.load_balanced_view()

            # Prefer cloudpickle (then dill) for serializing closures:
            # joblib tends to create callables the default pickler rejects.
            try:
                import cloudpickle  # noqa
            except ImportError:
                try:
                    import dill  # noqa
                except ImportError:
                    pass
                else:
                    view.client[:].use_dill()
            else:
                view.client[:].use_cloudpickle()
        self._view = view
# Example 4
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2021  <@localhost>
#
# Distributed under terms of the MIT license.

import time
import ipyparallel as ipp

task_durations = [1] * 25
# start a cluster and connect a client to it
with ipp.Cluster() as client:
    # a load-balanced view distributes tasks across engines
    lb_view = client.load_balanced_view()
    # queue one sleep per duration
    async_result = lb_view.map_async(time.sleep, task_durations)
    # block with a progress display until all tasks finish
    async_result.wait_interactive()
    # collect the actual results
    result = async_result.get()
# leaving the with-block shuts the cluster processes down
# Example 5
def test_ipcluster_start_stop(request, ipython_dir, daemonize):
    """Round-trip ``ipcluster start`` / ``ipcluster stop`` via the CLI.

    Starts a 2-engine cluster as a subprocess (optionally daemonized),
    connects a client and runs a trivial task, then stops the cluster
    via the CLI and checks ``ipcluster list`` no longer shows it.
    """
    default_profile = ProfileDir.find_profile_dir_by_name(ipython_dir)
    default_profile_dir = default_profile.location

    # cleanup the security directory to avoid leaking files from one test to the next
    def cleanup_security():
        for f in glob.glob(os.path.join(default_profile.security_dir,
                                        "*.json")):
            print(f"Cleaning up {f}")
            try:
                os.remove(f)
            except Exception as e:
                # best-effort cleanup; report but don't fail teardown
                print(f"Error removing {f}: {e}")

    request.addfinalizer(cleanup_security)

    n = 2
    start_args = ["-n", str(n)]
    if daemonize:
        start_args.append("--daemonize")
    start = Popen(
        [sys.executable, "-m", "ipyparallel.cluster", "start", "--debug"] +
        start_args)
    request.addfinalizer(start.terminate)
    if daemonize:
        # if daemonize, the start command should exit after launching
        start.wait(30)
    else:
        # wait for the cluster file to appear (or the process to die);
        # only needed when not daemonized
        cluster_file = ipp.Cluster(profile_dir=default_profile_dir,
                                   cluster_id="").cluster_file
        for _ in range(100):  # poll up to ~10s
            if os.path.isfile(cluster_file) or start.poll() is not None:
                break
            time.sleep(0.1)
        assert os.path.isfile(cluster_file)

    # list should show the running cluster (header + one row)
    out = ipcluster_list()
    assert len(out.splitlines()) == 2

    # cluster running, try to connect with default args
    cluster = ipp.Cluster.from_file(log_level=10)

    try:
        with cluster.connect_client_sync() as rc:
            rc.wait_for_engines(n=2, timeout=60)
            rc[:].apply_async(os.getpid).get(timeout=10)
    except Exception:
        # dump controller/engine output to aid debugging, then re-raise
        print("controller output")
        print(cluster.controller.get_output())
        print("engine output")
        for engine_set in cluster.engines.values():
            print(engine_set.get_output())
        raise

    # stop with ipcluster stop
    check_call([sys.executable, "-m", "ipyparallel.cluster", "stop"])
    # the start process should exit when the cluster stops
    start.wait(30)

    # and ipcluster list should return only the header line
    out = ipcluster_list()
    assert len(out.splitlines()) == 1

    # stop --all succeeds even if there's nothing to stop
    check_call([sys.executable, "-m", "ipyparallel.cluster", "stop", "--all"])
# Example 6
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2021  <@localhost>
#
# Distributed under terms of the MIT license.
import ipyparallel as ipp

# start cluster, connect client
# launch a 4-engine cluster and connect a client to it
with ipp.Cluster(n=4) as client:
    print(client)
    # direct view over every engine
    engines = client[:]
    print(engines)
    # run 5**2 + 12**2 on each engine
    async_result = engines.apply(lambda x, y: x**2 + y**2, 5, 12)
    print(async_result)
    # wait interactively for all outstanding work
    client.wait_interactive()
    results = async_result.get()
    print(results)
# results are in hand; the cluster has been shut down
# Example 7
#!/usr/bin/env python
import ipyparallel as ipp

# start a 4-engine cluster and block until a client is connected
rc = ipp.Cluster(n=4).start_and_connect_sync()
# make sure all 4 engines have registered before using them
rc.wait_for_engines(n=4)
print(rc.ids)
print(rc[:].apply_sync(lambda: "Hello, World"))
    comm = MPI.COMM_WORLD
    size = MPI.COMM_WORLD.Get_size()
    rank = MPI.COMM_WORLD.Get_rank()
    steps = 100000
    re = 1000
    base_lenght = 100
    uw = 0.1
    relaxation = (2 * re) / (6 * base_lenght * uw + re)
    process_info = slidingLidMPI.fill_mpi_struct_fields(
        rank, size, 2, 2, base_lenght, relaxation, steps, uw)
    # process_info.boundaries_info.apply_left = True
    # process_info.boundaries_info.apply_right = True
    # process_info.boundaries_info.apply_top = True
    # process_info.boundaries_info.apply_bottom = True
    slidingLidMPI.sliding_lid_mpi(process_info, comm)
    return f"{process_info}"


# Main caller
# request an MPI cluster with `cores` engines
with ipp.Cluster(engines='mpi', n=cores) as client:
    # a broadcast view is best suited for MPI-style computation
    bcast_view = client.broadcast_view()
    # run the simulation function on all engines in parallel
    results = bcast_view.apply_sync(test_4_simulation)
    # retrieve and print the per-engine results
    print("\n".join(results))
# at this point, the cluster processes have been shut down