Example #1
import sys

from ipyparallel import Client


def _add_folder_to_ipython_engines_path(
        client: Client, folder: str) -> None:  # pragma: no cover
    """
    Add a folder to sys.path of each ipython engine.

    The list of engines is obtained as a direct view from 'client'.

    This will also add the folder to the local python path.

    Parameters
    ----------
    client : Client
        The client from which we will get a direct view to access the
        engines.
    folder : str
        The folder to be added to the python path at each engine.
    """
    # Add the folder to the python path of the main application
    sys.path.append(folder)

    # We create a direct view to run code on all engines
    dview = client.direct_view()
    # Reset the engines so that we don't have variables there from last
    # computations
    dview.execute('%reset')
    dview.execute('import sys')
    # Add the folder to the python path of each engine. We use
    # block=True to ensure that all engines have modified their
    # path to include the folder with the simulator before we
    # continue.
    dview.execute('sys.path.append("{0}")'.format(folder), block=True)
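
A minimal usage sketch for the helper above (hedged: it assumes an ipcluster is already running under the default profile, and the folder path is hypothetical):

# Hypothetical usage of _add_folder_to_ipython_engines_path
client = Client()  # connect to the running ipcluster
_add_folder_to_ipython_engines_path(client, "/path/to/simulator")
# Both the local process and every engine now have the folder on sys.path.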
Example #2
def simulate_general(runner, results_filename):
    """
    Function with the general code to simulate the MIMO schemes.
    """

    # xxxxxxxxxx Print the simulation parameters xxxxxxxxxxxxxxxxxxxxxxxxxx
    pprint(runner.params.parameters)
    print("MIMO Scheme: {0}".format(runner.mimo_object.__class__.__name__))
    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

    # xxxxx Replace any parameter mention in results_filename xxxxxxxxxxxxx
    runner.set_results_filename(results_filename)
    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

    # xxxxxxxxxx Perform the simulation xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    # The simulation will be run either in parallel or serially, depending
    # on whether the IPython engines are running or not.
    run_in_parallel = True
    # noinspection PyBroadException
    try:
        # If we can get an IPython view that means that the IPython engines
        # are running. In that case we will perform the simulation in
        # parallel
        from ipyparallel import Client
        cl = Client()
        # We create a direct view to run code on all engines
        dview = cl.direct_view()

        # Reset the engines so that we don't have variables there from last
        # computations
        dview.execute('%reset')

        dview.execute('import sys')
        # We use block=True to ensure that all engines have modified their
        # path to include the folder with the simulator before we create
        # the load balanced view in the following.
        dview.execute('sys.path.append("{0}")'.format(parent_dir), block=True)

        # But for the actual simulation we are better off using a load
        # balanced view
        lview = cl.load_balanced_view()
    except Exception:  # pylint: disable=W0703
        # If we can't get an IPython view then we will perform the
        # simulation serially
        run_in_parallel = False

    if run_in_parallel:
        print("-----> Simulation will be run in Parallel")
        runner.simulate_in_parallel(lview)
    else:
        print("-----> Simulation will be run serially")
        runner.simulate()
    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

    print("Runned iterations: {0}".format(runner.runned_reps))
    print("Elapsed Time: {0}".format(runner.elapsed_time))
    print("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n")

    return runner.results, runner.results_filename
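
A hedged call-site sketch for simulate_general. The runner is assumed to be a pyphysim-style SimulationRunner subclass; the class name and the filename template (with parameter placeholders) are hypothetical:

# Hypothetical usage of simulate_general
runner = MimoSimulationRunner()  # hypothetical SimulationRunner subclass
results, results_filename = simulate_general(
    runner, "mimo_results_{M}_{SNR}")
print("Results saved to {0}".format(results_filename))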
Example #3
from copy import deepcopy

import numpy as np
from ipyparallel import Client


def launch_ipcluster_dv(profile="default",
                        targets="all",
                        block=True,
                        max_engines=None):
    # connect to the running ipcluster
    rc = Client(profile=profile)

    # print ipcluster information
    n_proc = len(rc.ids)
    if targets == "all":
        targets = rc.ids

    dv = rc.direct_view(targets=targets)

    # check number of engines
    # print(rc.ids, dv.targets, targets, max_engines)
    if max_engines is not None:
        if len(dv.targets) > max_engines:
            targets = deepcopy(dv.targets)
            np.random.shuffle(targets)
            targets = targets[:max_engines]
            targets.sort()

            dv = rc.direct_view(targets=targets)

    print("===================================================")
    print("@Slam: ipcluster[{}, n_engines={}/{}]".format(
        profile, len(dv.targets), n_proc))
    print("---------------------------------------------------")

    dv.block = block

    # import basic modules in ipcluster
    dv.execute("import os")
    dv.execute("import numpy as np")
    dv.execute("from joblib import Parallel, delayed, dump, load")

    # print host information
    dv.execute("host_names = os.uname()[1]").get()
    u_host_names, u_counts = np.unique(dv["host_names"], return_counts=True)
    for i in range(len(u_counts)):
        print("host: {} x {}".format(u_host_names[i], u_counts[i]))
    print("===================================================")

    return dv
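
A short usage sketch for launch_ipcluster_dv (hedged: it assumes an ipcluster with the requested profile is already up; the mapped lambda is purely illustrative):

# Hypothetical usage of launch_ipcluster_dv
dv = launch_ipcluster_dv(profile="default", max_engines=4)
# dv.block is True by default here, so map returns results directly.
squares = dv.map(lambda x: x * x, range(16))
print(squares)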
Example #4
from ipyparallel import Client


def setup_parallel(dbname):
    c = Client()
    dview = c.direct_view()
    dview.push({'dbname': str(dbname)})
    # dview.push({'remove_duplicates_from_image_name_data':
    #             remove_duplicates_from_image_name_data,
    #             'get_temp_fname': get_temp_fname,
    #             'dbname': dbname})
    lbview = c.load_balanced_view()
    return lbview
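
A hedged usage sketch for setup_parallel (it assumes an ipcluster is already running; process_image and image_names are hypothetical stand-ins for the real work function and its inputs):

# Hypothetical usage of setup_parallel
lbview = setup_parallel("my_database")
async_result = lbview.map_async(process_image, image_names)
async_result.wait_interactive()  # show progress while engines work
results = async_result.get()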
Example #6
from numpy import array, nan, percentile, savez

from ipyparallel import Client

from .adf_simulation import adf_simulation

# Number of repetitions
EX_NUM = 500
# Number of simulations per exercise
EX_SIZE = 200000
# Approximately controls memory use, in MiB
MAX_MEMORY_SIZE = 100

rc = Client()
dview = rc.direct_view()
with dview.sync_imports():
    from numpy import arange, zeros
    from numpy.random import RandomState


def wrapper(n, trend, b, seed=0):
    """
    Wraps and blocks the main simulation so that the maximum amount of memory
    can be controlled on multi processor systems when executing in parallel
    """
    rng = RandomState()
    rng.seed(seed)
    remaining = b
    res = zeros(b)
    finished = 0
Example #7
class Snudda(object):

    ############################################################################

    def __init__(self, network_path):

        self.network_path = network_path
        self.d_view = None
        self.lb_view = None
        self.rc = None
        self.slurm_id = 0

        # Add current dir to python path
        sys.path.append(os.getcwd())

        self.start = timeit.default_timer()

    ############################################################################

    @staticmethod
    def help_info(args):
        from snudda.help import snudda_help_text

    ############################################################################

    def init_config(self, args):
        # self.networkPath = args.path
        print("Creating config file")
        print(f"Network path: {self.network_path}")

        assert args.size is not None, "You need to specify --size when initialising config for the network"

        from snudda.init.init import SnuddaInit
        struct_def = {
            "Striatum": args.size,
            "GPe": 0,
            "GPi": 0,
            "SNr": 0,
            "STN": 0,
            "Cortex": 0,
            "Thalamus": 0
        }
        # Cortex and thalamus axons disabled right now, set to 1 to include one

        if not args.overwrite:
            assert not os.path.exists(self.network_path), \
                "Network path {self.network_path} already exists (aborting to prevent accidental overwriting)"

        self.make_dir_if_needed(self.network_path)

        random_seed = args.randomseed

        config_file = os.path.join(self.network_path, "network-config.json")
        SnuddaInit(struct_def=struct_def,
                   config_file=config_file,
                   random_seed=random_seed)

        if args.size > 1e5:
            print(
                f"Make sure there is enough disk space in {self.network_path}")
            print("Large networks take up ALOT of space")

    ############################################################################

    def place_neurons(self, args):
        # self.networkPath = args.path
        print("Placing neurons")
        print(f"Network path: {self.network_path}")

        config_file = os.path.join(self.network_path, "network-config.json")
        position_file = os.path.join(self.network_path,
                                     "network-neuron-positions.hdf5")
        log_file_name = os.path.join(self.network_path, "log",
                                     "logFile-place-neurons.txt")

        random_seed = args.randomseed

        self.setup_log_file(log_file_name)  # sets self.logfile

        if args.parallel:
            self.setup_parallel()  # sets self.d_view and self.lb_view

        from snudda.place.place import SnuddaPlace

        if args.h5legacy:
            h5libver = "earliest"
        else:
            h5libver = "latest"  # default

        sp = SnuddaPlace(config_file=config_file,
                         log_file=self.logfile,
                         verbose=args.verbose,
                         d_view=self.d_view,
                         h5libver=h5libver,
                         raytrace_borders=args.raytrace_borders,
                         random_seed=random_seed)

        sp.parse_config()
        sp.write_data(position_file)

        self.stop_parallel()
        self.close_log_file()

    ############################################################################

    def touch_detection(self, args):
        # self.networkPath = args.path
        print("Touch detection")
        print("Network path: " + str(self.network_path))

        if args.hvsize is not None:
            hyper_voxel_size = int(args.hvsize)
        else:
            hyper_voxel_size = 100

        if args.volumeID is not None:
            volume_id = args.volumeID
        else:
            volume_id = None

        log_dir = os.path.join(self.network_path, "log")
        if not os.path.exists(log_dir):
            print(f"Creating directory {log_dir}")
            os.makedirs(log_dir, exist_ok=True)

        config_file = os.path.join(self.network_path, "network-config.json")
        position_file = os.path.join(self.network_path,
                                     "network-neuron-positions.hdf5")
        log_filename = os.path.join(self.network_path, "log",
                                    "logFile-touch-detection.txt")
        save_file = os.path.join(self.network_path, "voxels",
                                 "network-putative-synapses.hdf5")

        random_seed = args.randomseed

        voxel_dir = os.path.join(self.network_path, "voxels")
        self.make_dir_if_needed(voxel_dir)

        self.setup_log_file(log_filename)  # sets self.logfile

        if args.parallel:
            self.setup_parallel()  # sets self.d_view and self.lb_view

        if args.h5legacy:
            h5libver = "earliest"
        else:
            h5libver = "latest"  # default

        from snudda.detect.detect import SnuddaDetect

        # You can now setup SnuddaDetect with only network_path and it will use default values
        # for config_file, position_file, logfile, save_file
        sd = SnuddaDetect(config_file=config_file,
                          position_file=position_file,
                          logfile=self.logfile,
                          save_file=save_file,
                          slurm_id=self.slurm_id,
                          volume_id=volume_id,
                          rc=self.rc,
                          hyper_voxel_size=hyper_voxel_size,
                          h5libver=h5libver,
                          random_seed=random_seed,
                          verbose=args.verbose)

        if args.cont:
            # Continue previous run
            print("Continuing previous touch detection")
            sd.detect(restart_detection_flag=False)
        else:
            sd.detect(restart_detection_flag=True)

        # Also run SnuddaProject to handle projections between volumes

        from snudda.detect.project import SnuddaProject

        sp = SnuddaProject(network_path=self.network_path)
        sp.project()
        sp.write()

        self.stop_parallel()
        self.close_log_file()

    ############################################################################

    def prune_synapses(self, args):
        # self.networkPath = args.path
        print("Prune synapses")
        print("Network path: " + str(self.network_path))

        from snudda.detect.prune import SnuddaPrune

        log_filename = os.path.join(self.network_path, "log",
                                    "logFile-synapse-pruning.txt")

        random_seed = args.randomseed

        self.setup_log_file(log_filename)  # sets self.logfile

        if args.parallel:
            self.setup_parallel()  # sets self.d_view and self.lb_view

        # Optionally set this
        scratch_path = None

        if args.merge_only:
            pre_merge_only = True
        else:
            pre_merge_only = False

        print(f"preMergeOnly : {pre_merge_only}")

        if args.h5legacy:
            h5libver = "earliest"
        else:
            h5libver = "latest"  # default

        sp = SnuddaPrune(network_path=self.network_path,
                         logfile=self.logfile,
                         logfile_name=log_filename,
                         config_file=args.config_file,
                         d_view=self.d_view,
                         lb_view=self.lb_view,
                         scratch_path=scratch_path,
                         h5libver=h5libver,
                         random_seed=random_seed,
                         verbose=args.verbose)

        sp.prune(pre_merge_only=pre_merge_only)

        self.stop_parallel()
        self.close_log_file()

    ############################################################################

    def setup_input(self, args):

        from snudda.input.input import SnuddaInput

        print("Setting up inputs, assuming input.json exists")
        log_filename = os.path.join(self.network_path, "log",
                                    "logFile-setup-input.log")
        self.setup_log_file(log_filename)  # sets self.logfile

        if args.parallel:
            self.setup_parallel()  # sets self.d_view and self.lb_view

        if "input" in args and args.input:
            input_config = args.input
        else:
            input_config = os.path.join(self.network_path, "input.json")

        if not snudda_isfile(input_config):
            print(f"Missing input config file: {input_config}")
            return

        if args.network_file:
            network_file = args.network_file
        else:
            network_file = os.path.join(self.network_path,
                                        "network-synapses.hdf5")

        if args.input_file:
            spike_file = args.input_file
        else:
            spike_file = os.path.join(self.network_path, "input-spikes.hdf5")

        if args.time:
            input_time = args.time
        else:
            input_time = None

        random_seed = args.randomseed

        if args.h5legacy:
            h5libver = "earliest"
        else:
            h5libver = "latest"  # default

        print(f"Writing input spikes to {spike_file}")

        si = SnuddaInput(input_config_file=input_config,
                         hdf5_network_file=network_file,
                         spike_data_filename=spike_file,
                         time=input_time,
                         logfile=self.logfile,
                         rc=self.rc,
                         random_seed=random_seed,
                         h5libver=h5libver,
                         verbose=args.verbose)
        si.generate()

        self.stop_parallel()
        self.close_log_file()

    ############################################################################

    def export_to_SONATA(self, args):

        assert False, "Old export to SONATA borken, fixme!"
        # TODO: Fix this
        from snudda.ConvertNetwork import ConvertNetwork

        print("Exporting to SONATA format")
        print(f"Network path: {self.network_path}")

        if args.network_file:
            network_file = args.network_file
        else:
            network_file = os.path.join(self.network_path,
                                        "network-synapses.hdf5")

        if args.input_file:
            input_file = args.input_file
        else:
            input_file = os.path.join(self.network_path, "input-spikes.hdf5")

        out_dir = os.path.join(self.network_path, "SONATA")

        cn = ConvertNetwork(networkFile=network_file,
                            inputFile=input_file,
                            outDir=out_dir)

    ############################################################################

    def simulate(self, args):

        start = timeit.default_timer()

        from snudda.simulate.simulate import SnuddaSimulate

        if args.network_file:
            network_file = args.network_file
        else:
            network_file = os.path.join(self.network_path,
                                        "network-synapses.hdf5")

        if args.input_file:
            input_file = args.input_file
        else:
            input_file = os.path.join(self.network_path, "input-spikes.hdf5")

        self.make_dir_if_needed(os.path.join(self.network_path, "simulation"))

        print(f"Using input file {input_file}")

        # nWorkers = args.ncores
        # print("Using " + str(nWorkers) + " workers for neuron")

        # Problems with nested symbolic links when the second one is a relative
        # path going beyond the original base path
        if args.mech_dir is None:
            # mech_dir = os.path.join(os.path.dirname(network_file), "mechanisms")

            # TODO!!! problem with paths, testing to create mechanism dir in current dir
            mech_dir = "mechanisms"

            if not os.path.exists(mech_dir):
                m_dir = os.path.realpath(
                    os.path.join(os.path.dirname(__file__), "data",
                                 "neurons", "mechanisms"))
                try:
                    os.symlink(m_dir, mech_dir)
                except OSError:
                    print(f"Failed to create symlink {mech_dir} -> {m_dir}")
        else:
            mech_dir = args.mech_dir

        # !!! These are saved in current directory x86_64
        # --- problem since nrnivmodl seems to want a relative path...

        make_mods_str = f"nrnivmodl {mech_dir}"

        # x86_64 on linux, nrnmech.dll on windows...
        if not os.path.exists("x86_64") and not os.path.exists("nrnmech.dll"):
            print(f"Please first run: {make_mods_str}")
            sys.exit(-1)
            # I was having problems when running nrnivmodl in the script, but
            # running it manually in bash works... WHY?!!

        # os.system(makeModsStr)

        save_dir = os.path.join(os.path.dirname(network_file), "simulation")

        if not os.path.exists(save_dir):
            print(f"Creating directory {save_dir}")
            os.makedirs(save_dir, exist_ok=True)

        # Get the SlurmID, used in default file names
        slurm_id = os.getenv('SLURM_JOBID')

        if slurm_id is None:
            slurm_id = str(666)

        print(f"args: {args}")

        if args.volt_out is not None:
            # Save neuron voltage
            if args.volt_out == "default":
                volt_file = os.path.join(save_dir,
                                         f"network-voltage-{slurm_id}.csv")
            else:
                volt_file = args.volt_out
        else:
            volt_file = None

        if args.spikes_out is None or args.spikes_out == "default":
            spikes_file = os.path.join(
                save_dir, f"network-output-spikes-{slurm_id}.txt")
        else:
            spikes_file = args.spikes_out

        disable_gj = args.disable_gj
        if disable_gj:
            print("!!! WE HAVE DISABLED GAP JUNCTIONS !!!")

        log_file = os.path.join(os.path.dirname(network_file), "log",
                                "network-simulation-log.txt")

        log_dir = os.path.join(os.path.dirname(network_file), "log")
        if not os.path.exists(log_dir):
            print(f"Creating directory {log_dir}")
            os.makedirs(log_dir, exist_ok=True)

        from mpi4py import MPI  # This must be imported before neuron, to run parallel
        from neuron import h  # , gui

        pc = h.ParallelContext()

        # Simulate is deterministic, no random seed.
        sim = SnuddaSimulate(network_file=network_file,
                             input_file=input_file,
                             disable_gap_junctions=disable_gj,
                             log_file=log_file,
                             verbose=args.verbose)

        sim.add_external_input()

        sim.check_memory_status()

        if volt_file is not None:
            sim.add_recording(
                side_len=None)  # side_len lets you record from a subset
            # sim.addRecordingOfType("dSPN",5) # Side len let you record from a subset

        t_sim = args.time * 1000  # Convert from s to ms for Neuron simulator

        if args.exportCoreNeuron:
            sim.export_to_core_neuron()
            return  # We do not run simulation when exporting to core neuron

        sim.check_memory_status()
        print("Running simulation for " + str(t_sim) + " ms.")
        sim.run(t_sim)  # In milliseconds

        print("Simulation done, saving output")
        if spikes_file is not None:
            sim.write_spikes(spikes_file)

        if volt_file is not None:
            sim.write_voltage(volt_file)

        stop = timeit.default_timer()
        if sim.pc.id() == 0:
            print("Program run time: " + str(stop - start))

        # sim.plot()

    ############################################################################

    def analyse(self, args):

        print("Add analysis code here, see Network_analyse.py")

    ############################################################################

    def setup_parallel(self):

        self.slurm_id = os.getenv('SLURM_JOBID')

        if self.slurm_id is None:
            self.slurm_id = self.next_run_id()
        else:
            self.slurm_id = int(self.slurm_id)

        self.logfile.write(f"Using slurm_id: {self.slurm_id}")

        ipython_profile = os.getenv('IPYTHON_PROFILE')
        if not ipython_profile:
            ipython_profile = "default"

        ipython_dir = os.getenv('IPYTHONDIR')
        if not ipython_dir:
            ipython_dir = os.path.join(os.path.abspath(os.getcwd()),
                                       ".ipython")

        self.logfile.write('Creating ipyparallel client\n')

        from ipyparallel import Client

        u_file = os.path.join(ipython_dir, f"profile_{ipython_profile}",
                              "security", "ipcontroller-client.json")
        self.rc = Client(url_file=u_file, timeout=120, debug=False)

        self.logfile.write(f'Client IDs: {self.rc.ids}')

        # http://davidmasad.com/blog/simulation-with-ipyparallel/
        # http://people.duke.edu/~ccc14/sta-663-2016/19C_IPyParallel.html
        self.d_view = self.rc.direct_view(
            targets='all')  # rc[:] # Direct view into clients
        self.lb_view = self.rc.load_balanced_view(targets='all')

    ############################################################################

    def stop_parallel(self):

        # Disable this function, keep the pool running for now
        return

        # if self.rc is not None:
        #    print("Stopping ipyparallel")
        #    self.rc.shutdown(hub=True)

    ############################################################################

    def setup_log_file(self, log_file_name):
        data_dir = os.path.dirname(log_file_name)

        self.make_dir_if_needed(data_dir)

        try:
            self.logfile = open(log_file_name, 'w')
            self.logfile.write('Starting log file\n')
        except OSError:
            print("Unable to set up log file " + str(log_file_name))

    ############################################################################

    def close_log_file(self):

        stop = timeit.default_timer()

        print("\nProgram run time: " + str(stop - self.start))

        self.logfile.write("Program run time: " + str(stop - self.start))
        self.logfile.write("End of log. Closing file.")
        self.logfile.close()

    ##############################################################################

    def next_run_id(self):

        import pickle

        run_id_file = ".runID.pickle"

        try:
            if os.path.isfile(run_id_file):

                with open(run_id_file, 'rb') as f:
                    run_id = pickle.load(f)
                    next_id = int(np.ceil(np.max(run_id)) + 1)

                run_id.append(next_id)

                with open(run_id_file, 'wb') as f:
                    pickle.dump(run_id, f, -1)

            else:

                with open(run_id_file, 'wb') as f:
                    next_id = 1
                    run_id = [1]
                    pickle.dump(run_id, f, -1)

        except Exception as e:
            import traceback
            tstr = traceback.format_exc()
            print(tstr)
            print("Problem reading .runID.pickle file, setting runID to 0")
            return 0

        print("Using runID = " + str(next_id))

        return next_id

    ############################################################################

    @staticmethod
    def make_dir_if_needed(dir_path):

        if not os.path.exists(dir_path):
            print("Creating missing directory " + dir_path)
            try:
                os.makedirs(dir_path)
                print("Created directory " + dir_path)
            except OSError:
                print("Failed to create dir " + dir_path)
Example #8
    def test_project(self):

        # Are there connections dSPN->iSPN
        from snudda.utils.load import SnuddaLoad
        network_file = os.path.join(self.network_path, "network-synapses.hdf5")
        sl = SnuddaLoad(network_file)

        dspn_id_list = sl.get_cell_id_of_type("dSPN")
        ispn_id_list = sl.get_cell_id_of_type("iSPN")

        tot_proj_ctr = 0

        for dspn_id in dspn_id_list:
            for ispn_id in ispn_id_list:

                synapses, synapse_coords = sl.find_synapses(pre_id=dspn_id,
                                                            post_id=ispn_id)
                if synapses is not None:
                    tot_proj_ctr += synapses.shape[0]

        with self.subTest(stage="projection_exists"):
            # There should be projection synapses between dSPN and iSPN in this toy example
            self.assertTrue(tot_proj_ctr > 0)

        tot_dd_syn_ctr = 0
        for dspn_id in dspn_id_list:
            for dspn_id2 in dspn_id_list:

                synapses, synapse_coords = sl.find_synapses(pre_id=dspn_id,
                                                            post_id=dspn_id2)
                if synapses is not None:
                    tot_dd_syn_ctr += synapses.shape[0]

        tot_ii_syn_ctr = 0
        for ispn_id in ispn_id_list:
            for ispn_id2 in ispn_id_list:

                synapses, synapse_coords = sl.find_synapses(pre_id=ispn_id,
                                                            post_id=ispn_id2)
                if synapses is not None:
                    tot_ii_syn_ctr += synapses.shape[0]

        with self.subTest(stage="normal_synapses_exist"):
            # In this toy example neurons are quite sparsely placed, but we should have at least some
            # synapses
            self.assertTrue(tot_dd_syn_ctr > 0)
            self.assertTrue(tot_ii_syn_ctr > 0)

        # We also need to run in parallel to verify we get the same result (same random seed)

        serial_synapses = sl.data["synapses"].copy()
        del sl  # Close old file so we can overwrite it

        os.environ["IPYTHONDIR"] = os.path.join(os.path.abspath(os.getcwd()),
                                                ".ipython")
        os.environ["IPYTHON_PROFILE"] = "default"
        os.system(
            "ipcluster start -n 4 --profile=$IPYTHON_PROFILE --ip=127.0.0.1&")
        time.sleep(10)

        # Run place, detect and prune in parallel by passing rc
        from ipyparallel import Client
        u_file = os.path.join(".ipython", "profile_default", "security",
                              "ipcontroller-client.json")
        rc = Client(url_file=u_file, timeout=120, debug=False)
        d_view = rc.direct_view(
            targets='all')  # rc[:] # Direct view into clients

        from snudda.detect.detect import SnuddaDetect
        sd = SnuddaDetect(network_path=self.network_path,
                          hyper_voxel_size=100,
                          rc=rc,
                          verbose=True)
        sd.detect()

        from snudda.detect.project import SnuddaProject
        # TODO: Currently SnuddaProject only runs in serial
        sp = SnuddaProject(network_path=self.network_path)
        sp.project()
        sp.write()

        from snudda.detect.prune import SnuddaPrune
        # Prune has different methods for serial and parallel execution, important to test it!
        sp = SnuddaPrune(network_path=self.network_path, rc=rc, verbose=True)
        sp.prune()

        with self.subTest(stage="check-parallel-identical"):
            sl2 = SnuddaLoad(network_file)
            parallel_synapses = sl2.data["synapses"].copy()

            # ParameterID, sec_X etc. are randomised within a hyper voxel, so you need to use the same
            # hypervoxel size for reproducibility between serial and parallel execution

            # All synapses should be identical regardless of serial or parallel execution path
            self.assertTrue((serial_synapses == parallel_synapses).all())

        os.system("ipcluster stop")
Example #9
    def proc_thar(self, fp, clear=True, ipcprofile=None):
        if isinstance(fp, list):
            # multiple thar
            print("@Slit[{}]: cleared tws ...".format(self.slit))
            if ipcprofile is None:
                print("@Slit[{}]: processing {} thar sequentially ...".format(
                    self.slit, len(fp)))
                tws = [self.proc_thar(fp_, clear=True) for fp_ in fp]
                if clear:
                    self.clear_tws()
                for _ in tws:
                    self.tws.add_row(_)
                self.tws.sort("jdmid")
            else:
                # use ipcluster
                rc = Client(profile=ipcprofile)
                print(
                    "@Slit[{}]: dispatching {} thar to ipcluster (profile={}, nproc={}) ..."
                    .format(self.slit, len(fp), ipcprofile, len(rc.ids)))
                dv = rc.direct_view()
                dv.block = True
                dv.push({"this_slit": self})
                dv.scatter("fp", fp)
                dv.execute(
                    "tws = [this_slit.proc_thar(_, clear=True) for _ in fp]")
                if clear:
                    self.clear_tws()
                # print(self.tws, dv.gather("tws"))
                # print(self.tws.colnames)
                for _ in dv.gather("tws"):
                    # print(_.keys())
                    self.tws.add_row(_)
                self.tws.sort("jdmid")
                dv.execute("%reset -f")
        else:
            # single thar

            # 1.read ThAr
            hdr = self.read_header(fp)
            # assert slit is correct
            assert hdr["SLIT"] == self.slit
            # thar data
            thar_data = self.read_image(fp)
            # thar time
            jdmid = hdr["JD-MID"]
            exptime = hdr["EXPTIME"]

            # 2.correct sensitivity
            thar_bg_sens = (thar_data - self.bias) / self.sensitivity
            # im_thar_denm_err = np.sqrt(np.abs(im_thar_denm)) / s.master_flats[slit]["norm"]

            # 3.extract ThAr spectrum
            rextr = self.ap.extract_all(thar_bg_sens, **self.kwargs_extract)
            thar_obs = rextr["spec_sum"]
            # thar_err = rextr["err_sum"]

            # 4.xcorrelate for initial wavelength guess
            wave_init = thar.corr_thar(self.wave_temp,
                                       self.thar_temp,
                                       thar_obs,
                                       maxshift=100)

            # 5. find thar lines
            tlines = thar.find_lines(wave_init,
                                     thar_obs,
                                     self.thar_line_list,
                                     npix_chunk=20)
            tlines = tlines[np.isfinite(tlines["line_x_ccf"])]

            # fit grating equation
            x = tlines["line_x_ccf"]  # line_x_ccf/line_x_gf
            y = tlines["order"]
            z = tlines["line"]
            pf1, pf2, indselect = thar.grating_equation(
                x, y, z, **self.kwargs_grating)
            tlines.add_column(table.Column(indselect, "indselect"))
            nlines = np.sum(indselect)
            # mpflux
            mpflux = np.median(tlines["line_peakflux"][tlines["indselect"]])
            # rms
            rms = np.std((pf2.predict(x, y) - z)[indselect])
            # predict wavelength solution
            nx, norder = thar_obs.shape
            mx, morder = np.meshgrid(np.arange(norder), np.arange(nx))
            wave_solu = pf2.predict(mx, morder)  # polynomial fitter

            # result
            calibration_dict = OrderedDict(fp=fp,
                                           jdmid=jdmid,
                                           exptime=exptime,
                                           wave_init=wave_init,
                                           wave_solu=wave_solu,
                                           tlines=tlines,
                                           nlines=nlines,
                                           rms=rms,
                                           pf1=pf1,
                                           pf2=pf2,
                                           mpflux=mpflux,
                                           thar_obs=thar_obs)
            # clear if necessary
            if clear:
                self.clear_tws()
            # add this thar to list
            if fp in self.tws["fp"]:
                # overwrite record
                idx = int(np.where(fp == self.tws["fp"])[0])
                self.tws.remove_row(idx)
            self.tws.add_row(calibration_dict)
            self.tws.sort("jdmid")
            return calibration_dict
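
The parallel branch above relies on ipyparallel's push/scatter/execute/gather cycle. A stripped-down, hedged sketch of that round trip (it assumes an ipcluster is already running; all names are illustrative):

# Minimal scatter/execute/gather round trip on a DirectView
from ipyparallel import Client

rc = Client()
dv = rc.direct_view()
dv.block = True
dv.push({"offset": 10})               # same object on every engine
dv.scatter("chunk", list(range(8)))   # each engine gets a slice
dv.execute("partial = [x + offset for x in chunk]")
print(dv.gather("partial"))           # reassembled in engine order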
Example #10
import datetime
import re
import pymongo
from pymongo import MongoClient
from ipyparallel import Client

from CreationModules import FileSearch as FS
from CreationModules import PriorBreathData as PDB

__author__ = 'sottilep'

ipclient = Client()
print(ipclient.ids)
ipview = ipclient.direct_view()

client = MongoClient()
db = client.VentDB
input_log = db.input_log
breath_col = db.breath_collection

# input_log.drop()
# breath_col.drop()

try:
    input_log.create_index([('type', pymongo.TEXT)])
    input_log.create_index([('loaded', pymongo.ASCENDING)])
    input_log.create_index([('analyzed', pymongo.ASCENDING)])
    input_log.create_index([('loc', pymongo.GEO2D)], min = -1,
                           max = (datetime.datetime.now() + datetime.timedelta(days = 1440)).timestamp())

    breath_col.create_index([('patient_id', pymongo.ASCENDING)])
Example #11
from numpy import array, nan, percentile, savez

from ipyparallel import Client

from .adf_simulation import adf_simulation

# Number of repetitions
EX_NUM = 500
# Number of simulations per exercise
EX_SIZE = 200000
# Approximately controls memory use, in MiB
MAX_MEMORY_SIZE = 100

rc = Client()
dview = rc.direct_view()
with dview.sync_imports():
    from numpy import arange, zeros
    from numpy.random import RandomState


def lmap(*args):
    return list(map(*args))


def wrapper(n, trend, b, seed=0):
    """
    Wraps and blocks the main simulation so that the maximum amount of memory
    can be controlled on multi processor systems when executing in parallel
    """
    rng = RandomState()
Example #12
class Snudda(object):

    ############################################################################

    def __init__(self, networkPath):

        if (networkPath[-1] == "/"):
            self.networkPath = networkPath[:-1]
        else:
            self.networkPath = networkPath

        # Add current dir to python path
        sys.path.append(os.getcwd())

        self.start = timeit.default_timer()

    ############################################################################

    def helpInfo(self, args):
        from .snudda_help import snudda_help_text
        print(snudda_help_text())

    ############################################################################

    def initConfig(self, args):
        # self.networkPath = args.path
        print("Creating config file")
        print("Network path: " + str(self.networkPath))

        assert args.size is not None, \
          "You need to speicfy --size when initialising config for network2"

        from .init import SnuddaInit
        structDef = {
            "Striatum": args.size,
            "GPe": 0,
            "GPi": 0,
            "SNr": 0,
            "STN": 0,
            "Cortex": 0,
            "Thalamus": 0
        }
        # Cortex and thalamus axons disabled right now, set to 1 to include one

        if not args.overwrite:
            assert not os.path.exists(self.networkPath), \
              "Network path " + str(self.networkPath) + " already exists" \
              + " (aborting to prevent accidental overwriting)"

        self.makeDirIfNeeded(self.networkPath)

        nChannels = args.nchannels

        configFile = self.networkPath + "/network-config.json"
        SnuddaInit(structDef=structDef,
                   configName=configFile,
                   nChannels=nChannels)

        if (args.size > 1e5):
            print("Make sure there is enough disk space in " +
                  str(self.networkPath))
            print("Large networks take up ALOT of space")

    ############################################################################

    def placeNeurons(self, args):
        # self.networkPath = args.path
        print("Placing neurons")
        print("Network path: " + str(self.networkPath))

        configFile = self.networkPath + "/network-config.json"
        positionFile = self.networkPath + "/network-neuron-positions.hdf5"
        logFileName = self.networkPath + "/log/logFile-place-neurons.txt"

        self.setupLogFile(logFileName)  # sets self.logFile
        self.setupParallel()  # sets self.dView and self.lbView

        from .place import SnuddaPlace

        if (args.h5legacy):
            h5libver = "earliest"
        else:
            h5libver = "latest"  # default

        npn = SnuddaPlace(config_file=configFile,
                          logFile=self.logFile,
                          verbose=True,
                          dView=self.dView,
                          h5libver=h5libver)

        npn.writeDataHDF5(positionFile)

        self.stopParallel()
        self.closeLogFile()

    ############################################################################

    def touchDetection(self, args):
        # self.networkPath = args.path
        print("Touch detection")
        print("Network path: " + str(self.networkPath))

        if (args.hvsize is not None):
            hyperVoxelSize = int(args.hvsize)
        else:
            hyperVoxelSize = 100

        if (args.volumeID is not None):
            volumeID = args.volumeID
        else:
            volumeID = None

        logDir = self.networkPath + "/log"

        configFile = self.networkPath + "/network-config.json"
        positionFile = self.networkPath + "/network-neuron-positions.hdf5"
        logFileName = self.networkPath + "/log/logFile-touch-detection.txt"
        saveFile = self.networkPath + "/voxels/network-putative-synapses.hdf5"

        voxelDir = self.networkPath + "/voxels"
        self.makeDirIfNeeded(voxelDir)

        self.setupLogFile(logFileName)  # sets self.logFile
        self.setupParallel()  # sets self.dView and self.lbView

        if (args.h5legacy):
            h5libver = "earliest"
        else:
            h5libver = "latest"  # default

        from .detect import SnuddaDetect

        if (args.cont):
            # Continue previous run
            print("Continuing previous touch detection")

            ncv = SnuddaDetect(configFile=configFile,
                               positionFile=positionFile,
                               logFile=self.logFile,
                               saveFile=saveFile,
                               SlurmID=self.SlurmID,
                               volumeID=volumeID,
                               rc=self.rc,
                               hyperVoxelSize=hyperVoxelSize,
                               h5libver=h5libver,
                               restartDetectionFlag=False)

        else:
            ncv = SnuddaDetect(configFile=configFile,
                               positionFile=positionFile,
                               logFile=self.logFile,
                               saveFile=saveFile,
                               SlurmID=self.SlurmID,
                               volumeID=volumeID,
                               rc=self.rc,
                               h5libver=h5libver,
                               hyperVoxelSize=hyperVoxelSize)

        self.stopParallel()
        self.closeLogFile()

    ############################################################################

    def pruneSynapses(self, args):
        # self.networkPath = args.path
        print("Prune synapses")
        print("Network path: " + str(self.networkPath))

        from .prune import SnuddaPrune

        logFileName = self.networkPath + "/log/logFile-synapse-pruning.txt"

        workLog = self.networkPath + "/log/network-detect-worklog.hdf5"

        self.setupLogFile(logFileName)  # sets self.logFile
        self.setupParallel()  # sets self.dView and self.lbView

        # Optionally set this
        scratchPath = None

        if (args.mergeonly):
            preMergeOnly = True
        else:
            preMergeOnly = False

        print("preMergeOnly : " + str(preMergeOnly))

        if (args.h5legacy):
            h5libver = "earliest"
        else:
            h5libver = "latest"  # default

        ncvp = SnuddaPrune(workHistoryFile=workLog,
                           logFile=self.logFile,
                           logFileName=logFileName,
                           dView=self.dView,
                           lbView=self.lbView,
                           scratchPath=scratchPath,
                           h5libver=h5libver,
                           preMergeOnly=preMergeOnly)

        self.stopParallel()
        self.closeLogFile()

    ############################################################################

    def setupInput(self, args):

        from .input import SnuddaInput

        print("Setting up inputs, assuming input.json exists")
        logFileName = self.networkPath + "/log/logFile-setup-input.log"
        self.setupLogFile(logFileName)  # sets self.logFile
        self.setupParallel()  # sets self.dView and self.lbView

        if "input" in args:
            inputConfig = args.input
        else:
            inputConfig = self.networkPath + "/input.json"

        if (not os.path.isfile(inputConfig)):
            print("Missing input config file: " + str(inputConfig))
            return

        if (args.networkFile):
            networkFile = args.networkFile
        else:
            networkFile = self.networkPath \
              + "/network-pruned-synapses.hdf5"

        if (args.inputFile):
            spikeFile = args.inputFile
        else:
            spikeFile = self.networkPath + "/input-spikes.hdf5"

        if (args.time):
            inputTime = args.time
        else:
            inputTime = None

        print("Writing input spikes to " + spikeFile)

        ni = SnuddaInput(inputConfigFile=inputConfig,
                         HDF5networkFile=networkFile,
                         spikeDataFileName=spikeFile,
                         time=inputTime,
                         logFile=self.logFile)

        self.stopParallel()
        self.closeLogFile()

    ############################################################################

    def exportToSONATA(self, args):

        from ConvertNetwork import ConvertNetwork

        print("Exporting to SONATA format")
        print("Network path: " + str(self.networkPath))

        if (args.networkFile):
            networkFile = args.networkFile
        else:
            networkFile = self.networkPath \
              + "/network-pruned-synapses.hdf5"

        if (args.inputFile):
            inputFile = args.inputFile
        else:
            inputFile = self.networkPath + "/input-spikes.hdf5"

        outDir = self.networkPath + "/SONATA/"

        cn = ConvertNetwork(networkFile=networkFile,
                            inputFile=inputFile,
                            outDir=outDir)

    ############################################################################

    def simulate(self, args):

        start = timeit.default_timer()

        from .simulate import SnuddaSimulate

        if (args.networkFile):
            networkFile = args.networkFile
        else:
            networkFile = self.networkPath \
              + "/network-pruned-synapses.hdf5"

        if (args.inputFile):
            inputFile = args.inputFile
        else:
            inputFile = self.networkPath + "/input-spikes.hdf5"

        self.makeDirIfNeeded(self.networkPath + "/simulation")

        print("Using input file " + inputFile)

        #nWorkers = args.ncores
        #print("Using " + str(nWorkers) + " workers for neuron")

        # Problems with nested symbolic links when the second one is a relative
        # path going beyond the original base path
        if (args.mechDir is None):
            mechDir = os.path.dirname(networkFile) + "/mechanisms"

            # !!! problem with paths, testing to create mechanism dir in current dir
            mechDir = "mechanisms"

            if (not os.path.exists(mechDir)):
                mDir = os.path.dirname(__file__) + "/data/cellspecs/mechanisms"
                os.symlink(mDir, mechDir)
        else:
            mechDir = args.mechDir

        # !!! These are saved in current directory x86_64
        # --- problem since nrnivmodl seems to want a relative path...

        makeModsStr = "nrnivmodl " + mechDir
        if (not os.path.exists('x86_64')):
            print("Please first run: " + makeModsStr)
            exit(-1)
            # I was having problems when running nrnivmodl in the script, but
            # running it manually in bash works... WHY?!!

        # os.system(makeModsStr)

        saveDir = os.path.dirname(networkFile) + "/simulation/"

        if (not os.path.exists(saveDir)):
            print("Creating directory " + saveDir)
            os.makedirs(saveDir, exist_ok=True)

        # Get the SlurmID, used in default file names
        SlurmID = os.getenv('SLURM_JOBID')

        if (SlurmID is None):
            SlurmID = str(666)

        print("args: " + str(args))

        if (args.voltOut is not None):
            # Save neuron voltage
            if (args.voltOut == "default"):
                voltFile = saveDir + 'network-voltage-' + SlurmID + '.csv'
            else:
                voltFile = args.voltOut
        else:
            voltFile = None

        if (args.spikesOut is None or args.spikesOut == "default"):
            spikesFile = saveDir + 'network-output-spikes-' + SlurmID + '.txt'
        else:
            spikesFile = args.spikesOut

        disableGJ = args.disableGJ
        if (disableGJ):
            print("!!! WE HAVE DISABLED GAP JUNCTIONS !!!")

        logFile = os.path.dirname(networkFile) \
          + "/log/network-simulation-log.txt"

        logDir = os.path.dirname(networkFile) + "/log"
        if (not os.path.exists(logDir)):
            print("Creating directory " + logDir)
            os.makedirs(logDir, exist_ok=True)

        from mpi4py import MPI  # This must be imported before neuron, to run parallel
        from neuron import h  #, gui

        pc = h.ParallelContext()

        sim = SnuddaSimulate(networkFile=networkFile,
                             inputFile=inputFile,
                             disableGapJunctions=disableGJ,
                             logFile=logFile,
                             verbose=args.verbose)

        sim.addExternalInput()
        sim.checkMemoryStatus()

        if (voltFile is not None):
            sim.addRecording(
                sideLen=None)  # Side len let you record from a subset
            #sim.addRecordingOfType("dSPN",5) # Side len let you record from a subset

        tSim = args.time * 1000  # Convert from s to ms for Neuron simulator

        sim.checkMemoryStatus()
        print("Running simulation for " + str(tSim) + " ms.")
        sim.run(tSim)  # In milliseconds

        print("Simulation done, saving output")
        if (spikesFile is not None):
            sim.writeSpikes(spikesFile)

        if (voltFile is not None):
            sim.writeVoltage(voltFile)

        stop = timeit.default_timer()
        if (sim.pc.id() == 0):
            print("Program run time: " + str(stop - start))

        # sim.plot()
        exit(0)

        #cmdStr = "nrnivmodl " + mechDir + " && mpiexec -n " + str(nWorkers) + " -map-by socket:OVERSUBSCRIBE python3 " + os.path.dirname(__file__) + " simulate.py " + networkFile + " " + inputFile + " --time " + str(args.time)

        #if(args.voltOut is not None):
        #  cmdStr += " --voltOut " + args.voltOut

        #os.system(cmdStr)

    ############################################################################

    def analyse(self, args):

        print("Add analysis code here, see Network_analyse.py")

    ############################################################################

    def setupParallel(self):
        self.SlurmID = os.getenv('SLURM_JOBID')

        if (self.SlurmID is None):
            self.SlurmID = self.nextRunID()
        else:
            self.SlurmID = int(self.SlurmID)

        self.logFile.write("Using SlurmID: " + str(self.SlurmID))

        if (os.getenv('IPYTHON_PROFILE') is not None):

            self.logFile.write('Creating ipyparallel client\n')

            from ipyparallel import Client
            #self.rc = Client(profile=os.getenv('IPYTHON_PROFILE'),
            #            # sshserver='127.0.0.1',
            #            debug=False)

            ufile = os.getenv('IPYTHONDIR') + "/profile_" \
                    + os.getenv('IPYTHON_PROFILE') \
                    + "/security/ipcontroller-client.json"
            self.rc = Client(url_file=ufile, timeout=120, debug=False)

            self.logFile.write('Client IDs: ' + str(self.rc.ids))

            # http://davidmasad.com/blog/simulation-with-ipyparallel/
            # http://people.duke.edu/~ccc14/sta-663-2016/19C_IPyParallel.html
            self.dView = self.rc.direct_view(
                targets='all')  # rc[:] # Direct view into clients
            self.lbView = self.rc.load_balanced_view(targets='all')

            # Define nc globally
            # self.dView.execute("nc = None",block=True)
        else:
            self.logFile.write(
                "No IPYTHON_PROFILE enviroment variable set, running in serial"
            )
            self.dView = None
            self.lbView = None
            self.rc = None

    ############################################################################

    def stopParallel(self):

        # Disable this function, keep the pool running for now
        return

        if (self.rc is not None):
            print("Stopping ipyparallel")
            self.rc.shutdown(hub=True)

    ############################################################################

    def setupLogFile(self, logFileName):
        dataDir = os.path.dirname(logFileName)

        self.makeDirIfNeeded(dataDir)

        try:
            self.logFile = open(logFileName, 'w')
            self.logFile.write('Starting log file\n')
        except OSError:
            print("Unable to set up log file " + str(logFileName))

    ############################################################################

    def closeLogFile(self):

        stop = timeit.default_timer()

        print("\nProgram run time: " + str(stop - self.start))

        self.logFile.write("Program run time: " + str(stop - self.start))
        self.logFile.write("End of log. Closing file.")
        self.logFile.close()

    ##############################################################################

    def nextRunID(self):

        import pickle

        runIDfile = ".runID.pickle"

        try:
            if (os.path.isfile(runIDfile)):

                with open(runIDfile, 'rb') as f:
                    runID = pickle.load(f)
                    nextID = int(np.ceil(np.max(runID)) + 1)

                runID.append(nextID)

                with open(runIDfile, 'wb') as f:
                    pickle.dump(runID, f, -1)

            else:

                with open(runIDfile, 'wb') as f:
                    nextID = 1
                    runID = [1]
                    pickle.dump(runID, f, -1)

        except Exception as e:
            import traceback
            tstr = traceback.format_exc()
            print(tstr)

            print("Problem reading .runID.pickle file, setting runID to 0")
            import pdb
            pdb.set_trace()
            return 0

        print("Using runID = " + str(nextID))

        return nextID

############################################################################

    def makeDirIfNeeded(self, dirPath):

        if (not os.path.exists(dirPath)):
            print("Creating missing directory " + dirPath)
            os.makedirs(dirPath)
Example #13
class ClusterLab(epyc.Lab):
    """A :class:`Lab` running on an ``ipyparallel`` compute
    cluster.

    Experiments are submitted to engines in the cluster for
    execution in parallel, with the experiments being performed
    asynchronously to allow for disconnection and subsequent retrieval
    of results. Combined with a persistent :class:`LabNotebook`, this allows
    for fully decoupled access to an on-going computational experiment
    with piecewise retrieval of results.

    This class requires a cluster to already be set up and running, configured
    for persistent access, with access to the necessary code and libraries,
    and with appropriate security information available to the client.
    """

    # Tuning parameters
    WaitingTime = 30           #: Waiting time for checking for job completion. Lower values increase network traffic.

    
    def __init__( self, notebook = None, url_file = None, profile = None, profile_dir = None, ipython_dir = None, context = None, debug = False, sshserver = None, sshkey = None, password = None, paramiko = None, timeout = 10, cluster_id = None, use_dill = False, **extra_args ):
        """Create an empty lab attached to the given cluster. Most of the arguments
        are as expected by the ``ipyparallel.Client`` class, and are used to create the
        underlying connection to the cluster. The connection is opened immediately,
        meaning the cluster must be up and accessible when creating a lab to use it.

        :param notebook: the notebook used to store results (defaults to an empty :class:`LabNotebook`)
        :param url_file: file containing connection information for accessing cluster
        :param profile: name of the IPython profile to use
        :param profile_dir: directory containing the profile's connection information
        :param ipython_dir: directory containing profile directories
        :param context: ZMQ context
        :param debug: whether to issue debugging information (defaults to False)
        :param sshserver: username and machine for ssh connections
        :param sshkey: file containing ssh key
        :param password: ssh password
        :param paramiko: True to use paramiko for ssh (defaults to False)
        :param timeout: timeout in seconds for ssh connection (defaults to 10s)
        :param cluster_id: string added to runtime files to prevent collisions
        :param use_dill: whether to use Dill as pickler (defaults to False)"""
        super(epyc.ClusterLab, self).__init__(notebook)
        
        # record all the connection arguments for later
        self._arguments = dict(url_file = url_file,
                               profile = profile,
                               profile_dir = profile_dir,
                               ipython_dir = ipython_dir,
                               context = context,
                               debug = debug,
                               sshserver = sshserver,
                               sshkey = sshkey,
                               password = password,
                               paramiko = paramiko,
                               timeout = timeout,
                               cluster_id = cluster_id,
                               **extra_args)
        self._client = None

        # connect to the cluster
        self.open()

        # use Dill if requested
        if use_dill:
            self.use_dill()
        
    def open( self ):
        """Connect to the cluster."""
        if self._client is None:
            self._client = Client(**self._arguments)
        
    def close( self ):
        """Close down the connection to the cluster."""
        if self._client is not None:
            self._client.close()
            self._client = None
        
    def numberOfEngines( self ):
        """Return the number of engines available to this lab.

        :returns: the number of engines"""
        return len(self.engines())

    def engines( self ):
        """Return a list of the available engines.

        :returns: a list of engines"""
        self.open()
        return self._client[:]

    def use_dill( self ):
        """Make the cluster use Dill as pickler for transferring results. This isn't
        generally needed, but is sometimes useful for particularly complex experiments
        such as those involving closures. (Or, to put it another way, if you find yourself
        tempted to use this method, consider re-structuring your experiment code.)"""
        self.open()
        with self.sync_imports(quiet = True):
            import dill
        self._client.direct_view().use_dill()

    def sync_imports( self, quiet = False ):
        """Return a context manager to control imports onto all the engines
        in the underlying cluster. This method is used within a ``with`` statement.

        Any imports should be done with no experiments running, otherwise the
        method will block until the cluster is quiet. Generally imports will be one
        of the first things done when connecting to a cluster. (But be careful
        not to accidentally try to re-import if re-connecting to a running
        cluster.)

        :param quiet: if True, suppresses messages (defaults to False)
        :returns: a context manager"""
        self.open()
        return self._client[:].sync_imports(quiet = quiet)
    
    def _mixup( self, ps ):
        """Private method to mix up a list of values in-place using a Fisher-Yates
        shuffle (see https://en.wikipedia.org/wiki/Fisher-Yates_shuffle).

        :param ps: the array
        :returns: the array, shuffled in-place"""
        for i in range(len(ps) - 1, 0, -1):
            j = int(numpy.random.random() * (i + 1))  # j in [0, i] inclusive
            temp = ps[i]
            ps[i] = ps[j]
            ps[j] = temp
        return ps
     
    def runExperiment( self, e ):
        """Run the experiment across the parameter space in parallel using
        all the engines in the cluster. This method returns immediately.

        The experiments are run asynchronously, with the points in the parameter
        space being explored randomly so that intermediate retrievals of results
        are more representative of the overall result. Put another way, for a lot
        of experiments the results available will converge towards a final
        answer, so we can plot them and see the answer emerge.

        :param e: the experiment"""

        # create the parameter space
        space = self.parameterSpace()

        # only proceed if there's work to do
        if len(space) > 0:
            nb = self.notebook()
            
            # randomise the order of the parameter space so that we evaluate across
            # the space as we go along to try to make intermediate (incomplete) result
            # sets more representative of the overall result set
            ps = self._mixup(space)

            try:
                # connect to the cluster
                self.open()

                # submit an experiment at each point in the parameter space to the cluster
                view = self._client.load_balanced_view()
                jobs = []
                for p in ps:
                    jobs.extend((view.apply_async((lambda p: e.set(p).run()), p)).msg_ids)

                    # there seems to be a race condition in submitting jobs,
                    # whereby jobs get dropped if they're submitted too quickly
                    time.sleep(0.01)
                
                # record the message ids of all the jobs as submitted but not yet completed
                psjs = zip(ps, jobs)
                for (p, j) in psjs:
                    nb.addPendingResult(p, j)
            finally:
                # commit our pending results in the notebook
                nb.commit()
                self.close()

    def updateResults( self ):
        """Update our results within any pending results that have completed since we
        last retrieved results from the cluster.

        :returns: the number of pending results completed at this call"""

        # we do all the tests for pending results against the notebook directly,
        # as the corresponding methods on self call this method themselves
        nb = self.notebook()

        # look for pending results if we're waiting for any
        n = 0
        if nb.numberOfPendingResults() > 0:
            # we have results to get
            self.open()
            for j in set(nb.pendingResults()):
                # query the status of a job
                status = self._client.result_status(j, status_only = False)
                    
                # add all completed jobs to the notebook
                if j in status['completed']:
                    r = status[j]
                        
                    # update the result in the notebook, cancelling
                    # the pending result as well
                    # values come back from Client.result_status() in
                    # varying degrees of list-nesting, which LabNotebook.addResult()
                    # handles itself
                    nb.addResult(r, j)

                    # commit changes to the notebook
                    nb.commit()

                    # purge the completed job from the cluster
                    self._client.purge_hub_results(j)
                         
                    # record that we retrieved the results for the given job
                    n = n + 1
        return n

    def numberOfResults( self ):
        """Return the number of results we have available at the moment.

        :returns: the number of results"""
        self.updateResults()
        return self.notebook().numberOfResults()

    def numberOfPendingResults( self ):
        """Return the number of results we are waiting for.

        :returns: the number of pending results"""
        self.updateResults()
        return self.notebook().numberOfPendingResults()
    
    def _availableResultsFraction( self ):
        """Private method to return the fraction of results available, as a real number
        between 0 and 1. This does not update the results fetched from the cluster.

        :returns: the fraction of available results"""
        tr = self.notebook().numberOfResults() + self.notebook().numberOfPendingResults()
        if tr == 0:
            return 0
        else:
            return (self.notebook().numberOfResults() + 0.0) / tr
    
    def readyFraction( self ):
        """Test what fraction of results are available. This will change over
        time as the results come in.

        :returns: the fraction from 0 to 1"""
        self.updateResults()
        return self._availableResultsFraction()
    
    def ready( self ):
        """Test whether all the results are available. This will change over
        time as the results come in.

        :returns: True if all the results are available"""
        return (self.readyFraction() == 1)

    def wait( self, timeout = -1 ):
        """Wait for all pending results to be finished. If timeout is set,
        return after this many seconds regardless.

        :param timeout: timeout period in seconds (defaults to forever)
        :returns: True if all the results completed"""

        # we can't use ipyparallel.Client.wait() for this, because that
        # method only works for cases where the Client object is the one that
        # submitted the jobs to the cluster hub -- and therefore has the
        # necessary data structures to perform synchronisation. This isn't the
        # case for us, as one of the main goals of epyc is to support disconnected
        # operation, which implies a different Client object retrieving results
        # than the one that submitted the jobs in the first place. This is
        # unfortunate, but understandable given the typical use cases for
        # Client objects.
        #
        # Instead, we have to code around this with a little busy-waiting. The
        # ClusterLab.WaitingTime class attribute sets the polling latency, and we
        # repeatedly wait for this amount of time before updating the results. The
        # latency essentially controls how busy this process is: given that most
        # simulations are expected to be long, a latency in the tens of seconds
        # feels about right as a default.
        if self.numberOfPendingResults() > 0:
            # we've got pending results, wait for them
            timeWaited = 0
            while (timeout < 0) or (timeWaited < timeout):
                if self.numberOfPendingResults() == 0:
                    # no pending jobs left, we're complete
                    return True
                else:
                    # not done yet, calculate the waiting period
                    if timeout == -1:
                        # wait for the default waiting period
                        dt = self.WaitingTime
                    else:
                        # wait for the default waiting period or until the end
                        # of the timeout, whichever comes first
                        if (timeout - timeWaited) < self.WaitingTime:
                            dt = timeout - timeWaited
                        else:
                            dt = self.WaitingTime
                            
                    # sleep for a while
                    time.sleep(dt)
                    timeWaited = timeWaited + dt

            # if we get here, the timeout expired, so do a final check
            # and then exit
            return (self.numberOfPendingResults() == 0)

        else:
            # no results, so we got them all
            return True
        
    def pendingResults( self ):
        """Return the list of job ids for any pending results.

        :returns: a list of job ids"""
        return self.notebook().pendingResults()
    
    def pendingResultsFor( self, params ):
        """Return a list of job ids for any results pending for experiments
        at the given point in the parameter space.

        :param params: the experimental parameters
        :returns: a list of job ids"""
        return self.notebook().pendingResultsFor(params)
    
    def _abortJobs( self, js ):
        """Private method to abort a set of jobs.

        :param js: the job ids to be aborted"""
        self.open()
        self._client.abort(jobs = js)
        self.close()
        
    def cancelPendingResultsFor( self, params ):
        """Cancel any results pending for experiments at the given point
        in the parameter space.

        :param params: the experimental parameters"""
        
        # grab the result job ids
        jobs = self.pendingResultsFor(params)
        
        if len(jobs) > 0:
            # abort in the cluster
            self._abortJobs(jobs)
            
            # cancel in the notebook                  
            self.notebook().cancelPendingResultsFor(params)
        
    def cancelAllPendingResults( self ):
        """Cancel all pending results."""

        # grab all the pending job ids
        jobs = self.pendingResults()
        
        if len(jobs) > 0:
            # abort in the cluster
            self._abortJobs(jobs)
            
            # cancel in the notebook                  
            self.notebook().cancelAllPendingResults()
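
A minimal usage sketch for the class above, not part of the original listing. It assumes an ipcluster is already running under a profile named "mycluster", and that epyc's dict-style parameter interface, the Experiment.do() override point and LabNotebook.results() behave as in epyc's documented API; treat all of those as assumptions to check against the installed version.

# Hedged usage sketch for ClusterLab (assumptions noted in the comments).
import time
import epyc


class SquareExperiment(epyc.Experiment):
    """Toy experiment: square the single parameter 'x'."""

    def do(self, params):
        # assumption: do() is the override point and returns a dict of results
        return dict(y=params['x'] ** 2)


lab = ClusterLab(profile="mycluster")    # the cluster must already be running
lab['x'] = range(10)                     # assumption: dict-style parameter interface

lab.runExperiment(SquareExperiment())    # submits the jobs and returns immediately

while not lab.ready():                   # poll; results arrive asynchronously
    print("{0:.0%} of results available".format(lab.readyFraction()))
    time.sleep(lab.WaitingTime)

results = lab.notebook().results()       # assumption: LabNotebook.results()
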
Exemple #14
0
    def proc_star(self,
                  fp,
                  write=True,
                  ipcprofile=None,
                  prefix="tstar",
                  push=True):
        if isinstance(fp, list):
            # multiple star
            if len(fp) == 0:
                return []

            if ipcprofile is None:
                # use joblib
                print("@Slit[{}]: processing {} star(s) sequentially ...".format(
                    self.slit, len(fp)))
                results = []
                for fp_ in fp:
                    results.append(
                        self.proc_star(fp_, write=True, prefix=prefix))
                # check invalid results
                for fp_ in results:
                    if prefix not in fp_:
                        warnings.warn(" >>> invalid data >>> {} ".format(fp_))
                return results

            else:
                # use ipcluster
                rc = Client(profile=ipcprofile)
                print(
                    "@Slit[{}]: dispatching {} files to ipcluster (profile={}, nproc={}) ..."
                    .format(self.slit, len(fp), ipcprofile, len(rc.ids)))
                dv = rc.direct_view()
                dv.block = True
                if push:
                    dv.push({"this_slit": self, "prefix": prefix})
                dv.scatter("fp", fp)
                dv.execute("fps_out = this_slit.proc_star(fp, prefix=prefix)")
                # dv.execute()
                # dv.execute("tws = this_slit.tws")
                # self.tws = table.vstack(dv.gather("tws"))
                # self.tws.sort("jdmid")
                print("@Slit[{}]: Done!".format(self.slit))
                print("saved to files:")
                print("===========================================")
                results = dv.gather("fps_out")
                for fp_ in results:
                    print(fp_)
                print("===========================================")
                # dv.execute("%reset -f")
                # dv.execute("del()")
                # check invalid results
                for fp_ in results:
                    if prefix not in fp_:
                        warnings.warn(" >>> invalid data >>> {} ".format(fp_))
                return results
        else:
            # single star
            try:
                # 1.read star
                hdr = self.read_header(fp)
                # assert slit is correct
                assert hdr["SLIT"] == self.slit
                # star data
                star_data = self.read_image(fp)
                # star time
                jdmid = hdr["JD-MID"]
                exptime = hdr["EXPTIME"]
                bvc = hdr["BVC"]

                # 2.subtract bias & correct sensitivity
                star_bias_sens = (star_data - self.bias) / self.sensitivity

                # 3. subtract background
                bg = self.ap.background(star_bias_sens,
                                        **self.kwargs_background_star)
                star_bias_sens_bg = star_bias_sens - bg

                # 4.extract star spectrum
                rextr = self.ap.extract_all(star_bias_sens_bg,
                                            **self.kwargs_extract)
                # star_obs = rextr["spec_sum"]
                # star_err = rextr["err_sum"]

                # 5. append wavelength solution
                id_tws = np.argmin(np.abs(self.tws["jdmid"] - jdmid))
                rextr["wave"] = self.tws["wave_solu"][id_tws]
                rextr["wave_rms"] = self.tws["rms"][id_tws]
                rextr["blaze"] = self.blaze

                # 6. append info
                rextr["jdmid"] = jdmid
                rextr["exptime"] = exptime
                rextr["bvc"] = bvc

                # convert to table
                tstar = table.Table([rextr])
                tstar.meta = OrderedDict(hdr)

                # colname mapping
                tstar.rename_columns([
                    'err_extr', 'err_extr1', 'err_extr2', 'err_sum',
                    'mask_extr', 'spec_extr', 'spec_extr1', 'spec_extr2',
                    'spec_sum'
                ], [
                    'err', 'err1', 'err2', 'err_sum', 'mask', 'flux', 'flux1',
                    'flux2', 'flux_sum'
                ])

                if not write:
                    return tstar
                assert os.path.exists(self.extdir)
                fp_output = "{}/slit{}_{}_{}".format(self.extdir, self.slit,
                                                     prefix,
                                                     os.path.basename(fp))
                print("@Slit[{}]: saving to {} ...".format(
                    self.slit, fp_output))
                tstar.write(fp_output, overwrite=True)
                return fp_output

            except Exception:
                return fp
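
The ipcluster branch of proc_star above is built on ipyparallel's push/scatter/execute/gather pattern. The following stripped-down sketch shows that pattern on its own; the profile name and the toy workload are placeholders, not taken from the source.

# Hedged sketch of the push/scatter/execute/gather pattern used above.
from ipyparallel import Client

rc = Client(profile="default")          # placeholder profile; ipcluster must be running
dv = rc.direct_view()
dv.block = True                         # make the calls below synchronous, as above

dv.push({"offset": 100})                # the same object is pushed to every engine
dv.scatter("chunk", list(range(16)))    # each engine receives a slice of the list
dv.execute("result = [offset + x for x in chunk]")
combined = dv.gather("result")          # slices re-assembled in engine order
print(combined)                         # [100, 101, ..., 115]
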
        # Get the SNR from the simulation parameters
        SNR = np.array(self.params['SNR'])

        # Calculates the Theoretical SER and BER
        theoretical_ser = modulator.calcTheoreticalSER(SNR)
        theoretical_ber = modulator.calcTheoreticalBER(SNR)
        return SNR, ber, ser, theoretical_ber, theoretical_ser


if __name__ == '__main__':
    # Since we are using the parallel capabilities provided by IPython, we
    # need to create a client and then a view of the IPython engines that
    # will be used.
    from ipyparallel import Client
    cl = Client()
    dview = cl.direct_view()

    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    # NOTE: Before running the code above, initialize the ipython
    # engines. One easy way to do that is to call the "ipcluster start"
    # command in a terminal.
    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

    # Add the folder containing PyPhysim to the python path in all the
    # engines
    dview.execute('import sys')
    dview.execute('sys.path.append("{0}")'.format(parent_dir))

    from matplotlib import pyplot as plt
    from apps.awgn_modulators.simulate_parallel_psk import \
        VerySimplePskSimulationRunner
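
The listing ends right after the import, before any plotting. As a hedged reconstruction of what typically follows, the sketch below plots a simulated BER curve against a theoretical one; the names SNR, ber and theoretical_ber mirror the return statement above, while the arrays themselves are placeholders invented purely for illustration.

# Hedged plotting sketch with placeholder data (not from the original source).
import numpy as np
from matplotlib import pyplot as plt

SNR = np.arange(0, 21, 3)                      # placeholder SNR grid in dB
ber = 0.5 * np.exp(-10 ** (SNR / 10.0) / 2.0)  # placeholder simulated BER
theoretical_ber = 1.05 * ber                   # placeholder theoretical curve

plt.semilogy(SNR, ber, 'o-', label='Simulated BER')
plt.semilogy(SNR, theoretical_ber, '--', label='Theoretical BER')
plt.xlabel('SNR (dB)')
plt.ylabel('BER')
plt.legend()
plt.grid(True)
plt.show()
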
Exemple #17
0
    # sm = RegionMesh("cube.obj",useCache=False)
    # sm.simpleTestCase()

    if os.getenv('IPYTHON_PROFILE') is not None:
        from ipyparallel import Client

        rc = Client(
            profile=os.getenv('IPYTHON_PROFILE'),
            # sshserver='127.0.0.1',
            debug=False)
        print('Client IDs: ' + str(rc.ids))

        # http://davidmasad.com/blog/simulation-with-ipyparallel/
        # http://people.duke.edu/~ccc14/sta-663-2016/19C_IPyParallel.html
        d_view = rc.direct_view(
            targets='all')  # rc[:] # Direct view into clients
        lb_view = rc.load_balanced_view(targets='all')
    else:
        print("No IPYTHON_PROFILE environment variable set, running in serial")
        d_view = None
        lb_view = None
        rc = None

    meshFile = 'mesh/striatum-mesh.obj'
    # meshFile = "mesh/cortex-mesh-200.obj"
    sm = RegionMesh(meshFile,
                    d_view=d_view,
                    lb_view=lb_view,
                    raytrace_borders=False)

    # import cProfile
Exemple #18
0
class ClusterLab(epyc.Lab):
    """A :class:`Lab` running on an ``ipyparallel`` compute
    cluster.

    Experiments are submitted to engines in the cluster for
    execution in parallel, with the experiments being performed
    asynchronously to allow for disconnection and subsequent retrieval
    of results. Combined with a persistent :class:`LabNotebook`, this allows
    for fully decoupled access to an on-going computational experiment
    with piecewise retrieval of results.

    This class requires a cluster to already be set up and running, configured
    for persistent access, with access to the necessary code and libraries,
    and with appropriate security information available to the client.
    """

    # Tuning parameters
    WaitingTime = 30  #: Waiting time for checking for job completion. Lower values increase network traffic.

    def __init__(self,
                 notebook=None,
                 url_file=None,
                 profile=None,
                 profile_dir=None,
                 ipython_dir=None,
                 context=None,
                 debug=False,
                 sshserver=None,
                 sshkey=None,
                 password=None,
                 paramiko=None,
                 timeout=10,
                 cluster_id=None,
                 use_dill=False,
                 **extra_args):
        """Create an empty lab attached to the given cluster. Most of the arguments
        are as expected by the ``ipyparallel.Client`` class, and are used to create the
        underlying connection to the cluster. The connection is opened immediately,
        meaning the cluster must be up and accessible when creating a lab to use it.

        :param notebook: the notebook used to store results (defaults to an empty :class:`LabNotebook`)
        :param url_file: file containing connection information for accessing cluster
        :param profile: name of the IPython profile to use
        :param profile_dir: directory containing the profile's connection information
        :param ipython_dir: directory containing profile directories
        :param context: ZMQ context
        :param debug: whether to issue debugging information (defaults to False)
        :param sshserver: username and machine for ssh connections
        :param sshkey: file containing ssh key
        :param password: ssh password
        :param paramiko: True to use paramiko for ssh (defaults to False)
        :param timeout: timeout in seconds for ssh connection (defaults to 10s)
        :param cluster_id: string added to runtime files to prevent collisions
        :param use_dill: whether to use Dill as pickler (defaults to False)"""
        super(epyc.ClusterLab, self).__init__(notebook)

        # record all the connection arguments for later
        self._arguments = dict(url_file=url_file,
                               profile=profile,
                               profile_dir=profile_dir,
                               ipython_dir=ipython_dir,
                               context=context,
                               debug=debug,
                               sshserver=sshserver,
                               sshkey=sshkey,
                               password=password,
                               paramiko=paramiko,
                               timeout=timeout,
                               cluster_id=cluster_id,
                               use_dill=use_dill,
                               **extra_args)
        self._client = None

        # connect to the cluster
        self.open()

        # use Dill if requested
        if use_dill:
            self.use_dill()

    # ---------- Protocol ----------

    def open(self):
        """Connect to the cluster."""
        if self._client is None:
            self._client = Client(**self._arguments)

    def close(self):
        """Close down the connection to the cluster."""
        if self._client is not None:
            self._client.close()
            self._client = None

    def recreate(self):
        '''Save the arguments needed to re-connect to the cluster we use.

        :returns: a (classname, args) pair'''
        (cn, args) = super(ClusterLab, self).recreate()
        nargs = args.copy()
        nargs.update(self._arguments)
        return (classname, nargs)

    # ---------- Remote control of the compute engines ----------

    def numberOfEngines(self):
        """Return the number of engines available to this lab.

        :returns: the number of engines"""
        return len(self.engines())

    def engines(self):
        """Return a list of the available engines.

        :returns: a list of engines"""
        self.open()
        return self._client[:]

    def use_dill(self):
        """Make the cluster use Dill as pickler for transferring results. This isn't
        generally needed, but is sometimes useful for particularly complex experiments
        such as those involving closures. (Or, to put it another way, if you find yourself
        tempted to use this method, consider re-structuring your experiment code.)"""
        self.open()
        with self.sync_imports(quiet=True):
            import dill
        self._client.direct_view().use_dill()

    def sync_imports(self, quiet=False):
        """Return a context manager to control imports onto all the engines
        in the underlying cluster. This method is used within a ``with`` statement.

        Any imports should be done with no experiments running, otherwise the
        method will block until the cluster is quiet. Generally imports will be one
        of the first things done when connecting to a cluster. (But be careful
        not to accidentally try to re-import if re-connecting to a running
        cluster.)

        :param quiet: if True, suppresses messages (defaults to False)
        :returns: a context manager"""
        self.open()
        return self._client[:].sync_imports(quiet=quiet)

    # ---------- Running experiments ----------

    def _mixup(self, ps):
        """Private method to mix up a list of values in-place using a Fisher-Yates
        shuffle (see https://en.wikipedia.org/wiki/Fisher-Yates_shuffle).

        :param ps: the array
        :returns: the array, shuffled in-place"""
        for i in range(len(ps) - 1, 0, -1):
            j = int(numpy.random.random() * (i + 1))  # j in [0, i] inclusive
            temp = ps[i]
            ps[i] = ps[j]
            ps[j] = temp
        return ps

    def runExperiment(self, e):
        """Run the experiment across the parameter space in parallel using
        all the engines in the cluster. This method returns immediately.

        The experiments are run asynchronously, with the points in the parameter
        space being explored randomly so that intermediate retrievals of results
        are more representative of the overall result. Put another way, for a lot
        of experiments the results available will converge towards a final
        answer, so we can plot them and see the answer emerge.

        :param e: the experiment"""

        # create the parameter space
        space = self.parameterSpace()

        # only proceed if there's work to do
        if len(space) > 0:
            nb = self.notebook()

            # randomise the order of the parameter space so that we evaluate across
            # the space as we go along to try to make intermediate (incomplete) result
            # sets more representative of the overall result set
            ps = self._mixup(space)

            try:
                # connect to the cluster
                self.open()

                # submit an experiment at each point in the parameter space to the cluster
                view = self._client.load_balanced_view()
                jobs = []
                for p in ps:
                    jobs.extend((view.apply_async((lambda p: e.set(p).run()),
                                                  p)).msg_ids)

                    # there seems to be a race condition in submitting jobs,
                    # whereby jobs get dropped if they're submitted too quickly
                    time.sleep(0.01)

                # record the message ids of all the jobs as submitted but not yet completed
                psjs = zip(ps, jobs)
                for (p, j) in psjs:
                    nb.addPendingResult(p, j)
            finally:
                # commit our pending results in the notebook
                nb.commit()
                self.close()

    def updateResults(self):
        """Update our results within any pending results that have completed since we
        last retrieved results from the cluster.

        :returns: the number of pending results completed at this call"""

        # we do all the tests for pending results against the notebook directly,
        # as the corresponding methods on self call this method themselves
        nb = self.notebook()

        # look for pending results if we're waiting for any
        n = 0
        if nb.numberOfPendingResults() > 0:
            # we have results to get
            self.open()
            for j in set(nb.pendingResults()):
                # query the status of a job
                status = self._client.result_status(j, status_only=False)

                # add all completed jobs to the notebook
                if j in status['completed']:
                    r = status[j]

                    # update the result in the notebook, cancelling
                    # the pending result as well
                    # values come back from Client.result_status() in
                    # varying degrees of list-nesting, which LabNotebook.addResult()
                    # handles itself
                    nb.addResult(r, j)

                    # commit changes to the notebook
                    nb.commit()

                    # purge the completed job from the cluster
                    self._client.purge_hub_results(j)

                    # record that we retrieved the results for the given job
                    n = n + 1
        return n

    # ---------- Accessing results ----------

    def numberOfResults(self):
        """Return the number of results we have available at the moment.

        :returns: the number of results"""
        self.updateResults()
        return self.notebook().numberOfResults()

    def numberOfPendingResults(self):
        """Return the number of results we are waiting for.

        :returns: the number of pending results"""
        self.updateResults()
        return self.notebook().numberOfPendingResults()

    def _availableResultsFraction(self):
        """Private method to return the fraction of results available, as a real number
        between 0 and 1. This does not update the results fetched from the cluster.

        :returns: the fraction of available results"""
        tr = (self.notebook().numberOfResults() +
              self.notebook().numberOfPendingResults())
        if tr == 0:
            return 0
        else:
            return (self.notebook().numberOfResults() + 0.0) / tr

    def readyFraction(self):
        """Test what fraction of results are available. This will change over
        time as the results come in.

        :returns: the fraction from 0 to 1"""
        self.updateResults()
        return self._availableResultsFraction()

    def ready(self):
        """Test whether all the results are available. This will change over
        time as the results come in.

        :returns: True if all the results are available"""
        return (self.readyFraction() == 1)

    def wait(self, timeout=-1):
        """Wait for all pending results to be finished. If timeout is set,
        return after this many seconds regardless.

        :param timeout: timeout period in seconds (defaults to forever)
        :returns: True if all the results completed"""

        # we can't use ipyparallel.Client.wait() for this, because that
        # method only works for cases where the Client object is the one that
        # submitted the jobs to the cluster hub -- and therefore has the
        # necessary data structures to perform synchronisation. This isn't the
        # case for us, as one of the main goals of epyc is to support disconnected
        # operation, which implies a different Client object retrieving results
        # than the one that submitted the jobs in the first place. This is
        # unfortunate, but understandable given the typical use cases for
        # Client objects in ipyparallel.
        #
        # Instead, we have to code around this with a little busy-waiting. The
        # ClusterLab.WaitingTime class attribute sets the polling latency, and we
        # repeatedly wait for this amount of time before updating the results. The
        # latency essentially controls how busy this process is: given that most
        # simulations are expected to be long, a latency in the tens of seconds
        # feels about right as a default.
        if self.numberOfPendingResults() > 0:
            # we've got pending results, wait for them
            timeWaited = 0
            while (timeout < 0) or (timeWaited < timeout):
                if self.numberOfPendingResults() == 0:
                    # no pending jobs left, we're complete
                    return True
                else:
                    # not done yet, calculate the waiting period
                    if timeout == -1:
                        # wait for the default waiting period
                        dt = self.WaitingTime
                    else:
                        # wait for the default waiting period or until the end
                        # of the timeout, whichever comes first
                        if (timeout - timeWaited) < self.WaitingTime:
                            dt = timeout - timeWaited
                        else:
                            dt = self.WaitingTime

                    # sleep for a while
                    time.sleep(dt)
                    timeWaited = timeWaited + dt

            # if we get here, the timeout expired, so do a final check
            # and then exit
            return (self.numberOfPendingResults() == 0)

        else:
            # no results, so we got them all
            return True

    # ---------- Managing pending results ----------

    def pendingResults(self):
        """Return the list of job ids for any pending results.

        :returns: a list of job ids"""
        return self.notebook().pendingResults()

    def pendingResultsFor(self, params):
        """Return a list of job ids for any results pending for experiments
        at the given point in the parameter space.

        :param params: the experimental parameters
        :returns: a list of job ids"""
        return self.notebook().pendingResultsFor(params)

    def _abortJobs(self, js):
        """Private method to abort a set of jobs.

        :param js: the job ids to be aborted"""
        self.open()
        self._client.abort(jobs=js)
        self.close()

    def cancelPendingResultsFor(self, params):
        """Cancel any results pending for experiments at the given point
        in the parameter space.

        :param params: the experimental parameters"""

        # grab the result job ids
        jobs = self.pendingResultsFor(params)

        if len(jobs) > 0:
            # abort in the cluster
            self._abortJobs(jobs)

            # cancel in the notebook
            self.notebook().cancelPendingResultsFor(params)

    def cancelAllPendingResults(self):
        """Cancel all pending results."""

        # grab all the pending job ids
        jobs = self.pendingResults()

        if len(jobs) > 0:
            # abort in the cluster
            self._abortJobs(jobs)

            # cancel in the notebook
            self.notebook().cancelAllPendingResults()
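
Because submission and retrieval are decoupled, a later session can attach a fresh ClusterLab to the same persistent notebook and collect whatever has completed in the meantime. A hedged sketch follows, assuming epyc's JSONLabNotebook as the persistent notebook, a placeholder experiment class, and the same dict-style parameter interface as in the earlier sketch; check these against the installed epyc.

# Hedged sketch of disconnected submission and retrieval (not from the source).
import epyc

# Session 1: submit the jobs, then disconnect.
nb = epyc.JSONLabNotebook("experiment.json", create=True)   # assumption: JSON-backed notebook
lab = ClusterLab(notebook=nb, profile="mycluster")          # placeholder profile
lab['x'] = range(100)                                       # assumption: dict-style parameters
lab.runExperiment(SomeExperiment())                         # SomeExperiment is a placeholder
lab.close()

# Session 2, possibly much later or from another process: reconnect and pick up
# whatever has finished so far. readyFraction() refreshes from the cluster.
nb2 = epyc.JSONLabNotebook("experiment.json")
lab2 = ClusterLab(notebook=nb2, profile="mycluster")
print("{0:.0%} of results ready".format(lab2.readyFraction()))
partial = lab2.notebook().results()                         # assumption: LabNotebook.results()
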