    @classmethod
    def setUpClass(cls):

        if not MPControl.is_initialized:
            MPControl.set_multiprocess_engine("local")
            MPControl.connect()

        DotProduct.set_mkl(True)
Example #2
def set_up_dask(n_jobs=2):
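    # Configure a dask-cluster engine from the "rusty_ccb" default profile, activate the
    # inferelator conda environment on the workers, restrict nodes to Broadwell, and connect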
    MPControl.set_multiprocess_engine("dask-cluster")
    MPControl.client.use_default_configuration("rusty_ccb", n_jobs=n_jobs)
    MPControl.client.add_worker_conda(
        "source ~/.local/anaconda3/bin/activate inferelator")
    MPControl.client.add_slurm_command_line("--constraint=broadwell")
    MPControl.connect()
Example #3
    def initialize_multiprocessing(self):
        """
        Register the multiprocessing controller if set and run .connect()
        """
        if self.multiprocessing_controller is not None:
            MPControl.set_multiprocess_engine(self.multiprocessing_controller)
        MPControl.connect()
Example #4
    @classmethod
    def setUpClass(cls):
        cls.tempdir = tempfile.mkdtemp()
        MPControl.shutdown()
        MPControl.set_multiprocess_engine(cls.name)

        # Create a wrapper for LocalCluster so that the HPC controller can be tested locally,
        # and bind it as a method so that it also behaves correctly under py27
        def fake_cluster(*args, **kwargs):
            replace_args = dict()
            replace_args["n_workers"] = kwargs.pop("n_workers", 0)
            replace_args["threads_per_worker"] = kwargs.pop(
                "threads_per_worker", 1)
            replace_args["processes"] = kwargs.pop("processes", True)
            replace_args["local_dir"] = kwargs.pop("local_directory", None)

            clust = distributed.LocalCluster(**replace_args)
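            # Track and fake the active worker count: each call to _count_active_workers
            # reports one more worker than the previous call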
            clust._active_worker_n = 0

            def _count_active_workers(self):
                val = self._active_worker_n
                self._active_worker_n += 1
                return val

            clust._count_active_workers = types.MethodType(
                _count_active_workers, clust)
            return clust

        MPControl.client._cluster_controller_class = types.MethodType(
            fake_cluster, MPControl.client)
        MPControl.client.set_job_size_params(n_jobs=1,
                                             n_cores_per_job=1,
                                             mem_per_job="1gb")
        MPControl.client._local_directory = cls.tempdir
        MPControl.connect()
Example #5
def start_mpcontrol_dask(n_cores=N_CORES):
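    # Raise logging verbosity, then configure a dask-cluster engine with a fixed core count,
    # a 48-hour walltime, a SLURM/GCC/conda worker environment, and the "ccb" SLURM partition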
    utils.Debug.set_verbose_level(1)
    MPControl.set_multiprocess_engine("dask-cluster")
    MPControl.client.minimum_cores = n_cores
    MPControl.client.maximum_cores = n_cores
    MPControl.client.walltime = '48:00:00'
    MPControl.client.add_worker_env_line('module load slurm')
    MPControl.client.add_worker_env_line('module load gcc/8.3.0')
    MPControl.client.add_worker_env_line('source ' + CONDA_ACTIVATE_PATH)
    MPControl.client.cluster_controller_options.append("-p ccb")
    MPControl.connect()
Example #6
    def __init__(self, *args, **kwargs):
        super(TestBBSRrunnerPython, self).__init__(*args, **kwargs)
        # Extra behavior: only run if KVSClient can reach the host:
        try:
            MPControl.connect()
        except Exception as e:
            if str(e) == 'Missing host':
                print(
                    'Test test_bbsr.py exiting since KVS host is not running')
                print(
                    'Try rerunning tests with python $LOCALREPO/kvsstcp.py --execcmd "nosetests  --nocapture -v"'
                )
                self.missing_kvs_host = True

        # Mock out Slurm process IDs so that KVS can access this process ID in bbsr_python.py
        os.environ['SLURM_PROCID'] = str(0)
        os.environ['SLURM_NTASKS'] = str(1)
Example #7
from inferelator.distributed.inferelator_mp import MPControl

MPControl.set_multiprocess_engine("local")
MPControl.connect()
Example #8
    @classmethod
    def tearDownClass(cls):
        MPControl.shutdown()
        MPControl.set_multiprocess_engine("local")
        MPControl.connect()
        if cls.tempdir is not None:
            shutil.rmtree(cls.tempdir)
Example #9
    @classmethod
    def setUpClass(cls):
        cls.tempdir = tempfile.mkdtemp()
        MPControl.shutdown()
        MPControl.set_multiprocess_engine("dask-local")
        # Run a single dask worker in threads-only mode, using a temporary local directory
        MPControl.connect(local_dir=cls.tempdir, n_workers=1, processes=False)
Example #10
def set_up_dask(n_jobs=1):
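    # Configure a dask-cluster engine from the "rusty_rome" default profile, activate the
    # inferelator conda environment on the workers, extend the walltime to 168 hours, and connect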
    MPControl.set_multiprocess_engine("dask-cluster")
    MPControl.client.use_default_configuration("rusty_rome", n_jobs=n_jobs)
    MPControl.client.add_worker_conda("source ~/.local/anaconda3/bin/activate inferelator")
    MPControl.client.set_job_size_params(walltime="168:00:00")
    MPControl.connect()
Example #11
    @classmethod
    def setUpClass(cls):
        if not MPControl.is_initialized:
            MPControl.connect()
Example #12
    @classmethod
    def tearDownClass(cls):
        MPControl.shutdown()
        MPControl.set_multiprocess_engine("local")
        MPControl.connect()
Example #13
    @classmethod
    def setUpClass(cls):
        MPControl.shutdown()
        MPControl.set_multiprocess_engine(cls.name)
        MPControl.connect()
Example #14
    @classmethod
    def setUpClass(cls):
        cls.tempdir = tempfile.mkdtemp()
        MPControl.shutdown()
        MPControl.set_multiprocess_engine(cls.name)
        MPControl.connect(local_dir=cls.tempdir, n_workers=1)
Example #15
    @classmethod
    def setUpClass(cls):
        cls.temp_dir = tempfile.mkdtemp()
        # Start an ephemeral KVS server; connect() is given its host and port below
        cls.server = kvsstcp.KVSServer("", 0)
        MPControl.shutdown()
        MPControl.set_multiprocess_engine(cls.name)
        MPControl.connect(host=cls.server.cinfo[0], port=cls.server.cinfo[1])
Example #16
def start_mpcontrol_dask(n_cores=N_CORES):
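    # Configure a dask-cluster engine from the "rusty_ccb" default profile and activate a shared
    # inferelator conda environment on the workers (n_cores is not used in this configuration)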
    MPControl.set_multiprocess_engine("dask-cluster")
    MPControl.client.use_default_configuration("rusty_ccb", n_jobs=2)
    MPControl.client.add_worker_conda(
        "source /mnt/ceph/users/sysbio/.anaconda3/bin/activate inferelator")
    MPControl.connect()