Code example #1
0
def test_generate_worker_config_missing_params():
    """Generation must raise when the 'conda_env' parameter is missing."""
    ramp_config = read_config(ramp_config_template())
    # Re-key the value under an unexpected name so the mandatory
    # 'conda_env' parameter is absent and config generation fails.
    ramp_config['worker']['env'] = ramp_config['worker'].pop('conda_env')
    expected_message = "The conda worker is missing the parameter"
    with pytest.raises(ValueError, match=expected_message):
        generate_worker_config(ramp_config)
Code example #2
0
def test_aws_worker():
    """Exercise the full AWS worker lifecycle against a local config.

    Skipped unless a local ``config.yml`` sits next to this file, since
    the test needs real AWS credentials to run.
    """
    if not os.path.isfile(os.path.join(HERE, 'config.yml')):
        pytest.skip("Only for local tests for now")

    ramp_kit_dir = os.path.join(HERE, 'kits', 'iris')

    # make sure prediction and log dirs exist, if not, add them
    add_empty_dir(os.path.join(ramp_kit_dir, 'predictions'))
    add_empty_dir(os.path.join(ramp_kit_dir, 'logs'))

    # If prediction / log directories from a previous run are still
    # there, remove them. Bug fix: os.listdir returns bare entry names,
    # so they must be joined with the parent directory before the isdir
    # check — the original checked the bare name against the CWD and
    # never removed anything.
    for subdir_name in ('predictions', 'logs'):
        parent_dir = os.path.join(ramp_kit_dir, subdir_name)
        for entry in os.listdir(parent_dir):
            entry_path = os.path.join(parent_dir, entry)
            if os.path.isdir(entry_path):
                shutil.rmtree(entry_path)

    config = read_config(os.path.join(HERE, 'config.yml'))
    worker_config = generate_worker_config(config)
    worker = AWSWorker(worker_config, submission='starting_kit_local')
    # Drive the worker through its full state machine and check each
    # transition: setup -> running/finished -> collected -> killed.
    worker.setup()
    assert worker.status == 'setup'
    worker.launch_submission()
    assert worker.status in ('running', 'finished')
    worker.collect_results()
    assert worker.status == 'collected'
    assert os.path.isdir(
        os.path.join(ramp_kit_dir, 'predictions', 'starting_kit_local',
                     'fold_0'))
    assert os.path.isfile(
        os.path.join(ramp_kit_dir, 'logs', 'starting_kit_local', 'log'))

    worker.teardown()
    assert worker.status == 'killed'
Code example #3
0
File: dispatcher.py  Project: tomMoral/ramp-board
 def __init__(self,
              config,
              event_config,
              worker=None,
              n_workers=1,
              n_threads=None,
              hunger_policy=None):
     """Set up the dispatcher's worker class, queues, and configs."""
     self.worker = worker if worker is not None else CondaEnvWorker
     if n_workers < 0:
         # Negative values count backwards from the CPU count
         # (joblib-style): -1 -> all CPUs, -2 -> all but one, etc.,
         # never dropping below a single worker.
         self.n_workers = max(
             multiprocessing.cpu_count() + 1 + n_workers, 1)
     else:
         self.n_workers = n_workers
     self.hunger_policy = hunger_policy
     # poison pill flag used to signal the dispatcher to shut down
     self._poison_pill = False
     # queues carrying submissions through the dispatch pipeline
     self._awaiting_worker_queue = Queue()
     self._processing_worker_queue = LifoQueue(maxsize=self.n_workers)
     self._processed_submission_queue = Queue()
     # configurations may be passed either as file paths or as dicts
     if isinstance(config, str) and isinstance(event_config, str):
         self._database_config = read_config(
             config, filter_section='sqlalchemy')
         self._ramp_config = generate_ramp_config(event_config, config)
     else:
         self._database_config = config['sqlalchemy']
         self._ramp_config = event_config['ramp']
     self._worker_config = generate_worker_config(event_config, config)
     # cap the thread count of openmp, openblas, and mkl if requested
     self.n_threads = n_threads
     if self.n_threads is not None:
         if not isinstance(self.n_threads, numbers.Integral):
             raise TypeError(
                 "The parameter 'n_threads' should be a positive integer. "
                 "Got {} instead.".format(repr(self.n_threads)))
         for lib in ('OMP', 'MKL', 'OPENBLAS'):
             os.environ[lib + '_NUM_THREADS'] = str(self.n_threads)
Code example #4
0
File: dispatcher.py  Project: mehdidc/ramp-board
 def __init__(self,
              config,
              event_config,
              worker=None,
              n_worker=1,
              hunger_policy=None):
     """Initialize the dispatcher's worker class, queues, and configs."""
     self.worker = worker if worker is not None else CondaEnvWorker
     if n_worker < 0:
         # joblib-style negative counts: -1 -> all CPUs, -2 -> all but
         # one, and so on, never dropping below a single worker.
         self.n_worker = max(
             multiprocessing.cpu_count() + 1 + n_worker, 1)
     else:
         self.n_worker = n_worker
     self.hunger_policy = hunger_policy
     # poison pill flag used to request the dispatcher loop to stop
     self._poison_pill = False
     # queues carrying submissions through the dispatch pipeline
     self._awaiting_worker_queue = Queue()
     self._processing_worker_queue = LifoQueue(maxsize=self.n_worker)
     self._processed_submission_queue = Queue()
     # configurations may be passed either as file paths or as dicts
     if (isinstance(config, six.string_types)
             and isinstance(event_config, six.string_types)):
         self._database_config = read_config(
             config, filter_section='sqlalchemy')
         self._ramp_config = generate_ramp_config(event_config, config)
     else:
         self._database_config = config['sqlalchemy']
         self._ramp_config = event_config['ramp']
     self._worker_config = generate_worker_config(event_config, config)
Code example #5
0
File: test_worker.py  Project: Micseb/ramp-board
def test_generate_worker_config(config):
    """The generated worker config matches the expected flat mapping."""
    base = '/tmp/databoard_test'
    expected_config = {
        'worker_type': 'conda',
        'conda_env': 'ramp-iris',
        'kit_dir': os.path.join(base, 'ramp-kits', 'iris'),
        'data_dir': os.path.join(base, 'ramp-data', 'iris'),
        'submissions_dir': os.path.join(base, 'submissions'),
        'predictions_dir': os.path.join(base, 'preds'),
        'logs_dir': os.path.join(base, 'log'),
    }
    assert generate_worker_config(config) == expected_config
Code example #6
0
File: cli.py  Project: tomMoral/ramp-board
def worker(config, event_config, submission, verbose):
    """Launch a standalone RAMP worker.

    The RAMP worker is in charge of processing a single submission by
    specifying the different locations (kit, data, logs, predictions)
    """
    if verbose:
        # one -v gives INFO, -vv (or more) gives DEBUG
        level = logging.INFO if verbose == 1 else logging.DEBUG
        logging.basicConfig(
            format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
            level=level,
            datefmt='%Y:%m:%d %H:%M:%S')
    # resolve the worker class from the merged event/global config
    worker_params = generate_worker_config(event_config, config)
    worker_cls = available_workers[worker_params['worker_type']]
    worker_cls(worker_params, submission).launch()