def __init__(self, n_workers):
    """Initialize runner state and create the parallel executor backend.

    Parameters
    ----------
    n_workers:
        Number of workers the executor backend is created with.

    NOTE(review): the backend name comes from a module-level ``args.backend``
    that is not visible here — confirm ``args`` is in scope wherever this
    class is instantiated.
    """
    # Bookkeeping state for the run loop.
    self.is_done = False
    self.suggest_error = WaitingForTrials
    self.trials = []
    self.status = []
    self.working_dir = ""
    # Backend executor used to run jobs in parallel.
    self.executor = executor_factory.create(args.backend, n_workers)
def executor(self):
    """Return the executor used to run jobs in parallel, creating it lazily.

    On first access, the executor is built from the global Orion worker
    configuration (backend name, worker count, and backend-specific
    options).

    Returns
    -------
    The executor instance stored in ``self._executor``.
    """
    if self._executor is None:
        self._executor = executor_factory.create(
            orion.core.config.worker.executor,
            n_workers=orion.core.config.worker.n_workers,
            **orion.core.config.worker.executor_configuration,
        )
        # Bug fix: mark ownership only AFTER creation succeeds. The
        # original set ``_executor_owner = True`` before calling the
        # factory, so a failing create left the instance flagged as
        # owning an executor it never got (e.g. cleanup could then try
        # to close a nonexistent executor).
        self._executor_owner = True
    return self._executor
def __init__(self, n_workers, backend="joblib", executor=None):
    """Initialize runner state; build an executor unless one is supplied.

    Parameters
    ----------
    n_workers:
        Number of workers used when an executor must be created.
    backend:
        Name of the executor backend to create (default ``"joblib"``).
    executor:
        Optional pre-built executor; when given, it is used as-is and no
        new executor is created.
    """
    # Bookkeeping state for the run loop.
    self.is_done = False
    self.suggest_error = WaitingForTrials
    self.trials = []
    self.status = []
    self.working_dir = ""
    # Prefer the caller-provided executor; otherwise create one.
    self.executor = (
        executor_factory.create(backend, n_workers)
        if executor is None
        else executor
    )
def test_user_executor_is_not_deleted():
    """Check that executors passed to the client are not cleaned up."""
    global config
    conf = copy.deepcopy(config)
    executor = executor_factory.create("joblib", 1)
    conf["executor"] = executor
    # Bug fix: the experiment must be created with the customized ``conf``
    # (which carries the user-supplied executor), not the pristine
    # ``config`` — otherwise the executor under test never reaches the
    # client at all.
    with create_experiment(conf, base_trial) as (cfg, experiment, client):
        assert client.executor is not None, "Client has an executor"
        # Bug fix: a user-supplied executor is NOT owned by the client;
        # ownership (``_executor_owner``) is only set when the client
        # creates its own executor. The original asserted ``is True``,
        # which contradicts both the test's purpose and its own message.
        assert (
            client._executor_owner is False
        ), "Client should not own a user-supplied executor"
    # After the client context exits, the user's executor must still be
    # usable — i.e. the client did not close it during cleanup.
    future = executor.submit(function, 2, 2)
    assert future.get() == 4, "Executor was not closed & can still be used"
def test_executors_have_default_args(executor):
    """Every executor backend can be created from its name alone."""
    created = executor_factory.create(executor)
    with created:
        pass
def get_executor(self, task_index):
    """Build an executor for the task at ``task_index``.

    Parameters
    ----------
    task_index:
        Index into ``self.workers`` giving the worker count for this task.

    Returns
    -------
    A new executor created from ``self.executor_name`` with
    ``self.executor_config`` as backend options.
    """
    n_workers_for_task = self.workers[task_index]
    return executor_factory.create(
        self.executor_name,
        n_workers=n_workers_for_task,
        **self.executor_config,
    )