def test_master_with_async_jobs(self, mock_spawn, mock_get_result):
    """master() spawns a monitor thread when async bind jobs exist."""
    # Configure the mock BEFORE invoking the code under test; the
    # original set return_value after the call, where it had no effect.
    mock_get_result.return_value = "Jobs_Generator"

    def job(context, deployable):
        pass

    works = utils.ThreadWorks()
    dep_uuid = self.deployable_uuids[0]
    fake_dep = fake_deployable.fake_deployable_obj(
        self.context, uuid=dep_uuid)
    # ARQs whose bind runs asynchronously (a worker job is attached).
    arq_job_binds = {
        self.class_objects["bitstream_program"]:
            works.spawn(job, self.context, fake_dep),
        self.class_objects["function_program"]:
            works.spawn(job, self.context, fake_dep),
    }
    # ARQs whose bind completed instantly (no job attached).
    arq_binds = {
        self.class_objects["gpu"]: None,
        self.class_objects["no_program"]: None,
        self.class_objects["mlu"]: None,
    }
    arq_binds.update(arq_job_binds)
    objects.ext_arq.ExtARQJobMixin.master(self.context, arq_binds)
    # With at least one async job present, master collects the workers'
    # result generator once and spawns exactly one monitor thread.
    mock_get_result.assert_called_once()
    mock_spawn.assert_called_once()
def test_job_monitor_with_empty_arq(self, mock_result):
    """No binding-result check runs when the extarq list is empty."""
    workers = utils.ThreadWorks()
    finished = workers.spawn(lambda v: v, 1)
    generator = workers.get_workers_result(
        [finished], timeout=CONF.bind_timeout)
    extarqs = []
    objects.ext_arq.ExtARQ.job_monitor(self.context, generator, extarqs)
    mock_result.assert_not_called()
def test_job_monitor_without_jobs(self, mock_result):
    """Monitoring with no pending jobs still verifies binding results."""
    workers = utils.ThreadWorks()
    generator = workers.get_workers_result([], timeout=CONF.bind_timeout)
    pending_arqs = [
        self.class_objects[name]
        for name in ("bitstream_program", "function_program")
    ]
    objects.ext_arq.ExtARQ.job_monitor(
        self.context, generator, pending_arqs)
    mock_result.assert_called_once_with(self.context, pending_arqs)
def _bind_job(self, context, deployable):
    """Bind an accelerator, spawning a worker when the bind is a job.

    Returns the spawned job for an asynchronous bind; otherwise runs
    the bind inline and returns None.
    """
    checker = getattr(self, "_need_extra_bind_job", None)
    extra_needed = checker(context, deployable) if checker else None
    # Instant path: either the bind method is not flagged as a job, or
    # the subclass explicitly reported that no extra job is needed.
    if not getattr(self.bind, "is_job", False) or extra_needed is False:
        LOG.info("ARQ(%s) bind process is instant.", self.arq.uuid)
        self.bind(context, deployable)
        return
    LOG.info("Start job for ARQ(%s) bind.", self.arq.uuid)
    return utils.ThreadWorks().spawn(self.bind, context, deployable)
def test_job_monitor_with_job_successful(self, mock_result):
    """Binding results are checked after all jobs finish cleanly."""
    workers = utils.ThreadWorks()
    jobs = [workers.spawn(lambda v: v, n) for n in (1, 2)]
    generator = workers.get_workers_result(
        jobs, timeout=CONF.bind_timeout)
    pending_arqs = [
        self.class_objects["bitstream_program"],
        self.class_objects["function_program"],
    ]
    objects.ext_arq.ExtARQ.job_monitor(
        self.context, generator, pending_arqs)
    mock_result.assert_called_once_with(self.context, pending_arqs)
def test_job_monitor_with_job_exception(self, mock_result):
    """Binding results are still checked when one job raises."""
    workers = utils.ThreadWorks()
    failing = workers.spawn(lambda v: v / 0, 1)  # ZeroDivisionError
    passing = workers.spawn(lambda v: v, 1)
    generator = workers.get_workers_result(
        [failing, passing], timeout=CONF.bind_timeout)
    pending_arqs = [
        self.class_objects["bitstream_program"],
        self.class_objects["function_program"],
    ]
    objects.ext_arq.ExtARQ.job_monitor(
        self.context, generator, pending_arqs)
    mock_result.assert_called_once_with(self.context, pending_arqs)
def master(cls, context, arq_binds):
    """Start a master thread to monitor job workers.

    :param context: request context.
    :param arq_binds: mapping of ExtARQ-like objects to either a
        spawned bind job (asynchronous bind) or None (instant bind).
    """
    arq_uuids = [ea.arq.uuid for ea in arq_binds.keys()]
    # Keep only the binds backed by a worker job; the keys here are the
    # ExtARQ objects themselves, not UUIDs (the original misnamed them).
    jobs = {ext_arq: job for ext_arq, job in arq_binds.items() if job}
    if not jobs:
        # Every bind completed synchronously; verify the results now.
        LOG.info("All ARQ(%s) bind process are instant.", arq_uuids)
        cls.check_bindings_result(context, arq_binds.keys())
        return
    th_workers = utils.ThreadWorks()
    works_generator = th_workers.get_workers_result(
        jobs.values(), timeout=CONF.bind_timeout)
    LOG.info("Check ARQ(%s) bind jobs status.", arq_uuids)
    # Monitor the outstanding jobs from a dedicated master thread.
    th_workers.spawn_master(
        cls.job_monitor, context, works_generator, arq_binds.keys())