def test_cleanup_after_job(self):
    """cleanup_after_job() must gc the KVS cache for the job and revoke
    every outstanding celery task, forwarding the ``terminate`` flag to
    each ``revoke()`` call.

    Exercised twice (terminate=True, then terminate=False) under the same
    ``cache_gc``/``_get_task_ids`` patches, so the cumulative call counts
    grow from 1 to 2 across the two invocations.
    """
    with patch('openquake.engine.kvs.cache_gc') as cache_gc:
        with patch('openquake.engine.supervising.supervisor.'
                   '_get_task_ids') as gti:
            with patch('celery.task.control.revoke') as revoke:
                gti.return_value = ['task-id-1', 'task-id-2']

                supervisor.cleanup_after_job(self.job.id, terminate=True)

                self.assertEqual(1, cache_gc.call_count)
                self.assertEqual(((self.job.id, ), {}), cache_gc.call_args)

                self.assertEqual(1, gti.call_count)
                self.assertEqual(((self.job.id, ), {}), gti.call_args)

                # one revoke per task id, each passing terminate=True
                self.assertEqual(2, revoke.call_count)
                exp_revoke_args = [(('task-id-1',), {'terminate': True}),
                                   (('task-id-2',), {'terminate': True})]
                self.assertEqual(exp_revoke_args, revoke.call_args_list)

            # fresh `revoke` mock; `cache_gc`/`gti` keep accumulating calls
            with patch('celery.task.control.revoke') as revoke:
                gti.return_value = ['task-id-1', 'task-id-2']

                supervisor.cleanup_after_job(self.job.id, terminate=False)

                self.assertEqual(2, cache_gc.call_count)
                self.assertEqual(((self.job.id, ), {}), cache_gc.call_args)

                self.assertEqual(2, gti.call_count)
                self.assertEqual(((self.job.id, ), {}), gti.call_args)

                # one revoke per task id, each passing terminate=False
                self.assertEqual(2, revoke.call_count)
                exp_revoke_args = [(('task-id-1',), {'terminate': False}),
                                   (('task-id-2',), {'terminate': False})]
                self.assertEqual(exp_revoke_args, revoke.call_args_list)
def test_cleanup_after_job(self):
    """cleanup_after_job() must gc the KVS cache for the job and revoke
    every outstanding celery task, forwarding the ``terminate`` flag to
    each ``revoke()`` call.

    Run twice (terminate=True, then terminate=False) while the
    ``cache_gc`` and ``_get_task_ids`` patches stay active, so their
    call counts accumulate from 1 to 2 across the two invocations.
    """
    with patch('openquake.engine.kvs.cache_gc') as cache_gc:
        with patch('openquake.engine.supervising.supervisor.'
                   '_get_task_ids') as gti:
            with patch('celery.task.control.revoke') as revoke:
                gti.return_value = ['task-id-1', 'task-id-2']

                supervisor.cleanup_after_job(self.job.id, terminate=True)

                self.assertEqual(1, cache_gc.call_count)
                self.assertEqual(((self.job.id, ), {}), cache_gc.call_args)

                self.assertEqual(1, gti.call_count)
                self.assertEqual(((self.job.id, ), {}), gti.call_args)

                # one revoke per task id, each with terminate=True
                self.assertEqual(2, revoke.call_count)
                exp_revoke_args = [(('task-id-1', ), {
                    'terminate': True
                }), (('task-id-2', ), {
                    'terminate': True
                })]
                self.assertEqual(exp_revoke_args, revoke.call_args_list)

            # fresh `revoke` mock; `cache_gc`/`gti` keep accumulating calls
            with patch('celery.task.control.revoke') as revoke:
                gti.return_value = ['task-id-1', 'task-id-2']

                supervisor.cleanup_after_job(self.job.id, terminate=False)

                self.assertEqual(2, cache_gc.call_count)
                self.assertEqual(((self.job.id, ), {}), cache_gc.call_args)

                self.assertEqual(2, gti.call_count)
                self.assertEqual(((self.job.id, ), {}), gti.call_args)

                # one revoke per task id, each with terminate=False
                self.assertEqual(2, revoke.call_count)
                exp_revoke_args = [(('task-id-1', ), {
                    'terminate': False
                }), (('task-id-2', ), {
                    'terminate': False
                })]
                self.assertEqual(exp_revoke_args, revoke.call_args_list)
def test_cleanup_after_job(self):
    """cleanup_after_job() must gc the KVS cache for the given job id,
    calling ``cache_gc`` exactly once with the job id as its only
    positional argument.
    """
    with patch('openquake.engine.kvs.cache_gc') as cache_gc:
        supervisor.cleanup_after_job(self.job.id)

        self.assertEqual(1, cache_gc.call_count)
        self.assertEqual(((self.job.id, ), {}), cache_gc.call_args)
os.waitpid(job_pid, 0) os.waitpid(supervisor_pid, 0) else: try: with job_stats(job): _job_exec(job, log_level, exports, job_type, calc) except Exception, ex: logs.LOG.critical("Calculation failed with exception: '%s'" % str(ex)) raise finally: job.is_running = False job.save() # Normally the supervisor process does this, but since we don't # have one in this case, we have to call the cleanup manually. supervisor.cleanup_after_job(job.id, terminate=False) # Refresh the job record, in case we are forking and another process has # modified the job state. return _get_job(job.id) def _get_job(job_id): """ Helper function to get a job object by ID. Makes testing/mocking easier. """ return models.OqJob.objects.get(id=job_id) def _job_exec(job, log_level, exports, job_type, calc): """
# wait till both child processes are done os.waitpid(job_pid, 0) os.waitpid(supervisor_pid, 0) else: try: _job_exec(job, log_level, exports, job_type, calc) except Exception, ex: logs.LOG.critical("Calculation failed with exception: '%s'" % str(ex)) raise finally: job.is_running = False job.save() # Normally the supervisor process does this, but since we don't # have one in this case, we have to call the cleanup manually. supervisor.cleanup_after_job(job.id) # Refresh the job record, in case we are forking and another process has # modified the job state. return _get_job(job.id) def _get_job(job_id): """ Helper function to get a job object by ID. Makes testing/mocking easier. """ return models.OqJob.objects.get(id=job_id) def _job_exec(job, log_level, exports, job_type, calc): """
# wait till both child processes are done os.waitpid(job_pid, 0) os.waitpid(supervisor_pid, 0) else: try: _job_exec(job, log_level, log_file, exports, job_type, calc) except Exception, ex: logs.LOG.critical("Calculation failed with exception: '%s'" % str(ex)) raise finally: job.is_running = False job.save() # Normally the supervisor process does this, but since we don't # have one in this case, we have to call the cleanup manually. supervisor.cleanup_after_job(job.id) # Refresh the job record, in case we are forking and another process has # modified the job state. return _get_job(job.id) def _get_job(job_id): """ Helper function to get a job object by ID. Makes testing/mocking easier. """ return models.OqJob.objects.get(id=job_id) def _job_exec(job, log_level, log_file, exports, job_type, calc): """