def _handle_enqueue_action(message, run_queue, event_queue):
    """
    Handle the ``'enqueue'`` action.

    Claims the run referenced by ``message['run_id']`` for this worker,
    stamps its enqueue time, pushes it onto ``run_queue`` and publishes
    an ``'enqueued'`` event on ``event_queue``.

    :param message:
        Decoded broadcast message (``dict``) holding the ``'run_id'``.
    :param run_queue:
        Queue the claimed :class:`Run` is put on for execution.
    :param event_queue:
        Queue JSON-encoded events are published to.
    """
    run = Run('{0}{1}/'.format(
        config.get('job_runner_worker', 'run_resource_uri'),
        message['run_id']))
    worker_list = Worker.get_list(
        config.get('job_runner_worker', 'worker_resource_uri'))

    if run.enqueue_dts:
        logger.warning(
            'Was expecting that run: {0} was not in queue yet'.format(run.id))
    elif len(worker_list) != 1:
        # the branch fires for zero workers as well as for multiple, so log
        # the actual count instead of the misleading "multiple workers"
        logger.warning(
            'Expected the API to return exactly one worker, got {0}'.format(
                len(worker_list)))
    else:
        run.patch({
            'enqueue_dts': datetime.now(utc).isoformat(' '),
            # set the worker so we know which worker of the pool claimed the
            # run
            'worker': worker_list[0].resource_uri,
        })
        run_queue.put(run)
        event_queue.put(
            json.dumps({
                'event': 'enqueued',
                'run_id': run.id,
                'kind': 'run'
            }))
def reset_incomplete_runs():
    """
    Cleanup incomplete runs.

    A run is left incomplete when a worker dies while the run hasn't
    been finished (or was marked as enqueued). These runs needs to be
    re-started and therefore reset to scheduled state (enqueue and
    start timestamps cleared).
    """
    logger.info('Cleaning up incomplete runs')
    incomplete_runs = []

    for state in ['in_queue', 'started']:
        incomplete_runs.extend(Run.get_list(
            config.get('job_runner_worker', 'run_resource_uri'),
            params={
                'state': state,
                # scope the query to runs claimed by this worker; without
                # this filter runs owned by other workers would be reset too
                'worker__api_key': config.get('job_runner_worker', 'api_key'),
            }
        ))

    for run in incomplete_runs:
        logger.warning('Run {0} was left incomplete'.format(run.resource_uri))
        run.patch({
            'enqueue_dts': None,
            'start_dts': None,
        })
def reset_incomplete_runs():
    """
    Reset incomplete runs back to the scheduled state.

    When a worker dies before a run finishes (or right after marking it
    enqueued), the run is stranded in the ``'in_queue'`` or ``'started'``
    state. Clearing the enqueue/start timestamps makes those runs
    eligible for scheduling again. Only runs claimed by this worker
    (matched by API key) are touched.
    """
    logger.info('Cleaning up incomplete runs')

    # hoist the config lookups out of the loop; they are loop-invariant
    run_uri = config.get('job_runner_worker', 'run_resource_uri')
    worker_key = config.get('job_runner_worker', 'api_key')

    stuck_runs = []
    for run_state in ('in_queue', 'started'):
        stuck_runs.extend(Run.get_list(
            run_uri,
            params={
                'state': run_state,
                'worker__api_key': worker_key,
            }
        ))

    for stuck_run in stuck_runs:
        logger.warning(
            'Run {0} was left incomplete'.format(stuck_run.resource_uri))
        stuck_run.patch({
            'enqueue_dts': None,
            'start_dts': None,
        })
def test_job_property(self, JobMock):
    """
    Test job property.

    Accessing ``run_model.job`` should construct a Job from the
    ``'job'`` resource URI exactly once and return that instance.
    """
    job_payload = {'job': '/job/resource'}
    run_model = Run(Mock(), job_payload)

    self.assertEqual(JobMock.return_value, run_model.job)
    JobMock.assert_called_once_with('/job/resource')
def _handle_enqueue_action(message, run_queue, event_queue):
    """
    Handle the ``'enqueue'`` action.

    Marks the run referenced by ``message['run_id']`` as enqueued,
    hands it to ``run_queue`` and publishes an ``'enqueued'`` event on
    ``event_queue``. When the run already carries an enqueue timestamp,
    only a warning is logged.
    """
    run_uri = '{0}{1}/'.format(
        config.get('job_runner_worker', 'run_resource_uri'),
        message['run_id'],
    )
    run = Run(run_uri)

    # guard clause: an already-enqueued run must not be claimed twice
    if run.enqueue_dts:
        logger.warning(
            'Was expecting that run: {0} was not in queue yet'.format(
                run.id))
        return

    run.patch({
        'enqueue_dts': datetime.now(utc).isoformat(' ')
    })
    run_queue.put(run)
    event_queue.put(json.dumps(
        {'event': 'enqueued', 'run_id': run.id, 'kind': 'run'}))
def _handle_enqueue_action(message, run_queue, event_queue):
    """
    Handle the ``'enqueue'`` action.

    Claims the run for this pool worker, stamps its enqueue time,
    pushes it onto ``run_queue`` and publishes an ``'enqueued'`` event.
    Nothing beyond a warning happens when the run is already enqueued
    or when the worker lookup does not yield exactly one worker.
    """
    run_uri = "{0}{1}/".format(
        config.get("job_runner_worker", "run_resource_uri"),
        message["run_id"],
    )
    run = Run(run_uri)
    worker_list = Worker.get_list(
        config.get("job_runner_worker", "worker_resource_uri"))

    # guard clauses replace the original if/elif/else ladder
    if run.enqueue_dts:
        logger.warning(
            "Was expecting that run: {0} was not in queue yet".format(run.id))
        return

    if len(worker_list) != 1:
        logger.warning("API returned multiple workers, expected one")
        return

    run.patch({
        "enqueue_dts": datetime.now(utc).isoformat(" "),
        # set the worker so we know which worker of the pool claimed the
        # run
        "worker": worker_list[0].resource_uri,
    })
    run_queue.put(run)
    event_queue.put(
        json.dumps({"event": "enqueued", "run_id": run.id, "kind": "run"}))