def _handle_garbage_collection(self):
    """
    Reset the "handling" flag on stale scheduling queue items.

    Items whose scheduled start timestamp is older than the scheduling timeout
    threshold but which are still marked as being handled were most likely
    picked up by a scheduler process that died before it finished processing
    them. Clearing the flag lets another scheduler claim them.
    """
    cutoff = date.append_milliseconds_to_time(
        date.get_datetime_utc_now(),
        -EXECUTION_SCHEDUELING_TIMEOUT_THRESHOLD_MS
    )
    stale_items = ActionExecutionSchedulingQueue.query(
        scheduled_start_timestamp__lte=cutoff,
        handling=True
    ) or []

    for stale_item in stale_items:
        # Release the lock so another scheduler process can pick the item up.
        stale_item.handling = False

        try:
            ActionExecutionSchedulingQueue.add_or_update(stale_item, publish=False)
            LOG.info('Removing lock for orphaned execution queue item: %s',
                     stale_item.id)
        except db_exc.StackStormDBObjectWriteConflictError:
            # Another process got to the item first; nothing to do here.
            LOG.info(
                'Execution queue item updated before rescheduling: %s',
                stale_item.id)
def _handle_garbage_collection(self):
    """
    Periodically look for executions which have "handling" set to "True" and
    haven't been updated for a while (this likely indicates that an execution
    was picked up by a scheduler process which died before finishing the
    processing or similar) and reset handling to False so another scheduler
    can pick it up.
    """
    # Match queue items which should already have started (older than the
    # scheduling timeout threshold) but are still marked as being handled.
    query = {
        'scheduled_start_timestamp__lte': date.append_milliseconds_to_time(
            date.get_datetime_utc_now(),
            -EXECUTION_SCHEDUELING_TIMEOUT_THRESHOLD_MS
        ),
        'handling': True
    }

    execution_queue_item_dbs = ActionExecutionSchedulingQueue.query(**query) or []

    for execution_queue_item_db in execution_queue_item_dbs:
        # Release the lock so another scheduler process can pick the item up.
        execution_queue_item_db.handling = False

        try:
            ActionExecutionSchedulingQueue.add_or_update(execution_queue_item_db, publish=False)
            LOG.info('Removing lock for orphaned execution queue item: %s',
                     execution_queue_item_db.id)
        except db_exc.StackStormDBObjectWriteConflictError:
            # A write conflict means another process already updated the item;
            # leave it alone.
            LOG.info(
                'Execution queue item updated before rescheduling: %s',
                execution_queue_item_db.id
            )
def _delay(self, liveaction_db, execution_queue_item_db):
    """
    Mark the liveaction as delayed and push its queue item's scheduled start
    further into the future so it is retried later.
    """
    action_execution_id = str(execution_queue_item_db.action_execution_id)
    liveaction_id = str(execution_queue_item_db.liveaction_id)
    extra = {'queue_item_id': str(execution_queue_item_db.id)}

    LOG.info(
        '[%s] Liveaction "%s" is delayed and scheduling queue is updated.',
        action_execution_id, liveaction_id, extra=extra
    )

    liveaction_db = action_service.update_status(
        liveaction_db, action_constants.LIVEACTION_STATUS_DELAYED, publish=False
    )

    # Push the scheduled start out so the item is not immediately re-picked.
    rescheduled_start = date.append_milliseconds_to_time(
        date.get_datetime_utc_now(),
        POLICY_DELAYED_EXECUTION_RESCHEDULE_TIME_MS
    )
    execution_queue_item_db.scheduled_start_timestamp = rescheduled_start

    try:
        execution_queue_item_db.handling = False
        ActionExecutionSchedulingQueue.add_or_update(execution_queue_item_db, publish=False)
    except db_exc.StackStormDBObjectWriteConflictError:
        LOG.warning(
            '[%s] Database write conflict on updating scheduling queue.',
            action_execution_id, extra=extra
        )
def _reset_handling_flag(self):
    """
    Periodically look for executions which have "handling" set to "True" and
    haven't been updated for a while (this likely indicates that an execution
    was picked up by a scheduler process which died before finishing the
    processing or similar) and reset handling to False so another scheduler
    can pick it up.
    """
    # NOTE(review): the attribute name ends in "_min" but the value is passed
    # to append_milliseconds_to_time(), which suggests it actually holds
    # milliseconds -- confirm the unit where the attribute is defined.
    query = {
        'scheduled_start_timestamp__lte': date.append_milliseconds_to_time(
            date.get_datetime_utc_now(),
            -self._execution_scheduling_timeout_threshold_min
        ),
        'handling': True
    }

    execution_queue_item_dbs = ActionExecutionSchedulingQueue.query(**query) or []

    for execution_queue_item_db in execution_queue_item_dbs:
        # Release the lock so another scheduler process can pick the item up.
        execution_queue_item_db.handling = False

        try:
            ActionExecutionSchedulingQueue.add_or_update(execution_queue_item_db, publish=False)
            LOG.info(
                '[%s] Removing lock for orphaned execution queue item "%s".',
                execution_queue_item_db.action_execution_id,
                str(execution_queue_item_db.id)
            )
        except db_exc.StackStormDBObjectWriteConflictError:
            # A write conflict means another process already updated the item;
            # leave it alone.
            LOG.info(
                '[%s] Execution queue item "%s" updated during garbage collection.',
                execution_queue_item_db.action_execution_id,
                str(execution_queue_item_db.id)
            )
def _regulate_and_schedule(self, liveaction_db, execution_queue_item_db):
    """
    Apply pre-run policies to the liveaction, then either reschedule its queue
    item (when a policy delayed it), drop the item (when the liveaction is
    already in a completed or canceled state), or hand it off to _schedule().

    :param liveaction_db: Liveaction to regulate and schedule.
    :param execution_queue_item_db: Scheduling queue item backing the liveaction.
    """
    action_execution_id = str(execution_queue_item_db.action_execution_id)
    liveaction_id = str(execution_queue_item_db.liveaction_id)
    queue_item_id = str(execution_queue_item_db.id)
    extra = {'queue_item_id': queue_item_id}

    LOG.info(
        '[%s] Liveaction "%s" has status "%s" before applying policies.',
        action_execution_id, liveaction_id, liveaction_db.status, extra=extra)

    # Apply policies defined for the action. Policies may change the
    # liveaction status (e.g. delay it).
    liveaction_db = policy_service.apply_pre_run_policies(liveaction_db)

    LOG.info(
        '[%s] Liveaction "%s" has status "%s" after applying policies.',
        action_execution_id, liveaction_id, liveaction_db.status, extra=extra)

    if liveaction_db.status == action_constants.LIVEACTION_STATUS_DELAYED:
        LOG.info(
            '[%s] Liveaction "%s" is delayed and scheduling queue is updated.',
            action_execution_id, liveaction_id, extra=extra)

        liveaction_db = action_service.update_status(
            liveaction_db, action_constants.LIVEACTION_STATUS_DELAYED, publish=False)

        # Release the handling lock and push the scheduled start into the
        # future so the item is retried later instead of immediately.
        execution_queue_item_db.handling = False
        execution_queue_item_db.scheduled_start_timestamp = date.append_milliseconds_to_time(
            date.get_datetime_utc_now(),
            POLICY_DELAYED_EXECUTION_RESCHEDULE_TIME_MS)

        try:
            ActionExecutionSchedulingQueue.add_or_update(
                execution_queue_item_db, publish=False)
        except db_exc.StackStormDBObjectWriteConflictError:
            # Another process updated the queue item concurrently.
            LOG.warning(
                '[%s] Database write conflict on updating scheduling queue.',
                action_execution_id, extra=extra)

        return

    if (liveaction_db.status in action_constants.LIVEACTION_COMPLETED_STATES or
            liveaction_db.status in action_constants.LIVEACTION_CANCEL_STATES):
        # Nothing left to schedule; remove the queue item.
        ActionExecutionSchedulingQueue.delete(execution_queue_item_db)
        return

    self._schedule(liveaction_db, execution_queue_item_db)
def test_next_execution(self):
    """
    Queue items must be returned in scheduled-start order, not insertion
    order, and within one second of the expected delayed start.
    """
    self.reset()
    schedule_q_dbs = []
    # Delays (ms) deliberately out of order; expected_order is the index
    # order sorted by delay (2000 < 4000 < 5000).
    delays = [2000, 5000, 4000]
    expected_order = [0, 2, 1]
    test_cases = []

    for delay in delays:
        liveaction_db = self._create_liveaction_db()
        delayed_start = date.append_milliseconds_to_time(
            liveaction_db.start_timestamp, delay)

        test_case = {
            'liveaction': liveaction_db,
            'delay': delay,
            'delayed_start': delayed_start
        }

        test_cases.append(test_case)

    for test_case in test_cases:
        schedule_q_dbs.append(
            ActionExecutionSchedulingQueue.add_or_update(
                self.scheduler._create_execution_queue_item_db_from_liveaction(
                    test_case['liveaction'],
                    test_case['delay'],
                )))

    # Wait maximum delay seconds so the query works as expected
    eventlet.sleep(3.2)

    for index in expected_order:
        test_case = test_cases[index]

        # Freeze "now" at the test case's delayed start so the handler's
        # time-based query picks this item; keep the real
        # append_milliseconds_to_time implementation.
        date_mock = mock.MagicMock()
        date_mock.get_datetime_utc_now.return_value = test_case['delayed_start']
        date_mock.append_milliseconds_to_time = date.append_milliseconds_to_time

        with mock.patch('st2actions.scheduler.handler.date', date_mock):
            schedule_q_db = self.scheduling_queue._get_next_execution()
            ActionExecutionSchedulingQueue.delete(schedule_q_db)

        self.assertIsInstance(schedule_q_db, ActionExecutionSchedulingQueueItemDB)
        self.assertEqual(schedule_q_db.delay, test_case['delay'])
        self.assertEqual(schedule_q_db.liveaction_id,
                         str(test_case['liveaction'].id))

        # NOTE: We can't directly assert on the timestamp due to the delays on the code and
        # timing variance
        scheduled_start_timestamp = schedule_q_db.scheduled_start_timestamp
        test_case_start_timestamp = test_case['delayed_start']
        start_timestamp_diff = (scheduled_start_timestamp -
                                test_case_start_timestamp)
        self.assertTrue(
            start_timestamp_diff <= datetime.timedelta(seconds=1))
def test_next_execution(self):
    """
    Items come off the scheduling queue ordered by scheduled start time
    (shortest delay first), each within one second of its expected start.
    """
    self.reset()

    schedule_q_dbs = []
    delays = [2000, 5000, 4000]
    expected_order = [0, 2, 1]

    # Build one test case per delay: the liveaction plus its expected
    # delayed start timestamp.
    test_cases = []
    for delay in delays:
        liveaction_db = self._create_liveaction_db()
        test_cases.append({
            'liveaction': liveaction_db,
            'delay': delay,
            'delayed_start': date.append_milliseconds_to_time(
                liveaction_db.start_timestamp, delay)
        })

    for case in test_cases:
        queue_item = self.scheduler._create_execution_queue_item_db_from_liveaction(
            case['liveaction'],
            case['delay'],
        )
        schedule_q_dbs.append(
            ActionExecutionSchedulingQueue.add_or_update(queue_item))

    # Wait maximum delay seconds so the query works as expected
    eventlet.sleep(3.2)

    for index in expected_order:
        case = test_cases[index]

        # Freeze "now" at this case's delayed start; keep the real
        # append_milliseconds_to_time implementation.
        date_mock = mock.MagicMock()
        date_mock.get_datetime_utc_now.return_value = case['delayed_start']
        date_mock.append_milliseconds_to_time = date.append_milliseconds_to_time

        with mock.patch('st2actions.scheduler.handler.date', date_mock):
            schedule_q_db = self.scheduling_queue._get_next_execution()
            ActionExecutionSchedulingQueue.delete(schedule_q_db)

        self.assertIsInstance(schedule_q_db, ActionExecutionSchedulingQueueItemDB)
        self.assertEqual(schedule_q_db.delay, case['delay'])
        self.assertEqual(schedule_q_db.liveaction_id, str(case['liveaction'].id))

        # NOTE: We can't directly assert on the timestamp due to the delays on the code and
        # timing variance
        start_timestamp_diff = (schedule_q_db.scheduled_start_timestamp -
                                case['delayed_start'])
        self.assertTrue(start_timestamp_diff <= datetime.timedelta(seconds=1))
def _create_execution_queue_item_db_from_liveaction(
        self, liveaction, delay=None):
    """
    Build an ActionExecutionSchedulingQueueItemDB for the given live action.

    :param liveaction: Live action to enqueue.
    :param delay: Optional delay (milliseconds) added to the start timestamp.
    """
    # A missing delay means the item is scheduled at the liveaction's own
    # start timestamp.
    scheduled_start = date.append_milliseconds_to_time(
        liveaction.start_timestamp, delay or 0)

    queue_item_db = ActionExecutionSchedulingQueueItemDB()
    queue_item_db.liveaction_id = str(liveaction.id)
    queue_item_db.scheduled_start_timestamp = scheduled_start
    queue_item_db.delay = delay

    return queue_item_db
def _create_execution_queue_item_db_from_liveaction(self, liveaction, delay=None):
    """
    Create ActionExecutionSchedulingQueueItemDB from live action.

    :param liveaction: Live action to enqueue.
    :param delay: Optional delay (milliseconds) added to the start timestamp.
    """
    item = ActionExecutionSchedulingQueueItemDB()
    item.liveaction_id = str(liveaction.id)
    # No delay (None) behaves the same as a zero-millisecond delay.
    item.scheduled_start_timestamp = date.append_milliseconds_to_time(
        liveaction.start_timestamp,
        delay if delay else 0
    )
    item.delay = delay

    return item
def test_next_execution(self):
    """
    Queue items must be returned ordered by scheduled start time (shortest
    delay first), with exact timestamp, delay and liveaction id preserved.
    """
    self.reset()
    schedule_q_dbs = []
    # Delays (ms) deliberately out of order; expected_order is the index
    # order sorted by delay (100 < 1000 < 5000).
    delays = [100, 5000, 1000]
    expected_order = [0, 2, 1]
    test_cases = []

    for delay in delays:
        liveaction_db = self._create_liveaction_db()
        delayed_start = date.append_milliseconds_to_time(
            liveaction_db.start_timestamp, delay)

        test_case = {
            'liveaction': liveaction_db,
            'delay': delay,
            'delayed_start': delayed_start
        }

        test_cases.append(test_case)

    for test_case in test_cases:
        schedule_q_dbs.append(
            ActionExecutionSchedulingQueue.add_or_update(
                self.scheduler._create_execution_queue_item_db_from_liveaction(
                    test_case['liveaction'],
                    test_case['delay'],
                )))

    for index in expected_order:
        test_case = test_cases[index]

        # Freeze "now" at the test case's delayed start so the handler's
        # time-based query picks this item; keep the real
        # append_milliseconds_to_time implementation.
        date_mock = mock.MagicMock()
        date_mock.get_datetime_utc_now.return_value = test_case['delayed_start']
        date_mock.append_milliseconds_to_time = date.append_milliseconds_to_time

        with mock.patch('st2actions.scheduler.handler.date', date_mock):
            schedule_q_db = self.scheduling_queue._get_next_execution()
            ActionExecutionSchedulingQueue.delete(schedule_q_db)

        self.assertIsInstance(schedule_q_db, ActionExecutionSchedulingQueueItemDB)
        self.assertEqual(schedule_q_db.scheduled_start_timestamp,
                         test_case['delayed_start'])
        self.assertEqual(schedule_q_db.delay, test_case['delay'])
        self.assertEqual(schedule_q_db.liveaction_id,
                         str(test_case['liveaction'].id))
def test_create_from_liveaction(self):
    """A queue item built from a live action carries the expected fields."""
    delay_ms = 500
    liveaction_db = self._create_liveaction_db()

    schedule_q_db = self.scheduler._create_execution_queue_item_db_from_liveaction(
        liveaction_db,
        delay_ms,
    )

    expected_start = date.append_milliseconds_to_time(
        liveaction_db.start_timestamp, delay_ms)

    self.assertIsInstance(schedule_q_db, ActionExecutionSchedulingQueueItemDB)
    self.assertEqual(schedule_q_db.scheduled_start_timestamp, expected_start)
    self.assertEqual(schedule_q_db.delay, delay_ms)
    self.assertEqual(schedule_q_db.liveaction_id, str(liveaction_db.id))
def _regulate_and_schedule(self, liveaction_db, execution_queue_item_db):
    """
    Apply pre-run policies to the liveaction, then either reschedule its queue
    item (when a policy delayed it), drop the item (when the liveaction is
    already in a completed or canceled state), or hand it off to _schedule().

    :param liveaction_db: Liveaction to regulate and schedule.
    :param execution_queue_item_db: Scheduling queue item backing the liveaction.
    """
    # Apply policies defined for the action.
    liveaction_db = policy_service.apply_pre_run_policies(liveaction_db)

    liveaction_id = str(liveaction_db.id)
    queue_item_id = str(execution_queue_item_db.id)

    extra = {
        'liveaction_id': liveaction_id,
        'liveaction_status': liveaction_db.status,
        'queue_item_id': queue_item_id
    }

    LOG.info('Liveaction (%s) Status Pre-Run: %s (%s)', liveaction_id,
             liveaction_db.status, queue_item_id, extra=extra)

    # NOTE: Compare with "==", not "is". Status constants are strings; the
    # original identity comparison only worked by virtue of CPython string
    # interning and would silently fail for non-interned equal strings.
    if liveaction_db.status == action_constants.LIVEACTION_STATUS_POLICY_DELAYED:
        liveaction_db = action_service.update_status(
            liveaction_db, action_constants.LIVEACTION_STATUS_DELAYED, publish=False)

        # Release the handling lock and push the scheduled start into the
        # future so the item is retried later instead of immediately.
        execution_queue_item_db.handling = False
        execution_queue_item_db.scheduled_start_timestamp = date.append_milliseconds_to_time(
            date.get_datetime_utc_now(),
            POLICY_DELAYED_EXECUTION_RESCHEDULE_TIME_MS)

        try:
            ActionExecutionSchedulingQueue.add_or_update(
                execution_queue_item_db, publish=False)
        except db_exc.StackStormDBObjectWriteConflictError:
            # Another process updated the queue item concurrently.
            LOG.warning(
                'Execution queue item update conflict during scheduling: %s',
                execution_queue_item_db.id)

        return

    if (liveaction_db.status in action_constants.LIVEACTION_COMPLETED_STATES or
            liveaction_db.status in action_constants.LIVEACTION_CANCEL_STATES):
        # Nothing left to schedule; remove the queue item.
        ActionExecutionSchedulingQueue.delete(execution_queue_item_db)
        return

    self._schedule(liveaction_db, execution_queue_item_db)
def _delay(self, liveaction_db, execution_queue_item_db):
    """
    Flag the liveaction as delayed and push its queue item's scheduled start
    into the future so it is retried later.
    """
    liveaction_db = action_service.update_status(
        liveaction_db, action_constants.LIVEACTION_STATUS_DELAYED, publish=False)

    # Reschedule the item so it is not immediately re-picked.
    rescheduled_start = date.append_milliseconds_to_time(
        date.get_datetime_utc_now(),
        POLICY_DELAYED_EXECUTION_RESCHEDULE_TIME_MS)
    execution_queue_item_db.scheduled_start_timestamp = rescheduled_start

    try:
        execution_queue_item_db.handling = False
        ActionExecutionSchedulingQueue.add_or_update(execution_queue_item_db, publish=False)
    except db_exc.StackStormDBObjectWriteConflictError:
        # Another process updated the queue item concurrently.
        LOG.warning('Execution queue item update conflict during scheduling: %s',
                    execution_queue_item_db.id)
def _delay(self, liveaction_db, execution_queue_item_db):
    """
    Mark the liveaction as delayed and reschedule its queue item for a later
    start so another scheduling pass picks it up again.
    """
    liveaction_db = action_service.update_status(
        liveaction_db,
        action_constants.LIVEACTION_STATUS_DELAYED,
        publish=False
    )

    # Move the scheduled start forward by the configured reschedule window.
    execution_queue_item_db.scheduled_start_timestamp = date.append_milliseconds_to_time(
        date.get_datetime_utc_now(),
        POLICY_DELAYED_EXECUTION_RESCHEDULE_TIME_MS
    )

    try:
        # Release the handling lock before persisting the updated item.
        execution_queue_item_db.handling = False
        ActionExecutionSchedulingQueue.add_or_update(
            execution_queue_item_db, publish=False
        )
    except db_exc.StackStormDBObjectWriteConflictError:
        LOG.warning(
            'Execution queue item update conflict during scheduling: %s',
            execution_queue_item_db.id
        )
def _regulate_and_schedule(self, liveaction_db, execution_queue_item_db):
    """
    Apply pre-run policies to the liveaction, then either reschedule its queue
    item (when a policy delayed it), drop the item (when the liveaction is
    already in a completed or canceled state), or hand it off to _schedule().

    :param liveaction_db: Liveaction to regulate and schedule.
    :param execution_queue_item_db: Scheduling queue item backing the liveaction.
    """
    # Apply policies defined for the action.
    liveaction_db = policy_service.apply_pre_run_policies(liveaction_db)

    liveaction_id = str(liveaction_db.id)
    queue_item_id = str(execution_queue_item_db.id)

    extra = {
        'liveaction_id': liveaction_id,
        'liveaction_status': liveaction_db.status,
        'queue_item_id': queue_item_id
    }

    LOG.info('Liveaction (%s) Status Pre-Run: %s (%s)', liveaction_id,
             liveaction_db.status, queue_item_id, extra=extra)

    # NOTE: Compare with "==", not "is". Status constants are strings; the
    # original identity comparison only worked by virtue of CPython string
    # interning and would silently fail for non-interned equal strings.
    if liveaction_db.status == action_constants.LIVEACTION_STATUS_POLICY_DELAYED:
        liveaction_db = action_service.update_status(
            liveaction_db, action_constants.LIVEACTION_STATUS_DELAYED, publish=False
        )

        # Release the handling lock and push the scheduled start into the
        # future so the item is retried later instead of immediately.
        execution_queue_item_db.handling = False
        execution_queue_item_db.scheduled_start_timestamp = date.append_milliseconds_to_time(
            date.get_datetime_utc_now(),
            POLICY_DELAYED_EXECUTION_RESCHEDULE_TIME_MS
        )

        try:
            ActionExecutionSchedulingQueue.add_or_update(execution_queue_item_db, publish=False)
        except db_exc.StackStormDBObjectWriteConflictError:
            # Another process updated the queue item concurrently.
            LOG.warning(
                'Execution queue item update conflict during scheduling: %s',
                execution_queue_item_db.id
            )

        return

    if (liveaction_db.status in action_constants.LIVEACTION_COMPLETED_STATES or
            liveaction_db.status in action_constants.LIVEACTION_CANCEL_STATES):
        # Nothing left to schedule; remove the queue item.
        ActionExecutionSchedulingQueue.delete(execution_queue_item_db)
        return

    self._schedule(liveaction_db, execution_queue_item_db)