def __init__(self, call_request, schedule, failure_threshold=None, last_run=None, enabled=True):
    """
    Initialize a scheduled call from a call request and an ISO8601 interval.

    :param call_request: call request to execute on the schedule
    :param schedule: schedule in ISO8601 interval format
    :param failure_threshold: number of consecutive failures allowed before
                              the schedule is disabled, or None for no limit
    :param last_run: time the call was last run, if ever
    :param enabled: True if the schedule should be considered for dispatch
    """
    super(ScheduledCall, self).__init__()

    # tag the call request so it can be traced back to this schedule
    call_request.tags.append(
        resource_tag(dispatch_constants.RESOURCE_SCHEDULE_TYPE, str(self._id)))

    interval, start_time, total_runs = dateutils.parse_iso8601_interval(schedule)
    utc_now = datetime.utcnow()
    no_interval = timedelta(seconds=0)
    if start_time:
        start_time = dateutils.to_naive_utc_datetime(start_time)

    self.serialized_call_request = call_request.serialize()
    self.schedule = schedule
    self.failure_threshold = failure_threshold
    self.consecutive_failures = 0

    first_run = start_time or utc_now
    # NOTE using != because ordering comparison with a Duration is not allowed
    while interval != no_interval and first_run <= utc_now:
        # keep advancing by one interval until the first run lands in the future
        first_run = dateutils.add_interval_to_datetime(interval, first_run)
    self.first_run = first_run

    self.last_run = dateutils.to_naive_utc_datetime(last_run) if last_run else None
    self.next_run = None  # calculated and set by the scheduler
    self.remaining_runs = total_runs
    self.enabled = enabled
def test_updated_scheduled_next_run(self):
    """
    Updating a schedule's interval should recompute next_run while leaving
    first_run and last_run untouched.
    """
    request = CallRequest(itinerary_call)
    two_minutes = datetime.timedelta(minutes=2)
    start = datetime.datetime.now(tz=dateutils.utc_tz())
    original_schedule = dateutils.format_iso8601_interval(two_minutes, start)

    schedule_id = self.scheduler.add(request, original_schedule)
    self.assertNotEqual(schedule_id, None)

    document = self.scheduled_call_collection.find_one({'_id': ObjectId(schedule_id)})
    self.assertNotEqual(document, None)

    original_interval, schedule_start = dateutils.parse_iso8601_interval(original_schedule)[:2]
    schedule_start = dateutils.to_naive_utc_datetime(schedule_start)

    # schedule started "now", so the first/next run is one interval out
    self.assertEqual(document['last_run'], None)
    self.assertEqual(document['first_run'], schedule_start + original_interval)
    self.assertEqual(document['next_run'], schedule_start + original_interval)

    one_minute = datetime.timedelta(minutes=1)
    replacement_schedule = dateutils.format_iso8601_interval(one_minute, start)
    self.scheduler.update(schedule_id, schedule=replacement_schedule)

    updated_document = self.scheduled_call_collection.find_one({'_id': ObjectId(schedule_id)})
    updated_interval = dateutils.parse_iso8601_interval(replacement_schedule)[0]

    # only next_run reflects the new interval
    self.assertEqual(updated_document['last_run'], None)
    self.assertEqual(updated_document['first_run'], schedule_start + original_interval)
    self.assertEqual(updated_document['next_run'], schedule_start + updated_interval)
def __init__(self, call_request, schedule, failure_threshold=None, last_run=None, enabled=True):
    """
    Build a scheduled call, computing the first run time from the schedule.

    :param call_request: call request to run on the schedule
    :param schedule: schedule in ISO8601 interval format
    :param failure_threshold: consecutive failures allowed before the
                              schedule is disabled, or None for unlimited
    :param last_run: time of the most recent run, if any
    :param enabled: whether the schedule is active
    """
    super(ScheduledCall, self).__init__()

    schedule_tag = resource_tag(dispatch_constants.RESOURCE_SCHEDULE_TYPE, str(self._id))
    call_request.tags.append(schedule_tag)

    parsed_interval, parsed_start, parsed_runs = dateutils.parse_iso8601_interval(schedule)
    current_time = datetime.utcnow()
    zero_duration = timedelta(seconds=0)
    naive_start = dateutils.to_naive_utc_datetime(parsed_start) if parsed_start else None

    self.serialized_call_request = call_request.serialize()
    self.schedule = schedule
    self.failure_threshold = failure_threshold
    self.consecutive_failures = 0

    self.first_run = naive_start if naive_start else current_time
    # NOTE using != because ordering comparison with a Duration is not allowed
    while parsed_interval != zero_duration and self.first_run <= current_time:
        # try to schedule the first run in the future
        self.first_run = dateutils.add_interval_to_datetime(parsed_interval, self.first_run)

    self.last_run = dateutils.to_naive_utc_datetime(last_run) if last_run else None
    self.next_run = None  # calculated and set by the scheduler
    self.remaining_runs = parsed_runs
    self.enabled = enabled
def calculate_first_run(schedule):
    """
    Given a schedule in ISO8601 interval format, calculate the first time the
    schedule should be run, making a best effort to pick a time in the future.

    :param schedule: ISO8601 interval schedule
    :type schedule: str
    :return: when the schedule should be run for the first time
    :rtype: datetime.datetime
    """
    current_time = datetime.datetime.utcnow()
    interval, start = dateutils.parse_iso8601_interval(schedule)[0:2]

    if start:
        candidate = dateutils.to_naive_utc_datetime(start)
    else:
        candidate = current_time

    # the "zero time" check handles the really odd case where the schedule is
    # a start time and a single run instead of something recurring
    while interval != ZERO_TIME and candidate <= current_time:
        candidate = dateutils.add_interval_to_datetime(interval, candidate)

    return candidate
def __init__(self, call_request, schedule, failure_threshold=None, last_run=None, enabled=True):
    """
    Build a scheduled call; the scheduler fills in first_run and next_run.

    :param call_request: call request to execute on the schedule
    :param schedule: schedule in ISO8601 interval format
    :param failure_threshold: consecutive failures allowed before the
                              schedule is disabled, or None for unlimited
    :param last_run: time the call was last run, if ever
    :param enabled: True if the schedule should be considered for dispatch
    """
    super(ScheduledCall, self).__init__()

    # add custom scheduled call tag to the call request so it can be traced
    # back to this schedule
    tag = resource_tag(dispatch_constants.RESOURCE_SCHEDULE_TYPE, str(self._id))
    call_request.tags.append(tag)

    self.serialized_call_request = call_request.serialize()
    self.schedule = schedule
    self.enabled = enabled
    self.failure_threshold = failure_threshold
    self.consecutive_failures = 0

    # scheduling fields
    self.first_run = None  # calculated and set by the scheduler
    self.last_run = dateutils.to_naive_utc_datetime(last_run) if last_run else None
    self.next_run = None  # calculated and set by the scheduler
    self.remaining_runs = dateutils.parse_iso8601_interval(schedule)[2]

    # run-time call group metadata for tracking success or failure
    self.call_count = 0
    self.call_exit_states = []
def _sync_schedules(v1_database, v2_database, report):
    """
    Create v2 scheduled sync calls for v1 repositories that have a sync
    schedule but no corresponding v2 schedule yet.

    Fixes over the previous revision:
    - the error message was built from two adjacent string literals with no
      separating space, producing "...v2 database.sync scheduling..."
    - the parsed start time was stored as-is; it is now converted to naive
      UTC to match ScheduledCall.__init__ and calculate_first_run, so it is
      comparable with the naive datetimes used for last_run/next_run

    :param v1_database: database handle for the v1 data
    :param v2_database: database handle for the v2 data
    :param report: migration report used to record errors
    :return: True on success, False if a repo is missing from the v2 database
    :rtype: bool
    """
    v1_repo_collection = v1_database.repos
    v2_repo_importer_collection = v2_database.repo_importers
    v2_scheduled_call_collection = v2_database.scheduled_calls

    # ugly hack to find out which repos have already been scheduled
    # necessary because $size is not a meta-query and doesn't support $gt, etc
    repos_without_schedules = v2_repo_importer_collection.find(
        {'scheduled_syncs': {'$size': 0}}, fields=['repo_id'])
    repo_ids_without_schedules = [r['repo_id'] for r in repos_without_schedules]

    repos_with_schedules = v2_repo_importer_collection.find(
        {'repo_id': {'$nin': repo_ids_without_schedules}}, fields=['repo_id'])
    repo_ids_with_schedules = [r['repo_id'] for r in repos_with_schedules]

    # v1 repos with a sync schedule that have not been scheduled in v2 yet
    repos_to_schedule = v1_repo_collection.find(
        {'id': {'$nin': repo_ids_with_schedules}, 'sync_schedule': {'$ne': None}},
        fields=['id', 'sync_schedule', 'sync_options', 'last_sync'])

    for repo in repos_to_schedule:
        if repo['id'] not in repo_ids_without_schedules:
            # the repo is in neither v2 list, so it was never migrated
            report.error('Repository [%s] not found in the v2 database. '
                         'sync scheduling being canceled.' % repo['id'])
            return False

        args = [repo['id']]
        kwargs = {'overrides': {}}
        call_request = CallRequest(sync_with_auto_publish_itinerary, args, kwargs,
                                   principal=SystemUser())

        scheduled_call_document = {
            '_id': ObjectId(),
            'id': None,
            'serialized_call_request': None,
            'schedule': repo['sync_schedule'],
            'failure_threshold': None,
            'consecutive_failures': 0,
            'first_run': None,
            'last_run': None,
            'next_run': None,
            'remaining_runs': None,
            'enabled': True}

        scheduled_call_document['id'] = str(scheduled_call_document['_id'])

        schedule_tag = resource_tag(dispatch_constants.RESOURCE_SCHEDULE_TYPE,
                                    scheduled_call_document['id'])
        call_request.tags.append(schedule_tag)
        scheduled_call_document['serialized_call_request'] = call_request.serialize()

        if isinstance(repo['sync_options'], dict):
            scheduled_call_document['failure_threshold'] = \
                repo['sync_options'].get('failure_threshold', None)

        interval, start, recurrences = dateutils.parse_iso8601_interval(
            scheduled_call_document['schedule'])
        # convert the (possibly tz-aware) start time to naive UTC so it is
        # comparable with the naive datetimes used for the other run fields
        scheduled_call_document['first_run'] = \
            dateutils.to_naive_utc_datetime(start) if start else datetime.utcnow()
        scheduled_call_document['remaining_runs'] = recurrences
        scheduled_call_document['next_run'] = _calculate_next_run(scheduled_call_document)

        if repo['last_sync'] is not None:
            scheduled_call_document['last_run'] = dateutils.to_naive_utc_datetime(
                dateutils.parse_iso8601_datetime(repo['last_sync']))

        v2_scheduled_call_collection.insert(scheduled_call_document, safe=True)
        v2_repo_importer_collection.update(
            {'repo_id': repo['id']},
            {'$push': {'scheduled_syncs': scheduled_call_document['id']}},
            safe=True)

    return True
def _sync_schedules(v1_database, v2_database, report):
    """
    Create v2 scheduled sync calls for v1 repositories that have a sync
    schedule but no corresponding v2 schedule yet.

    Fixes over the previous revision:
    - the error message was built from two adjacent string literals with no
      separating space, producing "...v2 database.sync scheduling..."
    - the parsed start time was stored as-is; it is now converted to naive
      UTC to match ScheduledCall.__init__ and calculate_first_run, so it is
      comparable with the naive datetimes used for last_run/next_run

    :param v1_database: database handle for the v1 data
    :param v2_database: database handle for the v2 data
    :param report: migration report used to record errors
    :return: True on success, False if a repo is missing from the v2 database
    :rtype: bool
    """
    v1_repo_collection = v1_database.repos
    v2_repo_importer_collection = v2_database.repo_importers
    v2_scheduled_call_collection = v2_database.scheduled_calls

    # ugly hack to find out which repos have already been scheduled
    # necessary because $size is not a meta-query and doesn't support $gt, etc
    repos_without_schedules = v2_repo_importer_collection.find(
        {'scheduled_syncs': {'$size': 0}}, fields=['repo_id'])
    repo_ids_without_schedules = [r['repo_id'] for r in repos_without_schedules]

    repos_with_schedules = v2_repo_importer_collection.find(
        {'repo_id': {'$nin': repo_ids_without_schedules}}, fields=['repo_id'])
    repo_ids_with_schedules = [r['repo_id'] for r in repos_with_schedules]

    # v1 repos with a sync schedule that have not been scheduled in v2 yet
    repos_to_schedule = v1_repo_collection.find(
        {'id': {'$nin': repo_ids_with_schedules}, 'sync_schedule': {'$ne': None}},
        fields=['id', 'sync_schedule', 'sync_options', 'last_sync'])

    for repo in repos_to_schedule:
        if repo['id'] not in repo_ids_without_schedules:
            # the repo is in neither v2 list, so it was never migrated
            report.error('Repository [%s] not found in the v2 database. '
                         'sync scheduling being canceled.' % repo['id'])
            return False

        args = [repo['id']]
        kwargs = {'overrides': {}}
        call_request = CallRequest(sync_with_auto_publish_itinerary, args, kwargs,
                                   principal=SystemUser())

        scheduled_call_document = {
            '_id': ObjectId(),
            'id': None,
            'serialized_call_request': None,
            'schedule': repo['sync_schedule'],
            'failure_threshold': None,
            'consecutive_failures': 0,
            'first_run': None,
            'last_run': None,
            'next_run': None,
            'remaining_runs': None,
            'enabled': True}

        scheduled_call_document['id'] = str(scheduled_call_document['_id'])

        schedule_tag = resource_tag(dispatch_constants.RESOURCE_SCHEDULE_TYPE,
                                    scheduled_call_document['id'])
        call_request.tags.append(schedule_tag)
        scheduled_call_document['serialized_call_request'] = call_request.serialize()

        if isinstance(repo['sync_options'], dict):
            scheduled_call_document['failure_threshold'] = \
                repo['sync_options'].get('failure_threshold', None)

        interval, start, recurrences = dateutils.parse_iso8601_interval(
            scheduled_call_document['schedule'])
        # convert the (possibly tz-aware) start time to naive UTC so it is
        # comparable with the naive datetimes used for the other run fields
        scheduled_call_document['first_run'] = \
            dateutils.to_naive_utc_datetime(start) if start else datetime.utcnow()
        scheduled_call_document['remaining_runs'] = recurrences
        scheduled_call_document['next_run'] = _calculate_next_run(scheduled_call_document)

        if repo['last_sync'] is not None:
            scheduled_call_document['last_run'] = dateutils.to_naive_utc_datetime(
                dateutils.parse_iso8601_datetime(repo['last_sync']))

        v2_scheduled_call_collection.insert(scheduled_call_document, safe=True)
        v2_repo_importer_collection.update(
            {'repo_id': repo['id']},
            {'$push': {'scheduled_syncs': scheduled_call_document['id']}},
            safe=True)

    return True