def _gen_base_attributes(item_id=None):
    """Build the common attributes (timestamps, maybe an id) for a new record.

    A fresh uuid is generated only when no ``item_id`` is supplied; when the
    caller passes one, it is expected to place the id in its own values.
    """
    base = {}
    if item_id is None:
        base['id'] = str(uuid.uuid4())
    base['created_at'] = timeutils.utcnow()
    base['updated_at'] = timeutils.utcnow()
    return copy.deepcopy(base)
def create(self, request, body):
    """Create a job for the schedule referenced in the request body.

    Advances the schedule's next_run/last_scheduled bookkeeping, then
    builds the job from the schedule's tenant, action and metadata.

    :param request: the incoming request (routing only; not read here)
    :param body: dict expected to contain ``{'job': {'schedule_id': ...}}``
    :returns: ``{'job': <serialized job dict>}``
    :raises webob.exc.HTTPBadRequest: when body or schedule_id is missing
    :raises webob.exc.HTTPNotFound: when the schedule does not exist
    """
    if (body is None or body.get('job') is None
            or body['job'].get('schedule_id') is None):
        raise webob.exc.HTTPBadRequest()

    job = body['job']
    try:
        schedule = self.db_api.schedule_get_by_id(job['schedule_id'])
    except exception.NotFound:
        raise webob.exc.HTTPNotFound()

    # Update schedule last_scheduled and next_run
    values = {}
    values['next_run'] = api_utils.schedule_to_next_run(schedule)
    # (removed leftover debug print of values['next_run'])
    values['last_scheduled'] = timeutils.utcnow()
    self.db_api.schedule_update(schedule['id'], values)

    # Create job
    values = {}
    values.update(job)
    values['tenant'] = schedule['tenant']
    values['action'] = schedule['action']
    values['status'] = 'queued'

    # Copy the schedule's metadata onto the job
    job_metadata = []
    for metadata in schedule['schedule_metadata']:
        job_metadata.append({'key': metadata['key'],
                             'value': metadata['value']})
    values['job_metadata'] = job_metadata

    now = timeutils.utcnow()
    job_timeout_seconds = self._job_get_timeout(values['action'])
    if 'timeout' not in values:
        values['timeout'] = now + \
            datetime.timedelta(seconds=job_timeout_seconds)
        values['hard_timeout'] = now + \
            datetime.timedelta(seconds=job_timeout_seconds)

    job = self.db_api.job_create(values)
    utils.serialize_datetimes(job)
    api_utils.serialize_job_metadata(job)
    return {'job': job}
def test_cron_string_to_datetime(self):
    """A cron minute/hour just in the past must yield a future next_run."""
    # Snapshot the clock once: the original read utcnow() separately for
    # minute and hour, which could disagree across a minute/hour boundary.
    now = timeutils.utcnow()
    minute = now.minute - 1 if now.minute > 0 else 59
    hour = now.hour - 1 if now.hour > 0 else 23
    next_run = utils.cron_string_to_next_datetime(minute=minute,
                                                  hour=hour)
    self.assertTrue(next_run > timeutils.utcnow())
def _create_jobs(self, gap, *fixtures):
    """Persist each fixture as a job, stepping the overridden test clock
    forward by ``gap`` seconds after every create.

    :returns: the (overridden) time captured before the first create
    """
    start = timeutils.utcnow()
    self.jobs = []
    for job_fixture in fixtures:
        self.jobs.append(self.db_api.job_create(job_fixture))
        timeutils.advance_time_seconds(gap)
    return start
def test_job_create_no_worker_assigned(self):
    """A job created without worker/timeout fields gets defaults applied."""
    fixture = {
        'action': 'snapshot',
        'tenant_id': unit_utils.TENANT1,
        'schedule_id': unit_utils.SCHEDULE_UUID2,
        'status': 'queued',
        'job_metadata': [
            {
                'key': 'instance_id',
                'value': 'my_instance',
            },
        ],
    }
    # Freeze time so the defaulted timeouts are exactly now + 30s.
    timeutils.set_time_override()
    try:
        now = timeutils.utcnow()
        job = self.db_api.job_create(fixture)
    finally:
        # Always restore real time, even if job_create raises, so the
        # override cannot leak into other tests.
        timeutils.clear_time_override()
    self.assertTrue(uuidutils.is_uuid_like(job['id']))
    self.assertNotEqual(job['created_at'], None)
    self.assertNotEqual(job['updated_at'], None)
    self.assertEqual(job['timeout'], now + timedelta(seconds=30))
    self.assertEqual(job['hard_timeout'], now + timedelta(seconds=30))
    self.assertEqual(job['schedule_id'], fixture['schedule_id'])
    self.assertEqual(job['worker_id'], None)
    self.assertEqual(job['status'], fixture['status'])
    self.assertEqual(job['retry_count'], 0)
    metadata = job['job_metadata']
    self.assertEqual(len(metadata), 1)
    self.assertEqual(metadata[0]['key'], fixture['job_metadata'][0]['key'])
    self.assertEqual(metadata[0]['value'],
                     fixture['job_metadata'][0]['value'])
def _create_jobs(self):
    """Insert two job fixtures (one 'queued', one 'error') for the tests."""
    now = timeutils.utcnow()
    timeout = now + datetime.timedelta(hours=1)
    hard_timeout = now + datetime.timedelta(hours=4)
    common = {
        'action': 'snapshot',
        'tenant': unit_utils.TENANT1,
        'timeout': timeout,
        'hard_timeout': hard_timeout,
        'retry_count': 0,
    }
    fixture_1 = dict(common,
                     id=unit_utils.JOB_UUID1,
                     schedule_id=unit_utils.SCHEDULE_UUID1,
                     worker_id=unit_utils.WORKER_UUID1,
                     status='queued')
    self.job_1 = self.db_api.job_create(fixture_1)
    fixture_2 = dict(common,
                     id=unit_utils.JOB_UUID2,
                     schedule_id=unit_utils.SCHEDULE_UUID2,
                     worker_id=unit_utils.WORKER_UUID2,
                     status='error')
    self.job_2 = self.db_api.job_create(fixture_2)
def job_get_and_assign_next_by_action(action, worker_id, max_retry):
    """Get the next available job for the given action and assign it
    to the worker for worker_id. This must be an atomic action!"""
    now = timeutils.utcnow()
    candidate_id = None
    for job in _jobs_get_sorted():
        # A job is grabbable when unowned, or its soft timeout has lapsed.
        available = job['worker_id'] is None or job['timeout'] <= now
        if (job['action'] == action
                and job['retry_count'] < max_retry
                and job['hard_timeout'] > now
                and available):
            candidate_id = job['id']
            bumped_retries = job['retry_count'] + 1
            break
    if candidate_id is None:
        return None
    DATA['jobs'][candidate_id]['worker_id'] = worker_id
    DATA['jobs'][candidate_id]['retry_count'] = bumped_retries
    assigned = copy.deepcopy(DATA['jobs'][candidate_id])
    assigned['job_metadata'] = job_meta_get_all_by_job_id(candidate_id)
    return assigned
def _create_job_fixtures(self):
    """Prepare two job fixtures: one unassigned, one already claimed."""
    now = timeutils.utcnow()
    timeout = now + datetime.timedelta(seconds=30)
    hard_timeout = now + datetime.timedelta(seconds=30)
    base = {
        'action': 'snapshot',
        'tenant': unit_utils.TENANT1,
        'timeout': timeout,
        'hard_timeout': hard_timeout,
        'retry_count': 0,
    }
    self.job_fixture_1 = dict(base,
                              schedule_id=unit_utils.SCHEDULE_UUID1,
                              worker_id=None,
                              status=None)
    self.job_fixture_2 = dict(base,
                              schedule_id=unit_utils.SCHEDULE_UUID2,
                              worker_id=unit_utils.WORKER_UUID2,
                              status='queued')
def job_create(job_values):
    """Insert a job into the in-memory store, applying defaults.

    A missing retry_count defaults to 0.  When no 'timeout' is supplied,
    both timeout and hard_timeout default to now plus the action's
    configured timeout.  Metadata is stored via job_meta_create.
    """
    global DATA
    db_utils.validate_job_values(job_values)
    values = job_values.copy()

    metadata = values.pop('job_metadata', [])
    if 'retry_count' not in values:
        values['retry_count'] = 0

    job = {'worker_id': None}

    now = timeutils.utcnow()
    job_timeout_seconds = _job_get_timeout(values['action'])
    if 'timeout' not in values:
        values['timeout'] = now + timedelta(seconds=job_timeout_seconds)
        values['hard_timeout'] = now + timedelta(seconds=job_timeout_seconds)

    job.update(values)
    job.update(_gen_base_attributes())
    DATA['jobs'][job['id']] = job

    for metadatum in metadata:
        job_meta_create(job['id'], metadatum)

    return job_get_by_id(job['id'])
def _inner():
    """Invoke self.f every `interval` seconds until stopped, or until the
    callback raises LoopingCallDone to deliver a final value."""
    if initial_delay:
        greenthread.sleep(initial_delay)

    try:
        while self._running:
            start = timeutils.utcnow()
            self.f(*self.args, **self.kw)
            end = timeutils.utcnow()
            if not self._running:
                break
            # Sleep only for what remains of the interval after the
            # callback ran; warn when the callback overran it.
            delay = interval - timeutils.delta_seconds(start, end)
            if delay <= 0:
                LOG.warn(_('task run outlasted interval by %s sec') % -delay)
            greenthread.sleep(max(delay, 0))
    except LoopingCallDone as e:
        self.stop()
        done.send(e.retvalue)
def cron_string_to_next_datetime(minute="*", hour="*", day_of_month="*",
                                 month="*", day_of_week="*"):
    """Return the next datetime matching the given cron fields.

    Any field that is None or empty falls back to '*' (match all).

    :returns: the first ``datetime.datetime`` after the current
        (possibly overridden) time that matches the cron expression
    """
    cron_string = ("%s %s %s %s %s" % (minute or '*',
                                       hour or '*',
                                       day_of_month or '*',
                                       month or '*',
                                       day_of_week or '*'))
    # Renamed from `iter`, which shadowed the builtin of the same name.
    cron_iter = croniter(cron_string, timeutils.utcnow())
    return cron_iter.get_next(datetime.datetime)
def _create_basic_job(self):
    """Create and return a minimal snapshot job owned by TENANT1."""
    now = timeutils.utcnow()
    fixture = {
        'action': 'snapshot',
        'timeout': now + datetime.timedelta(hours=1),
        'hard_timeout': now + datetime.timedelta(hours=4),
        'tenant': unit_utils.TENANT1,
    }
    return db_api.job_create(fixture)
def job_meta_update(job_id, key, values):
    """Update one job-metadata entry in place and stamp its updated_at.

    NOTE(review): updated_at is stamped *after* applying the caller's
    values here, unlike job_update which stamps before — a manually
    supplied updated_at is clobbered; confirm this asymmetry is intended.

    :returns: a deep copy of the updated metadata entry
    """
    global DATA
    _check_job_meta_exists(job_id, key)

    entry = DATA['job_metadata'][job_id][key]
    entry.update(values)
    entry['updated_at'] = timeutils.utcnow()
    DATA['job_metadata'][job_id][key] = entry

    return copy.deepcopy(entry)
def _jobs_cleanup_hard_timed_out():
    """Find all jobs with hard_timeout values which have passed and
    delete them, logging the timeout / failure as appropriate

    :returns: the number of rows deleted
    """
    cutoff = timeutils.utcnow()
    session = get_session()
    deleted = (session.query(models.Job)
               .filter(models.Job.hard_timeout <= cutoff)
               .delete())
    session.flush()
    return deleted
def notify(context, publisher_id, event_type, priority, payload):
    """Send a notification through every configured driver.

    :param publisher_id: the source worker_type.host of the message
    :param event_type: the literal type of event (ex. Instance Creation)
    :param priority: one of the Python logging level names in the set
        (DEBUG, WARN, INFO, ERROR, CRITICAL)
    :param payload: a python dictionary of attributes
    :raises BadPriorityException: if priority is not a known level

    The outgoing message is the parameters above plus a generated
    message_id (UUID) and a GMT timestamp, handed as a dict to each
    driver's notify(); driver failures are logged and swallowed.
    """
    if priority not in log_levels:
        raise BadPriorityException(
            _('%s not in valid priorities') % priority)

    # Ensure everything is JSON serializable.
    payload = jsonutils.to_primitive(payload, convert_instances=True)

    msg = {'message_id': str(uuid.uuid4()),
           'publisher_id': publisher_id,
           'event_type': event_type,
           'priority': priority,
           'payload': payload,
           'timestamp': str(timeutils.utcnow())}

    for driver in _get_drivers():
        try:
            driver.notify(context, msg)
        except Exception as e:
            LOG.exception(_("Problem '%(e)s' attempting to "
                            "send to notification system. "
                            "Payload=%(payload)s") %
                          dict(e=e, payload=payload))
def notify(context, publisher_id, event_type, priority, payload):
    """Fan a notification out to all configured notification drivers.

    :param publisher_id: the source worker_type.host of the message
    :param event_type: the literal type of event (ex. Instance Creation)
    :param priority: one of the Python logging level names in the set
        (DEBUG, WARN, INFO, ERROR, CRITICAL)
    :param payload: a python dictionary of attributes
    :raises BadPriorityException: if priority is not a known level

    A message_id (UUID) and a GMT timestamp are appended to the
    parameters above; the composite dict is passed to each driver's
    notify().  Per-driver failures are logged, never propagated.
    """
    if priority not in log_levels:
        raise BadPriorityException(_('%s not in valid priorities') % priority)

    # Ensure everything is JSON serializable.
    payload = jsonutils.to_primitive(payload, convert_instances=True)

    msg = {'message_id': str(uuid.uuid4()),
           'publisher_id': publisher_id,
           'event_type': event_type,
           'priority': priority,
           'payload': payload,
           'timestamp': str(timeutils.utcnow())}

    for driver in _get_drivers():
        try:
            driver.notify(context, msg)
        except Exception as e:
            LOG.exception(
                _("Problem '%(e)s' attempting to "
                  "send to notification system. "
                  "Payload=%(payload)s") % dict(e=e, payload=payload))
def test_get_next_job_timed_out(self):
    """A job whose soft timeout has lapsed is handed out again."""
    now = timeutils.utcnow()
    retries = 2
    # fixture_2 soft-times-out 5s from now; the helper advances the test
    # clock 10s per create, so its assignment has expired by lookup time.
    self.job_fixture_2['timeout'] = now + datetime.timedelta(seconds=5)
    self._create_jobs(10, self.job_fixture_2, self.job_fixture_1)
    job = db_api.job_get_and_assign_next_by_action(
        'snapshot', unit_utils.WORKER_UUID1, retries)
    expected = self.jobs[0]
    self.assertEqual(job['id'], expected['id'])
    self.assertEqual(job['worker_id'], unit_utils.WORKER_UUID1)
    self.assertEqual(job['timeout'], expected['timeout'])
    self.assertEqual(job['hard_timeout'], expected['hard_timeout'])
    self.assertEqual(job['retry_count'], expected['retry_count'] + 1)
def _jobs_cleanup_hard_timed_out():
    """Find all jobs with hard_timeout values which have passed and
    delete them, logging the timeout / failure as appropriate

    :returns: the number of jobs deleted
    """
    # Removed leftover debug `print` statements (also Python 3 syntax
    # errors) and simplified the strict timedelta(0) comparison.
    now = timeutils.utcnow()
    # Collect ids first so DATA['jobs'] is not mutated while iterating.
    del_ids = [job_id for job_id, job in DATA['jobs'].items()
               if job['hard_timeout'] < now]
    for job_id in del_ids:
        job_delete(job_id)
    return len(del_ids)
def test_get_next_job_timed_out(self):
    """A job past its soft timeout is reassigned with defaults intact."""
    timeutils.set_time_override()
    try:
        now = timeutils.utcnow()
        self.job_fixture_2['timeout'] = now + timedelta(seconds=5)
        self._create_jobs(10, self.job_fixture_2, self.job_fixture_1)
        job = db_api.job_get_and_assign_next_by_action(
            'snapshot', unit_utils.WORKER_UUID1)
        expected = self.jobs[0]
        self.assertEqual(job['id'], expected['id'])
        self.assertEqual(job['worker_id'], unit_utils.WORKER_UUID1)
        timeout = expected['created_at'] + timedelta(seconds=5)
        hard_timeout = expected['created_at'] + timedelta(seconds=30)
        self.assertEqual(job['timeout'], timeout)
        self.assertEqual(job['hard_timeout'], hard_timeout)
        self.assertEqual(job['retry_count'], expected['retry_count'] + 1)
    finally:
        # The override was never cleared before, which leaked frozen
        # time into subsequent tests; always restore the real clock.
        timeutils.clear_time_override()
def job_get_and_assign_next_by_action(action, worker_id, max_retry):
    """Get the next available job for the given action and assign it
    to the worker for worker_id. This must be an atomic action!"""
    now = timeutils.utcnow()
    session = get_session()
    job_id = None
    try:
        job_ref = _job_get_next_by_action(session, now, action, max_retry)
        if job_ref is None:
            return None
        job_id = job_ref['id']
        job_ref.update({'worker_id': worker_id,
                        'retry_count': job_ref['retry_count'] + 1})
        job_ref.save(session)
    except sa_orm.exc.NoResultFound:
        raise exception.NotFound()
    return _job_get_by_id(job_id)
def job_create(job_values):
    """Persist a new job row, applying timeout defaults.

    When no 'timeout' is supplied, both timeout and hard_timeout are
    set to now plus the action's configured timeout.
    """
    db_utils.validate_job_values(job_values)
    values = job_values.copy()
    session = get_session()
    job_ref = models.Job()

    if 'job_metadata' in values:
        _set_job_metadata(job_ref, values.pop('job_metadata'))

    now = timeutils.utcnow()
    job_timeout_seconds = _job_get_timeout(values['action'])
    if 'timeout' not in values:
        default_timeout = now + timedelta(seconds=job_timeout_seconds)
        values['timeout'] = default_timeout
        values['hard_timeout'] = default_timeout

    job_ref.update(values)
    job_ref.save(session=session)

    return _job_get_by_id(job_ref['id'])
def schedule_update(schedule_id, schedule_values):
    """Update a schedule's fields and, optionally, replace its metadata.

    :raises exception.NotFound: if schedule_id is unknown
    """
    global DATA
    values = schedule_values.copy()
    if schedule_id not in DATA['schedules']:
        raise exception.NotFound()

    metadata = values.pop('schedule_metadata', None)

    if values:
        schedule = DATA['schedules'][schedule_id]
        schedule['updated_at'] = timeutils.utcnow()
        schedule.update(values)

    if metadata is not None:
        # Replacement, not merge: the previous metadata set is discarded.
        DATA['schedule_metadata'][schedule_id] = {}
        for metadatum in metadata:
            schedule_meta_create(schedule_id, metadatum)

    return schedule_get_by_id(schedule_id)
def test_job_create(self):
    """Creating a job with explicit fields preserves every one of them."""
    now = timeutils.utcnow()
    fixture = {
        'action': 'snapshot',
        'tenant': unit_utils.TENANT1,
        'schedule_id': unit_utils.SCHEDULE_UUID2,
        'worker_id': unit_utils.WORKER_UUID2,
        'status': 'queued',
        'timeout': now + datetime.timedelta(hours=1),
        'hard_timeout': now + datetime.timedelta(hours=4),
        'job_metadata': [
            {
                'key': 'instance_id',
                'value': 'my_instance',
            },
        ],
    }

    job = self.db_api.job_create(fixture)

    self.assertTrue(uuidutils.is_uuid_like(job['id']))
    self.assertNotEqual(job['created_at'], None)
    self.assertNotEqual(job['updated_at'], None)
    for field in ('timeout', 'hard_timeout', 'schedule_id',
                  'worker_id', 'status'):
        self.assertEqual(job[field], fixture[field])
    self.assertEqual(job['retry_count'], 0)

    metadata = job['job_metadata']
    self.assertEqual(len(metadata), 1)
    self.assertEqual(metadata[0]['key'], fixture['job_metadata'][0]['key'])
    self.assertEqual(metadata[0]['value'],
                     fixture['job_metadata'][0]['value'])
def job_update(job_id, job_values):
    """Update a job's fields and, optionally, replace its metadata.

    :raises exception.NotFound: if job_id is unknown
    """
    global DATA
    values = job_values.copy()
    if job_id not in DATA['jobs']:
        raise exception.NotFound()

    metadata = values.pop('job_metadata', None)

    if values:
        job = DATA['jobs'][job_id]
        # NOTE(ameade): Stamp updated_at before applying the caller's
        # values so a manually supplied updated_at wins.
        job['updated_at'] = timeutils.utcnow()
        job.update(values)

    if metadata is not None:
        DATA['job_metadata'][job_id] = {}
        for metadatum in metadata:
            job_meta_create(job_id, metadatum)

    return job_get_by_id(job_id)
def _gen_base_attributes():
    """Return fresh id/created_at/updated_at attributes for a new record."""
    return copy.deepcopy({
        'id': str(uuid.uuid4()),
        'created_at': timeutils.utcnow(),
        'updated_at': timeutils.utcnow(),
    })
def _get_utcnow(self):
    """Return the current UTC time from timeutils.

    Kept as a method so callers go through one indirection point —
    presumably so tests can stub it; confirm against callers.
    """
    current = timeutils.utcnow()
    return current
# under the License. """ Fakes For Worker tests. """ import datetime from qonos.openstack.common import timeutils WORKER_ID = '11111111-1111-1111-1111-11111111' JOB_ID = '22222222-2222-2222-2222-22222222' SCHEDULE_ID = '33333333-3333-3333-3333-33333333' TENANT = '44444444-4444-4444-4444-44444444' INSTANCE_ID = '55555555-5555-5555-5555-55555555' BASE_TIME = timeutils.utcnow() TIMEOUT = BASE_TIME + datetime.timedelta(hours=1) HARD_TIMEOUT = BASE_TIME + datetime.timedelta(hours=2) JOB = { 'job': { 'id': JOB_ID, 'created_at': BASE_TIME, 'modified_at': BASE_TIME, 'schedule_id': SCHEDULE_ID, 'tenant': TENANT, 'worker_id': WORKER_ID, 'status': 'SCHEDULED', 'action': 'snapshot', 'retry_count': 3, 'timeout': TIMEOUT,
def _create_jobs(self):
    """Create two schedules and four jobs as shared test fixtures.

    job_2 is in 'error' state and carries metadata; the others are
    'queued' against schedule_1.
    """
    fixture = {
        'id': unit_utils.SCHEDULE_UUID1,
        'tenant': unit_utils.TENANT1,
        'action': 'snapshot',
        'minute': '30',
        'hour': '2',
        'next_run': '2012-11-27T02:30:00Z',
    }
    self.schedule_1 = db_api.schedule_create(fixture)

    fixture = {
        'id': unit_utils.SCHEDULE_UUID2,
        'tenant': unit_utils.TENANT2,
        'action': 'snapshot',
        'minute': '30',
        'hour': '2',
        'next_run': '2012-11-27T02:30:00Z',
        'schedule_metadata': [
            {
                'key': 'instance_id',
                'value': 'my_instance',
            }
        ],
    }
    self.schedule_2 = db_api.schedule_create(fixture)

    now = timeutils.utcnow()
    job_defaults = {
        'action': 'snapshot',
        'timeout': now + datetime.timedelta(hours=1),
        'hard_timeout': now + datetime.timedelta(hours=4),
    }

    self.job_1 = db_api.job_create(dict(
        job_defaults,
        id=unit_utils.JOB_UUID1,
        schedule_id=self.schedule_1['id'],
        tenant=unit_utils.TENANT1,
        worker_id=unit_utils.WORKER_UUID1,
        status='queued',
        retry_count=0))

    self.job_2 = db_api.job_create(dict(
        job_defaults,
        id=unit_utils.JOB_UUID2,
        schedule_id=self.schedule_2['id'],
        tenant=unit_utils.TENANT2,
        worker_id=unit_utils.WORKER_UUID2,
        status='error',
        retry_count=1,
        job_metadata=[
            {
                'key': 'instance_id',
                'value': 'my_instance',
            },
        ]))

    self.job_3 = db_api.job_create(dict(
        job_defaults,
        id=unit_utils.JOB_UUID3,
        schedule_id=self.schedule_1['id'],
        tenant=unit_utils.TENANT1,
        worker_id=unit_utils.WORKER_UUID1,
        status='queued',
        retry_count=0))

    self.job_4 = db_api.job_create(dict(
        job_defaults,
        id=unit_utils.JOB_UUID4,
        schedule_id=self.schedule_1['id'],
        tenant=unit_utils.TENANT1,
        worker_id=unit_utils.WORKER_UUID1,
        status='queued',
        retry_count=0))
def _create_jobs(self):
    """Create four job fixtures: job_1 unassigned, jobs 2-4 already
    claimed by WORKER_UUID2 with one retry and instance metadata."""
    now = timeutils.utcnow()
    timeout = now + datetime.timedelta(hours=1)
    hard_timeout = now + datetime.timedelta(hours=4)

    instance_meta = [
        {
            'key': 'instance_id',
            'value': 'my_instance',
        },
    ]
    # (job_id, schedule_id, tenant, worker_id, retry_count, metadata)
    specs = [
        (unit_utils.JOB_UUID1, self.schedule_1['id'], unit_utils.TENANT1,
         None, 0, None),
        (unit_utils.JOB_UUID2, self.schedule_2['id'], unit_utils.TENANT2,
         unit_utils.WORKER_UUID2, 1, instance_meta),
        (unit_utils.JOB_UUID3, self.schedule_3['id'], unit_utils.TENANT3,
         unit_utils.WORKER_UUID2, 1, instance_meta),
        (unit_utils.JOB_UUID4, self.schedule_4['id'], unit_utils.TENANT4,
         unit_utils.WORKER_UUID2, 1, instance_meta),
    ]

    jobs = []
    for job_id, schedule_id, tenant, worker_id, retries, meta in specs:
        fixture = {
            'id': job_id,
            'schedule_id': schedule_id,
            'tenant': tenant,
            'worker_id': worker_id,
            'action': 'snapshot',
            'status': None,
            'timeout': timeout,
            'hard_timeout': hard_timeout,
            'retry_count': retries,
        }
        if meta is not None:
            # Fresh copies per fixture, matching the original's separate
            # literal lists, in case the store mutates them.
            fixture['job_metadata'] = [dict(m) for m in meta]
        jobs.append(db_api.job_create(fixture))

    self.job_1, self.job_2, self.job_3, self.job_4 = jobs