def test_schedule_update_remove_metadata(self):
    fixture = {
        'id': str(uuid.uuid4()),
        'tenant': str(uuid.uuid4()),
        'action': 'snapshot',
        'minute': 30,
        'hour': 2,
        'schedule_metadata': [
            {
                'key': 'instance_id',
                'value': 'my_instance',
            },
        ],
    }
    schedule = self.db_api.schedule_create(fixture)
    fixture = {
        'schedule_metadata': [],
    }
    timeutils.set_time_override()
    timeutils.advance_time_seconds(2)
    updated_schedule = self.db_api.schedule_update(schedule['id'], fixture)
    timeutils.clear_time_override()

    self.assertTrue(uuidutils.is_uuid_like(schedule['id']))
    self.assertEqual(updated_schedule['tenant'], schedule['tenant'])
    self.assertEqual(updated_schedule['action'], schedule['action'])
    self.assertEqual(updated_schedule['minute'], schedule['minute'])
    self.assertEqual(updated_schedule['hour'], schedule['hour'])
    metadata = updated_schedule['schedule_metadata']
    self.assertEqual(len(metadata), 0)
    # updating the child metadata collection doesn't update the parent
    # schedule
    self.assertEqual(updated_schedule['updated_at'], schedule['updated_at'])
def test_jobs_cleanup_hard_timed_out(self):
    jobs = self.db_api.job_get_all()
    self.assertEqual(len(jobs), 2)

    timeutils.set_time_override()
    timeutils.advance_time_delta(datetime.timedelta(hours=4, minutes=1))
    self.db_api._jobs_cleanup_hard_timed_out()
    timeutils.clear_time_override()

    jobs = self.db_api.job_get_all()
    self.assertEqual(len(jobs), 0)
def test_process_job_should_exponentially_increase_timeout(self):
    status = MockImageStatus('ERROR')
    job = copy.deepcopy(self.job)
    self._do_test_process_job_should_update_image_error(status, job=job)

    self._reset_mocks()
    new_now = timeutils.utcnow() + datetime.timedelta(minutes=120)
    timeutils.clear_time_override()
    timeutils.set_time_override(new_now)
    job['status'] = 'ERROR'
    job['retry_count'] = 2
    self._do_test_process_job_should_update_image_error(
        status, include_create=False, include_queued=False,
        is_retry=True, job=job)
def test_polling_job_timeout_extension_with_max_retries(self):
    timeout_extension = 3600
    job_timeout_max_updates_count = 3
    self.config(job_timeout_extension_sec=timeout_extension,
                group='snapshot_worker')
    self.config(job_timeout_max_updates=job_timeout_max_updates_count,
                group='snapshot_worker')
    self.config(job_timeout_initial_value_sec=10800,
                group='snapshot_worker')

    server = self.server_instance_fixture("INSTANCE_ID", "test")
    job = self.job_fixture(server.id)
    images = [
        self.image_fixture('IMAGE_ID', 'QUEUED', server.id),
        self.image_fixture('IMAGE_ID', 'SAVING', server.id),
        self.image_fixture('IMAGE_ID', 'SAVING', server.id),
        self.image_fixture('IMAGE_ID', 'SAVING', server.id),
    ]

    now = timeutils.utcnow()
    timeutils.set_time_override(now)
    timeutils.advance_time_delta(
        datetime.timedelta(seconds=timeout_extension))
    try:
        with TestableSnapshotProcessor(job, server, images) as p:
            p.next_timeout = now + p.initial_timeout
            p.next_update = now + p.update_interval
            # NOTE(venkatesh): unfortunately we had to use a protected
            # method for testing; there seems to be no easier way to
            # exercise this scenario. We need to fix this as part of
            # refactoring SnapshotJobProcessor.
            while True:
                try:
                    p._update_job(job['id'], 'PROCESSING')
                except exception.OutOfTimeException:
                    break
                timeutils.advance_time_delta(
                    datetime.timedelta(seconds=timeout_extension))

            total_timeout_duration = datetime.timedelta(
                seconds=(timeout_extension * job_timeout_max_updates_count))
            self.assertEqual(
                now + (p.initial_timeout + total_timeout_duration),
                p.next_timeout)
            self.assertEqual(3, p.timeout_count)
    finally:
        timeutils.clear_time_override()
def test_process_job_should_exponentially_increase_timeout(self):
    status = MockImageStatus('ERROR')
    job = copy.deepcopy(self.job)
    self._do_test_process_job_should_update_image_error(status, job=job)

    self._reset_mocks()
    new_now = timeutils.utcnow() + datetime.timedelta(minutes=120)
    timeutils.clear_time_override()
    timeutils.set_time_override(new_now)
    job['status'] = 'ERROR'
    job['retry_count'] = 2
    job['hard_timeout'] = timeutils.strtime(
        at=(new_now + datetime.timedelta(minutes=120)))
    self._do_test_process_job_should_update_image_error(
        status, include_create=False, include_queued=False,
        is_retry=True, job=job)
def _reset_mocks(self):
    self.mox = mox.Mox()
    self.nova_client = MockNovaClient()
    self.nova_client.servers = self.mox.CreateMockAnything()
    self.nova_client.rax_scheduled_images_python_novaclient_ext = \
        self.mox.CreateMockAnything()
    self.nova_client.images = self.mox.CreateMockAnything()
    self.qonos_client = self.mox.CreateMockAnything()
    self.worker = self.mox.CreateMockAnything()
    self.worker.get_qonos_client().AndReturn(self.qonos_client)
    self.snapshot_meta = {
        "org.openstack__1__created_by": "scheduled_images_service",
    }
    timeutils.clear_time_override()
def test_schedule_to_next_run(self):
    timeutils.set_time_override()
    self.called = False

    def fake_next_datetime(min, h, dom, m, dow, start_time):
        self.called = True
        self.assertEqual(min, '*')
        self.assertEqual(h, '*')
        self.assertEqual(dom, '*')
        self.assertEqual(m, '*')
        self.assertEqual(dow, '*')
        self.assertEqual(timeutils.utcnow(), start_time)

    self.stubs.Set(utils, 'cron_string_to_next_datetime',
                   fake_next_datetime)
    api_utils.schedule_to_next_run({})
    self.assertTrue(self.called)
    timeutils.clear_time_override()
def test_schedule_update(self):
    fixture = {
        'id': str(uuid.uuid4()),
        'tenant': str(uuid.uuid4()),
        'action': 'snapshot',
        'minute': 30,
        'hour': 2,
    }
    schedule = self.db_api.schedule_create(fixture)
    fixture = {'hour': 3}
    timeutils.set_time_override()
    timeutils.advance_time_seconds(2)
    updated_schedule = self.db_api.schedule_update(schedule['id'], fixture)
    timeutils.clear_time_override()

    self.assertTrue(uuidutils.is_uuid_like(schedule['id']))
    self.assertEqual(updated_schedule['tenant'], schedule['tenant'])
    self.assertEqual(updated_schedule['action'], schedule['action'])
    self.assertEqual(updated_schedule['minute'], schedule['minute'])
    self.assertEqual(updated_schedule['hour'], fixture['hour'])
    self.assertEqual(updated_schedule['created_at'], schedule['created_at'])
    self.assertNotEqual(updated_schedule['updated_at'],
                        schedule['updated_at'])
def tearDown(self):
    super(TestSchedulesApi, self).tearDown()
    timeutils.clear_time_override()
    db_api.reset()
def tearDown(self):
    super(TestJobsDBGetNextJobApi, self).tearDown()
    timeutils.clear_time_override()
    self.db_api.reset()
def tearDown(self):
    super(TestSchedulesDBApi, self).tearDown()
    self.db_api.reset()
    timeutils.clear_time_override()