def test_jobs_cleanup_hard_timed_out(self):
    """Jobs older than the hard timeout are purged by the cleanup pass."""
    jobs = self.db_api.job_get_all()
    self.assertEqual(len(jobs), 2)

    timeutils.set_time_override()
    try:
        # Advance past 4 hours — presumably just beyond the configured
        # hard timeout, so both fixture jobs qualify for removal.
        # TODO confirm the 4h threshold against the worker config.
        timeutils.advance_time_delta(
            datetime.timedelta(hours=4, minutes=1))
        # NOTE: a protected method is invoked directly; the cleanup pass
        # has no public entry point on this API.
        self.db_api._jobs_cleanup_hard_timed_out()
    finally:
        # Always clear the override so a failure above cannot leak the
        # fake clock into subsequent tests (matches the try/finally
        # pattern used by the other tests in this file).
        timeutils.clear_time_override()

    jobs = self.db_api.job_get_all()
    self.assertEqual(len(jobs), 0)
def test_polling_job_timeout_extension_with_max_retries(self):
    """Job timeout is extended per update until the max-update cap is hit.

    NOTE(review): a method with this exact name is defined again later
    in this class; Python keeps only the last definition, so this copy
    is shadowed and never executed. The duplicate should be removed or
    renamed — TODO confirm which copy is the intended one.
    """
    timeout_extension = 3600
    job_timeout_max_updates_count = 3
    # Worker config: each update extends the deadline by one hour, at
    # most three times, on top of a 3-hour initial timeout.
    self.config(job_timeout_extension_sec=timeout_extension,
                group='snapshot_worker')
    self.config(job_timeout_max_updates=job_timeout_max_updates_count,
                group='snapshot_worker')
    self.config(job_timeout_initial_value_sec=10800,
                group='snapshot_worker')
    server = self.server_instance_fixture("INSTANCE_ID", "test")
    job = self.job_fixture(server.id)
    images = [
        self.image_fixture('IMAGE_ID', 'QUEUED', server.id),
        self.image_fixture('IMAGE_ID', 'SAVING', server.id),
        self.image_fixture('IMAGE_ID', 'SAVING', server.id),
        self.image_fixture('IMAGE_ID', 'SAVING', server.id)
    ]
    now = timeutils.utcnow()
    timeutils.set_time_override(now)
    timeutils.advance_time_delta(
        datetime.timedelta(seconds=timeout_extension))
    try:
        with TestableSnapshotProcessor(job, server, images) as p:
            p.next_timeout = now + p.initial_timeout
            p.next_update = now + p.update_interval

            # NOTE(venkatesh): unfortunately had to use a protected
            # method for testing. Else there seems to be no easier way
            # to test this scenario. We need to fix this as part of
            # refactoring SnapshotJobProcessor.
            # Keep updating (advancing the clock one extension per
            # iteration) until the processor runs out of extensions.
            while True:
                try:
                    p._update_job(job['id'], 'PROCESSING')
                except exception.OutOfTimeException:
                    break
                timeutils.advance_time_delta(
                    datetime.timedelta(seconds=timeout_extension))

            # Deadline should have moved by exactly max_updates
            # extensions beyond the initial timeout, and the extension
            # counter should equal the configured cap.
            total_timeout_duration = datetime.timedelta(
                seconds=(timeout_extension * job_timeout_max_updates_count))
            self.assertEqual(
                now + (p.initial_timeout + total_timeout_duration),
                p.next_timeout)
            self.assertEqual(3, p.timeout_count)
    finally:
        # Always restore the real clock for subsequent tests.
        timeutils.clear_time_override()
def test_polling_job_timeout_extension_with_max_retries(self):
    """Verify the job deadline stops being extended at the update cap.

    Each status update pushes the deadline out by one extension; once
    the configured maximum number of updates is reached, a further
    update raises OutOfTimeException and the deadline stays put.
    """
    extension_sec = 3600
    max_updates = 3
    self.config(job_timeout_extension_sec=extension_sec,
                group='snapshot_worker')
    self.config(job_timeout_max_updates=max_updates,
                group='snapshot_worker')
    self.config(job_timeout_initial_value_sec=10800,
                group='snapshot_worker')

    server = self.server_instance_fixture("INSTANCE_ID", "test")
    job = self.job_fixture(server.id)
    # One queued image followed by three mid-save images.
    images = ([self.image_fixture('IMAGE_ID', 'QUEUED', server.id)] +
              [self.image_fixture('IMAGE_ID', 'SAVING', server.id)
               for _ in range(3)])

    one_extension = datetime.timedelta(seconds=extension_sec)
    start = timeutils.utcnow()
    timeutils.set_time_override(start)
    timeutils.advance_time_delta(one_extension)
    try:
        with TestableSnapshotProcessor(job, server, images) as processor:
            processor.next_timeout = start + processor.initial_timeout
            processor.next_update = start + processor.update_interval

            # NOTE(venkatesh): unfortunately had to use a protected
            # method for testing. Else there seems to be no easier way
            # to test this scenario. We need to fix this as part of
            # refactoring SnapshotJobProcessor.
            # Drive updates, advancing the clock one extension each
            # time, until the processor refuses further extensions.
            while True:
                try:
                    processor._update_job(job['id'], 'PROCESSING')
                except exception.OutOfTimeException:
                    break
                timeutils.advance_time_delta(one_extension)

            # The deadline moved by exactly max_updates extensions past
            # the initial timeout, and the counter matches the cap.
            granted = datetime.timedelta(
                seconds=extension_sec * max_updates)
            self.assertEqual(
                start + (processor.initial_timeout + granted),
                processor.next_timeout)
            self.assertEqual(3, processor.timeout_count)
    finally:
        timeutils.clear_time_override()