def test_recipe_running_then_cancelled(self):
    """
    This tests the case where the recipe is running, has a valid
    reservation request, but is cancelled before it's completed.
    """
    with session.begin():
        recipe = data_setup.create_recipe(
            task_list=[Task.by_name(u'/distribution/install')] * 2,
            reservesys=True)
        job = data_setup.create_job_for_recipes([recipe])
        job_id = job.id
        data_setup.mark_recipe_running(recipe)
        data_setup.mark_recipe_installation_finished(recipe)
        # we want at least one task to be Completed here
        # https://bugzilla.redhat.com/show_bug.cgi?id=1195558
        job.recipesets[0].recipes[0].tasks[0].stop()
        job.recipesets[0].recipes[0].tasks[1].start()
    # Let beakerd roll the dirty job state forward before checking status
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job_id)
        self.assertEqual(job.recipesets[0].recipes[0].status,
                TaskStatus.running)
        job.recipesets[0].cancel()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job_id)
        # cancellation wins even though the recipe had a reservation request
        self.assertEqual(job.recipesets[0].recipes[0].status,
                TaskStatus.cancelled)
def test_concurrent_recipe_completion(self):
    # This test simulates two recipes finishing at the same time. So we
    # have two concurrent transactions both updating the respective task states.
    # Previously there was no separate job.update_status() step, so the two
    # transactions would update the job status using out-of-date values in
    # both transactions, leaving the job running.
    with session.begin():
        recipe1 = data_setup.create_recipe()
        recipe2 = data_setup.create_recipe()
        job = data_setup.create_job_for_recipes([recipe1, recipe2])
        assert len(recipe1.tasks) == 1
        assert len(recipe2.tasks) == 1
        data_setup.mark_recipe_running(recipe1)
        data_setup.mark_recipe_running(recipe2)
        recipe1.tasks[-1].pass_(u'/', 0, u'Pass')
        recipe2.tasks[-1].pass_(u'/', 0, u'Pass')
    # Complete the recipes "concurrently" in two separate transactions
    class RecipeCompletionThread(Thread):
        def __init__(self, recipe_id=None, **kwargs):
            super(RecipeCompletionThread, self).__init__(**kwargs)
            self.recipe_id = recipe_id
            # signalled once this thread's transaction has begun
            self.ready_evt = Event()
            # signalled by the test when this thread may finish its recipe
            self.continue_evt = Event()
        def run(self):
            session.begin()
            recipe = Recipe.by_id(self.recipe_id)
            self.ready_evt.set()
            self.continue_evt.wait()
            recipe.tasks[-1].stop()
            session.commit()
    thread1 = RecipeCompletionThread(name='recipe1', recipe_id=recipe1.id)
    thread2 = RecipeCompletionThread(name='recipe2', recipe_id=recipe2.id)
    thread1.start()
    thread2.start()
    # Wait for both threads to start their transactions
    thread1.ready_evt.wait()
    thread2.ready_evt.wait()
    # Allow recipe 1 to complete
    thread1.continue_evt.set()
    thread1.join()
    with session.begin():
        session.expire_all()
        job.update_status()
        self.assertEquals(recipe1.status, TaskStatus.completed)
        self.assertEquals(recipe1.ptasks, 1)
        # job is still running because recipe 2 has not finished yet
        self.assertEquals(job.status, TaskStatus.running)
        self.assertEquals(job.ptasks, 1)
    # Now recipe 2 completes
    thread2.continue_evt.set()
    thread2.join()
    with session.begin():
        session.expire_all()
        job.update_status()
        self.assertEquals(recipe2.status, TaskStatus.completed)
        self.assertEquals(recipe2.ptasks, 1)
        self.assertEquals(job.status, TaskStatus.completed)
        self.assertEquals(job.ptasks, 2)
def setUp(self):
    """Create a running recipe containing two freshly created tasks."""
    with session.begin():
        tasks = [data_setup.create_task() for _ in range(2)]
        self.recipe = data_setup.create_recipe(task_list=tasks)
        data_setup.create_job_for_recipes([self.recipe])
        data_setup.mark_recipe_running(self.recipe)
def test_watchdog_extend_by_fqdn(self):
    """watchdog-extend accepts a system FQDN as well as a task id."""
    with session.begin():
        labcontroller = data_setup.create_labcontroller()
        host = data_setup.create_system(lab_controller=labcontroller)
        running_recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([running_recipe])
        data_setup.mark_recipe_running(running_recipe, system=host)
    run_client(['bkr', 'watchdog-extend', '--by=600', host.fqdn])
    with session.begin():
        session.expire_all()
        expected_kill_time = (datetime.datetime.utcnow() +
                datetime.timedelta(seconds=600))
        assert_datetime_within(running_recipe.watchdog.kill_time,
                tolerance=datetime.timedelta(seconds=10),
                reference=expected_kill_time)
    # nonexistent fqdn
    try:
        run_client(
            ['bkr', 'watchdog-extend', 'ireallydontexistblah.test.fqdn'])
    except ClientError as e:
        self.assertIn(
            'Cannot find any recipe running on ireallydontexistblah.test.fqdn',
            e.stderr_output)
    else:
        self.fail('Must raise')
def setUp(self):
    """Provision a recipe running on a system in our lab controller."""
    with session.begin():
        lab_controller = self.get_lc()
        self.system = data_setup.create_system(lab_controller=lab_controller)
        self.recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([self.recipe])
        data_setup.mark_recipe_running(self.recipe, system=self.system)
def test_counts(self):
    """system_utilisation_counts reports one system per utilisation state."""
    lc = data_setup.create_labcontroller()
    # one system held on a manual reservation
    reserved_system = data_setup.create_system(lab_controller=lc)
    data_setup.create_manual_reservation(reserved_system,
            start=datetime.datetime(2012, 1, 1, 0, 0, 0))
    # one system currently running a recipe
    recipe_system = data_setup.create_system(lab_controller=lc)
    data_setup.mark_recipe_running(
            data_setup.create_job().recipesets[0].recipes[0],
            system=recipe_system)
    # one idle system in each possible status
    for status in (SystemStatus.manual, SystemStatus.automated,
            SystemStatus.broken, SystemStatus.removed):
        data_setup.create_system(lab_controller=lc, status=status)
    session.flush()
    counts = system_utilisation_counts(System.query.filter(
            System.lab_controller == lc))
    for key in ('recipe', 'manual', 'idle_manual', 'idle_automated',
            'idle_broken', 'idle_removed'):
        self.assertEqual(counts[key], 1)
def test_role_fqdns_not_duplicated(self):
    # When the task roles repeat the recipe roles, each FQDN should still
    # appear only once per role in the peer_roles result.
    with session.begin():
        dt = data_setup.create_distro_tree()
        lc = data_setup.create_labcontroller()
        systems = [
            data_setup.create_system(fqdn=u'server.bz951283',
                lab_controller=lc),
            data_setup.create_system(fqdn=u'client.bz951283',
                lab_controller=lc),
        ]
        job = data_setup.create_job_for_recipes([
            data_setup.create_recipe(distro_tree=dt, role=u'SERVERS'),
            data_setup.create_recipe(distro_tree=dt, role=u'CLIENTS'),
        ])
        # same roles on the tasks as on the recipes
        job.recipesets[0].recipes[0].tasks[0].role = u'SERVERS'
        job.recipesets[0].recipes[1].tasks[0].role = u'CLIENTS'
        for i in range(2):
            data_setup.mark_recipe_running(job.recipesets[0].recipes[i],
                    system=systems[i])
    self.server.auth.login_password(self.lc.user.user_name, u'logmein')
    expected = {
        'SERVERS': ['server.bz951283'],
        'CLIENTS': ['client.bz951283'],
    }
    # both recipes should see the same de-duplicated role map
    for i in range(2):
        self.assertEquals(self.server.recipes.tasks.peer_roles(
                job.recipesets[0].recipes[i].tasks[0].id),
                expected)
def test_peer_roles(self):
    # peer_roles should merge recipe-level and task-level roles across
    # all recipes in the recipe set.
    with session.begin():
        dt = data_setup.create_distro_tree()
        lc = data_setup.create_labcontroller()
        systems = [
            data_setup.create_system(fqdn=u'server.peer-roles.invalid',
                lab_controller=lc),
            data_setup.create_system(fqdn=u'clientone.peer-roles.invalid',
                lab_controller=lc),
            data_setup.create_system(fqdn=u'clienttwo.peer-roles.invalid',
                lab_controller=lc),
        ]
        job = data_setup.create_job_for_recipes([
            data_setup.create_recipe(distro_tree=dt, role=u'SERVERS'),
            data_setup.create_recipe(distro_tree=dt, role=u'CLIENTS'),
            data_setup.create_recipe(distro_tree=dt, role=u'CLIENTS'),
        ])
        job.recipesets[0].recipes[0].tasks[0].role = None
        # Normally you wouldn't use the same role name with different
        # meaning at the task level, because that would just get
        # confusing... but it is possible
        job.recipesets[0].recipes[1].tasks[0].role = u'SERVERS'
        job.recipesets[0].recipes[2].tasks[0].role = u'CLIENTTWO'
        for i in range(3):
            data_setup.mark_recipe_running(job.recipesets[0].recipes[i],
                    system=systems[i])
    self.server.auth.login_password(self.lc.user.user_name, u'logmein')
    expected = {
        'SERVERS': ['server.peer-roles.invalid',
                'clientone.peer-roles.invalid'],
        'CLIENTS': ['clientone.peer-roles.invalid',
                'clienttwo.peer-roles.invalid'],
        'None': ['server.peer-roles.invalid'],
        'CLIENTTWO': ['clienttwo.peer-roles.invalid'],
    }
    # every recipe in the set sees the same combined role map
    for i in range(3):
        self.assertEquals(self.server.recipes.tasks.peer_roles(
                job.recipesets[0].recipes[i].tasks[0].id),
                expected)
def test_unknown_fqdns_dont_appear(self):
    # If we have a recipe where the FQDN is not known (for example
    # a guest that hasn't finished installing yet), previously it would
    # appear as the string 'None'. Now it's just not included.
    with session.begin():
        hostrecipe = data_setup.create_recipe(role=u'SERVERS')
        guestrecipe = data_setup.create_guestrecipe(host=hostrecipe,
                role=u'CLIENTS')
        data_setup.create_job_for_recipes([hostrecipe, guestrecipe])
        system = data_setup.create_system(fqdn=u'host.bz952948',
                lab_controller=self.lc)
        data_setup.mark_recipe_running(hostrecipe, system=system)
        data_setup.mark_recipe_waiting(guestrecipe)
        # guest has not reported an FQDN yet
        self.assertEquals(guestrecipe.resource.fqdn, None)
    self.server.auth.login_password(self.lc.user.user_name, u'logmein')
    self.assertEquals(self.server.recipes.tasks.peer_roles(
            hostrecipe.tasks[0].id),
            {'SERVERS': ['host.bz952948'],
             'STANDALONE': ['host.bz952948'],
             'CLIENTS': []})
    self.assertEquals(self.server.recipes.tasks.peer_roles(
            guestrecipe.tasks[0].id),
            {'SERVERS': ['host.bz952948'],
             'STANDALONE': ['host.bz952948'],
             'CLIENTS': []})
def test_reserved_openstack_instance(self):
    # Reservation notification for a recipe running on an OpenStack
    # instance should point at the instance in the OpenStack dashboard
    # instead of a Beaker system page.
    with session.begin():
        owner = data_setup.create_user(
            email_address=u'*****@*****.**')
        distro_tree = data_setup.create_distro_tree(
            distro_name=u'MicrowaveOS', variant=u'ThreeHeats',
            arch=u'x86_64')
        job = data_setup.create_job(
            owner=owner, distro_tree=distro_tree,
            whiteboard=u'Operation Righteous Cowboy Lightning',
            recipe_whiteboard=u'Everything Sunny All the Time Always')
        recipe = job.recipesets[0].recipes[0]
        data_setup.mark_recipe_running(
            recipe, virt=True,
            instance_id=uuid.UUID('00000000-1111-2222-3333-444444444444'),
            fqdn=u'bitenuker.ge.invalid')
    with session.begin():
        bkr.server.mail.reservesys_notify(recipe)
    self.assertEqual(len(self.mail_capture.captured_mails), 1)
    sender, rcpts, raw_msg = self.mail_capture.captured_mails[0]
    self.assertEqual(rcpts, [owner.email_address])
    msg = email.message_from_string(raw_msg)
    self.assertEqual(msg['To'], owner.email_address)
    self.assertEqual(msg['Subject'],
            '[Beaker System Reserved] bitenuker.ge.invalid')
    self.assertEqual(msg['X-Beaker-Notification'], 'system-reservation')
    # NOTE(review): exact whitespace/line layout of this template was lost
    # in formatting — confirm against the real reservesys mail template.
    expected_mail_body = u"""\
** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
This System is reserved by [email protected]

To return this system early, you can click on 'Release System' against this recipe
from the Web UI. Ensure you have your logs off the system before returning to
Beaker.
 %(base)srecipes/%(recipeid)s

For system details, see:
http://openstack.example.invalid/dashboard/project/instances/00000000-1111-2222-3333-444444444444/
For the default root password, see:
%(base)sprefs

Beaker Test information:
HOSTNAME=bitenuker.ge.invalid
JOBID=%(jobid)s
RECIPEID=%(recipeid)s
DISTRO=MicrowaveOS ThreeHeats x86_64
ARCHITECTURE=x86_64

Job Whiteboard: Operation Righteous Cowboy Lightning

Recipe Whiteboard: Everything Sunny All the Time Always
** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **""" \
        % dict(base=get_server_base(), recipeid=recipe.id, jobid=job.id)
    actual_mail_body = msg.get_payload(decode=True)
    self.assertMultiLineEqual(actual_mail_body, expected_mail_body)
def test_concurrent_recipe_completion(self):
    # This test simulates two recipes finishing at the same time. So we
    # have two concurrent transactions both updating the respective task states.
    # Previously there was no separate job.update_status() step, so the two
    # transactions would update the job status using out-of-date values in
    # both transactions, leaving the job running.
    with session.begin():
        recipe1 = data_setup.create_recipe()
        recipe2 = data_setup.create_recipe()
        job = data_setup.create_job_for_recipes([recipe1, recipe2])
        assert len(recipe1.tasks) == 1
        assert len(recipe2.tasks) == 1
        data_setup.mark_recipe_running(recipe1)
        data_setup.mark_recipe_running(recipe2)
        recipe1.tasks[-1].pass_(u"/", 0, u"Pass")
        recipe2.tasks[-1].pass_(u"/", 0, u"Pass")
    # Complete the recipes "concurrently" in two separate transactions
    class RecipeCompletionThread(Thread):
        def __init__(self, recipe_id=None, **kwargs):
            super(RecipeCompletionThread, self).__init__(**kwargs)
            self.recipe_id = recipe_id
            # signalled once this thread's transaction has begun
            self.ready_evt = Event()
            # signalled by the test when this thread may finish its recipe
            self.continue_evt = Event()
        def run(self):
            session.begin()
            recipe = Recipe.by_id(self.recipe_id)
            self.ready_evt.set()
            self.continue_evt.wait()
            recipe.tasks[-1].stop()
            session.commit()
    thread1 = RecipeCompletionThread(name="recipe1", recipe_id=recipe1.id)
    thread2 = RecipeCompletionThread(name="recipe2", recipe_id=recipe2.id)
    thread1.start()
    thread2.start()
    # Wait for both threads to start their transactions
    thread1.ready_evt.wait()
    thread2.ready_evt.wait()
    # Allow recipe 1 to complete
    thread1.continue_evt.set()
    thread1.join()
    with session.begin():
        session.expire_all()
        job.update_status()
        self.assertEquals(recipe1.status, TaskStatus.completed)
        self.assertEquals(recipe1.ptasks, 1)
        # job remains running until recipe 2 also finishes
        self.assertEquals(job.status, TaskStatus.running)
        self.assertEquals(job.ptasks, 1)
    # Now recipe 2 completes
    thread2.continue_evt.set()
    thread2.join()
    with session.begin():
        session.expire_all()
        job.update_status()
        self.assertEquals(recipe2.status, TaskStatus.completed)
        self.assertEquals(recipe2.ptasks, 1)
        self.assertEquals(job.status, TaskStatus.completed)
        self.assertEquals(job.ptasks, 2)
def test_role_fqdns_not_duplicated(self):
    """Each FQDN appears once per role even when task roles repeat recipe roles."""
    with session.begin():
        distro_tree = data_setup.create_distro_tree()
        lab_controller = data_setup.create_labcontroller()
        server_system = data_setup.create_system(fqdn=u'server.bz951283',
                lab_controller=lab_controller)
        client_system = data_setup.create_system(fqdn=u'client.bz951283',
                lab_controller=lab_controller)
        job = data_setup.create_job_for_recipes([
            data_setup.create_recipe(distro_tree=distro_tree,
                    role=u'SERVERS'),
            data_setup.create_recipe(distro_tree=distro_tree,
                    role=u'CLIENTS'),
        ])
        recipes = job.recipesets[0].recipes
        # same roles on the tasks as on the recipes
        recipes[0].tasks[0].role = u'SERVERS'
        recipes[1].tasks[0].role = u'CLIENTS'
        for recipe, system in zip(recipes, [server_system, client_system]):
            data_setup.mark_recipe_running(recipe, system=system)
    self.server.auth.login_password(self.lc.user.user_name, u'logmein')
    expected = {
        'SERVERS': ['server.bz951283'],
        'CLIENTS': ['client.bz951283'],
    }
    for recipe in job.recipesets[0].recipes:
        actual = self.server.recipes.tasks.peer_roles(recipe.tasks[0].id)
        self.assertEquals(actual, expected)
def test_unknown_fqdns_dont_appear(self):
    # If we have a recipe where the FQDN is not known (for example
    # a guest that hasn't finished installing yet), previously it would
    # appear as the string 'None'. Now it's just not included.
    with session.begin():
        hostrecipe = data_setup.create_recipe(role=u'SERVERS')
        guestrecipe = data_setup.create_guestrecipe(host=hostrecipe,
                role=u'CLIENTS')
        data_setup.create_job_for_recipes([hostrecipe, guestrecipe])
        system = data_setup.create_system(fqdn=u'host.bz952948')
        data_setup.mark_recipe_running(hostrecipe, system=system)
        data_setup.mark_recipe_waiting(guestrecipe)
        # guest has not reported an FQDN yet
        self.assertEquals(guestrecipe.resource.fqdn, None)
    self.server.auth.login_password(self.lc.user.user_name, u'logmein')
    self.assertEquals(
        self.server.recipes.tasks.peer_roles(hostrecipe.tasks[0].id), {
            'SERVERS': ['host.bz952948'],
            'STANDALONE': ['host.bz952948'],
            'CLIENTS': []
        })
    self.assertEquals(
        self.server.recipes.tasks.peer_roles(guestrecipe.tasks[0].id), {
            'SERVERS': ['host.bz952948'],
            'STANDALONE': [],
            'CLIENTS': []
        })
def test_complete_job_results(self):
    # Submits complete-job.xml, completes its recipes with fixed
    # timestamps/FQDNs, then compares the generated results XML against
    # the complete-job-results.xml fixture.
    complete_job_xml = pkg_resources.resource_string(
        'bkr.inttest', 'complete-job.xml')
    xmljob = lxml.etree.fromstring(complete_job_xml)
    job = testutil.call(self.controller.process_xmljob, xmljob, self.user)
    session.flush()
    # Complete the job, filling in values to match what's hardcoded in
    # complete-job-results.xml...
    recipe = job.recipesets[0].recipes[0]
    guestrecipe = recipe.guests[0]
    data_setup.mark_recipe_running(
        recipe,
        fqdn=u'system.test-complete-job-results',
        start_time=datetime.datetime(2016, 1, 31, 23, 0, 0),
        install_started=datetime.datetime(2016, 1, 31, 23, 0, 1),
        install_finished=datetime.datetime(2016, 1, 31, 23, 0, 2),
        postinstall_finished=datetime.datetime(2016, 1, 31, 23, 0, 3),
        task_start_time=datetime.datetime(2016, 1, 31, 23, 0, 4))
    data_setup.mark_recipe_complete(
        guestrecipe,
        fqdn=u'guest.test-complete-job-results',
        mac_address='ff:ff:ff:00:00:00',
        start_time=datetime.datetime(2016, 1, 31, 23, 30, 0),
        install_started=datetime.datetime(2016, 1, 31, 23, 30, 1),
        install_finished=datetime.datetime(2016, 1, 31, 23, 30, 2),
        postinstall_finished=datetime.datetime(2016, 1, 31, 23, 30, 3),
        finish_time=datetime.datetime(2016, 1, 31, 23, 30, 4))
    data_setup.mark_recipe_complete(
        recipe, only=True,
        start_time=datetime.datetime(2016, 1, 31, 23, 0, 4),
        finish_time=datetime.datetime(2016, 1, 31, 23, 59, 0))
    recipe.installation.rendered_kickstart.url = u'http://example.com/recipe.ks'
    guestrecipe.installation.rendered_kickstart.url = u'http://example.com/guest.ks'
    session.flush()
    # Hack up the database ids... This will fail if it's flushed, but it's
    # the easiest way to make them match the expected values.
    job.id = 1
    job.recipesets[0].id = 1
    recipe.id = 1
    guestrecipe.id = 2
    recipe.tasks[0].id = 1
    recipe.tasks[1].id = 2
    guestrecipe.tasks[0].id = 3
    guestrecipe.tasks[0].results[0].id = 1
    recipe.tasks[0].results[0].id = 2
    recipe.tasks[1].results[0].id = 3
    expected_results_xml = pkg_resources.resource_string(
        'bkr.inttest', 'complete-job-results.xml')
    expected_results_xml = expected_results_xml.replace(
        '${BEAKER_SERVER_BASE_URL}', get_server_base())
    actual_results_xml = lxml.etree.tostring(job.to_xml(clone=False),
            pretty_print=True, encoding='utf8')
    self.assertMultiLineEqual(expected_results_xml, actual_results_xml)
def test_recipe_not_provisioned_yet(self):
    """The iPXE script endpoint answers 503 until the recipe is provisioned."""
    with session.begin():
        recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([recipe])
        data_setup.mark_recipe_running(recipe, virt=True)
    # VM is created but recipe.provision() hasn't been called yet
    url = '%ssystems/by-uuid/%s/ipxe-script' % (
            get_server_base(), recipe.resource.instance_id)
    response = requests.get(url)
    self.assertEquals(response.status_code, 503)
def test_reserved_openstack_instance(self):
    # Reservation notification for a recipe running on an OpenStack
    # instance should point at the instance in the OpenStack dashboard
    # instead of a Beaker system page.
    with session.begin():
        owner = data_setup.create_user(
            email_address=u'*****@*****.**')
        distro_tree = data_setup.create_distro_tree(
            distro_name=u'MicrowaveOS-20141016.1',
            variant=u'ThreeHeats', arch=u'x86_64')
        job = data_setup.create_job(
            owner=owner, distro_tree=distro_tree,
            whiteboard=u'Operation Righteous Cowboy Lightning',
            recipe_whiteboard=u'Everything Sunny All the Time Always')
        recipe = job.recipesets[0].recipes[0]
        data_setup.mark_recipe_running(
            recipe, virt=True,
            instance_id=uuid.UUID('00000000-1111-2222-3333-444444444444'),
            fqdn=u'bitenuker.ge.invalid')
    mail_capture_thread.start_capturing()
    with session.begin():
        bkr.server.mail.reservesys_notify(recipe)
    captured_mails = mail_capture_thread.stop_capturing()
    self.assertEqual(len(captured_mails), 1)
    sender, rcpts, raw_msg = captured_mails[0]
    self.assertEqual(rcpts, [owner.email_address])
    msg = email.message_from_string(raw_msg)
    self.assertEqual(msg['To'], owner.email_address)
    self.assertEqual(msg['Subject'],
            '[Beaker System Reserved] bitenuker.ge.invalid')
    self.assertEqual(msg['X-Beaker-Notification'], 'system-reservation')
    # NOTE(review): exact whitespace/line layout of this template was lost
    # in formatting — confirm against the real reservesys mail template.
    expected_mail_body = u"""\
** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
This System is reserved by [email protected]

To return this system early, you can click on 'Release System' against this recipe
from the Web UI. Ensure you have your logs off the system before returning to
Beaker.
 %(base)srecipes/%(recipeid)s

For system details, see:
http://openstack.example.invalid/dashboard/project/instances/00000000-1111-2222-3333-444444444444/
For the default root password, see:
%(base)sprefs

Beaker Test information:
HOSTNAME=bitenuker.ge.invalid
JOBID=%(jobid)s
RECIPEID=%(recipeid)s
DISTRO=MicrowaveOS-20141016.1 ThreeHeats x86_64
ARCHITECTURE=x86_64

Job Whiteboard: Operation Righteous Cowboy Lightning

Recipe Whiteboard: Everything Sunny All the Time Always
** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **""" \
        % dict(base=get_server_base(), recipeid=recipe.id, jobid=job.id)
    actual_mail_body = msg.get_payload(decode=True)
    self.assertMultiLineEqual(actual_mail_body, expected_mail_body)
def setUp(self):
    """Run a recipe with a named task, a generated task, and a fetch-URL task."""
    with session.begin():
        self.recipe = data_setup.create_recipe(
                task_name=u'/distribution/install')
        extra_tasks = [
            RecipeTask.from_task(data_setup.create_task()),
            RecipeTask.from_fetch_url(
                    u'http://example.com/tasks/example.tar.bz2'),
        ]
        self.recipe.tasks.extend(extra_tasks)
        data_setup.create_job_for_recipes([self.recipe])
        data_setup.mark_recipe_running(self.recipe)
def setUp(self):
    """Start a recipe on a system and compute its console log paths."""
    with session.begin():
        self.system = data_setup.create_system(lab_controller=self.get_lc())
        self.recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([self.recipe])
        data_setup.mark_recipe_running(self.recipe, system=self.system)
        conf = get_conf()
        self.console_log = os.path.join(conf.get('CONSOLE_LOGS'),
                self.system.fqdn)
        recipe_id = self.recipe.id
        self.cached_console_log = os.path.join(conf.get('CACHEPATH'),
                'recipes', '%s+' % (recipe_id // 1000), str(recipe_id),
                'console.log')
def setUp(self):
    """Start a virt recipe and record where its console log will be cached."""
    with session.begin():
        self.watchdog = Watchdog()
        self.recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([self.recipe])
        data_setup.mark_recipe_running(self.recipe, virt=True,
                lab_controller=self.get_lc())
        bucket = str(self.recipe.id // 1000) + '+'
        self.cached_console_log = os.path.join(get_conf().get('CACHEPATH'),
                'recipes', bucket, str(self.recipe.id), 'console.log')
def setUp(self):
    """Run a recipe with a named task, a generated task, and a fetch-URL task."""
    with session.begin():
        recipe = data_setup.create_recipe(
                task_name=u'/distribution/install')
        recipe.tasks.append(RecipeTask.from_task(data_setup.create_task()))
        recipe.tasks.append(RecipeTask.from_fetch_url(
                u'http://example.com/tasks/example.tar.bz2'))
        data_setup.create_job_for_recipes([recipe])
        data_setup.mark_recipe_running(recipe)
        self.recipe = recipe
def test_task_start_time_is_localised(self):
    """The task start time shown on the Tasks tab carries a localised tooltip."""
    with session.begin():
        recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([recipe])
        data_setup.mark_recipe_running(recipe)
    browser = self.browser
    go_to_recipe_view(browser, recipe, tab='Tasks')
    tasks_tab = browser.find_element_by_id('tasks')
    xpath = ('//div[@id="task%s"]'
             '//div[@class="task-start-time"]/time' % recipe.tasks[0].id)
    start_time = tasks_tab.find_element_by_xpath(xpath)
    self.check_datetime_localised(start_time.get_attribute('title'))
def setUp(self):
    """Start a virt recipe (cleaned up afterwards) and locate its cached log."""
    with session.begin():
        self.watchdog = Watchdog()
        self.recipe = data_setup.create_recipe()
        job = data_setup.create_job_for_recipes([self.recipe])
        self.addCleanup(self.cleanup_job, job)
        data_setup.mark_recipe_running(self.recipe, virt=True,
                lab_controller=self.get_lc())
        recipe_id = self.recipe.id
        self.cached_console_log = os.path.join(get_conf().get('CACHEPATH'),
                'recipes', str(recipe_id // 1000) + '+', str(recipe_id),
                'console.log')
def test_watchdog_show_running_task(self):
    """watchdog-show prints the remaining seconds for a running task."""
    with session.begin():
        r1 = data_setup.create_recipe()
        data_setup.create_job_for_recipes([r1])
        data_setup.mark_recipe_running(r1)
        session.flush()
        t1 = r1.tasks[0]
        # give the watchdog a known remaining time of ~99 seconds
        t1.watchdog.kill_time = datetime.datetime.utcnow() + \
                datetime.timedelta(seconds=99)
    out = run_client(['bkr', 'watchdog-show', str(t1.id)])
    # Let's just check it is somewhere between 10-99
    self.assertTrue(re.match('%s: \d\d\\n' % t1.id, out))
def setUp(self):
    """Start a virt recipe and attach a Monitor watching its console log."""
    with session.begin():
        self.recipe = data_setup.create_recipe()
        job = data_setup.create_job_for_recipes([self.recipe])
        self.addCleanup(self.cleanup_job, job)
        data_setup.mark_recipe_running(self.recipe, virt=True,
                lab_controller=self.get_lc())
        recipe_id = self.recipe.id
        self.cached_console_log = os.path.join(get_conf().get('CACHEPATH'),
                'recipes', '%s+' % (recipe_id // 1000), str(recipe_id),
                'console.log')
        self.watchdog = Watchdog()
        self.monitor = Monitor({'recipe_id': recipe_id,
                                'is_virt_recipe': True}, self.watchdog)
def test_handles_null_recipe_task_id(self):
    """A watchdog whose recipetask is None must not break the watchdogs page."""
    with session.begin():
        tree = data_setup.create_distro_tree()
        running = data_setup.create_recipe(distro_tree=tree)
        waiting = data_setup.create_recipe(distro_tree=tree)
        job = data_setup.create_job_for_recipes([running, waiting])
        data_setup.mark_recipe_running(running)
        data_setup.mark_recipe_waiting(waiting)
        self.assertEquals(waiting.watchdog.recipetask, None)
    b = self.browser
    b.get(get_server_base() + "watchdogs/")
    self.assertEquals(b.title, "Watchdogs")
def test_handles_null_recipe_task_id(self):
    # Watchdog rows without an associated recipe task must still render.
    with session.begin():
        distro_tree = data_setup.create_distro_tree()
        running_recipe = data_setup.create_recipe(distro_tree=distro_tree)
        waiting_recipe = data_setup.create_recipe(distro_tree=distro_tree)
        job = data_setup.create_job_for_recipes(
                [running_recipe, waiting_recipe])
        data_setup.mark_recipe_running(running_recipe)
        data_setup.mark_recipe_waiting(waiting_recipe)
        self.assertEquals(waiting_recipe.watchdog.recipetask, None)
    browser = self.browser
    browser.get(get_server_base() + 'watchdogs/')
    self.assertEquals(browser.title, 'Watchdogs')
def setUp(self):
    """Start a recipe on a system and compute both console log locations."""
    with session.begin():
        self.system = data_setup.create_system(lab_controller=self.get_lc())
        self.recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([self.recipe])
        data_setup.mark_recipe_running(self.recipe, system=self.system)
        self.console_log = os.path.join(
                get_conf().get('CONSOLE_LOGS'), self.system.fqdn)
        cache_parts = [get_conf().get('CACHEPATH'), 'recipes',
                str(self.recipe.id // 1000) + '+', str(self.recipe.id),
                'console.log']
        self.cached_console_log = os.path.join(*cache_parts)
def setUp(self):
    """Run a recipe whose recipe, first task, and first result carry two logs each."""
    with session.begin():
        self.recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([self.recipe])
        data_setup.mark_recipe_running(self.recipe)
        recipe_logs = [
            LogRecipe(path=u'/', filename=u'console.log'),
            LogRecipe(path=u'some-dir', filename=u'some-file.txt'),
        ]
        self.recipe.logs[:] = recipe_logs
        self.task = self.recipe.tasks[0]
        task_logs = [
            LogRecipeTask(path=u'/', filename=u'TESTOUT.log'),
            LogRecipeTask(path=u'debug', filename=u'.task_beah_raw'),
        ]
        self.task.logs[:] = task_logs
        # passing the task creates the result we attach logs to
        self.task.pass_(u'', 0, u'Pass')
        self.result = self.recipe.tasks[0].results[0]
        result_logs = [
            LogRecipeTaskResult(path=u'/', filename=u'test.log'),
            LogRecipeTaskResult(path=u'some-dir',
                    filename=u'some-file.txt'),
        ]
        self.result.logs[:] = result_logs
def test_by_log_server_only_returns_completed_recipesets(self):
    """A recipe set with any incomplete recipe is not returned by by_log_server."""
    with session.begin():
        tree = data_setup.create_distro_tree()
        completed_recipe = data_setup.create_recipe(distro_tree=tree)
        incomplete_recipe = data_setup.create_recipe(distro_tree=tree)
        job = data_setup.create_job_for_recipes(
                [completed_recipe, incomplete_recipe])
        job.recipesets[0].lab_controller = self.lc
        data_setup.mark_recipe_running(incomplete_recipe,
                system=data_setup.create_system(lab_controller=self.lc))
        data_setup.mark_recipe_complete(completed_recipe,
                system=data_setup.create_system(lab_controller=self.lc))
    result = self.server.recipes.by_log_server(self.lc.fqdn)
    self.assertEqual(result, [])
def test_install_done_updates_resource_fqdn(self):
    """install_done records the FQDN reported by the guest installer."""
    with session.begin():
        tree = data_setup.create_distro_tree()
        host = data_setup.create_recipe(distro_tree=tree)
        guest = data_setup.create_guestrecipe(host=host, distro_tree=tree)
        data_setup.create_job_for_recipes([host, guest])
        data_setup.mark_recipe_running(host)
        data_setup.mark_recipe_waiting(guest)
    fqdn = 'theguestname'
    result = self.server.recipes.install_done(guest.id, fqdn)
    self.assertEqual(result, fqdn)
    with session.begin():
        session.expire(guest.resource)
        self.assertEqual(guest.resource.fqdn, fqdn)
def test_recipe_running_then_cancelled(self):
    """
    This tests the case where the recipe is running, has a valid
    reservation request, but is cancelled before it's completed.
    """
    recipe, job = self.recipe, self.job
    data_setup.mark_recipe_running(recipe)
    # we want at least one task to be Completed here
    # https://bugzilla.redhat.com/show_bug.cgi?id=1195558
    first_task, second_task = recipe.tasks[0], recipe.tasks[1]
    first_task.stop()
    second_task.start()
    job.update_status()
    self.assertEqual(recipe.status, TaskStatus.running)
    job.recipesets[0].cancel()
    job.update_status()
    self.assertEqual(recipe.status, TaskStatus.cancelled)
def test_install_done_updates_resource_fqdn(self):
    # install_done should store the FQDN reported for the guest resource.
    with session.begin():
        distro_tree = data_setup.create_distro_tree()
        hostrecipe = data_setup.create_recipe(distro_tree=distro_tree)
        guestrecipe = data_setup.create_guestrecipe(host=hostrecipe,
                distro_tree=distro_tree)
        data_setup.create_job_for_recipes([hostrecipe, guestrecipe])
        data_setup.mark_recipe_running(hostrecipe)
        data_setup.mark_recipe_waiting(guestrecipe)
    reported_fqdn = 'theguestname'
    self.assertEqual(
            self.server.recipes.install_done(guestrecipe.id, reported_fqdn),
            reported_fqdn)
    with session.begin():
        session.expire(guestrecipe.resource)
        self.assertEqual(guestrecipe.resource.fqdn, reported_fqdn)
def test_get_system_with_running_hardware_scan_recipe(self):
    # The bug was a circular reference from system -> recipe -> system
    # which caused JSON serialization to fail.
    with session.begin():
        Job.inventory_system_job(data_setup.create_distro_tree(),
                owner=self.owner, system=self.system)
        recipe = self.system.find_current_hardware_scan_recipe()
        data_setup.mark_recipe_running(recipe, system=self.system)
    url = get_server_base() + "/systems/%s/" % self.system.fqdn
    response = requests.get(url, headers={"Accept": "application/json"})
    response.raise_for_status()
    in_progress_scan = response.json()["in_progress_scan"]
    self.assertEquals(in_progress_scan["recipe_id"], recipe.id)
    self.assertEquals(in_progress_scan["status"], u"Running")
    self.assertEquals(in_progress_scan["job_id"],
            recipe.recipeset.job.t_id)
def test_complete_job_results(self):
    # Submits complete-job.xml, completes its recipes with fixed
    # timestamps/FQDNs, then compares the generated results XML against
    # the complete-job-results.xml fixture.
    complete_job_xml = pkg_resources.resource_string('bkr.inttest',
            'complete-job.xml')
    xmljob = lxml.etree.fromstring(complete_job_xml)
    job = testutil.call(self.controller.process_xmljob, xmljob, self.user)
    session.flush()
    # Complete the job, filling in values to match what's hardcoded in
    # complete-job-results.xml...
    recipe = job.recipesets[0].recipes[0]
    guestrecipe = recipe.guests[0]
    data_setup.mark_recipe_running(recipe,
            fqdn=u'system.test-complete-job-results',
            start_time=datetime.datetime(2016, 1, 31, 23, 0, 0),
            install_started=datetime.datetime(2016, 1, 31, 23, 0, 1),
            install_finished=datetime.datetime(2016, 1, 31, 23, 0, 2),
            postinstall_finished=datetime.datetime(2016, 1, 31, 23, 0, 3),
            task_start_time=datetime.datetime(2016, 1, 31, 23, 0, 4))
    data_setup.mark_recipe_complete(guestrecipe,
            fqdn=u'guest.test-complete-job-results',
            mac_address='ff:ff:ff:00:00:00',
            start_time=datetime.datetime(2016, 1, 31, 23, 30, 0),
            install_started=datetime.datetime(2016, 1, 31, 23, 30, 1),
            install_finished=datetime.datetime(2016, 1, 31, 23, 30, 2),
            postinstall_finished=datetime.datetime(2016, 1, 31, 23, 30, 3),
            finish_time=datetime.datetime(2016, 1, 31, 23, 30, 4))
    data_setup.mark_recipe_complete(recipe, only=True,
            start_time=datetime.datetime(2016, 1, 31, 23, 0, 4),
            finish_time=datetime.datetime(2016, 1, 31, 23, 59, 0))
    recipe.installation.rendered_kickstart.url = u'http://example.com/recipe.ks'
    guestrecipe.installation.rendered_kickstart.url = u'http://example.com/guest.ks'
    session.flush()
    # Hack up the database ids... This will fail if it's flushed, but it's
    # the easiest way to make them match the expected values.
    job.id = 1
    job.recipesets[0].id = 1
    recipe.id = 1
    guestrecipe.id = 2
    recipe.tasks[0].id = 1
    recipe.tasks[1].id = 2
    guestrecipe.tasks[0].id = 3
    guestrecipe.tasks[0].results[0].id = 1
    recipe.tasks[0].results[0].id = 2
    recipe.tasks[1].results[0].id = 3
    expected_results_xml = pkg_resources.resource_string('bkr.inttest',
            'complete-job-results.xml')
    expected_results_xml = expected_results_xml.replace(
        '${BEAKER_SERVER_BASE_URL}', get_server_base())
    actual_results_xml = lxml.etree.tostring(job.to_xml(clone=False),
            pretty_print=True, encoding='utf8')
    self.assertMultiLineEqual(expected_results_xml, actual_results_xml)
def test_install_failure_is_not_reported_after_installation_is_finished(self):
    """
    An anaconda failure string appearing on the console after the
    installation has already finished must not abort the running task.
    """
    anaconda_success = 'blah blah installing... done\n'
    # Use context managers so the log file handles are closed promptly,
    # instead of leaking them via open(...).write(...).
    with open(self.console_log, 'w') as console:
        console.write(anaconda_success)
    wait_for_condition(self.check_console_log_registered)
    wait_for_condition(lambda: self.check_cached_log_contents(anaconda_success))
    with session.begin():
        data_setup.mark_recipe_installation_finished(self.recipe)
        data_setup.mark_recipe_running(self.recipe, only=True)
    anaconda_failure = "Press 'OK' to reboot your system.\n"
    with open(self.console_log, 'a') as console:
        console.write(anaconda_failure)
    wait_for_condition(lambda: self.check_cached_log_contents(
            anaconda_success + anaconda_failure))
    with session.begin():
        task = self.recipe.tasks[0]
        session.refresh(task)
        # the failure string must have been ignored: task still running
        self.assertEquals(task.status, TaskStatus.running)
def test_install_start(self): with session.begin(): system = data_setup.create_system(lab_controller=self.lc) recipe = data_setup.create_recipe() data_setup.create_job_for_recipes([recipe]) data_setup.mark_recipe_running(recipe, system=system) self.server.auth.login_password(self.lc.user.user_name, u'logmein') self.server.recipes.install_start(recipe.id) with session.begin(): session.expire_all() assert_datetime_within(recipe.watchdog.kill_time, tolerance=datetime.timedelta(seconds=10), reference=datetime.datetime.utcnow() + datetime.timedelta(hours=3)) self.assertEqual(recipe.tasks[0].results[0].result, TaskResult.pass_) self.assertEqual(recipe.tasks[0].results[0].path, u'/start') self.assertEqual(recipe.tasks[0].results[0].log, u'Install Started')
def test_recipe_running_then_cancelled(self): """ This tests the case where the recipe is running, has a valid reservation request, but is cancelled before it's completed. """ recipe = data_setup.create_recipe(task_list=[Task.by_name(u"/distribution/install")] * 2, reservesys=True) job = data_setup.create_job_for_recipes([recipe]) data_setup.mark_recipe_running(recipe) # we want at least one task to be Completed here # https://bugzilla.redhat.com/show_bug.cgi?id=1195558 job.recipesets[0].recipes[0].tasks[0].stop() job.recipesets[0].recipes[0].tasks[1].start() job.update_status() self.assertEqual(job.recipesets[0].recipes[0].status, TaskStatus.running) job.recipesets[0].cancel() job.update_status() self.assertEqual(job.recipesets[0].recipes[0].status, TaskStatus.cancelled)
def test_grouped_counts(self): lc = data_setup.create_labcontroller() manual_ia64_system = data_setup.create_system(lab_controller=lc, arch="ia64") data_setup.create_manual_reservation(manual_ia64_system, start=datetime.datetime(2012, 1, 1, 0, 0, 0)) manual_ppc_system = data_setup.create_system(lab_controller=lc, arch="ppc") data_setup.create_manual_reservation(manual_ppc_system, start=datetime.datetime(2012, 1, 1, 0, 0, 0)) recipe_ia64_system = data_setup.create_system(lab_controller=lc, arch="ia64") data_setup.mark_recipe_running(data_setup.create_job().recipesets[0].recipes[0], system=recipe_ia64_system) session.flush() counts = system_utilisation_counts_by_group( Arch.arch, System.query.join(System.arch).filter(System.lab_controller == lc) ) print counts self.assertEqual(counts["ia64"]["recipe"], 1) self.assertEqual(counts["ia64"]["manual"], 1) self.assertEqual(counts["ppc"]["manual"], 1)
def test_finished_recipe_with_unstarted_guests(self): # host completes, but guest never started job = data_setup.create_job(num_recipes=1, num_guestrecipes=1) data_setup.mark_job_waiting(job) data_setup.mark_recipe_running(job.recipesets[0].recipes[0], only=True) job.recipesets[0].recipes[0].tasks[-1].stop() job.update_status() self.assertEquals(job.recipesets[0].recipes[0].status, TaskStatus.completed) self.assertEquals(job.recipesets[0].recipes[0].guests[0].status, TaskStatus.aborted) # host aborts, but guest never started job = data_setup.create_job(num_recipes=1, num_guestrecipes=1) data_setup.mark_job_waiting(job) job.recipesets[0].recipes[0].abort(msg="blorf") job.update_status() self.assertEquals(job.recipesets[0].recipes[0].status, TaskStatus.aborted) self.assertEquals(job.recipesets[0].recipes[0].guests[0].status, TaskStatus.aborted)
def test_executed_tasks_guest_filter(self): with session.begin(): task = data_setup.create_task() fqdn = 'test_executed_tasks_guest_fqdn_filter.invalid' distro_tree = data_setup.create_distro_tree() recipe = data_setup.create_recipe(distro_tree=distro_tree) guestrecipe = data_setup.create_guestrecipe( host=recipe, task_name=task.name, distro_tree=distro_tree) data_setup.create_job_for_recipes([recipe, guestrecipe]) data_setup.mark_recipe_running(recipe) data_setup.mark_recipe_running(guestrecipe, fqdn=fqdn) b = self.browser b.get(get_server_base() + 'tasks/%d' % task.id) b.find_element_by_id('form_system').click() b.find_element_by_id('form_system').send_keys(fqdn) b.find_element_by_id('form').submit() self.check_recipetask_present_in_results(guestrecipe.tasks[0])
def test_executed_tasks_guest_filter(self): with session.begin(): task = data_setup.create_task() fqdn = 'test_executed_tasks_guest_fqdn_filter.invalid' distro_tree = data_setup.create_distro_tree() recipe = data_setup.create_recipe(distro_tree=distro_tree) guestrecipe = data_setup.create_guestrecipe(host=recipe, task_name=task.name, distro_tree=distro_tree) data_setup.create_job_for_recipes([recipe, guestrecipe]) data_setup.mark_recipe_running(recipe) data_setup.mark_recipe_running(guestrecipe, fqdn=fqdn) b = self.browser b.get(get_server_base() + 'tasks/%d' % task.id) b.find_element_by_id('form_system').click() b.find_element_by_id('form_system').send_keys(fqdn) b.find_element_by_id('form').submit() self.check_recipetask_present_in_results(guestrecipe.tasks[0])
def test_get_system_with_running_hardware_scan_recipe(self): # The bug was a circular reference from system -> recipe -> system # which caused JSON serialization to fail. with session.begin(): Job.inventory_system_job(data_setup.create_distro_tree(), owner=self.owner, system=self.system) recipe = self.system.find_current_hardware_scan_recipe() data_setup.mark_recipe_running(recipe, system=self.system) response = requests.get(get_server_base() + '/systems/%s/' % self.system.fqdn, headers={'Accept': 'application/json'}) response.raise_for_status() in_progress_scan = response.json()['in_progress_scan'] self.assertEquals(in_progress_scan['recipe_id'], recipe.id) self.assertEquals(in_progress_scan['status'], u'Running') self.assertEquals(in_progress_scan['job_id'], recipe.recipeset.job.t_id)
def test_cannot_return_running_recipe(self): with session.begin(): recipe = data_setup.create_recipe() data_setup.create_job_for_recipes([recipe]) data_setup.mark_recipe_running(recipe) system = recipe.resource.system b = self.browser login(b) b.get(get_server_base() + 'view/%s' % system.fqdn) # "Return" button should be absent b.find_element_by_xpath('//div[contains(@class, "system-quick-usage")' ' and not(.//a[text()="Return"])]') # try doing it directly s = requests.Session() requests_login(s) response = put_json(get_server_base() + 'systems/%s/reservations/+current' % system.fqdn, session=s, data=dict(finish_time='now')) self.assertEquals(response.status_code, 400) self.assertEquals(response.text, 'Cannot return system with running %s' % recipe.t_id)
def test_finished_recipe_with_unstarted_guests(self): # host completes, but guest never started job = data_setup.create_job(num_recipes=1, num_guestrecipes=1) data_setup.mark_job_waiting(job) data_setup.mark_recipe_running(job.recipesets[0].recipes[0], only=True) job.recipesets[0].recipes[0].tasks[-1].stop() job.update_status() self.assertEquals(job.recipesets[0].recipes[0].status, TaskStatus.completed) self.assertEquals(job.recipesets[0].recipes[0].guests[0].status, TaskStatus.aborted) # host aborts, but guest never started job = data_setup.create_job(num_recipes=1, num_guestrecipes=1) data_setup.mark_job_waiting(job) job.recipesets[0].recipes[0].abort(msg='blorf') job.update_status() self.assertEquals(job.recipesets[0].recipes[0].status, TaskStatus.aborted) self.assertEquals(job.recipesets[0].recipes[0].guests[0].status, TaskStatus.aborted)
def test_watchdog_extend_by_fqdn(self): with session.begin(): lc = data_setup.create_labcontroller() system = data_setup.create_system(lab_controller=lc) recipe = data_setup.create_recipe() data_setup.create_job_for_recipes([recipe ]) data_setup.mark_recipe_running(recipe , system=system) run_client(['bkr','watchdog-extend', '--by=600', system.fqdn]) with session.begin(): session.expire_all() assert_datetime_within(recipe.watchdog.kill_time, tolerance=datetime.timedelta(seconds=10), reference=datetime.datetime.utcnow() + datetime.timedelta(seconds=600)) # nonexistent fqdn try: run_client(['bkr', 'watchdog-extend', 'ireallydontexistblah.test.fqdn']) self.fail('Must raise') except ClientError as e: self.assertIn('Cannot find any recipe running on ireallydontexistblah.test.fqdn', e.stderr_output)
def setUp(self): with session.begin(): self.recipe = data_setup.create_recipe() data_setup.create_job_for_recipes([self.recipe]) data_setup.mark_recipe_running(self.recipe) self.recipe.logs[:] = [ LogRecipe(path=u'/', filename=u'console.log'), LogRecipe(path=u'some-dir', filename=u'some-file.txt') ] self.task = self.recipe.tasks[0] self.task.logs[:] = [ LogRecipeTask(path=u'/', filename=u'TESTOUT.log'), LogRecipeTask(path=u'debug', filename=u'.task_beah_raw') ] self.task.pass_(u'', 0, u'Pass') self.result = self.recipe.tasks[0].results[0] self.result.logs[:] = [ LogRecipeTaskResult(path=u'/', filename=u'test.log'), LogRecipeTaskResult(path=u'some-dir', filename=u'some-file.txt') ]
def test_task_roles_visible_between_hosts_and_guests(self): # Hosts and guests can all see each others' task roles now. Previously # they were not visible to each other. with session.begin(): hostrecipe = data_setup.create_recipe() guestrecipe_server = data_setup.create_guestrecipe(host=hostrecipe) guestrecipe_client = data_setup.create_guestrecipe(host=hostrecipe) job = data_setup.create_job_for_recipes([hostrecipe, guestrecipe_server, guestrecipe_client]) hostrecipe.tasks[0].role = u'SERVERS' guestrecipe_server.tasks[0].role = u'SERVERS' guestrecipe_client.tasks[0].role = u'CLIENTS' system = data_setup.create_system(fqdn=u'host.bz960434') data_setup.mark_recipe_running(hostrecipe, system=system) data_setup.mark_recipe_running(guestrecipe_server, fqdn=u'guestserver.bz960434') data_setup.mark_recipe_running(guestrecipe_client, fqdn=u'guestclient.bz960434') self.server.auth.login_password(self.lc.user.user_name, u'logmein') expected_peer_roles = { 'SERVERS': ['host.bz960434', 'guestserver.bz960434'], 'CLIENTS': ['guestclient.bz960434'], 'STANDALONE': ['host.bz960434', 'guestserver.bz960434', 'guestclient.bz960434'], } for recipe in [hostrecipe, guestrecipe_server, guestrecipe_client]: self.assertEquals( self.server.recipes.tasks.peer_roles(recipe.tasks[0].id), expected_peer_roles)
def test_recipe_running_then_cancelled(self): """ This tests the case where the recipe is running, has a valid reservation request, but is cancelled before it's completed. """ with session.begin(): recipe = data_setup.create_recipe( task_list=[Task.by_name(u'/distribution/install')], reservesys=True) job = data_setup.create_job_for_recipes([recipe]) job_id = job.id data_setup.mark_recipe_running(recipe) job._mark_dirty() beakerd.update_dirty_jobs() with session.begin(): job = Job.by_id(job_id) self.assertEqual(job.recipesets[0].recipes[0].status, TaskStatus.running) job.recipesets[0].cancel() beakerd.update_dirty_jobs() with session.begin(): job = Job.by_id(job_id) self.assertEqual(job.recipesets[0].recipes[0].status, TaskStatus.cancelled)
def setUp(self): with session.begin(): self.system = data_setup.create_system( lab_controller=self.get_lc()) self.recipe = data_setup.create_recipe() self.guest_recipe = data_setup.create_guestrecipe(self.recipe) job = data_setup.create_job_for_recipes( [self.recipe, self.guest_recipe]) self.addCleanup(self.cleanup_job, job) data_setup.mark_recipe_running(self.recipe, system=self.system) data_setup.mark_recipe_installing(self.guest_recipe, system=self.system) self.console_log = os.path.join(get_conf().get('CONSOLE_LOGS'), self.system.fqdn) self.cached_console_log = os.path.join( get_conf().get('CACHEPATH'), 'recipes', str(self.recipe.id // 1000) + '+', str(self.recipe.id), 'console.log') self.first_line = 'Here is the first line of the log file.\n' open(self.console_log, 'w').write(self.first_line) self.watchdog = Watchdog() self.monitor = Monitor( { 'recipe_id': self.recipe.id, 'is_virt_recipe': False, 'system': self.system.fqdn }, self.watchdog) self.monitor_guest = Monitor( { 'recipe_id': self.guest_recipe.id, 'is_virt_recipe': False, 'system': None }, self.watchdog)
def test_shows_other_recipes_in_recipeset_holding_this_reservation(self): # Beaker keeps all systems in a recipe set reserved until all recipes # in the set are finished. This is to allow for things like multi-host # tests and virt testing, where one recipe might "drop off the end" but # the other machines still want to talk to it. # This is a frequent gotcha for users ("why is this system still # reserved even though the recipe is finished?") so we went to some # lengths in the new recipe page to indicate when this happens. with session.begin(): job = data_setup.create_job(num_recipes=2, num_guestrecipes=1) recipe = job.recipesets[0].recipes[0] data_setup.mark_recipe_complete(recipe) data_setup.mark_recipe_running(job.recipesets[0].recipes[1]) b = self.browser go_to_recipe_view(b, recipe, tab='Reservation') tab = b.find_element_by_id('reservation') self.assertEqual(tab.find_element_by_xpath('div/p[2]').text, 'However, the system has not been released yet because ' 'the following recipes are still running:') running_recipes_list_items = [li.text for li in tab.find_elements_by_xpath('.//ul[@class="running-recipes-list"]/li')] self.assertEqual(running_recipes_list_items, [job.recipesets[0].recipes[1].t_id, job.recipesets[0].recipes[2].t_id])