def test_peer_roles(self):
    """peer_roles() should report both recipe-level and task-level
    role -> FQDN mappings for every task in the recipe set."""
    with session.begin():
        distro_tree = data_setup.create_distro_tree()
        lab_controller = data_setup.create_labcontroller()
        fqdns = [u'server.peer-roles.invalid',
                 u'clientone.peer-roles.invalid',
                 u'clienttwo.peer-roles.invalid']
        systems = [data_setup.create_system(fqdn=fqdn,
                                            lab_controller=lab_controller)
                   for fqdn in fqdns]
        job = data_setup.create_job_for_recipes([
            data_setup.create_recipe(distro_tree=distro_tree, role=role)
            for role in (u'SERVERS', u'CLIENTS', u'CLIENTS')])
        recipes = job.recipesets[0].recipes
        recipes[0].tasks[0].role = None
        # Normally you wouldn't use the same role name with different
        # meaning at the task level, because that would just get
        # confusing... but it is possible
        recipes[1].tasks[0].role = u'SERVERS'
        recipes[2].tasks[0].role = u'CLIENTTWO'
        for recipe, system in zip(recipes, systems):
            data_setup.mark_recipe_running(recipe, system=system)
    self.server.auth.login_password(self.lc.user.user_name, u'logmein')
    expected = {
        'SERVERS': ['server.peer-roles.invalid', 'clientone.peer-roles.invalid'],
        'CLIENTS': ['clientone.peer-roles.invalid', 'clienttwo.peer-roles.invalid'],
        'None': ['server.peer-roles.invalid'],
        'CLIENTTWO': ['clienttwo.peer-roles.invalid'],
    }
    # Every task in the recipe set sees the same peer mapping.
    for recipe in job.recipesets[0].recipes:
        self.assertEquals(
            self.server.recipes.tasks.peer_roles(recipe.tasks[0].id),
            expected)
def test_role_fqdns_not_duplicated(self):
    """Regression test for bz951283: when a task role matches its
    recipe role, each FQDN should appear only once in peer_roles()."""
    with session.begin():
        distro_tree = data_setup.create_distro_tree()
        lab_controller = data_setup.create_labcontroller()
        server_system = data_setup.create_system(
            fqdn=u'server.bz951283', lab_controller=lab_controller)
        client_system = data_setup.create_system(
            fqdn=u'client.bz951283', lab_controller=lab_controller)
        job = data_setup.create_job_for_recipes([
            data_setup.create_recipe(distro_tree=distro_tree, role=u'SERVERS'),
            data_setup.create_recipe(distro_tree=distro_tree, role=u'CLIENTS'),
        ])
        recipes = job.recipesets[0].recipes
        # same roles on the tasks as on the recipes
        recipes[0].tasks[0].role = u'SERVERS'
        recipes[1].tasks[0].role = u'CLIENTS'
        for recipe, system in zip(recipes, [server_system, client_system]):
            data_setup.mark_recipe_running(recipe, system=system)
    self.server.auth.login_password(self.lc.user.user_name, u'logmein')
    expected = {
        'SERVERS': ['server.bz951283'],
        'CLIENTS': ['client.bz951283'],
    }
    for recipe in job.recipesets[0].recipes:
        self.assertEquals(
            self.server.recipes.tasks.peer_roles(recipe.tasks[0].id),
            expected)
def test_concurrent_recipe_completion(self):
    # This test simulates two recipes finishing at the same time. So we
    # have two concurrent transactions both updating the respective task states.
    # Previously there was no separate job.update_status() step, so the two
    # transactions would update the job status using out-of-date values in
    # both transactions, leaving the job running.
    with session.begin():
        recipe1 = data_setup.create_recipe()
        recipe2 = data_setup.create_recipe()
        job = data_setup.create_job_for_recipes([recipe1, recipe2])
        assert len(recipe1.tasks) == 1
        assert len(recipe2.tasks) == 1
        data_setup.mark_recipe_running(recipe1)
        data_setup.mark_recipe_running(recipe2)
        recipe1.tasks[-1].pass_(u'/', 0, u'Pass')
        recipe2.tasks[-1].pass_(u'/', 0, u'Pass')
    # Complete the recipes "concurrently" in two separate transactions
    class RecipeCompletionThread(Thread):
        def __init__(self, recipe_id=None, **kwargs):
            super(RecipeCompletionThread, self).__init__(**kwargs)
            self.recipe_id = recipe_id
            # set once this thread has opened its transaction and loaded
            # the recipe, so the main thread knows both are in flight
            self.ready_evt = Event()
            # the main thread sets this to let the recipe finish
            self.continue_evt = Event()
        def run(self):
            session.begin()
            recipe = Recipe.by_id(self.recipe_id)
            self.ready_evt.set()
            self.continue_evt.wait()
            # stopping the last task completes the recipe
            recipe.tasks[-1].stop()
            session.commit()
    thread1 = RecipeCompletionThread(name='recipe1', recipe_id=recipe1.id)
    thread2 = RecipeCompletionThread(name='recipe2', recipe_id=recipe2.id)
    thread1.start()
    thread2.start()
    # Wait for both threads to start their transactions
    thread1.ready_evt.wait()
    thread2.ready_evt.wait()
    # Allow recipe 1 to complete
    thread1.continue_evt.set()
    thread1.join()
    with session.begin():
        session.expire_all()
        job.update_status()
        self.assertEquals(recipe1.status, TaskStatus.completed)
        self.assertEquals(recipe1.ptasks, 1)
        # only one of two recipes is done, so the job is still running
        self.assertEquals(job.status, TaskStatus.running)
        self.assertEquals(job.ptasks, 1)
    # Now recipe 2 completes
    thread2.continue_evt.set()
    thread2.join()
    with session.begin():
        session.expire_all()
        job.update_status()
        self.assertEquals(recipe2.status, TaskStatus.completed)
        self.assertEquals(recipe2.ptasks, 1)
        self.assertEquals(job.status, TaskStatus.completed)
        self.assertEquals(job.ptasks, 2)
def test_concurrent_recipe_completion(self):
    # This test simulates two recipes finishing at the same time. So we
    # have two concurrent transactions both updating the respective task states.
    # Previously there was no separate job.update_status() step, so the two
    # transactions would update the job status using out-of-date values in
    # both transactions, leaving the job running.
    with session.begin():
        recipe1 = data_setup.create_recipe()
        recipe2 = data_setup.create_recipe()
        job = data_setup.create_job_for_recipes([recipe1, recipe2])
        assert len(recipe1.tasks) == 1
        assert len(recipe2.tasks) == 1
        data_setup.mark_recipe_running(recipe1)
        data_setup.mark_recipe_running(recipe2)
        recipe1.tasks[-1].pass_(u"/", 0, u"Pass")
        recipe2.tasks[-1].pass_(u"/", 0, u"Pass")
    # Complete the recipes "concurrently" in two separate transactions
    class RecipeCompletionThread(Thread):
        def __init__(self, recipe_id=None, **kwargs):
            super(RecipeCompletionThread, self).__init__(**kwargs)
            self.recipe_id = recipe_id
            # set once this thread has opened its transaction and loaded
            # the recipe, so the main thread knows both are in flight
            self.ready_evt = Event()
            # the main thread sets this to let the recipe finish
            self.continue_evt = Event()
        def run(self):
            session.begin()
            recipe = Recipe.by_id(self.recipe_id)
            self.ready_evt.set()
            self.continue_evt.wait()
            # stopping the last task completes the recipe
            recipe.tasks[-1].stop()
            session.commit()
    thread1 = RecipeCompletionThread(name="recipe1", recipe_id=recipe1.id)
    thread2 = RecipeCompletionThread(name="recipe2", recipe_id=recipe2.id)
    thread1.start()
    thread2.start()
    # Wait for both threads to start their transactions
    thread1.ready_evt.wait()
    thread2.ready_evt.wait()
    # Allow recipe 1 to complete
    thread1.continue_evt.set()
    thread1.join()
    with session.begin():
        session.expire_all()
        job.update_status()
        self.assertEquals(recipe1.status, TaskStatus.completed)
        self.assertEquals(recipe1.ptasks, 1)
        # only one of two recipes is done, so the job is still running
        self.assertEquals(job.status, TaskStatus.running)
        self.assertEquals(job.ptasks, 1)
    # Now recipe 2 completes
    thread2.continue_evt.set()
    thread2.join()
    with session.begin():
        session.expire_all()
        job.update_status()
        self.assertEquals(recipe2.status, TaskStatus.completed)
        self.assertEquals(recipe2.ptasks, 1)
        self.assertEquals(job.status, TaskStatus.completed)
        self.assertEquals(job.ptasks, 2)
def test_role_fqdns_not_duplicated(self):
    """bz951283: an FQDN must not be listed twice in peer_roles() when
    the task role repeats the recipe role."""
    with session.begin():
        dt = data_setup.create_distro_tree()
        lc = data_setup.create_labcontroller()
        fqdns = (u'server.bz951283', u'client.bz951283')
        systems = [data_setup.create_system(fqdn=fqdn, lab_controller=lc)
                   for fqdn in fqdns]
        job = data_setup.create_job_for_recipes([
            data_setup.create_recipe(distro_tree=dt, role=u'SERVERS'),
            data_setup.create_recipe(distro_tree=dt, role=u'CLIENTS'),
        ])
        # same roles on the tasks as on the recipes
        job.recipesets[0].recipes[0].tasks[0].role = u'SERVERS'
        job.recipesets[0].recipes[1].tasks[0].role = u'CLIENTS'
        for recipe, system in zip(job.recipesets[0].recipes, systems):
            data_setup.mark_recipe_running(recipe, system=system)
    self.server.auth.login_password(self.lc.user.user_name, u'logmein')
    expected = {
        'SERVERS': ['server.bz951283'],
        'CLIENTS': ['client.bz951283'],
    }
    for i in range(2):
        task_id = job.recipesets[0].recipes[i].tasks[0].id
        self.assertEquals(
            self.server.recipes.tasks.peer_roles(task_id), expected)
def test_delayed_jobs(self):
    """delayed_jobs() reports jobs that have sat Queued (or partly
    Scheduled) longer than the configured delayed-job age, and skips
    recently submitted jobs."""
    with session.begin():
        # A queued job submitted a long time ago
        recipe = data_setup.create_recipe()
        old_job = data_setup.create_job_for_recipes([recipe])
        old_job.owner = self.user
        data_setup.mark_job_queued(old_job)
        old_job.recipesets[0].queue_time = \
            datetime.utcnow() - timedelta(days=self.delayed_job_age)
        # A job with two recipes, one Queued and one Scheduled,
        # which was submitted a long time ago
        queued_recipe = data_setup.create_recipe()
        scheduled_recipe = data_setup.create_recipe()
        multi_recipe_job = data_setup.create_job_for_recipes(
            [queued_recipe, scheduled_recipe])
        multi_recipe_job.owner = self.user
        # mark one recipe Queued
        queued_recipe.process()
        queued_recipe.queue()
        # ... and the other Scheduled
        scheduled_recipe.process()
        scheduled_recipe.queue()
        scheduled_recipe.schedule()
        data_setup.mark_job_queued(multi_recipe_job)
        multi_recipe_job.recipesets[0].queue_time = \
            datetime.utcnow() - timedelta(days=self.delayed_job_age)
        # A job submitted just now, which should not be reported.
        # NOTE(review): this reuses the first job's recipe object —
        # presumably harmless for this report; confirm against
        # data_setup.create_job_for_recipes semantics.
        recently_submitted_job = data_setup.create_job_for_recipes([recipe])
        recently_submitted_job.owner = self.user
        data_setup.mark_job_queued(recently_submitted_job)
        beaker_usage = BeakerUsage(self.user, self.reservation_expiry,
                                   self.reservation_length,
                                   self.waiting_recipe_age,
                                   self.delayed_job_age)
        delayed_jobs = beaker_usage.delayed_jobs()
        self.assertEqual(len(delayed_jobs), 2)
        self.assertEqual(absolute_url('/jobs/%s' % old_job.id),
                         delayed_jobs[0][1])
        self.assertEqual(absolute_url('/jobs/%s' % multi_recipe_job.id),
                         delayed_jobs[1][1])
def test_open_in_demand_systems(self):
    """open_in_demand_systems() lists manually reserved systems that
    have recipes waiting on them, ignoring deleted jobs and systems
    with no waiting recipes."""
    with session.begin():
        reservation_start = \
            datetime.utcnow() - timedelta(days=self.reservation_length)
        stale_queue_time = \
            datetime.utcnow() - timedelta(hours=self.waiting_recipe_age)
        # system with waiting recipes
        busy_system = data_setup.create_system()
        data_setup.create_manual_reservation(
            busy_system, start=reservation_start, user=self.user)
        recipe = data_setup.create_recipe()
        recipe.systems[:] = [busy_system]
        job = data_setup.create_job_for_recipes([recipe])
        data_setup.mark_job_queued(job)
        job.recipesets[0].queue_time = stale_queue_time
        # Create another system with waiting recipes and delete the job
        recipe_in_deleted_job = data_setup.create_recipe()
        recipe_in_deleted_job.systems[:] = [busy_system]
        deleted_job = data_setup.create_job_for_recipes(
            [recipe_in_deleted_job])
        data_setup.mark_job_queued(deleted_job)
        deleted_job.recipesets[0].queue_time = stale_queue_time
        deleted_job.delete()
        # system with no waiting recipes
        idle_system = data_setup.create_system()
        data_setup.create_manual_reservation(
            idle_system, start=reservation_start, user=self.user)
        beaker_usage = BeakerUsage(self.user, self.reservation_expiry,
                                   self.reservation_length,
                                   self.waiting_recipe_age,
                                   self.delayed_job_age)
        open_in_demand_systems = beaker_usage.open_in_demand_systems()
        self.assertEqual(len(open_in_demand_systems), 1)
        # one waiting recipe, on the busy system
        self.assertEqual(open_in_demand_systems[0][1], 1)
        self.assertEqual(open_in_demand_systems[0][2], busy_system.fqdn)
def test_install_duration_by_resource(self):
    """The install-duration-by-resource report aggregates min/max/avg
    install hours per system FQDN, rolling guest recipes into
    'All Guest' and OpenStack recipes into 'All OpenStack'."""
    system_recipe = data_setup.create_recipe()
    guest_recipe = data_setup.create_guestrecipe(host=system_recipe)
    data_setup.mark_job_complete(
        data_setup.create_job_for_recipes([system_recipe, guest_recipe]))
    virt_recipe = data_setup.create_recipe()
    data_setup.create_job_for_recipes([virt_recipe])
    virt_recipe2 = data_setup.create_recipe()
    data_setup.create_job_for_recipes([virt_recipe2])
    data_setup.mark_recipe_complete(virt_recipe, virt=True)
    data_setup.mark_recipe_complete(virt_recipe2, virt=True)
    system_recipe2 = data_setup.create_recipe()
    guest_recipe2 = data_setup.create_guestrecipe(host=system_recipe2)
    job2 = data_setup.create_job_for_recipes(
        [system_recipe2, guest_recipe2])
    data_setup.mark_job_complete(job2, system=system_recipe.resource.system)
    # Give each recipe a known install duration, in hours.
    durations = [
        (virt_recipe, 1), (virt_recipe2, 2),
        (guest_recipe, 2), (guest_recipe2, 3),
        (system_recipe, 1), (system_recipe2, 3),
    ]
    for recipe, hours in durations:
        recipe.installation.install_finished = (
            recipe.installation.install_started +
            datetime.timedelta(hours=hours))
    session.flush()
    rows = self.execute_reporting_query('install-duration-by-resource')
    all_rows = rows.fetchall()
    guest_rows = [row for row in all_rows if row.fqdn == 'All Guest']
    virt_rows = [row for row in all_rows if row.fqdn == 'All OpenStack']
    system_rows = [row for row in all_rows
                   if row.fqdn == system_recipe.resource.fqdn]
    self.assertEquals(len(virt_rows), 1, virt_rows)
    self.assertEquals(virt_rows[0].min_install_hours, 1)
    self.assertEquals(virt_rows[0].max_install_hours, 2)
    self.assertEquals(virt_rows[0].avg_install_hours, Decimal('1.5'))
    self.assertEquals(len(guest_rows), 1, guest_rows)
    self.assertEquals(guest_rows[0].min_install_hours, 2)
    self.assertEquals(guest_rows[0].max_install_hours, 3)
    self.assertEquals(guest_rows[0].avg_install_hours, Decimal('2.5'))
    self.assertEquals(len(system_rows), 1, system_rows)
    self.assertEquals(system_rows[0].min_install_hours, 1)
    self.assertEquals(system_rows[0].max_install_hours, 3)
    self.assertEquals(system_rows[0].avg_install_hours, Decimal('2.0'))
def setUp(self):
    """Create one job owned by self.owner with three recipes: a plain
    one, one requesting a reservation, and one explicitly without."""
    with session.begin():
        self.owner = data_setup.create_user(password='******')
        self.recipe = data_setup.create_recipe()
        self.recipe_with_reservation_request = \
            data_setup.create_recipe(reservesys=True)
        self.recipe_without_reservation_request = data_setup.create_recipe()
        recipes = [self.recipe,
                   self.recipe_with_reservation_request,
                   self.recipe_without_reservation_request]
        self.job = data_setup.create_job_for_recipes(recipes,
                                                     owner=self.owner)
def test_handles_null_recipe_task_id(self):
    """The watchdogs page must render even when a watchdog has no
    current recipe task (a recipe that is still waiting)."""
    with session.begin():
        dt = data_setup.create_distro_tree()
        running_recipe = data_setup.create_recipe(distro_tree=dt)
        waiting_recipe = data_setup.create_recipe(distro_tree=dt)
        data_setup.create_job_for_recipes(
            [running_recipe, waiting_recipe])
        data_setup.mark_recipe_running(running_recipe)
        data_setup.mark_recipe_waiting(waiting_recipe)
        # the waiting recipe's watchdog has no associated task
        self.assertEquals(waiting_recipe.watchdog.recipetask, None)
    browser = self.browser
    browser.get(get_server_base() + "watchdogs/")
    self.assertEquals(browser.title, "Watchdogs")
def test_resource_install_failures(self):
    """The install-failure-count-by-resource report counts aborted
    (failed-install) recipes per resource, and must not count recipes
    that are still installing.

    Fix: the middle block of assertions previously reused the
    guest_rows/virt_rows lists computed from the *first* query, so the
    "running recipes are not counted" check never looked at fresh data.
    They are now recomputed from the second query's rows.
    """
    # Get existing state to later compare against
    rows = self.execute_reporting_query('install-failure-count-by-resource')
    all_rows = [row for row in rows]
    guest_rows = [row for row in all_rows if row.fqdn == 'All Guest']
    virt_rows = [row for row in all_rows if row.fqdn == 'All OpenStack']
    existing_failed_guests = guest_rows[0].failed_recipes
    existing_failed_virt = virt_rows[0].failed_recipes
    system_recipe = data_setup.create_recipe()
    guest_recipe = data_setup.create_guestrecipe(host=system_recipe)
    virt_recipe = data_setup.create_recipe()
    job = data_setup.create_job_for_recipes(
        [guest_recipe, virt_recipe, system_recipe])
    data_setup.mark_recipe_installing(virt_recipe, virt=True)
    data_setup.mark_recipe_installing(system_recipe)
    data_setup.mark_recipe_installing(guest_recipe)
    session.flush()
    # Test we don't count running recipes
    rows = self.execute_reporting_query('install-failure-count-by-resource')
    all_rows = [row for row in rows]
    # recompute ALL row groups from the fresh query (previously
    # guest_rows/virt_rows were stale, making these checks vacuous)
    guest_rows = [row for row in all_rows if row.fqdn == 'All Guest']
    virt_rows = [row for row in all_rows if row.fqdn == 'All OpenStack']
    system_rows = [row for row in all_rows
                   if row.fqdn == system_recipe.resource.fqdn]
    self.assertEquals(len(virt_rows), 1, virt_rows)
    self.assertEquals(existing_failed_virt, virt_rows[0].failed_recipes)
    self.assertEquals(len(guest_rows), 1, guest_rows)
    self.assertEquals(existing_failed_guests, guest_rows[0].failed_recipes)
    self.assertEquals(len(system_rows), 1, system_rows)
    self.assertEquals(system_rows[0].failed_recipes, 0)
    # Test completed recipes
    job.abort()
    job.update_status()
    session.flush()
    rows = self.execute_reporting_query('install-failure-count-by-resource')
    all_rows = [row for row in rows]
    guest_rows = [row for row in all_rows if row.fqdn == 'All Guest']
    virt_rows = [row for row in all_rows if row.fqdn == 'All OpenStack']
    system_rows = [row for row in all_rows
                   if row.fqdn == system_recipe.resource.fqdn]
    self.assertEquals(len(virt_rows), 1, virt_rows)
    self.assertEquals(virt_rows[0].failed_recipes, existing_failed_virt + 1)
    self.assertEquals(len(guest_rows), 1, guest_rows)
    self.assertEquals(guest_rows[0].failed_recipes,
                      existing_failed_guests + 1)
    self.assertEquals(len(system_rows), 1, system_rows)
    self.assertEquals(system_rows[0].failed_recipes, 1)
def _create_recipe_with_user_defined_distro(self, **kwargs):
    """Create and complete a recipe using a user-defined (custom)
    distro, optionally pinned to kwargs['osmajor']. Records the recipe
    id on self.recipe_id and returns the recipe."""
    with session.begin():
        install_task = Task.by_name(u'/distribution/check-install')
        reserve_task = Task.by_name(u'/distribution/reservesys')
        lc = create_lab_controller()
        system = create_x86_64_automated(lc)
        recipe_kwargs = dict(custom_distro=True,
                             task_list=[install_task, reserve_task])
        # only pass osmajor through when the caller supplied one
        if 'osmajor' in kwargs:
            recipe_kwargs['osmajor'] = kwargs['osmajor']
        recipe = data_setup.create_recipe(**recipe_kwargs)
        data_setup.create_job_for_recipes([recipe], owner=create_user(),
                                          whiteboard=u'')
        data_setup.mark_recipe_complete(recipe, system=system)
        self.recipe_id = recipe.id
        return recipe
def test_handles_null_recipe_task_id(self):
    """A watchdog with no current recipe task (recipe still waiting)
    must not break the watchdogs page."""
    with session.begin():
        distro_tree = data_setup.create_distro_tree()
        running = data_setup.create_recipe(distro_tree=distro_tree)
        waiting = data_setup.create_recipe(distro_tree=distro_tree)
        data_setup.create_job_for_recipes([running, waiting])
        data_setup.mark_recipe_running(running)
        data_setup.mark_recipe_waiting(waiting)
        self.assertEquals(waiting.watchdog.recipetask, None)
    b = self.browser
    b.get(get_server_base() + 'watchdogs/')
    self.assertEquals(b.title, 'Watchdogs')
def test_install_duration_by_resource(self):
    """Checks min/max/avg install hours per resource; guest recipes
    aggregate under 'All Guest' and OpenStack ones under
    'All OpenStack'."""
    system_recipe = data_setup.create_recipe()
    guest_recipe = data_setup.create_guestrecipe(host=system_recipe)
    data_setup.mark_job_complete(
        data_setup.create_job_for_recipes([system_recipe, guest_recipe]))
    virt_recipe = data_setup.create_recipe()
    data_setup.create_job_for_recipes([virt_recipe])
    virt_recipe2 = data_setup.create_recipe()
    data_setup.create_job_for_recipes([virt_recipe2])
    data_setup.mark_recipe_complete(virt_recipe, virt=True)
    data_setup.mark_recipe_complete(virt_recipe2, virt=True)
    system_recipe2 = data_setup.create_recipe()
    guest_recipe2 = data_setup.create_guestrecipe(host=system_recipe2)
    job2 = data_setup.create_job_for_recipes([system_recipe2, guest_recipe2])
    data_setup.mark_job_complete(job2, system=system_recipe.resource.system)
    # Assign a known install duration (in hours) to every recipe.
    for recipe, hours in [(virt_recipe, 1), (virt_recipe2, 2),
                          (guest_recipe, 2), (guest_recipe2, 3),
                          (system_recipe, 1), (system_recipe2, 3)]:
        recipe.installation.install_finished = \
            recipe.installation.install_started + \
            datetime.timedelta(hours=hours)
    session.flush()
    all_rows = self.execute_reporting_query(
        'install-duration-by-resource').fetchall()
    def rows_for(fqdn):
        # helper: rows of the report matching one fqdn label
        return [row for row in all_rows if row.fqdn == fqdn]
    guest_rows = rows_for('All Guest')
    virt_rows = rows_for('All OpenStack')
    system_rows = rows_for(system_recipe.resource.fqdn)
    self.assertEquals(len(virt_rows), 1, virt_rows)
    self.assertEquals(virt_rows[0].min_install_hours, 1)
    self.assertEquals(virt_rows[0].max_install_hours, 2)
    self.assertEquals(virt_rows[0].avg_install_hours, Decimal('1.5'))
    self.assertEquals(len(guest_rows), 1, guest_rows)
    self.assertEquals(guest_rows[0].min_install_hours, 2)
    self.assertEquals(guest_rows[0].max_install_hours, 3)
    self.assertEquals(guest_rows[0].avg_install_hours, Decimal('2.5'))
    self.assertEquals(len(system_rows), 1, system_rows)
    self.assertEquals(system_rows[0].min_install_hours, 1)
    self.assertEquals(system_rows[0].max_install_hours, 3)
    self.assertEquals(system_rows[0].avg_install_hours, Decimal('2.0'))
def test_by_log_server_only_returns_completed_recipesets(self):
    """by_log_server() must not return a recipe set while any of its
    recipes is still incomplete."""
    with session.begin():
        distro_tree = data_setup.create_distro_tree()
        completed = data_setup.create_recipe(distro_tree=distro_tree)
        incomplete = data_setup.create_recipe(distro_tree=distro_tree)
        job = data_setup.create_job_for_recipes([completed, incomplete])
        job.recipesets[0].lab_controller = self.lc
        data_setup.mark_recipe_running(
            incomplete,
            system=data_setup.create_system(lab_controller=self.lc))
        data_setup.mark_recipe_complete(
            completed,
            system=data_setup.create_system(lab_controller=self.lc))
    # one recipe is still running, so nothing is returned
    result = self.server.recipes.by_log_server(self.lc.fqdn)
    self.assertEqual(result, [])
def test_search_by_version(self):
    """Filtering task executions by a version glob should match only
    recipe tasks whose version fits the pattern."""
    with session.begin():
        task = data_setup.create_task()
        old_recipe = data_setup.create_recipe(task_list=[task])
        data_setup.create_job_for_recipes([old_recipe])
        old_recipe.tasks[0].version = u'1.0-0'
        recent_recipe = data_setup.create_recipe(task_list=[task])
        data_setup.create_job_for_recipes([recent_recipe])
        recent_recipe.tasks[0].version = u'2.3-4'
    browser = self.browser
    browser.get(get_server_base() + 'tasks%s' % task.name)
    browser.find_element_by_id('form_version').send_keys('1.0-*')
    browser.find_element_by_id('form').submit()
    self.check_recipetask_present_in_results(old_recipe.tasks[0])
    self.check_recipetask_absent_from_results(recent_recipe.tasks[0])
def test_watchdog_show_non_running_task(self):
    """watchdog-show prints 'N/A' for a task that is not running."""
    with session.begin():
        recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([recipe])
        data_setup.mark_recipe_waiting(recipe)
    task_id = recipe.tasks[0].id
    out = run_client(['bkr', 'watchdog-show', '%s' % task_id])
    self.assertEquals(out, '%s: N/A\n' % task_id, out)
def test_recipe_provisioned(self):
    # A provisioned (waiting) virt recipe should serve an iPXE script
    # built from the distro tree's http:// URL, pointing at the
    # kernel/initrd under pxeboot/ with the rendered kickstart URL.
    with session.begin():
        distro_tree = data_setup.create_distro_tree(
            arch=u'x86_64', osmajor=u'Fedora20',
            lab_controllers=[self.lc],
            urls=[
                u'nfs://example.nfs.test:/path/to/os',
                u'http://example.com/ipxe-test/F20/x86_64/os/'
            ])
        recipe = data_setup.create_recipe(distro_tree=distro_tree)
        data_setup.create_job_for_recipes([recipe])
        data_setup.mark_recipe_waiting(recipe, virt=True,
                                       lab_controller=self.lc)
    # the script is looked up by the OpenStack instance uuid
    response = requests.get(get_server_base() +
            'systems/by-uuid/%s/ipxe-script' % recipe.resource.instance_id)
    response.raise_for_status()
    self.assertEquals(response.text, """#!ipxe
kernel http://example.com/ipxe-test/F20/x86_64/os/pxeboot/vmlinuz console=tty0 console=ttyS0,115200n8 ks=%s noverifyssl netboot_method=ipxe
initrd http://example.com/ipxe-test/F20/x86_64/os/pxeboot/initrd
boot
""" % recipe.installation.rendered_kickstart.link)
def test_authenticated_user_can_comment_recipetask(self):
    """Any authenticated user (no special permissions needed) can leave
    a comment on a recipe task through the comments popover."""
    comments_link_xpath = ('//div[@class="task-comments"]'
                           '/div/a[@class="comments-link"]')
    with session.begin():
        recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([recipe])
        recipetask = recipe.tasks[0]
        # no special permissions required to comment
        user = data_setup.create_user(password=u'otheruser')
    comment_text = u'comments are fun'
    b = self.browser
    login(b, user=user.user_name, password='******')
    go_to_recipe_view(b, recipe, tab='Tasks')
    tab = b.find_element_by_id('tasks')
    tab.find_element_by_xpath(comments_link_xpath).click()
    popover = b.find_element_by_class_name('popover')
    popover.find_element_by_name('comment').send_keys(comment_text)
    popover.find_element_by_tag_name('form').submit()
    # the comment appearing in the list means submission finished
    popover.find_element_by_xpath(
        '//div[@class="comments"]//div[@class="comment"]'
        '/p[2][text()="%s"]' % comment_text)
    self.assertEqual(popover.find_element_by_name('comment').text, '')
    with session.begin():
        session.expire_all()
        self.assertEqual(recipetask.comments[0].user, user)
        self.assertEqual(recipetask.comments[0].comment, comment_text)
    # comments link should indicate the new comment
    self.assertEqual(
        tab.find_element_by_xpath(comments_link_xpath).text, '1')
def test_unknown_fqdns_dont_appear(self):
    # If we have a recipe where the FQDN is not known (for example
    # a guest that hasn't finished installing yet), previously it would
    # appear as the string 'None'. Now it's just not included.
    with session.begin():
        host_recipe = data_setup.create_recipe(role=u'SERVERS')
        guest_recipe = data_setup.create_guestrecipe(host=host_recipe,
                                                     role=u'CLIENTS')
        data_setup.create_job_for_recipes([host_recipe, guest_recipe])
        system = data_setup.create_system(fqdn=u'host.bz952948')
        data_setup.mark_recipe_running(host_recipe, system=system)
        data_setup.mark_recipe_waiting(guest_recipe)
        # the guest has no FQDN yet
        self.assertEquals(guest_recipe.resource.fqdn, None)
    self.server.auth.login_password(self.lc.user.user_name, u'logmein')
    self.assertEquals(
        self.server.recipes.tasks.peer_roles(host_recipe.tasks[0].id),
        {'SERVERS': ['host.bz952948'],
         'STANDALONE': ['host.bz952948'],
         'CLIENTS': []})
    self.assertEquals(
        self.server.recipes.tasks.peer_roles(guest_recipe.tasks[0].id),
        {'SERVERS': ['host.bz952948'],
         'STANDALONE': [],
         'CLIENTS': []})
def test_task_aborted_return_reservation(self):
    """
    This tests the case where the task was aborted, then the recipe
    goes to Reserved state and then finally the reservation is returned
    """
    with session.begin():
        recipe = data_setup.create_recipe(
            task_list=[Task.by_name(u'/distribution/install')],
            reservesys=True)
        job = data_setup.create_job_for_recipes([recipe])
        job_id = job.id
        data_setup.mark_recipe_tasks_finished(
            recipe, result=TaskResult.warn,
            task_status=TaskStatus.aborted)
        job._mark_dirty()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job_id)
        reserved_recipe = job.recipesets[0].recipes[0]
        # the reservation request still takes effect after the abort
        self.assertEqual(reserved_recipe.status, TaskStatus.reserved)
        reserved_recipe.return_reservation()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job_id)
        self.assertEqual(job.recipesets[0].recipes[0].status,
                         TaskStatus.aborted)
def test_recipe_running_then_cancelled(self):
    """
    This tests the case where the recipe is running, has a valid
    reservation request, but is cancelled before it's completed.
    """
    with session.begin():
        recipe = data_setup.create_recipe(
            task_list=[Task.by_name(u'/distribution/install')] * 2,
            reservesys=True)
        job = data_setup.create_job_for_recipes([recipe])
        job_id = job.id
        data_setup.mark_recipe_running(recipe)
        data_setup.mark_recipe_installation_finished(recipe)
        # we want at least one task to be Completed here
        # https://bugzilla.redhat.com/show_bug.cgi?id=1195558
        tasks = job.recipesets[0].recipes[0].tasks
        tasks[0].stop()
        tasks[1].start()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job_id)
        self.assertEqual(job.recipesets[0].recipes[0].status,
                         TaskStatus.running)
        job.recipesets[0].cancel()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job_id)
        # cancellation wins over the reservation request
        self.assertEqual(job.recipesets[0].recipes[0].status,
                         TaskStatus.cancelled)
def test_guestrecipes_appear_after_host(self):
    """Guest recipes are listed after their host on the job page, even
    when the guest row was inserted first (smaller id)."""
    with session.begin():
        # hack to force the GuestRecipe to be inserted first
        guest = data_setup.create_recipe(cls=GuestRecipe)
        job = data_setup.create_job_for_recipes([guest])
        session.flush()
        host = data_setup.create_recipe()
        job.recipesets[0].recipes.append(host)
        host.guests.append(guest)
        session.flush()
        self.assert_(guest.id < host.id)
    b = self.browser
    b.get(get_server_base() + 'jobs/%s' % job.id)
    recipe_order = [elem.text for elem in
                    b.find_elements_by_xpath('//a[@class="recipe-id"]')]
    self.assertEquals(recipe_order, [host.t_id, guest.t_id])
def setUp(self):
    """Create a running recipe with two tasks for each test."""
    with session.begin():
        tasks = [data_setup.create_task(), data_setup.create_task()]
        self.recipe = data_setup.create_recipe(task_list=tasks)
        data_setup.create_job_for_recipes([self.recipe])
        data_setup.mark_recipe_running(self.recipe)
def test_watchdog_extend_by_fqdn(self):
    """watchdog-extend accepts a system FQDN, and complains about an
    FQDN with no running recipe."""
    with session.begin():
        lab_controller = data_setup.create_labcontroller()
        system = data_setup.create_system(lab_controller=lab_controller)
        recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([recipe])
        data_setup.mark_recipe_running(recipe, system=system)
    run_client(['bkr', 'watchdog-extend', '--by=600', system.fqdn])
    with session.begin():
        session.expire_all()
        # kill time moved ~600s into the future
        assert_datetime_within(
            recipe.watchdog.kill_time,
            tolerance=datetime.timedelta(seconds=10),
            reference=datetime.datetime.utcnow() +
                      datetime.timedelta(seconds=600))
    # nonexistent fqdn
    try:
        run_client(['bkr', 'watchdog-extend',
                    'ireallydontexistblah.test.fqdn'])
        self.fail('Must raise')
    except ClientError as e:
        self.assertIn(
            'Cannot find any recipe running on ireallydontexistblah.test.fqdn',
            e.stderr_output)
def setUp(self):
    """Create a system in this lab controller and start a recipe on it."""
    with session.begin():
        self.system = data_setup.create_system(
            lab_controller=self.get_lc())
        self.recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([self.recipe])
        data_setup.mark_recipe_running(self.recipe, system=self.system)
def test_recipe_running_then_watchdog_expired(self):
    """
    This tests the case where the recipe is running, has a valid
    reservation request, but the watchdog expires before it's completed.
    """
    with session.begin():
        recipe = data_setup.create_recipe(
            task_list=[Task.by_name(u'/distribution/install')],
            reservesys=True)
        job = data_setup.create_job_for_recipes([recipe])
        job_id = job.id
        data_setup.mark_recipe_tasks_finished(
            recipe, task_status=TaskStatus.aborted)
        job.recipesets[0].recipes[0].abort()
    beakerd.update_dirty_jobs()
    with session.begin():
        aborted_recipe = Job.by_id(job_id).recipesets[0].recipes[0]
        # the reservation request still takes effect after the abort
        self.assertEqual(aborted_recipe.status, TaskStatus.reserved)
        aborted_recipe.return_reservation()
    beakerd.update_dirty_jobs()
    with session.begin():
        aborted_recipe = Job.by_id(job_id).recipesets[0].recipes[0]
        self.assertEqual(aborted_recipe.status, TaskStatus.aborted)
def test_task_anchor(self):
    # Visiting a recipe or job page with a #task<id> anchor should
    # expand (display) the task list of the recipe owning that task.
    with session.begin():
        recipes = [data_setup.create_recipe(distro_tree=self.distro_tree)
                   for _ in range(10)]
        job = data_setup.create_job_for_recipes(recipes, owner=self.user)
    b = self.browser
    recipe = recipes[0]
    task = job.recipesets[0].recipes[0].tasks[0].id
    # bkr/recipes/id#task<id>
    b.get(get_server_base() + 'recipes/%s#task%s' % (recipe.id, task))
    # give 10 seconds for the element to be displayed
    WebDriverWait(b, 10).until(
        lambda driver: driver.find_element_by_id(
            'task_items_%s' % recipe.id).is_displayed())
    self.assertTrue(
        b.find_element_by_id('task_items_%s' % recipe.id).is_displayed())
    # bkr/jobs/id#task<id>
    # for multi recipe jobs, only the recipe to which the task belongs should be visible
    # choose a recipe and task somewhere in the middle
    task = job.recipesets[0].recipes[6].tasks[0].id
    recipe = recipes[6]
    b.get(get_server_base() + 'jobs/%s#task%s' % (job.id, task))
    # give 10 seconds for the element to be displayed
    WebDriverWait(b, 10).until(
        lambda driver: driver.find_element_by_id(
            'task_items_%s' % recipe.id).is_displayed())
    self.assertTrue(
        b.find_element_by_id('task_items_%s' % recipe.id).is_displayed())
    recipes.remove(recipe)
    for r in recipes:
        # be fair and give 10 seconds for the element to be displayed, if at all
        # NOTE(review): this wait targets `recipe.id` (the anchored,
        # already-visible recipe), not `r.id`. Waiting on `r.id` would
        # time out since those lists never display, but waiting on the
        # visible one makes this an immediate no-op — confirm intent.
        WebDriverWait(b, 10).until(
            lambda driver: driver.find_element_by_id(
                'task_items_%s' % recipe.id).is_displayed())
        self.assertTrue(
            not b.find_element_by_id('task_items_%s' % r.id).is_displayed())
def test_task_aborted_return_reservation(self):
    """
    This tests the case where the task was aborted, then the recipe
    goes to Reserved state and then finally the reservation is returned
    """
    with session.begin():
        recipe = data_setup.create_recipe(
            task_list=[Task.by_name(u'/distribution/install')],
            reservesys=True)
        job = data_setup.create_job_for_recipes([recipe])
        job_id = job.id
        data_setup.mark_recipe_tasks_finished(
            recipe,
            result=TaskResult.warn,
            task_status=TaskStatus.aborted)
        job._mark_dirty()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job_id)
        # reservesys kicks in even though the task aborted
        self.assertEqual(job.recipesets[0].recipes[0].status,
                         TaskStatus.reserved)
        job.recipesets[0].recipes[0].return_reservation()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job_id)
        self.assertEqual(job.recipesets[0].recipes[0].status,
                         TaskStatus.aborted)
def test_recipe_running_then_watchdog_expired(self):
    """
    Covers the case of a running recipe with a valid reservation request
    whose watchdog expires before the recipe completes: it should enter
    Reserved, and returning the reservation should leave it Aborted.
    """
    with session.begin():
        expiring_recipe = data_setup.create_recipe(
                task_list=[Task.by_name(u'/distribution/install')],
                reservesys=True)
        created_job = data_setup.create_job_for_recipes([expiring_recipe])
        saved_job_id = created_job.id
        data_setup.mark_recipe_tasks_finished(expiring_recipe,
                task_status=TaskStatus.aborted)
        # simulate the watchdog expiry aborting the recipe
        created_job.recipesets[0].recipes[0].abort()
    beakerd.update_dirty_jobs()
    with session.begin():
        refreshed_job = Job.by_id(saved_job_id)
        only_recipe = refreshed_job.recipesets[0].recipes[0]
        self.assertEqual(only_recipe.status, TaskStatus.reserved)
        only_recipe.return_reservation()
    beakerd.update_dirty_jobs()
    with session.begin():
        refreshed_job = Job.by_id(saved_job_id)
        self.assertEqual(refreshed_job.recipesets[0].recipes[0].status,
                TaskStatus.aborted)
def test_return_system_reservation(self):
    """A reserved recipe page lets the owner release the system via the UI."""
    b = self.browser
    with session.begin():
        reserved_recipe = data_setup.create_recipe(
                task_list=[data_setup.create_task()],
                reservesys=True,
                reservesys_duration=1800)
        owned_job = data_setup.create_job_for_recipes([reserved_recipe],
                owner=self.user)
        data_setup.mark_recipe_tasks_finished(owned_job.recipesets[0].recipes[0])
        owned_job.update_status()
    self.go_to_recipe_view(reserved_recipe)
    # recipe should show as Reserved with a countdown
    b.find_element_by_xpath('//span[@class="statusReserved"]')
    remaining_text = b.find_element_by_xpath(
            '//span[@class="reservation_duration"]').text
    self.assertRegexpMatches(remaining_text, r'(0:\d\d:\d\d remaining)')
    # walk through the release form
    b.find_element_by_link_text('Release System').click()
    b.find_element_by_xpath(
            '//h1[text()="Release reserved system for Recipe %s"]'
            % reserved_recipe.id)
    b.find_element_by_xpath(
            '//form[@id="end_recipe_reservation"]//input[@type="submit"]'
            ).click()
    confirmation = b.find_element_by_class_name('flash').text
    self.assertEquals(
            'Successfully released reserved system for %s'
            % reserved_recipe.t_id,
            confirmation)
def test_watchdog_time_remaining_display(self):
    """Watchdog time remaining is rendered for both short and multi-day spans."""
    b = self.browser
    with session.begin():
        running_recipe = data_setup.create_recipe()
        owned_job = data_setup.create_job_for_recipes([running_recipe],
                owner=self.user)
        data_setup.mark_job_running(owned_job)
        # 1 hour 23 minutes 30 seconds on the watchdog
        running_recipe.watchdog.kill_time = (datetime.datetime.utcnow()
                + datetime.timedelta(seconds=83 * 60 + 30))
    self.go_to_recipe_view(running_recipe)
    b.find_element_by_link_text('Show Results').click()
    countdown = b.find_element_by_xpath(
            '//tr[contains(@class, "recipe_%s")][1]'
            '//div[@class="task-duration"]' % running_recipe.id)
    self.assertRegexpMatches(countdown.text, r'^Time Remaining 1:23:\d\d$')
    with session.begin():
        # bump the watchdog out past two days to exercise the "N days" form
        running_recipe.watchdog.kill_time = (datetime.datetime.utcnow()
                + datetime.timedelta(days=2, seconds=83 * 60 + 30))
    self.go_to_recipe_view(running_recipe)
    countdown = b.find_element_by_xpath(
            '//tr[contains(@class, "recipe_%s")][1]'
            '//div[@class="task-duration"]' % running_recipe.id)
    self.assertRegexpMatches(countdown.text,
            r'^Time Remaining 2 days, 1:23:\d\d$')
def test_unknown_fqdns_dont_appear(self):
    # If we have a recipe where the FQDN is not known (for example
    # a guest that hasn't finished installing yet), previously it would
    # appear as the string 'None'. Now it's just not included.
    with session.begin():
        host = data_setup.create_recipe(role=u'SERVERS')
        guest = data_setup.create_guestrecipe(host=host, role=u'CLIENTS')
        data_setup.create_job_for_recipes([host, guest])
        host_system = data_setup.create_system(fqdn=u'host.bz952948',
                lab_controller=self.lc)
        data_setup.mark_recipe_running(host, system=host_system)
        # the guest is still waiting, so it has no FQDN yet
        data_setup.mark_recipe_waiting(guest)
        self.assertEquals(guest.resource.fqdn, None)
    self.server.auth.login_password(self.lc.user.user_name, u'logmein')
    # the guest contributes nothing to CLIENTS while its FQDN is unknown
    expected = {'SERVERS': ['host.bz952948'],
                'STANDALONE': ['host.bz952948'],
                'CLIENTS': []}
    self.assertEquals(
            self.server.recipes.tasks.peer_roles(host.tasks[0].id),
            expected)
    self.assertEquals(
            self.server.recipes.tasks.peer_roles(guest.tasks[0].id),
            expected)
def test_task_roles_visible_between_hosts_and_guests(self):
    # Hosts and guests can all see each others' task roles now. Previously
    # they were not visible to each other.
    with session.begin():
        host = data_setup.create_recipe()
        guest_server = data_setup.create_guestrecipe(host=host)
        guest_client = data_setup.create_guestrecipe(host=host)
        data_setup.create_job_for_recipes(
                [host, guest_server, guest_client])
        # assign task-level roles across the host/guest boundary
        host.tasks[0].role = u'SERVERS'
        guest_server.tasks[0].role = u'SERVERS'
        guest_client.tasks[0].role = u'CLIENTS'
        host_system = data_setup.create_system(fqdn=u'host.bz960434')
        data_setup.mark_recipe_running(host, system=host_system)
        data_setup.mark_recipe_running(guest_server,
                fqdn=u'guestserver.bz960434')
        data_setup.mark_recipe_running(guest_client,
                fqdn=u'guestclient.bz960434')
    self.server.auth.login_password(self.lc.user.user_name, u'logmein')
    expected_peer_roles = {
        'SERVERS': ['host.bz960434', 'guestserver.bz960434'],
        'CLIENTS': ['guestclient.bz960434'],
        'STANDALONE': ['host.bz960434', 'guestserver.bz960434',
                'guestclient.bz960434'],
    }
    # every participant, host or guest, sees the same combined role map
    for participant in (host, guest_server, guest_client):
        self.assertEquals(
                self.server.recipes.tasks.peer_roles(participant.tasks[0].id),
                expected_peer_roles)
def test_task_anchor(self):
    """
    A #task<id> anchor activates "Show Results" for the recipe owning the
    task; on the job page all other recipes stay on "Hide Results".
    """
    with session.begin():
        recipe_list = [data_setup.create_recipe(distro_tree=self.distro_tree)
                for _ in range(10)]
        job = data_setup.create_job_for_recipes(recipe_list, owner=self.user)
    b = self.browser
    target = recipe_list[0]
    target_task_id = job.recipesets[0].recipes[0].tasks[0].id
    # bkr/recipes/id#task<id>
    b.get(get_server_base()
            + 'recipes/%s#task%s' % (target.id, target_task_id))
    # "Show Results" should be activated for the recipe
    b.find_element_by_css_selector('#recipe%s .results-tab.active'
            % target.id)
    # bkr/jobs/id#task<id>
    # for multi recipe jobs, only the recipe to which the task belongs
    # should be visible; choose a recipe and task somewhere in the middle
    target = recipe_list[6]
    target_task_id = job.recipesets[0].recipes[6].tasks[0].id
    b.get(get_server_base() + 'jobs/%s#task%s' % (job.id, target_task_id))
    # "Show Results" should be activated for the recipe
    b.find_element_by_css_selector('#recipe%s .results-tab.active'
            % target.id)
    recipe_list.remove(target)
    for other in recipe_list:
        # "Hide Results" should be activated for the recipe
        b.find_element_by_css_selector('#recipe%s .hide-results-tab.active'
                % other.id)
def test_wait_duration_by_resource(self):
    # Exercises the 'wait-duration-by-resource' reporting query: recipes on
    # OpenStack (virt) resources are aggregated under one 'All OpenStack'
    # row, while system resources are reported per FQDN.
    system_recipe = data_setup.create_recipe()
    data_setup.create_job_for_recipes([system_recipe])
    virt_recipe = data_setup.create_recipe()
    data_setup.create_job_for_recipes([virt_recipe])
    virt_recipe2 = data_setup.create_recipe()
    data_setup.create_job_for_recipes([virt_recipe2])
    data_setup.mark_recipe_complete(virt_recipe, virt=True)
    data_setup.mark_recipe_complete(virt_recipe2, virt=True)
    # system_recipe must be completed before system_recipe2 so that its
    # system exists and can be reused for system_recipe2 below, putting
    # both system recipes on the same FQDN (hence a single report row).
    data_setup.mark_recipe_complete(system_recipe)
    system_recipe2 = data_setup.create_recipe()
    data_setup.create_job_for_recipes([system_recipe2])
    data_setup.mark_recipe_complete(system_recipe2,
            system=system_recipe.resource.system)
    one_hour = datetime.timedelta(hours=1)
    two_hours = datetime.timedelta(hours=2)
    three_hours = datetime.timedelta(hours=3)
    # Wait duration is start_time - queue_time; fabricate waits of
    # 1h/2h for the virt recipes and 1h/3h for the system recipes.
    virt_recipe.resource.recipe.start_time = \
            virt_recipe.resource.recipe.recipeset.queue_time + one_hour
    virt_recipe2.resource.recipe.start_time = \
            virt_recipe2.resource.recipe.recipeset.queue_time + two_hours
    system_recipe.resource.recipe.start_time = \
            system_recipe.resource.recipe.recipeset.queue_time + one_hour
    system_recipe2.resource.recipe.start_time = \
            system_recipe2.resource.recipe.recipeset.queue_time + three_hours
    # push the start_time changes to the DB before running the raw query
    session.flush()
    rows = self.execute_reporting_query('wait-duration-by-resource')
    all_rows = rows.fetchall()
    virt_rows = [row for row in all_rows if row.fqdn == 'All OpenStack']
    system_rows = [
        row for row in all_rows
        if row.fqdn in (system_recipe.resource.fqdn,
                        system_recipe2.resource.fqdn)
    ]
    # all virt recipes collapse into a single aggregated row: 1h/2h waits
    self.assertEquals(len(virt_rows), 1, virt_rows)
    self.assertEquals(virt_rows[0].min_wait_hours, 1)
    self.assertEquals(virt_rows[0].max_wait_hours, 2)
    self.assertEquals(virt_rows[0].avg_wait_hours, Decimal('1.5'))
    # both system recipes ran on the same system, so one row: 1h/3h waits
    self.assertEquals(len(system_rows), 1, system_rows)
    self.assertEquals(system_rows[0].min_wait_hours, 1)
    self.assertEquals(system_rows[0].max_wait_hours, 3)
    self.assertEquals(system_rows[0].avg_wait_hours, 2)
def test_recipe_not_provisioned_yet(self):
    """Requesting the iPXE script before provisioning yields 503."""
    with session.begin():
        scheduled_recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([scheduled_recipe])
        data_setup.mark_recipe_scheduled(scheduled_recipe, virt=True)
    # VM is created but recipe.provision() hasn't been called yet
    url = get_server_base() + "systems/by-uuid/%s/ipxe-script" \
            % scheduled_recipe.resource.instance_id
    rsp = requests.get(url)
    self.assertEquals(rsp.status_code, 503)
def test_gets_logs(self):
    """recipes.files() returns the log files attached to the recipe."""
    with session.begin():
        # system is only created for its database side effect
        system = data_setup.create_system(lab_controller=self.lc)
        logged_recipe = data_setup.create_recipe()
        logged_recipe.logs.append(LogRecipe(filename=u'test.log'))
        data_setup.create_job_for_recipes([logged_recipe])
    returned_logs = self.server.recipes.files(logged_recipe.id)
    self.assertEqual(len(returned_logs), 1)
    self.assertEqual(returned_logs[0]['filename'], u'test.log')
def setUp(self):
    """Create an installing recipe and open its Installation tab."""
    with session.begin():
        self.recipe = data_setup.create_recipe(
                distro_name=u'PurpleUmbrellaLinux5.11-20160428',
                variant=u'Server',
                arch=u'x86_64')
        data_setup.create_job_for_recipes([self.recipe])
        data_setup.mark_recipe_installing(self.recipe)
    self.browser = self.get_browser()
    go_to_recipe_view(self.browser, self.recipe, tab='Installation')
def test_scheduler_status_is_not_reset_on_already_released_systems(self):
    """
    Regression test: job.update_status() must not push a system that was
    already released (scheduler_status idle) back to pending, since the
    scheduler may have reserved it for another recipe in the meantime.
    """
    finished_recipe = data_setup.create_recipe()
    pending_recipe = data_setup.create_recipe()
    job = data_setup.create_job_for_recipesets([
            data_setup.create_recipeset_for_recipes([finished_recipe]),
            data_setup.create_recipeset_for_recipes([pending_recipe])])
    data_setup.mark_recipe_complete(finished_recipe)
    released_system = finished_recipe.resource.system
    self.assertEquals(released_system.scheduler_status,
            SystemSchedulerStatus.pending)
    # Pretend the scheduler has set the system back to idle
    released_system.scheduler_status = SystemSchedulerStatus.idle
    data_setup.mark_recipe_scheduled(pending_recipe)
    job.update_status()
    # The bug was that job.update_status() would reset the *first*
    # recipe's system back to pending, even though it had already been
    # released and could potentially be reserved for another recipe
    # already.
    self.assertEquals(released_system.scheduler_status,
            SystemSchedulerStatus.idle)
def setUp(self):
    """Start a recipe installing on a real system and compute log paths."""
    with session.begin():
        self.system = data_setup.create_system(lab_controller=self.get_lc())
        self.recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([self.recipe])
        data_setup.mark_recipe_installing(self.recipe, system=self.system)
    # live console log written by the lab controller for this system
    self.console_log = os.path.join(
            get_conf().get('CONSOLE_LOGS'), self.system.fqdn)
    # cached copy stored under the recipe's id-bucketed directory
    recipe_id = self.recipe.id
    self.cached_console_log = os.path.join(
            get_conf().get('CACHEPATH'), 'recipes',
            str(recipe_id // 1000) + '+', str(recipe_id), 'console.log')
def setUp(self):
    """Build a running recipe with named, generated, and fetched tasks."""
    with session.begin():
        self.recipe = data_setup.create_recipe(
                task_name=u'/distribution/install')
        extra_tasks = [
            RecipeTask.from_task(data_setup.create_task()),
            RecipeTask.from_fetch_url(
                    u'http://example.com/tasks/example.tar.bz2'),
        ]
        self.recipe.tasks.extend(extra_tasks)
        data_setup.create_job_for_recipes([self.recipe])
        data_setup.mark_recipe_running(self.recipe)
def test_unrecognised_anchor_is_replaced_with_default(self):
    """An unknown URL fragment falls back to the #installation tab."""
    with session.begin():
        installing_recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([installing_recipe])
        data_setup.mark_recipe_installing(installing_recipe)
    b = self.browser
    b.get(get_server_base()
            + 'recipes/%s#no-such-anchor-exists' % installing_recipe.id)
    b.find_element_by_css_selector('#installation.active')
    # the bogus fragment should have been rewritten in the address bar
    fragment = urlparse.urldefrag(b.current_url)[1]
    self.assertEquals(fragment, 'installation')
def test_shows_installation_tab_while_installing(self):
    """While installing, the recipe page defaults to the Installation tab."""
    with session.begin():
        installing_recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([installing_recipe])
        data_setup.mark_recipe_installing(installing_recipe)
    b = self.browser
    go_to_recipe_view(b, installing_recipe)
    b.find_element_by_css_selector('#installation.active')
    # the URL fragment should reflect the active tab
    fragment = urlparse.urldefrag(b.current_url)[1]
    self.assertEquals(fragment, 'installation')