def setUp(self):
    """Create a job whose recipe has two tasks and is already Running."""
    with session.begin():
        task_pair = [data_setup.create_task(), data_setup.create_task()]
        self.recipe = data_setup.create_recipe(task_list=task_pair)
        data_setup.create_job_for_recipes([self.recipe])
        data_setup.mark_recipe_running(self.recipe)
def test_watchdog_show_non_running_task(self):
    """watchdog-show prints 'N/A' for a task whose recipe is still waiting."""
    with session.begin():
        waiting_recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([waiting_recipe])
        data_setup.mark_recipe_waiting(waiting_recipe)
    task_id = waiting_recipe.tasks[0].id
    out = run_client(['bkr', 'watchdog-show', '%s' % task_id])
    self.assertEquals(out, '%s: N/A\n' % task_id, out)
def setUp(self):
    """Provision a recipe running on a system in our lab controller."""
    with session.begin():
        self.system = data_setup.create_system(lab_controller=self.get_lc())
        self.recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([self.recipe])
        data_setup.mark_recipe_running(self.recipe, system=self.system)
def test_open_in_demand_systems(self):
    """Only reserved systems with waiting recipes in live jobs are reported."""
    with session.begin():
        # system with waiting recipes
        busy_system = data_setup.create_system()
        data_setup.create_manual_reservation(
            busy_system,
            start=datetime.utcnow() - timedelta(days=self.reservation_length),
            user=self.user)
        waiting_recipe = data_setup.create_recipe()
        waiting_recipe.systems[:] = [busy_system]
        job = data_setup.create_job_for_recipes([waiting_recipe])
        data_setup.mark_job_queued(job)
        job.recipesets[0].queue_time = \
            datetime.utcnow() - timedelta(hours=self.waiting_recipe_age)
        # Create another system with waiting recipes and delete the job;
        # recipes in deleted jobs must not count towards demand.
        recipe_in_deleted_job = data_setup.create_recipe()
        recipe_in_deleted_job.systems[:] = [busy_system]
        deleted_job = data_setup.create_job_for_recipes([recipe_in_deleted_job])
        data_setup.mark_job_queued(deleted_job)
        deleted_job.recipesets[0].queue_time = \
            datetime.utcnow() - timedelta(hours=self.waiting_recipe_age)
        deleted_job.delete()
        # system with no waiting recipes
        idle_system = data_setup.create_system()
        data_setup.create_manual_reservation(
            idle_system,
            start=datetime.utcnow() - timedelta(days=self.reservation_length),
            user=self.user)
        usage = BeakerUsage(self.user, self.reservation_expiry,
                            self.reservation_length, self.waiting_recipe_age,
                            self.delayed_job_age)
        results = usage.open_in_demand_systems()
        # exactly one system, with one waiting recipe, and the right fqdn
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0][1], 1)
        self.assertEqual(results[0][2], busy_system.fqdn)
def test_watchdog_extend_by_fqdn(self):
    """watchdog-extend accepts a system FQDN, and rejects unknown FQDNs."""
    with session.begin():
        lc = data_setup.create_labcontroller()
        system = data_setup.create_system(lab_controller=lc)
        recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([recipe])
        data_setup.mark_recipe_running(recipe, system=system)
    run_client(['bkr', 'watchdog-extend', '--by=600', system.fqdn])
    with session.begin():
        session.expire_all()
        expected_kill_time = (datetime.datetime.utcnow()
                              + datetime.timedelta(seconds=600))
        assert_datetime_within(recipe.watchdog.kill_time,
                               tolerance=datetime.timedelta(seconds=10),
                               reference=expected_kill_time)
    # nonexistent fqdn
    try:
        run_client(
            ['bkr', 'watchdog-extend', 'ireallydontexistblah.test.fqdn'])
        self.fail('Must raise')
    except ClientError as e:
        self.assertIn(
            'Cannot find any recipe running on ireallydontexistblah.test.fqdn',
            e.stderr_output)
def test_unknown_fqdns_dont_appear(self):
    # If we have a recipe where the FQDN is not known (for example
    # a guest that hasn't finished installing yet), previously it would
    # appear as the string 'None'. Now it's just not included.
    with session.begin():
        hostrecipe = data_setup.create_recipe(role=u'SERVERS')
        guestrecipe = data_setup.create_guestrecipe(host=hostrecipe,
                                                    role=u'CLIENTS')
        data_setup.create_job_for_recipes([hostrecipe, guestrecipe])
        system = data_setup.create_system(fqdn=u'host.bz952948')
        data_setup.mark_recipe_running(hostrecipe, system=system)
        data_setup.mark_recipe_waiting(guestrecipe)
        # guest has no fqdn yet
        self.assertEquals(guestrecipe.resource.fqdn, None)
    self.server.auth.login_password(self.lc.user.user_name, u'logmein')
    self.assertEquals(
        self.server.recipes.tasks.peer_roles(hostrecipe.tasks[0].id),
        {'SERVERS': ['host.bz952948'],
         'STANDALONE': ['host.bz952948'],
         'CLIENTS': []})
    self.assertEquals(
        self.server.recipes.tasks.peer_roles(guestrecipe.tasks[0].id),
        {'SERVERS': ['host.bz952948'],
         'STANDALONE': [],
         'CLIENTS': []})
def test_delayed_jobs(self):
    """Queued jobs older than the threshold are reported; fresh ones are not."""
    with session.begin():
        # Create a queued job that was submitted a long time ago
        recipe = data_setup.create_recipe()
        job = data_setup.create_job_for_recipes([recipe])
        job.owner = self.user
        data_setup.mark_job_queued(job)
        job.recipesets[0].queue_time = \
            datetime.utcnow() - timedelta(days=self.delayed_job_age)
        # create a job with two recipes, one Queued and one Scheduled
        # which was submitted a long time ago
        queued_recipe = data_setup.create_recipe()
        scheduled_recipe = data_setup.create_recipe()
        multi_recipe_job = data_setup.create_job_for_recipes(
            [queued_recipe, scheduled_recipe])
        multi_recipe_job.owner = self.user
        # mark recipe Queued
        queued_recipe.process()
        queued_recipe.queue()
        # mark recipe Scheduled
        scheduled_recipe.process()
        scheduled_recipe.queue()
        scheduled_recipe.schedule()
        data_setup.mark_job_queued(multi_recipe_job)
        multi_recipe_job.recipesets[0].queue_time = \
            datetime.utcnow() - timedelta(days=self.delayed_job_age)
        # create a new submitted job for just now
        # NOTE(review): this reuses `recipe`, which already belongs to the
        # first job above -- presumably intentional, but worth confirming.
        recently_submitted_job = data_setup.create_job_for_recipes([recipe])
        recently_submitted_job.owner = self.user
        data_setup.mark_job_queued(recently_submitted_job)
        usage = BeakerUsage(self.user, self.reservation_expiry,
                            self.reservation_length, self.waiting_recipe_age,
                            self.delayed_job_age)
        delayed_jobs = usage.delayed_jobs()
        self.assertEqual(len(delayed_jobs), 2)
        self.assertEqual(absolute_url('/jobs/%s' % job.id),
                         delayed_jobs[0][1])
        self.assertEqual(absolute_url('/jobs/%s' % multi_recipe_job.id),
                         delayed_jobs[1][1])
def test_unknown_fqdns_dont_appear(self):
    # If we have a recipe where the FQDN is not known (for example
    # a guest that hasn't finished installing yet), previously it would
    # appear as the string 'None'. Now it's just not included.
    with session.begin():
        hostrecipe = data_setup.create_recipe(role=u'SERVERS')
        guestrecipe = data_setup.create_guestrecipe(host=hostrecipe,
                                                    role=u'CLIENTS')
        data_setup.create_job_for_recipes([hostrecipe, guestrecipe])
        system = data_setup.create_system(fqdn=u'host.bz952948',
                                          lab_controller=self.lc)
        data_setup.mark_recipe_running(hostrecipe, system=system)
        data_setup.mark_recipe_waiting(guestrecipe)
        # guest resource has no fqdn assigned yet
        self.assertEquals(guestrecipe.resource.fqdn, None)
    self.server.auth.login_password(self.lc.user.user_name, u'logmein')
    self.assertEquals(
        self.server.recipes.tasks.peer_roles(hostrecipe.tasks[0].id),
        {'SERVERS': ['host.bz952948'],
         'STANDALONE': ['host.bz952948'],
         'CLIENTS': []})
    self.assertEquals(
        self.server.recipes.tasks.peer_roles(guestrecipe.tasks[0].id),
        {'SERVERS': ['host.bz952948'],
         'STANDALONE': ['host.bz952948'],
         'CLIENTS': []})
def test_recipe_provisioned(self):
    """The generated iPXE script boots the distro from its HTTP URL."""
    with session.begin():
        distro_tree = data_setup.create_distro_tree(
            arch=u'x86_64', osmajor=u'Fedora20',
            lab_controllers=[self.lc],
            urls=[u'nfs://example.nfs.test:/path/to/os',
                  u'http://example.com/ipxe-test/F20/x86_64/os/'])
        recipe = data_setup.create_recipe(distro_tree=distro_tree)
        data_setup.create_job_for_recipes([recipe])
        data_setup.mark_recipe_waiting(recipe, virt=True,
                                       lab_controller=self.lc)
    script_url = (get_server_base() +
                  'systems/by-uuid/%s/ipxe-script' % recipe.resource.instance_id)
    response = requests.get(script_url)
    response.raise_for_status()
    self.assertEquals(response.text, """#!ipxe
kernel http://example.com/ipxe-test/F20/x86_64/os/pxeboot/vmlinuz console=tty0 console=ttyS0,115200n8 ks=%s noverifyssl netboot_method=ipxe
initrd http://example.com/ipxe-test/F20/x86_64/os/pxeboot/initrd
boot
""" % recipe.installation.rendered_kickstart.link)
def test_recipe_not_provisioned_yet(self):
    """503 while the VM exists but recipe.provision() hasn't run."""
    with session.begin():
        recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([recipe])
        data_setup.mark_recipe_scheduled(recipe, virt=True)
    # VM is created but recipe.provision() hasn't been called yet
    script_url = (get_server_base() +
                  "systems/by-uuid/%s/ipxe-script" % recipe.resource.instance_id)
    response = requests.get(script_url)
    self.assertEquals(response.status_code, 503)
def setUp(self):
    """Running recipe with an install task, a regular task and a fetch-url task."""
    with session.begin():
        self.recipe = data_setup.create_recipe(task_name=u'/distribution/install')
        extra_tasks = [
            RecipeTask.from_task(data_setup.create_task()),
            RecipeTask.from_fetch_url(u'http://example.com/tasks/example.tar.bz2'),
        ]
        self.recipe.tasks.extend(extra_tasks)
        data_setup.create_job_for_recipes([self.recipe])
        data_setup.mark_recipe_running(self.recipe)
def test_gets_logs(self):
    """recipes.files() returns the logs attached directly to the recipe."""
    with session.begin():
        system = data_setup.create_system(lab_controller=self.lc)
        recipe = data_setup.create_recipe()
        recipe.logs.append(LogRecipe(filename=u'test.log'))
        data_setup.create_job_for_recipes([recipe])
    logs = self.server.recipes.files(recipe.id)
    self.assertEqual(len(logs), 1)
    self.assertEqual(logs[0]['filename'], u'test.log')
def setUp(self):
    """Installing recipe with a custom distro; open its Installation tab."""
    with session.begin():
        self.recipe = data_setup.create_recipe(
            distro_name=u'PurpleUmbrellaLinux5.11-20160428',
            variant=u'Server',
            arch=u'x86_64')
        data_setup.create_job_for_recipes([self.recipe])
        data_setup.mark_recipe_installing(self.recipe)
    self.browser = self.get_browser()
    go_to_recipe_view(self.browser, self.recipe, tab='Installation')
def setUp(self):
    """Installing recipe on a real system; compute console log paths."""
    with session.begin():
        self.system = data_setup.create_system(lab_controller=self.get_lc())
        self.recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([self.recipe])
        data_setup.mark_recipe_installing(self.recipe, system=self.system)
        # lab controller writes the raw console log here
        self.console_log = os.path.join(get_conf().get('CONSOLE_LOGS'),
                                        self.system.fqdn)
        # recipes are bucketed by id // 1000 in the log cache
        bucket = str(self.recipe.id // 1000) + '+'
        self.cached_console_log = os.path.join(
            get_conf().get('CACHEPATH'), 'recipes', bucket,
            str(self.recipe.id), 'console.log')
def setUp(self):
    """Virt recipe running under our lab controller; compute cached log path."""
    with session.begin():
        self.watchdog = Watchdog()
        self.recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([self.recipe])
        data_setup.mark_recipe_running(self.recipe, virt=True,
                                       lab_controller=self.get_lc())
        # recipes are bucketed by id // 1000 in the log cache
        bucket = str(self.recipe.id // 1000) + '+'
        self.cached_console_log = os.path.join(
            get_conf().get('CACHEPATH'), 'recipes', bucket,
            str(self.recipe.id), 'console.log')
def test_recipe_not_provisioned_yet(self):
    """503 while the VM exists but recipe.provision() hasn't run."""
    with session.begin():
        recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([recipe])
        data_setup.mark_recipe_running(recipe, virt=True)
    # VM is created but recipe.provision() hasn't been called yet
    script_url = (get_server_base() +
                  'systems/by-uuid/%s/ipxe-script' % recipe.resource.instance_id)
    response = requests.get(script_url)
    self.assertEquals(response.status_code, 503)
def test_nonexistent_watchdog(self):
    """watchdog-extend fails cleanly for a recipe with no active watchdog."""
    with session.begin():
        recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([recipe])
        data_setup.mark_recipe_complete(recipe)
    try:
        run_client(['bkr', 'watchdog-extend', recipe.t_id])
        self.fail('Must raise')
    except ClientError as e:
        self.assertIn('No watchdog exists for recipe %s' % recipe.id,
                      e.stderr_output)
def setUp(self):
    """Running recipe with an install task, a regular task and a fetch-url task."""
    with session.begin():
        self.recipe = data_setup.create_recipe(
            task_name=u'/distribution/install')
        additional = [
            RecipeTask.from_task(data_setup.create_task()),
            RecipeTask.from_fetch_url(
                u'http://example.com/tasks/example.tar.bz2'),
        ]
        self.recipe.tasks.extend(additional)
        data_setup.create_job_for_recipes([self.recipe])
        data_setup.mark_recipe_running(self.recipe)
def test_task_start_time_is_localised(self):
    """The task start time shown in the Tasks tab carries a localised title."""
    with session.begin():
        recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([recipe])
        data_setup.mark_recipe_running(recipe)
    browser = self.browser
    go_to_recipe_view(browser, recipe, tab='Tasks')
    tab = browser.find_element_by_id('tasks')
    start_time = tab.find_element_by_xpath(
        '//div[@id="task%s"]//div[@class="task-start-time"]/time'
        % recipe.tasks[0].id)
    self.check_datetime_localised(start_time.get_attribute('title'))
def test_watchdog_show_running_task(self):
    """watchdog-show prints the remaining seconds for a running task."""
    with session.begin():
        recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([recipe])
        data_setup.mark_recipe_running(recipe)
        session.flush()
        first_task = recipe.tasks[0]
        first_task.watchdog.kill_time = (datetime.datetime.utcnow()
                                         + datetime.timedelta(seconds=99))
    out = run_client(['bkr', 'watchdog-show', str(first_task.id)])
    # Let's just check it is somewhere between 10-99
    self.assertTrue(re.match(r'%s: \d\d\n' % first_task.id, out))
def setUp(self):
    """Virt recipe running under our lab controller; compute cached log path."""
    with session.begin():
        self.watchdog = Watchdog()
        self.recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([self.recipe])
        data_setup.mark_recipe_running(self.recipe, virt=True,
                                       lab_controller=self.get_lc())
        # recipes are bucketed by id // 1000 in the log cache
        recipe_bucket = str(self.recipe.id // 1000) + '+'
        self.cached_console_log = os.path.join(
            get_conf().get('CACHEPATH'), 'recipes', recipe_bucket,
            str(self.recipe.id), 'console.log')
def setUp(self):
    """Running recipe on a real system; compute raw and cached log paths."""
    with session.begin():
        self.system = data_setup.create_system(lab_controller=self.get_lc())
        self.recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([self.recipe])
        data_setup.mark_recipe_running(self.recipe, system=self.system)
        # lab controller writes the raw console log here
        self.console_log = os.path.join(get_conf().get('CONSOLE_LOGS'),
                                        self.system.fqdn)
        # recipes are bucketed by id // 1000 in the log cache
        recipe_bucket = str(self.recipe.id // 1000) + '+'
        self.cached_console_log = os.path.join(
            get_conf().get('CACHEPATH'), 'recipes', recipe_bucket,
            str(self.recipe.id), 'console.log')
def _create_recipe_with_user_defined_distro(self, **kwargs):
    """Create and complete a custom-distro recipe; honours an optional
    'osmajor' keyword. Stores the recipe id on self and returns the recipe."""
    with session.begin():
        install_task = Task.by_name(u'/distribution/check-install')
        reserve_task = Task.by_name(u'/distribution/reservesys')
        lc = create_lab_controller()
        system = create_x86_64_automated(lc)
        recipe_kwargs = {
            'custom_distro': True,
            'task_list': [install_task, reserve_task],
        }
        # only forward osmajor when the caller supplied one
        if 'osmajor' in kwargs:
            recipe_kwargs['osmajor'] = kwargs['osmajor']
        recipe = data_setup.create_recipe(**recipe_kwargs)
        data_setup.create_job_for_recipes([recipe], owner=create_user(),
                                          whiteboard=u'')
        data_setup.mark_recipe_complete(recipe, system=system)
        self.recipe_id = recipe.id
        return recipe
def _create_recipe(self, system=None):
    """Create and complete an install+reservesys recipe on RHEL 6.2;
    creates a system when none is given. Returns the recipe."""
    with session.begin():
        install_task = Task.by_name(u'/distribution/install')
        reserve_task = Task.by_name(u'/distribution/reservesys')
        lc = create_lab_controller()
        rhel62_server_x86_64 = create_rhel62_server_x86_64(lab_controller=lc)
        if not system:
            system = create_x86_64_automated(lc)
        recipe = data_setup.create_recipe(
            distro_tree=rhel62_server_x86_64,
            task_list=[install_task, reserve_task])
        data_setup.create_job_for_recipes([recipe], owner=create_user(),
                                          whiteboard=u'')
        data_setup.mark_recipe_complete(recipe, system=system)
        self.recipe_id = recipe.id
        return recipe
def _create_recipe(self, system=None):
    """Create and complete a check-install+reservesys recipe on RHEL 6.2;
    creates a system when none is given. Returns the recipe."""
    with session.begin():
        install_task = Task.by_name(u'/distribution/check-install')
        reserve_task = Task.by_name(u'/distribution/reservesys')
        lc = create_lab_controller()
        rhel62_server_x86_64 = create_rhel62_server_x86_64(lab_controller=lc)
        if not system:
            system = create_x86_64_automated(lc)
        recipe = data_setup.create_recipe(
            distro_tree=rhel62_server_x86_64,
            task_list=[install_task, reserve_task])
        data_setup.create_job_for_recipes([recipe], owner=create_user(),
                                          whiteboard=u'')
        data_setup.mark_recipe_complete(recipe, system=system)
        self.recipe_id = recipe.id
        return recipe
def test_recipe_provision_with_custom_distro_and_incompatible_url(self):
    """An NFS-only tree URL cannot be booted via iPXE and yields a 404."""
    with session.begin():
        recipe = data_setup.create_recipe(custom_distro=True)
        self.assertIsNone(recipe.distro_tree)
        recipe.installation.tree_url = 'nfs://mydistro.dummylab.test:/os/'
        data_setup.create_job_for_recipes([recipe])
        data_setup.mark_recipe_waiting(recipe, virt=True,
                                       lab_controller=self.lc)
    script_url = (get_server_base() +
                  'systems/by-uuid/%s/ipxe-script' % recipe.resource.instance_id)
    response = requests.get(script_url)
    self.assertEqual(response.status_code, 404)
    self.assertMultiLineEqual(
        response.text,
        'Given tree URL nfs://mydistro.dummylab.test:/os/ incompatible with iPXE')
def test_recipe_activity(self):
    """Recipe activity records are findable through the activity search."""
    with session.begin():
        recipe = data_setup.create_recipe(whiteboard=u'oldwhiteboard')
        data_setup.create_job_for_recipes([recipe])
        act = recipe.record_activity(
            service=u'testdata',
            user=User.by_user_name(data_setup.ADMIN_USER),
            action=u'Changed',
            field=u'Whiteboard',
            old=recipe.whiteboard,
            new=u'newwhiteboard')
    browser = self.browser
    browser.get(get_server_base() + 'activity/')
    query_box = browser.find_element_by_class_name('search-query')
    query_box.send_keys('field_name:Whiteboard new_value:newwhiteboard')
    browser.find_element_by_class_name('grid-filter').submit()
    check_activity_search_results(browser, present=[act])
def test_recipe_view_shows_external_task_results(self):
    """External (fetch-url) tasks show their URL in path and name columns."""
    with session.begin():
        recipe = data_setup.create_recipe(task_name=u'/distribution/install')
        external_task = RecipeTask.from_fetch_url(
            url='git://example.com/externaltasks/example#master',
            subdir='examples')
        recipe.tasks.extend([external_task])
        data_setup.create_job_for_recipes(
            [recipe], whiteboard='job with external tasks')
        data_setup.mark_recipe_complete(recipe, result=TaskResult.warn,
                                        task_status=TaskStatus.aborted)
    browser = self.browser
    go_to_recipe_view(browser, recipe=recipe, tab='Tasks')
    browser.find_element_by_xpath(
        '//div[@class="task-result-path"]/.[contains(text(), "%s")]'
        % external_task.fetch_url)
    browser.find_element_by_xpath(
        '//span[@class="task-name"]/.[contains(text(), "%s")]'
        % external_task.fetch_url)
def test_install_done_preserves_system_resource_fqdn(self):
    """install_done must not overwrite the fqdn of a system resource."""
    with session.begin():
        distro_tree = data_setup.create_distro_tree()
        recipe = data_setup.create_recipe(distro_tree=distro_tree)
        system = data_setup.create_system(lab_controller=self.lc)
        initial_fqdn = system.fqdn
        data_setup.create_job_for_recipes([recipe])
        data_setup.mark_recipe_waiting(recipe, system=system)
        self.assertEqual(recipe.resource.fqdn, initial_fqdn)
    # reporting a different name must not replace the system's fqdn
    result = self.server.recipes.install_done(recipe.id, 'somename')
    self.assertEqual(result, initial_fqdn)
    with session.begin():
        session.expire(recipe.resource)
        self.assertEqual(recipe.resource.fqdn, initial_fqdn)
def setUp(self):
    """Running recipe with known logs on recipe, task and result levels."""
    with session.begin():
        self.recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([self.recipe])
        data_setup.mark_recipe_running(self.recipe)
        self.recipe.logs[:] = [
            LogRecipe(path=u'/', filename=u'console.log'),
            LogRecipe(path=u'some-dir', filename=u'some-file.txt'),
        ]
        self.task = self.recipe.tasks[0]
        self.task.logs[:] = [
            LogRecipeTask(path=u'/', filename=u'TESTOUT.log'),
            LogRecipeTask(path=u'debug', filename=u'.task_beah_raw'),
        ]
        # record a passing result so result-level logs have somewhere to live
        self.task.pass_(u'', 0, u'Pass')
        self.result = self.recipe.tasks[0].results[0]
        self.result.logs[:] = [
            LogRecipeTaskResult(path=u'/', filename=u'test.log'),
            LogRecipeTaskResult(path=u'some-dir', filename=u'some-file.txt'),
        ]
def test_install_done_updates_resource_fqdn(self):
    """install_done records the reported fqdn on a guest resource."""
    with session.begin():
        distro_tree = data_setup.create_distro_tree()
        recipe = data_setup.create_recipe(distro_tree=distro_tree)
        guestrecipe = data_setup.create_guestrecipe(host=recipe,
                                                    distro_tree=distro_tree)
        data_setup.create_job_for_recipes([recipe, guestrecipe])
        data_setup.mark_recipe_running(recipe)
        data_setup.mark_recipe_waiting(guestrecipe)
    fqdn = 'theguestname'
    result = self.server.recipes.install_done(guestrecipe.id, fqdn)
    self.assertEqual(result, fqdn)
    with session.begin():
        session.expire(guestrecipe.resource)
        self.assertEqual(guestrecipe.resource.fqdn, fqdn)
def test_install_start(self):
    """install_start stamps the install time and arms a 3-hour watchdog."""
    with session.begin():
        system = data_setup.create_system(lab_controller=self.lc)
        recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([recipe])
        data_setup.mark_recipe_waiting(recipe, system=system)
    self.server.recipes.install_start(recipe.id)
    with session.begin():
        session.expire_all()
        now = datetime.datetime.utcnow()
        assert_datetime_within(recipe.installation.install_started,
                               tolerance=datetime.timedelta(seconds=10),
                               reference=now)
        assert_datetime_within(recipe.watchdog.kill_time,
                               tolerance=datetime.timedelta(seconds=10),
                               reference=now + datetime.timedelta(hours=3))
def test_search_by_version(self):
    """Version glob search matches only recipe tasks of that version."""
    with session.begin():
        task = data_setup.create_task()
        old_recipe = data_setup.create_recipe(task_list=[task])
        data_setup.create_job_for_recipes([old_recipe])
        old_recipe.tasks[0].version = u'1.0-0'
        recent_recipe = data_setup.create_recipe(task_list=[task])
        data_setup.create_job_for_recipes([recent_recipe])
        recent_recipe.tasks[0].version = u'2.3-4'
    browser = self.browser
    browser.get(get_server_base() + 'tasks%s' % task.name)
    browser.find_element_by_id('form_version').send_keys('1.0-*')
    browser.find_element_by_id('form').submit()
    self.check_recipetask_present_in_results(old_recipe.tasks[0])
    self.check_recipetask_absent_from_results(recent_recipe.tasks[0])
def test_install_done_updates_resource_fqdn(self):
    """install_done records the reported fqdn on a guest resource."""
    with session.begin():
        distro_tree = data_setup.create_distro_tree()
        recipe = data_setup.create_recipe(distro_tree=distro_tree)
        guestrecipe = data_setup.create_guestrecipe(
            host=recipe, distro_tree=distro_tree)
        data_setup.create_job_for_recipes([recipe, guestrecipe])
        data_setup.mark_recipe_running(recipe)
        data_setup.mark_recipe_waiting(guestrecipe)
    reported_fqdn = 'theguestname'
    result = self.server.recipes.install_done(guestrecipe.id, reported_fqdn)
    self.assertEqual(result, reported_fqdn)
    with session.begin():
        session.expire(guestrecipe.resource)
        self.assertEqual(guestrecipe.resource.fqdn, reported_fqdn)
def test_concurrent_recipe_completion(self):
    # This test simulates two recipes finishing at the same time. So we
    # have two concurrent transactions both updating the respective task states.
    # Previously there was no separate job.update_status() step, so the two
    # transactions would update the job status using out-of-date values in
    # both transactions, leaving the job running.
    with session.begin():
        recipe1 = data_setup.create_recipe()
        recipe2 = data_setup.create_recipe()
        job = data_setup.create_job_for_recipes([recipe1, recipe2])
        assert len(recipe1.tasks) == 1
        assert len(recipe2.tasks) == 1
        data_setup.mark_recipe_running(recipe1)
        data_setup.mark_recipe_running(recipe2)
        recipe1.tasks[-1].pass_(u'/', 0, u'Pass')
        recipe2.tasks[-1].pass_(u'/', 0, u'Pass')
    # Complete the recipes "concurrently" in two separate transactions.
    # Each thread opens its own transaction, signals readiness, then blocks
    # until the test allows it to stop its recipe's last task and commit.
    class RecipeCompletionThread(Thread):
        def __init__(self, recipe_id=None, **kwargs):
            super(RecipeCompletionThread, self).__init__(**kwargs)
            self.recipe_id = recipe_id
            # set once the thread's transaction is open and the recipe loaded
            self.ready_evt = Event()
            # the test sets this to let the thread proceed to completion
            self.continue_evt = Event()
        def run(self):
            session.begin()
            recipe = Recipe.by_id(self.recipe_id)
            self.ready_evt.set()
            self.continue_evt.wait()
            recipe.tasks[-1].stop()
            session.commit()
    thread1 = RecipeCompletionThread(name='recipe1', recipe_id=recipe1.id)
    thread2 = RecipeCompletionThread(name='recipe2', recipe_id=recipe2.id)
    thread1.start()
    thread2.start()
    # Wait for both threads to start their transactions
    thread1.ready_evt.wait()
    thread2.ready_evt.wait()
    # Allow recipe 1 to complete
    thread1.continue_evt.set()
    thread1.join()
    with session.begin():
        session.expire_all()
        job.update_status()
        # first recipe done, but the job as a whole is still running
        self.assertEquals(recipe1.status, TaskStatus.completed)
        self.assertEquals(recipe1.ptasks, 1)
        self.assertEquals(job.status, TaskStatus.running)
        self.assertEquals(job.ptasks, 1)
    # Now recipe 2 completes
    thread2.continue_evt.set()
    thread2.join()
    with session.begin():
        session.expire_all()
        job.update_status()
        # both recipes done: the job must now be completed with 2 passes
        self.assertEquals(recipe2.status, TaskStatus.completed)
        self.assertEquals(recipe2.ptasks, 1)
        self.assertEquals(job.status, TaskStatus.completed)
        self.assertEquals(job.ptasks, 2)
def test_authenticated_user_can_comment_recipetask(self):
    """Any authenticated user (no special permissions) can comment on a
    recipe task, and the comment appears in the popover and the DB."""
    with session.begin():
        recipe = data_setup.create_recipe()
        job = data_setup.create_job_for_recipes([recipe])
        recipetask = recipe.tasks[0]
        # no special permissions required to comment
        user = data_setup.create_user(password=u'otheruser')
    comment_text = u'comments are fun'
    b = self.browser
    # Fixed: log in with the password the user was actually created with
    # ('otheruser'); the previous placeholder value could never authenticate.
    login(b, user=user.user_name, password='otheruser')
    go_to_recipe_view(b, recipe, tab='Tasks')
    tab = b.find_element_by_id('tasks')
    tab.find_element_by_xpath('//div[@class="task-comments"]'
                              '/div/a[@class="comments-link"]').click()
    popover = b.find_element_by_class_name('popover')
    popover.find_element_by_name('comment').send_keys(comment_text)
    popover.find_element_by_tag_name('form').submit()
    # check if the comment is in the comments list indicating it was submitted
    popover.find_element_by_xpath('//div[@class="comments"]//div[@class="comment"]'
                                  '/p[2][text()="%s"]' % comment_text)
    # the input box is cleared after a successful submission
    self.assertEqual(popover.find_element_by_name('comment').text, '')
    with session.begin():
        session.expire_all()
        self.assertEqual(recipetask.comments[0].user, user)
        self.assertEqual(recipetask.comments[0].comment, comment_text)
    # comments link should indicate the new comment
    comments_link = tab.find_element_by_xpath('//div[@class="task-comments"]'
                                              '/div/a[@class="comments-link"]').text
    self.assertEqual(comments_link, '1')
def test_recipe_running_then_watchdog_expired(self):
    """
    This tests the case where the recipe is running, has a valid
    reservation request, but the watchdog expires before it's completed.
    """
    with session.begin():
        recipe = data_setup.create_recipe(
            task_list=[Task.by_name(u'/distribution/install')],
            reservesys=True)
        job = data_setup.create_job_for_recipes([recipe])
        job_id = job.id
        data_setup.mark_recipe_tasks_finished(
            recipe, task_status=TaskStatus.aborted)
        job.recipesets[0].recipes[0].abort()
    beakerd.update_dirty_jobs()
    # aborted-with-reservesys recipes first go to Reserved
    with session.begin():
        job = Job.by_id(job_id)
        reserved_recipe = job.recipesets[0].recipes[0]
        self.assertEqual(reserved_recipe.status, TaskStatus.reserved)
        reserved_recipe.return_reservation()
    beakerd.update_dirty_jobs()
    # once the reservation is returned the recipe settles as Aborted
    with session.begin():
        job = Job.by_id(job_id)
        self.assertEqual(job.recipesets[0].recipes[0].status,
                         TaskStatus.aborted)
def test_task_aborted_return_reservation(self):
    """
    This tests the case where the task was aborted, then the recipe
    goes to Reserved state and then finally the reservation is returned
    """
    with session.begin():
        recipe = data_setup.create_recipe(
            task_list=[Task.by_name(u'/distribution/install')],
            reservesys=True)
        job = data_setup.create_job_for_recipes([recipe])
        job_id = job.id
        data_setup.mark_recipe_tasks_finished(
            recipe, result=TaskResult.warn, task_status=TaskStatus.aborted)
        job._mark_dirty()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job_id)
        reserved_recipe = job.recipesets[0].recipes[0]
        self.assertEqual(reserved_recipe.status, TaskStatus.reserved)
        reserved_recipe.return_reservation()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job_id)
        self.assertEqual(job.recipesets[0].recipes[0].status,
                         TaskStatus.aborted)
def test_install_start(self):
    """install_start arms a 3-hour watchdog and records a /start result."""
    with session.begin():
        system = data_setup.create_system(lab_controller=self.lc)
        recipe = data_setup.create_recipe()
        data_setup.create_job_for_recipes([recipe])
        data_setup.mark_recipe_waiting(recipe, system=system)
    self.server.auth.login_password(self.lc.user.user_name, u'logmein')
    self.server.recipes.install_start(recipe.id)
    with session.begin():
        session.expire_all()
        assert_datetime_within(
            recipe.watchdog.kill_time,
            tolerance=datetime.timedelta(seconds=10),
            reference=datetime.datetime.utcnow() + datetime.timedelta(hours=3))
        first_result = recipe.tasks[0].results[0]
        self.assertEqual(first_result.result, TaskResult.pass_)
        self.assertEqual(first_result.path, u'/start')
        self.assertEqual(first_result.log, u'Install Started')
def test_recipe_running_then_cancelled(self):
    """
    This tests the case where the recipe is running, has a valid
    reservation request, but is cancelled before it's completed.
    """
    with session.begin():
        recipe = data_setup.create_recipe(
            task_list=[Task.by_name(u'/distribution/install')] * 2,
            reservesys=True)
        job = data_setup.create_job_for_recipes([recipe])
        job_id = job.id
        data_setup.mark_recipe_running(recipe)
        data_setup.mark_recipe_installation_finished(recipe)
        # we want at least one task to be Completed here
        # https://bugzilla.redhat.com/show_bug.cgi?id=1195558
        running_recipe = job.recipesets[0].recipes[0]
        running_recipe.tasks[0].stop()
        running_recipe.tasks[1].start()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job_id)
        self.assertEqual(job.recipesets[0].recipes[0].status,
                         TaskStatus.running)
        job.recipesets[0].cancel()
    beakerd.update_dirty_jobs()
    # cancellation wins over the reservesys request
    with session.begin():
        job = Job.by_id(job_id)
        self.assertEqual(job.recipesets[0].recipes[0].status,
                         TaskStatus.cancelled)
def test_role_fqdns_not_duplicated(self):
    """A role shared by recipe and task levels lists each fqdn only once."""
    with session.begin():
        dt = data_setup.create_distro_tree()
        lc = data_setup.create_labcontroller()
        systems = [
            data_setup.create_system(fqdn=u'server.bz951283',
                                     lab_controller=lc),
            data_setup.create_system(fqdn=u'client.bz951283',
                                     lab_controller=lc),
        ]
        job = data_setup.create_job_for_recipes([
            data_setup.create_recipe(distro_tree=dt, role=u'SERVERS'),
            data_setup.create_recipe(distro_tree=dt, role=u'CLIENTS'),
        ])
        recipes = job.recipesets[0].recipes
        # same roles on the tasks as on the recipes
        recipes[0].tasks[0].role = u'SERVERS'
        recipes[1].tasks[0].role = u'CLIENTS'
        for recipe, system in zip(recipes, systems):
            data_setup.mark_recipe_running(recipe, system=system)
    self.server.auth.login_password(self.lc.user.user_name, u'logmein')
    expected = {
        'SERVERS': ['server.bz951283'],
        'CLIENTS': ['client.bz951283'],
    }
    for recipe in recipes:
        self.assertEquals(
            self.server.recipes.tasks.peer_roles(recipe.tasks[0].id),
            expected)
def test_role_fqdns_not_duplicated(self):
    """A role shared by recipe and task levels lists each fqdn only once."""
    with session.begin():
        dt = data_setup.create_distro_tree()
        lc = data_setup.create_labcontroller()
        server_system = data_setup.create_system(fqdn=u'server.bz951283',
                                                 lab_controller=lc)
        client_system = data_setup.create_system(fqdn=u'client.bz951283',
                                                 lab_controller=lc)
        job = data_setup.create_job_for_recipes([
            data_setup.create_recipe(distro_tree=dt, role=u'SERVERS'),
            data_setup.create_recipe(distro_tree=dt, role=u'CLIENTS'),
        ])
        recipes = job.recipesets[0].recipes
        # same roles on the tasks as on the recipes
        recipes[0].tasks[0].role = u'SERVERS'
        recipes[1].tasks[0].role = u'CLIENTS'
        data_setup.mark_recipe_running(recipes[0], system=server_system)
        data_setup.mark_recipe_running(recipes[1], system=client_system)
    self.server.auth.login_password(self.lc.user.user_name, u'logmein')
    expected = {
        'SERVERS': ['server.bz951283'],
        'CLIENTS': ['client.bz951283'],
    }
    for recipe in recipes:
        self.assertEquals(
            self.server.recipes.tasks.peer_roles(recipe.tasks[0].id),
            expected)
def test_task_aborted_return_reservation(self):
    """
    This tests the case where the task was aborted, then the recipe
    goes to Reserved state and then finally the reservation is returned
    """
    with session.begin():
        recipe = data_setup.create_recipe(
            task_list=[Task.by_name(u'/distribution/install')],
            reservesys=True)
        job = data_setup.create_job_for_recipes([recipe])
        job_id = job.id
        data_setup.mark_recipe_tasks_finished(recipe,
                                              result=TaskResult.warn,
                                              task_status=TaskStatus.aborted)
        job._mark_dirty()
    beakerd.update_dirty_jobs()
    # the reservesys request keeps the aborted recipe in Reserved first
    with session.begin():
        job = Job.by_id(job_id)
        held_recipe = job.recipesets[0].recipes[0]
        self.assertEqual(held_recipe.status, TaskStatus.reserved)
        held_recipe.return_reservation()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job_id)
        self.assertEqual(job.recipesets[0].recipes[0].status,
                         TaskStatus.aborted)
def test_peer_roles(self):
    """peer_roles merges recipe-level and task-level roles across peers."""
    with session.begin():
        dt = data_setup.create_distro_tree()
        lc = data_setup.create_labcontroller()
        systems = [
            data_setup.create_system(fqdn=u'server.peer-roles.invalid',
                                     lab_controller=lc),
            data_setup.create_system(fqdn=u'clientone.peer-roles.invalid',
                                     lab_controller=lc),
            data_setup.create_system(fqdn=u'clienttwo.peer-roles.invalid',
                                     lab_controller=lc),
        ]
        job = data_setup.create_job_for_recipes([
            data_setup.create_recipe(distro_tree=dt, role=u'SERVERS'),
            data_setup.create_recipe(distro_tree=dt, role=u'CLIENTS'),
            data_setup.create_recipe(distro_tree=dt, role=u'CLIENTS'),
        ])
        recipes = job.recipesets[0].recipes
        recipes[0].tasks[0].role = None
        # Normally you wouldn't use the same role name with different
        # meaning at the task level, because that would just get
        # confusing... but it is possible
        recipes[1].tasks[0].role = u'SERVERS'
        recipes[2].tasks[0].role = u'CLIENTTWO'
        for recipe, system in zip(recipes, systems):
            data_setup.mark_recipe_running(recipe, system=system)
    self.server.auth.login_password(self.lc.user.user_name, u'logmein')
    expected = {
        'SERVERS': ['server.peer-roles.invalid',
                    'clientone.peer-roles.invalid'],
        'CLIENTS': ['clientone.peer-roles.invalid',
                    'clienttwo.peer-roles.invalid'],
        'None': ['server.peer-roles.invalid'],
        'CLIENTTWO': ['clienttwo.peer-roles.invalid'],
    }
    for recipe in recipes:
        self.assertEquals(
            self.server.recipes.tasks.peer_roles(recipe.tasks[0].id),
            expected)
def test_watchdog_time_remaining_display(self):
    """Remaining watchdog time renders as H:MM:SS, with days when needed."""
    browser = self.browser
    with session.begin():
        recipe = data_setup.create_recipe()
        job = data_setup.create_job_for_recipes([recipe], owner=self.user)
        data_setup.mark_job_running(job)
        # 1 hour 23 minutes 30 seconds from now
        recipe.watchdog.kill_time = (
            datetime.datetime.utcnow()
            + datetime.timedelta(seconds=83 * 60 + 30))
    self.go_to_recipe_view(recipe)
    browser.find_element_by_link_text('Show Results').click()
    duration = browser.find_element_by_xpath(
        '//tr[contains(@class, "recipe_%s")][1]'
        '//div[@class="task-duration"]' % recipe.id)
    self.assertRegexpMatches(duration.text, r'^Time Remaining 1:23:\d\d$')
    with session.begin():
        # push the kill time out past two days
        recipe.watchdog.kill_time = (
            datetime.datetime.utcnow()
            + datetime.timedelta(days=2, seconds=83 * 60 + 30))
    self.go_to_recipe_view(recipe)
    duration = browser.find_element_by_xpath(
        '//tr[contains(@class, "recipe_%s")][1]'
        '//div[@class="task-duration"]' % recipe.id)
    self.assertRegexpMatches(duration.text,
                             r'^Time Remaining 2 days, 1:23:\d\d$')
def test_return_system_reservation(self):
    """A reserved system can be released through the recipe page UI."""
    browser = self.browser
    with session.begin():
        recipe = data_setup.create_recipe(
            task_list=[data_setup.create_task()],
            reservesys=True,
            reservesys_duration=1800,
        )
        job = data_setup.create_job_for_recipes([recipe], owner=self.user)
        data_setup.mark_recipe_tasks_finished(job.recipesets[0].recipes[0])
        job.update_status()
    self.go_to_recipe_view(recipe)
    browser.find_element_by_xpath('//span[@class="statusReserved"]')
    duration = browser.find_element_by_xpath(
        '//span[@class="reservation_duration"]').text
    self.assertRegexpMatches(duration, r'(0:\d\d:\d\d remaining)')
    browser.find_element_by_link_text('Release System').click()
    browser.find_element_by_xpath(
        '//h1[text()="Release reserved system for Recipe %s"]' % recipe.id)
    browser.find_element_by_xpath(
        '//form[@id="end_recipe_reservation"]//input[@type="submit"]').click()
    flash_text = browser.find_element_by_class_name('flash').text
    self.assertEquals(
        'Successfully released reserved system for %s' % recipe.t_id,
        flash_text)