def test_group_modify_add_member(self):
    """Adding an existing user via 'bkr group-modify --add-member' succeeds
    and sends a notification; adding a nonexistent user fails with a clear
    error on stderr."""
    with session.begin():
        user = data_setup.create_user()
    out = run_client(['bkr', 'group-modify', '--add-member', user.user_name,
            self.group.group_name],
            config = self.client_config)
    with session.begin():
        # Re-read the group in a fresh transaction so we see the server's commit.
        session.refresh(self.group)
        group = Group.by_name(self.group.group_name)
        self.assert_(user.user_name in
                [u.user_name for u in group.users])
        self.check_notification(user, group, action='Added')
    # A nonexistent member name must be rejected by the server.
    try:
        out = run_client(['bkr', 'group-modify', '--add-member', 'idontexist',
                self.group.group_name],
                config = self.client_config)
        self.fail('Must fail or die')
    except ClientError, e:
        self.assert_('User does not exist' in e.stderr_output, e.stderr_output)
def run(self):
    """Thread body: stop the recipe's final task, synchronising with the
    main test thread through the ready/continue events."""
    session.begin()
    target_recipe = Recipe.by_id(self.recipe_id)
    # Signal the main thread that we are set up, then block until it
    # tells us to go ahead.
    self.ready_evt.set()
    self.continue_evt.wait()
    target_recipe.tasks[-1].stop()
    session.commit()
def setUp(self):
    """Open a transaction and create the fixtures each test needs:
    the Jobs controller, a user, and a known distro tree."""
    session.begin()
    # Imported here to avoid triggering controller setup at module import time.
    from bkr.server.jobs import Jobs
    self.controller = Jobs()
    self.user = data_setup.create_user()
    data_setup.create_distro_tree(distro_name=u'BlueShoeLinux5-5')
    # Flush so the fixtures get primary keys without committing yet.
    session.flush()
def update_db(self):
    """Register each newly uploaded task RPM in the Beaker database,
    each in its own transaction, then regenerate the task repo metadata.

    On success the previously registered RPM file (if any) is removed;
    on failure the new RPM file is removed and the error logged.
    """
    self.logger.info('Updating local Beaker database..')
    for task_rpm in self.tasks_added:
        self.logger.debug('Adding %s'% task_rpm)
        with open(os.path.join(self.task_dir,task_rpm)) as f:
            try:
                session.begin()
                task = self.tasks.process_taskinfo(self.tasks.read_taskinfo(f))
                # Remember the RPM this task previously pointed at, so the
                # stale file can be cleaned up after the new one is committed.
                old_rpm = task.rpm
                task.rpm = task_rpm
                session.commit()
            except Exception:
                session.rollback()
                session.close()
                self.logger.critical('Error adding task %s' % task_rpm)
                # NOTE(review): this unlinks the bare filename (relative to the
                # current working directory), unlike the os.path.join with
                # self.task_dir used below -- confirm this is intentional.
                unlink_ignore(task_rpm)
            else:
                session.close()
                self.logger.debug('Successfully added %s' % task.rpm)
                if old_rpm:
                    unlink_ignore(os.path.join(self.task_dir, old_rpm))
    # Update task repo
    self.logger.info('Creating repodata..')
    Task.update_repo()
    return
def test_task_aborted_return_reservation(self):
    """
    This tests the case where the task was aborted,
    then the recipe goes to Reserved state and then finally
    the reservation is returned
    """
    with session.begin():
        recipe = data_setup.create_recipe(
                task_list=[Task.by_name(u'/distribution/install')],
                reservesys=True)
        job = data_setup.create_job_for_recipes([recipe])
        job_id = job.id
        data_setup.mark_recipe_tasks_finished(recipe,
                result=TaskResult.warn,
                task_status=TaskStatus.aborted)
        # Force the scheduler to re-evaluate the job's state.
        job._mark_dirty()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job_id)
        # Despite the abort, the reservesys request holds the recipe Reserved.
        self.assertEqual(job.recipesets[0].recipes[0].status,
                TaskStatus.reserved)
        job.recipesets[0].recipes[0].return_reservation()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job_id)
        # After the reservation is returned the recipe settles as Aborted.
        self.assertEqual(job.recipesets[0].recipes[0].status,
                TaskStatus.aborted)
def test_recipe_running_then_cancelled(self):
    """
    This tests the case where the recipe is running, has a valid
    reservation request, but is cancelled before it's completed.
    """
    with session.begin():
        recipe = data_setup.create_recipe(
                task_list=[Task.by_name(u'/distribution/install')] * 2,
                reservesys=True)
        job = data_setup.create_job_for_recipes([recipe])
        job_id = job.id
        data_setup.mark_recipe_running(recipe)
        data_setup.mark_recipe_installation_finished(recipe)
        # we want at least one task to be Completed here
        # https://bugzilla.redhat.com/show_bug.cgi?id=1195558
        job.recipesets[0].recipes[0].tasks[0].stop()
        job.recipesets[0].recipes[0].tasks[1].start()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job_id)
        self.assertEqual(job.recipesets[0].recipes[0].status,
                TaskStatus.running)
        job.recipesets[0].cancel()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job_id)
        # Cancellation wins over the pending reservation request.
        self.assertEqual(job.recipesets[0].recipes[0].status,
                TaskStatus.cancelled)
def test_recipe_running_then_watchdog_expired(self):
    """
    This tests the case where the recipe is running, has a valid
    reservation request, but the watchdog expires before it's completed.
    """
    with session.begin():
        recipe = data_setup.create_recipe(
                task_list=[Task.by_name(u'/distribution/install')],
                reservesys=True)
        job = data_setup.create_job_for_recipes([recipe])
        job_id = job.id
        # Simulate the watchdog expiry: tasks are aborted and the recipe
        # itself is aborted.
        data_setup.mark_recipe_tasks_finished(recipe,
                task_status=TaskStatus.aborted)
        job.recipesets[0].recipes[0].abort()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job_id)
        # The reservesys request still holds the recipe in Reserved.
        self.assertEqual(job.recipesets[0].recipes[0].status,
                TaskStatus.reserved)
        job.recipesets[0].recipes[0].return_reservation()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job_id)
        self.assertEqual(job.recipesets[0].recipes[0].status,
                TaskStatus.aborted)
def test_update_labinfo(self):
    """Editing every Lab Info field through the UI saves the new values
    and bumps the system's modification timestamp."""
    with session.begin():
        # Due to bz987313 system must have existing lab info
        self.system.labinfo = LabInfo(weight=100)
        orig_date_modified = self.system.date_modified
    b = self.browser
    login(b)
    self.go_to_system_view(tab='Lab Info')
    changes = {
        'orig_cost': '1,000.00',
        'curr_cost': '500.00',
        'dimensions': '1x1x1',
        'weight': '50',
        'wattage': '500',
        'cooling': '1',
    }
    for k, v in changes.iteritems():
        b.find_element_by_name(k).clear()
        b.find_element_by_name(k).send_keys(v)
    b.find_element_by_xpath('//button[text()="Save Lab Info Changes"]').click()
    self.assertEquals(b.find_element_by_class_name('flash').text,
            'Saved Lab Info')
    # The form should redisplay with the saved values.
    for k, v in changes.iteritems():
        self.assertEquals(b.find_element_by_name(k).get_attribute('value'), v)
    with session.begin():
        session.refresh(self.system)
        self.assert_(self.system.date_modified > orig_date_modified)
def test_add_cc(self):
    """Adding two CC addresses through the Owner tab stores both and
    records an activity row per address."""
    with session.begin():
        self.system.cc = []
    b = self.browser
    login(b)
    self.go_to_system_view(tab='Owner')
    tab = b.find_element_by_id('owner')
    tab.find_element_by_name('cc').send_keys('*****@*****.**')
    tab.find_element_by_class_name('cc-add').submit()
    tab.find_element_by_xpath('.//li[contains(text(), "*****@*****.**")]')
    tab.find_element_by_name('cc').send_keys('*****@*****.**')
    tab.find_element_by_class_name('cc-add').submit()
    # Both addresses should now be listed.
    tab.find_element_by_xpath('.//li[contains(text(), "*****@*****.**")]')
    tab.find_element_by_xpath('.//li[contains(text(), "*****@*****.**")]')
    with session.begin():
        session.refresh(self.system)
        self.assertEquals(set(self.system.cc),
                set([u'*****@*****.**', u'*****@*****.**']))
        # Each addition produces its own 'Added' activity entry via HTTP.
        self.assertEquals(self.system.activity[0].field_name, u'Cc')
        self.assertEquals(self.system.activity[0].service, u'HTTP')
        self.assertEquals(self.system.activity[0].action, u'Added')
        self.assertEquals(self.system.activity[0].new_value, u'*****@*****.**')
        self.assertEquals(self.system.activity[1].field_name, u'Cc')
        self.assertEquals(self.system.activity[1].service, u'HTTP')
        self.assertEquals(self.system.activity[1].action, u'Added')
        self.assertEquals(self.system.activity[1].new_value, u'*****@*****.**')
def test_activity_is_not_logged_when_leaving_power_settings_empty(self):
    """Changing only the power type must record exactly one activity row.

    The bug was that we were recording a change to power_user or
    power_passwd because it changed from NULL to ''.
    """
    with session.begin():
        self.system.power.power_type = PowerType.lazy_create(name=u'ilo')
        self.system.power.power_user = None
        self.system.power.power_passwd = None
        self.system.power.power_id = None
        PowerType.lazy_create(name=u'drac')
        self.assertEquals(len(self.system.activity), 0)
    b = self.browser
    login(b)
    self.go_to_system_view(tab='Power Settings')
    tab = b.find_element_by_id('power-settings')
    # change power type but leave the other fields empty
    BootstrapSelect(tab.find_element_by_name('power_type'))\
        .select_by_visible_text('drac')
    tab.find_element_by_tag_name('form').submit()
    # Wait for the save to complete (empty sync-status means done).
    tab.find_element_by_xpath('.//span[@class="sync-status" and not(text())]')
    with session.begin():
        session.refresh(self.system)
        self.assertEquals(len(self.system.activity), 1,
                'Expecting only one activity row for power_type but found: %r'
                % self.system.activity)
        self.assertEquals(self.system.activity[0].field_name, u'power_type')
def test_delete_install_options(self):
    """Deleting a per-arch install options row removes all three option
    fields and records a 'Removed' activity entry for each."""
    with session.begin():
        self.system.provisions[self.distro_tree.arch] = Provision(
                arch=self.distro_tree.arch,
                ks_meta=u'some_ks_meta_var=1',
                kernel_options=u'some_kernel_option=1',
                kernel_options_post=u'some_kernel_option=2')
        orig_date_modified = self.system.date_modified
    b = self.browser
    login(b)
    self.go_to_system_view(tab='Install Options')
    delete_and_confirm(b, '//tr[th/text()="Architecture"]')
    # Back on the system page after the delete round-trip.
    b.find_element_by_xpath('//h1[text()="%s"]' % self.system.fqdn)
    with session.begin():
        session.refresh(self.system)
        self.assert_(self.system.date_modified > orig_date_modified)
        self.assert_(self.distro_tree.arch not in self.system.provisions)
        self.assertEquals(self.system.activity[0].action, u'Removed')
        self.assertEquals(self.system.activity[0].field_name,
                u'InstallOption:kernel_options_post:i386')
        self.assertEquals(self.system.activity[1].action, u'Removed')
        self.assertEquals(self.system.activity[1].field_name,
                u'InstallOption:kernel_options:i386')
        self.assertEquals(self.system.activity[2].action, u'Removed')
        self.assertEquals(self.system.activity[2].field_name,
                u'InstallOption:ks_meta:i386')
def test_add_group(self):
    """Assigning a system to a group makes it visible (searchable) to
    members of that group."""
    with session.begin():
        group = data_setup.create_group()
        user_password = '******'
        user = data_setup.create_user(password=user_password)
        data_setup.add_user_to_group(user, group)
        orig_date_modified = self.system.date_modified
    # as admin, assign the system to our test group
    b = self.browser
    login(b)
    self.go_to_system_view(tab='Groups')
    b.find_element_by_name('group.text').send_keys(group.group_name)
    b.find_element_by_name('groups').submit()
    b.find_element_by_xpath(
            '//div[@id="groups"]'
            '//td[normalize-space(text())="%s"]' % group.group_name)
    with session.begin():
        session.refresh(self.system)
        self.assert_(self.system.date_modified > orig_date_modified)
    # as a user in the group, can we see it?
    logout(b)
    login(b, user.user_name, user_password)
    click_menu_item(b, 'Systems', 'Available')
    b.find_element_by_name('simplesearch').send_keys(self.system.fqdn)
    b.find_element_by_name('systemsearch_simple').submit()
    check_system_search_results(b, present=[self.system])
def test_new_power_settings(self):
    """Saving power settings for a system with no existing power config
    records one activity row per changed field, with the expected values."""
    with session.begin():
        lc = data_setup.create_labcontroller()
        system = data_setup.create_system(lab_controller=lc, with_power=False)
    b = self.browser
    login(b)
    self.go_to_system_view(system=system, tab='Power Settings')
    tab = b.find_element_by_id('power-settings')
    BootstrapSelect(tab.find_element_by_name('power_type'))\
        .select_by_visible_text('virsh')
    tab.find_element_by_name('power_address').send_keys \
        ('qemu+ssh:10.10.10.10')
    tab.find_element_by_name('power_user').send_keys('root')
    tab.find_element_by_name('power_id').send_keys(system.fqdn)
    tab.find_element_by_tag_name('form').submit()
    # check activity records
    # Expected new_value per activity field name; power_address must match
    # the value actually typed into the form above.
    power_fields_changed = {'power_type': 'virsh',
                            'power_address': 'qemu+ssh:10.10.10.10',
                            'power_user': '******',
                            'power_id': system.fqdn,
                            'power_quiescent_period': 5}
    with session.begin():
        session.refresh(system)
        for activity in system.activity:
            # Look the expectation up by field name: indexing the dict with
            # the activity object itself would always raise KeyError.
            self.assertEquals(activity.new_value,
                    power_fields_changed[activity.field_name])
def test_duplicate_notify_cc_addresses_are_merged(self):
    """Submitting a job XML with the same notify cc address twice results
    in a single stored cc entry."""
    with session.begin():
        user = data_setup.create_user(password=u'hornet')
    b = self.browser
    login(b, user.user_name, u'hornet')
    b.get(get_server_base())
    click_menu_item(b, 'Scheduler', 'New Job')
    xml_file = tempfile.NamedTemporaryFile()
    xml_file.write('''
        <job>
            <whiteboard>job with duplicate notify cc addresses</whiteboard>
            <notify>
                <cc>[email protected]</cc>
                <cc>[email protected]</cc>
            </notify>
            <recipeSet>
                <recipe>
                    <distroRequires>
                        <distro_name op="=" value="BlueShoeLinux5-5" />
                    </distroRequires>
                    <hostRequires/>
                    <task name="/distribution/install" role="STANDALONE"/>
                </recipe>
            </recipeSet>
        </job>
        ''')
    xml_file.flush()
    b.find_element_by_id('jobs_filexml').send_keys(xml_file.name)
    b.find_element_by_xpath('//button[text()="Submit Data"]').click()
    b.find_element_by_xpath('//button[text()="Queue"]').click()
    flash_message = b.find_element_by_class_name('flash').text
    self.assert_(flash_message.startswith('Success!'), flash_message)
    with session.begin():
        # Most recently submitted job for this user.
        job = Job.query.filter(Job.owner == user).order_by(Job.id.desc()).first()
        self.assertEqual(job.cc, ['*****@*****.**'])
def test_ackability(self):
    """The ack/nak response box is shown only to users who may respond:
    the job owner, the owner's group co-members, and the job group's members.

    XXX If this test gets any more complicated, we should break it up.
    """
    b = self.browser
    login(b, user=self.user_1.user_name, password=self.password)
    b.get(get_server_base() + 'jobs/%d' % self.job.id)
    # This tests that the ack is there for owner
    b.find_element_by_name("response_box_%d" % self.job.recipesets[0].id)
    logout(b)
    # Not there for non owner
    login(b, user=self.user_2.user_name, password=self.password)
    b.get(get_server_base() + 'jobs/%d' % self.job.id)
    b.find_element_by_xpath("//td[normalize-space(text())='RS:%s' and "
            "not(./input[@name='response_box_%s'])]" % (
            self.job.recipesets[0].id, self.job.recipesets[0].id))
    # Is there for job owner's group co-member.
    with session.begin():
        data_setup.add_user_to_group(self.user_1, self.group)
        data_setup.add_user_to_group(self.user_3, self.group)
    logout(b)
    login(b, user=self.user_3.user_name, password=self.password)
    b.get(get_server_base() + 'jobs/%d' % self.job.id)
    b.find_element_by_xpath("//input[@name='response_box_%s']"
            % self.job.recipesets[0].id)
    # There for job's group member
    with session.begin():
        self.job.group = self.group
        self.user_2.groups.append(self.group)
    logout(b)
    login(b, user=self.user_2.user_name, password=self.password)
    b.get(get_server_base() + 'jobs/%s' % self.job.id)
    b.find_element_by_name("response_box_%s" % self.job.recipesets[0].id)
def test_system_activity_shows_changes_for_disk(self):
    """Pushing disk inventory over XMLRPC records one 'Added' activity
    entry per disk attribute."""
    with session.begin():
        system=data_setup.create_system()
    self.server.push(system.fqdn,
            {'Disk': {'Disks': [{'model': 'Virtio Block Device',
                                 'phys_sector_size': 512,
                                 'sector_size': 512,
                                 'size': str(8589934592)}]}})
    with session.begin():
        session.refresh(system)
        # Each disk attribute yields its own Disk:<attr> activity row, and
        # numeric values are recorded as their string representation.
        self.assertEquals(system.activity[0].service, u'XMLRPC')
        self.assertEquals(system.activity[0].action, u'Added')
        self.assertEquals(system.activity[0].field_name, u'Disk:model')
        self.assertEquals(system.activity[0].new_value, u'Virtio Block Device')
        self.assertEquals(system.activity[1].service, u'XMLRPC')
        self.assertEquals(system.activity[1].action, u'Added')
        self.assertEquals(system.activity[1].field_name, u'Disk:phys_sector_size')
        self.assertEquals(system.activity[1].new_value, u'512')
        self.assertEquals(system.activity[2].service, u'XMLRPC')
        self.assertEquals(system.activity[2].action, u'Added')
        self.assertEquals(system.activity[2].field_name, u'Disk:sector_size')
        self.assertEquals(system.activity[2].new_value, u'512')
        self.assertEquals(system.activity[3].service, u'XMLRPC')
        self.assertEquals(system.activity[3].action, u'Added')
        self.assertEquals(system.activity[3].field_name, u'Disk:size')
        self.assertEquals(system.activity[3].new_value, u'8589934592')
def test_unrecognised_arches_are_not_automatically_created(self):
    """Pushing inventory with an unknown arch name must be rejected,
    rather than silently creating a new Arch row."""
    with session.begin():
        system = data_setup.create_system(arch=u'x86_64')
    # 'x86-64' (hyphenated) is not a recognised Beaker arch name.
    with self.assertRaisesRegexp(xmlrpclib.Fault, 'No such arch'):
        self.server.push(system.fqdn, {'Arch': ['x86-64']})
    with session.begin():
        bogus_arch_count = Arch.query.filter_by(arch=u'x86-64').count()
        self.assertEquals(bogus_arch_count, 0)
def test_system_pools_import(self):
    """CSV import can add a system to pools, remove it (deleted=True),
    and reports an error for a nonexistent pool."""
    with session.begin():
        system = data_setup.create_system()
        pool1 = data_setup.create_system_pool()
        pool2 = data_setup.create_system_pool()
    login(self.browser)
    self.import_csv((u'csv_type,fqdn,pool,deleted\n'
            u'system_pool,%s,%s,False\n'
            u'system_pool,%s,%s,False'%(system.fqdn, pool1.name,
            system.fqdn, pool2.name)) \
            .encode('utf8'))
    self.failUnless(is_text_present(self.browser, 'No Errors'))
    with session.begin():
        session.refresh(system)
        self.assertEquals([pool1.name, pool2.name],
                [pool.name for pool in system.pools])
    # test deletion
    self.import_csv((u'csv_type,fqdn,pool,deleted\n'
            u'system_pool,%s,%s,True' % (system.fqdn, pool2.name)) \
            .encode('utf8'))
    self.failUnless(is_text_present(self.browser, 'No Errors'))
    with session.begin():
        session.refresh(system)
        self.assertNotIn(pool2.name, [pool.name for pool in system.pools])
    # Attempting to add a system to a Non existent pool should throw an error
    self.import_csv((u'csv_type,fqdn,pool,deleted\n'
            u'system_pool,%s,poolpool,True' % system.fqdn) \
            .encode('utf8'))
    self.assertTrue(is_text_present(self.browser,
            'poolpool: pool does not exist'))
def test_release_system(self):
    """Releasing a manually reserved system via XMLRPC closes the
    reservation and records a 'Returned' activity entry."""
    with session.begin():
        system = data_setup.create_system(
                owner=User.by_user_name(data_setup.ADMIN_USER),
                status=u'Manual', shared=True)
        user = data_setup.create_user(password=u'password')
        system.reserve_manually(service=u'testdata', user=user)
    server = self.get_server()
    server.auth.login_password(user.user_name, 'password')
    server.systems.release(system.fqdn)
    with session.begin():
        session.refresh(system)
        session.refresh(system.reservations[0])
        self.assert_(system.user is None)
        # The reservation record is retained with its finish time set to now.
        self.assertEquals(system.reservations[0].user, user)
        assert_datetime_within(system.reservations[0].finish_time,
                tolerance=datetime.timedelta(seconds=10),
                reference=datetime.datetime.utcnow())
        assert_durations_not_overlapping(system.reservations)
        released_activity = system.activity[0]
        self.assertEqual(released_activity.action, 'Returned')
        self.assertEqual(released_activity.field_name, 'User')
        self.assertEqual(released_activity.user, user)
        self.assertEqual(released_activity.old_value, user.user_name)
        self.assertEqual(released_activity.new_value, '')
        self.assertEqual(released_activity.service, 'XMLRPC')
def test_set_active_policy_to_custom_policy(self):
    """PATCHing {'active_access_policy': {'custom': True}} switches the
    system back from a pool policy to its own custom access policy."""
    with session.begin():
        user1 = data_setup.create_user()
        user2 = data_setup.create_user()
        # user1 is granted access only by the system's custom policy,
        # user2 only by the pool's policy.
        self.system.custom_access_policy.add_rule(
                permission=SystemPermission.edit_system, user=user1)
        pool = data_setup.create_system_pool()
        pool.access_policy.add_rule(
                permission=SystemPermission.edit_system, user=user2)
        self.system.active_access_policy = pool.access_policy
    self.assertFalse(self.system.active_access_policy.grants
            (user1, SystemPermission.edit_system))
    self.assertTrue(self.system.active_access_policy.grants
            (user2, SystemPermission.edit_system))
    s = requests.Session()
    s.post(get_server_base() + 'login',
            data={'user_name': self.owner.user_name,
                  'password': '******'}).raise_for_status()
    response = patch_json(get_server_base() +
            'systems/%s/' % self.system.fqdn, session=s,
            data={'active_access_policy': {'custom': True}},
            )
    response.raise_for_status()
    with session.begin():
        session.expire_all()
        # Custom policy is active again, so user1 regains access.
        self.assertTrue(self.system.active_access_policy.grants \
                (user1, SystemPermission.edit_system))
def test_authenticated_user_can_comment_recipetask(self):
    """Any logged-in user can comment on a recipe task; the comment is
    stored and the comments-link count is updated."""
    with session.begin():
        recipe = data_setup.create_recipe()
        job = data_setup.create_job_for_recipes([recipe])
        recipetask = recipe.tasks[0]
        # no special permissions required to comment
        user = data_setup.create_user(password=u'otheruser')
    comment_text = u'comments are fun'
    b = self.browser
    login(b, user=user.user_name, password='******')
    go_to_recipe_view(b, recipe, tab='Tasks')
    tab = b.find_element_by_id('tasks')
    tab.find_element_by_xpath('//div[@class="task-comments"]'
            '/div/a[@class="comments-link"]').click()
    popover = b.find_element_by_class_name('popover')
    popover.find_element_by_name('comment').send_keys(comment_text)
    popover.find_element_by_tag_name('form').submit()
    # check if the commit is in the comments list
    # indicating the comment is submitted
    popover.find_element_by_xpath('//div[@class="comments"]//div[@class="comment"]'
            '/p[2][text()="%s"]' % comment_text)
    # The input box is cleared after a successful submission.
    self.assertEqual(popover.find_element_by_name('comment').text, '')
    with session.begin():
        session.expire_all()
        self.assertEqual(recipetask.comments[0].user, user)
        self.assertEqual(recipetask.comments[0].comment, comment_text)
    # comments link should indicate the new comment
    comments_link = tab.find_element_by_xpath('//div[@class="task-comments"]'
            '/div/a[@class="comments-link"]').text
    self.assertEqual(comments_link, '1')
def provision_virt_recipes(*args): recipes = MachineRecipe.query\ .join(Recipe.recipeset).join(RecipeSet.job)\ .filter(Job.dirty_version == Job.clean_version)\ .join(Recipe.distro_tree, DistroTree.lab_controller_assocs, LabController)\ .filter(Recipe.status == TaskStatus.queued)\ .filter(Recipe.virt_status == RecipeVirtStatus.possible)\ .filter(LabController.disabled == False)\ .filter(or_(RecipeSet.lab_controller == None, RecipeSet.lab_controller_id == LabController.id))\ .order_by(RecipeSet.priority.desc(), Recipe.id.asc()) if not recipes.count(): return False log.debug("Entering provision_virt_recipes") for recipe_id, in recipes.values(Recipe.id.distinct()): session.begin() try: provision_virt_recipe(recipe_id) session.commit() except Exception, e: log.exception('Error in provision_virt_recipe(%s)', recipe_id) session.rollback() # As an added precaution, let's try and avoid this recipe in future with session.begin(): recipe = Recipe.by_id(recipe_id) recipe.virt_status = RecipeVirtStatus.failed finally:
def test_filters_out_excluded_families(self):
    """bkr machine-test must not generate recipes for distro trees whose
    osmajor is excluded for the corresponding arch on the target system."""
    with session.begin():
        rhel3_i386 = data_setup.create_distro_tree(
                osmajor=u"RedHatEnterpriseLinux3", arch=u"i386",
                distro_tags=[u"STABLE"]
        )
        rhel3_x86_64 = data_setup.create_distro_tree(
                osmajor=u"RedHatEnterpriseLinux3", arch=u"x86_64",
                distro_tags=[u"STABLE"]
        )
        rhel4_i386 = data_setup.create_distro_tree(
                osmajor=u"RedHatEnterpriseLinux4", arch=u"i386",
                distro_tags=[u"STABLE"]
        )
        rhel4_x86_64 = data_setup.create_distro_tree(
                osmajor=u"RedHatEnterpriseLinux4", arch=u"x86_64",
                distro_tags=[u"STABLE"]
        )
        # system with RHEL4 i386 and RHEL3 x86_64 excluded
        system = data_setup.create_system(arch=u"i386")
        system.arch.append(Arch.by_name(u"x86_64"))
        system.excluded_osmajor.extend(
            [
                ExcludeOSMajor(arch=Arch.by_name(u"i386"),
                        osmajor=OSMajor.by_name(u"RedHatEnterpriseLinux4")),
                ExcludeOSMajor(arch=Arch.by_name(u"x86_64"),
                        osmajor=OSMajor.by_name(u"RedHatEnterpriseLinux3")),
            ]
        )
    out = run_client(["bkr", "machine-test", "--machine", system.fqdn])
    self.assert_(out.startswith("Submitted:"), out)
    with session.begin():
        new_job = Job.query.order_by(Job.id.desc()).first()
        distro_trees = [recipe.distro_tree for recipe in new_job.all_recipes]
        # Only the non-excluded arch/osmajor combinations get recipes.
        self.assert_(rhel3_i386 in distro_trees, distro_trees)
        self.assert_(rhel3_x86_64 not in distro_trees, distro_trees)
        self.assert_(rhel4_i386 not in distro_trees, distro_trees)
        self.assert_(rhel4_x86_64 in distro_trees, distro_trees)
def test_generate_by_whiteboard(self):
    """The matrix report can be generated for a single whiteboard, picks
    up newly completed jobs on regeneration, and supports selecting
    multiple whiteboards at once."""
    b = self.browser
    b.get(get_server_base() + 'matrix/')
    Select(b.find_element_by_name('whiteboard'))\
        .select_by_visible_text(self.job_whiteboard)
    b.find_element_by_xpath('//button[text()="Generate"]').click()
    b.find_element_by_xpath('//table[@id="matrix_datagrid"]'
            '//td[normalize-space(string(.))="Pass: 1"]')
    with session.begin():
        new_job = data_setup.create_completed_job(
                whiteboard=self.job_whiteboard, result=TaskResult.pass_,
                recipe_whiteboard=self.recipe_whiteboard)
    # Regenerating the report includes the newly completed job.
    b.find_element_by_xpath('//button[text()="Generate"]').click()
    b.find_element_by_xpath('//table[@id="matrix_datagrid"]'
            '//td[normalize-space(string(.))="Pass: 2"]')
    #Try with multiple whiteboards
    with session.begin():
        another_new_job = data_setup.create_completed_job(
                whiteboard=self.job_whiteboard_2, result=TaskResult.pass_,
                recipe_whiteboard=self.recipe_whiteboard)
    b.get(get_server_base() + 'matrix/')
    whiteboard = Select(b.find_element_by_name('whiteboard'))
    whiteboard.select_by_visible_text(self.job_whiteboard)
    whiteboard.select_by_visible_text(self.job_whiteboard_2)
    b.find_element_by_xpath('//button[text()="Generate"]').click()
    b.find_element_by_xpath('//table[@id="matrix_datagrid"]'
            '//td[normalize-space(string(.))="Pass: 3"]')
def test_deleted_job_results_not_shown(self):
    """Results from a soft-deleted job are excluded from the matrix report."""
    with session.begin():
        data_setup.create_completed_job(
                whiteboard=self.job_whiteboard, result=TaskResult.fail,
                recipe_whiteboard=self.recipe_whiteboard)
        data_setup.create_completed_job(
                whiteboard=self.job_whiteboard, result=TaskResult.warn,
                recipe_whiteboard=self.recipe_whiteboard)
        owner = data_setup.create_user(password='******')
        self.passed_job.owner = owner
    b = self.browser
    login(b, user=owner.user_name, password='******')
    b.get(get_server_base() + 'matrix')
    b.find_element_by_xpath("//select[@name='whiteboard']/option[@value='%s']"
            % self.job_whiteboard).click()
    b.find_element_by_xpath('//button[@type="submit" and text()="Generate"]').click()
    report_text = b.find_element_by_xpath("//div[@id='matrix-report']").text
    self.assert_('Pass: 1' in report_text)
    # Delete Job
    with session.begin():
        self.passed_job.soft_delete()
    # Assert it is no longer there
    b.get(get_server_base() + 'matrix')
    b.find_element_by_xpath("//select[@name='whiteboard']/option[@value='%s']"
            % self.job_whiteboard).click()
    b.find_element_by_xpath('//button[@type="submit" and text()="Generate"]').click()
    report_text = b.find_element_by_xpath("//div[@id='matrix-report']").text
    self.assert_('Pass: 1' not in report_text)
def test_can_return_manual_reservation_when_automated(self):
    """A reservation taken while the system was Manual can still be
    returned after the system is switched to Automated."""
    with session.begin():
        user = data_setup.create_user(password='******')
        system = data_setup.create_system(owner=user,
                status=SystemStatus.manual)
    b = self.browser
    login(b, user=user.user_name, password="******")
    # Take
    b.get(get_server_base() + 'view/%s' % system.fqdn)
    b.find_element_by_link_text('Take').click()
    b.find_element_by_xpath('//div[contains(@class, "system-quick-usage")]'
            '//span[@class="label" and text()="Reserved"]')
    # toggle status to Automated
    with session.begin():
        system.lab_controller = data_setup.create_labcontroller()
        system.status = SystemStatus.automated
    # Attempt to return
    b.get(get_server_base() + 'view/%s' % system.fqdn)
    b.find_element_by_link_text('Return').click()
    b.find_element_by_xpath('//div[contains(@class, "system-quick-usage")]'
            '//span[@class="label" and text()="Idle"]')
def init():
    """Seed the database with the initial set of Work rows.

    Idempotent: does nothing if any Work rows already exist.
    """
    if Work.query().all():
        return
    log.info("initializing data")
    works = [
        dict(title="Mountain Stream", file_path="mountain_stream",
             description="Taken in Colorado during the summer of 1999.",
             purchases=0, id=0),
        dict(title="Graffiti", file_path="graffiti",
             description="SpongeBob and Patrick in St. Joseph, Missouri.",
             purchases=0, id=1),
        dict(title="Lenexa Conference Center", file_path="lenexa_conference_center",
             description="Taken to scout out a wedding reception location.",
             purchases=0, id=2),
        dict(title="Glass", file_path="glass",
             description="Somewhere in California",
             purchases=0, id=3),
    ]
    session.begin()
    # Instantiating a Work registers it with the session; the unused local
    # binding from the original has been dropped.
    for w in works:
        Work(dict=w)
    session.commit()
def abort_dead_recipes(*args): filters = [not_(DistroTree.lab_controller_assocs.any())] if _virt_enabled(): filters.append(and_(not_(Recipe.systems.any()), Recipe.virt_status != RecipeVirtStatus.possible)) else: filters.append(not_(Recipe.systems.any())) recipes = MachineRecipe.query\ .join(MachineRecipe.recipeset).join(RecipeSet.job)\ .filter(Job.dirty_version == Job.clean_version)\ .outerjoin(Recipe.distro_tree)\ .filter(Recipe.status == TaskStatus.queued)\ .filter(or_(*filters)) if not recipes.count(): return False log.debug("Entering abort_dead_recipes") for recipe_id, in recipes.values(MachineRecipe.id): session.begin() try: abort_dead_recipe(recipe_id) session.commit() except exceptions.Exception, e: log.exception('Error in abort_dead_recipe(%s)', recipe_id) session.rollback() finally:
def test_remove_user_job_cancel(self):
    """Removing a user cancels their running jobs, with a recipe task
    result explaining why."""
    with session.begin():
        user = data_setup.create_user(user_name = data_setup.unique_name('aaaaa%s'))
        job = data_setup.create_job(owner=user)
        data_setup.mark_job_running(job)
    b = self.browser
    login(b)
    b.get(get_server_base() + 'users')
    b.find_element_by_xpath('//a[@href="remove?id=%d"]' %user.user_id).click()
    # XXX: not necessary, but doing it here to buy time, since sometimes the
    # job cancellation seems to take a while
    logout(b)
    # reflect the change in recipe task status when
    # update_dirty_jobs() is called
    session.expunge_all()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job.id)
        self.assertEquals(job.status, TaskStatus.cancelled)
        self.assertIn('User %s removed' % user.user_name,
                job.recipesets[0].recipes[0].tasks[0].results[0].log)
def provision_scheduled_recipesets(*args):
    """
    if All recipes in a recipeSet are in Scheduled state then move them to
     Running.

    Returns False when there was nothing to do, True otherwise. Each
    recipe set is provisioned in its own transaction so one failure does
    not affect the others.
    """
    recipesets = RecipeSet.query.join(RecipeSet.job)\
            .filter(and_(Job.dirty_version == Job.clean_version,
                Job.deleted == None))\
            .filter(not_(RecipeSet.recipes.any(
                Recipe.status != TaskStatus.scheduled)))
    if not recipesets.count():
        return False
    log.debug("Entering provision_scheduled_recipesets")
    for rs_id, in recipesets.values(RecipeSet.id):
        log.info("scheduled_recipesets: RS:%s" % rs_id)
        session.begin()
        try:
            provision_scheduled_recipeset(rs_id)
            session.commit()
        except exceptions.Exception:
            log.exception('Error in provision_scheduled_recipeset(%s)', rs_id)
            session.rollback()
        finally:
            # Always release the session so the next iteration starts clean.
            session.close()
    log.debug("Exiting provision_scheduled_recipesets")
    return True
def check_console_log_registered(self):
    """Return True when exactly one console.log row is registered for
    the recipe under test."""
    with session.begin():
        matching = LogRecipe.query.filter_by(
                parent=self.recipe, filename=u'console.log')
        return matching.count() == 1
def setup_package():
    """Package-level test setup: load config, populate baseline data,
    clean the task RPM directory, and start the backing services
    (gunicorn app server and slapd)."""
    assert os.path.exists( CONFIG_FILE), 'Config file %s must exist' % CONFIG_FILE
    load_config(configfile=CONFIG_FILE)
    log_to_stream(sys.stdout, level=logging.DEBUG)
    from bkr.inttest import data_setup
    if not 'BEAKER_SKIP_INIT_DB' in os.environ:
        data_setup.setup_model()
        with session.begin():
            data_setup.create_labcontroller() #always need a labcontroller
            data_setup.create_task(
                    name=u'/distribution/install',
                    requires=u'make gcc nfs-utils wget procmail redhat-lsb ntp '
                        u'@development-tools @development-libs @development '
                        u'@desktop-platform-devel @server-platform-devel '
                        u'libxml2-python expect pyOpenSSL'.split())
            data_setup.create_task(
                    name=u'/distribution/reservesys',
                    requires=u'emacs vim-enhanced unifdef sendmail'.split())
            data_setup.create_task(name=u'/distribution/utils/dummy')
            data_setup.create_task(name=u'/distribution/inventory')
            data_setup.create_distro()
    if os.path.exists(turbogears.config.get('basepath.rpms')):
        # Remove any task RPMs left behind by previous test runs
        for entry in os.listdir(turbogears.config.get('basepath.rpms')):
            shutil.rmtree(os.path.join(turbogears.config.get('basepath.rpms'),
                    entry), ignore_errors=True)
    else:
        os.mkdir(turbogears.config.get('basepath.rpms'))
    setup_slapd()
    if turbogears.config.get('openstack.identity_api_url'):
        setup_openstack()
    turbogears.testutil.make_app(Root)
    turbogears.testutil.start_server()
    global processes
    processes = []
    if 'BEAKER_SERVER_BASE_URL' not in os.environ:
        # need to start the server ourselves
        # Usual pkg_resources ugliness is needed to ensure gunicorn doesn't
        # import pkg_resources before we get a chance to specify our
        # requirements in bkr.server.wsgi
        processes.extend([
            Process('gunicorn', args=[sys.executable, '-c',
                    '__requires__ = ["CherryPy < 3.0"]; import pkg_resources; '
                    'from gunicorn.app.wsgiapp import run; run()',
                    '--bind', ':%s' % turbogears.config.get('server.socket_port'),
                    '--workers', '8', '--access-logfile', '-', '--preload',
                    'bkr.server.wsgi:application'],
                listen_port=turbogears.config.get('server.socket_port')),
        ])
    processes.extend([
        Process('slapd', args=['slapd', '-d0', '-F' + slapd_config_dir,
                '-hldap://127.0.0.1:3899/'],
            listen_port=3899, stop_signal=signal.SIGINT),
    ])
    try:
        for process in processes:
            process.start()
    except:
        # Don't leave half-started services running if any start fails.
        for process in processes:
            process.stop()
        raise
def cleanup_openstack():
    """Remove the iPXE image which was registered in Glance for the
    configured OpenStack region."""
    with session.begin():
        region = OpenStackRegion.query.one()
        image_id = region.ipxe_image_id
        log.info('Cleaning up Glance image %s', image_id)
        _glance().images.delete(image_id)
def _decorated(*args, **kwargs):
    # Run the wrapped function (closed over as `func`) inside its own
    # database transaction; session.begin() as a context manager is
    # presumed to commit on success and roll back on exception -- confirm
    # against the session implementation in use.
    with session.begin():
        func(*args, **kwargs)
def begin_session():
    # Explicitly open a new transaction on the shared session object.
    session.begin()
def restart_transaction_patched(args):
    # Discard the current (failed) transaction and immediately start a
    # fresh one so subsequent work happens in a clean transaction.
    session.rollback()
    session.begin()
def check_watchdog_extended(self, recipe, by_seconds):
    """Return True if the recipe's watchdog has been extended by roughly
    *by_seconds*, allowing a 5-second tolerance; also asserts the recipe
    is still Running."""
    with session.begin():
        session.refresh(recipe)
        self.assertEquals(recipe.status, TaskStatus.running)
        remaining = recipe.status_watchdog()
        # 5 seconds tolerance
        return by_seconds - remaining <= 5
def setUp(self):
    """Create a fresh user account and a browser session for each test."""
    with session.begin():
        self.user = data_setup.create_user(password=u'password')
    self.browser = self.get_browser()
def populate_db(user_name=None, password=None, user_display_name=None,
        user_email_address=None):
    """Populate a fresh Beaker database with baseline rows.

    Creates the admin and lab_controller groups, an optional admin user,
    the standard permissions, and the fixed lookup tables (hypervisors,
    kernel types, arches, power types, key types, responses, retention
    tags, config items). Idempotent: existing rows are reused.

    :param user_name: optional admin account name (utf8-encoded str)
    :param password: optional plaintext password for that account
    :param user_display_name: optional display name for that account
    :param user_email_address: optional e-mail address for that account
    """
    # NOTE(review): no try/rollback around the body; a mid-run failure
    # leaves the transaction open until the caller cleans up — confirm
    # callers handle this.
    session.begin()

    try:
        admin = Group.by_name(u'admin')
    except InvalidRequestError:
        admin = Group(group_name=u'admin', display_name=u'Admin')
        session.add(admin)

    try:
        lab_controller = Group.by_name(u'lab_controller')
    except InvalidRequestError:
        lab_controller = Group(group_name=u'lab_controller',
                display_name=u'Lab Controller')
        session.add(lab_controller)

    #Setup User account
    if user_name:
        user = User.lazy_create(user_name=user_name.decode('utf8'))
        if password:
            user.password = password.decode('utf8')
        if user_display_name:
            user.display_name = user_display_name.decode('utf8')
        if user_email_address:
            user.email_address = user_email_address.decode('utf8')
        # Ensure the user is in the 'admin' group as an owner.
        # Flush for lazy_create.
        session.flush()
        user_group_assoc = UserGroup.lazy_create(user_id=user.user_id,
                group_id=admin.group_id)
        user_group_assoc.is_owner = True

    # Create distro_expire perm if not present
    try:
        distro_expire_perm = Permission.by_name(u'distro_expire')
    except NoResultFound:
        distro_expire_perm = Permission(u'distro_expire')
        session.add(distro_expire_perm)

    # Create proxy_auth perm if not present
    try:
        proxy_auth_perm = Permission.by_name(u'proxy_auth')
    except NoResultFound:
        proxy_auth_perm = Permission(u'proxy_auth')
        session.add(proxy_auth_perm)

    # Create tag_distro perm if not present
    try:
        tag_distro_perm = Permission.by_name(u'tag_distro')
    except NoResultFound:
        tag_distro_perm = Permission(u'tag_distro')
        admin.permissions.append(tag_distro_perm)

    # Create stop_task perm if not present
    try:
        stop_task_perm = Permission.by_name(u'stop_task')
    except NoResultFound:
        stop_task_perm = Permission(u'stop_task')
        lab_controller.permissions.append(stop_task_perm)
        admin.permissions.append(stop_task_perm)

    # Create secret_visible perm if not present
    try:
        secret_visible_perm = Permission.by_name(u'secret_visible')
    except NoResultFound:
        secret_visible_perm = Permission(u'secret_visible')
        lab_controller.permissions.append(secret_visible_perm)
        admin.permissions.append(secret_visible_perm)

    #Setup Hypervisors Table
    if Hypervisor.query.count() == 0:
        for h in [u'KVM', u'Xen', u'HyperV', u'VMWare']:
            session.add(Hypervisor(hypervisor=h))

    #Setup kernel_type Table
    if KernelType.query.count() == 0:
        for type in [u'default', u'highbank', u'imx', u'omap', u'tegra']:
            session.add(KernelType(kernel_type=type, uboot=False))
        for type in [u'mvebu']:
            session.add(KernelType(kernel_type=type, uboot=True))

    #Setup base Architectures
    if Arch.query.count() == 0:
        for arch in [u'i386', u'x86_64', u'ia64', u'ppc', u'ppc64',
                u'ppc64le', u's390', u's390x', u'armhfp', u'aarch64', u'arm']:
            session.add(Arch(arch))

    #Setup base power types
    if PowerType.query.count() == 0:
        for power_type in [u'apc_snmp', u'apc_snmp_then_etherwake',
                u'bladecenter', u'bladepap', u'drac', u'ether_wake',
                u'hyper-v', u'ilo', u'integrity', u'ipmilan', u'ipmitool',
                u'lpar', u'rsa', u'virsh', u'wti']:
            session.add(PowerType(power_type))

    #Setup key types
    if Key.query.count() == 0:
        session.add(Key(u'DISKSPACE', True))
        session.add(Key(u'COMMENT'))
        session.add(Key(u'CPUFAMILY', True))
        session.add(Key(u'CPUFLAGS'))
        session.add(Key(u'CPUMODEL'))
        session.add(Key(u'CPUMODELNUMBER', True))
        session.add(Key(u'CPUSPEED', True))
        session.add(Key(u'CPUVENDOR'))
        session.add(Key(u'DISK', True))
        session.add(Key(u'FORMFACTOR'))
        session.add(Key(u'HVM'))
        session.add(Key(u'MEMORY', True))
        session.add(Key(u'MODEL'))
        session.add(Key(u'MODULE'))
        session.add(Key(u'NETWORK'))
        session.add(Key(u'NR_DISKS', True))
        session.add(Key(u'NR_ETH', True))
        session.add(Key(u'NR_IB', True))
        session.add(Key(u'PCIID'))
        session.add(Key(u'PROCESSORS', True))
        session.add(Key(u'RTCERT'))
        session.add(Key(u'SCRATCH'))
        session.add(Key(u'STORAGE'))
        session.add(Key(u'USBID'))
        session.add(Key(u'VENDOR'))
        session.add(Key(u'XENCERT'))
        session.add(Key(u'NETBOOT_METHOD'))

    #Setup ack/nak responses
    if Response.query.count() == 0:
        session.add(Response(response=u'ack'))
        session.add(Response(response=u'nak'))

    if RetentionTag.query.count() == 0:
        session.add(RetentionTag(tag=u'scratch', is_default=1,
                expire_in_days=30))
        session.add(RetentionTag(tag=u'60days', needs_product=False,
                expire_in_days=60))
        session.add(RetentionTag(tag=u'120days', needs_product=False,
                expire_in_days=120))
        session.add(RetentionTag(tag=u'active', needs_product=True))
        session.add(RetentionTag(tag=u'audit', needs_product=True))

    config_items = [
        # name, description, numeric
        (u'root_password',
         u'Plaintext root password for provisioned systems', False),
        (u'root_password_validity',
         u"Maximum number of days a user's root password is valid for", True),
        (u'guest_name_prefix',
         u'Prefix for names of dynamic guests in OpenStack', False),
    ]
    for name, description, numeric in config_items:
        ConfigItem.lazy_create(name=name, description=description,
                numeric=numeric)
    if ConfigItem.by_name(u'root_password').current_value() is None:
        ConfigItem.by_name(u'root_password').set(u'beaker',
                user=admin.users[0])

    session.commit()
    session.close()
def assert_panic_detected(self, message):
    """Assert the first task has exactly one result: a panic whose log
    matches `message`."""
    with session.begin():
        results = self.recipe.tasks[0].results
        self.assertEquals(len(results), 1)
        panic_result = results[0]
        self.assertEquals(panic_result.result, TaskResult.panic)
        self.assertEquals(panic_result.log, message)
def setUp(self):
    # Create a lab controller whose user account has a known password,
    # then authenticate to the XML-RPC server as that user.
    with session.begin():
        self.lc = data_setup.create_labcontroller()
        self.lc.user.password = u'logmein'
    self.server = self.get_server()
    self.server.auth.login_password(self.lc.user.user_name, u'logmein')
def test_by_date_added(self):
    """Exercise the System/Added search column with the 'is', 'before'
    and 'after' operations, including a two-row combined search."""
    with session.begin():
        new_system = data_setup.create_system()
        new_system.date_added = datetime.datetime(2020, 6, 21, 11, 30, 0)
        old_system = data_setup.create_system()
        old_system.date_added = datetime.datetime(2001, 1, 15, 14, 12, 0)
    b = self.browser

    def fill_criteria(row, operation, value, clear=True):
        # Set one search row to System/Added with the given operation
        # and date value.
        Select(b.find_element_by_name('systemsearch-%d.table' % row))\
            .select_by_visible_text('System/Added')
        Select(b.find_element_by_name('systemsearch-%d.operation' % row))\
            .select_by_visible_text(operation)
        value_field = b.find_element_by_name('systemsearch-%d.value' % row)
        if clear:
            value_field.clear()
        value_field.send_keys(value)

    b.get(get_server_base())
    b.find_element_by_link_text('Show Search Options').click()
    wait_for_animation(b, '#searchform')

    # exact date matches only the old system
    fill_criteria(0, 'is', '2001-01-15', clear=False)
    b.find_element_by_id('searchform').submit()
    check_system_search_results(b, present=[old_system], absent=[new_system])

    # before: only the old system qualifies
    fill_criteria(0, 'before', '2001-01-16')
    b.find_element_by_id('searchform').submit()
    check_system_search_results(b, present=[old_system], absent=[new_system])

    # after a date past both systems: no results
    fill_criteria(0, 'after', '2020-12-31')
    b.find_element_by_id('searchform').submit()
    b.find_element_by_xpath('//table[@id="widget" and not(.//td)]')

    # after: only the new system qualifies
    fill_criteria(0, 'after', '2020-06-20')
    b.find_element_by_id('searchform').submit()
    check_system_search_results(b, present=[new_system], absent=[old_system])

    # combined after + before range around the new system only
    fill_criteria(0, 'after', '2020-06-20')
    b.find_element_by_id('doclink').click()
    fill_criteria(1, 'before', '2020-06-22', clear=False)
    b.find_element_by_id('searchform').submit()
    check_system_search_results(b, present=[new_system], absent=[old_system])
def setup_package():
    """Start (or attach to) the lab controller daemons for the test run.

    If BEAKER_LABCONTROLLER_HOSTNAME is unset, a local lab controller and
    its daemons (beaker-proxy, beaker-provision, beaker-watchdog) are
    started; otherwise the externally-running lab controller named in the
    environment variable is registered in the database.
    """
    global lc_fqdn, _daemons_running_externally
    conf = get_conf()
    if not 'BEAKER_LABCONTROLLER_HOSTNAME' in os.environ:
        # Need to start the lab controller daemons ourselves
        with session.begin():
            user = data_setup.create_user(
                    user_name=conf.get('USERNAME').decode('utf8'),
                    password=conf.get('PASSWORD'))
            lc = data_setup.create_labcontroller(fqdn=u'localhost', user=user)
        processes.extend([
            Process('beaker-proxy', args=['python',
                    '../LabController/src/bkr/labcontroller/main.py',
                    '-c', config_file, '-f'],
                listen_port=8000, stop_signal=signal.SIGTERM),
            Process('beaker-provision', args=['python',
                    '../LabController/src/bkr/labcontroller/provision.py',
                    '-c', config_file, '-f'],
                stop_signal=signal.SIGTERM),
            Process('beaker-watchdog', args=['python',
                    '../LabController/src/bkr/labcontroller/watchdog.py',
                    '-c', config_file, '-f'],
                stop_signal=signal.SIGTERM),
        ])
        lc_fqdn = u'localhost'
    else:
        _daemons_running_externally = True
        # We have been passed a space separated list of LCs
        lab_controllers = os.environ.get(
                'BEAKER_LABCONTROLLER_HOSTNAME').decode('utf8')
        lab_controllers_list = lab_controllers.split()
        # Just get the last one, it shouldn't matter to us
        lab_controller = lab_controllers_list.pop()
        # Make sure that the LC is in the DB
        # NOTE(review): unlike the branch above, this call is not wrapped
        # in session.begin() — presumably data_setup handles its own
        # transaction here; confirm.
        data_setup.create_labcontroller(fqdn=lab_controller)
        lc_fqdn = lab_controller
    # Clear out any existing job logs, so that they are registered correctly
    # when first created.
    # If we've been passed a remote hostname for the LC, we assume it's been
    # freshly provisioned and the dir will already be empty.
    shutil.rmtree(conf.get('CACHEPATH'), ignore_errors=True)
    try:
        for process in processes:
            process.start()
    except:
        # Roll back any processes that did start before re-raising.
        for process in processes:
            process.stop()
        raise
def log_delete(print_logs=False, dry=False, limit=None): if dry: logger.info('Dry run only') logger.info('Getting expired jobs') failed = False if not dry: requests_session = requests.Session() log_delete_user = config.get('beaker.log_delete_user') log_delete_password = config.get('beaker.log_delete_password') available_auths = [] available_auth_names = [] if _kerberos_available: available_auths.append( requests_kerberos.HTTPKerberosAuth( mutual_authentication=requests_kerberos.DISABLED)) available_auth_names.append('Kerberos') if log_delete_user and log_delete_password: available_auths.append( requests.auth.HTTPDigestAuth(log_delete_user, log_delete_password)) available_auth_names.append('HTTPDigestAuth') requests_session.auth = MultipleAuth(available_auths) logger.debug('Available authentication methods: %s' % ', '.join(available_auth_names)) for jobid, in Job.query.filter(Job.is_expired).limit(limit).values(Job.id): logger.info('Deleting logs for job %s', jobid) try: session.begin() job = Job.by_id(jobid) all_logs = job.all_logs(load_parent=False) # We always delete entire directories, not individual log files, # because that's faster, and because we never mix unrelated log # files together in the same directory so it's safe to do that. # We keep a trailing slash on the directories otherwise when we try # to DELETE them, Apache will first redirect us to the trailing # slash. log_dirs = (os.path.dirname(log.full_path) + '/' for log in all_logs) for path in remove_descendants(log_dirs): if not dry: if urlparse.urlparse(path).scheme: # We need to handle redirects ourselves, since requests # turns DELETE into GET on 302 which we do not want. 
response = requests_session.delete( path, allow_redirects=False) redirect_limit = 10 while redirect_limit > 0 and response.status_code in ( 301, 302, 303, 307): response = requests_session.delete( response.headers['Location'], allow_redirects=False) redirect_limit -= 1 if response.status_code not in (200, 204, 404): response.raise_for_status() else: try: shutil.rmtree(path) except OSError, e: if e.errno == errno.ENOENT: pass if print_logs: print path if not dry: job.delete() session.commit() session.close() else: session.close() except Exception, e: logger.exception('Exception while deleting logs for job %s', jobid) failed = True session.close() continue
            # (tail of the preceding, truncated test method)
            self.assertIn('does not have permission to power system',
                    e.faultString)

    def test_cannot_power_system_in_use(self):
        # A system currently in use by another user must refuse power
        # commands from the owner, and no command may be enqueued.
        with session.begin():
            owner = data_setup.create_user(password=u'password')
            user = data_setup.create_user()
            system = data_setup.create_system(owner=owner)
            system.user = user
        self.server.auth.login_password(owner.user_name, 'password')
        try:
            self.server.systems.power('on', system.fqdn)
            self.fail('should raise')
        except xmlrpclib.Fault, e:
            self.assertIn('System is in use', e.faultString)
        with session.begin():
            # No power command was queued for the refused request.
            self.assertEquals(system.command_queue, [])

    def check_power_action(self, action, command_actions):
        # Helper: issue `action` against a freshly configured system and
        # assert the queued commands match `command_actions` in order.
        with session.begin():
            user = data_setup.create_user(password=u'password')
            system = data_setup.create_system()
            data_setup.configure_system_power(system)
            system.lab_controller = self.lab_controller
            system.user = user
        self.server.auth.login_password(user.user_name, 'password')
        self.server.systems.power(action, system.fqdn)
        with session.begin():
            for i, a in enumerate(command_actions):
                self.assertEqual(system.command_queue[i].action, a)
def cleanup_system(self, system):
    # Retire a test system: refresh its state from the database, mark it
    # removed, and detach it from its lab controller.
    with session.begin():
        session.expire(system)
        system.status = SystemStatus.removed
        system.lab_controller = None
def test_reserved_openstack_instance(self):
    # Reservation notification for a recipe on a dynamically created
    # OpenStack VM: the mail should point at the OpenStack dashboard
    # rather than a Beaker system page.
    with session.begin():
        owner = data_setup.create_user(
                email_address=u'*****@*****.**')
        distro_tree = data_setup.create_distro_tree(
                distro_name=u'MicrowaveOS-20141016.1',
                variant=u'ThreeHeats', arch=u'x86_64')
        job = data_setup.create_job(owner=owner, distro_tree=distro_tree,
                whiteboard=u'Operation Righteous Cowboy Lightning',
                recipe_whiteboard=u'Everything Sunny All the Time Always')
        recipe = job.recipesets[0].recipes[0]
        data_setup.mark_recipe_running(recipe, virt=True,
                instance_id=uuid.UUID('00000000-1111-2222-3333-444444444444'),
                fqdn=u'bitenuker.ge.invalid')
    mail_capture_thread.start_capturing()
    with session.begin():
        bkr.server.mail.reservesys_notify(recipe)
    captured_mails = mail_capture_thread.stop_capturing()
    self.assertEqual(len(captured_mails), 1)
    sender, rcpts, raw_msg = captured_mails[0]
    self.assertEqual(rcpts, [owner.email_address])
    msg = email.message_from_string(raw_msg)
    self.assertEqual(msg['To'], owner.email_address)
    self.assertEqual(msg['Subject'],
            '[Beaker System Reserved] bitenuker.ge.invalid')
    self.assertEqual(msg['X-Beaker-Notification'], 'system-reservation')
    # NOTE(review): the exact line breaks of this expected body were lost
    # in transit and have been reconstructed — verify against the mail
    # template before relying on this diff.
    expected_mail_body = u"""\
** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
This System is reserved by [email protected]

To return this system early, you can click on 'Release System' against this recipe from the Web UI. Ensure you have your logs off the system before returning to Beaker.

%(base)srecipes/%(recipeid)s

For system details, see: http://openstack.example.invalid/dashboard/project/instances/00000000-1111-2222-3333-444444444444/

For the default root password, see: %(base)sprefs

Beaker Test information:
HOSTNAME=bitenuker.ge.invalid
JOBID=%(jobid)s
RECIPEID=%(recipeid)s
DISTRO=MicrowaveOS-20141016.1 ThreeHeats x86_64
ARCHITECTURE=x86_64

Job Whiteboard: Operation Righteous Cowboy Lightning

Recipe Whiteboard: Everything Sunny All the Time Always
** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **""" \
            % dict(base=get_server_base(), recipeid=recipe.id, jobid=job.id)
    actual_mail_body = msg.get_payload(decode=True)
    self.assertMultiLineEqual(actual_mail_body, expected_mail_body)
def test_set_bogus_property(self):
    # Pushing inventory data containing an unknown property name must
    # not raise — presumably the server ignores unrecognised keys
    # (no assertion is made beyond "does not blow up").
    with session.begin():
        system = data_setup.create_system()
    self.server.push(system.fqdn, {'Bothria': 8})
def _commands_finished():
    # Polling predicate: True once the first queued command has reached
    # a terminal state. `system` is closed over from the enclosing test.
    with session.begin():
        session.expire_all()
        terminal_states = (CommandStatus.completed, CommandStatus.failed)
        return system.command_queue[0].status in terminal_states
def test_system_reserved_notification_on(self):
    # Reservation notification for a recipe running on a real Beaker
    # system: the mail should link to the system's Beaker page.
    with session.begin():
        owner = data_setup.create_user(
                email_address=u'*****@*****.**')
        system = data_setup.create_system(
                fqdn=u'funcooker.ge.invalid',
                lab_controller=data_setup.create_labcontroller())
        distro_tree = data_setup.create_distro_tree(
                distro_name=u'MicrowaveOS-20141016.0',
                variant=u'ThreeHeats', arch=u'x86_64')
        job = data_setup.create_running_job(owner=owner, system=system,
                distro_tree=distro_tree,
                whiteboard=u'Chain Reaction of Mental Anguish',
                recipe_whiteboard=u'Christmas Attack Zone')
        recipe = job.recipesets[0].recipes[0]
    mail_capture_thread.start_capturing()
    with session.begin():
        bkr.server.mail.reservesys_notify(job.recipesets[0].recipes[0])
    captured_mails = mail_capture_thread.stop_capturing()
    self.assertEqual(len(captured_mails), 1)
    sender, rcpts, raw_msg = captured_mails[0]
    self.assertEqual(rcpts, [owner.email_address])
    msg = email.message_from_string(raw_msg)
    self.assertEqual(msg['To'], owner.email_address)
    self.assertEqual(msg['Subject'],
            '[Beaker System Reserved] funcooker.ge.invalid')
    self.assertEqual(msg['X-Beaker-Notification'], 'system-reservation')
    # NOTE(review): the exact line breaks of this expected body were lost
    # in transit and have been reconstructed — verify against the mail
    # template before relying on this diff.
    expected_mail_body = u"""\
** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
This System is reserved by [email protected]

To return this system early, you can click on 'Release System' against this recipe from the Web UI. Ensure you have your logs off the system before returning to Beaker.

%(base)srecipes/%(recipeid)s

For ssh, kvm, serial and power control operations please look here:

%(base)sview/funcooker.ge.invalid

For the default root password, see: %(base)sprefs

Beaker Test information:
HOSTNAME=funcooker.ge.invalid
JOBID=%(jobid)s
RECIPEID=%(recipeid)s
DISTRO=MicrowaveOS-20141016.0 ThreeHeats x86_64
ARCHITECTURE=x86_64

Job Whiteboard: Chain Reaction of Mental Anguish

Recipe Whiteboard: Christmas Attack Zone
** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **""" \
            % dict(base=get_server_base(), recipeid=recipe.id, jobid=job.id)
    actual_mail_body = msg.get_payload(decode=True)
    self.assertEqual(actual_mail_body, expected_mail_body)
def log_delete(print_logs=False, dry=False, limit=None): if dry: logger.info('Dry run only') logger.info('Getting expired jobs') failed = False if not dry: requests_session = requests.Session() log_delete_user = config.get('beaker.log_delete_user') log_delete_password = config.get('beaker.log_delete_password') available_auths = [] available_auth_names = [] if _kerberos_available: available_auths.append( requests_kerberos.HTTPKerberosAuth( mutual_authentication=requests_kerberos.DISABLED)) available_auth_names.append('Kerberos') if log_delete_user and log_delete_password: available_auths.append( requests.auth.HTTPDigestAuth(log_delete_user, log_delete_password)) available_auth_names.append('HTTPDigestAuth') requests_session.auth = MultipleAuth(available_auths) logger.debug('Available authentication methods: %s' % ', '.join(available_auth_names)) for job, logs in Job.expired_logs(limit): logger.info('Deleting logs for %s', job.t_id) try: session.begin() for log in logs: if not dry: if urlparse.urlparse(log).scheme: # We need to handle redirects ourselves, since requests # turns DELETE into GET on 302 which we do not want. response = requests_session.delete( log, allow_redirects=False) redirect_limit = 10 while redirect_limit > 0 and response.status_code in ( 301, 302, 303, 307): response = requests_session.delete( response.headers['Location'], allow_redirects=False) redirect_limit -= 1 if response.status_code not in (200, 204, 404): response.raise_for_status() else: try: shutil.rmtree(log) except OSError, e: if e.errno == errno.ENOENT: pass if print_logs: print log if not dry: job.delete() session.commit() session.close() else: session.close() except Exception, e: logger.exception('Exception while deleting logs for %s', job.t_id) failed = True # session needs to be open for job.t_id in the log message above session.close() continue
def _command_completed():
    # Polling predicate: reload the command from the database and report
    # whether it has finished successfully. `command` is closed over
    # from the enclosing test.
    with session.begin():
        session.refresh(command)
        current_status = command.status
        return current_status == CommandStatus.completed
def provision_virt_recipe(recipe_id):
    """Try to provision a dynamic OpenStack VM for the given queued recipe.

    Picks the cheapest matching flavor, creates the VM, and moves the
    recipe Queued -> Scheduled -> Waiting. On any error the VM is
    destroyed (if created), the transaction is rolled back, and the
    recipe's virt status is set to failed so it is not retried.
    """
    log.debug('Attempting to provision dynamic virt guest for recipe %s',
            recipe_id)
    session.begin()
    try:
        recipe = Recipe.by_id(recipe_id)
        job_owner = recipe.recipeset.job.owner
        manager = dynamic_virt.VirtManager(job_owner)
        available_flavors = manager.available_flavors()
        # We want them in order of smallest to largest, so that we can pick the
        # smallest flavor that satisfies the recipe's requirements. Sorting by RAM
        # is a decent approximation.
        possible_flavors = XmlHost.from_string(recipe.host_requires)\
            .filter_openstack_flavors(available_flavors, manager.lab_controller)
        if not possible_flavors:
            log.info('No OpenStack flavors matched recipe %s, marking precluded',
                    recipe.id)
            recipe.virt_status = RecipeVirtStatus.precluded
            return
        # cheapest flavor has the smallest disk and ram
        # id guarantees consistency of our results
        flavor = min(possible_flavors,
                key=lambda flavor: (flavor.ram, flavor.disk, flavor.id))
        vm_name = '%srecipe-%s' % (
                ConfigItem.by_name(u'guest_name_prefix').current_value(u'beaker-'),
                recipe.id)
        log.debug('Creating VM named %s as flavor %s', vm_name, flavor)
        vm = manager.create_vm(vm_name, flavor)
        vm.instance_created = datetime.utcnow()
        try:
            recipe.createRepo()
            recipe.clear_candidate_systems()
            recipe.watchdog = Watchdog()
            recipe.resource = vm
            recipe.recipeset.lab_controller = manager.lab_controller
            recipe.virt_status = RecipeVirtStatus.succeeded
            recipe.schedule()
            log.info("recipe ID %s moved from Queued to Scheduled "
                    "by provision_virt_recipe", recipe.id)
            recipe.waiting()
            recipe.provision()
            log.info("recipe ID %s moved from Scheduled to Waiting "
                    "by provision_virt_recipe", recipe.id)
        except:
            # Tear down the VM we just created so it does not leak, then
            # re-raise the original exception with its traceback.
            exc_type, exc_value, exc_tb = sys.exc_info()
            try:
                manager.destroy_vm(vm)
            except Exception:
                log.exception('Failed to clean up VM %s during '
                        'provision_virt_recipe, leaked!', vm.instance_id)
                # suppress this exception so the original one is not masked
            raise exc_type, exc_value, exc_tb
        session.commit()
    except Exception as e:
        log.exception('Error in provision_virt_recipe(%s)', recipe_id)
        session.rollback()
        # As an added precaution, let's try and avoid this recipe in future
        with session.begin():
            recipe = Recipe.by_id(recipe_id)
            recipe.virt_status = RecipeVirtStatus.failed
    finally:
        session.close()
def test_inventory_date_search(self): # date times today = datetime.date.today() time_now = datetime.datetime.combine(today, datetime.time(0, 0)) time_delta1 = datetime.datetime.combine(today, datetime.time(0, 30)) time_tomorrow = time_now + datetime.timedelta(days=1) # today date date_today = time_now.date().isoformat() date_tomorrow = time_tomorrow.date().isoformat() with session.begin(): not_inv = data_setup.create_system() inv1 = data_setup.create_system() inv1.date_lastcheckin = time_now inv2 = data_setup.create_system() inv2.date_lastcheckin = time_delta1 inv3 = data_setup.create_system() inv3.date_lastcheckin = time_tomorrow # uninventoried out = run_client([ 'bkr', 'list-systems', '--xml-filter', '<system>' '<last_inventoried op="=" value="" />' '</system>' ]) self.returned_systems = out.splitlines() self.check_systems(present=[not_inv], absent=[inv1, inv2, inv3]) # Return all inventoried systems out = run_client([ 'bkr', 'list-systems', '--xml-filter', '<system>' '<last_inventoried op="!=" value="" />' '</system>' ]) self.returned_systems = out.splitlines() self.check_systems(present=[inv1, inv2, inv2], absent=[not_inv]) # inventoried on a certain date out = run_client([ 'bkr', 'list-systems', '--xml-filter', '<system>' '<last_inventoried op="=" value="%s" />' '</system>' % date_today ]) self.returned_systems = out.splitlines() self.check_systems(present=[inv1, inv2], absent=[not_inv, inv3]) # not inventoried on a certain date out = run_client([ 'bkr', 'list-systems', '--xml-filter', '<system>' '<last_inventoried op="!=" value="%s" />' '</system>' % date_today ]) self.returned_systems = out.splitlines() self.check_systems(present=[inv3], absent=[not_inv, inv1, inv2]) # Before a certain date out = run_client([ 'bkr', 'list-systems', '--xml-filter', '<system>' '<last_inventoried op="<" value="%s" />' '</system>' % date_tomorrow ]) self.returned_systems = out.splitlines() self.check_systems(present=[inv1, inv2], absent=[not_inv, inv3]) # On or before a 
certain date out = run_client([ 'bkr', 'list-systems', '--xml-filter', '<system>' '<last_inventoried op="<=" value="%s" />' '</system>' % date_tomorrow ]) self.returned_systems = out.splitlines() self.check_systems(present=[inv1, inv2, inv3], absent=[not_inv]) # Only date is valid, not date time try: out = run_client([ 'bkr', 'list-systems', '--xml-filter', '<system>' '<last_inventoried op=">" value="%s 00:00:00" />' '</system>' % today ]) self.fail('Must Fail or Die') except ClientError, e: self.assertEqual(e.status, 1) self.assert_('Invalid date format' in e.stderr_output, e.stderr_output)
def setUp(self):
    # Create an owner (with a known password) and a system owned by them
    # for the tests in this class.
    with session.begin():
        self.owner = data_setup.create_user(password=u'owner')
        self.system = data_setup.create_system(owner=self.owner)
def schedule_queued_recipes(*args):
    """Find all queued machine recipes that can be scheduled and schedule
    each one in its own nested transaction, aborting recipes whose
    scheduling raises an unexpected error.

    Returns False early when there is nothing to schedule.
    """
    session.begin()
    try:
        # This query returns a queued host recipe and and the guest which has
        # the most recent distro tree. It is to be used as a derived table.
        latest_guest_distro = select([machine_guest_map.c.machine_recipe_id.label('host_id'),
                func.max(DistroTree.date_created).label('latest_distro_date')],
                from_obj=[machine_guest_map.join(GuestRecipe.__table__,
                        machine_guest_map.c.guest_recipe_id==GuestRecipe.__table__.c.id). \
                    join(Recipe.__table__).join(DistroTree.__table__)],
                whereclause=Recipe.status=='Queued',
                group_by=machine_guest_map.c.machine_recipe_id).alias()

        hosts_lab_controller_distro_map = aliased(LabControllerDistroTree)
        hosts_distro_tree = aliased(DistroTree)
        guest_recipe = aliased(Recipe)
        guests_distro_tree = aliased(DistroTree)
        guests_lab_controller = aliased(LabController)

        # This query will return queued recipes that are eligible to be scheduled.
        # They are determined to be eligible if:
        # * They are clean
        # * There are systems available (see the filter criteria) in lab controllers where
        #   the recipe's distro tree is available.
        # * If it is a host recipe, the most recently created distro of all
        #   the guest recipe's distros is available in at least one of the same
        #   lab controllers as that of the host's distro tree.
        #
        # Also note that we do not try to handle the situation where the guest and host never
        # have a common labcontroller. In that situation the host and guest would stay queued
        # until that situation was rectified.
        recipes = MachineRecipe.query\
            .join(Recipe.recipeset, RecipeSet.job)\
            .filter(Job.dirty_version == Job.clean_version)\
            .outerjoin((guest_recipe, MachineRecipe.guests))\
            .outerjoin((guests_distro_tree,
                guest_recipe.distro_tree_id == guests_distro_tree.id))\
            .outerjoin((latest_guest_distro,
                and_(latest_guest_distro.c.host_id == MachineRecipe.id,
                    latest_guest_distro.c.latest_distro_date == \
                        guests_distro_tree.date_created)))\
            .outerjoin(guests_distro_tree.lab_controller_assocs,
                guests_lab_controller)\
            .join(Recipe.systems)\
            .join((hosts_distro_tree,
                hosts_distro_tree.id == MachineRecipe.distro_tree_id))\
            .join((hosts_lab_controller_distro_map,
                    hosts_distro_tree.lab_controller_assocs),
                (LabController, and_(
                    hosts_lab_controller_distro_map.lab_controller_id ==
                        LabController.id,
                    System.lab_controller_id == LabController.id)))\
            .filter(
                and_(Recipe.status == TaskStatus.queued,
                    System.user == None,
                    LabController.disabled == False,
                    or_(
                        RecipeSet.lab_controller == None,
                        RecipeSet.lab_controller_id == System.lab_controller_id,
                        ),
                    or_(
                        System.loan_id == None,
                        System.loan_id == Job.owner_id,
                        ),
                    or_(
                        # We either have no guest
                        guest_recipe.id == None,
                        # Or we have a guest of which the latest
                        # is in a common lab controller.
                        and_(guests_lab_controller.id == LabController.id,
                            latest_guest_distro.c.latest_distro_date != None
                            ),
                        ) # or
                    ) # and
                )
        # Get out of here if we have no recipes
        if not recipes.count():
            return False
        # This should be the guest recipe with the latest distro.
        # We return it in this query, to save us from re-running the
        # derived table query in schedule_queued_recipe()
        recipes = recipes.add_column(guest_recipe.id)
        # Effective priority is given in the following order:
        # * Multi host recipes with already scheduled siblings
        # * Priority level (i.e Normal, High etc)
        # * RecipeSet id
        # * Recipe id
        recipes = recipes.order_by(RecipeSet.lab_controller == None). \
            order_by(RecipeSet.priority.desc()). \
            order_by(RecipeSet.id). \
            order_by(MachineRecipe.id)
        # Don't do a GROUP BY before here, it is not needed.
        recipes = recipes.group_by(MachineRecipe.id)
        log.debug("Entering schedule_queued_recipes")
        for recipe_id, guest_recipe_id in recipes.values(
                MachineRecipe.id, guest_recipe.id):
            # Each recipe is scheduled in its own nested transaction so a
            # failure does not abort the whole batch.
            session.begin(nested=True)
            try:
                schedule_queued_recipe(recipe_id, guest_recipe_id)
                session.commit()
            except (StaleSystemUserException, InsufficientSystemPermissions,
                    StaleTaskStatusException), e:
                # Either
                # System user has changed before
                # system allocation
                # or
                # System permissions have changed before
                # system allocation
                # or
                # Something has moved our status on from queued
                # already.
                log.warn(str(e))
                session.rollback()
            except Exception, e:
                log.exception('Error in schedule_queued_recipe(%s)', recipe_id)
                session.rollback()
                session.begin(nested=True)
                try:
                    recipe = MachineRecipe.by_id(recipe_id)
                    recipe.recipeset.abort(
                            "Aborted in schedule_queued_recipe: %s" % e)
                    session.commit()
                except Exception, e:
                    log.exception(
                            "Error during error handling in schedule_queued_recipe: %s" % e)
                    session.rollback()
def test_cancel_while_scheduling(self):
    # This test simulates a user cancelling their job at the same time as
    # beakerd is scheduling it. beakerd assigns a system and creates
    # a watchdog and sets the recipe status to Waiting, then it's
    # overwritten by another transaction setting the status to Cancelled.
    with session.begin():
        lab_controller = data_setup.create_labcontroller()
        system = data_setup.create_system(shared=True,
                lab_controller=lab_controller)
        distro_tree = data_setup.create_distro_tree(osmajor=u'Fedora20',
                lab_controllers=[lab_controller])
        job = data_setup.create_job(distro_tree=distro_tree)
        # Pin the recipe to our system so scheduling is deterministic.
        job.recipesets[0].recipes[0]._host_requires = (u"""
            <hostRequires>
                <hostname op="=" value="%s" />
            </hostRequires>
            """ % system.fqdn)
    # Drive the recipe through New -> Processed -> Queued.
    beakerd.process_new_recipes()
    beakerd.update_dirty_jobs()
    beakerd.queue_processed_recipesets()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job.id)
        system = System.query.get(system.id)
        self.assertEquals(job.status, TaskStatus.queued)
        self.assertEquals(job.recipesets[0].recipes[0].systems, [system])

    # Two "concurrent" transactions, in the first one beakerd has
    # scheduled the recipe and is about to commit...
    class ScheduleThread(Thread):
        def __init__(self, **kwargs):
            super(ScheduleThread, self).__init__(**kwargs)
            self.ready_evt = Event()
            self.continue_evt = Event()
        def run(self):
            session.begin()
            recipe = Job.by_id(job.id).recipesets[0].recipes[0]
            assert recipe.status == TaskStatus.queued
            self.ready_evt.set()
            self.continue_evt.wait()
            try:
                # Must fail: the other transaction committed the
                # cancellation first, making our status stale.
                beakerd.schedule_queued_recipe(recipe.id)
                assert False, 'should raise'
            except StaleTaskStatusException:
                pass # expected
            session.rollback()

    # ... and in the second transaction the user is cancelling the recipe.
    class CancelThread(Thread):
        def __init__(self, **kwargs):
            super(CancelThread, self).__init__(**kwargs)
            self.ready_evt = Event()
            self.continue_evt = Event()
        def run(self):
            session.begin()
            recipe = Job.by_id(job.id).recipesets[0].recipes[0]
            assert not recipe.watchdog
            assert not recipe.resource
            recipe.recipeset.cancel()
            self.ready_evt.set()
            self.continue_evt.wait()
            session.commit()

    sched_thread = ScheduleThread()
    cancel_thread = CancelThread()
    sched_thread.start()
    cancel_thread.start()
    sched_thread.ready_evt.wait()
    cancel_thread.ready_evt.wait()
    sched_thread.continue_evt.set()
    cancel_thread.continue_evt.set()
    sched_thread.join()
    cancel_thread.join()
    with session.begin():
        session.expire_all()
        job.update_status()
        # The cancellation won: no watchdog, no reservation left behind.
        self.assertEquals(job.status, TaskStatus.cancelled)
        self.assertEquals(job.recipesets[0].recipes[0].watchdog, None)
        self.assertEquals(system.open_reservation, None)
def test_submission_delegate_cancel_with_group(self):
    # Same scenario as test_submission_delegate_cancel_job, but with the
    # job assigned to a group first.
    with session.begin():
        group = data_setup.create_group()
        self.job.group = group
    self.test_submission_delegate_cancel_job()
def tearDown(self):
    """Remove the tasks created for this test from the database."""
    with session.begin():
        for task in (self.task_one, self.task_two, self.task_three):
            session.delete(task)
def setUp(self):
    # Open a transaction for the test; presumably committed or rolled
    # back in tearDown (not visible here).
    session.begin()