def test_task_aborted_return_reservation(self):
    """
    An aborted task sends the recipe to Reserved (reservesys was
    requested); returning the reservation then finishes it as Aborted.
    """
    with session.begin():
        reserved_recipe = data_setup.create_recipe(
                task_list=[Task.by_name(u'/distribution/install')],
                reservesys=True)
        created_job = data_setup.create_job_for_recipes([reserved_recipe])
        saved_job_id = created_job.id
        data_setup.mark_recipe_tasks_finished(
                reserved_recipe, result=TaskResult.warn,
                task_status=TaskStatus.aborted)
        created_job._mark_dirty()
    beakerd.update_dirty_jobs()
    with session.begin():
        refreshed = Job.by_id(saved_job_id)
        # Reservation is honoured even though the task aborted
        self.assertEqual(refreshed.recipesets[0].recipes[0].status,
                TaskStatus.reserved)
        refreshed.recipesets[0].recipes[0].return_reservation()
    beakerd.update_dirty_jobs()
    with session.begin():
        refreshed = Job.by_id(saved_job_id)
        self.assertEqual(refreshed.recipesets[0].recipes[0].status,
                TaskStatus.aborted)
def test_recipe_running_then_cancelled(self):
    """
    A running recipe with a valid reservation request is cancelled
    before completion; cancellation wins over the reservation.
    """
    with session.begin():
        recipe_under_test = data_setup.create_recipe(
                task_list=[Task.by_name(u'/distribution/install')] * 2,
                reservesys=True)
        new_job = data_setup.create_job_for_recipes([recipe_under_test])
        saved_job_id = new_job.id
        data_setup.mark_recipe_running(recipe_under_test)
        data_setup.mark_recipe_installation_finished(recipe_under_test)
        # we want at least one task to be Completed here
        # https://bugzilla.redhat.com/show_bug.cgi?id=1195558
        first_recipe = new_job.recipesets[0].recipes[0]
        first_recipe.tasks[0].stop()
        first_recipe.tasks[1].start()
    beakerd.update_dirty_jobs()
    with session.begin():
        current = Job.by_id(saved_job_id)
        self.assertEqual(current.recipesets[0].recipes[0].status,
                TaskStatus.running)
        current.recipesets[0].cancel()
    beakerd.update_dirty_jobs()
    with session.begin():
        current = Job.by_id(saved_job_id)
        self.assertEqual(current.recipesets[0].recipes[0].status,
                TaskStatus.cancelled)
def _remove(self, user, method, **kw):
    # Deactivate *user*: cancel their jobs, return systems they hold or
    # borrowed, hand their owned systems to the caller, then mark removed.
    # Destructive and order-sensitive; runs inside the caller's transaction.
    if user == identity.current.user:
        raise BX(_('You cannot remove yourself'))
    # cancel all running and queued jobs
    Job.cancel_jobs_by_user(user, 'User %s removed' % user.user_name)
    # Return all systems in use by this user
    for system in System.query.filter(System.user == user):
        reservation = system.open_reservation
        system.unreserve(reservation=reservation, service=method, user=user)
    # Return all loaned systems in use by this user
    for system in System.query.filter(System.loaned == user):
        system.record_activity(user=identity.current.user, service=method,
                action=u'Changed', field=u'Loaned To',
                old=u'%s' % system.loaned, new=u'None')
        system.loaned = None
    # Change the owner to the caller
    for system in System.query.filter(System.owner == user):
        system.owner = identity.current.user
        system.record_activity(user=identity.current.user, service=method,
                action=u'Changed', field=u'Owner', old=u'%s' % user,
                new=u'%s' % identity.current.user)
    # Finally remove the user
    user.removed = datetime.utcnow()
def _remove(user, method, **kw):
    # Remove a user account: cancel jobs, return reserved and loaned
    # systems, drop their access-policy rules, transfer system ownership
    # (to kw['newowner'] or the caller), drop group memberships, then
    # mark the account removed.
    if user == identity.current.user:
        raise BX(_('You cannot remove yourself'))
    # cancel all running and queued jobs
    Job.cancel_jobs_by_user(user, 'User %s removed' % user.user_name)
    # Return all systems in use by this user
    for system in System.query.filter(System.user==user):
        reservation = system.open_reservation
        system.unreserve(reservation=reservation, service=method)
    # Return all loaned systems in use by this user
    for system in System.query.filter(System.loaned==user):
        system.record_activity(user=identity.current.user, service=method,
                action=u'Changed', field=u'Loaned To',
                old=u'%s' % system.loaned, new=None)
        system.loaned = None
    # Remove the user from all system access policies
    for rule in SystemAccessPolicyRule.query.filter_by(user=user):
        rule.record_deletion(service=method)
        session.delete(rule)
    # Change the owner to the caller
    newowner = kw.get('newowner', identity.current.user)
    for system in System.query.filter(System.owner==user):
        system.owner = newowner
        system.record_activity(user=identity.current.user, service=method,
                action=u'Changed', field=u'Owner', old=u'%s' % user,
                new=u'%s' % newowner)
    # Remove the user from all groups
    # Inverted groups implicitly contain everyone, so there is no
    # membership row to remove for those.
    # NOTE(review): this iterates user.groups while remove_member runs;
    # confirm remove_member does not mutate the collection being iterated.
    for group in user.groups:
        if not group.membership_type == GroupMembershipType.inverted:
            group.remove_member(user=user, agent=identity.current.user,
                    service=method)
    # Finally remove the user
    user.removed=datetime.utcnow()
def create_job_for_recipesets(recipesets, owner=None, whiteboard=None,
        cc=None, product=None, retention_tag=None, group=None,
        submitter=None, **kwargs):
    """Create, persist and return a Job wrapping the given recipe sets."""
    # Don't use default, unpredictable
    tag = RetentionTag.by_tag(
            u'scratch' if retention_tag is None else retention_tag)
    job_owner = create_user() if owner is None else owner
    board = unique_name(u'job %s') if whiteboard is None else whiteboard
    total_tasks = sum(rs.ttasks for rs in recipesets)
    job = Job(whiteboard=board, ttasks=total_tasks, owner=job_owner,
            retention_tag=tag, group=group, product=product,
            submitter=submitter)
    if cc is not None:
        job.cc = cc
    job.recipesets.extend(recipesets)
    session.add(job)
    session.flush()
    log.debug('Created %s', job.t_id)
    return job
def _remove(self, user, method, **kw):
    # Remove a user: cancel their jobs, release systems they hold or
    # borrowed, and reassign their owned systems to the caller before
    # stamping the account as removed.
    if user == identity.current.user:
        raise BX(_('You cannot remove yourself'))
    # cancel all running and queued jobs
    Job.cancel_jobs_by_user(user, 'User %s removed' % user.user_name)
    # Return all systems in use by this user
    for system in System.query.filter(System.user==user):
        reservation = system.open_reservation
        system.unreserve(reservation=reservation, service=method, user=user)
    # Return all loaned systems in use by this user
    for system in System.query.filter(System.loaned==user):
        system.record_activity(user=identity.current.user, service=method,
                action=u'Changed', field=u'Loaned To',
                old=u'%s' % system.loaned, new=u'None')
        system.loaned = None
    # Change the owner to the caller
    for system in System.query.filter(System.owner==user):
        system.owner = identity.current.user
        system.record_activity(user=identity.current.user, service=method,
                action=u'Changed', field=u'Owner', old=u'%s' % user,
                new=u'%s' % identity.current.user)
    # Finally remove the user
    user.removed=datetime.utcnow()
def _disable(self, user, method,
        msg='Your account has been temporarily disabled'):
    # Disabling only cancels the user's outstanding work; unlike _remove
    # it does not release systems or transfer ownership.
    # cancel all queued and running jobs
    Job.cancel_jobs_by_user(user, msg)
def test_recipe_running_then_watchdog_expired(self):
    """
    A running recipe with a reservation request is aborted (watchdog
    expiry); it still goes Reserved, and returning the reservation
    finishes it as Aborted.
    """
    with session.begin():
        target_recipe = data_setup.create_recipe(
                task_list=[Task.by_name(u'/distribution/install')],
                reservesys=True)
        owning_job = data_setup.create_job_for_recipes([target_recipe])
        saved_job_id = owning_job.id
        data_setup.mark_recipe_tasks_finished(
                target_recipe, task_status=TaskStatus.aborted)
        owning_job.recipesets[0].recipes[0].abort()
    beakerd.update_dirty_jobs()
    with session.begin():
        reloaded = Job.by_id(saved_job_id)
        self.assertEqual(reloaded.recipesets[0].recipes[0].status,
                TaskStatus.reserved)
        reloaded.recipesets[0].recipes[0].return_reservation()
    beakerd.update_dirty_jobs()
    with session.begin():
        reloaded = Job.by_id(saved_job_id)
        self.assertEqual(reloaded.recipesets[0].recipes[0].status,
                TaskStatus.aborted)
def test_recipe_running_then_watchdog_expired(self):
    """
    This tests the case where the recipe is running, has a valid
    reservation request, but the watchdog expires before it's completed.
    """
    with session.begin():
        recipe = data_setup.create_recipe(
                task_list=[Task.by_name(u'/distribution/install')],
                reservesys=True)
        job = data_setup.create_job_for_recipes([recipe])
        job_id = job.id
        # Watchdog expiry aborts the tasks and then the recipe itself
        data_setup.mark_recipe_tasks_finished(recipe,
                task_status=TaskStatus.aborted)
        job.recipesets[0].recipes[0].abort()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job_id)
        # Despite the abort, the reservation request is still honoured
        self.assertEqual(job.recipesets[0].recipes[0].status,
                TaskStatus.reserved)
        job.recipesets[0].recipes[0].return_reservation()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job_id)
        # Once the reservation is returned the recipe finishes as Aborted
        self.assertEqual(job.recipesets[0].recipes[0].status,
                TaskStatus.aborted)
def test_task_aborted_return_reservation(self):
    """
    This tests the case where the task was aborted, then the recipe
    goes to Reserved state and then finally the reservation is returned
    """
    with session.begin():
        recipe = data_setup.create_recipe(
                task_list=[Task.by_name(u'/distribution/install')],
                reservesys=True)
        job = data_setup.create_job_for_recipes([recipe])
        job_id = job.id
        data_setup.mark_recipe_tasks_finished(recipe,
                result=TaskResult.warn,
                task_status=TaskStatus.aborted)
        # Force beakerd to re-evaluate the job state
        job._mark_dirty()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job_id)
        # Reservation is honoured even though the task aborted
        self.assertEqual(job.recipesets[0].recipes[0].status,
                TaskStatus.reserved)
        job.recipesets[0].recipes[0].return_reservation()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job_id)
        self.assertEqual(job.recipesets[0].recipes[0].status,
                TaskStatus.aborted)
def create_job_for_recipes(recipes, owner=None, whiteboard=None, cc=None,
        product=None, retention_tag=None, group=None, submitter=None,
        priority=None, **kwargs):
    """Create, persist and return a Job with one recipe set holding *recipes*."""
    # Don't use default, unpredictable
    tag = RetentionTag.by_tag(
            u'scratch' if retention_tag is None else retention_tag)
    job_owner = create_user() if owner is None else owner
    board = unique_name(u'job %s') if whiteboard is None else whiteboard
    total_tasks = sum(r.ttasks for r in recipes)
    job = Job(whiteboard=board, ttasks=total_tasks, owner=job_owner,
            retention_tag=tag, group=group, product=product,
            submitter=submitter)
    if cc is not None:
        job.cc = cc
    chosen_priority = (TaskPriority.default_priority()
            if priority is None else priority)
    recipe_set = RecipeSet(ttasks=total_tasks, priority=chosen_priority)
    recipe_set.recipes.extend(recipes)
    job.recipesets.append(recipe_set)
    session.add(job)
    session.flush()
    log.debug('Created %s', job.t_id)
    return job
def test_01_invalid_system_distro_combo(self):
    # job1 has no valid system/distro combination and must abort;
    # job2 should proceed normally to Processed.
    beakerd.process_new_recipes()
    beakerd.update_dirty_jobs()
    with session.begin():
        self.assertEqual(Job.by_id(self.job1.id).status,
                TaskStatus.aborted)
        self.assertEqual(Job.by_id(self.job2.id).status,
                TaskStatus.processed)
def delete_jobs(self, jobs=None, tag=None, complete_days=None, family=None,
        dryrun=False, product=None):
    """
    delete_jobs will mark the job to be deleted

    To select jobs by id, pass an array for the *jobs* argument. Elements
    of the array must be strings of the form ``'J:123'``.
    Alternatively, pass some combination of the *tag*, *complete_days*,
    or *family* arguments to select jobs for deletion. These arguments
    behave as per the :meth:`jobs.list` method.

    If *dryrun* is True, deletions will be reported but nothing will be
    modified.

    Admins are not able to delete jobs which are not owned by themselves
    by using the tag, complete_days etc kwargs; instead, they should do
    that via the *jobs* argument.
    """
    if jobs:
        #Turn them into job objects
        if not isinstance(jobs, list):
            jobs = [jobs]
        jobs_to_try_to_del = []
        for j_id in jobs:
            job = TaskBase.get_by_t_id(j_id)
            if not isinstance(job, Job):
                raise BeakerException('Incorrect task type passed %s' % j_id)
            if not job.can_delete(identity.current.user):
                raise BeakerException(
                        "You don't have permission to delete job %s" % j_id)
            jobs_to_try_to_del.append(job)
        delete_jobs_kw = dict(jobs=jobs_to_try_to_del)
    else:
        # only allow people to delete their own jobs while using these kwargs
        delete_jobs_kw = dict(
                query=Job.find_jobs(tag=tag, complete_days=complete_days,
                    family=family, product=product,
                    owner=identity.current.user.user_name))
    deleted_jobs = Job.delete_jobs(**delete_jobs_kw)
    msg = 'Jobs deleted'
    if dryrun:
        # Report what would have been deleted, then undo the marking.
        session.rollback()
        msg = 'Dryrun only. %s' % (msg)
    return '%s: %s' % (msg, [j.t_id for j in deleted_jobs])
def test_02_abort_dead_recipes(self):
    # Advance job2 through the scheduler to Queued, then make its distro
    # tree unavailable and check abort_dead_recipes() aborts it.
    beakerd.process_new_recipes()
    beakerd.update_dirty_jobs()
    beakerd.queue_processed_recipesets()
    beakerd.update_dirty_jobs()
    with session.begin():
        self.assertEqual(Job.by_id(self.job2.id).status, TaskStatus.queued)
        # Remove distro_tree2 from lab1, should cause remaining recipe to abort.
        for assoc in list(self.distro_tree2.lab_controller_assocs):
            session.delete(assoc)
    beakerd.abort_dead_recipes()
    beakerd.update_dirty_jobs()
    with session.begin():
        self.assertEqual(Job.by_id(self.job2.id).status, TaskStatus.aborted)
def test_get_system_with_running_hardware_scan_recipe(self):
    # The bug was a circular reference from system -> recipe -> system
    # which caused JSON serialization to fail.
    with session.begin():
        Job.inventory_system_job(data_setup.create_distro_tree(),
                owner=self.owner, system=self.system)
        scan_recipe = self.system.find_current_hardware_scan_recipe()
        data_setup.mark_recipe_running(scan_recipe, system=self.system)
    system_url = get_server_base() + "/systems/%s/" % self.system.fqdn
    response = requests.get(system_url,
            headers={"Accept": "application/json"})
    response.raise_for_status()
    scan_info = response.json()["in_progress_scan"]
    self.assertEquals(scan_info["recipe_id"], scan_recipe.id)
    self.assertEquals(scan_info["status"], u"Running")
    self.assertEquals(scan_info["job_id"], scan_recipe.recipeset.job.t_id)
def user_full_json(user):
    """Full JSON representation of *user* for the user page.

    Users have a minimal JSON representation which is embedded in many
    other objects (system owner, system user, etc) but the user page
    needs additional detail, so it is assembled here.
    """
    attributes = user.__json__()
    attributes['id'] = user.user_id
    unfinished_jobs = Job.query.filter(not_(Job.is_finished()))
    attributes['job_count'] = \
            unfinished_jobs.filter(Job.owner == user).count()
    attributes['reservation_count'] = \
            System.query.filter(System.user == user).count()
    live_systems = System.query.filter(System.status != SystemStatus.removed)
    attributes['loan_count'] = \
            live_systems.filter(System.loaned == user).count()
    attributes['owned_system_count'] = \
            live_systems.filter(System.owner == user).count()
    # Intentionally not counting membership in inverted groups because
    # everyone is always in those
    attributes['group_membership_count'] = len(user.group_user_assocs)
    viewer = identity.current.user
    if viewer:
        attributes['can_edit'] = user.can_edit(viewer)
        attributes['can_change_password'] = user.can_change_password(viewer)
        if user.can_edit(viewer):
            # Credential-related details only for authorised viewers
            attributes['root_password'] = user._root_password
            attributes['root_password_changed'] = user.rootpw_changed
            attributes['root_password_expiry'] = user.rootpw_expiry
            attributes['ssh_public_keys'] = user.sshpubkeys
            attributes['submission_delegates'] = user.submission_delegates
            attributes['use_old_job_page'] = user.use_old_job_page
    else:
        attributes['can_edit'] = False
        attributes['can_change_password'] = False
    return attributes
def user_full_json(user):
    """Detailed JSON representation of *user* for the user page.

    The minimal user JSON (as embedded in system owner/user fields etc.)
    is extended with counts and, for authorised viewers, credential
    fields.
    """
    result = user.__json__()
    result['id'] = user.user_id
    result['job_count'] = Job.query\
            .filter(not_(Job.is_finished()))\
            .filter(Job.owner == user).count()
    result['reservation_count'] = System.query\
            .filter(System.user == user).count()
    result['loan_count'] = System.query\
            .filter(System.status != SystemStatus.removed)\
            .filter(System.loaned == user).count()
    result['owned_system_count'] = System.query\
            .filter(System.status != SystemStatus.removed)\
            .filter(System.owner == user).count()
    # Intentionally not counting membership in inverted groups because
    # everyone is always in those
    result['group_membership_count'] = len(user.group_user_assocs)
    current_user = identity.current.user
    if not current_user:
        # Anonymous viewer: no edit rights, no sensitive fields
        result['can_edit'] = False
        result['can_change_password'] = False
        return result
    result['can_edit'] = user.can_edit(current_user)
    result['can_change_password'] = user.can_change_password(current_user)
    if user.can_edit(current_user):
        result['root_password'] = user._root_password
        result['root_password_changed'] = user.rootpw_changed
        result['root_password_expiry'] = user.rootpw_expiry
        result['ssh_public_keys'] = user.sshpubkeys
        result['submission_delegates'] = user.submission_delegates
        result['use_old_job_page'] = user.use_old_job_page
    return result
def test_export_xml(self): b = self.browser # Make sure the Export button is present in the jobs grid. We can't # actually click it because it triggers a download, which WebDriver # can't handle. b.get(get_server_base() + 'jobs/') b.find_element_by_name('simplesearch').send_keys(unicode(self.job_to_export.id)) b.find_element_by_name('jobsearch_simple').submit() b.find_element_by_xpath( '//tr[normalize-space(string(./td[1]))="%s"]' '//a[text()="Export"]' % self.job_to_export.t_id) # Make sure the Export button is present on the job page. b.get(get_server_base() + 'jobs/%s' % self.job_to_export.id) b.find_element_by_link_text('Export') # Fetch the exported XML directly. response = requests.get(get_server_base() + 'to_xml?taskid=%s&pretty=False' % self.job_to_export.t_id) xml_export = response.content with session.begin(): job = Job.by_id(self.job_to_export.id) xml_export = job.to_xml().toxml() xml_export_tree = lxml.etree.parse(StringIO(xml_export)) pretty_xml = lxml.etree.tostring(xml_export_tree, pretty_print=False) self.assert_(pretty_xml == xml_export)
def doit():
    """Create a provisioning job for the distro trees posted in the form."""
    distro_trees = []
    for tree_id in request.form.getlist('distro_tree_id'):
        try:
            distro_trees.append(DistroTree.by_id(tree_id))
        except NoResultFound:
            raise BadRequest400('Distro tree %r does not exist' % tree_id)
    picked = request.form.get('pick') or 'auto'
    job_details = {'pick': picked}
    if picked == 'fqdn':
        try:
            job_details['system'] = System.by_fqdn(
                    request.form.get('system'), identity.current.user)
        except NoResultFound:
            raise BadRequest400('System %s not found'
                    % request.form.get('system'))
    elif picked == 'lab':
        try:
            job_details['lab'] = LabController.by_name(
                    request.form.get('lab'))
        except NoResultFound:
            raise BadRequest400('Lab controller %s not found'
                    % request.form.get('lab'))
    # Cap the reservation length at the configured maximum
    requested_days = int(request.form.get('reserve_days')
            or DEFAULT_RESERVE_DAYS)
    capped_days = min(requested_days, MAX_DAYS_PROVISION)
    job_details['reservetime'] = capped_days * 24 * 60 * 60
    job_details['whiteboard'] = request.form.get('whiteboard')
    job_details['ks_meta'] = request.form.get('ks_meta')
    job_details['koptions'] = request.form.get('koptions')
    job_details['koptions_post'] = request.form.get('koptions_post')
    with convert_internal_errors():
        job = Job.provision_system_job(distro_trees, **job_details)
    return ('Created %s' % job.t_id, 201,
            [('Location', url('/jobs/%s' % job.id))])
def test_job_group(self):
    # Submitting a job on behalf of a group the submitter belongs to
    # succeeds and records the group on the job; submitting for a group
    # the user is not in must fail with a membership error.
    with session.begin():
        user_in_group = data_setup.create_user(password='******')
        group = data_setup.create_group()
        user_in_group.groups.append(group)
        user_not_in_group = data_setup.create_user(password='******')
    # Test submitting on behalf of user's group
    config1 = create_client_config(username=user_in_group.user_name,
            password='******')
    out = run_client(['bkr', 'workflow-simple', '--random',
            '--arch', self.distro_tree.arch.arch,
            '--family', self.distro.osversion.osmajor.osmajor,
            '--job-group', group.group_name,
            '--task', self.task.name], config=config1)
    self.assertTrue(out.startswith('Submitted:'), out)
    # Extract the new job id from the client output, e.g. "J:123"
    m = re.search('J:(\d+)', out)
    job_id = m.group(1)
    with session.begin():
        job = Job.by_id(job_id)
        self.assertEqual(group, job.group)
    # Test submitting on behalf of group user does not belong to
    config2 = create_client_config(username=user_not_in_group.user_name,
            password='******')
    try:
        out2 = run_client(['bkr', 'workflow-simple', '--random',
                '--arch', self.distro_tree.arch.arch,
                '--family', self.distro.osversion.osmajor.osmajor,
                '--job-group', group.group_name,
                '--task', self.task.name], config=config2)
        fail('should raise')
    except ClientError, e:
        self.assertTrue('You are not a member of the %s group' % \
                group.group_name in e.stderr_output, e)
def test_does_not_load_RecipeTaskResults(self):
    # In large jobs with many RecipeTasks and RecipeTaskResults,
    # beaker-log-delete would previously take a long time and a lot of
    # memory, because it was traversing the entire object graph down to
    # RecipeTaskResult and loading them all into memory.
    # This test is asserting that no RecipeTask or RecipeTaskResult
    # instances are loaded when beaker-log-delete runs.
    with session.begin():
        job = data_setup.create_completed_job()
        job.deleted = datetime.datetime.utcnow()
        recipe = job.recipesets[0].recipes[0]
        server = self.log_server_url + '/recipe/'
        # Attach one real log file at each level: recipe, task, result
        open(os.path.join(self.recipe_logs_dir, 'recipe.log'), 'w').write('dummy')
        recipe.logs[:] = [LogRecipe(server=server, filename=u'recipe.log')]
        open(os.path.join(self.recipe_logs_dir, 'task.log'), 'w').write('dummy')
        recipe.tasks[0].logs[:] = [LogRecipeTask(server=server,
                filename=u'task.log')]
        open(os.path.join(self.recipe_logs_dir, 'result.log'), 'w').write('dummy')
        recipe.tasks[0].results[0].logs[:] = \
            [LogRecipeTaskResult(server=server, filename=u'result.log')]
    # RecipeTasks/RecipeTaskResults are already loaded from the data_setup
    # calls above, expunge the session so that log_delete starts from
    # a clean slate.
    session.expunge_all()
    # Any attempt to instantiate these classes during log_delete blows up.
    with mock.patch.object(RecipeTask, '__new__',
            side_effect=AssertionError):
        with mock.patch.object(RecipeTaskResult, '__new__',
                side_effect=AssertionError):
            self.assertEquals(log_delete.log_delete(), 0) # exit status
    # Check that we really deleted something, if not the test setup was faulty.
    with session.begin():
        job = Job.by_id(job.id)
        self.assertIsNotNone(job.purged)
def test_export_xml(self):
    b = self.browser
    # Make sure the Export button is present in the jobs grid. We can't
    # actually click it because it triggers a download, which WebDriver
    # can't handle.
    b.get(get_server_base() + 'jobs/')
    search_box = b.find_element_by_name('simplesearch')
    search_box.send_keys(unicode(self.job_to_export.whiteboard))
    b.find_element_by_name('jobsearch_simple').submit()
    b.find_element_by_xpath('//tr[normalize-space(string(./td[1]))="%s"]'
            '//a[text()="Export"]' % self.job_to_export.t_id)
    # Fetch the exported XML directly.
    export_url = (get_server_base() +
            'to_xml?taskid=%s&pretty=False' % self.job_to_export.t_id)
    actual = requests.get(export_url).content
    with session.begin():
        # Expire the job, otherwise the exported job XML (read from the
        # Python instance) will have a duration attribute while the export
        # from the view will have not since our database stores only seconds
        session.expire_all()
        exported_job = Job.by_id(self.job_to_export.id)
        expected = lxml.etree.tostring(exported_job.to_xml(),
                pretty_print=True, encoding='utf8')
        self.assertMultiLineEqual(expected, actual)
def test_export_xml(self): b = self.browser # Make sure the Export button is present in the jobs grid. We can't # actually click it because it triggers a download, which WebDriver # can't handle. b.get(get_server_base() + 'jobs/') b.find_element_by_name('simplesearch').send_keys( unicode(self.job_to_export.id)) b.find_element_by_name('jobsearch_simple').submit() b.find_element_by_xpath('//tr[normalize-space(string(./td[1]))="%s"]' '//a[text()="Export"]' % self.job_to_export.t_id) # Make sure the Export button is present on the job page. b.get(get_server_base() + 'jobs/%s' % self.job_to_export.id) b.find_element_by_link_text('Export') # Fetch the exported XML directly. response = requests.get(get_server_base() + 'to_xml?taskid=%s&pretty=False' % self.job_to_export.t_id) xml_export = response.content with session.begin(): job = Job.by_id(self.job_to_export.id) xml_export = job.to_xml().toxml() xml_export_tree = lxml.etree.parse(StringIO(xml_export)) pretty_xml = lxml.etree.tostring(xml_export_tree, pretty_print=False) self.assert_(pretty_xml == xml_export)
def clone(self, job_id=None, recipe_id=None, recipeset_id=None,
        textxml=None, filexml=None, confirmed=False, **kw):
    """
    Review cloned xml before submitting it.
    """
    # Builds *textxml* from one of three sources: an existing job, an
    # existing recipe set, or an uploaded file. The *recipe_id* parameter
    # is accepted but not handled in this excerpt; the function appears
    # to continue past the visible portion (no value is returned here).
    title = 'Clone Job'
    if job_id:
        # Clone from Job ID
        title = 'Clone Job %s' % job_id
        try:
            job = Job.by_id(job_id)
        except InvalidRequestError:
            flash(_(u"Invalid job id %s" % job_id))
            redirect(".")
        textxml = job.to_xml(clone=True).toprettyxml()
    elif recipeset_id:
        title = 'Clone Recipeset %s' % recipeset_id
        try:
            recipeset = RecipeSet.by_id(recipeset_id)
        except InvalidRequestError:
            flash(_(u"Invalid recipeset id %s" % recipeset_id))
            redirect(".")
        textxml = recipeset.to_xml(clone=True,from_job=False).toprettyxml()
    elif isinstance(filexml, cgi.FieldStorage):
        # Clone from file
        try:
            textxml = filexml.value.decode('utf8')
        except UnicodeDecodeError, e:
            flash(_(u'Invalid job XML: %s') % e)
            redirect('.')
def test_ignore_missing_tasks(self):
    # With ignore_missing_tasks=True, unknown task names in the uploaded
    # job XML are silently dropped rather than rejecting the whole job.
    job_tid = self.server.jobs.upload('''
        <job>
            <whiteboard>job with nonexistent task</whiteboard>
            <recipeSet>
                <recipe>
                    <distroRequires>
                        <distro_name op="=" value="BlueShoeLinux5-5" />
                    </distroRequires>
                    <hostRequires/>
                    <task name="/distribution/install" />
                    <task name="/asdf/notexist" />
                    <task name="/distribution/reservesys" />
                </recipe>
            </recipeSet>
        </job>
        ''',
        True # ignore_missing_tasks
    )
    self.assert_(job_tid.startswith('J:'))
    with session.begin():
        job = Job.by_id(int(job_tid[2:]))
        self.assertEqual(job.ttasks, 2) # not 3
        recipe = job.recipesets[0].recipes[0]
        self.assertEqual(len(recipe.tasks), 2)
        self.assertEqual(recipe.tasks[0].task.name,
                u'/distribution/install')
        # /asdf/notexist is silently dropped
        self.assertEqual(recipe.tasks[1].task.name,
                u'/distribution/reservesys')
def mine(self, *args, **kw):
    """Render the jobs grid filtered to the logged-in user's own jobs."""
    my_jobs_query = Job.mine(identity.current.user)
    return self.jobs(jobs=my_jobs_query, action='./mine',
            title=u'My Jobs', *args, **kw)
def test_remove_user_job_cancel(self):
    # Removing a user account through the UI should cancel their running
    # jobs and record the removal message in the task result log.
    with session.begin():
        user = data_setup.create_user(user_name = data_setup.unique_name('aaaaa%s'))
        job = data_setup.create_job(owner=user)
        data_setup.mark_job_running(job)
    b = self.browser
    login(b)
    b.get(get_server_base() + 'users')
    b.find_element_by_xpath('//a[@href="remove?id=%d"]' %user.user_id).click()
    # XXX: not necessary, but doing it here to buy time, since sometimes the
    # job cancellation seems to take a while
    logout(b)
    # reflect the change in recipe task status when
    # update_dirty_jobs() is called
    session.expunge_all()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job.id)
        self.assertEquals(job.status, TaskStatus.cancelled)
        self.assertIn('User %s removed' % user.user_name,
                job.recipesets[0].recipes[0].tasks[0].results[0].log)
def mygroups(self, *args, **kw):
    """Render the jobs grid filtered to jobs of the user's groups."""
    group_jobs_query = Job.my_groups(identity.current.user)
    return self.jobs(jobs=group_jobs_query, action='./mygroups',
            title=u'My Group Jobs', *args, **kw)
def doit():
    """Provision the selected distro trees as a new system job."""
    form = request.form
    distro_trees = []
    for tree_id in form.getlist('distro_tree_id'):
        try:
            distro_trees.append(DistroTree.by_id(tree_id))
        except NoResultFound:
            raise BadRequest400('Distro tree %r does not exist' % tree_id)
    picked = form.get('pick') or 'auto'
    job_details = {'pick': picked}
    if picked == 'fqdn':
        try:
            job_details['system'] = System.by_fqdn(form.get('system'),
                    identity.current.user)
        except NoResultFound:
            raise BadRequest400('System %s not found' % form.get('system'))
    elif picked == 'lab':
        try:
            job_details['lab'] = LabController.by_name(form.get('lab'))
        except NoResultFound:
            raise BadRequest400('Lab controller %s not found'
                    % form.get('lab'))
    # Cap the reservation length at the configured maximum
    days = min(int(form.get('reserve_days') or DEFAULT_RESERVE_DAYS),
            MAX_DAYS_PROVISION)
    job_details['reservetime'] = days * 24 * 60 * 60
    job_details['whiteboard'] = form.get('whiteboard')
    job_details['ks_meta'] = form.get('ks_meta')
    job_details['koptions'] = form.get('koptions')
    job_details['koptions_post'] = form.get('koptions_post')
    with convert_internal_errors():
        job = Job.provision_system_job(distro_trees, **job_details)
    return 'Created %s' % job.t_id, 201, \
            [('Location', absolute_url('/jobs/%s' % job.id))]
def test_ignore_missing_tasks(self):
    # With ignore_missing_tasks=True, unknown task names in the uploaded
    # job XML are silently dropped rather than rejecting the whole job.
    job_tid = self.server.jobs.upload(
        '''
        <job>
            <whiteboard>job with nonexistent task</whiteboard>
            <recipeSet>
                <recipe>
                    <distroRequires>
                        <distro_name op="=" value="BlueShoeLinux5-5" />
                    </distroRequires>
                    <hostRequires/>
                    <task name="/distribution/check-install" />
                    <task name="/asdf/notexist" />
                    <task name="/distribution/reservesys" />
                </recipe>
            </recipeSet>
        </job>
        ''',
        True # ignore_missing_tasks
    )
    self.assert_(job_tid.startswith('J:'))
    with session.begin():
        job = Job.by_id(int(job_tid[2:]))
        self.assertEqual(job.ttasks, 2) # not 3
        recipe = job.recipesets[0].recipes[0]
        self.assertEqual(len(recipe.tasks), 2)
        self.assertEqual(recipe.tasks[0].task.name,
                u'/distribution/check-install')
        # /asdf/notexist is silently dropped
        self.assertEqual(recipe.tasks[1].task.name,
                u'/distribution/reservesys')
def update(self, id, **kw):
    # XXX This function is awkward and needs to be cleaned up.
    # Updates a job's retention tag, product and/or whiteboard from the
    # submitted form fields, after a permission check.
    try:
        job = Job.by_id(id)
    except InvalidRequestError:
        raise cherrypy.HTTPError(status=400,
                message='Invalid job id %s' % id)
    if not job.can_change_product(identity.current.user) or not \
        job.can_change_retention_tag(identity.current.user):
        raise cherrypy.HTTPError(status=403,
                message="You don't have permission to update job id %s" % id)
    returns = {'success' : True, 'vars':{}}
    if 'retentiontag' in kw and 'product' in kw:
        # Both fields submitted together: validate them as a pair
        retention_tag = RetentionTag.by_id(kw['retentiontag'])
        if int(kw['product']) == ProductWidget.product_deselected:
            product = None
        else:
            product = Product.by_id(kw['product'])
        returns.update(Utility.update_retention_tag_and_product(job,
                retention_tag, product))
    elif 'retentiontag' in kw:
        retention_tag = RetentionTag.by_id(kw['retentiontag'])
        returns.update(Utility.update_retention_tag(job, retention_tag))
    elif 'product' in kw:
        if int(kw['product']) == ProductWidget.product_deselected:
            product = None
        else:
            product = Product.by_id(kw['product'])
        returns.update(Utility.update_product(job, product))
    if 'whiteboard' in kw:
        job.whiteboard = kw['whiteboard']
    return returns
def run(self): session.begin() recipe = Job.by_id(job.id).recipesets[0].recipes[0] assert not recipe.watchdog assert not recipe.resource recipe.recipeset.cancel() self.ready_evt.set() self.continue_evt.wait() session.commit()
def test_get_system_with_running_hardware_scan_recipe(self):
    # The bug was a circular reference from system -> recipe -> system
    # which caused JSON serialization to fail.
    with session.begin():
        Job.inventory_system_job(data_setup.create_distro_tree(),
                owner=self.owner, system=self.system)
        running_recipe = self.system.find_current_hardware_scan_recipe()
        data_setup.mark_recipe_running(running_recipe, system=self.system)
    response = requests.get(
            get_server_base() + '/systems/%s/' % self.system.fqdn,
            headers={'Accept': 'application/json'})
    response.raise_for_status()
    scan = response.json()['in_progress_scan']
    self.assertEquals(scan['recipe_id'], running_recipe.id)
    self.assertEquals(scan['status'], u'Running')
    self.assertEquals(scan['job_id'], running_recipe.recipeset.job.t_id)
def _remove(user, method, **kw):
    # Remove a user account: cancel their jobs, return reserved and
    # loaned systems, drop their access-policy rules, transfer ownership
    # of systems and pools (to kw['newowner'] or the caller), drop group
    # memberships, then mark the account removed.
    if user == identity.current.user:
        raise BX(_('You cannot remove yourself'))
    # cancel all running and queued jobs
    Job.cancel_jobs_by_user(user, 'User %s removed' % user.user_name)
    # Return all systems in use by this user
    for system in System.query.filter(System.user == user):
        reservation = system.open_reservation
        system.unreserve(reservation=reservation, service=method)
    # Return all loaned systems in use by this user
    for system in System.query.filter(System.loaned == user):
        system.record_activity(user=identity.current.user, service=method,
                action=u'Changed', field=u'Loaned To',
                old=u'%s' % system.loaned, new=None)
        system.loaned = None
    # Remove the user from all system access policies
    for rule in SystemAccessPolicyRule.query.filter_by(user=user):
        rule.record_deletion(service=method)
        session.delete(rule)
    # Change the owner to the caller
    newowner = kw.get('newowner', identity.current.user)
    for system in System.query.filter(System.owner == user):
        system.owner = newowner
        system.record_activity(user=identity.current.user, service=method,
                action=u'Changed', field=u'Owner', old=u'%s' % user,
                new=u'%s' % newowner)
    # Pools owned by the user go to the new owner as well
    for pool in SystemPool.query.filter(SystemPool.owning_user == user):
        pool.change_owner(user=newowner, service=method)
    # Remove the user from all groups
    # Inverted groups implicitly contain everyone, so there is no
    # membership row to remove for those.
    # NOTE(review): this iterates user.groups while remove_member runs;
    # confirm remove_member does not mutate the collection being iterated.
    for group in user.groups:
        if not group.membership_type == GroupMembershipType.inverted:
            group.remove_member(user=user, agent=identity.current.user,
                    service=method)
    # Finally remove the user
    user.removed = datetime.utcnow()
def doit(self, distro_tree_id, **kw):
    """ Create a new reserve job, if system_id is defined schedule it too """
    # provision_system_job expects the chosen system under the 'id' key
    if 'system_id' in kw:
        kw['id'] = kw['system_id']
    try:
        provision_system_job = Job.provision_system_job(distro_tree_id, **kw)
    except BX, msg:
        flash(_(u"%s" % msg))
        redirect(u".")
    # NOTE(review): the success path appears to continue beyond this
    # excerpt (provision_system_job is unused here) -- confirm against
    # the full source.
def _remove(self, user, method, **kw):
    # Deactivate a user account: cancel their jobs and return any systems
    # they currently hold. NOTE(review): unlike the module-level _remove
    # variants in this file, this one records a SystemActivity row per
    # returned system and continues on unreserve failure (best-effort).
    if user == identity.current.user:
        raise BX(_('You cannot remove yourself'))
    # cancel all running and queued jobs
    Job.cancel_jobs_by_user(user, 'User %s removed' % user.user_name)
    # Return all systems in use by this user
    for system in System.query.filter(System.user==user):
        msg = ''
        try:
            reservation = system.open_reservation
            system.unreserve(reservation=reservation,
                    service=method, user=user)
        except BX, error_msg:
            # unreserve failed: record the error against the release action
            # instead of aborting the whole removal
            msg = 'Error: %s Action: %s' % (error_msg,system.release_action)
            system.activity.append(SystemActivity(identity.current.user,
                    method, '%s' % system.release_action, 'Return', '', msg))
        # always record that the system was returned on the user's behalf
        system.activity.append(SystemActivity(identity.current.user, method,
                'Returned', 'User', '%s' % user, ''))
def delete_jobs(self, jobs=None, tag=None, complete_days=None, family=None,
        dryrun=False, product=None):
    """
    delete_jobs will mark the job to be deleted

    To select jobs by id, pass an array for the *jobs* argument. Elements
    of the array must be strings of the form ``'J:123'``.
    Alternatively, pass some combination of the *tag*, *complete_days*, or
    *family* arguments to select jobs for deletion. These arguments behave
    as per the :meth:`jobs.list` method.

    If *dryrun* is True, deletions will be reported but nothing will be
    modified.

    Admins are not able to delete jobs which are not owned by themselves
    by using the tag, complete_days etc kwargs, instead, they should do
    that via the *jobs* argument.
    """
    if jobs:
        #Turn them into job objects
        if not isinstance(jobs,list):
            jobs = [jobs]
        jobs_to_try_to_del = []
        for j_id in jobs:
            job = TaskBase.get_by_t_id(j_id)
            # only whole jobs may be deleted, not recipe sets/recipes/tasks
            if not isinstance(job,Job):
                raise BeakerException('Incorrect task type passed %s' % j_id )
            # per-job permission check: explicit ids may span other owners
            # when the caller is an admin
            if not job.can_delete(identity.current.user):
                raise BeakerException("You don't have permission to delete job %s" % j_id)
            jobs_to_try_to_del.append(job)
        delete_jobs_kw = dict(jobs=jobs_to_try_to_del)
    else:
        # only allow people to delete their own jobs while using these kwargs
        delete_jobs_kw = dict(query=Job.find_jobs(tag=tag,
                complete_days=complete_days, family=family, product=product,
                owner=identity.current.user.user_name))
    deleted_jobs = Job.delete_jobs(**delete_jobs_kw)
    msg = 'Jobs deleted'
    if dryrun:
        # undo the deletion markers; the report below is still accurate
        session.rollback()
        msg = 'Dryrun only. %s' % (msg)
    return '%s: %s' % (msg, [j.t_id for j in deleted_jobs])
def test_deletes_old_jobs_which_never_started(self):
    """Jobs cancelled/aborted before any recipe ran (no finish_time) must
    still be picked up for deletion once they are old enough."""
    with session.begin():
        the_past = datetime.datetime.utcnow() - datetime.timedelta(days=31)
        cancelled_job = data_setup.create_job(queue_time=the_past)
        cancelled_job.cancel()
        cancelled_job.update_status()
        aborted_job = data_setup.create_job(queue_time=the_past)
        aborted_job.abort()
        aborted_job.update_status()
        self.assertEqual(cancelled_job.status, TaskStatus.cancelled)
        self.assertEqual(aborted_job.status, TaskStatus.aborted)
        # neither job's recipe ever started, so there is no finish time
        for stale_job in [cancelled_job, aborted_job]:
            self.assertIsNone(stale_job.recipesets[0].recipes[0].finish_time)
        for stale_job in [cancelled_job, aborted_job]:
            self.assertIsNone(stale_job.deleted)
    log_delete.log_delete()
    with session.begin():
        self.assertIsNotNone(Job.by_id(cancelled_job.id).deleted)
        self.assertIsNotNone(Job.by_id(aborted_job.id).deleted)
def test_no_default_install_method(self):
    # Not specifying a method in ks_meta means Beaker picks one. We want
    # that to be the default behaviour if --method is not given.
    out = run_client(['bkr', 'workflow-simple',
                      '--distro', self.distro.name,
                      '--task', self.task.name])
    self.assertTrue(out.startswith('Submitted:'), out)
    job_id = re.search('J:(\d+)', out).group(1)
    with session.begin():
        job = Job.by_id(job_id)
        self.assertEquals(job.recipesets[0].recipes[0].ks_meta, u'')
def submit_job_and_check_arches(self, workflow_options, expected_arches):
    """Submit a simple workflow job with the given extra options and check
    that exactly one recipe set per expected arch was created."""
    base_args = ['bkr', 'workflow-simple', '--task', self.task.name]
    out = run_client(base_args + workflow_options)
    self.assertTrue(out.startswith('Submitted:'), out)
    job_id = re.search('J:(\d+)', out).group(1)
    with session.begin():
        job = Job.by_id(job_id)
        self.assertEqual(len(job.recipesets), len(expected_arches))
        actual_arches = [rs.recipes[0].distro_tree.arch.arch
                         for rs in job.recipesets]
        self.assertItemsEqual(actual_arches, expected_arches)
def test_02_abort_dead_recipes(self):
    """A queued recipe whose only candidate distro tree disappears from the
    lab must be aborted, and its system_recipe_map rows cleaned out."""
    beakerd.process_new_recipes()
    beakerd.update_dirty_jobs()
    beakerd.queue_processed_recipesets()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(self.job2.id)
        self.assertEqual(job.status, TaskStatus.queued)
        # candidate systems must have been recorded in system_recipe_map
        self.assertNotEqual(len(job.recipesets[0].recipes[0].systems), 0)
        # Removing distro_tree2 from lab1 leaves the remaining recipe with
        # no usable tree, which should cause it to abort.
        for assoc in list(self.distro_tree2.lab_controller_assocs):
            session.delete(assoc)
    beakerd.abort_dead_recipes()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(self.job2.id)
        self.assertEqual(job.status, TaskStatus.aborted)
        # https://bugzilla.redhat.com/show_bug.cgi?id=1173376
        # aborting must clear the system_recipe_map rows
        self.assertEqual(len(job.recipesets[0].recipes[0].systems), 0)
def test_purge_deleted(self):
    """Running the log_delete command purges log rows and files for a job
    already marked as deleted."""
    with session.begin():
        self.job_to_delete.deleted = datetime.datetime.utcnow()
        target_recipe = self.job_to_delete.recipesets[0].recipes[0]
        target_recipe.logs.append(LogRecipe(filename=u'test.log'))
        log_dir = os.path.join(target_recipe.logspath, target_recipe.filepath)
        self.make_dir(log_dir)
        # create an empty log file on disk for the purge to remove
        open(os.path.join(log_dir, 'test.log'), 'w').close()
    run_command('log_delete.py', 'beaker-log-delete')
    self._assert_logs_not_in_db(Job.by_id(self.job_to_delete.id))
    self.check_dir_not_there(log_dir)
def run(self):
    # Runs in a separate thread: keep a session open so that our view of
    # the recipe set goes stale while the main thread changes its status.
    session.begin()
    stale_rs = Job.by_id(job.id).recipesets[0]
    assert stale_rs.status == TaskStatus.processed
    self.ready_evt.set()
    self.continue_evt.wait()
    try:
        # must fail: the status we read above is no longer current
        beakerd.queue_processed_recipeset(stale_rs.id)
        assert False, 'should raise'
    except StaleTaskStatusException:
        pass # expected
    session.rollback()
def test_log_delete_to_delete(self):
    """log_delete purges log rows and files for a job marked to_delete."""
    with session.begin():
        self.job_to_delete.to_delete = datetime.datetime.utcnow()
        target_recipe = self.job_to_delete.recipesets[0].recipes[0]
        target_recipe.logs.append(LogRecipe(filename=u'test.log'))
        log_dir = os.path.join(target_recipe.logspath, target_recipe.filepath)
        self.make_dir(log_dir)
        # create an empty log file on disk for the purge to remove
        open(os.path.join(log_dir, 'test.log'), 'w').close()
    log_delete.log_delete()
    self._assert_logs_not_in_db(Job.by_id(self.job_to_delete.id))
    self.check_dir_not_there(log_dir)
def test_02_abort_dead_recipes(self):
    """A processed recipe whose only candidate distro tree disappears from
    the lab must be aborted, and its system_recipe_map rows cleaned out."""
    beakerd.process_new_recipes()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(self.job2.id)
        self.assertEqual(job.status, TaskStatus.processed)
        # candidate systems must have been recorded in system_recipe_map
        self.assertNotEqual(len(job.recipesets[0].recipes[0].systems), 0)
        # Removing distro_tree2 from lab1 leaves the remaining recipe with
        # no usable tree, which should cause it to abort.
        for assoc in list(self.distro_tree2.lab_controller_assocs):
            session.delete(assoc)
    beakerd.queue_processed_recipesets()
    beakerd.update_dirty_jobs()
    beakerd.abort_dead_recipes()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(self.job2.id)
        self.assertEqual(job.status, TaskStatus.aborted)
        # https://bugzilla.redhat.com/show_bug.cgi?id=1173376
        # aborting must clear the system_recipe_map rows
        self.assertEqual(len(job.recipesets[0].recipes[0].systems), 0)
def run(self):
    # Runs in a separate thread: keep a session open so that our view of
    # the recipe goes stale while the main thread changes its status.
    session.begin()
    stale_recipe = Job.by_id(job.id).recipesets[0].recipes[0]
    assert stale_recipe.status == TaskStatus.queued
    self.ready_evt.set()
    self.continue_evt.wait()
    try:
        # must fail: the status we read above is no longer current
        beakerd.schedule_queued_recipe(stale_recipe.id)
        assert False, 'should raise'
    except StaleTaskStatusException:
        pass # expected
    session.rollback()
def stop(self, job_id, stop_type, msg=None):
    """
    Set job status to Completed
    """
    try:
        job = Job.by_id(job_id)
    except InvalidRequestError:
        raise BX(_('Invalid job ID: %s' % job_id))
    if stop_type not in job.stop_types:
        raise BX(_('Invalid stop_type: %s, must be one of %s' %
                (stop_type, job.stop_types)))
    # dispatch to the requested stop method (e.g. cancel/abort) on the job
    return getattr(job, stop_type)(msg=msg)
def test_no_default_install_method(self):
    # When --method is not given we should not put any method= into
    # ks_meta; Beaker then picks the default install method itself.
    command = [
        'bkr', 'workflow-simple',
        '--distro', self.distro.name,
        '--task', self.task.name,
    ]
    out = run_client(command)
    self.assertTrue(out.startswith('Submitted:'), out)
    match = re.search('J:(\d+)', out)
    submitted_id = match.group(1)
    with session.begin():
        job = Job.by_id(submitted_id)
        self.assertEquals(job.recipesets[0].recipes[0].ks_meta, u'')
def test_clients_default_zero(self):
    """Passing only --servers must produce a single recipe set containing
    just the server recipes (clients default to zero)."""
    out = run_client(['bkr', 'workflow-simple',
                      '--distro', self.distro.name,
                      '--task', '/distribution/reservesys',
                      '--servers', '2'])
    self.assertTrue(out.startswith('Submitted:'), out)
    job_id = re.search('J:(\d+)', out).group(1)
    with session.begin():
        job = Job.by_id(job_id)
        self.assertEquals(len(job.recipesets), 1)
        recipes = job.recipesets[0].recipes
        self.assertEquals(len(recipes), 2)
        self.assertEquals(recipes[0].tasks[1].role, 'SERVERS')
        self.assertEquals(recipes[1].tasks[1].role, 'SERVERS')
def stop(self, job_id, stop_type, msg=None):
    """
    Set job status to Completed
    """
    try:
        job = Job.by_id(job_id)
    except InvalidRequestError:
        raise BX(_('Invalid job ID: %s' % job_id))
    if stop_type not in job.stop_types:
        raise BX(
            _('Invalid stop_type: %s, must be one of %s' %
              (stop_type, job.stop_types)))
    # look up the requested stop method (e.g. cancel/abort) and invoke it
    stop_method = getattr(job, stop_type)
    return stop_method(msg=msg)
def test_reserved_then_job_cancelled(self):
    """
    This tests the case where the recipe is Reserved but the job is cancelled
    """
    with session.begin():
        reserved_recipe = data_setup.create_recipe(
                task_list=[Task.by_name(u'/distribution/install')],
                reservesys=True)
        job = data_setup.create_job_for_recipes([reserved_recipe])
        job_id = job.id
        data_setup.mark_recipe_tasks_finished(reserved_recipe)
        job._mark_dirty()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job_id)
        self.assertEqual(job.recipesets[0].recipes[0].status,
                         TaskStatus.reserved)
        job.cancel()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job_id)
        # all tasks had finished, so the recipe ends up Completed rather
        # than Cancelled
        self.assertEqual(job.recipesets[0].recipes[0].status,
                         TaskStatus.completed)
def test_reserved_then_watchdog_expired(self):
    """
    This tests the case where the external watchdog expires when the
    recipe is in Reserved state
    """
    with session.begin():
        reserved_recipe = data_setup.create_recipe(
                task_list=[Task.by_name(u'/distribution/install')],
                reservesys=True)
        job = data_setup.create_job_for_recipes([reserved_recipe])
        job_id = job.id
        data_setup.mark_recipe_tasks_finished(reserved_recipe)
        job._mark_dirty()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job_id)
        self.assertEqual(job.recipesets[0].recipes[0].status,
                         TaskStatus.reserved)
        # simulate the external watchdog expiring while Reserved
        job.recipesets[0].recipes[0].abort()
        job._update_status()
    with session.begin():
        job = Job.by_id(job_id)
        # all tasks had finished, so the recipe ends up Completed rather
        # than Aborted
        self.assertEqual(job.recipesets[0].recipes[0].status,
                         TaskStatus.completed)