Example #1
 def test_recipe_running_then_cancelled(self):
     """ This tests the case where the recipe is running, has a valid
     reservation request, but is cancelled before it's completed.
     """
     with session.begin():
         recipe = data_setup.create_recipe(
             task_list=[Task.by_name(u'/distribution/install')] * 2,
             reservesys=True)
         job = data_setup.create_job_for_recipes([recipe])
         job_id = job.id
         data_setup.mark_recipe_running(recipe)
         data_setup.mark_recipe_installation_finished(recipe)
         # we want at least one task to be Completed here
         # https://bugzilla.redhat.com/show_bug.cgi?id=1195558
         job.recipesets[0].recipes[0].tasks[0].stop()
         job.recipesets[0].recipes[0].tasks[1].start()
     beakerd.update_dirty_jobs()
     with session.begin():
         job = Job.by_id(job_id)
         self.assertEqual(job.recipesets[0].recipes[0].status,
                          TaskStatus.running)
         job.recipesets[0].cancel()
     beakerd.update_dirty_jobs()
     with session.begin():
         job = Job.by_id(job_id)
         self.assertEqual(job.recipesets[0].recipes[0].status,
                          TaskStatus.cancelled)
Example #2
 def test_recipe_running_then_watchdog_expired(self):
     """ This tests the case where the recipe is running, has a valid
     reservation request, but the watchdog expires before it's
     completed.
     """
     with session.begin():
         recipe = data_setup.create_recipe(
             task_list=[Task.by_name(u'/distribution/install')],
             reservesys=True)
         job = data_setup.create_job_for_recipes([recipe])
         job_id = job.id
         data_setup.mark_recipe_tasks_finished(recipe,
                                               task_status=TaskStatus.aborted)
         job.recipesets[0].recipes[0].abort()
     beakerd.update_dirty_jobs()
     with session.begin():
         job = Job.by_id(job_id)
         self.assertEqual(job.recipesets[0].recipes[0].status,
                          TaskStatus.reserved)
         job.recipesets[0].recipes[0].return_reservation()
     beakerd.update_dirty_jobs()
     with session.begin():
         job = Job.by_id(job_id)
         self.assertEqual(job.recipesets[0].recipes[0].status,
                           TaskStatus.aborted)
Example #3
 def test_task_aborted_return_reservation(self):
     """ This tests the case where the task was aborted, then
     the recipe goes to Reserved state and then finally the reservation
     is returned
     """
     with session.begin():
         recipe = data_setup.create_recipe(
             task_list=[Task.by_name(u'/distribution/install')],
             reservesys=True)
         job = data_setup.create_job_for_recipes([recipe])
         job_id = job.id
         data_setup.mark_recipe_tasks_finished(recipe, result=TaskResult.warn,
                                               task_status=TaskStatus.aborted)
         job._mark_dirty()
     beakerd.update_dirty_jobs()
     with session.begin():
         job = Job.by_id(job_id)
         self.assertEqual(job.recipesets[0].recipes[0].status,
                          TaskStatus.reserved)
         job.recipesets[0].recipes[0].return_reservation()
     beakerd.update_dirty_jobs()
     with session.begin():
         job = Job.by_id(job_id)
         self.assertEqual(job.recipesets[0].recipes[0].status,
                          TaskStatus.aborted)
Example #4
 def test_02_abort_dead_recipes(self):
     beakerd.process_new_recipes()
     beakerd.update_dirty_jobs()
     beakerd.queue_processed_recipesets()
     beakerd.update_dirty_jobs()
     with session.begin():
         self.assertEqual(Job.by_id(self.job2.id).status, TaskStatus.queued)
         # Remove distro_tree2 from lab1, should cause remaining recipe to abort.
         for lca in self.distro_tree2.lab_controller_assocs[:]:
             session.delete(lca)
     beakerd.abort_dead_recipes()
     beakerd.update_dirty_jobs()
     with session.begin():
         self.assertEqual(Job.by_id(self.job2.id).status, TaskStatus.aborted)
Example #5
 def test_ignore_missing_tasks(self):
     job_tid = self.server.jobs.upload('''
         <job>
             <whiteboard>job with nonexistent task</whiteboard>
             <recipeSet>
                 <recipe>
                     <distroRequires>
                         <distro_name op="=" value="BlueShoeLinux5-5" />
                     </distroRequires>
                     <hostRequires/>
                     <task name="/distribution/install" />
                     <task name="/asdf/notexist" />
                     <task name="/distribution/reservesys" />
                 </recipe>
             </recipeSet>
         </job>
         ''',
         True # ignore_missing_tasks
     )
     self.assert_(job_tid.startswith('J:'))
     with session.begin():
         job = Job.by_id(int(job_tid[2:]))
         self.assertEqual(job.ttasks, 2) # not 3
         recipe = job.recipesets[0].recipes[0]
         self.assertEqual(len(recipe.tasks), 2)
         self.assertEqual(recipe.tasks[0].task.name, u'/distribution/install')
         # /asdf/notexist is silently dropped
         self.assertEqual(recipe.tasks[1].task.name, u'/distribution/reservesys')
Example #6
    def test_job_group(self):
        with session.begin():
            user_in_group = data_setup.create_user(password='******')
            group = data_setup.create_group()
            user_in_group.groups.append(group)
            user_not_in_group = data_setup.create_user(password='******')

        # Test submitting on behalf of user's group
        config1 = create_client_config(username=user_in_group.user_name,
            password='******')
        out = run_client(['bkr', 'workflow-simple', '--random',
                '--arch', self.distro_tree.arch.arch,
                '--family', self.distro.osversion.osmajor.osmajor,
                '--job-group', group.group_name,
                '--task', self.task.name], config=config1)
        self.assertTrue(out.startswith('Submitted:'), out)
        m = re.search('J:(\d+)', out)
        job_id = m.group(1)
        with session.begin():
            job = Job.by_id(job_id)
        self.assertEqual(group, job.group)

        # Test submitting on behalf of group user does not belong to
        config2 = create_client_config(username=user_not_in_group.user_name,
            password='******')
        try:
            out2 = run_client(['bkr', 'workflow-simple', '--random',
                    '--arch', self.distro_tree.arch.arch,
                    '--family', self.distro.osversion.osmajor.osmajor,
                    '--job-group', group.group_name,
                    '--task', self.task.name], config=config2)
            fail('should raise')
        except ClientError, e:
            self.assertTrue('You are not a member of the %s group' % \
                group.group_name in e.stderr_output, e)
Example #7
File: jobs.py  Project: ustbgaofan/beaker
 def update(self, id, **kw):
     # XXX This function is awkward and needs to be cleaned up.
     try:
         job = Job.by_id(id)
     except InvalidRequestError:
         raise cherrypy.HTTPError(status=400, message='Invalid job id %s' % id)
     if not job.can_change_product(identity.current.user) or not \
         job.can_change_retention_tag(identity.current.user):
         raise cherrypy.HTTPError(status=403,
                 message="You don't have permission to update job id %s" % id)
     returns = {'success' : True, 'vars':{}}
     if 'retentiontag' in kw and 'product' in kw:
         retention_tag = RetentionTag.by_id(kw['retentiontag'])
         if int(kw['product']) == ProductWidget.product_deselected:
             product = None
         else:
             product = Product.by_id(kw['product'])
         returns.update(Utility.update_retention_tag_and_product(job,
                 retention_tag, product))
     elif 'retentiontag' in kw:
         retention_tag = RetentionTag.by_id(kw['retentiontag'])
         returns.update(Utility.update_retention_tag(job, retention_tag))
     elif 'product' in kw:
         if int(kw['product']) == ProductWidget.product_deselected:
             product = None
         else:
             product = Product.by_id(kw['product'])
         returns.update(Utility.update_product(job, product))
     if 'whiteboard' in kw:
         job.whiteboard = kw['whiteboard']
     return returns
Example #8
 def test_export_xml(self):
     b = self.browser
     # Make sure the Export button is present in the jobs grid. We can't 
     # actually click it because it triggers a download, which WebDriver 
     # can't handle.
     b.get(get_server_base() + 'jobs/')
     b.find_element_by_name('simplesearch').send_keys(unicode(self.job_to_export.id))
     b.find_element_by_name('jobsearch_simple').submit()
     b.find_element_by_xpath(
             '//tr[normalize-space(string(./td[1]))="%s"]'
             '//a[text()="Export"]'
             % self.job_to_export.t_id)
     # Make sure the Export button is present on the job page.
     b.get(get_server_base() + 'jobs/%s' % self.job_to_export.id)
     b.find_element_by_link_text('Export')
     # Fetch the exported XML directly.
     response = requests.get(get_server_base() +
             'to_xml?taskid=%s&pretty=False' % self.job_to_export.t_id)
     xml_export = response.content
     with session.begin():
         job = Job.by_id(self.job_to_export.id)
         xml_export = job.to_xml().toxml()
         xml_export_tree = lxml.etree.parse(StringIO(xml_export))
         pretty_xml = lxml.etree.tostring(xml_export_tree, pretty_print=False)
         self.assert_(pretty_xml == xml_export)
Example #9
    def test_remove_user_job_cancel(self):
        with session.begin():
            user = data_setup.create_user(user_name =
                                          data_setup.unique_name('aaaaa%s'))
            job = data_setup.create_job(owner=user)
            data_setup.mark_job_running(job)

        b = self.browser
        login(b)
        b.get(get_server_base() + 'users')
        b.find_element_by_xpath('//a[@href="remove?id=%d"]' %user.user_id).click()
        # XXX: not necessary, but doing it here to buy time, since sometimes the
        # job cancellation seems to take a while
        logout(b)

        # reflect the change in recipe task status when
        # update_dirty_jobs() is called
        session.expunge_all()
        beakerd.update_dirty_jobs()

        with session.begin():
            job = Job.by_id(job.id)
            self.assertEquals(job.status, TaskStatus.cancelled)
            self.assertIn('User %s removed' % user.user_name,
                          job.recipesets[0].recipes[0].tasks[0].results[0].log)
Example #10
File: jobs.py  Project: ustbgaofan/beaker
 def clone(self, job_id=None, recipe_id=None, recipeset_id=None,
         textxml=None, filexml=None, confirmed=False, **kw):
     """
     Review cloned xml before submitting it.
     """
     title = 'Clone Job'
     if job_id:
         # Clone from Job ID
         title = 'Clone Job %s' % job_id
         try:
             job = Job.by_id(job_id)
         except InvalidRequestError:
             flash(_(u"Invalid job id %s" % job_id))
             redirect(".")
         textxml = job.to_xml(clone=True).toprettyxml()
     elif recipeset_id:
         title = 'Clone Recipeset %s' % recipeset_id
         try:
             recipeset = RecipeSet.by_id(recipeset_id)
         except InvalidRequestError:
             flash(_(u"Invalid recipeset id %s" % recipeset_id))
             redirect(".")
         textxml = recipeset.to_xml(clone=True,from_job=False).toprettyxml()
     elif isinstance(filexml, cgi.FieldStorage):
         # Clone from file
         try:
             textxml = filexml.value.decode('utf8')
         except UnicodeDecodeError, e:
             flash(_(u'Invalid job XML: %s') % e)
             redirect('.')
Example #11
 def run(self):
     session.begin()
     recipe = Job.by_id(job.id).recipesets[0].recipes[0]
     assert not recipe.watchdog
     assert not recipe.resource
     recipe.recipeset.cancel()
     self.ready_evt.set()
     self.continue_evt.wait()
     session.commit()
Example #12
 def test_deletes_old_jobs_which_never_started(self):
     with session.begin():
         the_past = datetime.datetime.utcnow() - datetime.timedelta(days=31)
         cancelled_job = data_setup.create_job(queue_time=the_past)
         cancelled_job.cancel()
         cancelled_job.update_status()
         aborted_job = data_setup.create_job(queue_time=the_past)
         aborted_job.abort()
         aborted_job.update_status()
         self.assertEqual(cancelled_job.status, TaskStatus.cancelled)
         self.assertEqual(aborted_job.status, TaskStatus.aborted)
         self.assertIsNone(cancelled_job.recipesets[0].recipes[0].finish_time)
         self.assertIsNone(aborted_job.recipesets[0].recipes[0].finish_time)
         self.assertIsNone(cancelled_job.deleted)
         self.assertIsNone(aborted_job.deleted)
     log_delete.log_delete()
     with session.begin():
         self.assertIsNotNone(Job.by_id(cancelled_job.id).deleted)
         self.assertIsNotNone(Job.by_id(aborted_job.id).deleted)
Example #13
 def submit_job_and_check_arches(self, workflow_options, expected_arches):
     out = run_client(['bkr', 'workflow-simple', '--task', self.task.name]
             + workflow_options)
     self.assertTrue(out.startswith('Submitted:'), out)
     m = re.search('J:(\d+)', out)
     job_id = m.group(1)
     with session.begin():
         job = Job.by_id(job_id)
         self.assertEqual(len(job.recipesets), len(expected_arches))
         actual_arches = [rs.recipes[0].distro_tree.arch.arch for rs in job.recipesets]
         self.assertItemsEqual(actual_arches, expected_arches)
Example #14
 def test_no_default_install_method(self):
     # Not specifying a method in ks_meta means Beaker picks one. We want 
     # that to be the default behaviour if --method is not given.
     out = run_client(['bkr', 'workflow-simple', '--distro', self.distro.name,
             '--task', self.task.name])
     self.assertTrue(out.startswith('Submitted:'), out)
     m = re.search('J:(\d+)', out)
     job_id = m.group(1)
     with session.begin():
         job = Job.by_id(job_id)
         self.assertEquals(job.recipesets[0].recipes[0].ks_meta, u'')
Example #15
 def test_02_abort_dead_recipes(self):
     beakerd.process_new_recipes()
     beakerd.update_dirty_jobs()
     with session.begin():
         job = Job.by_id(self.job2.id)
         self.assertEqual(job.status, TaskStatus.processed)
         # check that there are rows in system_recipe_map
         self.assertNotEqual(len(job.recipesets[0].recipes[0].systems), 0)
         # Remove distro_tree2 from lab1, should cause remaining recipe to abort.
         for lca in self.distro_tree2.lab_controller_assocs[:]:
             session.delete(lca)
     beakerd.queue_processed_recipesets()
     beakerd.update_dirty_jobs()
     beakerd.abort_dead_recipes()
     beakerd.update_dirty_jobs()
     with session.begin():
         job = Job.by_id(self.job2.id)
         self.assertEqual(job.status, TaskStatus.aborted)
         # https://bugzilla.redhat.com/show_bug.cgi?id=1173376
         # check that there are no rows in system_recipe_map
         self.assertEqual(len(job.recipesets[0].recipes[0].systems), 0)
Example #16
 def run(self):
     session.begin()
     recipe = Job.by_id(job.id).recipesets[0].recipes[0]
     assert recipe.status == TaskStatus.queued
     self.ready_evt.set()
     self.continue_evt.wait()
     try:
         beakerd.schedule_queued_recipe(recipe.id)
         assert False, 'should raise'
     except StaleTaskStatusException:
         pass # expected
     session.rollback()
Example #17
 def test_log_delete_to_delete(self):
     with session.begin():
         self.job_to_delete.to_delete = datetime.datetime.utcnow()
         self.job_to_delete.recipesets[0].recipes[0].logs.append(LogRecipe(filename=u'test.log'))
     r_ = self.job_to_delete.recipesets[0].recipes[0]
     dir = os.path.join(r_.logspath, r_.filepath)
     self.make_dir(dir)
     f = open(os.path.join(dir,'test.log'), 'w')
     f.close()
     log_delete.log_delete()
     self._assert_logs_not_in_db(Job.by_id(self.job_to_delete.id))
     self.check_dir_not_there(dir)
Example #18
 def test_clients_default_zero(self):
     out = run_client(['bkr', 'workflow-simple', '--distro', self.distro.name,
             '--task', '/distribution/reservesys',
             '--servers', '2'])
     self.assertTrue(out.startswith('Submitted:'), out)
     m = re.search('J:(\d+)', out)
     job_id = m.group(1)
     with session.begin():
         job = Job.by_id(job_id)
         self.assertEquals(len(job.recipesets), 1)
         self.assertEquals(len(job.recipesets[0].recipes), 2)
         self.assertEquals(job.recipesets[0].recipes[0].tasks[1].role, 'SERVERS')
         self.assertEquals(job.recipesets[0].recipes[1].tasks[1].role, 'SERVERS')
Example #19
File: jobs.py  Project: ustbgaofan/beaker
 def stop(self, job_id, stop_type, msg=None):
     """
     Set job status to Completed
     """
     try:
         job = Job.by_id(job_id)
     except InvalidRequestError:
         raise BX(_('Invalid job ID: %s' % job_id))
     if stop_type not in job.stop_types:
         raise BX(_('Invalid stop_type: %s, must be one of %s' %
                          (stop_type, job.stop_types)))
     kwargs = dict(msg = msg)
     return getattr(job,stop_type)(**kwargs)
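The stop_type argument selects a method on the Job object by name (presumably values such as cancel or abort). Below is a minimal, hypothetical usage sketch, assuming jobs.stop is reachable over the same XML-RPC interface that Example #5 uses for jobs.upload; the server URL, the authentication handling, and the assumption that 'cancel' is a valid stop type are placeholders, not confirmed by these snippets.

 import xmlrpclib  # Python 2 standard library; xmlrpc.client on Python 3

 # Hypothetical client-side call: cancel job 1234 with an explanatory
 # message. Assumes the proxy already carries whatever authentication the
 # server requires.
 server = xmlrpclib.ServerProxy('https://beaker.example.com/RPC2')
 server.jobs.stop(1234, 'cancel', 'cancelled by nightly cleanup')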
Example #20
 def test_recipe_running_then_cancelled(self):
     """ This tests the case where the recipe is running, has a valid
     reservation request, but is cancelled before it's completed.
     """
     with session.begin():
         recipe = data_setup.create_recipe(
             task_list=[Task.by_name(u'/distribution/install')],
             reservesys=True)
         job = data_setup.create_job_for_recipes([recipe])
         job_id = job.id
         data_setup.mark_recipe_running(recipe)
         job._mark_dirty()
     beakerd.update_dirty_jobs()
     with session.begin():
         job = Job.by_id(job_id)
         self.assertEqual(job.recipesets[0].recipes[0].status,
                          TaskStatus.running)
         job.recipesets[0].cancel()
     beakerd.update_dirty_jobs()
     with session.begin():
         job = Job.by_id(job_id)
         self.assertEqual(job.recipesets[0].recipes[0].status,
                          TaskStatus.cancelled)
Example #21
 def test_reserved_then_watchdog_expired(self):
     """ This tests the case where the external
     watchdog expires when the recipe is in Reserved state
     """
     with session.begin():
         recipe = data_setup.create_recipe(
             task_list=[Task.by_name(u'/distribution/install')],
             reservesys=True)
         job = data_setup.create_job_for_recipes([recipe])
         job_id = job.id
         data_setup.mark_recipe_tasks_finished(recipe)
         job._mark_dirty()
     beakerd.update_dirty_jobs()
     with session.begin():
         job = Job.by_id(job_id)
         self.assertEqual(job.recipesets[0].recipes[0].status,
                      TaskStatus.reserved)
         job.recipesets[0].recipes[0].abort()
         job._update_status()
     with session.begin():
         job = Job.by_id(job_id)
         self.assertEqual(job.recipesets[0].recipes[0].status,
                          TaskStatus.completed)
Example #22
 def test_reserved_then_job_cancelled(self):
     """ This tests the case where the recipe is Reserved
     but the job is cancelled
     """
     with session.begin():
         recipe = data_setup.create_recipe(
             task_list=[Task.by_name(u'/distribution/install')],
             reservesys=True)
         job = data_setup.create_job_for_recipes([recipe])
         job_id = job.id
         data_setup.mark_recipe_tasks_finished(recipe)
         job._mark_dirty()
     beakerd.update_dirty_jobs()
     with session.begin():
         job = Job.by_id(job_id)
         self.assertEqual(job.recipesets[0].recipes[0].status,
                          TaskStatus.reserved)
         job.cancel()
     beakerd.update_dirty_jobs()
     with session.begin():
         job = Job.by_id(job_id)
         self.assertEqual(job.recipesets[0].recipes[0].status,
                          TaskStatus.completed)
Example #23
 def test_kickstart_template(self):
     template_contents = 'install\n%packages\n%end\n'
     template_file = NamedTemporaryFile()
     template_file.write(template_contents)
     template_file.flush()
     out = run_client(['bkr', 'workflow-simple', '--distro', self.distro.name,
             '--task', self.task.name,
             '--kickstart', template_file.name])
     self.assertTrue(out.startswith('Submitted:'), out)
     m = re.search('J:(\d+)', out)
     job_id = m.group(1)
     with session.begin():
         job = Job.by_id(job_id)
         self.assertEquals(job.recipesets[0].recipes[0].kickstart,
                 template_contents)
Example #24
    def test_log_delete_expired(self):
        with session.begin():
            job_to_delete = data_setup.create_completed_job(
                    start_time=datetime.datetime.utcnow() - datetime.timedelta(days=60),
                    finish_time=datetime.datetime.utcnow() - datetime.timedelta(days=31))
            self.job_to_delete.owner = self.user
            job_to_delete.recipesets[0].recipes[0].logs.append(LogRecipe(filename=u'test.log'))
            r_delete = job_to_delete.recipesets[0].recipes[0]
            dir_delete = os.path.join(r_delete.logspath, r_delete.filepath)

        self.make_dir(dir_delete)
        fd = open(os.path.join(dir_delete,'test.log'), 'w')
        fd.close()
        log_delete.log_delete()
        self._assert_logs_not_in_db(Job.by_id(job_to_delete.id))
        self.check_dir_not_there(dir_delete)
Example #25
 def test_uses_original_install_task_on_older_distros(self):
     with session.begin():
         distro = data_setup.create_distro(osmajor=u'RedHatEnterpriseLinux7')
         data_setup.create_distro_tree(distro=distro)
     out = run_client(['bkr', 'workflow-simple',
                       '--distro', distro.name,
                       '--task', self.task.name])
     self.assertIn('Submitted:', out)
     m = re.search('J:(\d+)', out)
     job_id = m.group(1)
     with session.begin():
         job = Job.by_id(job_id)
         tasks = job.recipesets[0].recipes[0].tasks
         self.assertEqual(len(tasks), 2)
         self.assertEqual(tasks[0].name, '/distribution/install')
         self.assertEqual(tasks[1].name, self.task.name)
Example #26
 def test_update_inventory_wait(self):
     args = ['bkr', 'update-inventory',
             '--wait', self.system1.fqdn]
     proc = start_client(args)
     out = proc.stdout.readline().rstrip()
     self.assert_(out.startswith('Submitted:'), out)
     m = re.search('J:(\d+)', out)
     job_id = m.group(1)
     out = proc.stdout.readline().rstrip()
     self.assert_('Watching tasks (this may be safely interrupted)...' == out)
     with session.begin():
         job = Job.by_id(job_id)
         job.cancel()
         job.update_status()
     returncode = proc.wait()
     self.assertEquals(returncode, 1)
Example #27
 def test_uses_new_check_install_task_by_default(self):
     with session.begin():
         distro = data_setup.create_distro(osmajor=u'Fedora29')
         data_setup.create_distro_tree(distro=distro)
     out = run_client(['bkr', 'workflow-simple',
                       '--distro', distro.name,
                       '--task', self.task.name])
     self.assertIn('Submitted:', out)
     m = re.search('J:(\d+)', out)
     job_id = m.group(1)
     with session.begin():
         job = Job.by_id(job_id)
         tasks = job.recipesets[0].recipes[0].tasks
         self.assertEqual(len(tasks), 2)
         self.assertEqual(tasks[0].name, '/distribution/check-install')
         self.assertEqual(tasks[1].name, self.task.name)
Example #28
 def test_export_xml(self):
     sel = self.selenium
     sel.open('jobs')
     sel.type("simplesearch", "%s" % self.job_to_export.id)
     sel.click("//a[text()='Export']")
     sel.open('jobs/%s' % self.job_to_export.id)
     sel.click("//a[text()='Export']")
     # make sure it's not pretty-printed, otherwise it screws things up
     sel.open('to_xml?taskid=%s&to_screen=True&pretty=False' % self.job_to_export.t_id)
     sel.wait_for_page_to_load('30000')
     xml_export = sel.get_text('//body')
     with session.begin():
         job = Job.by_id(self.job_to_export.id)
         xml_export = job.to_xml().toxml()
         xml_export_tree = lxml.etree.parse(StringIO(xml_export))
         pretty_xml = lxml.etree.tostring(xml_export_tree, pretty_print=False)
         self.assert_(pretty_xml == xml_export)
Example #29
 def test_job_owner(self):
     with session.begin():
         bot = data_setup.create_user(password='******')
         user = data_setup.create_user()
         user.add_submission_delegate(bot, service=u'testdata')
     config = create_client_config(username=bot.user_name, password='******')
     out = run_client(['bkr', 'workflow-simple',
             '--job-owner', user.user_name,
             '--arch', self.distro_tree.arch.arch,
             '--family', self.distro.osversion.osmajor.osmajor,
             '--task', self.task.name], config=config)
     self.assertTrue(out.startswith('Submitted:'), out)
     m = re.search('J:(\d+)', out)
     job_id = m.group(1)
     with session.begin():
         job = Job.by_id(job_id)
         self.assertEquals(job.owner, user)
Example #30
    def test_account_close_job_cancel(self):
        with session.begin():
            user1 = data_setup.create_user()
            job = data_setup.create_job(owner=user1)
            data_setup.mark_job_running(job)

        run_client(['bkr', 'remove-account', user1.user_name])

        # reflect the change in recipe task status when
        # update_dirty_jobs() is called
        session.expunge_all()
        beakerd.update_dirty_jobs()

        with session.begin():
            job = Job.by_id(job.id)
            self.assertEquals(job.status, TaskStatus.cancelled)
            self.assertIn('User %s removed' % user1.user_name,
                          job.recipesets[0].recipes[0].tasks[0].results[0].log)
Example #31
    def test_submit_job_wait(self):
        args = [
            'bkr', 'workflow-simple', '--random', '--arch',
            self.distro_tree.arch.arch, '--family',
            self.distro.osversion.osmajor.osmajor, '--task', self.task.name,
            '--wait'
        ]
        proc = start_client(args)
        out = proc.stdout.readline().rstrip()
        self.assert_(out.startswith('Submitted:'), out)
        m = re.search('J:(\d+)', out)
        job_id = m.group(1)

        out = proc.stdout.readline().rstrip()
        self.assert_(
            'Watching tasks (this may be safely interrupted)...' == out)

        with session.begin():
            job = Job.by_id(job_id)
            job.cancel()
            job.update_status()

        returncode = proc.wait()
        self.assert_(returncode == 1)
Example #32
def update_dirty_job(job_id):
    log.debug('Updating dirty job %s', job_id)
    job = Job.by_id(job_id)
    job.update_status()
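Most tests above call beakerd.update_dirty_jobs(), which presumably loops over the dirty jobs and hands each id to update_dirty_job() as shown here. A rough sketch under that assumption; the Job.is_dirty filter and the per-job transaction handling are guesses rather than the confirmed beakerd code.

 def update_dirty_jobs():
     # Collect the ids up front so each job is updated in its own
     # transaction; a failure on one job then does not roll back the others.
     dirty_job_ids = [job_id for job_id, in
                      Job.query.filter(Job.is_dirty).values(Job.id)]
     for job_id in dirty_job_ids:
         session.begin()
         try:
             update_dirty_job(job_id)
             session.commit()
         except Exception:
             log.exception('Error updating dirty job %s', job_id)
             session.rollback()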
Example #33
 def to_xml(self, id):
     jobxml = Job.by_id(id).to_xml().toxml()
     return dict(xml=jobxml)
Example #34
    def test_cancel_while_scheduling(self):
        # This test simulates a user cancelling their job at the same time as
        # beakerd is scheduling it. beakerd assigns a system and creates
        # a watchdog and sets the recipe status to Waiting, then it's
        # overwritten by another transaction setting the status to Cancelled.
        with session.begin():
            lab_controller = data_setup.create_labcontroller()
            system = data_setup.create_system(shared=True,
                                              lab_controller=lab_controller)
            distro_tree = data_setup.create_distro_tree(
                osmajor=u'Fedora20', lab_controllers=[lab_controller])
            job = data_setup.create_job(distro_tree=distro_tree)
            job.recipesets[0].recipes[0]._host_requires = (u"""
                <hostRequires>
                    <hostname op="=" value="%s" />
                </hostRequires>
                """ % system.fqdn)
        beakerd.process_new_recipes()
        beakerd.update_dirty_jobs()
        with session.begin():
            job = Job.by_id(job.id)
            system = System.query.get(system.id)
            self.assertEquals(job.status, TaskStatus.processed)
            self.assertEquals(job.recipesets[0].recipes[0].systems, [system])

        # Two "concurrent" transactions, in the first one beakerd has
        # scheduled the recipe and is about to commit...
        class ScheduleThread(Thread):
            def __init__(self, **kwargs):
                super(ScheduleThread, self).__init__(**kwargs)
                self.ready_evt = Event()
                self.continue_evt = Event()

            def run(self):
                session.begin()
                recipeset = Job.by_id(job.id).recipesets[0]
                assert recipeset.status == TaskStatus.processed
                self.ready_evt.set()
                self.continue_evt.wait()
                try:
                    beakerd.queue_processed_recipeset(recipeset.id)
                    assert False, 'should raise'
                except StaleTaskStatusException:
                    pass  # expected
                session.rollback()

        # ... and in the second transaction the user is cancelling the recipe.
        class CancelThread(Thread):
            def __init__(self, **kwargs):
                super(CancelThread, self).__init__(**kwargs)
                self.ready_evt = Event()
                self.continue_evt = Event()

            def run(self):
                session.begin()
                recipe = Job.by_id(job.id).recipesets[0].recipes[0]
                assert not recipe.watchdog
                assert not recipe.resource
                recipe.recipeset.cancel()
                self.ready_evt.set()
                self.continue_evt.wait()
                session.commit()

        sched_thread = ScheduleThread()
        cancel_thread = CancelThread()
        sched_thread.start()
        cancel_thread.start()
        sched_thread.ready_evt.wait()
        cancel_thread.ready_evt.wait()
        sched_thread.continue_evt.set()
        cancel_thread.continue_evt.set()
        sched_thread.join()
        cancel_thread.join()
        with session.begin():
            session.expire_all()
            job.update_status()
            self.assertEquals(job.status, TaskStatus.cancelled)
            self.assertEquals(job.recipesets[0].recipes[0].watchdog, None)
            self.assertEquals(system.open_reservation, None)
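The StaleTaskStatusException expected in ScheduleThread is the optimistic-locking outcome of this race: whichever transaction commits second must notice that the recipe set's status changed underneath it. The following self-contained toy shows the underlying pattern; the status_version column and throwaway SQLite table are hypothetical, not Beaker's actual schema.

 import sqlalchemy as sa

 engine = sa.create_engine('sqlite://')
 metadata = sa.MetaData()
 recipe_sets = sa.Table('recipe_set', metadata,
         sa.Column('id', sa.Integer, primary_key=True),
         sa.Column('status', sa.String(20)),
         sa.Column('status_version', sa.Integer, nullable=False))
 metadata.create_all(engine)

 class StaleTaskStatusException(Exception):
     pass

 def set_status(conn, recipeset_id, seen_version, new_status):
     # The UPDATE only matches if the version we read is still current, so
     # a concurrent status change leaves rowcount at 0 and we fail loudly.
     result = conn.execute(recipe_sets.update()
             .where(recipe_sets.c.id == recipeset_id)
             .where(recipe_sets.c.status_version == seen_version)
             .values(status=new_status, status_version=seen_version + 1))
     if result.rowcount != 1:
         raise StaleTaskStatusException('RecipeSet %s changed underneath us'
                 % recipeset_id)

 with engine.begin() as conn:
     conn.execute(recipe_sets.insert().values(id=1, status='Processed',
             status_version=0))
     set_status(conn, 1, 0, 'Cancelled')       # the user's cancel wins
     try:
         set_status(conn, 1, 0, 'Queued')      # the scheduler's write is stale
     except StaleTaskStatusException:
         pass  # expected, mirroring the test above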
Example #35
 def test_01_invalid_system_distro_combo(self):
     beakerd.process_new_recipes()
     beakerd.update_dirty_jobs()
     with session.begin():
         self.assertEqual(Job.by_id(self.job1.id).status, TaskStatus.aborted)
         self.assertEqual(Job.by_id(self.job2.id).status, TaskStatus.processed)
Example #36
    def default(self, id):
        try:
            job = Job.by_id(id)
        except InvalidRequestError:
            flash(_(u"Invalid job id %s" % id))
            redirect(".")

        if job.counts_as_deleted():
            flash(_(u'Invalid %s, has been deleted' % job.t_id))
            redirect(".")

        recipe_set_history = [
            RecipeSetActivity.query.with_parent(elem, "activity")
            for elem in job.recipesets
        ]
        recipe_set_data = []
        for query in recipe_set_history:
            for d in query:
                recipe_set_data.append(d)

        recipe_set_data += job.activity
        recipe_set_data = sorted(recipe_set_data,
                                 key=lambda x: x.created,
                                 reverse=True)

        job_history_grid = BeakerDataGrid(
            name='job_history_datagrid',
            fields=[
                BeakerDataGrid.Column(name='user',
                                      getter=lambda x: x.user,
                                      title='User',
                                      options=dict(sortable=True)),
                BeakerDataGrid.Column(name='service',
                                      getter=lambda x: x.service,
                                      title='Via',
                                      options=dict(sortable=True)),
                BeakerDataGrid.Column(name='created',
                                      title='Created',
                                      getter=lambda x: x.created,
                                      options=dict(sortable=True)),
                BeakerDataGrid.Column(name='object_name',
                                      getter=lambda x: x.object_name(),
                                      title='Object',
                                      options=dict(sortable=True)),
                BeakerDataGrid.Column(name='field_name',
                                      getter=lambda x: x.field_name,
                                      title='Field Name',
                                      options=dict(sortable=True)),
                BeakerDataGrid.Column(name='action',
                                      getter=lambda x: x.action,
                                      title='Action',
                                      options=dict(sortable=True)),
                BeakerDataGrid.Column(name='old_value',
                                      getter=lambda x: x.old_value,
                                      title='Old value',
                                      options=dict(sortable=True)),
                BeakerDataGrid.Column(name='new_value',
                                      getter=lambda x: x.new_value,
                                      title='New value',
                                      options=dict(sortable=True)),
            ])

        return_dict = dict(
            title='Job',
            recipeset_widget=self.recipeset_widget,
            recipe_widget=self.recipe_widget,
            hidden_id=widgets.HiddenField(name='job_id', value=job.id),
            job_history=recipe_set_data,
            job_history_grid=job_history_grid,
            whiteboard_widget=self.whiteboard_widget,
            action_widget=self.job_page_action_widget,
            delete_action=url('delete_job_page'),
            job=job,
            product_widget=self.product_widget,
            retention_tag_widget=self.retention_tag_widget,
        )
        return return_dict
Example #37
def log_delete(print_logs=False, dry=False, limit=None):
    if dry:
        logger.info('Dry run only')
    logger.info('Getting expired jobs')

    failed = False
    if not dry:
        requests_session = requests.Session()
        log_delete_user = config.get('beaker.log_delete_user')
        log_delete_password = config.get('beaker.log_delete_password')

        available_auths = []
        available_auth_names = []

        if _kerberos_available:
            available_auths.append(
                requests_kerberos.HTTPKerberosAuth(
                    mutual_authentication=requests_kerberos.DISABLED))
            available_auth_names.append('Kerberos')

        if log_delete_user and log_delete_password:
            available_auths.append(
                requests.auth.HTTPDigestAuth(log_delete_user,
                                             log_delete_password))
            available_auth_names.append('HTTPDigestAuth')
        requests_session.auth = MultipleAuth(available_auths)
        logger.debug('Available authentication methods: %s' %
                     ', '.join(available_auth_names))

    for jobid, in Job.query.filter(Job.is_expired).limit(limit).values(Job.id):
        logger.info('Deleting logs for job %s', jobid)
        try:
            session.begin()
            job = Job.by_id(jobid)
            all_logs = job.all_logs(load_parent=False)
            # We always delete entire directories, not individual log files,
            # because that's faster, and because we never mix unrelated log
            # files together in the same directory so it's safe to do that.
            # We keep a trailing slash on the directories otherwise when we try
            # to DELETE them, Apache will first redirect us to the trailing
            # slash.
            log_dirs = (os.path.dirname(log.full_path) + '/'
                        for log in all_logs)
            for path in remove_descendants(log_dirs):
                if not dry:
                    if urlparse.urlparse(path).scheme:
                        # We need to handle redirects ourselves, since requests
                        # turns DELETE into GET on 302 which we do not want.
                        response = requests_session.delete(
                            path, allow_redirects=False)
                        redirect_limit = 10
                        while redirect_limit > 0 and response.status_code in (
                                301, 302, 303, 307):
                            response = requests_session.delete(
                                response.headers['Location'],
                                allow_redirects=False)
                            redirect_limit -= 1
                        if response.status_code not in (200, 204, 404):
                            response.raise_for_status()
                    else:
                        try:
                            shutil.rmtree(path)
                        except OSError, e:
                            if e.errno == errno.ENOENT:
                                pass
                if print_logs:
                    print path
            if not dry:
                job.delete()
                session.commit()
                session.close()
            else:
                session.close()
        except Exception, e:
            logger.exception('Exception while deleting logs for job %s', jobid)
            failed = True
            session.close()
            continue
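log_delete() relies on a remove_descendants() helper that is not shown in this snippet. Here is a plausible sketch, assuming its purpose is to collapse the directory list so only top-most directories are deleted (each path already ends with a trailing slash, as constructed above); this is an illustration, not the project's implementation.

 def remove_descendants(paths):
     # Yield only directories that are not inside another directory from the
     # same input, so deleting the yielded ones also removes the rest.
     kept = []
     for path in sorted(set(paths)):  # a parent sorts before its children
         if not any(path.startswith(parent) for parent in kept):
             kept.append(path)
             yield path

 # Hypothetical usage:
 #   list(remove_descendants(['/logs/1/', '/logs/1/tasks/', '/logs/2/']))
 #   -> ['/logs/1/', '/logs/2/']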