def test_does_not_load_RecipeTaskResults(self):
    # In large jobs with many RecipeTasks and RecipeTaskResults,
    # beaker-log-delete would previously take a long time and a lot of
    # memory, because it was traversing the entire object graph down to
    # RecipeTaskResult and loading them all into memory.
    # This test is asserting that no RecipeTask or RecipeTaskResult
    # instances are loaded when beaker-log-delete runs.
    with session.begin():
        job = data_setup.create_completed_job()
        job.deleted = datetime.datetime.utcnow()
        recipe = job.recipesets[0].recipes[0]
        server = self.log_server_url + '/recipe/'
        # Use context managers so the dummy log files are closed promptly
        # instead of leaking open file handles until GC runs.
        with open(os.path.join(self.recipe_logs_dir, 'recipe.log'), 'w') as f:
            f.write('dummy')
        recipe.logs[:] = [LogRecipe(server=server, filename=u'recipe.log')]
        with open(os.path.join(self.recipe_logs_dir, 'task.log'), 'w') as f:
            f.write('dummy')
        recipe.tasks[0].logs[:] = [LogRecipeTask(server=server, filename=u'task.log')]
        with open(os.path.join(self.recipe_logs_dir, 'result.log'), 'w') as f:
            f.write('dummy')
        recipe.tasks[0].results[0].logs[:] = \
            [LogRecipeTaskResult(server=server, filename=u'result.log')]
    # RecipeTasks/RecipeTaskResults are already loaded from the data_setup
    # calls above, expunge the session so that log_delete starts from
    # a clean slate.
    session.expunge_all()
    # Patching __new__ makes any attempt to instantiate these classes blow
    # up, proving log_delete never materialises them.
    with mock.patch.object(RecipeTask, '__new__', side_effect=AssertionError):
        with mock.patch.object(RecipeTaskResult, '__new__', side_effect=AssertionError):
            self.assertEquals(log_delete.log_delete(), 0)  # exit status
    # Check that we really deleted something, if not the test setup was faulty.
    with session.begin():
        job = Job.by_id(job.id)
        self.assertIsNotNone(job.purged)
def test_remove_user_job_cancel(self):
    # A running job owned by a user must be cancelled when that user is
    # removed through the web UI.
    with session.begin():
        user = data_setup.create_user(user_name=data_setup.unique_name('aaaaa%s'))
        job = data_setup.create_job(owner=user)
        data_setup.mark_job_running(job)
    browser = self.browser
    login(browser)
    browser.get(get_server_base() + 'users')
    browser.find_element_by_xpath('//a[@href="remove?id=%d"]' % user.user_id).click()
    # XXX: not necessary, but doing it here to buy time, since sometimes the
    # job cancellation seems to take a while
    logout(browser)
    # The recipe task status only reflects the change once
    # update_dirty_jobs() runs; expunge first so we re-load fresh state.
    session.expunge_all()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job.id)
        self.assertEquals(job.status, TaskStatus.cancelled)
        self.assertIn('User %s removed' % user.user_name,
                      job.recipesets[0].recipes[0].tasks[0].results[0].log)
def tearDown(self):
    """Clear database configuration and drop database.

    Tears down mappers and schema created by the test, then restores the
    original database URI.
    """
    try:
        session.expunge_all()
    except AttributeError:
        # SQLAlchemy < 0.5.1 has no expunge_all(); fall back to clear().
        # (Previously a bare ``except:`` which silently swallowed every
        # error, not just the missing-method case.)
        session.clear()
    clear_mappers()
    metadata.drop_all()
    metadata.clear()
    set_db_uri(self.sa_dburi, 'sqlalchemy')
def test_user_redirect():
    """If a controller redirects, transactions are committed."""
    cherrypy.root = MyRoot()
    create_request("/create_person?id=22&doerr=2")
    try:
        # should be done automatically, but just in case
        session.expunge_all()
    except AttributeError:
        # SQLAlchemy < 0.5.1
        session.clear()
    # ActiveMapper exposes ``query`` as a callable rather than a property.
    query = Person.query() if ActiveMapper else Person.query
    assert query.get(22) is not None
def test_user_exception():
    """If a controller raises an exception, transactions are rolled back."""
    cherrypy.root = MyRoot()
    create_request("/create_person?id=21&doerr=1")
    try:
        # should be done automatically, but just in case
        session.expunge_all()
    except AttributeError:
        # SQLAlchemy < 0.5.1
        session.clear()
    # ActiveMapper exposes ``query`` as a callable rather than a property.
    query = Person.query() if ActiveMapper else Person.query
    assert query.get(21) is None
def test_implicit_trans_no_error():
    """If a controller runs successfully, the transaction is committed."""
    capture_log("turbogears.database")
    cherrypy.root = MyRoot()
    create_request("/no_error?name=A.%20Dent")
    print_log()
    try:
        # should be done automatically, but just in case
        session.expunge_all()
    except AttributeError:
        # SQLAlchemy < 0.5.1
        session.clear()
    # ``one()`` raises if the row was not committed, which is the assertion.
    arthur = session.query(Person).filter_by(name="A. Dent").one()
def test_active_mapper():
    # Flush a person with one address, drop all identity-map state, then
    # re-load and verify we get a distinct but equivalent object graph.
    person = Person(name="Ford Prefect")
    home = Address(address="1 West Guildford", city="Betelgeuse")
    person.addresses.append(home)
    session.flush()
    try:
        session.expunge_all()
    except AttributeError:
        # SQLAlchemy < 0.5.1
        session.clear()
    ford = session.query(Person).filter_by(name="Ford Prefect").one()
    assert ford is not person
    assert len(ford.addresses) == 1
def test_account_close_job_cancel(self):
    # Closing an account via the remove-account client must cancel the
    # account owner's running jobs.
    with session.begin():
        owner = data_setup.create_user()
        job = data_setup.create_job(owner=owner)
        data_setup.mark_job_running(job)
    run_client(['bkr', 'remove-account', owner.user_name])
    # The recipe task status only reflects the change once
    # update_dirty_jobs() runs; expunge first so we re-load fresh state.
    session.expunge_all()
    beakerd.update_dirty_jobs()
    with session.begin():
        job = Job.by_id(job.id)
        self.assertEquals(job.status, TaskStatus.cancelled)
        self.assertIn('User %s removed' % owner.user_name,
                      job.recipesets[0].recipes[0].tasks[0].results[0].log)
def test_does_not_load_RecipeTaskResults(self):
    # In large jobs with many RecipeTasks and RecipeTaskResults,
    # beaker-log-delete would previously take a long time and a lot of
    # memory, because it was traversing the entire object graph down to
    # RecipeTaskResult and loading them all into memory.
    # This test is asserting that no RecipeTask or RecipeTaskResult
    # instances are loaded when beaker-log-delete runs.
    with session.begin():
        job = data_setup.create_completed_job()
        job.to_delete = datetime.datetime.utcnow()
        self.assertTrue(job.is_expired)
        recipe = job.recipesets[0].recipes[0]
        server = self.log_server_url + '/recipe/'
        # Use context managers so the dummy log files are closed promptly
        # instead of leaking open file handles until GC runs.
        with open(os.path.join(self.recipe_logs_dir, 'recipe.log'), 'w') as f:
            f.write('dummy')
        recipe.logs[:] = [LogRecipe(server=server, filename=u'recipe.log')]
        with open(os.path.join(self.recipe_logs_dir, 'task.log'), 'w') as f:
            f.write('dummy')
        recipe.tasks[0].logs[:] = [LogRecipeTask(server=server, filename=u'task.log')]
        with open(os.path.join(self.recipe_logs_dir, 'result.log'), 'w') as f:
            f.write('dummy')
        recipe.tasks[0].results[0].logs[:] = \
            [LogRecipeTaskResult(server=server, filename=u'result.log')]
    # RecipeTasks/RecipeTaskResults are already loaded from the data_setup
    # calls above, expunge the session so that log_delete starts from
    # a clean slate.
    session.expunge_all()
    # Patching __new__ makes any attempt to instantiate these classes blow
    # up, proving log_delete never materialises them.
    with mock.patch.object(RecipeTask, '__new__', side_effect=AssertionError):
        with mock.patch.object(RecipeTaskResult, '__new__', side_effect=AssertionError):
            self.assertEquals(log_delete.log_delete(), 0)  # exit status
    # Check that we really deleted something, if not the test setup was faulty.
    with session.begin():
        job = Job.by_id(job.id)
        self.assertIsNotNone(job.deleted)
def test_retention_tag_product(self):
    # job-modify must enforce the retention-tag/product rules: a plain tag
    # stands alone, a needs_product tag requires a product, and --product=
    # clears the product again.
    with session.begin():
        rt1 = data_setup.create_retention_tag()
        rt2 = data_setup.create_retention_tag(needs_product=True)
        p1 = data_setup.create_product()

    def modify_and_reload(*options):
        # Run job-modify with the given extra options, check it reports
        # success, and return a freshly-loaded copy of the job.
        out = run_client(['bkr', 'job-modify', self.job.t_id] + list(options))
        # assertEqual (not the deprecated assert_ on a boolean) so a
        # failure shows both strings in the message.
        self.assertEqual(out, 'Successfully modified jobs %s\n' % self.job.t_id)
        session.expunge_all()
        return TaskBase.get_by_t_id(self.job.t_id)

    # A tag which does not need a product can be set on its own.
    j = modify_and_reload('--retention-tag', '%s' % rt1.tag)
    self.assertEqual(j.retention_tag.tag, rt1.tag)
    # A tag which needs a product must be set together with one.
    j = modify_and_reload('--retention-tag', '%s' % rt2.tag,
                          '--product', '%s' % p1.name)
    self.assertEqual(j.retention_tag.tag, rt2.tag)
    self.assertEqual(j.product.name, p1.name)
    # An empty --product= clears the product while changing the tag back.
    j = modify_and_reload('--retention-tag', '%s' % rt1.tag, '--product=')
    self.assertEqual(j.retention_tag.tag, rt1.tag)
    self.assertIsNone(j.product)