def test_run_cronscript(self):
    """The question email job runs end-to-end via the cronscript.

    The cronscript is configured via schema-lazr.conf and security.cfg.
    """
    job = make_question_job(
        self.factory, QuestionRecipientSet.ASKER_SUBSCRIBER)
    question = job.question
    with person_logged_in(question.target.owner):
        question.linkBug(self.factory.makeBug(target=question.target))
        question.linkFAQ(
            question.target.owner,
            self.factory.makeFAQ(target=question.target),
            'test FAQ link')
    user = job.user
    with person_logged_in(user):
        # The user must have a preferred language and be an answer
        # contact for the job to send mail to them.
        lang_set = getUtility(ILanguageSet)
        user.addLanguage(lang_set['en'])
        question.target.addAnswerContact(user, user)
    transaction.commit()
    out, err, exit_code = run_script(
        "LP_DEBUG_SQL=1 cronscripts/process-job-source.py -vv %s"
        % (IQuestionEmailJobSource.getName()))
    self.addDetail("stdout", Content(UTF8_TEXT, lambda: out))
    self.addDetail("stderr", Content(UTF8_TEXT, lambda: err))
    self.assertEqual(0, exit_code)
    # assertNotIn/assertIn give clearer failure output than
    # assertTrue('...' in err).
    self.assertNotIn('Traceback (most recent call last)', err)
    message = (
        'QuestionEmailJob has sent email for question %s.' % question.id)
    # Fixed typo in the failure message: "Cound" -> "Could".
    self.assertIn(
        message, err,
        'Could not find "%s" in err log:\n%s.' % (message, err))
    # Invalidate the Storm cache so the status reflects what the
    # cronscript's separate process committed.
    IStore(job.job).invalidate()
    self.assertEqual(JobStatus.COMPLETED, job.job.status)
def test___init___None_errors(self):
    """Content() must reject a None content type or None bytes callable."""
    cases = [
        (None, None),
        (None, lambda: ["traceback"]),
        (ContentType("text", "traceback"), None),
    ]
    for content_type, get_bytes in cases:
        self.assertThat(
            lambda: Content(content_type, get_bytes), raises_value_error)
def test_smoke(self):
    """Run the person-merge job end-to-end via the cronscript."""
    # Smoke test, primarily for DB permissions needed for users and teams.
    # Check the oopses in /var/tmp/lperr.test if the person.merged
    # assertion fails.
    self.transfer_email()
    to_team = self.factory.makeTeam(name='legion')
    from_team = self.factory.makeTeam(name='null')
    with person_logged_in(from_team.teamowner):
        # The merged-from team must be empty before it can be merged.
        from_team.teamowner.leave(from_team)
    self.job_source.create(
        from_person=from_team, to_person=to_team,
        reviewer=from_team.teamowner, requester=self.factory.makePerson())
    transaction.commit()
    out, err, exit_code = run_script(
        "LP_DEBUG_SQL=1 cronscripts/process-job-source.py -vv %s"
        % (IPersonMergeJobSource.getName()))
    self.addDetail("stdout", Content(UTF8_TEXT, lambda: out))
    self.addDetail("stderr", Content(UTF8_TEXT, lambda: err))
    self.assertEqual(0, exit_code)
    # The cronscript ran in another process; drop the Storm cache so the
    # merged attributes reflect what it committed.
    IStore(self.from_person).invalidate()
    self.assertEqual(self.to_person, self.from_person.merged)
    self.assertEqual(to_team, from_team.merged)
def test_smoke_admining_team(self):
    """Run a membership-notification job involving an admining team."""
    # Smoke test, primarily for DB permissions needed by queries to work
    # with admining users and teams.
    # Check the oopses in /var/tmp/lperr.test if the assertions fail.
    with person_logged_in(self.team.teamowner):
        # This implicitly creates a job, but it is not the job under test.
        admining_team = self.factory.makeTeam()
        self.team.addMember(
            admining_team, self.team.teamowner, force_team_add=True)
        membership = getUtility(ITeamMembershipSet).getByPersonAndTeam(
            admining_team, self.team)
        membership.setStatus(
            TeamMembershipStatus.ADMIN, self.team.teamowner)
        job = self.job_source.create(
            self.person, self.team, self.team.teamowner,
            TeamMembershipStatus.APPROVED, TeamMembershipStatus.ADMIN)
        # Capture the repr now; the job is checked against the cronscript
        # log output below.
        job_repr = repr(job)
    transaction.commit()
    out, err, exit_code = run_script(
        "LP_DEBUG_SQL=1 cronscripts/process-job-source.py -vv %s" % (
            IMembershipNotificationJobSource.getName()))
    self.addDetail("stdout", Content(UTF8_TEXT, lambda: [out]))
    self.addDetail("stderr", Content(UTF8_TEXT, lambda: [err]))
    self.assertEqual(0, exit_code)
    self.assertTrue(job_repr in err, err)
    self.assertTrue("MembershipNotificationJob sent email" in err, err)
def _assert_run_cronscript(self, create_job):
    """Run the job built by `create_job` via the cronscript and check it.

    :param create_job: Callable taking (distro, bug, grantee, owner) and
        returning a (job, job_type) tuple, where job_type names the job
        source passed to process-job-source.py.
    """
    # The cronscript is configured: schema-lazr.conf and security.cfg.
    # The job runs correctly and the requested bug subscriptions are
    # removed.
    distro = self.factory.makeDistribution()
    grantee = self.factory.makePerson()
    owner = self.factory.makePerson()
    bug = self.factory.makeBug(
        owner=owner, target=distro,
        information_type=InformationType.USERDATA)
    with person_logged_in(owner):
        bug.subscribe(grantee, owner)
        job, job_type = create_job(distro, bug, grantee, owner)
        # Subscribing grantee has created an artifact grant so we need to
        # revoke that to test the job.
        artifact = self.factory.makeAccessArtifact(concrete=bug)
        getUtility(IAccessArtifactGrantSource).revokeByArtifact(
            [artifact], [grantee])
    transaction.commit()
    out, err, exit_code = run_script(
        "LP_DEBUG_SQL=1 cronscripts/process-job-source.py -vv %s"
        % (job_type))
    self.addDetail("stdout", Content(UTF8_TEXT, lambda: out))
    self.addDetail("stderr", Content(UTF8_TEXT, lambda: err))
    self.assertEqual(0, exit_code)
    self.assertTrue('Traceback (most recent call last)' not in err)
    # Drop the Storm cache so the status reflects the cronscript's commit.
    IStore(job.job).invalidate()
    self.assertEqual(JobStatus.COMPLETED, job.job.status)
    self.assertNotIn(
        grantee, removeSecurityProxy(bug).getDirectSubscribers())
def _get_details(self):
    """Calculate a details dict for the test - attachments etc.

    Returns a dict mapping detail names to testtools Content objects.
    Always includes 'filename' and 'random_seed'; adds 'error' for
    failures and 'performance' for successes that carry metrics.
    """
    details = {}
    # NOTE(review): `result` is computed but never used below — presumably
    # left over from an earlier revision; confirm before removing.
    result = attribute_as_text(self._case, 'result', 'status')
    details['filename'] = Content(
        mime_utf8, lambda: [self._binary.file])
    details['random_seed'] = Content(
        mime_utf8, lambda: [self._binary.random_seed])
    if self._get_outcome() == 'addFailure':
        # Extract the error details. Skips have no details because it's not
        # skip like unittest does, instead the runner just bypasses N tests.
        txt = self._error_text(self._case)
        details['error'] = Content(mime_utf8, lambda: [txt])
    if self._get_outcome() == 'addSuccess':
        # Successful tests may have performance metrics.
        perflist = list_children(self._case, 'performance')
        if perflist:
            presults = []
            for perf in perflist:
                pmin = bool(int(attribute_as_text(perf, 'minimize')))
                # NOTE(review): `pmax` is unused; the label below is
                # derived from pmin alone — confirm that is intended.
                pmax = bool(int(attribute_as_text(perf, 'maximize')))
                pval = float(attribute_as_text(perf, 'value'))
                txt = node_as_text(perf)
                txt = 'Performance(' + (
                    pmin and 'minimized' or 'maximized'
                    ) + '): ' + txt.strip() + '\n'
                presults += [(pval, txt)]
            # Sort by value so the detail lists metrics in ascending order.
            presults.sort()
            perf_details = [e[1] for e in presults]
            details['performance'] = Content(
                mime_utf8, lambda: perf_details)
    return details
def _fix_twisted_logs(self, detailed, detail_name):
    """
    Split the Eliot logs out of a Twisted log.

    :param detailed: Object with ``getDetails`` where the original
        Twisted logs are stored.
    :param detail_name: Name of the Twisted log detail.
    """
    twisted_log = detailed.getDetails()[detail_name]
    # Single-element list used as a mutable cell so the nested function
    # can overwrite it (pre-`nonlocal` idiom).
    split_logs = [None]

    def _get_split_logs():
        # Memoize the split log so we don't iterate through it twice.
        if split_logs[0] is None:
            split_logs[0] = _split_map_maybe(
                _get_eliot_data,
                _iter_content_lines(twisted_log),
            )
        return split_logs[0]

    # The trick here is that we can't iterate over the base detail yet.
    # We can only use it inside the iter_bytes of the Content objects
    # that we add. This is because the only time that we *know* the
    # details are populated is when the details are evaluated. If we call
    # it in _setUp(), the logs are empty. If we call it in cleanup, the
    # detail is gone.
    detailed.addDetail(
        detail_name,
        Content(UTF8_TEXT, lambda: _get_split_logs()[0]))
    detailed.addDetail(
        self._ELIOT_LOG_DETAIL_NAME,
        Content(UTF8_TEXT,
                lambda: _prettyformat_lines(_get_split_logs()[1])))
def test_produces_suitable_output_for_tgt_admin(self):
    """A generated tgt entry parses cleanly under `tgt-admin --pretend`."""
    spec = make_image_spec()
    image = self.make_file()
    osystem = factory.make_name('osystem')
    entry = boot_resources.tgt_entry(
        osystem, spec.arch, spec.subarch, spec.release, spec.label, image)
    config = self.make_file(contents=entry)
    # Pretend to be root, but without requiring the actual privileges and
    # without prompting for a password. In that state, run tgt-admin.
    # It has to think it's root, even for a "pretend" run.
    # Make it read the config we just produced, and pretend to update its
    # iSCSI targets based on what it finds in the config.
    #
    # The only real test is that this succeeds.
    cmd = Popen([
        'fakeroot', 'tgt-admin',
        '--conf', config,
        '--pretend',
        '--update', 'ALL',
        ], stdout=PIPE, stderr=PIPE)
    stdout, stderr = cmd.communicate()
    # Attach both streams so a failure is debuggable from the test result.
    self.addDetail('tgt-stderr', Content(UTF8_TEXT, lambda: [stderr]))
    self.addDetail('tgt-stdout', Content(UTF8_TEXT, lambda: [stdout]))
    self.assertEqual(0, cmd.returncode)
def _request_stop(self):
    """Try to stop the daemon cleanly."""
    self._process.terminate()
    # NOTE(review): wait() before communicate() can deadlock if the child
    # fills its stdout/stderr pipe buffers before exiting — presumably the
    # daemon's output is small enough that this never happens; confirm.
    self._process.wait(timeout=self._timeout)
    outstr, errstr = self._process.communicate()
    # Use the executable's basename to label the attached details.
    binary = self._command[0].split("/")[-1]
    if outstr:
        self.addDetail('%s-out' % binary,
                       Content(UTF8_TEXT, lambda: [outstr]))
    if errstr:
        self.addDetail('%s-err' % binary,
                       Content(UTF8_TEXT, lambda: [errstr]))
def test_run(self):
    """The cronscript calls ProductJobManager.createAllDailyJobs().

    This test uses the same setup as
    ProductJobManagerTestCase.test_createAllDailyJobs.
    """
    self.make_test_products()
    transaction.commit()
    stdout, stderr, retcode = run_script(
        'cronscripts/daily_product_jobs.py')
    for name, text in (("stdout", stdout), ("stderr", stderr)):
        # Bind the stream via a default argument so each lambda keeps
        # its own value.
        self.addDetail(name, Content(UTF8_TEXT, lambda data=text: data))
    self.assertEqual(0, retcode)
    self.assertIn('Requested 3 total product jobs.', stderr)
def _add_std_streams_to_details(self, details, stdout, stderr):
    """Add buffered standard stream contents to a subunit details dict."""
    for key, data in (('test-stdout', stdout), ('test-stderr', stderr)):
        if not data:
            # Empty/None streams get no detail entry at all.
            continue
        if isinstance(data, bytes):
            data = data.decode('utf-8', 'replace')
        # Bind the decoded text as a default argument so each Content
        # closure keeps its own stream.
        details[key] = Content(
            self.PLAIN_TEXT, lambda text=data: [text.encode('utf-8')])
def get_details_and_string(self):
    """Get a details dict and expected string."""
    details = {
        'text 1': Content(
            ContentType('text', 'plain'), lambda: [_b("1\n2\n")]),
        'text 2': Content(
            ContentType('text', 'strange'), lambda: [_b("3\n4\n")]),
        'bin 1': Content(
            ContentType('application', 'binary'), lambda: [_b("5\n")]),
    }
    expected = (
        "Binary content: bin 1\n"
        "Text attachment: text 1\n------------\n1\n2\n"
        "------------\nText attachment: text 2\n------------\n"
        "3\n4\n------------\n")
    return (details, expected)
def test___eq__(self):
    """Content equality compares content type and the joined byte chunks,
    not the chunking itself."""
    content_type = ContentType("foo", "bar")
    single_chunk = lambda: [_b("bytes")]
    split_chunks = lambda: [_b("by"), _b("tes")]
    same_a = Content(content_type, single_chunk)
    same_b = Content(content_type, single_chunk)
    same_split = Content(content_type, split_chunks)
    different_bytes = Content(content_type, lambda: [_b("by"), _b("te")])
    different_type = Content(ContentType("f", "b"), split_chunks)
    self.assertEqual(same_a, same_b)
    # Same bytes, different chunking: still equal.
    self.assertEqual(same_a, same_split)
    self.assertNotEqual(same_a, different_bytes)
    self.assertNotEqual(same_a, different_type)
def add_retry_failure_details(self, bouncer):
    """Attach socket-probe results for postgres and pgbouncer as details.

    connect_ex returns 0 on success or an errno otherwise.
    """
    # XXX benji bug=974617, bug=1011847, bug=504291 2011-07-31:
    # This method (and its invocations) are to be removed when we have
    # figured out what is causing bug 974617 and friends.
    # First we figure out if pgbouncer is listening on the port it is
    # supposed to be listening on.
    pg_port_status = str(socket.socket().connect_ex(('localhost', 5432)))
    # Content's iter_bytes callable must yield byte chunks; handing it the
    # bare str would iterate character-by-character (and as text, not
    # bytes), so wrap the encoded value in a one-element list.
    self.addDetail(
        'postgres socket.connect_ex result',
        Content(UTF8_TEXT, lambda: [pg_port_status.encode('utf-8')]))
    bouncer_port_status = str(
        socket.socket().connect_ex(('localhost', bouncer.port)))
    self.addDetail(
        'pgbouncer socket.connect_ex result',
        Content(UTF8_TEXT, lambda: [bouncer_port_status.encode('utf-8')]))
def test_success_empty_message(self):
    """A success with an empty bracketed message yields an empty
    'message' detail."""
    for line in ("success mcdonalds farm [\n", "]\n"):
        self.protocol.lineReceived(_b(line))
    expected_details = {
        'message': Content(
            ContentType("text", "plain"), lambda: [_b("")]),
    }
    self.assertSuccess(expected_details)
def check_fail_or_uxsuccess(self, as_fail, error_message=None):
    """Check the recorded client events for a failure or an unexpected
    success.

    :param as_fail: If true, expect an addFailure event; otherwise expect
        addUnexpectedSuccess (with details when the client is extended).
    :param error_message: Optional traceback text expected in the details.
    """
    details = {}
    if error_message is not None:
        details['traceback'] = Content(
            ContentType("text", "x-traceback", {'charset': 'utf8'}),
            lambda: [_b(error_message)])
    if isinstance(self.client, ExtendedTestResult):
        value = details
    else:
        value = None
    if as_fail:
        # Truncate the addFailure event to (name, test) before comparing,
        # discarding whatever details payload it carried.
        self.client._events[1] = self.client._events[1][:2]
        # The value is generated within the extended to original decorator:
        # todo use the testtools matcher to check on this.
        self.assertEqual([
            ('startTest', self.test),
            ('addFailure', self.test),
            ('stopTest', self.test),
            ], self.client._events)
    elif value:
        self.assertEqual([
            ('startTest', self.test),
            ('addUnexpectedSuccess', self.test, value),
            ('stopTest', self.test),
            ], self.client._events)
    else:
        self.assertEqual([
            ('startTest', self.test),
            ('addUnexpectedSuccess', self.test),
            ('stopTest', self.test),
            ], self.client._events)
def check_success_or_xfail(self, as_success, error_message=None):
    """Check the recorded client events for a success or an expected
    failure.

    :param as_success: If true, expect an addSuccess event; otherwise
        expect addExpectedFailure with either a details dict (extended
        clients) or a RemoteError rendering of the traceback.
    :param error_message: Optional traceback text expected in the details.
    """
    if as_success:
        self.assertEqual([
            ('startTest', self.test),
            ('addSuccess', self.test),
            ('stopTest', self.test),
            ], self.client._events)
    else:
        details = {}
        if error_message is not None:
            details['traceback'] = Content(
                ContentType("text", "x-traceback", {'charset': 'utf8'}),
                lambda: [_b(error_message)])
        if isinstance(self.client, ExtendedTestResult):
            value = details
        else:
            # Non-extended clients see the details flattened into a
            # RemoteError string in the legacy attachment format.
            if error_message is not None:
                value = subunit.RemoteError(
                    _u("Text attachment: traceback\n"
                       "------------\n") + _u(error_message) +
                    _u("------------\n"))
            else:
                value = subunit.RemoteError()
        self.assertEqual([
            ('startTest', self.test),
            ('addExpectedFailure', self.test, value),
            ('stopTest', self.test),
            ], self.client._events)
def execute(self, command, env):
    """Run `command` under `env`, attach its combined output as a detail,
    and assert that it exited successfully."""
    process = Popen(command, stdout=PIPE, stderr=STDOUT, env=env)
    output, _ = process.communicate()
    if output:
        detail_name = "stdout/err from `%s`" % " ".join(
            quote(token) for token in command)
        self.addDetail(detail_name, Content(UTF8_TEXT, lambda: [output]))
    self.assertEqual(0, process.wait(), "(return code is not zero)")
def test_square_2(self):
    """square(7) returns 49; attaches a colour name and the log file."""
    self.addDetail('arbitrary-color-name', text_content('blue'))

    def _read_log():
        # Details are evaluated lazily, so open on demand. Read in binary
        # mode because UTF8_TEXT content must yield bytes, and use `with`
        # so the handle is closed instead of leaked (the original opened
        # in text mode and never closed the file).
        with open('log.txt', 'rb') as log_file:
            return log_file.readlines()

    self.addDetail('log-file', Content(UTF8_TEXT, _read_log))
    result = silly.square(7)
    self.assertThat(result, Equals(49))
def match(self, observed):
    """Return a Mismatch carrying a diff detail when `observed` differs
    from the expected text; return None on a match."""
    if observed == self.expected:
        return None
    diff = self._diff(self.expected, observed)
    return Mismatch(
        "Observed text does not match expectations; see diff.",
        {"diff": Content(
            UTF8_TEXT, lambda: [line.encode() for line in diff])},
    )
def test_failure_empty_message(self):
    """A failure with an empty bracketed message yields an empty
    'traceback' detail."""
    # Feed the protocol bytes via _b, consistent with the sibling
    # protocol tests (e.g. test_success_empty_message) and with
    # lineReceived's bytes-oriented interface.
    self.protocol.lineReceived(_b("failure mcdonalds farm [\n"))
    self.protocol.lineReceived(_b("]\n"))
    details = {}
    details['traceback'] = Content(
        ContentType("text", "x-traceback", {'charset': 'utf8'}),
        lambda: [_b("")])
    self.assertFailure(details)
def test_log_details_handles_binary_data(self):
    """_log_details must not blow up on non-text (image) content."""
    binary_detail = Content(ContentType('image', 'png'), lambda: b'')
    result = testresult.LoggedTestResultDecorator(None)
    result._log_details(0, {'TestBinary': binary_detail})
def _run_core(self):
    """Run the case inside the spinner, capturing Twisted logs, logged
    errors, unhandled Deferred failures and reactor junk as details."""
    # Add an observer to trap all logged errors.
    error_observer = _log_observer
    full_log = StringIO()
    full_observer = log.FileLogObserver(full_log)
    spinner = self._make_spinner()
    successful, unhandled = run_with_log_observers(
        [error_observer.gotEvent, full_observer.emit],
        self._blocking_run_deferred, spinner)
    # Attach the full Twisted log; readlines is called lazily when the
    # detail is rendered.
    self.case.addDetail(
        'twisted-log', Content(UTF8_TEXT, full_log.readlines))
    logged_errors = error_observer.flushErrors()
    for logged_error in logged_errors:
        # Any error logged during the run fails the test.
        successful = False
        self._got_user_failure(logged_error, tb_label='logged-error')
    if unhandled:
        successful = False
        for debug_info in unhandled:
            f = debug_info.failResult
            info = debug_info._getDebugTracebacks()
            if info:
                self.case.addDetail(
                    'unhandled-error-in-deferred-debug',
                    text_content(info))
            self._got_user_failure(f, 'unhandled-error-in-deferred')
    junk = spinner.clear_junk()
    if junk:
        # Leftover delayed calls / selectables mean an unclean reactor.
        successful = False
        self._log_user_exception(UncleanReactorError(junk))
    if successful:
        self.result.addSuccess(self.case, details=self.case.getDetails())
def execute(self, *command):
    """Run `command` from the project root, attach its combined output as
    a detail, and assert it compiled the css successfully."""
    process = Popen(command, stdout=PIPE, stderr=STDOUT, cwd=root)
    output, _ = process.communicate()
    if output:
        detail_name = "stdout/err from `%s`" % " ".join(
            quote(token) for token in command)
        self.addDetail(detail_name, Content(UTF8_TEXT, lambda: [output]))
    self.assertEqual(0, process.wait(), "failed to compile css.")
def _spawn(self):
    """Spawn the BIND server process.

    Runs `named` in the foreground with its stdout/stderr redirected to
    the configured log file, registers cleanup, and attaches the log as
    a detail.
    """
    env = dict(os.environ, HOME=self.config.homedir)
    with open(self.config.log_file, "wb") as log_file:
        with open(os.devnull, "rb") as devnull:
            self.process = subprocess.Popen(
                [
                    self.config.named_file,
                    "-f",
                    "-c",
                    self.config.conf_file,
                ],
                stdin=devnull,
                stdout=log_file,
                stderr=log_file,
                close_fds=True,
                cwd=self.config.homedir,
                env=env,
                preexec_fn=preexec_fn,
            )
    self.addCleanup(self._stop)
    # Keep the log_file open for reading so that we can still get the log
    # even if the log is deleted.
    open_log_file = open(self.config.log_file, "rb")
    # Close the handle on teardown instead of leaking the file descriptor
    # for the life of the process.
    self.addCleanup(open_log_file.close)
    self.addDetail(
        os.path.basename(self.config.log_file),
        Content(UTF8_TEXT, lambda: open_log_file),
    )
def _setUp(self):
    """Install a Twisted log observer and expose its captured output as
    a UTF-8 detail."""
    log_buffer = StringIO()
    observer = log.FileLogObserver(log_buffer)
    self.useFixture(_TwistedLogObservers([observer.emit]))
    # Evaluate the buffer lazily so the detail reflects everything
    # logged by the time it is rendered.
    self.addDetail(
        self.LOG_DETAIL_NAME,
        Content(UTF8_TEXT,
                lambda: [log_buffer.getvalue().encode("utf-8")]))
def validate_dhcpd_configuration(test, configuration, ipv6):
    """Validate `configuration` using `dhcpd` itself.

    :param test: An instance of `maastesting.testcase.TestCase`.
    :param configuration: The contents of the configuration file as a string.
    :param ipv6: When true validate as DHCPv6, otherwise validate as DHCPv4.
    """
    with tempfile.NamedTemporaryFile(
            "w", encoding="ascii", prefix="dhcpd.",
            suffix=".conf") as conffile, tempfile.NamedTemporaryFile(
            "w", encoding="ascii", prefix="dhcpd.",
            suffix=".leases") as leasesfile:
        # Write the configuration to the temporary file.
        conffile.write(configuration)
        conffile.flush()
        # Add line numbers to configuration and add as a detail. This will
        # make it much easier to debug; `dhcpd -t` prints line numbers for
        # any errors it finds.
        test.addDetail(
            conffile.name,
            Content(
                UTF8_TEXT,
                lambda: map(
                    str.encode,
                    ("> %3d %s" % entry for entry in zip(
                        count(1),
                        configuration.splitlines(keepends=True))),
                ),
            ),
        )
        # Call `dhcpd` via `aa-exec --profile unconfined`. The latter is
        # needed so that `dhcpd` can open the configuration file from /tmp.
        cmd = (
            "aa-exec", "--profile", "unconfined",
            "dhcpd", ("-6" if ipv6 else "-4"), "-t",
            "-cf", conffile.name, "-lf", leasesfile.name,
        )
        process = subprocess.Popen(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
            env=get_env_with_locale(),
        )
        command = " ".join(map(pipes.quote, process.args))
        output, _ = process.communicate()
        # Record the output from `dhcpd -t` as a detail.
        test.addDetail(
            "stdout/err from `%s`" % command,
            text_content(output.decode("utf-8")),
        )
        # Check that it completed successfully.
        test.assertThat(
            process.returncode, Equals(0), "`%s` failed." % command)
def content(self):
    """Return a `testtools.content.Content` for this object's buffer.

    Use with `testtools.TestCase.addDetail`,
    `fixtures.Fixture.addDetail`, and anything else that understands
    details.
    """
    def _iter_bytes():
        # Snapshot the buffer lazily, at render time.
        return [self.getLogBuffer().encode("utf-8")]
    return Content(UTF8_TEXT, _iter_bytes)
def failure_quoted_bracket(self, keyword):
    """Check that a quoted ']' line inside a bracketed message is
    unescaped into the traceback detail."""
    # Feed the protocol bytes via _b, consistent with the sibling
    # protocol tests and with lineReceived's bytes-oriented interface.
    self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
    # A leading space escapes the ']' so it is message content rather
    # than the terminator.
    self.protocol.lineReceived(_b(" ]\n"))
    self.protocol.lineReceived(_b("]\n"))
    details = {}
    details['traceback'] = Content(
        ContentType("text", "x-traceback", {'charset': 'utf8'}),
        lambda: [_b("]\n")])
    self.assertFailure(details)
def tearDown(self):
    """Runs after each test run: attach SPEC details, restore the
    environment, and wipe the testbed working directory."""
    super(TestFromSPEC, self).tearDown()
    ct = ContentType('application', 'json')
    # information on test dependencies mentioned in the SPEC
    self._get_dep_info()
    # configure default set of information to be reported for any test run
    # NOTE(review): presumably "still can't figure out why this can't be
    # a loop" was intended here; the repetition looks loop-able.
    self.addDetail(
        'spec_info',
        Content(ct, lambda: [self._jds(self._details['spec_info'])]))
    self.addDetail(
        'dep_info',
        Content(ct, lambda: [self._jds(self._details['dep_info'])]))
    self.addDetail(
        'exec_info',
        Content(ct, lambda: [self._jds(self._details['exec_info'])]))
    self.addDetail(
        'env_info',
        Content(ct, lambda: [self._jds(self._details['env_info'])]))
    self.addDetail(
        'metric_info',
        Content(ct, lambda: [self._jds(self._details['metric_info'])]))
    self.addDetail(
        'output_info',
        Content(ct, lambda: [self._jds(self._details['output_info'])]))
    self.addDetail(
        'sys_info',
        Content(ct, lambda: [self._jds(self._get_system_info())]))
    # restore environment to its previous state
    self._restore_environment()
    # after EVERYTHING is done
    # remove status var again
    del os.environ['TESTKRAUT_TESTBED_PATH']
    # wipe out testbed
    if not self._workdir is None:
        lgr.debug("remove work dir at '%s'" % self._workdir)
        import shutil
        shutil.rmtree(self._workdir)
        self._workdir = None