def _assert_run_cronscript(self, create_job):
    """Run the job-source cronscript and assert the grantee is unsubscribed.

    :param create_job: Callable taking (distro, bug, grantee, owner) and
        returning a (job, job_type) tuple for the job source under test.
    """
    # The cronscript is configured: schema-lazr.conf and security.cfg.
    # The job runs correctly and the requested bug subscriptions are
    # removed.
    distro = self.factory.makeDistribution()
    grantee = self.factory.makePerson()
    owner = self.factory.makePerson()
    bug = self.factory.makeBug(owner=owner, target=distro,
        information_type=InformationType.USERDATA)
    with person_logged_in(owner):
        bug.subscribe(grantee, owner)
    job, job_type = create_job(distro, bug, grantee, owner)
    # Subscribing grantee has created an artifact grant so we need to
    # revoke that to test the job.
    artifact = self.factory.makeAccessArtifact(concrete=bug)
    getUtility(IAccessArtifactGrantSource).revokeByArtifact(
        [artifact], [grantee])
    # Commit so the out-of-process cronscript can see the fixture data.
    transaction.commit()
    out, err, exit_code = run_script(
        "LP_DEBUG_SQL=1 cronscripts/process-job-source.py -vv %s" % (
            job_type))
    # Attach the script output as details so failures are debuggable.
    self.addDetail("stdout", Content(UTF8_TEXT, lambda: out))
    self.addDetail("stderr", Content(UTF8_TEXT, lambda: err))
    self.assertEqual(0, exit_code)
    self.assertTrue('Traceback (most recent call last)' not in err)
    # Drop cached state so job.status reflects the cronscript's commit.
    IStore(job.job).invalidate()
    self.assertEqual(JobStatus.COMPLETED, job.job.status)
    self.assertNotIn(
        grantee, removeSecurityProxy(bug).getDirectSubscribers())
def _get_details(self):
    """Calculate a details dict for the test - attachments etc.

    Always attaches the binary's filename and random seed; failures get
    the extracted error text and successes get any performance metrics
    found in the case node, sorted by value.
    """
    details = {}
    result = attribute_as_text(self._case, 'result', 'status')
    details['filename'] = Content(mime_utf8, lambda: [self._binary.file])
    details['random_seed'] = Content(
        mime_utf8, lambda: [self._binary.random_seed])
    if self._get_outcome() == 'addFailure':
        # Extract the error details. Skips have no details because its not
        # skip like unittest does, instead the runner just bypasses N test.
        txt = self._error_text(self._case)
        details['error'] = Content(mime_utf8, lambda: [txt])
    if self._get_outcome() == 'addSuccess':
        # Sucessful tests may have performance metrics.
        perflist = list_children(self._case, 'performance')
        if perflist:
            presults = []
            for perf in perflist:
                pmin = bool(int(attribute_as_text(perf, 'minimize')))
                # NOTE(review): pmax is computed but never used; the label
                # below keys off pmin only — confirm whether 'maximize'
                # should influence the text.
                pmax = bool(int(attribute_as_text(perf, 'maximize')))
                pval = float(attribute_as_text(perf, 'value'))
                txt = node_as_text(perf)
                # cond-and-or idiom: 'minimized' when pmin else 'maximized'.
                txt = 'Performance(' + (pmin and 'minimized' or 'maximized'
                    ) + '): ' + txt.strip() + '\n'
                presults += [(pval, txt)]
            # Sort metrics by value so the attachment is deterministic.
            presults.sort()
            perf_details = [e[1] for e in presults]
            details['performance'] = Content(
                mime_utf8, lambda: perf_details)
    return details
def test___init___None_errors(self):
    """Content raises ValueError when either argument is None."""
    invalid_constructions = [
        lambda: Content(None, None),
        lambda: Content(None, lambda: ["traceback"]),
        lambda: Content(ContentType("text", "traceback"), None),
    ]
    for construct in invalid_constructions:
        self.assertThat(construct, raises_value_error)
def _fix_twisted_logs(self, detailed, detail_name):
    """
    Split the Eliot logs out of a Twisted log.

    :param detailed: Object with ``getDetails`` where the original
        Twisted logs are stored.
    :param detail_name: Name of the Twisted log detail.
    """
    twisted_log = detailed.getDetails()[detail_name]
    # Single-element list acts as a mutable cell for the memoized result.
    split_logs = [None]

    def _get_split_logs():
        # Memoize the split log so we don't iterate through it twice.
        if split_logs[0] is None:
            split_logs[0] = _split_map_maybe(
                _get_eliot_data,
                _iter_content_lines(twisted_log),
            )
        return split_logs[0]
    # The trick here is that we can't iterate over the base detail yet.
    # We can only use it inside the iter_bytes of the Content objects
    # that we add. This is because the only time that we *know* the
    # details are populated is when the details are evaluated. If we call
    # it in _setUp(), the logs are empty. If we call it in cleanup, the
    # detail is gone.
    detailed.addDetail(
        detail_name, Content(UTF8_TEXT, lambda: _get_split_logs()[0]))
    detailed.addDetail(
        self._ELIOT_LOG_DETAIL_NAME,
        Content(UTF8_TEXT, lambda: _prettyformat_lines(
            _get_split_logs()[1])))
def test_run_cronscript(self):
    """The QuestionEmailJob cronscript sends mail and completes the job.

    The cronscript is configured via schema-lazr.conf and security.cfg.
    """
    job = make_question_job(
        self.factory, QuestionRecipientSet.ASKER_SUBSCRIBER)
    question = job.question
    with person_logged_in(question.target.owner):
        question.linkBug(self.factory.makeBug(target=question.target))
        question.linkFAQ(
            question.target.owner,
            self.factory.makeFAQ(target=question.target),
            'test FAQ link')
    user = job.user
    with person_logged_in(user):
        lang_set = getUtility(ILanguageSet)
        user.addLanguage(lang_set['en'])
        question.target.addAnswerContact(user, user)
    # Commit so the out-of-process cronscript can see the fixture data.
    transaction.commit()
    out, err, exit_code = run_script(
        "LP_DEBUG_SQL=1 cronscripts/process-job-source.py -vv %s" % (
            IQuestionEmailJobSource.getName()))
    # Attach the script output as details so failures are debuggable.
    self.addDetail("stdout", Content(UTF8_TEXT, lambda: out))
    self.addDetail("stderr", Content(UTF8_TEXT, lambda: err))
    self.assertEqual(0, exit_code)
    # assertNotIn gives a clearer failure than assertTrue('...' not in err).
    self.assertNotIn('Traceback (most recent call last)', err)
    message = (
        'QuestionEmailJob has sent email for question %s.' % question.id)
    # Bug fix: the failure message previously read "Cound not find".
    self.assertIn(
        message, err,
        'Could not find "%s" in err log:\n%s.' % (message, err))
    # Drop cached state so job.status reflects the cronscript's commit.
    IStore(job.job).invalidate()
    self.assertEqual(JobStatus.COMPLETED, job.job.status)
def _request_stop(self):
    """Ask the daemon to terminate cleanly, attaching any output."""
    # Politely signal termination, then wait for the process to exit.
    self._process.terminate()
    self._process.wait(timeout=self._timeout)
    captured_out, captured_err = self._process.communicate()
    daemon_name = self._command[0].split("/")[-1]
    # Attach each non-empty stream as a detail; bind the payload as a
    # default argument so each lambda keeps its own chunk.
    for suffix, payload in (('out', captured_out), ('err', captured_err)):
        if payload:
            self.addDetail(
                '%s-%s' % (daemon_name, suffix),
                Content(UTF8_TEXT, lambda data=payload: [data]))
def get_details_and_string(self):
    """Get a details dict and expected string."""
    details = {}
    details['text 1'] = Content(
        ContentType('text', 'plain'), lambda: [_b("1\n2\n")])
    details['text 2'] = Content(
        ContentType('text', 'strange'), lambda: [_b("3\n4\n")])
    details['bin 1'] = Content(
        ContentType('application', 'binary'), lambda: [_b("5\n")])
    expected = (
        "Binary content: bin 1\n"
        "Text attachment: text 1\n------------\n1\n2\n"
        "------------\nText attachment: text 2\n------------\n"
        "3\n4\n------------\n")
    return (details, expected)
def _add_std_streams_to_details(self, details, stdout, stderr):
    """Add buffered standard stream contents to a subunit details dict."""
    def _attach(key, data):
        # Skip empty/None streams; normalize bytes to text, then re-encode
        # as UTF-8 for the Content payload.
        if not data:
            return
        if isinstance(data, bytes):
            data = data.decode('utf-8', 'replace')
        details[key] = Content(
            self.PLAIN_TEXT, lambda payload=data: [payload.encode('utf-8')])
    _attach('test-stdout', stdout)
    _attach('test-stderr', stderr)
def test_run(self):
    """The cronscript requests the daily product jobs.

    The script calls ProductJobManager.createAllDailyJobs(); this test
    uses the same setup as
    ProductJobManagerTestCase.test_createAllDailyJobs.
    """
    self.make_test_products()
    transaction.commit()
    out, err, code = run_script('cronscripts/daily_product_jobs.py')
    self.addDetail("stdout", Content(UTF8_TEXT, lambda: out))
    self.addDetail("stderr", Content(UTF8_TEXT, lambda: err))
    self.assertEqual(0, code)
    self.assertIn('Requested 3 total product jobs.', err)
def setUp(self):
    """Start RabbitMQ, dumping the broker log if startup times out.

    Useful for debugging Travis failures.
    """
    import os
    os.environ["RABBITMQ_ENABLED_PLUGINS_FILE"] = "/dev/null"
    try:
        super(RabbitServerWithoutReset, self).setUp()
    except Exception as error:
        message = str(error)
        if message.startswith("Timeout waiting for RabbitMQ server"):
            # Read the log eagerly inside a context manager instead of
            # leaking the file handle opened inside the old lambda.
            with open(self.config.logfile, "r") as logfile:
                log_lines = logfile.readlines()
            content = Content(UTF8_TEXT, lambda: log_lines)
            # print() works on both Python 2 and 3; the old bare print
            # statement was a SyntaxError on Python 3.
            print(content.as_text())
        # Bare raise preserves the original traceback.
        raise
def test___eq__(self):
    """Contents compare equal on content type plus the joined bytes."""
    foo_bar = ContentType("foo", "bar")
    single_chunk = lambda: [_b("bytes")]
    split_chunks = lambda: [_b("by"), _b("tes")]
    a = Content(foo_bar, single_chunk)
    b = Content(foo_bar, single_chunk)
    c = Content(foo_bar, split_chunks)
    d = Content(foo_bar, lambda: [_b("by"), _b("te")])
    e = Content(ContentType("f", "b"), split_chunks)
    # Same type and bytes are equal, regardless of chunking.
    self.assertEqual(a, b)
    self.assertEqual(a, c)
    # Different bytes or a different content type break equality.
    self.assertNotEqual(a, d)
    self.assertNotEqual(a, e)
def add_retry_failure_details(self, bouncer):
    """Attach postgres/pgbouncer port-reachability details to the test.

    :param bouncer: The pgbouncer fixture whose port is probed.
    """
    # XXX benji bug=974617, bug=1011847, bug=504291 2011-07-31:
    # This method (and its invocations) are to be removed when we have
    # figured out what is causing bug 974617 and friends.
    # First we figure out if pgbouncer is listening on the port it is
    # supposed to be listening on. connect_ex returns 0 on success or an
    # errno otherwise.
    pg_socket = socket.socket()
    try:
        pg_port_status = str(pg_socket.connect_ex(('localhost', 5432)))
    finally:
        # Close the probe socket instead of leaking it.
        pg_socket.close()
    # Wrap the chunk in a list: Content iterates the callable's result,
    # so a bare string would be yielded character by character.
    self.addDetail('postgres socket.connect_ex result',
        Content(UTF8_TEXT, lambda: [pg_port_status]))
    bouncer_socket = socket.socket()
    try:
        bouncer_port_status = str(
            bouncer_socket.connect_ex(('localhost', bouncer.port)))
    finally:
        bouncer_socket.close()
    self.addDetail('pgbouncer socket.connect_ex result',
        Content(UTF8_TEXT, lambda: [bouncer_port_status]))
def match(self, observed):
    """Return a Mismatch carrying a diff detail, or None on a match."""
    # Guard clause: matching text means no mismatch (implicit None before).
    if observed == self.expected:
        return None
    difference = self._diff(self.expected, observed)
    diff_detail = Content(UTF8_TEXT, lambda: map(str.encode, difference))
    return Mismatch(
        "Observed text does not match expectations; see diff.",
        {"diff": diff_detail},
    )
def _run_core(self):
    """Run the test under the spinner, collecting Twisted log output.

    Any logged error, unhandled Deferred failure, or junk left on the
    reactor marks the run unsuccessful; only a fully clean run reports
    addSuccess.
    """
    # Add an observer to trap all logged errors.
    error_observer = _log_observer
    full_log = StringIO()
    full_observer = log.FileLogObserver(full_log)
    spinner = self._make_spinner()
    successful, unhandled = run_with_log_observers(
        [error_observer.gotEvent, full_observer.emit],
        self._blocking_run_deferred, spinner)
    # Attach the complete Twisted log as a detail for debugging.
    self.case.addDetail(
        'twisted-log', Content(UTF8_TEXT, full_log.readlines))
    logged_errors = error_observer.flushErrors()
    for logged_error in logged_errors:
        successful = False
        self._got_user_failure(logged_error, tb_label='logged-error')
    if unhandled:
        successful = False
        for debug_info in unhandled:
            f = debug_info.failResult
            info = debug_info._getDebugTracebacks()
            if info:
                self.case.addDetail(
                    'unhandled-error-in-deferred-debug',
                    text_content(info))
            self._got_user_failure(f, 'unhandled-error-in-deferred')
    # Anything still scheduled on the reactor counts as a failure.
    junk = spinner.clear_junk()
    if junk:
        successful = False
        self._log_user_exception(UncleanReactorError(junk))
    if successful:
        self.result.addSuccess(self.case, details=self.case.getDetails())
def test_log_details_handles_binary_data(self):
    """_log_details copes with a binary (image/png) detail payload."""
    png_type = ContentType('image', 'png')
    fake_details = {'TestBinary': Content(png_type, lambda: b'')}
    result = testresult.LoggedTestResultDecorator(None)
    result._log_details(0, fake_details)
def test_square_2(self):
    """silly.square(7) is 49; attach a colour name and the log file."""
    self.addDetail('arbitrary-color-name', text_content('blue'))
    log_detail = Content(
        UTF8_TEXT, lambda: open('log.txt', 'r').readlines())
    self.addDetail('log-file', log_detail)
    self.assertThat(silly.square(7), Equals(49))
def test_success_empty_message(self):
    """A success with an empty bracketed message yields an empty detail."""
    for line in ("success mcdonalds farm [\n", "]\n"):
        self.protocol.lineReceived(_b(line))
    details = {
        'message': Content(
            ContentType("text", "plain"), lambda: [_b("")]),
    }
    self.assertSuccess(details)
def check_success_or_xfail(self, as_success, error_message=None):
    """Assert the client saw a success or an expected failure.

    :param as_success: When true, expect a plain addSuccess event stream;
        otherwise expect addExpectedFailure.
    :param error_message: Optional traceback text attached to the
        expected failure.
    """
    if as_success:
        self.assertEqual([
            ('startTest', self.test),
            ('addSuccess', self.test),
            ('stopTest', self.test),
            ], self.client._events)
    else:
        details = {}
        if error_message is not None:
            details['traceback'] = Content(
                ContentType("text", "x-traceback", {'charset': 'utf8'}),
                lambda: [_b(error_message)])
        # Extended results receive the details dict directly; original
        # results receive a synthesized RemoteError instead.
        if isinstance(self.client, ExtendedTestResult):
            value = details
        else:
            if error_message is not None:
                value = subunit.RemoteError(
                    _u("Text attachment: traceback\n"
                       "------------\n") + _u(error_message) +
                    _u("------------\n"))
            else:
                value = subunit.RemoteError()
        self.assertEqual([
            ('startTest', self.test),
            ('addExpectedFailure', self.test, value),
            ('stopTest', self.test),
            ], self.client._events)
def check_fail_or_uxsuccess(self, as_fail, error_message=None):
    """Assert the client saw a failure or an unexpected success.

    :param as_fail: When true, expect addFailure; otherwise expect
        addUnexpectedSuccess (with details only for extended results).
    :param error_message: Optional traceback text for the details dict.
    """
    details = {}
    if error_message is not None:
        details['traceback'] = Content(
            ContentType("text", "x-traceback", {'charset': 'utf8'}),
            lambda: [_b(error_message)])
    # Only extended results carry the details through as the event value.
    if isinstance(self.client, ExtendedTestResult):
        value = details
    else:
        value = None
    if as_fail:
        # Strip the failure's value before comparing.
        self.client._events[1] = self.client._events[1][:2]
        # The value is generated within the extended to original decorator:
        # todo use the testtools matcher to check on this.
        self.assertEqual([
            ('startTest', self.test),
            ('addFailure', self.test),
            ('stopTest', self.test),
            ], self.client._events)
    elif value:
        self.assertEqual([
            ('startTest', self.test),
            ('addUnexpectedSuccess', self.test, value),
            ('stopTest', self.test),
            ], self.client._events)
    else:
        self.assertEqual([
            ('startTest', self.test),
            ('addUnexpectedSuccess', self.test),
            ('stopTest', self.test),
            ], self.client._events)
def _spawn(self):
    """Spawn the BIND server process.

    Runs `named` in the foreground (-f) with its stdout/stderr captured
    to the configured log file, and registers a cleanup to stop it.
    """
    env = dict(os.environ, HOME=self.config.homedir)
    with open(self.config.log_file, "wb") as log_file:
        with open(os.devnull, "rb") as devnull:
            self.process = subprocess.Popen(
                [
                    self.config.named_file,
                    "-f",
                    "-c",
                    self.config.conf_file,
                ],
                stdin=devnull,
                stdout=log_file,
                stderr=log_file,
                close_fds=True,
                cwd=self.config.homedir,
                env=env,
                preexec_fn=preexec_fn,
            )
    self.addCleanup(self._stop)
    # Keep the log_file open for reading so that we can still get the log
    # even if the log is deleted.
    open_log_file = open(self.config.log_file, "rb")
    self.addDetail(
        os.path.basename(self.config.log_file),
        Content(UTF8_TEXT, lambda: open_log_file),
    )
def execute(self, *command):
    """Run `command` from the tree root; fail unless it exits cleanly."""
    proc = Popen(command, stdout=PIPE, stderr=STDOUT, cwd=root)
    captured, _ = proc.communicate()
    # Only attach output when there is some.
    if captured:
        label = "stdout/err from `%s`" % " ".join(map(quote, command))
        self.addDetail(label, Content(UTF8_TEXT, lambda: [captured]))
    self.assertEqual(0, proc.wait(), "failed to compile css.")
def execute(self, command, env):
    """Run `command` with `env`; fail unless it exits cleanly."""
    proc = Popen(command, stdout=PIPE, stderr=STDOUT, env=env)
    captured, _ = proc.communicate()
    # Only attach output when there is some.
    if captured:
        label = "stdout/err from `%s`" % " ".join(map(quote, command))
        self.addDetail(label, Content(UTF8_TEXT, lambda: [captured]))
    self.assertEqual(0, proc.wait(), "(return code is not zero)")
def test_failure_empty_message(self):
    """A failure with an empty bracketed message yields empty traceback."""
    for line in ("failure mcdonalds farm [\n", "]\n"):
        self.protocol.lineReceived(line)
    traceback_type = ContentType(
        "text", "x-traceback", {'charset': 'utf8'})
    details = {'traceback': Content(traceback_type, lambda: [""])}
    self.assertFailure(details)
def _setUp(self):
    """Install a Twisted log observer and expose its output as a detail."""
    captured = StringIO()
    observer = log.FileLogObserver(captured)
    self.useFixture(_TwistedLogObservers([observer.emit]))
    # Encode lazily so the detail reflects everything logged so far.
    self.addDetail(
        self.LOG_DETAIL_NAME,
        Content(UTF8_TEXT,
                lambda: [captured.getvalue().encode("utf-8")]))
def validate_dhcpd_configuration(test, configuration, ipv6):
    """Validate `configuration` using `dhcpd` itself.

    :param test: An instance of `maastesting.testcase.TestCase`.
    :param configuration: The contents of the configuration file as a string.
    :param ipv6: When true validate as DHCPv6, otherwise validate as DHCPv4.
    """
    with tempfile.NamedTemporaryFile(
            "w", encoding="ascii", prefix="dhcpd.",
            suffix=".conf") as conffile, tempfile.NamedTemporaryFile(
            "w", encoding="ascii", prefix="dhcpd.",
            suffix=".leases") as leasesfile:
        # Write the configuration to the temporary file.
        conffile.write(configuration)
        conffile.flush()
        # Add line numbers to configuration and add as a detail. This will
        # make it much easier to debug; `dhcpd -t` prints line numbers for any
        # errors it finds.
        test.addDetail(
            conffile.name,
            Content(
                UTF8_TEXT,
                lambda: map(
                    str.encode,
                    ("> %3d %s" % entry for entry in zip(
                        count(1),
                        configuration.splitlines(keepends=True))),
                ),
            ),
        )
        # Call `dhcpd` via `aa-exec --profile unconfined`. The latter is
        # needed so that `dhcpd` can open the configuration file from /tmp.
        cmd = (
            "aa-exec", "--profile", "unconfined", "dhcpd",
            ("-6" if ipv6 else "-4"), "-t", "-cf", conffile.name,
            "-lf", leasesfile.name,
        )
        process = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            env=get_env_with_locale(),
        )
        command = " ".join(map(pipes.quote, process.args))
        output, _ = process.communicate()
        # Record the output from `dhcpd -t` as a detail.
        test.addDetail(
            "stdout/err from `%s`" % command,
            text_content(output.decode("utf-8")),
        )
        # Check that it completed successfully.
        test.assertThat(
            process.returncode, Equals(0), "`%s` failed." % command)
def content(self):
    """Return a `testtools.content.Content` for this object's buffer.

    Use with `testtools.TestCase.addDetail`,
    `fixtures.Fixture.addDetail`, and anything else that understands
    details.
    """
    def iter_bytes():
        # Read and encode lazily, at the time the detail is evaluated.
        return [self.getLogBuffer().encode("utf-8")]
    return Content(UTF8_TEXT, iter_bytes)
def failure_quoted_bracket(self, keyword):
    """A quoted close-bracket inside the message round-trips intact."""
    for line in ("%s mcdonalds farm [\n" % keyword, " ]\n", "]\n"):
        self.protocol.lineReceived(line)
    traceback_type = ContentType(
        "text", "x-traceback", {'charset': 'utf8'})
    details = {'traceback': Content(traceback_type, lambda: ["]\n"])}
    self.assertFailure(details)
def tearDown(self):
    """Runs after each test run: report details, restore the environment,
    and wipe the testbed working directory."""
    super(TestFromSPEC, self).tearDown()
    ct = ContentType('application', 'json')
    # information on test dependencies mentioned in the SPEC
    self._get_dep_info()
    # Report the default set of information for any test run. Binding
    # `key` as a default argument avoids the late-binding-closure trap
    # that made the naive loop fail (every lambda would otherwise see
    # only the final key).
    for key in ('spec_info', 'dep_info', 'exec_info', 'env_info',
                'metric_info', 'output_info'):
        self.addDetail(
            key,
            Content(ct, lambda key=key: [self._jds(self._details[key])]))
    self.addDetail(
        'sys_info',
        Content(ct, lambda: [self._jds(self._get_system_info())]))
    # restore environment to its previous state
    self._restore_environment()
    # after EVERYTHING is done: remove status var again
    del os.environ['TESTKRAUT_TESTBED_PATH']
    # wipe out testbed
    if self._workdir is not None:
        lgr.debug("remove work dir at '%s'" % self._workdir)
        import shutil
        shutil.rmtree(self._workdir)
        self._workdir = None
def setUp(self):
    """Create a client protocol that writes to an in-memory stream."""
    self.io = StringIO()
    self.protocol = subunit.TestProtocolClient(self.io)
    self.test = TestTestProtocolClient("test_start_test")
    plain_text = ContentType('text', 'plain')
    self.sample_details = {
        'something': Content(plain_text, lambda: ['serialised\nform'])}
    # The traceback variant shares the base details plus a traceback.
    self.sample_tb_details = dict(self.sample_details)
    self.sample_tb_details['traceback'] = TracebackContent(
        subunit.RemoteError(u"boo qux"), self.test)
def test_that_imports_are_formatted(self):
    """`make format` produces no changes on the committed source tree.

    Exports all visible Python files to a scratch tree, runs the
    formatter there, and fails with a unified diff if anything differs.
    """
    # We're going to export all Python source code to a new, freshly
    # created, tree, then run `make format` in it.
    root_export = mkdtemp(prefix=".export.", dir=root)
    self.addCleanup(rmtree, root_export, ignore_errors=True)
    # Useful predicates.
    p_visible = lambda name: not name.startswith(".")
    p_is_python = lambda name: name.endswith(".py")
    # Copy all visible Python source files over.
    for dirpath, dirnames, filenames in walk(root):
        # In-place slice assignment prunes hidden dirs from the walk.
        dirnames[:] = filter(p_visible, dirnames)
        dirpath_export = join(root_export, relpath(dirpath, start=root))
        for dirname in dirnames:
            mkdir(join(dirpath_export, dirname))
        for filename in filter(p_visible, filenames):
            if p_is_python(filename):
                src = join(dirpath, filename)
                dst = join(dirpath_export, filename)
                copy2(src, dst)
    # We'll need the Makefile and format-imports too.
    copy2(join(root, "Makefile"), root_export)
    copy2(join(root, "utilities", "format-imports"),
          join(root_export, "utilities", "format-imports"))
    # Format imports in the exported tree.
    self.execute("make", "--quiet", "-C", root_export, "format")
    # This will record a unified diff between the original source code and
    # the reformatted source code, should there be any.
    diff = []
    # For each file in the export, compare it to its counterpart in the
    # original tree.
    for dirpath, dirnames, filenames in walk(root_export):
        dirpath_relative = relpath(dirpath, start=root_export)
        dirpath_original = join(root, dirpath_relative)
        for filename in filter(p_is_python, filenames):
            filepath_original = join(dirpath_original, filename)
            with open(filepath_original, "r", encoding="utf8") as f:
                file_lines_original = f.readlines()
            filepath_formatted = join(dirpath, filename)
            with open(filepath_formatted, "r", encoding="utf8") as f:
                file_lines_formatted = f.readlines()
            diff.extend(
                unified_diff(file_lines_original, file_lines_formatted,
                             filepath_original, filepath_formatted))
    if len(diff) != 0:
        # Attach the diff as a detail, then fail with instructions.
        self.addDetail(
            "diff", Content(UTF8_TEXT, lambda: (
                line.encode("utf8") for line in diff)))
        self.fail("Some imports are not formatted; see the diff for the "
                  "missing changes. Use `make format` to address them.")
def __init__(self, value, acontenttype=None):
    """Create a text Content from a unicode string.

    :param value: The text payload.
    :param acontenttype: Optional ContentType; defaults to text/plain.
    """
    if acontenttype is None:
        acontenttype = ContentType("text", "plain")
    # Bug fix: the iter_bytes callable must yield byte *chunks*. The old
    # `lambda: value.encode("utf8")` returned a bare bytes object, so
    # iterating it yielded individual bytes (ints on Python 3). Wrap the
    # encoded payload in a single-element list instead.
    Content.__init__(self, acontenttype, lambda: [value.encode("utf8")])
def test_iter_text_default_charset_iso_8859_1(self):
    """Without a charset parameter, text content decodes as ISO-8859-1."""
    strange_type = ContentType("text", "strange")
    expected = _u("bytes\xea")
    raw = expected.encode("ISO-8859-1")
    content = Content(strange_type, lambda: [raw])
    self.assertEqual([expected], list(content.iter_text()))
def test_iter_text_decodes(self):
    """iter_text honours the charset declared in the content type."""
    utf8_type = ContentType("text", "strange", {"charset": "utf8"})
    payload = _u("bytes\xea")
    content = Content(utf8_type, lambda: [payload.encode("utf8")])
    self.assertEqual([payload], list(content.iter_text()))
def test___init___sets_ivars(self):
    """The constructor stores both the content type and the chunks."""
    foo_bar = ContentType("foo", "bar")
    content = Content(foo_bar, lambda: ["bytes"])
    self.assertEqual(foo_bar, content.content_type)
    self.assertEqual(["bytes"], list(content.iter_bytes()))
def test_as_text(self):
    """as_text joins the chunks and decodes via the declared charset."""
    utf8_type = ContentType("text", "strange", {"charset": "utf8"})
    payload = _u("bytes\xea")
    content = Content(utf8_type, lambda: [payload.encode("utf8")])
    self.assertEqual(payload, content.as_text())