def __enter__(self):
    """Enter the context: redirect both std streams into fresh buffers.

    Remembers the real ``sys.__stdout__``/``sys.__stderr__`` and the
    current working directory so the matching ``__exit__`` can restore
    them, optionally chdir-ing into ``self.chdir`` first.
    """
    # stash the originals for restoration on exit
    self._stdout = sys.__stdout__
    self._stderr = sys.__stderr__
    self.cwd = os.getcwd()
    if self.chdir:
        os.chdir(self.chdir)
    # one in-memory buffer per stream; installed on both the public
    # and the dunder attributes so even "raw" stream users are captured
    out_buffer = six.StringIO()
    err_buffer = six.StringIO()
    self.stdout = out_buffer
    self.stderr = err_buffer
    sys.stdout = sys.__stdout__ = out_buffer
    sys.stderr = sys.__stderr__ = err_buffer
    return self
def setUp(self):
    """Capture stderr in a buffer and wire up a session with test plugins."""
    # swap stderr for an in-memory buffer so hook output can be inspected;
    # restore the real stream when the test finishes
    buf = six.StringIO()
    self.err = sys.stderr
    self.buf = buf
    sys.stderr = buf
    self.addCleanup(self.restore_stderr)
    # one session shared by the PrintHooks plugin and two dummy plugins
    shared_session = session.Session()
    self.session = shared_session
    self.print_hooks_plugin = printhooks.PrintHooks(session=shared_session)
    self.plugin_a = TestPluginA(session=shared_session)
    self.plugin_b = TestPluginB(session=shared_session)
def test_captures_stdout(self):
    """Output printed by the test must land in event metadata, not stdout."""
    saved_stdout = sys.stdout
    capture = six.StringIO()
    sys.stdout = capture
    try:
        test = self.case("test_out")
        test(self.result)
        # nothing leaked to the replaced stdout...
        assert "hello" not in capture.getvalue()
        # ...because the plugin stored it on the event instead
        assert "hello" in self.watcher.events[0].metadata["stdout"]
    finally:
        # always put the real stdout back, even on assertion failure
        sys.stdout = saved_stdout
def test_captures_stderr_when_configured(self):
    """With captureStderr enabled, stderr goes to event metadata only."""
    self.plugin.captureStderr = True
    saved_stderr = sys.stderr
    capture = six.StringIO()
    sys.stderr = capture
    try:
        test = self.case("test_err")
        test(self.result)
        # nothing leaked to the replaced stderr...
        assert "goodbye" not in capture.getvalue()
        # ...because the plugin stored it on the event instead
        assert "goodbye" in self.watcher.events[0].metadata["stderr"]
    finally:
        # always put the real stderr back, even on assertion failure
        sys.stderr = saved_stderr
def _can_tokenize(source_lines):
    """Return True if joining + dedenting *source_lines* tokenizes cleanly.

    ``tokenize.generate_tokens`` wants a ``readline`` callable, so the
    dedented source is wrapped in a StringIO to give it that interface.
    """
    reader = six.StringIO(textwrap.dedent("".join(source_lines)))
    try:
        # exhaust the token stream; we only care whether it errors
        for _token in tokenize.generate_tokens(reader.readline):
            pass
    except tokenize.TokenError:
        return False
    return True
def __init__(self):
    """Get our config and add our command line arguments."""
    # tracking var for any decision which marks the entire run as failed
    self.decided_failure = False
    # buffer for error output data
    self.error_output_buffer = six.StringIO()
    # config values, each with a fallback when unset/empty
    self.covSource = self.config.as_list("coverage", []) or ["."]
    self.covReport = self.config.as_list("coverage-report", []) or ["term"]
    self.covConfig = (
        self.config.as_str("coverage-config", "").strip() or ".coveragerc"
    )
    # register the CLI options data-driven, in the same order as before
    group = self.session.pluginargs
    for flag, options in (
        (
            "--coverage",
            dict(
                action="append",
                default=[],
                metavar="PATH",
                dest="coverage_source",
                help="Measure coverage for filesystem path (multi-allowed)",
            ),
        ),
        (
            "--coverage-report",
            dict(
                action="append",
                default=[],
                metavar="TYPE",
                choices=["term", "term-missing", "annotate", "html", "xml"],
                dest="coverage_report",
                help="Generate selected reports, available types:"
                " term, term-missing, annotate, html, xml (multi-allowed)",
            ),
        ),
        (
            "--coverage-config",
            dict(
                action="store",
                default="",
                metavar="FILE",
                dest="coverage_config",
                help="Config file for coverage, default: .coveragerc",
            ),
        ),
    ):
        group.add_argument(flag, **options)
    # set up later (presumably once command-line args are known)
    self.covController = None
def _tokenize_assert(source_lines, frame_locals, frame_globals):
    """
    Given a set of lines of source ending in a failing assert, plus the
    frame locals and globals, tokenize source.
    Only look at tokens in the final assert statement
    Resolve all names to repr() of values

    Return
    The line on which the assert starts (relative to start of source_lines)
    A collection of token descriptions as a name=val ordered dict
    """
    # generate_tokens needs a readline callable, hence the StringIO wrapper
    reader = six.StringIO(textwrap.dedent("".join(source_lines)))

    # The processor reports the starting line each time a new assert
    # statement begins; keeping only the last report handles multiline
    # statements such as
    #     assert (x ==
    #             1)
    # where the raw source would otherwise leave us holding just " 1)".
    # Repeated asserts are fine too: source_lines was truncated to end at
    # the failing assert, so the last report is always the one we want.
    assert_startline = None
    processor = TokenProcessor(frame_locals, frame_globals)
    for tokty, tok, start, end, tok_lineno in tokenize.generate_tokens(
            reader.readline):
        maybe_start = processor.handle_token(tokty, tok, start, end, tok_lineno)
        if maybe_start:
            assert_startline = maybe_start

    # convert from 1-based line number to 0-based index into source_lines
    if assert_startline:
        assert_startline -= 1

    # map each collected name to the repr() of its value, preserving order
    token_descriptions = collections.OrderedDict(
        (name, repr(value))
        for (name, value) in processor.get_token_collection().items()
    )

    return assert_startline, token_descriptions
def setUp(self):
    """Build a fresh session and a TestId plugin writing to a StringIO."""
    super(UnitTestTestId, self).setUp()
    # the plugin's output is collected in memory for inspection
    self.session = session.Session()
    self.stream = six.StringIO()
    self.plugin = testid.TestId(session=self.session)
def __init__(self, stream):
    """Wrap *stream*, holding written data in an in-memory buffer."""
    self._buffer = six.StringIO()
    self._stream = stream
def __init__(self):
    """Provide in-memory stand-ins for the standard output streams."""
    # one independent buffer per stream
    for attr in ("stdout", "stderr"):
        setattr(self, attr, six.StringIO())