def pytest_collectreport(self, report):
    # Show errors that occurred during collection instantly.
    TerminalReporter.pytest_collectreport(self, report)
    if report.failed:
        if self.isatty:
            self.rewrite('')  # erase the "collecting"/"collected" message
        self.print_failure(report)
def pytest_runtest_logreport(self, report):
    TerminalReporter.pytest_runtest_logreport(self, report)
    if self.verbosity <= 0:
        return
    if report.when != 'call':
        return
    if self.config.getoption('--sys-stats') is False:
        return
    if self.verbosity > 1:
        # Late Import
        self.ensure_newline()
        self.section('Processes Statistics', sep='-', bold=True)
        template = ' {} - CPU: {:6.2f} % MEM: {:6.2f} %'
        if not IS_WINDOWS:
            template += ' SWAP: {:6.2f} %'
        template += '\n'
        self.write(
            template.format(
                ' System',
                psutil.cpu_percent(),
                psutil.virtual_memory().percent,
                psutil.swap_memory().percent))
        for name, psproc in self._session.stats_processes.items():
            with psproc.oneshot():
                cpu = psproc.cpu_percent()
                mem = psproc.memory_percent('vms')
                if not IS_WINDOWS:
                    swap = psproc.memory_percent('swap')
                    formatted = template.format(name, cpu, mem, swap)
                else:
                    formatted = template.format(name, cpu, mem)
                self.write(formatted)
def pytest_unconfigure(config):
    """ called before test process is exited."""
    if config.option.log_file:
        TerminalReporter(config).write('\n')
        TerminalReporter(config).write_sep(
            '-', 'generated log file: ' + config.option.log_file)
        TerminalReporter(config).write('\n\n')
def test_internalerror(self, testdir, linecomp):
    modcol = testdir.getmodulecol("def test_one(): pass")
    rep = TerminalReporter(modcol.config, file=linecomp.stringio)
    with pytest.raises(ValueError) as excinfo:
        raise ValueError("hello")
    rep.pytest_internalerror(excinfo.getrepr())
    linecomp.assert_contains_lines(["INTERNALERROR> *ValueError*hello*"])
def __init__(self, comm, config, file, mpi_reporter):
    """ """
    TerminalReporter.__init__(self, config, file)
    self.comm = comm
    self.mpi_reporter = mpi_reporter
def pytest_collectreport(self, report):
    TerminalReporter.pytest_collectreport(self, report)
    if report.location[0]:
        self.paths_left.append(os.path.join(os.getcwd(), report.location[0]))
    if report.failed:
        self.rewrite("")
        self.print_failure(report)
def pytest_collectreport(self, report): """ Live errors during test suite run """ TerminalReporter.pytest_collectreport(self, report) if report.failed: if self.isatty: self.rewrite("") self.print_failure(report)
def pytest_runtest_logreport(self, report): """ Shows failures and errors as tests are running """ TerminalReporter.pytest_runtest_logreport(self, report) if report.failed and not hasattr(report, "wasxfail"): if self.verbosity <= 0: self._tw.line() self.print_failure(report)
def pytest_runtest_logreport(self, report):
    TerminalReporter.pytest_runtest_logreport(self, report)
    if self.verbosity <= 0:
        return
    if report.when != 'call':
        return
    if self.config.getoption('--sys-stats') is False:
        return
    test_daemon = getattr(self._session, 'test_daemon', None)
    if self.verbosity == 1:
        line = ' [CPU:{0}%|MEM:{1}%]'.format(
            psutil.cpu_percent(),
            psutil.virtual_memory().percent)
        self._tw.write(line)
        return
    else:
        self.ensure_newline()
        template = ' {} - CPU: {:6.2f} % MEM: {:6.2f} % SWAP: {:6.2f} %\n'
        self._tw.write(
            template.format(
                ' System',
                psutil.cpu_percent(),
                psutil.virtual_memory().percent,
                psutil.swap_memory().percent))
        for name, psproc in self._session.stats_processes.items():
            with psproc.oneshot():
                cpu = psproc.cpu_percent()
                mem = psproc.memory_percent('vms')
                swap = psproc.memory_percent('swap')
                self._tw.write(template.format(name, cpu, mem, swap))
def pytest_runtest_logreport(self, report):
    # Show failures and errors occurring during running a test
    # instantly.
    TerminalReporter.pytest_runtest_logreport(self, report)
    if (report.failed or report.outcome == "rerun") and not hasattr(report, 'wasxfail'):
        if self.verbosity <= 0:
            self._tw.line()
        self.print_failure(report)
def __init__(self, reporter):
    TerminalReporter.__init__(self, reporter.config)
    self.writer = self._tw
    self.tests_count = 0
    self.reports = []
    self.skipped = []
    self.failed = []
    self.result = StreamResultToBytes(self.writer._file)
def pytest_runtest_logreport(self, report):
    # Show failures and errors occurring during running a test
    # instantly.
    TerminalReporter.pytest_runtest_logreport(self, report)
    if report.failed and not hasattr(report, 'wasxfail'):
        if self.verbosity <= 0:
            self._tw.line()
        self.print_failure(report)
def pytest_collectreport(self, report):
    TerminalReporter.pytest_collectreport(self, report)
    if report.location[0]:
        self.paths_left.append(
            os.path.join(os.getcwd(), report.location[0]))
    if report.failed:
        self.rewrite("")
        self.print_failure(report)
def test_internalerror(self, testdir, linecomp):
    modcol = testdir.getmodulecol("def test_one(): pass")
    rep = TerminalReporter(modcol.config, file=linecomp.stringio)
    excinfo = pytest.raises(ValueError, "raise ValueError('hello')")
    rep.pytest_internalerror(excinfo.getrepr())
    linecomp.assert_contains_lines([
        "INTERNALERROR> *ValueError*hello*"
    ])
def pytest_collectreport(self, report): """Report failure during colltion. :type report: :py:class:_pytest.runner.CollectReport """ TerminalReporter.pytest_collectreport(self, report) if report.failed: self.report_failure(report, when='collect')
def __init__(self, config):
    TerminalReporter.__init__(self, config)
    self._session = None
    self._show_sys_stats = config.getoption('--sys-stats') is True
    self._sys_stats_no_children = config.getoption('--sys-stats-no-children') is True
    if config.getoption('--sys-stats-uss-mem') is True:
        self._sys_stats_mem_type = 'uss'
    else:
        self._sys_stats_mem_type = 'rss'
def pytest_runtest_logreport(self, report):
    TerminalReporter.pytest_runtest_logreport(self, report)
    res = self.config.hook.pytest_report_teststatus(report=report)
    cat, letter, word = res
    if not letter and not word:
        # probably passed setup/teardown
        return
    if self.sqlcount:
        self._tw.write(" %d" % report.sqlcount, blue=True)
def test_writeline(self, testdir, linecomp):
    modcol = testdir.getmodulecol("def test_one(): pass")
    rep = TerminalReporter(modcol.config, file=linecomp.stringio)
    rep.write_fspath_result(modcol.nodeid, ".")
    rep.write_line("hello world")
    lines = linecomp.stringio.getvalue().split('\n')
    assert not lines[0]
    assert lines[1].endswith(modcol.name + " .")
    assert lines[2] == "hello world"
def test_rewrite(self, testdir, monkeypatch):
    config = testdir.parseconfig()
    f = py.io.TextIO()
    monkeypatch.setattr(f, 'isatty', lambda *args: True)
    tr = TerminalReporter(config, f)
    tr.writer.fullwidth = 10
    tr.write('hello')
    tr.rewrite('hey', erase=True)
    assert f.getvalue() == 'hello' + '\r' + 'hey' + (7 * ' ')
def test_rewrite(self, testdir, monkeypatch):
    config = testdir.parseconfig()
    f = py.io.TextIO()
    monkeypatch.setattr(f, "isatty", lambda *args: True)
    tr = TerminalReporter(config, f)
    tr._tw.fullwidth = 10
    tr.write("hello")
    tr.rewrite("hey", erase=True)
    assert f.getvalue() == "hello" + "\r" + "hey" + (6 * " ")
def pytest_runtest_logreport(self, report):
    rep = report
    res = self.config.hook.pytest_report_teststatus(report=rep)
    cat, letter, word = res
    if not letter and not word:
        # probably passed setup/teardown
        return
    if isinstance(word, tuple):
        word, word_markup = word
    else:
        if rep.passed:
            word_markup = {'green': True}
        elif rep.failed:
            word_markup = {'red': True}
        elif rep.skipped:
            word_markup = {'yellow': True}
    feature_markup = {'blue': True}
    scenario_markup = word_markup
    if self.verbosity <= 0:
        return TerminalReporter.pytest_runtest_logreport(self, rep)
    elif self.verbosity == 1:
        if hasattr(report, 'scenario'):
            self.ensure_newline()
            self._tw.write('Feature: ', **feature_markup)
            self._tw.write(report.scenario['feature']['name'], **feature_markup)
            self._tw.write('\n')
            self._tw.write('    Scenario: ', **scenario_markup)
            self._tw.write(report.scenario['name'], **scenario_markup)
            self._tw.write(' ')
            self._tw.write(word, **word_markup)
            self._tw.write('\n')
        else:
            return TerminalReporter.pytest_runtest_logreport(self, rep)
    elif self.verbosity > 1:
        if hasattr(report, 'scenario'):
            self.ensure_newline()
            self._tw.write('Feature: ', **feature_markup)
            self._tw.write(report.scenario['feature']['name'], **feature_markup)
            self._tw.write('\n')
            self._tw.write('    Scenario: ', **scenario_markup)
            self._tw.write(report.scenario['name'], **scenario_markup)
            self._tw.write('\n')
            for step in report.scenario['steps']:
                if self.config.option.expand:
                    step_name = self._format_step_name(step['name'], **report.scenario['example_kwargs'])
                else:
                    step_name = step['name']
                self._tw.write('        {} {}\n'.format(step['keyword'], step_name), **scenario_markup)
            self._tw.write('    ' + word, **word_markup)
            self._tw.write('\n\n')
        else:
            return TerminalReporter.pytest_runtest_logreport(self, rep)
    self.stats.setdefault(cat, []).append(rep)
def pytest_collectreport(self, report):
    # Show errors that occurred during collection instantly.
    TerminalReporter.pytest_collectreport(self, report)
    if self.config.option.instafail:
        if report.failed:
            if self.isatty:
                self.rewrite('')  # erase the "collecting"/"collected" message
            self.print_failure(report)
def __init__(self, reporter):
    TerminalReporter.__init__(self, reporter.config)
    self.paths_left = []
    self.tests_count = 0
    self.tests_taken = 0
    self.reports = []
    self.unreported_errors = []
    self.progress_blocks = []
    self.reset_tracked_lines()
def __init__(self, builtin):
    # Pass in the builtin reporter's config so we're not redoing all of its
    # initial setup/cli parsing/etc. NOTE: TerminalReporter is old-style :(
    TerminalReporter.__init__(self, builtin.config)
    # Which headers have already been displayed
    # TODO: faster data structure probably wise
    self.headers_displayed = []
    # Size of indents. TODO: configuration
    self.indent = " " * 4
def pytest_runtest_logreport(self, report):
    rep = report
    res = self.config.hook.pytest_report_teststatus(report=rep)
    cat, letter, word = res
    if not letter and not word:
        # probably passed setup/teardown
        return
    if isinstance(word, tuple):
        word, word_markup = word
    else:
        if rep.passed:
            word_markup = {'green': True}
        elif rep.failed:
            word_markup = {'red': True}
        elif rep.skipped:
            word_markup = {'yellow': True}
    feature_markup = {'blue': True}
    scenario_markup = word_markup
    if self.verbosity <= 0:
        return TerminalReporter.pytest_runtest_logreport(self, rep)
    elif self.verbosity == 1:
        if hasattr(report, 'scenario'):
            self.ensure_newline()
            self._tw.write('Feature: ', **feature_markup)
            self._tw.write(report.scenario['feature']['name'], **feature_markup)
            self._tw.write('\n')
            self._tw.write('    Scenario: ', **scenario_markup)
            self._tw.write(report.scenario['name'], **scenario_markup)
            self._tw.write(' ')
            self._tw.write(word, **word_markup)
            self._tw.write('\n')
        else:
            return TerminalReporter.pytest_runtest_logreport(self, rep)
    elif self.verbosity > 1:
        if hasattr(report, 'scenario'):
            self.ensure_newline()
            self._tw.write('Feature: ', **feature_markup)
            self._tw.write(report.scenario['feature']['name'], **feature_markup)
            self._tw.write('\n')
            self._tw.write('    Scenario: ', **scenario_markup)
            self._tw.write(report.scenario['name'], **scenario_markup)
            self._tw.write('\n')
            for step in report.scenario['steps']:
                self._tw.write(
                    '        {} {}\n'.format(step['keyword'], step['name']),
                    **scenario_markup)
            self._tw.write('    ' + word, **word_markup)
            self._tw.write('\n\n')
        else:
            return TerminalReporter.pytest_runtest_logreport(self, rep)
    self.stats.setdefault(cat, []).append(rep)
def pytest_runtest_logreport(self, report):
    rep = report
    res = self.config.hook.pytest_report_teststatus(report=rep, config=self.config)
    cat, letter, word = res
    if not letter and not word:
        # probably passed setup/teardown
        return
    if isinstance(word, tuple):
        word, word_markup = word
    else:
        if rep.passed:
            word_markup = {"green": True}
        elif rep.failed:
            word_markup = {"red": True}
        elif rep.skipped:
            word_markup = {"yellow": True}
    feature_markup = {"blue": True}
    scenario_markup = word_markup
    if self.verbosity <= 0:
        return TerminalReporter.pytest_runtest_logreport(self, rep)
    elif self.verbosity == 1:
        if hasattr(report, "scenario"):
            self.ensure_newline()
            self._tw.write("Feature: ", **feature_markup)
            self._tw.write(report.scenario["feature"]["name"], **feature_markup)
            self._tw.write("\n")
            self._tw.write("    Scenario: ", **scenario_markup)
            self._tw.write(report.scenario["name"], **scenario_markup)
            self._tw.write(" ")
            self._tw.write(word, **word_markup)
            self._tw.write("\n")
        else:
            return TerminalReporter.pytest_runtest_logreport(self, rep)
    elif self.verbosity > 1:
        if hasattr(report, "scenario"):
            self.ensure_newline()
            self._tw.write("Feature: ", **feature_markup)
            self._tw.write(report.scenario["feature"]["name"], **feature_markup)
            self._tw.write("\n")
            self._tw.write("    Scenario: ", **scenario_markup)
            self._tw.write(report.scenario["name"], **scenario_markup)
            self._tw.write("\n")
            for step in report.scenario["steps"]:
                if self.config.option.expand:
                    step_name = self._format_step_name(step["name"], **report.scenario["example_kwargs"])
                else:
                    step_name = step["name"]
                self._tw.write("        {} {}\n".format(step["keyword"], step_name), **scenario_markup)
            self._tw.write("    " + word, **word_markup)
            self._tw.write("\n\n")
        else:
            return TerminalReporter.pytest_runtest_logreport(self, rep)
    self.stats.setdefault(cat, []).append(rep)
def __init__(self, reporter):
    TerminalReporter.__init__(self, reporter.config)
    self.writer = self._tw
    self.paths_left = []
    self.tests_count = 0
    self.tests_taken = 0
    self.reports = []
    self.unreported_errors = []
    self.progress_blocks = []
    self.reset_tracked_lines()
def __init__(self, config):
    TerminalReporter.__init__(self, config)
    self._session = None
    self._show_sys_stats = config.getoption("--sys-stats") is True
    self._sys_stats_no_children = config.getoption(
        "--sys-stats-no-children") is True
    if config.getoption("--sys-stats-uss-mem") is True:
        self._sys_stats_mem_type = "uss"
    else:
        self._sys_stats_mem_type = "rss"
def test_writeline(self, testdir, linecomp):
    modcol = testdir.getmodulecol("def test_one(): pass")
    stringio = py.io.TextIO()
    rep = TerminalReporter(modcol.config, file=linecomp.stringio)
    rep.write_fspath_result(py.path.local("xy.py"), '.')
    rep.write_line("hello world")
    lines = linecomp.stringio.getvalue().split('\n')
    assert not lines[0]
    assert lines[1].endswith("xy.py .")
    assert lines[2] == "hello world"
def __init__(self, reporter):
    TerminalReporter.__init__(self, reporter.config)
    self.writer = self._tw
    self.paths_left = []
    self.tests_count = 0
    self.tests_taken = 0
    self.current_line = ''
    self.currentfspath2 = ''
    self.time_taken = {}
    self.reports = []
    self.unreported_errors = []
def pytest_sessionfinish(session, exitstatus):
    standard_reporter = session.config.pluginmanager.getplugin('terminalreporter')
    assert isinstance(standard_reporter, TerminalReporterMPI)
    # if not standard_reporter.mpi_reporter.post_done:
    #     standard_reporter.mpi_reporter.pytest_sessionfinish(session)
    assert standard_reporter.mpi_reporter.post_done == True
    for i_report, report in standard_reporter.mpi_reporter.reports_gather.items():
        # print(" \n ", i_report, " 2/ ---> ", report, "\n")
        TerminalReporter.pytest_runtest_logreport(standard_reporter, report[0])
def _setup_local_spark(out: TerminalReporter, verbosity=0):
    # TODO make a "spark_context" fixture instead of doing this through pytest_configure
    out.write_line("[conftest.py] Setting up local Spark")

    travis_mode = 'TRAVIS' in os.environ
    master_str = "local[2]" if travis_mode else "local[2]"

    if 'PYSPARK_PYTHON' not in os.environ:
        os.environ['PYSPARK_PYTHON'] = sys.executable

    from geopyspark import geopyspark_conf
    from pyspark import SparkContext

    conf = geopyspark_conf(master=master_str, appName="OpenEO-GeoPySpark-Driver-Tests")
    conf.set('spark.kryoserializer.buffer.max', value='1G')
    conf.set(key='spark.kryo.registrator', value='geopyspark.geotools.kryo.ExpandedKryoRegistrator')
    conf.set(
        key='spark.kryo.classesToRegister',
        value='org.openeo.geotrellisaccumulo.SerializableConfiguration,ar.com.hjg.pngj.ImageInfo,ar.com.hjg.pngj.ImageLineInt,geotrellis.raster.RasterRegion$GridBoundsRasterRegion'
    )
    # Only show spark progress bars for high verbosity levels
    conf.set('spark.ui.showConsoleProgress', verbosity >= 3)

    if travis_mode:
        conf.set(key='spark.driver.memory', value='2G')
        conf.set(key='spark.executor.memory', value='2G')
        conf.set('spark.ui.enabled', False)
    else:
        conf.set('spark.ui.enabled', True)

    out.write_line("[conftest.py] SparkContext.getOrCreate with {c!r}".format(
        c=conf.getAll()))
    context = SparkContext.getOrCreate(conf)
    out.write_line("[conftest.py] JVM info: {d!r}".format(
        d={
            f: context._jvm.System.getProperty(f)
            for f in [
                "java.version", "java.vendor", "java.home",
                "java.class.version",
                # "java.class.path",
            ]
        }))

    out.write_line("[conftest.py] Validating the Spark context")
    dummy = context._jvm.org.openeo.geotrellis.OpenEOProcesses()
    answer = context.parallelize([9, 10, 11, 12]).sum()
    out.write_line("[conftest.py] " + repr((answer, dummy)))

    return context
def __init__(self, reporter):
    TerminalReporter.__init__(self, reporter.config)
    self.writer = self._tw
    self.paths_left = []
    self.tests_count = 0
    self.tests_taken = 0
    self.current_line = ''
    self.currentfspath2 = ''
    self.reports = []
    self.unreported_errors = []
    self.progress_blocks = []
def __init__(self, reporter):
    #pytest_collectreport = self.pytest_collectreport
    TerminalReporter.__init__(self, reporter.config)
    self.writer = self._tw
    self.eta_logger = EtaLogger()
    self.paths_left = []
    self.tests_count = 0
    self.tests_taken = 0
    self.current_line = u''
    self.currentfspath2 = ''
    self.time_taken = {}
    self.reports = []
    self.unreported_errors = []
def __init__(self, reporter):
    TerminalReporter.__init__(self, reporter.config)
    self._tw = reporter._tw
    self.tests_count = 0
    self.tests_taken = 0
    self.pass_count = 0
    self.fail_count = 0
    self.skip_count = 0
    self.xpass_count = 0
    self.xfail_count = 0
    self.error_count = 0
    self.rerun_count = 0
    self.executed_nodes = []
def __init__(self, reporter, print_every_x_pass=1):
    TerminalReporter.__init__(self, reporter.config)
    self.print_every_x_pass = print_every_x_pass
    self._tw = reporter._tw
    self.tests_count = 0
    self.tests_taken = Counter()
    self.pass_count = Counter()
    self.fail_count = Counter()
    self.skip_count = Counter()
    self.xpass_count = Counter()
    self.xfail_count = Counter()
    self.error_count = Counter()
    self.rerun_count = Counter()
def __init__(self, reporter, manager):
    TerminalReporter.__init__(self, reporter.config)
    self._tw = reporter.writer  # some monkeypatching needed to access existing writer
    self.manager = manager
    self.stats = dict()
    self.stat_keys = [
        'passed', 'failed', 'error', 'skipped', 'warnings', 'xpassed', 'xfailed', ''
    ]
    for key in self.stat_keys:
        self.stats[key] = manager.list()
    self.stats_lock = manager.Lock()
    self._progress_items_reported_proxy = manager.Value('i', 0)
def perform_collect_and_run(session):
    """Collect and run tests streaming from the redis queue."""
    # This mimics the internal pytest collect loop, but shortened
    # while running tests as soon as they are found.
    term = TerminalReporter(session.config)
    redis_connection = get_redis_connection(session.config)
    redis_list = populate_test_generator(session, redis_connection)
    default_verbosity = session.config.option.verbose
    hook = session.config.hook
    session._initialpaths = set()
    session._initialparts = []
    session._notfound = []
    session.items = []
    for arg in redis_list:
        term.write(os.linesep)
        parts = session._parsearg(arg)
        session._initialparts.append(parts)
        session._initialpaths.add(parts[0])
        arg = "::".join(map(str, parts))
        session.trace("processing argument", arg)
        session.trace.root.indent += 1
        try:
            for x in session._collect(arg):
                items = session.genitems(x)
                new_items = []
                for item in items:
                    new_items.append(item)
                # HACK ATTACK: This little hack lets us remove the
                # 'collected' and 'collecting' messages while still
                # keeping the default verbosity for the rest of the
                # run...
                session.config.option.verbose = -1
                hook.pytest_collection_modifyitems(session=session,
                                                   config=session.config,
                                                   items=new_items)
                session.config.option.verbose = default_verbosity
                for item in new_items:
                    session.items.append(item)
                    _pytest.runner.pytest_runtest_protocol(item, None)
        except NoMatch:
            # we are inside a make_report hook so
            # we cannot directly pass through the exception
            raise pytest.UsageError("Could not find " + arg)
        session.trace.root.indent -= 1
    return session.items
def __init__(self, config, file=None):
    TerminalReporter.__init__(self, config, file)
    self._last_header = None
    self.pattern_config = models.PatternConfig(
        files=self.config.getini('python_files'),
        functions=self.config.getini('python_functions'),
        classes=self.config.getini('python_classes'))
    self.result_wrappers = []
    if config.getini('pspec_format') != 'plaintext':
        self.result_wrappers.append(wrappers.UTF8Wrapper)
    if config.option.color != 'no':
        self.result_wrappers.append(wrappers.ColorWrapper)
def pytest_terminal_summary(terminalreporter: TerminalReporter, exitstatus: int, config: Config) -> None:
    """Create the custom report.

    In the directory that contains the report generator, the report
    database is created and the report generator is called.
    """
    # path to the report generator
    reporter_path = config.getoption("report_generator")
    if reporter_path is None:
        return
    if not terminalreporter.stats:
        # no tests have been run, thus no report to create or update
        return
    output_root = Path(config.getoption("output_root"))
    terminalreporter.write_sep("=", "starting report generation")
    try:
        report.generate(reporter_path, output_root, terminalreporter)
    except Exception as e:
        terminalreporter.write_line(str(e), red=True)
        terminalreporter.write_sep("=", "report generation failed", red=True)
    else:
        terminalreporter.write_sep("=", "report generation done")
def pytest_terminal_summary(self, terminalreporter: TerminalReporter) -> None:
    if self.known_failure_cases:
        terminalreporter.section('Known failure cases', bold=True, yellow=True)
        terminalreporter.line('\n'.join(self.known_failure_cases))
    if self.failed_cases:
        terminalreporter.section('Failed cases', bold=True, red=True)
        terminalreporter.line('\n'.join(self.failed_cases))
def summary_errors(self):
    reports = self.getreports('error')
    if not reports:
        return
    for rep in self.stats['error']:
        name = rep.nodeid.split("/")[-1]
        location = None
        if hasattr(rep, 'location'):
            location, lineno, domain = rep.location
        messages.testSuiteStarted(name, location=fspath_to_url(location))
        messages.testStarted("ERROR", location=fspath_to_url(location))
        TerminalReporter.summary_errors(self)
        messages.testError("ERROR")
        messages.testSuiteFinished(name)
def summary_errors(self):
    reports = self.getreports('error')
    if not reports:
        return
    for rep in self.stats['error']:
        name = rep.nodeid.split("/")[-1]
        location = None
        if hasattr(rep, 'location'):
            location, lineno, domain = rep.location
        messages.testSuiteStarted(name, location=fspath_to_url(location))
        messages.testStarted("<noname>", location=fspath_to_url(location))
        TerminalReporter.summary_errors(self)
        messages.testError("<noname>")
        messages.testSuiteFinished(name)
def __init__(self, reporter):
    global THEME, LEN_PROGRESS_BAR_SETTING
    TerminalReporter.__init__(self, reporter.config)
    self.writer = self._tw
    self.paths_left = []
    self.tests_count = 0
    self.tests_taken = 0
    self.current_line = ''
    self.currentfspath2 = ''
    self.reports = []
    self.unreported_errors = []
    self.progress_blocks = []
    if self.config.option.color == "no":
        THEME = THEMES['no-color']
    LEN_PROGRESS_BAR_SETTING = self.config.option.progressbar_len
def redis_test_generator(config, redis_connection, redis_list_key,
                         backup_list_key=None):
    """A generator that pops and returns test paths from the redis list key."""
    term = TerminalReporter(config)
    val = retrieve_test_from_redis(redis_connection, redis_list_key, backup_list_key)
    if val is None:
        term.write("No items in redis list '%s'\n" % redis_list_key)
    while val is not None:
        yield val
        val = retrieve_test_from_redis(redis_connection, redis_list_key, backup_list_key)
def __init__(self, config=None, file=None):
    if config:
        # If we have a config, nothing more needs to be done
        return TerminalReporter.__init__(self, config, file)

    # Without a config, pretend to be a TerminalReporter
    # hook-related functions (logreport, collection, etc.) will be outright broken,
    # but the line writers should still be usable
    if file is None:
        file = sys.stdout

    self._tw = self.writer = TerminalWriter(file)
    self.hasmarkup = self._tw.hasmarkup
    self.reportchars = ''
    self.currentfspath = None
def __init__(self, reporter): """Initialize TerminalReporter without features we don't need. :type reporter: :py:class:`_pytest.terminal.TerminalReporter` """ TerminalReporter.__init__(self, reporter.config)
def __init__(self, reporter):
    TerminalReporter.__init__(self, reporter.config)
    self._tw = reporter._tw
def __init__(self, reporter):
    TerminalReporter.__init__(self, reporter.config)
    self.config = reporter.config
    self.outdent = DEFAULT_OUTDENT
    self.stdout = os.fdopen(os.dup(sys.stdout.fileno()), 'w')
    self.tw = py._io.terminalwriter.TerminalWriter(self.stdout)
def __init__(self, config):
    TerminalReporter.__init__(self, config)
def __init__(self, config, file=None):
    TerminalReporter.__init__(self, config, file)