def pytest_terminal_summary(terminalreporter: TerminalReporter, exitstatus: int, config: Config) -> None:
    """Create the custom report.

    In the directory that contains the report generator, the report database
    is created and the report generator is called.
    """
    # No generator configured means the plugin is inactive for this run.
    generator_path = config.getoption("report_generator")
    if generator_path is None:
        return
    # Empty stats: no tests ran, so there is no report to create or update.
    if not terminalreporter.stats:
        return

    output_dir = Path(config.getoption("output_root"))
    terminalreporter.write_sep("=", "starting report generation")
    try:
        report.generate(generator_path, output_dir, terminalreporter)
    except Exception as exc:
        # Surface the failure prominently but do not crash pytest's teardown.
        terminalreporter.write_line(str(exc), red=True)
        terminalreporter.write_sep("=", "report generation failed", red=True)
    else:
        terminalreporter.write_sep("=", "report generation done")
def pytest_terminal_summary(terminalreporter: TerminalReporter, exitstatus: int, config: Config) -> None:
    """Show factoryboy random state when there are test failures or errors.

    Factoryboy uses randomness in order to generate its values which is great
    for fuzzing but makes it really hard to reproduce tests which fail due to
    a fuzzed value. This hook outputs the random state used by factory-boy
    (and faker) when there are test failures (or errors). The outputted state
    is an ascii, base64 encoded pickle dump.

    Args:
        terminalreporter (TerminalReporter): Add output to the pytest output.
        exitstatus (int): The exit status of pytest (unused)
        config (Config): The pytest config (unused)
    """
    # CLI option wins; the environment variable is a fallback switch.
    env_enabled = os.environ.get("SHOW_FACTORYBOY_STATE") == "True"
    show_state = config.getoption("show_state") or env_enabled

    failures: list[BaseReport] = terminalreporter.getreports("failed")
    errors: list[BaseReport] = terminalreporter.getreports("error")
    if not (show_state and (failures or errors)):
        return

    terminalreporter.write_sep("=", "factory-boy random state")
    pickled_state = pickle.dumps(factory.random.get_random_state())
    terminalreporter.write_line(base64.b64encode(pickled_state).decode("ascii"))
def test_writeline(self, testdir, linecomp):
    """A write_line after a partial fspath result starts on a fresh line."""
    modcol = testdir.getmodulecol("def test_one(): pass")
    reporter = TerminalReporter(modcol.config, file=linecomp.stringio)
    reporter.write_fspath_result(modcol.nodeid, ".")
    reporter.write_line("hello world")
    first, second, third = linecomp.stringio.getvalue().split("\n")[:3]
    assert not first
    assert second.endswith(modcol.name + " .")
    assert third == "hello world"
def test_writeline(self, testdir, linecomp):
    """A write_line after a partial fspath result starts on a fresh line.

    Fix: the original created ``stringio = py.io.TextIO()`` and never used
    it (the reporter writes to ``linecomp.stringio``); the dead local is
    removed.
    """
    modcol = testdir.getmodulecol("def test_one(): pass")
    rep = TerminalReporter(modcol.config, file=linecomp.stringio)
    rep.write_fspath_result(py.path.local("xy.py"), '.')
    rep.write_line("hello world")
    lines = linecomp.stringio.getvalue().split('\n')
    assert not lines[0]
    assert lines[1].endswith("xy.py .")
    assert lines[2] == "hello world"
def _setup_local_spark(out: TerminalReporter, verbosity=0):
    """Create (or reuse) a local SparkContext for the test session.

    Configures Kryo serialization, memory limits on Travis, and validates the
    context with a tiny parallelize/sum job.

    Args:
        out: terminal reporter used for progress logging.
        verbosity: pytest verbosity level; >= 3 enables Spark console progress bars.

    Returns:
        The (possibly pre-existing) SparkContext.
    """
    # TODO make a "spark_context" fixture instead of doing this through pytest_configure
    out.write_line("[conftest.py] Setting up local Spark")

    travis_mode = 'TRAVIS' in os.environ
    # Fix: the original conditional had identical branches
    # ("local[2]" if travis_mode else "local[2]"), so it is collapsed to a
    # constant. NOTE(review): a sibling setup uses "local[*]" off-Travis —
    # confirm whether that was intended here too.
    master_str = "local[2]"

    # Make sure Spark workers use the same interpreter as the test run.
    if 'PYSPARK_PYTHON' not in os.environ:
        os.environ['PYSPARK_PYTHON'] = sys.executable

    from geopyspark import geopyspark_conf
    from pyspark import SparkContext

    conf = geopyspark_conf(master=master_str, appName="OpenEO-GeoPySpark-Driver-Tests")
    conf.set('spark.kryoserializer.buffer.max', value='1G')
    conf.set(key='spark.kryo.registrator', value='geopyspark.geotools.kryo.ExpandedKryoRegistrator')
    conf.set(
        key='spark.kryo.classesToRegister',
        value='org.openeo.geotrellisaccumulo.SerializableConfiguration,ar.com.hjg.pngj.ImageInfo,ar.com.hjg.pngj.ImageLineInt,geotrellis.raster.RasterRegion$GridBoundsRasterRegion'
    )
    # Only show spark progress bars for high verbosity levels
    conf.set('spark.ui.showConsoleProgress', verbosity >= 3)
    if travis_mode:
        conf.set(key='spark.driver.memory', value='2G')
        conf.set(key='spark.executor.memory', value='2G')
        conf.set('spark.ui.enabled', False)
    else:
        conf.set('spark.ui.enabled', True)

    out.write_line("[conftest.py] SparkContext.getOrCreate with {c!r}".format(
        c=conf.getAll()))
    context = SparkContext.getOrCreate(conf)
    out.write_line("[conftest.py] JVM info: {d!r}".format(
        d={
            f: context._jvm.System.getProperty(f)
            for f in [
                "java.version",
                "java.vendor",
                "java.home",
                "java.class.version",
                # "java.class.path",
            ]
        }))

    # Smoke-test the context: touch the JVM and run a trivial job.
    out.write_line("[conftest.py] Validating the Spark context")
    dummy = context._jvm.org.openeo.geotrellis.OpenEOProcesses()
    answer = context.parallelize([9, 10, 11, 12]).sum()
    out.write_line("[conftest.py] " + repr((answer, dummy)))

    return context
def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None:
    """Upload each failed test's report to the paste service (``--pastebin=failed``)."""
    if terminalreporter.config.option.pastebin != "failed":
        return
    if "failed" not in terminalreporter.stats:
        return

    terminalreporter.write_sep("=", "Sending information to Paste Service")
    for failure in terminalreporter.stats["failed"]:
        # Prefer the short file/location header; fall back to the headline
        # when the report has no structured traceback.
        try:
            msg = failure.longrepr.reprtraceback.reprentries[-1].reprfileloc
        except AttributeError:
            msg = terminalreporter._getfailureheadline(failure)
        buffer = StringIO()
        writer = create_terminal_writer(terminalreporter.config, buffer)
        failure.toterminal(writer)
        contents = buffer.getvalue()
        assert len(contents)
        pastebinurl = create_new_paste(contents)
        terminalreporter.write_line(f"{msg} --> {pastebinurl}")
def pytest_terminal_summary(self, terminalreporter: TerminalReporter, exitstatus: ExitCode, config: Config) -> None:
    """Summarize the Jira XRAY publication outcome in the terminal."""
    # Failure path: report the publish error (and its message, if any).
    if self.exception:
        terminalreporter.ensure_newline()
        terminalreporter.section('Jira XRAY', sep='-', red=True, bold=True)
        terminalreporter.write_line(
            'Could not publish results to Jira XRAY!')
        if self.exception.message:
            terminalreporter.write_line(self.exception.message)
        return

    # Success path: a logfile implies a local report; otherwise an upload.
    if self.issue_id and self.logfile:
        terminalreporter.write_sep(
            '-',
            f'Generated XRAY execution report file: {Path(self.logfile).absolute()}'
        )
    elif self.issue_id:
        terminalreporter.write_sep(
            '-',
            f'Uploaded results to JIRA XRAY. Test Execution Id: {self.issue_id}'
        )
def _setup_local_spark(out: TerminalReporter, verbosity=0):
    """Start (or reuse) a local SparkContext configured for the test suite."""
    # TODO make a "spark_context" fixture instead of doing this through pytest_configure
    out.write_line("Setting up local Spark")

    on_travis = 'TRAVIS' in os.environ
    master = "local[2]" if on_travis else "local[*]"

    from geopyspark import geopyspark_conf
    from pyspark import SparkContext

    conf = geopyspark_conf(master=master, appName="OpenEO-GeoPySpark-Driver-Tests")
    conf.set('spark.kryoserializer.buffer.max', value='1G')
    # Only show spark progress bars for high verbosity levels
    conf.set('spark.ui.showConsoleProgress', verbosity >= 3)
    if on_travis:
        conf.set(key='spark.driver.memory', value='2G')
        conf.set(key='spark.executor.memory', value='2G')
    conf.set('spark.ui.enabled', not on_travis)

    out.write_line("SparkContext.getOrCreate with {c!r}".format(c=conf.getAll()))
    context = SparkContext.getOrCreate(conf)

    out.write_line("JVM info: {d!r}".format(d={
        prop: context._jvm.System.getProperty(prop)
        for prop in [
            "java.version",
            "java.vendor",
            "java.home",
            "java.class.version",
            # "java.class.path",
        ]
    }))

    # Smoke-test the context: touch the JVM and run a trivial job.
    out.write_line("Validating the Spark context")
    dummy = context._jvm.org.openeo.geotrellis.OpenEOProcesses()
    answer = context.parallelize([9, 10, 11, 12]).sum()
    out.write_line(repr((answer, dummy)))

    return context
def pytest_terminal_summary(self, terminalreporter: TerminalReporter) -> None:
    """Write the collected pytest-infrastructure results to the terminal."""
    squashed = self.infra_manager.get_squashed(self.environment)
    terminalreporter.write_sep("-", "pytest-infrastructure results")
    terminalreporter.write_line(f"{repr(self._resolve_meta_data())}")
    if squashed:
        for fn in squashed:
            terminalreporter.write_line(repr(fn))
    else:
        terminalreporter.write_line(
            "no pytest-infrastructure functions collected & executed.")
def _ensure_geopyspark(out: TerminalReporter):
    """Make sure GeoPySpark knows where to find Spark (SPARK_HOME) and py4j"""
    try:
        import geopyspark
        out.write_line("Succeeded to import geopyspark automatically: {p!r}".format(p=geopyspark))
    except KeyError:
        # Geopyspark failed to detect Spark home and py4j, let's fix that.
        # NOTE(review): the import apparently surfaces a KeyError (not an
        # ImportError) in this situation — confirm against geopyspark internals.
        from pyspark import find_spark_home
        spark_home = Path(find_spark_home._find_spark_home())
        out.write_line("Failed to import geopyspark automatically. "
                       "Will set up py4j path using Spark home: {h}".format(h=spark_home))
        py4j_zip = next((spark_home / 'python' / 'lib').glob('py4j-*-src.zip'))
        out.write_line("py4j zip: {z!r}".format(z=py4j_zip))
        sys.path.append(str(py4j_zip))
def write_report(self, tr: TerminalReporter):
    """Emit every buffered line of ``self.report`` through the reporter."""
    for entry in self.report:
        tr.write_line(entry)
def pytest_terminal_summary(self, terminalreporter: TerminalReporter):
    """Point the user at the generated PDF report at the end of the run."""
    message = f"pdf test report: {str(self.report_path)}"
    terminalreporter.write_line(line=message)
def _print_adcm_url(reporter: TerminalReporter, adcm: ADCM): """Print ADCM URL link to the console output""" reporter.write_line("###################################") reporter.write_line(f"ADCM URL - {adcm.url}") reporter.write_line("###################################")