def storage_backend() -> Generator[AmazonS3Storage, None, None]:
    """Provide an S3 storage backend for all AWS S3 tests.

    For this to work against production S3, you need to set boto3 auth:

    1. AWS_ACCESS_KEY_ID
    2. AWS_SECRET_ACCESS_KEY

    For more details please see:
    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html#environment-variables

    If these variables are not set, and pytest-vcr is not in use, the tests
    *will* fail.
    """
    prefix = 'giftless-tests'

    # We use a live S3 bucket to test
    storage = AmazonS3Storage(bucket_name=TEST_AWS_S3_BUCKET_NAME,
                              path_prefix=prefix)
    try:
        yield storage
    finally:
        bucket = storage.s3.Bucket(TEST_AWS_S3_BUCKET_NAME)
        try:
            bucket.objects.all().delete()
        except Exception as e:
            raise pytest.PytestWarning(
                "Could not clean up after test: {}".format(e))
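# Side note on the cleanup branch above: raising a PytestWarning turns a
# cleanup failure into a hard teardown error, because PytestWarning is an
# Exception subclass. A minimal sketch of the non-fatal alternative (same
# message, emitted through the warnings machinery instead of raised) -- the
# helper name here is illustrative, not part of the fixture above:
import warnings

import pytest


def _warn_cleanup_failure(exc: Exception) -> None:
    warnings.warn(pytest.PytestWarning(
        "Could not clean up after test: {}".format(exc)))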
def pytest_terminal_summary(self, terminalreporter):
    if self._disabled:
        message = 'Coverage disabled via --no-cov switch!'
        terminalreporter.write('WARNING: %s\n' % message, red=True, bold=True)
        if pytest.__version__ >= '3.8':
            warnings.warn(pytest.PytestWarning(message))
        else:
            terminalreporter.config.warn(code='COV-1', message=message)
        return
    if self.cov_controller is None:
        return

    if self.cov_total is None:
        # we shouldn't report, or report generation failed (error raised above)
        return

    terminalreporter.write('\n' + self.cov_report.getvalue() + '\n')

    if self.options.cov_fail_under is not None and self.options.cov_fail_under > 0:
        failed = self.cov_total < self.options.cov_fail_under
        markup = {'red': True, 'bold': True} if failed else {'green': True}
        message = (
            '{fail}Required test coverage of {required}% {reached}. '
            'Total coverage: {actual:.2f}%\n'.format(
                required=self.options.cov_fail_under,
                actual=self.cov_total,
                fail="FAIL " if failed else "",
                reached="not reached" if failed else "reached"))
        terminalreporter.write(message, **markup)
def pytest_runtestloop(self, session):
    yield

    if self._disabled:
        return

    compat_session = compat.SessionWrapper(session)

    self.failed = bool(compat_session.testsfailed)
    if self.cov_controller is not None:
        self.cov_controller.finish()

    if not self._is_worker(session) and self._should_report():
        # import coverage lazily here to avoid importing
        # it for unit tests that don't need it
        from coverage.misc import CoverageException

        try:
            self.cov_total = self.cov_controller.summary(self.cov_report)
        except CoverageException as exc:
            message = 'Failed to generate report: %s\n' % exc
            session.config.pluginmanager.getplugin("terminalreporter").write(
                'WARNING: %s\n' % message, red=True, bold=True)
            warnings.warn(pytest.PytestWarning(message))
            self.cov_total = 0
        assert self.cov_total is not None, 'Test coverage should never be `None`'
        if self._failed_cov_total():
            # make sure we get the EXIT_TESTSFAILED exit code
            compat_session.testsfailed += 1
def pytest_terminal_summary(self, terminalreporter):
    if self._disabled:
        message = 'Coverage disabled via --no-cov switch!'
        terminalreporter.write('WARNING: %s\n' % message, red=True, bold=True)
        if pytest.__version__ >= '3.8':
            warnings.warn(pytest.PytestWarning(message))
        else:
            terminalreporter.config.warn(code='COV-1', message=message)
        return
    if self.ml_runner is None:
        return

    if self.cov_total is None:
        # we shouldn't report, or report generation failed (error raised above)
        return

    terminalreporter.write('\n' + self.cov_report.getvalue() + '\n')

    if self.options.cov_fail_under is not None and self.options.cov_fail_under > 0:
        if self.cov_total < self.options.cov_fail_under:
            markup = {'red': True, 'bold': True}
            message = ('FAIL Required test coverage of %d%% not '
                       'reached. Total coverage: %.2f%%\n'
                       % (self.options.cov_fail_under, self.cov_total))
        else:
            markup = {'green': True}
            message = ('Required test coverage of %d%% '
                       'reached. Total coverage: %.2f%%\n'
                       % (self.options.cov_fail_under, self.cov_total))
        terminalreporter.write(message, **markup)
def storage_backend() -> Generator[GoogleCloudStorage, None, None]:
    """Provide a Google Cloud Storage backend for all GCS tests.

    For this to work against production Google Cloud, you need to set the
    ``GCP_ACCOUNT_KEY_FILE``, ``GCP_PROJECT_NAME`` and ``GCP_BUCKET_NAME``
    environment variables when running the tests.

    If these variables are not set, and pytest-vcr is not in use, the tests
    *will* fail.
    """
    account_key_file = os.environ.get('GCP_ACCOUNT_KEY_FILE')
    project_name = os.environ.get('GCP_PROJECT_NAME')
    bucket_name = os.environ.get('GCP_BUCKET_NAME')
    prefix = 'giftless-tests'

    if account_key_file and project_name and bucket_name:
        # We use a live GCS bucket to test
        storage = GoogleCloudStorage(project_name=project_name,
                                     bucket_name=bucket_name,
                                     account_key_file=account_key_file,
                                     path_prefix=prefix)
        try:
            yield storage
        finally:
            bucket = storage.storage_client.bucket(bucket_name)
            try:
                blobs = bucket.list_blobs(prefix=prefix + '/')
                bucket.delete_blobs(blobs)
            except GoogleAPIError as e:
                raise pytest.PytestWarning(
                    "Could not clean up after test: {}".format(e))
    else:
        yield GoogleCloudStorage(project_name=MOCK_GCP_PROJECT_NAME,
                                 bucket_name=MOCK_GCP_BUCKET_NAME,
                                 account_key_base64=MOCK_GCP_KEY_B64,
                                 path_prefix=prefix)
def _warn_if_env_name_is_not_str(self, name):
    """On Python 2, warn if the given environment variable name is not a native str (#4056)."""
    if six.PY2 and not isinstance(name, str):
        warnings.warn(
            pytest.PytestWarning(
                "Environment variable name {!r} should be str".format(name)))
def _take_screenshot(
        request,
        browser_instance,
        fixture_name,
        session_tmpdir,
        splinter_screenshot_dir,
        splinter_screenshot_getter_html,
        splinter_screenshot_getter_png,
        splinter_screenshot_encoding,
):
    """Capture a screenshot as .png and .html.

    Invoked from session and function browser fixtures.
    """
    slaveoutput = getattr(request.config, 'slaveoutput', None)
    try:
        names = junitxml.mangle_testnames(request.node.nodeid.split("::"))
    except AttributeError:
        # pytest>=2.9.0
        names = junitxml.mangle_test_address(request.node.nodeid)
    classname = '.'.join(names[:-1])
    screenshot_dir = os.path.join(splinter_screenshot_dir, classname)
    screenshot_file_name_format = '{0}.{{format}}'.format(
        '{0}-{1}'.format(names[-1][:128 - len(fixture_name) - 5],
                         fixture_name).replace(os.path.sep, '-'))
    screenshot_file_name = screenshot_file_name_format.format(format='png')
    screenshot_html_file_name = screenshot_file_name_format.format(format='html')
    if not slaveoutput:
        if not os.path.exists(screenshot_dir):
            os.makedirs(screenshot_dir)
    else:
        screenshot_dir = session_tmpdir.ensure('screenshots', dir=True).strpath
    screenshot_png_path = os.path.join(screenshot_dir, screenshot_file_name)
    screenshot_html_path = os.path.join(screenshot_dir, screenshot_html_file_name)
    LOGGER.info('Saving screenshot to %s', screenshot_dir)
    try:
        splinter_screenshot_getter_html(browser_instance, screenshot_html_path)
        splinter_screenshot_getter_png(browser_instance, screenshot_png_path)
        if request.node.splinter_failure.longrepr:
            reprtraceback = request.node.splinter_failure.longrepr.reprtraceback
            reprtraceback.extraline = _screenshot_extraline(screenshot_png_path,
                                                            screenshot_html_path)
        if slaveoutput is not None:
            with codecs.open(screenshot_html_path,
                             encoding=splinter_screenshot_encoding) as html_fd:
                with open(screenshot_png_path, 'rb') as fd:
                    slaveoutput.setdefault('screenshots', []).append({
                        'class_name': classname,
                        'files': [
                            {
                                'file_name': screenshot_file_name,
                                'content': fd.read(),
                            },
                            {
                                'file_name': screenshot_html_file_name,
                                'content': html_fd.read(),
                                'encoding': splinter_screenshot_encoding,
                            },
                        ],
                    })
    except Exception as e:  # NOQA
        warnings.warn(pytest.PytestWarning("Could not save screenshot: {0}".format(e)))
def pytest_sessionstart(session):
    no_fds_soft, no_fds_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    if no_fds_soft < 2028:
        session.warn(
            pytest.PytestWarning(
                f"Too low open filedescriptor limit: {no_fds_soft} (try `ulimit -n 4096`)"
            ))
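# Related sketch (not part of the hook above): a conftest could also try to
# raise the soft limit itself, up to the hard limit, instead of only warning.
# The helper name and the 4096 default are illustrative assumptions.
import resource


def _bump_nofile_limit(minimum: int = 4096) -> None:
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    if soft < minimum:
        # The soft limit can be raised without privileges, but never above
        # the hard limit (unless the hard limit is unlimited).
        target = minimum if hard == resource.RLIM_INFINITY else min(minimum, hard)
        resource.setrlimit(resource.RLIMIT_NOFILE, (target, hard))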
def pytest_runtestloop(self, session):
    yield

    if self._disabled:
        return

    compat_session = SessionWrapper(session)

    self.failed = bool(compat_session.testsfailed)
    if self.ml_runner is not None:
        self.ml_runner.finish()

    if self._should_report():
        try:
            self.cov_total = self.ml_runner.summary(self.cov_report)
        except CoverageException as exc:
            message = 'Failed to generate report: %s\n' % exc
            session.config.pluginmanager.getplugin("terminalreporter").write(
                'WARNING: %s\n' % message, red=True, bold=True)
            if pytest.__version__ >= '3.8':
                warnings.warn(pytest.PytestWarning(message))
            else:
                session.config.warn(code='COV-2', message=message)
            self.cov_total = 0
        assert self.cov_total is not None, 'Test coverage should never be `None`'
        if self._failed_cov_total():
            # make sure we get the EXIT_TESTSFAILED exit code
            compat_session.testsfailed += 1
def pytest_runtest_setup(self, item):
    priority = item.get_closest_marker("priority")
    if priority:
        self.current_priority = priority.args[0]
    else:
        msg = "Priority missing for " + item.nodeid
        warnings.warn(pytest.PytestWarning(msg))
        self.current_priority = None
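# Usage sketch for the hook above: it expects each test to carry a `priority`
# marker with a single positional argument. Assuming the marker is registered
# (e.g. via the `markers` ini option), tests would look like this; the second
# test would trigger the "Priority missing for ..." PytestWarning:
import pytest


@pytest.mark.priority(1)
def test_with_priority():
    assert True


def test_without_priority():
    assert True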
def _remove_overlay(target, request):
    name = target[1:].replace("/", "_")

    # Log any file changes, if requested
    if request.config.getoption('--audit'):
        # Run the garbage collector, just in case it releases
        # some opened file handles
        gc.collect()

        # Do a sync also, just in case
        subprocess.run(["sync"], check=True)

        with open(f'/export/reports/revert_{name}.log', 'a') as f:
            cwd = os.getcwd()
            os.chdir(f'/overlay/{name}/upper')
            mods = glob.glob('**/*', recursive=True)
            if mods:
                # Write out what was modified
                f.write('TEST: {}\n'.format(request.node.nodeid))
                f.write('{}\n\n'.format('\n'.join(sorted(mods))))

                # Check if the fixture is part of the function parameters
                parameters = inspect.signature(request.function).parameters
                if request.fixturename not in parameters:
                    request.node.warn(
                        pytest.PytestWarning(
                            f"'{request.fixturename}' fixture is missing"))
            os.chdir(cwd)

    # Try three times to unmount the overlay
    for attempt in range(1, 4):
        try:
            # Run the garbage collector, just in case it releases
            # some opened file handles
            gc.collect()

            # Do a sync also, just in case
            subprocess.run(["sync"], check=True)

            # Then try to unmount the overlay
            subprocess.run(["umount", f"overlay_{name}"], check=True)

            # It succeeded
            break
        except subprocess.CalledProcessError:
            if attempt < 3:
                # Sleep for a few seconds to give the open file
                # handles a chance to clean themselves up
                time.sleep(3)
            else:
                # We couldn't unmount the overlay
                pytest.fail(f"Unable to unmount overlay_{name}")

    # Clean up the overlay directories
    shutil.rmtree(f"/overlay/{name}")
def teardown_database():
    with django_db_blocker.unblock():
        try:
            teardown_databases(db_cfg, verbosity=request.config.option.verbose)
        except Exception as exc:
            request.node.warn(
                pytest.PytestWarning(
                    "Error when trying to teardown test databases: %r" % exc))
def pytest_ignore_collect(path, config):
    if config.pluginmanager.hasplugin("pytest-benchmark"):
        return False
    else:
        global warn
        if warn:
            warnings.warn(
                pytest.PytestWarning(
                    "Skipping benchmarks because pytest-benchmark plugin was not found."
                ))
            warn = False
        return True
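# If skipping benchmarks is expected in a given environment, the PytestWarning
# above can be silenced with pytest's standard `filterwarnings` ini option,
# which matches against the start of the warning message, e.g.:
#
#   [pytest]
#   filterwarnings =
#       ignore:Skipping benchmarks because pytest-benchmark plugin was not found.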
def selected_types_for_item(item) -> Optional[List[TestType]]:
    type_option = item.config.getoption("-T")
    if not type_option:
        return None

    matches = list(filter(lambda t: t.startswith(type_option), TestType.names()))
    if matches:
        if len(matches) > 1:
            item.warn(
                pytest.PytestWarning(
                    f"--type argument '{type_option}' matched multiple types ({matches})"
                ))
    else:
        type_names = ", ".join(list(TestType.names()))
        item.warn(
            pytest.PytestWarning(
                f"--type argument '{type_option}' does not match any test type: {type_names}"
            ))
    return matches
def setenv(self, name, value, prepend=None):
    """Set environment variable ``name`` to ``value``.

    If ``prepend`` is a character, read the current environment variable
    value and prepend the ``value`` adjoined with the ``prepend`` character.
    """
    if not isinstance(value, str):
        warnings.warn(
            pytest.PytestWarning(
                "Environment variable value {!r} should be str, "
                "converted to str implicitly".format(value)))
        value = str(value)
    if prepend and name in os.environ:
        value = value + prepend + os.environ[name]
    self._warn_if_env_name_is_not_str(name)
    self.setitem(os.environ, name, value)
def setenv(self, name, value, prepend=None):
    """Set environment variable ``name`` to ``value``.

    If ``prepend`` is a character, read the current environment variable
    value and prepend the ``value`` adjoined with the ``prepend`` character.
    """
    if not isinstance(value, str):
        warnings.warn(
            pytest.PytestWarning(
                "Value of environment variable {name} type should be str, but got "
                "{value!r} (type: {type}); converted to str implicitly".format(
                    name=name, value=value, type=type(value).__name__)),
            stacklevel=2,
        )
        value = str(value)
    if prepend and name in os.environ:
        value = value + prepend + os.environ[name]
    self.setitem(os.environ, name, value)
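# Illustrative sketch of the behaviour implemented above: passing a non-str
# value emits a PytestWarning and the value is coerced to str. The test below
# assumes this `setenv` is the one wired into the standard `monkeypatch`
# fixture of the pytest version shown here:
import os

import pytest


def test_setenv_coerces_non_str(monkeypatch):
    with pytest.warns(pytest.PytestWarning):
        monkeypatch.setenv("MY_TEST_VAR", 123)  # int instead of str
    assert os.environ["MY_TEST_VAR"] == "123"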
def test_check_required_parameters_missing_raises_exception(param, invalid_val):
    jwtcp: JwtCredentialsProvider = make_jwtcredentialsprovider()
    valid_val: str = "hello world!"

    if param == JwtCredentialsProvider.KEY_ROLE_ARN:
        jwtcp.role_arn = invalid_val
        jwtcp.jwt = valid_val
    elif param == JwtCredentialsProvider.KEY_WEB_IDENTITY_TOKEN:
        jwtcp.role_arn = valid_val
        jwtcp.jwt = invalid_val
    else:
        raise pytest.PytestWarning(
            "Invalid arg supplied for param: {}".format(param))

    with pytest.raises(InterfaceError,
                       match="Missing required property: {}".format(param)):
        jwtcp.check_required_parameters()
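# Side note on the final `else` branch above: since PytestWarning subclasses
# Warning (and therefore Exception), raising it does abort the test, but the
# more idiomatic guard for an unexpected parametrize value is pytest.fail().
# A minimal sketch (the helper name is illustrative):
import pytest


def _unexpected_param(param) -> None:
    pytest.fail("Invalid arg supplied for param: {}".format(param))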
def pytest_configure(config):
    config.addinivalue_line("markers",
                            "lg_feature: marker for labgrid feature flags")

    terminalreporter = config.pluginmanager.getplugin('terminalreporter')
    capturemanager = config.pluginmanager.getplugin('capturemanager')
    rewrite = True
    lg_log = config.option.lg_log
    if not capturemanager.is_globally_capturing():
        rewrite = False  # other output would interfere with our rewrites

    if terminalreporter.verbosity > 1:  # enable with -vv
        if config.option.lg_colored_steps:
            config.pluginmanager.register(
                ColoredStepReporter(terminalreporter, rewrite=rewrite))
        else:
            config.pluginmanager.register(
                StepReporter(terminalreporter, rewrite=rewrite))
    if terminalreporter.verbosity > 2:  # enable with -vvv
        logging.getLogger().setLevel(logging.DEBUG)
    if lg_log:
        ConsoleLoggingReporter(lg_log)

    env_config = config.option.env_config
    lg_env = config.option.lg_env
    lg_coordinator = config.option.lg_coordinator

    if lg_env is None:
        if env_config is not None:
            warnings.warn(
                pytest.PytestWarning(
                    "deprecated option --env-config (use --lg-env instead)",
                    __file__))
            lg_env = env_config

    env = None
    if lg_env is None:
        lg_env = os.environ.get('LG_ENV')
    if lg_env is not None:
        env = Environment(config_file=lg_env)
        if lg_coordinator is not None:
            env.config.set_option('crossbar_url', lg_coordinator)
    config._labgrid_env = env

    processwrapper.enable_logging()
def pytest_runtest_protocol(self, item):
    lines1 = self.get_open_files()
    yield
    if hasattr(sys, "pypy_version_info"):
        gc.collect()
    lines2 = self.get_open_files()

    new_fds = {t[0] for t in lines2} - {t[0] for t in lines1}
    leaked_files = [t for t in lines2 if t[0] in new_fds]
    if leaked_files:
        error = []
        error.append("***** %s FD leakage detected" % len(leaked_files))
        error.extend([str(f) for f in leaked_files])
        error.append("*** Before:")
        error.extend([str(f) for f in lines1])
        error.append("*** After:")
        error.extend([str(f) for f in lines2])
        error.append(error[0])
        error.append("*** function %s:%s: %s " % item.location)
        error.append("See issue #2366")
        item.warn(pytest.PytestWarning("\n".join(error)))
def pytest_pyfunc_call(pyfuncitem):
    if not pyfuncitem.config.option.reload_loop:
        return None
    else:
        testfunction = pyfuncitem.obj
        iscoroutinefunction = getattr(inspect, "iscoroutinefunction", None)
        if iscoroutinefunction is not None and iscoroutinefunction(testfunction):
            msg = "Coroutine functions are not natively supported and have been skipped.\n"
            msg += "You need to install a suitable plugin for your async framework, for example:\n"
            msg += " - pytest-asyncio\n"
            msg += " - pytest-trio\n"
            msg += " - pytest-tornasync"
            warnings.warn(pytest.PytestWarning(msg.format(pyfuncitem.nodeid)))
            pytest.skip(
                msg="coroutine function and no async plugin installed (see warnings)"
            )
        funcargs = pyfuncitem.funcargs
        testargs = {
            arg: funcargs[arg]
            for arg in pyfuncitem._fixtureinfo.argnames
        }
        passing = False
        while not passing:
            info = CallInfo.from_call(lambda: testfunction(**testargs),
                                      when="call", reraise=None)
            if info.excinfo:
                # build the pytest report
                report = pyfuncitem.ihook.pytest_runtest_makereport(
                    item=pyfuncitem, call=info)
                _enter_pdb(pyfuncitem, info.excinfo, report)
            else:
                passing = True
        # after you've successfully gotten the test to pass, run it one more time
        # (this hack exists because of the hookwrapper logic)
        return info
def pytest_collection_modifyitems(session, config, items):
    failure = None

    session.random_order_bucket_type_key_handlers = []
    process_failed_first_last_failed(session, config, items)

    item_ids = _get_set_of_item_ids(items)

    plugin = Config(config)

    try:
        seed = plugin.seed
        bucket_type = plugin.bucket_type
        if bucket_type != 'none':
            _shuffle_items(
                items,
                bucket_key=bucket_type_keys[bucket_type],
                disable=_disable,
                seed=seed,
                session=session,
            )

    except Exception as e:
        # See the finally block -- we only fail if we have lost user's tests.
        _, _, exc_tb = sys.exc_info()
        failure = 'pytest-random-order plugin has failed with {0!r}:\n{1}'.format(
            e, ''.join(traceback.format_tb(exc_tb, 10))
        )
        if not hasattr(pytest, "PytestWarning"):
            config.warn(0, failure, None)
        else:
            warnings.warn(pytest.PytestWarning(failure))

    finally:
        # Fail only if we have lost user's tests
        if item_ids != _get_set_of_item_ids(items):
            if not failure:
                failure = 'pytest-random-order plugin has failed miserably'
            raise RuntimeError(failure)
def _take_screenshot(
    request,
    browser_instance,
    fixture_name,
    session_tmpdir,
    splinter_screenshot_dir,
    splinter_screenshot_getter_html,
    splinter_screenshot_getter_png,
    splinter_screenshot_encoding,
):
    """Capture a screenshot as .png and .html.

    Invoked from session and function browser fixtures.
    """
    slaveoutput = getattr(request.config, "slaveoutput", None)
    names = junitxml.mangle_test_address(request.node.nodeid)
    classname = ".".join(names[:-1])
    screenshot_dir = os.path.join(splinter_screenshot_dir, classname)
    screenshot_file_name_format = "{0}.{{format}}".format("{}-{}".format(
        names[-1][:128 - len(fixture_name) - 5], fixture_name).replace(os.path.sep, "-"))
    screenshot_file_name = screenshot_file_name_format.format(format="png")
    screenshot_html_file_name = screenshot_file_name_format.format(format="html")
    if not slaveoutput:
        if not os.path.exists(screenshot_dir):
            os.makedirs(screenshot_dir)
    else:
        screenshot_dir = session_tmpdir.ensure("screenshots", dir=True).strpath
    screenshot_png_path = os.path.join(screenshot_dir, screenshot_file_name)
    screenshot_html_path = os.path.join(screenshot_dir, screenshot_html_file_name)
    LOGGER.info("Saving screenshot to %s", screenshot_dir)
    try:
        splinter_screenshot_getter_html(browser_instance, screenshot_html_path)
        splinter_screenshot_getter_png(browser_instance, screenshot_png_path)
        if request.node.splinter_failure.longrepr:
            reprtraceback = request.node.splinter_failure.longrepr.reprtraceback
            reprtraceback.extraline = _screenshot_extraline(
                screenshot_png_path, screenshot_html_path)
        if slaveoutput is not None:
            with codecs.open(screenshot_html_path,
                             encoding=splinter_screenshot_encoding) as html_fd:
                with open(screenshot_png_path, "rb") as fd:
                    slaveoutput.setdefault("screenshots", []).append({
                        "class_name": classname,
                        "files": [
                            {
                                "file_name": screenshot_file_name,
                                "content": fd.read(),
                            },
                            {
                                "file_name": screenshot_html_file_name,
                                "content": html_fd.read(),
                                "encoding": splinter_screenshot_encoding,
                            },
                        ],
                    })
    except Exception as e:  # NOQA
        warnings.warn(
            pytest.PytestWarning("Could not save screenshot: {}".format(e)))