def get_snapshot(rule_runner: RuleRunner, source_files: dict[str, str]) -> Snapshot:
    """Build an in-memory Snapshot from a mapping of file path -> file content."""
    files = [FileContent(path, content.encode()) for path, content in source_files.items()]
    digest = rule_runner.request(Digest, [CreateDigest(files)])
    return rule_runner.request(Snapshot, [digest])


@pytest.mark.platform_specific_behavior
@pytest.mark.parametrize(
    "major_minor_interpreter",
    all_major_minor_python_versions(Docformatter.default_interpreter_constraints),
)
def test_passing(rule_runner: RuleRunner, major_minor_interpreter: str) -> None:
    """Run docformatter on GOOD_FILE under each supported interpreter version."""
    rule_runner.write_files({"f.py": GOOD_FILE, "BUILD": "python_sources(name='t')"})
    tgt = rule_runner.get_target(Address("", target_name="t", relative_file_path="f.py"))
    lint_results, fmt_result = run_docformatter(
        rule_runner,
        [tgt],
        extra_args=[
            f"--docformatter-interpreter-constraints=['=={major_minor_interpreter}.*']"
        ],
    )
    # NOTE(review): the source fragment is truncated here — the original
    # assertions on lint_results / fmt_result are missing and must be
    # restored from the upstream file.
# NOTE(review): orphaned fragment — the tail of a helper whose `def` precedes
# this chunk and is not visible. Preserved, commented out:
#     return lint_results.results, fmt_result


def get_digest(rule_runner: RuleRunner, source_files: dict[str, str]) -> Digest:
    """Build an in-memory Digest from a mapping of file path -> file content."""
    files = [FileContent(path, content.encode()) for path, content in source_files.items()]
    return rule_runner.request(Digest, [CreateDigest(files)])


@pytest.mark.platform_specific_behavior
@pytest.mark.parametrize(
    "major_minor_interpreter",
    all_major_minor_python_versions(Black.default_interpreter_constraints),
)
def test_passing(rule_runner: RuleRunner, major_minor_interpreter: str) -> None:
    """Run Black on GOOD_FILE under each supported interpreter version."""
    rule_runner.write_files({"f.py": GOOD_FILE, "BUILD": "python_sources(name='t')"})
    tgt = rule_runner.get_target(Address("", target_name="t", relative_file_path="f.py"))
    # Special-cases 3.6 — presumably because Black requires >=3.6.2, so a bare
    # "==3.6.*" constraint would admit 3.6.0/3.6.1. TODO confirm upstream.
    interpreter_constraint = (
        ">=3.6.2,<3.7" if major_minor_interpreter == "3.6" else f"=={major_minor_interpreter}.*"
    )
    lint_results, fmt_result = run_black(
        rule_runner,
        [tgt],
        extra_args=[
            # NOTE(review): truncated in source — the original extra_args entries
            # (which use `interpreter_constraint`) and the test's assertions are
            # missing and must be restored from the upstream file.
        ],
    )
def assert_success(
    rule_runner: RuleRunner, target: Target, *, extra_args: list[str] | None = None
) -> None:
    """Run Flake8 on `target` and assert one clean result: exit 0, empty stdout, no report."""
    result = run_flake8(rule_runner, [target], extra_args=extra_args)
    assert len(result) == 1
    assert result[0].exit_code == 0
    assert result[0].stdout.strip() == ""
    assert result[0].report == EMPTY_DIGEST


@pytest.mark.platform_specific_behavior
@pytest.mark.parametrize(
    "major_minor_interpreter",
    all_major_minor_python_versions(PythonSetup.default_interpreter_constraints),
)
def test_passing(rule_runner: RuleRunner, major_minor_interpreter: str) -> None:
    """Flake8 passes on GOOD_FILE under each supported interpreter version."""
    rule_runner.write_files({"f.py": GOOD_FILE, "BUILD": "python_library(name='t')"})
    tgt = rule_runner.get_target(Address("", target_name="t", relative_file_path="f.py"))
    assert_success(
        rule_runner,
        tgt,
        extra_args=[
            f"--python-setup-interpreter-constraints=['=={major_minor_interpreter}.*']"
        ],
    )
# NOTE(review): orphaned fragment — the tail of a helper whose `def` precedes
# this chunk and is not visible. Preserved, commented out:
#     return lint_results.results, fmt_result


def get_digest(rule_runner: RuleRunner, source_files: dict[str, str]) -> Digest:
    """Build an in-memory Digest from a mapping of file path -> file content."""
    files = [FileContent(path, content.encode()) for path, content in source_files.items()]
    return rule_runner.request(Digest, [CreateDigest(files)])


@pytest.mark.platform_specific_behavior
@pytest.mark.parametrize(
    "major_minor_interpreter",
    all_major_minor_python_versions(Autoflake.default_interpreter_constraints),
)
def test_passing_source(rule_runner: RuleRunner, major_minor_interpreter: str) -> None:
    """Run autoflake on GOOD_FILE under each supported interpreter version."""
    rule_runner.write_files({"f.py": GOOD_FILE, "BUILD": "python_sources(name='t')"})
    tgt = rule_runner.get_target(Address("", target_name="t", relative_file_path="f.py"))
    lint_results, fmt_result = run_autoflake(
        rule_runner,
        [tgt],
        extra_args=[
            f"--autoflake-interpreter-constraints=['=={major_minor_interpreter}.*']"
        ],
    )
    # NOTE(review): the source fragment is truncated here — the original
    # assertions on lint_results / fmt_result are missing and must be
    # restored from the upstream file.
"src/protobuf/dir1/BUILD": "protobuf_sources(python_source_root='notasourceroot')", }) with engine_error(NoSourceRootError): assert_files_generated( rule_runner, Address("src/protobuf/dir1", relative_file_path="f.proto"), source_roots=["src/protobuf"], expected_files=[], ) @pytest.mark.platform_specific_behavior @pytest.mark.parametrize( "major_minor_interpreter", all_major_minor_python_versions( PythonProtobufMypyPlugin.default_interpreter_constraints), ) def test_mypy_plugin(rule_runner: RuleRunner, major_minor_interpreter: str) -> None: rule_runner.write_files({ "src/protobuf/dir1/f.proto": dedent("""\ syntax = "proto3"; package dir1; message Person { string name = 1; int32 id = 2; string email = 3; }
def run_coverage(tmpdir: str, *extra_args: str) -> PantsResult:
    """Run the coverage goal, asserting success and clean report generation.

    Regression guard: individual tests must not complain about failing to
    generate reports. This was showing up at test time, even though the final
    merged report would work properly.
    """
    result = run_coverage_that_may_fail(tmpdir, *extra_args)
    result.assert_success()
    assert "Failed to generate report" not in result.stderr
    return result


@pytest.mark.platform_specific_behavior
@pytest.mark.parametrize(
    "major_minor_interpreter",
    all_major_minor_python_versions(CoverageSubsystem.default_interpreter_constraints),
)
def test_coverage(major_minor_interpreter: str) -> None:
    """Coverage run produces the expected per-file report (truncated in source)."""
    with setup_tmpdir(SOURCES) as tmpdir:
        result = run_coverage(
            tmpdir,
            f"--coverage-py-interpreter-constraints=['=={major_minor_interpreter}.*']",
        )
        expected_report = dedent(f"""\
            Name Stmts Miss Cover
            ---------------------------------------------------------------------------------
            {tmpdir}/src/python/project/__init__.py 0 0 100%
            {tmpdir}/src/python/project/lib.py 6 0 100%
            {tmpdir}/src/python/project/lib_test.py 3 0 100%
            {tmpdir}/src/python/project/random.py 2 2 0%
            {tmpdir}/tests/python/project_test/__init__.py 0 0 100%
            """)
        # NOTE(review): truncated in source — the original was an
        # `assert (dedent(f...)` with further rows and a comparison target
        # (likely membership in the run's output); the report's column-alignment
        # whitespace was also destroyed by the line-mangling and cannot be
        # recovered here. Restore both from the upstream file.
        assert expected_report
# NOTE(review): orphaned fragment — the tail of a helper whose `def` precedes
# this chunk and is not visible. Preserved, commented out:
#         [PythonAwsLambdaFieldSet.create(target)])
#     assert (
#         " Runtime: python3.7",
#         " Handler: lambdex_handler.handler",
#     ) == built_asset.artifacts[0].extra_log_lines
#     digest_contents = rule_runner.request(DigestContents, [built_asset.digest])
#     assert len(digest_contents) == 1
#     relpath = built_asset.artifacts[0].relpath
#     assert relpath is not None
#     return relpath, digest_contents[0].content


@pytest.mark.platform_specific_behavior
@pytest.mark.parametrize(
    "major_minor_interpreter",
    all_major_minor_python_versions(Lambdex.default_interpreter_constraints),
)
def test_create_hello_world_lambda(rule_runner: RuleRunner, major_minor_interpreter: str) -> None:
    """Build a hello-world AWS lambda via lambdex (truncated in source)."""
    rule_runner.write_files({
        "src/python/foo/bar/hello_world.py": dedent("""
            def handler(event, context):
                print('Hello, World!')
            """),
        "src/python/foo/bar/BUILD": dedent("""
            python_library(name='lib')

            python_awslambda(
                name='lambda',
            """),
        # NOTE(review): source is truncated inside the BUILD-file literal above —
        # the remaining python_awslambda fields and the rest of the test are
        # missing; internal whitespace of both literals was reconstructed and
        # should be verified against the upstream file.
    })
def get_snapshot(rule_runner: RuleRunner, source_files: dict[str, str]) -> Snapshot:
    """Build an in-memory Snapshot from a mapping of file path -> file content."""
    files = [FileContent(path, content.encode()) for path, content in source_files.items()]
    digest = rule_runner.request(Digest, [CreateDigest(files)])
    return rule_runner.request(Snapshot, [digest])


@pytest.mark.platform_specific_behavior
@pytest.mark.parametrize(
    "major_minor_interpreter",
    all_major_minor_python_versions(PyUpgrade.default_interpreter_constraints),
)
def test_passing(rule_runner: RuleRunner, major_minor_interpreter: str) -> None:
    """Run pyupgrade on PY_36_GOOD_FILE under each supported interpreter version."""
    rule_runner.write_files({"f.py": PY_36_GOOD_FILE, "BUILD": "python_sources(name='t')"})
    tgt = rule_runner.get_target(Address("", target_name="t", relative_file_path="f.py"))
    lint_results, fmt_result = run_pyupgrade(
        rule_runner,
        [tgt],
        extra_args=[
            f"--pyupgrade-interpreter-constraints=['=={major_minor_interpreter}.*']"
        ],
    )
    # NOTE(review): the source fragment is truncated here — the original
    # assertions on lint_results / fmt_result are missing and must be
    # restored from the upstream file.
# NOTE(review): orphaned fragment — the tail of a test whose `def` precedes
# this chunk and is not visible. Preserved, commented out:
#     inferred_deps = rule_runner.request(
#         InferredDependencies,
#         [InferTerraformModuleDependenciesRequest(target.get(SourcesField))],
#     )
#     assert inferred_deps == InferredDependencies(
#         FrozenOrderedSet([
#             Address("src/tf/modules/foo"),
#             Address("src/tf/modules/foo/bar"),
#             Address("src/tf/resources/grok/subdir"),
#         ]),
#     )


@pytest.mark.platform_specific_behavior
@pytest.mark.parametrize(
    "major_minor_interpreter",
    all_major_minor_python_versions(TerraformHcl2Parser.default_interpreter_constraints),
)
def test_hcl_parser_wrapper_runs(rule_runner: RuleRunner, major_minor_interpreter: str) -> None:
    """Exercise the HCL2 parser wrapper under each interpreter version (truncated in source)."""
    rule_runner.set_options(
        [
            "--backend-packages=pants.backend.experimental.terraform",
            f"--terraform-hcl2-parser-interpreter-constraints=['=={major_minor_interpreter}.*']",
        ],
        env_inherit={"PATH", "PYENV_ROOT", "HOME"},
    )
    rule_runner.write_files({
        "foo/BUILD": "terraform_module(name='t')\n",
        # NOTE(review): source is truncated here — the value for the
        # "foo/bar.tf" entry and the rest of the test are missing and must be
        # restored from the upstream file:
        # "foo/bar.tf": ...,
    })