def test_undefined_env_var_behavior(
    rule_runner: RuleRunner,
    dockerfile_arg_value: str | None,
    extra_build_arg_value: str | None,
    expect: ContextManager,
) -> None:
    """Exercise ARG/`extra_build_args` combinations when building the Docker context.

    For each parametrized value, `None` means "omit entirely" and `""` means
    "declare without a value"; `expect` asserts success or the expected error.
    """
    # Render the Dockerfile ARG line (or nothing at all).
    if dockerfile_arg_value is None:
        dockerfile_arg = ""
    elif dockerfile_arg_value:
        dockerfile_arg = f"ARG MY_ARG={dockerfile_arg_value}"
    else:
        dockerfile_arg = "ARG MY_ARG"

    # Render the `extra_build_args` field for the BUILD file (or nothing at all).
    if extra_build_arg_value is None:
        extra_build_args = ""
    else:
        value_suffix = f"={extra_build_arg_value}" if extra_build_arg_value else ""
        extra_build_args = f'extra_build_args=["MY_ARG{value_suffix}"],'

    rule_runner.write_files(
        {
            "src/docker/BUILD": dedent(
                f"""\
                docker_image(
                  {extra_build_args}
                )
                """
            ),
            "src/docker/Dockerfile": dedent(
                f"""\
                FROM python:3.8
                {dockerfile_arg}
                """
            ),
        }
    )

    with expect:
        assert_build_context(
            rule_runner,
            Address("src/docker"),
            expected_files=["src/docker/Dockerfile"],
        )
def test_junit(rule_runner: RuleRunner) -> None:
    """`--pytest-junit-xml-dir` produces an XML results file under the given directory."""
    rule_runner.write_files(
        {f"{PACKAGE}/tests.py": GOOD_TEST, f"{PACKAGE}/BUILD": "python_tests()"}
    )
    target = rule_runner.get_target(Address(PACKAGE, relative_file_path="tests.py"))
    result = run_pytest(
        rule_runner, target, extra_args=["--pytest-junit-xml-dir=dist/test-results"]
    )

    assert result.exit_code == 0
    assert f"{PACKAGE}/tests.py ." in result.stdout
    assert result.xml_results is not None

    # The digest should contain the XML report, placed under the requested dir.
    contents = rule_runner.request(DigestContents, [result.xml_results.digest])
    xml_file = contents[0]
    assert xml_file.path.startswith("dist/test-results")
    assert b"pants_test.tests" in xml_file.content
def test_conftest_handling(rule_runner: RuleRunner) -> None:
    """Tests that we a) inject a dependency on conftest.py and b) skip running directly on
    conftest.py."""
    target = create_test_target(rule_runner, [GOOD_SOURCE])

    rule_runner.create_file(
        f"{SOURCE_ROOT}/conftest.py",
        "def pytest_runtest_setup(item):\n print('In conftest!')\n",
    )
    rule_runner.add_to_build_file(SOURCE_ROOT, "python_tests()")
    conftest_target = rule_runner.get_target(
        Address(SOURCE_ROOT, relative_file_path="conftest.py")
    )
    assert isinstance(conftest_target, PythonTests)

    # With capture disabled (-s), the conftest hook's print proves it was loaded.
    result = run_pytest(rule_runner, target, passthrough_args="-s")
    assert result.exit_code == 0
    assert f"{PACKAGE}/test_good.py In conftest!\n." in result.stdout

    # Running directly on the conftest target is skipped, signalled by a None exit code.
    result = run_pytest(rule_runner, conftest_target)
    assert result.exit_code is None
def test_config_file(rule_runner: RuleRunner, path: str, extra_args: list[str]) -> None:
    """isort honors its config file, whether discovered or passed via `extra_args`."""
    rule_runner.write_files(
        {
            "f.py": NEEDS_CONFIG_FILE,
            "BUILD": "python_sources(name='t', interpreter_constraints=['==3.9.*'])",
            path: "[isort]\ncombine_as_imports=True\n",
        }
    )
    target = rule_runner.get_target(Address("", target_name="t", relative_file_path="f.py"))
    lint_results, fmt_result = run_isort(rule_runner, [target], extra_args=extra_args)

    # Lint flags the file as mis-sorted...
    assert len(lint_results) == 1
    assert lint_results[0].exit_code == 1
    assert "f.py Imports are incorrectly sorted" in lint_results[0].stderr
    # ...and fmt rewrites it using the config-driven fix.
    assert fmt_result.stdout == "Fixing f.py\n"
    assert fmt_result.output == get_digest(rule_runner, {"f.py": FIXED_NEEDS_CONFIG_FILE})
    assert fmt_result.did_change is True
def test_failing(rule_runner: RuleRunner) -> None:
    """A shunit2 test with a failing assertion exits 1 and reports FAILED."""
    rule_runner.write_files(
        {
            "tests.sh": dedent(
                """\
                #!/usr/bin/bash
                testEquality() {
                  assertEquals 1 5
                }
                """
            ),
            "BUILD": "shunit2_tests(name='t')",
        }
    )
    target = rule_runner.get_target(
        Address("", target_name="t", relative_file_path="tests.sh")
    )
    test_result = run_shunit2(rule_runner, target)
    assert test_result.exit_code == 1
    assert "Ran 1 test.\n\nFAILED" in test_result.stdout
def test_generate_source_targets() -> None:
    """A `wsdl_sources` generator yields one `wsdl_source` per matching file."""
    rule_runner = RuleRunner(
        rules=[
            *target_types.rules(),
            QueryRule(_TargetParametrizations, [Address]),
        ],
        target_types=[WsdlSourcesGeneratorTarget],
    )
    source_root = "src/wsdl"
    rule_runner.write_files(
        {
            f"{source_root}/BUILD": dedent(
                """\
                wsdl_sources(
                    name="lib",
                    sources=["**/*.wsdl"]
                )
                """
            ),
            f"{source_root}/f1.wsdl": "",
            f"{source_root}/sub/f2.wsdl": "",
        }
    )

    def gen_tgt(rel_fp: str, tags: list[str] | None = None) -> WsdlSourceTarget:
        # The generated target we expect for a single .wsdl file.
        return WsdlSourceTarget(
            {SingleSourceField.alias: rel_fp, Tags.alias: tags},
            Address(source_root, target_name="lib", relative_file_path=rel_fp),
            residence_dir=os.path.dirname(os.path.join(source_root, rel_fp)),
        )

    generated = rule_runner.request(
        _TargetParametrizations, [Address(source_root, target_name="lib")]
    ).parametrizations
    assert set(generated.values()) == {gen_tgt("f1.wsdl"), gen_tgt("sub/f2.wsdl")}
def test_dict_string_to_string_sequence_field() -> None:
    """Validate value normalization and type checking for `DictStringToStringSequenceField`."""

    class Example(DictStringToStringSequenceField):
        alias = "example"

    addr = Address("", target_name="example")

    def assert_flexible_constructor(raw_value: Dict[str, Iterable[str]]) -> None:
        # Any iterable of strings should be normalized to a tuple.
        assert Example(raw_value, addr).value == FrozenDict(
            {k: tuple(v) for k, v in raw_value.items()}
        )

    for good in (("hello", "world"), ["hello", "world"], OrderedSet(["hello", "world"])):
        assert_flexible_constructor({"greeting": good})

    def assert_invalid_type(raw_value: Any) -> None:
        with pytest.raises(InvalidFieldTypeException):
            Example(raw_value, addr)

    for bad in (  # type: ignore[assignment]
        0,
        object(),
        "hello",
        ["hello"],
        {"hello": "world"},
        {0: ["world"]},
    ):
        assert_invalid_type(bad)

    # Regression test that a default can be set.
    class ExampleDefault(DictStringToStringSequenceField):
        alias = "example"
        # Note that we use `FrozenDict` so that the object can be hashable.
        default = FrozenDict({"default": ("val",)})

    assert ExampleDefault(None, addr).value == FrozenDict({"default": ("val",)})
def test_incorrect_signatures(rule_runner: RuleRunner) -> None:
    """Test/benchmark functions with the wrong signature fail generation with a clear error."""
    test_cases = [
        ("TestFoo(t *testing.T, a int)", "wrong signature for TestFoo"),
        ("TestFoo()", "wrong signature for TestFoo"),
        ("TestFoo(t *testing.B)", "wrong signature for TestFoo"),
        ("TestFoo(t *testing.M)", "wrong signature for TestFoo"),
        ("TestFoo(a int)", "wrong signature for TestFoo"),
        ("BenchmarkFoo(t *testing.B, a int)", "wrong signature for BenchmarkFoo"),
        ("BenchmarkFoo()", "wrong signature for BenchmarkFoo"),
        ("BenchmarkFoo(t *testing.T)", "wrong signature for BenchmarkFoo"),
        ("BenchmarkFoo(t *testing.M)", "wrong signature for BenchmarkFoo"),
        ("BenchmarkFoo(a int)", "wrong signature for BenchmarkFoo"),
    ]
    for test_sig, err_msg in test_cases:
        input_digest = rule_runner.make_snapshot(
            {
                "foo_test.go": dedent(
                    f"""
                    package foo
                    func {test_sig} {{
                    }}
                    """
                ),
            },
        ).digest
        result = rule_runner.request(
            GeneratedTestMain,
            [
                GenerateTestMainRequest(
                    input_digest,
                    FrozenOrderedSet(["foo_test.go"]),
                    FrozenOrderedSet(),
                    "foo",
                    Address("foo"),
                )
            ],
        )
        # Generation should fail, not succeed, for every bad signature.
        assert result.failed_exit_code_and_stderr is not None
        exit_code, stderr = result.failed_exit_code_and_stderr
        assert exit_code == 1
        assert err_msg in stderr
def test_docker_build_labels_option(rule_runner: RuleRunner) -> None:
    """`image_labels` may interpolate `extra_build_args` values into `--label` flags."""
    rule_runner.write_files(
        {
            "docker/test/BUILD": dedent(
                """\
                docker_image(
                  name="img1",
                  extra_build_args=[
                    "BUILD_SLAVE=tbs06",
                    "BUILD_NUMBER=13934",
                  ],
                  image_labels={
                    "build.host": "{build_args.BUILD_SLAVE}",
                    "build.job": "{build_args.BUILD_NUMBER}",
                  }
                )
                """
            ),
        }
    )

    def check_docker_proc(process: Process):
        # Labels carry the interpolated build-arg values; build args appear sorted.
        assert process.argv == (
            "/dummy/docker",
            "build",
            "--label",
            "build.host=tbs06",
            "--label",
            "build.job=13934",
            "--tag",
            "img1:latest",
            "--build-arg",
            "BUILD_NUMBER=13934",
            "--build-arg",
            "BUILD_SLAVE=tbs06",
            "--file",
            "docker/test/Dockerfile",
            ".",
        )

    assert_build(
        rule_runner,
        Address("docker/test", target_name="img1"),
        process_assertions=check_docker_proc,
    )
def test_extra_env_vars(rule_runner: RuleRunner) -> None:
    """Env vars flow in from both the subsystem option and the target's `extra_env_vars`,
    with the target field winning on conflicts (as asserted inside the generated test)."""
    rule_runner.write_files(
        {
            f"{PACKAGE}/test_extra_env_vars.py": dedent(
                """\
                import os

                def test_args():
                    assert os.getenv("ARG_WITH_VALUE_VAR") == "arg_with_value_var"
                    assert os.getenv("ARG_WITHOUT_VALUE_VAR") == "arg_without_value_value"
                    assert os.getenv("PYTHON_TESTS_VAR_WITH_VALUE") == "python_tests_var_with_value"
                    assert os.getenv("PYTHON_TESTS_VAR_WITHOUT_VALUE") == "python_tests_var_without_value"
                    assert os.getenv("PYTHON_TESTS_OVERRIDE_WITH_VALUE_VAR") == "python_tests_override_with_value_var_override"
                """
            ),
            f"{PACKAGE}/BUILD": dedent(
                """\
                python_tests(
                    extra_env_vars=(
                        "PYTHON_TESTS_VAR_WITHOUT_VALUE",
                        "PYTHON_TESTS_VAR_WITH_VALUE=python_tests_var_with_value",
                        "PYTHON_TESTS_OVERRIDE_WITH_VALUE_VAR=python_tests_override_with_value_var_override",
                    )
                )
                """
            ),
        }
    )
    target = rule_runner.get_target(
        Address(PACKAGE, relative_file_path="test_extra_env_vars.py")
    )
    result = run_pytest(
        rule_runner,
        target,
        extra_args=[
            '--test-extra-env-vars=["ARG_WITH_VALUE_VAR=arg_with_value_var", "ARG_WITHOUT_VALUE_VAR", "PYTHON_TESTS_OVERRIDE_ARG_WITH_VALUE_VAR"]'
        ],
        env={
            "ARG_WITHOUT_VALUE_VAR": "arg_without_value_value",
            "PYTHON_TESTS_VAR_WITHOUT_VALUE": "python_tests_var_without_value",
            "PYTHON_TESTS_OVERRIDE_WITH_VALUE_VAR": "python_tests_override_with_value_var",
        },
    )
    assert result.exit_code == 0
def test_has_fields() -> None:
    """Exercise the instance- and class-level field-membership APIs of Target."""
    empty_union_membership = UnionMembership({})
    tgt = FortranTarget({}, address=Address("", target_name="lib"))

    assert tgt.field_types == (FortranExtensions, FortranSources)
    assert FortranTarget.class_field_types(union_membership=empty_union_membership) == (
        FortranExtensions,
        FortranSources,
    )

    # The empty list is trivially satisfied.
    assert tgt.has_fields([]) is True
    assert FortranTarget.class_has_fields([], union_membership=empty_union_membership) is True

    # A registered field is found through every API.
    assert tgt.has_fields([FortranExtensions]) is True
    assert tgt.has_field(FortranExtensions) is True
    assert (
        FortranTarget.class_has_fields(
            [FortranExtensions], union_membership=empty_union_membership
        )
        is True
    )
    assert (
        FortranTarget.class_has_field(FortranExtensions, union_membership=empty_union_membership)
        is True
    )

    # An unregistered field is not found.
    assert tgt.has_fields([UnrelatedField]) is False
    assert tgt.has_field(UnrelatedField) is False
    assert (
        FortranTarget.class_has_fields([UnrelatedField], union_membership=empty_union_membership)
        is False
    )
    assert (
        FortranTarget.class_has_field(UnrelatedField, union_membership=empty_union_membership)
        is False
    )

    # Mixing registered and unregistered fields fails the membership check.
    assert tgt.has_fields([FortranExtensions, UnrelatedField]) is False
    assert (
        FortranTarget.class_has_fields(
            [FortranExtensions, UnrelatedField], union_membership=empty_union_membership
        )
        is False
    )
def assert_analysis(
    dir_path: str,
    *,
    imports: list[str],
    test_imports: list[str],
    xtest_imports: list[str],
    go_files: list[str],
    test_files: list[str],
    xtest_files: list[str],
) -> None:
    """Assert both the package analysis and the package digest for `foo/<dir_path>`.

    Relies on `rule_runner` from the enclosing scope. The keyword args are the
    expected values for the corresponding analysis attributes.
    """
    addr = Address(os.path.join("foo", dir_path))

    maybe_analysis = rule_runner.request(
        FallibleFirstPartyPkgAnalysis, [FirstPartyPkgAnalysisRequest(addr)]
    )
    assert maybe_analysis.analysis is not None
    analysis = maybe_analysis.analysis
    assert analysis.imports == tuple(imports)
    assert analysis.test_imports == tuple(test_imports)
    assert analysis.xtest_imports == tuple(xtest_imports)
    assert analysis.go_files == tuple(go_files)
    assert analysis.test_go_files == tuple(test_files)
    assert analysis.xtest_go_files == tuple(xtest_files)
    assert not analysis.s_files
    assert analysis.minimum_go_version == "1.16"
    assert analysis.embed_patterns == ()
    assert analysis.test_embed_patterns == ()
    assert analysis.xtest_embed_patterns == ()

    maybe_digest = rule_runner.request(
        FallibleFirstPartyPkgDigest, [FirstPartyPkgDigestRequest(addr)]
    )
    assert maybe_digest.pkg_digest is not None
    pkg_digest = maybe_digest.pkg_digest

    # The digest should contain exactly the package's .go files.
    actual_snapshot = rule_runner.request(Snapshot, [pkg_digest.digest])
    expected_snapshot = rule_runner.request(Snapshot, [PathGlobs([f"foo/{dir_path}/*.go"])])
    assert actual_snapshot == expected_snapshot

    assert pkg_digest.embed_config is None
    # Fix: the original asserted `xtest_embed_config is None` twice; the duplicate is
    # removed. NOTE(review): the duplicate may have been meant as a `test_embed_config`
    # check — confirm against the FirstPartyPkgDigest definition and add it if so.
    assert pkg_digest.xtest_embed_config is None
def test_relocated_relocated_files() -> None:
    """A `relocated_files` target may consume another `relocated_files` target, applying
    both relocations in sequence."""
    rule_runner = RuleRunner(
        rules=[
            *target_type_rules(),
            *archive.rules(),
            *source_files.rules(),
            *system_binaries.rules(),
            QueryRule(GeneratedSources, [RelocateFilesViaCodegenRequest]),
            QueryRule(TransitiveTargets, [TransitiveTargetsRequest]),
            QueryRule(SourceFiles, [SourceFilesRequest]),
        ],
        target_types=[FilesGeneratorTarget, RelocatedFiles],
    )
    rule_runner.write_files(
        {
            "original_prefix/file.txt": "",
            "BUILD": dedent(
                """\
                files(name="original", sources=["original_prefix/file.txt"])

                relocated_files(
                    name="relocated",
                    files_targets=[":original"],
                    src="original_prefix",
                    dest="intermediate_prefix",
                )

                relocated_files(
                    name="double_relocated",
                    files_targets=[":relocated"],
                    src="intermediate_prefix",
                    dest="final_prefix",
                )
                """
            ),
        }
    )
    target = rule_runner.get_target(Address("", target_name="double_relocated"))
    generated = rule_runner.request(
        GeneratedSources, [RelocateFilesViaCodegenRequest(EMPTY_SNAPSHOT, target)]
    )
    # original_prefix -> intermediate_prefix -> final_prefix.
    assert generated.snapshot.files == ("final_prefix/file.txt",)
def test_includes_direct_dependencies(rule_runner: RuleRunner) -> None:
    """Direct dependencies are made available to shellcheck (`--external-sources`), and the
    lint still succeeds even though a transitive dep contains a bad file."""
    rule_runner.write_files(
        {
            "transitive_dep.sh": BAD_FILE,
            "dep.sh": GOOD_FILE,
            "f.sh": "# shellcheck shell=bash\nsource dep.sh\n",
            "BUILD": dedent(
                """\
                shell_library(name='transitive', sources=['transitive_dep.sh'])
                shell_library(name='dep', sources=['dep.sh'], dependencies=[':transitive'])
                shell_library(name='t', sources=['f.sh'], dependencies=[':dep'])
                """
            ),
        }
    )
    target = rule_runner.get_target(Address("", target_name="t", relative_file_path="f.sh"))
    assert_success(rule_runner, target, extra_args=["--shellcheck-args=--external-sources"])
def test_force(rule_runner: RuleRunner) -> None:
    """`--test-force` bypasses memoization; without it, repeat runs return the cached result."""
    rule_runner.write_files(
        {f"{PACKAGE}/tests.py": GOOD_TEST, f"{PACKAGE}/BUILD": "python_tests()"}
    )
    target = rule_runner.get_target(Address(PACKAGE, relative_file_path="tests.py"))

    # Should not receive a memoized result if force=True.
    first = run_pytest(rule_runner, target, extra_args=["--test-force"])
    second = run_pytest(rule_runner, target, extra_args=["--test-force"])
    assert first.exit_code == 0
    assert second.exit_code == 0
    assert first is not second

    # But should if force=False.
    first = run_pytest(rule_runner, target)
    second = run_pytest(rule_runner, target)
    assert first.exit_code == 0
    assert first is second
def test_config_file(rule_runner: RuleRunner) -> None:
    """A pytest.ini at the build root is discovered and applied (here `-s` disables capture)."""
    rule_runner.write_files(
        {
            "pytest.ini": dedent(
                """\
                [pytest]
                addopts = -s
                """
            ),
            f"{PACKAGE}/tests.py": dedent(
                """\
                def test():
                    print("All good!")
                """
            ),
            f"{PACKAGE}/BUILD": "python_tests()",
        }
    )
    target = rule_runner.get_target(Address(PACKAGE, relative_file_path="tests.py"))
    result = run_pytest(rule_runner, target)
    assert result.exit_code == 0
    # With capture disabled, the print shows up directly rather than as "Captured" output.
    assert "All good!" in result.stdout and "Captured" not in result.stdout
def complete_platform(rule_runner: RuleRunner) -> bytes:
    """Build a pex of Pex itself, run its interpreter inspection, and return the stdout bytes."""
    rule_runner.write_files(
        {
            "pex_exe/BUILD": dedent(
                """\
                python_requirement(name="req", requirements=["pex==2.1.66"])
                pex_binary(dependencies=[":req"], script="pex")
                """
            ),
        }
    )
    built = rule_runner.request(
        BuiltPackage, [PexBinaryFieldSet.create(rule_runner.get_target(Address("pex_exe")))]
    )
    # Materialize the built pex into the build root so it can be executed.
    rule_runner.write_digest(built.digest)
    pex_executable = os.path.join(rule_runner.build_root, "pex_exe/pex_exe.pex")
    completed = subprocess.run(
        args=[pex_executable, "interpreter", "inspect", "-mt"],
        env=dict(PEX_MODULE="pex.cli", **os.environ),
        check=True,
        stdout=subprocess.PIPE,
    )
    return completed.stdout
def test_respects_config_file(rule_runner: RuleRunner) -> None:
    """`--shfmt-config` points shfmt at an .editorconfig that changes its formatting."""
    rule_runner.write_files(
        {
            "f.sh": NEEDS_CONFIG_FILE,
            "BUILD": "shell_library(name='t')",
            ".editorconfig": "[*.sh]\nswitch_case_indent = true\n",
        }
    )
    target = rule_runner.get_target(Address("", target_name="t", relative_file_path="f.sh"))
    lint_results, fmt_result = run_shfmt(
        rule_runner, [target], extra_args=["--shfmt-config=.editorconfig"]
    )

    # Lint reports a diff against the configured style...
    assert len(lint_results) == 1
    assert lint_results[0].exit_code == 1
    assert "f.sh.orig" in lint_results[0].stdout
    # ...and fmt rewrites the file accordingly.
    assert fmt_result.stdout == "f.sh\n"
    assert fmt_result.output == get_digest(rule_runner, {"f.sh": FIXED_NEEDS_CONFIG_FILE})
    assert fmt_result.did_change is True
def test_passing(rule_runner: RuleRunner, major_minor_interpreter: str) -> None:
    """An already well-formatted file passes docformatter under each parametrized interpreter."""
    rule_runner.write_files({"f.py": GOOD_FILE, "BUILD": "python_sources(name='t')"})
    target = rule_runner.get_target(Address("", target_name="t", relative_file_path="f.py"))
    lint_results, fmt_result = run_docformatter(
        rule_runner,
        [target],
        extra_args=[
            f"--docformatter-interpreter-constraints=['=={major_minor_interpreter}.*']"
        ],
    )
    assert len(lint_results) == 1
    assert lint_results[0].exit_code == 0
    assert lint_results[0].stderr == ""
    # The formatter leaves the file untouched.
    assert fmt_result.output == get_snapshot(rule_runner, {"f.py": GOOD_FILE})
    assert fmt_result.did_change is False
def test_get_context_root(
    context_root: str | None, default_context_root: str, expected_context_root: str | ContextManager
) -> None:
    """`get_context_root` resolves the field value against the subsystem default, or raises.

    When `expected_context_root` is a string we expect success with that value;
    otherwise it is a context manager asserting the expected exception.
    """
    if isinstance(expected_context_root, str):
        raises = cast("ContextManager", no_exception())
    else:
        raises = expected_context_root
        expected_context_root = ""
    with raises:
        docker_options = create_subsystem(
            DockerOptions,
            default_context_root=default_context_root,
        )
        address = Address("src/docker", target_name="image")
        target = DockerImageTarget({"context_root": context_root}, address)
        field_set = DockerFieldSet.create(target)
        actual = field_set.get_context_root(docker_options.default_context_root)
        if expected_context_root:
            assert actual == expected_context_root
def test_invalid_package(rule_runner) -> None:
    """Analyzing a syntactically invalid .go file fails and surfaces the parser's error."""
    rule_runner.write_files(
        {
            "BUILD": "go_mod(name='mod')\ngo_package(name='pkg')",
            "go.mod": dedent(
                """\
                module go.example.com/foo

                go 1.17
                """
            ),
            "bad.go": "invalid!!!",
        }
    )
    maybe_analysis = rule_runner.request(
        FallibleFirstPartyPkgAnalysis,
        [FirstPartyPkgAnalysisRequest(Address("", target_name="pkg"))],
    )
    assert maybe_analysis.analysis is None
    assert maybe_analysis.exit_code == 1
    assert "bad.go:1:1: expected 'package', found invalid\n" in maybe_analysis.stderr
def run_black_and_isort(
    rule_runner: RuleRunner,
    source_files: List[FileContent],
    *,
    name: str,
    extra_args: Optional[List[str]] = None,
) -> LanguageFmtResults:
    """Materialize `source_files` and run the black+isort language formatter over them."""
    for fc in source_files:
        rule_runner.create_file(fc.path, fc.content.decode())
    fmt_targets = PythonFmtTargets(
        Targets([PythonLibrary({}, address=Address("test", target_name=name))])
    )
    rule_runner.set_options(
        [
            "--backend-packages=['pants.backend.python.lint.black', 'pants.backend.python.lint.isort']",
            *(extra_args or []),
        ]
    )
    return rule_runner.request(LanguageFmtResults, [fmt_targets])
def assert_python_requirements(
    rule_runner: RuleRunner,
    build_file_entry: str,
    requirements_txt: str,
    *,
    expected_targets: set[Target],
    requirements_txt_relpath: str = "requirements.txt",
) -> None:
    """Write the BUILD entry and requirements file, then assert the generated targets."""
    rule_runner.write_files(
        {"BUILD": build_file_entry, requirements_txt_relpath: requirements_txt}
    )
    result = rule_runner.request(
        _TargetParametrizations,
        [
            _TargetParametrizationsRequest(
                Address("", target_name="reqs"), description_of_origin="tests"
            )
        ],
    )
    assert set(result.parametrizations.values()) == expected_targets
def test_conftest_dependency_injection(rule_runner: RuleRunner) -> None:
    """An ancestor conftest.py is injected as a dependency so its hooks run for the tests."""
    # See `test_skip_tests` for a test that we properly skip running on conftest.py.
    rule_runner.write_files(
        {
            f"{SOURCE_ROOT}/conftest.py": dedent(
                """\
                def pytest_runtest_setup(item):
                    print('In conftest!')
                """
            ),
            f"{SOURCE_ROOT}/BUILD": "python_test_utils()",
            f"{PACKAGE}/tests.py": GOOD_TEST,
            f"{PACKAGE}/BUILD": "python_tests()",
        }
    )
    target = rule_runner.get_target(Address(PACKAGE, relative_file_path="tests.py"))
    result = run_pytest(rule_runner, target, extra_args=["--pytest-args='-s'"])
    assert result.exit_code == 0
    assert f"{PACKAGE}/tests.py In conftest!\n." in result.stdout
def test_entry_point_validation(caplog: LogCaptureFixture) -> None:
    """Malformed entry points raise; a trailing colon is accepted with a warning."""
    addr = Address("src/python/project")

    # Clearly malformed values are rejected outright.
    with pytest.raises(InvalidFieldException):
        PexEntryPointField(" ", address=addr)
    with pytest.raises(InvalidFieldException):
        PexEntryPointField("modue:func:who_knows_what_this_is", address=addr)
    with pytest.raises(InvalidFieldException):
        PexEntryPointField(":func", address=addr)

    # A trailing ":" is tolerated: the value parses without it, but a warning is logged
    # that mentions both the raw value and the target address.
    ep = "custom.entry_point:"
    with caplog.at_level(logging.WARNING):
        assert EntryPoint("custom.entry_point") == PexEntryPointField(ep, address=addr).value
    assert len(caplog.record_tuples) == 1
    _, levelno, message = caplog.record_tuples[0]
    assert logging.WARNING == levelno
    assert ep in message
    assert str(addr) in message
def test_bad_python_source_root(rule_runner: RuleRunner) -> None:
    """A `python_source_root` that is not a configured source root raises NoSourceRootError."""
    rule_runner.write_files(
        {
            "src/protobuf/dir1/f.proto": dedent(
                """\
                syntax = "proto3";
                package dir1;
                """
            ),
            "src/protobuf/dir1/BUILD": "protobuf_sources(python_source_root='notasourceroot')",
        }
    )
    with engine_error(NoSourceRootError):
        assert_files_generated(
            rule_runner,
            Address("src/protobuf/dir1", relative_file_path="f.proto"),
            source_roots=["src/protobuf"],
            expected_files=[],
        )
def test_force(rule_runner: RuleRunner) -> None:
    """`--test-force` bypasses memoization; without it, repeat runs return the cached result."""
    rule_runner.write_files({"tests.sh": GOOD_TEST, "BUILD": "shunit2_tests(name='t')"})
    target = rule_runner.get_target(
        Address("", target_name="t", relative_file_path="tests.sh")
    )

    # Should not receive a memoized result if force=True.
    first = run_shunit2(rule_runner, target, extra_args=["--test-force"])
    second = run_shunit2(rule_runner, target, extra_args=["--test-force"])
    assert first.exit_code == 0
    assert second.exit_code == 0
    assert first is not second

    # But should if force=False.
    first = run_shunit2(rule_runner, target)
    second = run_shunit2(rule_runner, target)
    assert first.exit_code == 0
    assert first is second
def test_internal_test_fails(rule_runner: RuleRunner) -> None:
    """A failing Go unit test yields exit code 1 and names the failing test in stdout."""
    rule_runner.write_files(
        {
            "foo/BUILD": "go_mod(name='mod')\ngo_package()",
            "foo/go.mod": "module foo",
            "foo/bar_test.go": textwrap.dedent(
                """
                package foo

                import "testing"

                func TestAdd(t *testing.T) {
                  t.Fail()
                }
                """
            ),
        }
    )
    target = rule_runner.get_target(Address("foo"))
    result = rule_runner.request(TestResult, [GoTestFieldSet.create(target)])
    assert result.exit_code == 1
    assert "FAIL: TestAdd" in result.stdout
def assert_streaming_output(
    *,
    exit_code: Optional[int],
    stdout: str = "stdout",
    stderr: str = "stderr",
    output_setting: ShowOutput = ShowOutput.ALL,
    expected_level: LogLevel,
    expected_message: str,
) -> None:
    """Build an EnrichedTestResult and assert its streaming log level and message."""
    test_result = EnrichedTestResult(
        exit_code=exit_code,
        stdout=stdout,
        stdout_digest=EMPTY_FILE_DIGEST,
        stderr=stderr,
        stderr_digest=EMPTY_FILE_DIGEST,
        output_setting=output_setting,
        address=Address("demo_test"),
    )
    assert test_result.level() == expected_level
    assert test_result.message() == expected_message
def assert_poetry_requirements(
    rule_runner: RuleRunner,
    build_file_entry: str,
    pyproject_toml: str,
    *,
    expected_targets: set[Target],
    pyproject_toml_relpath: str = "pyproject.toml",
) -> None:
    """Write the BUILD entry and pyproject.toml, then assert the generated targets."""
    rule_runner.write_files(
        {"BUILD": build_file_entry, pyproject_toml_relpath: pyproject_toml}
    )
    result = rule_runner.request(
        _TargetParametrizations,
        [
            _TargetParametrizationsRequest(
                Address("", target_name="reqs"), description_of_origin="tests"
            )
        ],
    )
    assert set(result.parametrizations.values()) == expected_targets