def test_find_putative_targets(rule_runner: RuleRunner) -> None:
    """Unowned Python sources and tests yield putative library/tests targets.

    Sources already owned by the existing BUILD file must be excluded.
    """
    # Use a single write_files() call instead of a create_file() loop, for
    # consistency with the other tests in this file and fewer round trips.
    rule_runner.write_files(
        {
            "src/python/foo/__init__.py": "",
            "src/python/foo/bar/BUILD": "python_library(sources=['__init__.py', 'baz1.py'])",
            "src/python/foo/bar/__init__.py": "",
            "src/python/foo/bar/baz1.py": "",
            "src/python/foo/bar/baz1_test.py": "",
            "src/python/foo/bar/baz2.py": "",
            "src/python/foo/bar/baz2_test.py": "",
            "src/python/foo/bar/baz3.py": "",
        }
    )
    pts = rule_runner.request(PutativeTargets, [PutativePythonTargetsRequest()])
    assert (
        PutativeTargets(
            [
                PutativeTarget.for_target_type(
                    PythonLibrary, "src/python/foo", "foo", ["__init__.py"]
                ),
                PutativeTarget.for_target_type(
                    PythonLibrary, "src/python/foo/bar", "bar", ["baz2.py", "baz3.py"]
                ),
                PutativeTarget.for_target_type(
                    PythonTests,
                    "src/python/foo/bar",
                    "tests",
                    ["baz1_test.py", "baz2_test.py"],
                    kwargs={"name": "tests"},
                ),
            ]
        )
        == pts
    )
def test_find_putative_targets_subset(rule_runner: RuleRunner) -> None:
    """Only the requested search paths are scanned, and owned sources are skipped."""
    rel_paths = ("bar/bar.sh", "bar/bar_test.sh", "baz/baz.sh", "baz/baz_test.sh", "qux/qux.sh")
    rule_runner.write_files({f"src/sh/foo/{rel}": "" for rel in rel_paths})
    result = rule_runner.request(
        PutativeTargets,
        [
            PutativeShellTargetsRequest(
                PutativeTargetsSearchPaths(("src/sh/foo/bar", "src/sh/foo/qux"))
            ),
            AllOwnedSources(["src/sh/foo/bar/bar.sh"]),
        ],
    )
    expected = PutativeTargets(
        [
            PutativeTarget.for_target_type(
                Shunit2TestsGeneratorTarget,
                path="src/sh/foo/bar",
                name="tests",
                triggering_sources=["bar_test.sh"],
            ),
            PutativeTarget.for_target_type(
                ShellSourcesGeneratorTarget,
                path="src/sh/foo/qux",
                name=None,
                triggering_sources=["qux.sh"],
            ),
        ]
    )
    assert result == expected
def test_ignore_solitary_init(rule_runner: RuleRunner) -> None:
    """A directory containing only an __init__.py gets no putative target of its own."""
    rel_paths = ("__init__.py", "bar/__init__.py", "bar/bar.py", "baz/__init__.py", "qux/qux.py")
    rule_runner.write_files({f"src/python/foo/{rel}": "" for rel in rel_paths})
    result = rule_runner.request(
        PutativeTargets,
        [
            PutativePythonTargetsRequest(PutativeTargetsSearchPaths(("",))),
            AllOwnedSources([]),
        ],
    )
    expected = PutativeTargets(
        [
            PutativeTarget.for_target_type(
                PythonSourcesGeneratorTarget,
                "src/python/foo/bar",
                "bar",
                ["__init__.py", "bar.py"],
            ),
            PutativeTarget.for_target_type(
                PythonSourcesGeneratorTarget, "src/python/foo/qux", "qux", ["qux.py"]
            ),
        ]
    )
    assert result == expected
def test_find_putative_targets() -> None:
    """Unowned .tf files yield one `TerraformModules` putative target per source root."""
    rule_runner = RuleRunner(
        rules=[
            *core_tailor_rules(),
            *terraform_tailor_rules(),
            QueryRule(PutativeTargets, [PutativeTerraformTargetsRequest, AllOwnedSources]),
            QueryRule(AllOwnedSources, ()),
        ],
        target_types=[
            TerraformModule,
            TerraformModules,
        ],
    )
    # NB: "service1/src/terraform/versions.tf" was listed twice here; the dict
    # comprehension silently collapsed the duplicate, so it is removed.
    rule_runner.write_files(
        {
            fp: ""
            for fp in (
                "prod/terraform/resources/foo/versions.tf",
                "prod/terraform/resources/bar/versions.tf",
                "prod/terraform/modules/bar/versions.tf",
                "prod/terraform/modules/bar/hello/versions.tf",
                "prod/terraform/modules/world/versions.tf",
                "service1/src/terraform/versions.tf",
                "service1/src/terraform/foo/versions.tf",
                "service2/src/terraform/versions.tf",
            )
        }
    )
    pts = rule_runner.request(
        PutativeTargets,
        [
            PutativeTerraformTargetsRequest(PutativeTargetsSearchPaths(("",))),
            AllOwnedSources(
                [
                    "src/terraform/root.tf",
                    "src/terraform/owned-module/main.tf",
                    "src/terraform/owned-module/foo.tf",
                ]
            ),
        ],
    )
    assert (
        PutativeTargets(
            [
                PutativeTarget.for_target_type(
                    TerraformModules,
                    "prod/terraform",
                    "tf_mods",
                    ("prod/terraform/**/*.tf",),
                ),
                PutativeTarget.for_target_type(
                    TerraformModules,
                    "service1/src/terraform",
                    "tf_mods",
                    ("service1/src/terraform/**/*.tf",),
                ),
                PutativeTarget.for_target_type(
                    TerraformModules,
                    "service2/src/terraform",
                    "tf_mods",
                    ("service2/src/terraform/**/*.tf",),
                ),
            ]
        )
        == pts
    )
def test_find_putative_targets_subset(rule_runner: RuleRunner) -> None:
    """Restricting search paths limits which directories receive putative targets."""
    rel_paths = (
        "__init__.py",
        "bar/__init__.py",
        "bar/bar.py",
        "bar/bar_test.py",
        "baz/baz.py",
        "baz/baz_test.py",
        "qux/qux.py",
    )
    rule_runner.write_files({f"src/python/foo/{rel}": "" for rel in rel_paths})
    result = rule_runner.request(
        PutativeTargets,
        [
            PutativePythonTargetsRequest(
                PutativeTargetsSearchPaths(("src/python/foo/bar", "src/python/foo/qux"))
            ),
            AllOwnedSources(["src/python/foo/bar/__init__.py", "src/python/foo/bar/bar.py"]),
        ],
    )
    expected = PutativeTargets(
        [
            PutativeTarget.for_target_type(
                PythonTestsGeneratorTarget, "src/python/foo/bar", "tests", ["bar_test.py"]
            ),
            PutativeTarget.for_target_type(
                PythonSourcesGeneratorTarget, "src/python/foo/qux", None, ["qux.py"]
            ),
        ]
    )
    assert result == expected
def test_find_putative_targets(rule_runner: RuleRunner) -> None:
    """Unowned .thrift files are grouped per directory into putative targets."""
    rule_runner.write_files(
        {
            "thrifts/foo/f.thrift": "",
            "thrifts/foo/bar/baz1.thrift": "",
            "thrifts/foo/bar/baz2.thrift": "",
            "thrifts/foo/bar/baz3.thrift": "",
        }
    )
    result = rule_runner.request(
        PutativeTargets,
        [
            PutativeThriftTargetsRequest(PutativeTargetsSearchPaths(("",))),
            AllOwnedSources(["thrifts/foo/bar/baz1.thrift"]),
        ],
    )
    expected = PutativeTargets(
        [
            PutativeTarget.for_target_type(
                ThriftSourcesGeneratorTarget,
                path="thrifts/foo",
                name=None,
                triggering_sources=["f.thrift"],
            ),
            PutativeTarget.for_target_type(
                ThriftSourcesGeneratorTarget,
                path="thrifts/foo/bar",
                name=None,
                triggering_sources=["baz2.thrift", "baz3.thrift"],
            ),
        ]
    )
    assert result == expected
def test_find_putative_targets_subset(rule_runner: RuleRunner) -> None:
    """Only the requested proto directories are considered for putative targets."""
    rule_runner.write_files(
        {
            "protos/foo/f.proto": "",
            "protos/foo/bar/bar.proto": "",
            "protos/foo/baz/baz.proto": "",
            "protos/foo/qux/qux.proto": "",
        }
    )
    result = rule_runner.request(
        PutativeTargets,
        [
            PutativeProtobufTargetsRequest(
                PutativeTargetsSearchPaths(("protos/foo/bar", "protos/foo/qux"))
            ),
            AllOwnedSources([]),
        ],
    )
    expected = PutativeTargets(
        [
            PutativeTarget.for_target_type(
                ProtobufSourcesGeneratorTarget,
                path="protos/foo/bar",
                name=None,
                triggering_sources=["bar.proto"],
            ),
            PutativeTarget.for_target_type(
                ProtobufSourcesGeneratorTarget,
                path="protos/foo/qux",
                name=None,
                triggering_sources=["qux.proto"],
            ),
        ]
    )
    assert result == expected
def test_find_putative_targets(rule_runner: RuleRunner) -> None:
    """Unowned Java sources/tests yield java_sources and junit_tests putative targets."""
    rule_runner.write_files(
        {
            "src/java/owned/BUILD": "java_sources()\n",
            "src/java/owned/OwnedFile.java": "package owned",
            "src/java/unowned/UnownedFile.java": "package unowned\n",
            "src/java/unowned/UnownedFileTest.java": "package unowned\n",
        }
    )
    result = rule_runner.request(
        PutativeTargets,
        [
            PutativeJavaTargetsRequest(PutativeTargetsSearchPaths(("",))),
            AllOwnedSources(["src/java/owned/OwnedFile.java"]),
        ],
    )
    expected = PutativeTargets(
        [
            PutativeTarget.for_target_type(
                JavaSourcesGeneratorTarget, "src/java/unowned", "unowned", ["UnownedFile.java"]
            ),
            PutativeTarget.for_target_type(
                JunitTestsGeneratorTarget,
                "src/java/unowned",
                "tests",
                ["UnownedFileTest.java"],
                kwargs={"name": "tests"},
            ),
        ]
    )
    assert result == expected
def test_find_go_package_targets(rule_runner: RuleRunner) -> None:
    """go_package targets are proposed for unowned dirs, skipping testdata/vendor subtrees."""
    rule_runner.write_files(
        {
            "unowned/f.go": "",
            "unowned/f1.go": "",
            "owned/f.go": "",
            "owned/BUILD": "go_package()",
            # `.go` files under a `testdata` or `vendor` directory must be ignored...
            "unowned/testdata/f.go": "",
            "unowned/testdata/subdir/f.go": "",
            "unowned/vendor/example.com/foo/bar.go": "",
            # ...unless `vendor` is the final path component.
            "unowned/cmd/vendor/main.go": "",
        }
    )
    result = rule_runner.request(
        PutativeTargets,
        [
            PutativeGoTargetsRequest(PutativeTargetsSearchPaths(("",))),
            AllOwnedSources(["owned/f.go"]),
        ],
    )
    expected = PutativeTargets(
        [
            PutativeTarget.for_target_type(
                GoPackageTarget,
                path="unowned",
                name=None,
                triggering_sources=["f.go", "f1.go"],
            ),
            PutativeTarget.for_target_type(
                GoPackageTarget,
                path="unowned/cmd/vendor",
                name=None,
                triggering_sources=["main.go"],
            ),
        ]
    )
    assert result == expected
def test_find_putative_targets(rule_runner: RuleRunner) -> None:
    """Unowned Avro files (.avsc/.avdl/.avpr) are grouped per directory."""
    rule_runner.write_files(
        {
            "avro/foo/f.avsc": "",
            "avro/foo/bar/baz1.avdl": "",
            "avro/foo/bar/baz2.avpr": "",
            "avro/foo/bar/baz3.avsc": "",
        }
    )
    result = rule_runner.request(
        PutativeTargets,
        [
            PutativeAvroTargetsRequest(("avro/foo", "avro/foo/bar")),
            AllOwnedSources(["avro/foo/bar/baz1.avdl"]),
        ],
    )
    expected = PutativeTargets(
        [
            PutativeTarget.for_target_type(
                AvroSourcesGeneratorTarget,
                path="avro/foo",
                name=None,
                triggering_sources=["f.avsc"],
            ),
            PutativeTarget.for_target_type(
                AvroSourcesGeneratorTarget,
                path="avro/foo/bar",
                name=None,
                triggering_sources=["baz2.avpr", "baz3.avsc"],
            ),
        ]
    )
    assert result == expected
def test_find_putative_targets_subset(rule_runner: RuleRunner) -> None:
    """Only the requested Avro directories receive putative targets."""
    rule_runner.write_files(
        {
            "avro/foo/f.avsc": "",
            "avro/foo/bar/bar.avsc": "",
            "avro/foo/baz/baz.avsc": "",
            "avro/foo/qux/qux.avsc": "",
        }
    )
    result = rule_runner.request(
        PutativeTargets,
        [
            PutativeAvroTargetsRequest(("avro/foo/bar", "avro/foo/qux")),
            AllOwnedSources([]),
        ],
    )
    expected = PutativeTargets(
        [
            PutativeTarget.for_target_type(
                AvroSourcesGeneratorTarget,
                path="avro/foo/bar",
                name=None,
                triggering_sources=["bar.avsc"],
            ),
            PutativeTarget.for_target_type(
                AvroSourcesGeneratorTarget,
                path="avro/foo/qux",
                name=None,
                triggering_sources=["qux.avsc"],
            ),
        ]
    )
    assert result == expected
def test_find_putative_targets() -> None:
    """Unowned .proto files become protobuf_library putative targets, one per directory."""
    rule_runner = RuleRunner(
        rules=[
            *tailor_rules(),
            QueryRule(PutativeTargets, (PutativeProtobufTargetsRequest, AllOwnedSources)),
        ],
        target_types=[],
    )
    rule_runner.write_files(
        {
            "protos/foo/f.proto": "",
            "protos/foo/bar/baz1.proto": "",
            "protos/foo/bar/baz2.proto": "",
            "protos/foo/bar/baz3.proto": "",
        }
    )
    result = rule_runner.request(
        PutativeTargets,
        [PutativeProtobufTargetsRequest(), AllOwnedSources(["protos/foo/bar/baz1.proto"])],
    )
    expected = PutativeTargets(
        [
            PutativeTarget.for_target_type(ProtobufLibrary, "protos/foo", "foo", ["f.proto"]),
            PutativeTarget.for_target_type(
                ProtobufLibrary, "protos/foo/bar", "bar", ["baz2.proto", "baz3.proto"]
            ),
        ]
    )
    assert result == expected
def test_rename_conflicting_targets(rule_runner: RuleRunner) -> None:
    """A proposed target whose name collides with existing targets gets a unique name."""
    rule_runner.write_files(
        {
            "src/fortran/foo/BUILD": (
                "fortran_library(sources=['bar1.f90'])\n"
                "fortran_library(name='foo0', sources=['bar2.f90'])"
            ),
            "src/fortran/foo/bar1.f90": "",
            "src/fortran/foo/bar2.f90": "",
            "src/fortran/foo/bar3.f90": "",
        }
    )
    proposed = PutativeTarget(
        "src/fortran/foo", "foo", "fortran_library", ["bar3.f90"], FortranLibrarySources.default
    )
    uniquified = rule_runner.request(UniquelyNamedPutativeTargets, [PutativeTargets([proposed])])
    expected = PutativeTargets(
        [
            PutativeTarget(
                "src/fortran/foo",
                "foo1",
                "fortran_library",
                ["bar3.f90"],
                FortranLibrarySources.default,
                kwargs={"name": "foo1"},
            )
        ]
    )
    assert uniquified.putative_targets == expected
def test_find_go_binary_targets(rule_runner: RuleRunner) -> None:
    """go_binary targets are only proposed for `package main` dirs that lack one."""
    rule_runner.write_files(
        {
            "missing_binary_tgt/go.mod": "",
            "missing_binary_tgt/app.go": "package main",
            "missing_binary_tgt/BUILD": "go_package()",
            "tgt_already_exists/go.mod": "",
            "tgt_already_exists/app.go": "package main",
            "tgt_already_exists/BUILD": "go_binary(name='bin')\ngo_package()",
            "missing_pkg_and_binary_tgt/go.mod": "",
            "missing_pkg_and_binary_tgt/app.go": "package main",
            "main_set_to_different_dir/go.mod": "",
            "main_set_to_different_dir/subdir/app.go": "package main",
            "main_set_to_different_dir/subdir/BUILD": "go_package()",
            "main_set_to_different_dir/BUILD": "go_binary(main='main_set_to_different_dir/subdir')",
            "no_go_mod/app.go": "package main",
        }
    )
    result = rule_runner.request(
        PutativeTargets,
        [
            PutativeGoTargetsRequest(
                (
                    "missing_binary_tgt",
                    "tgt_already_exists",
                    "missing_pkg_and_binary_tgt",
                    "main_set_to_different_dir",
                    "no_go_mod",
                )
            ),
            AllOwnedSources(
                [
                    "missing_binary_tgt/go.mod",
                    "missing_binary_tgt/app.go",
                    "tgt_already_exists/go.mod",
                    "tgt_already_exists/app.go",
                    "missing_pkg_and_binary_tgt/go.mod",
                    "main_set_to_different_dir/go.mod",
                    "main_set_to_different_dir/subdir/app.go",
                ]
            ),
        ],
    )
    expected = PutativeTargets(
        [
            PutativeTarget.for_target_type(
                GoBinaryTarget,
                path="missing_binary_tgt",
                name="bin",
                triggering_sources=[],
            ),
            PutativeTarget.for_target_type(
                GoPackageTarget,
                path="missing_pkg_and_binary_tgt",
                name="missing_pkg_and_binary_tgt",
                triggering_sources=["app.go"],
                kwargs={},
            ),
            PutativeTarget.for_target_type(
                GoBinaryTarget,
                path="missing_pkg_and_binary_tgt",
                name="bin",
                triggering_sources=[],
            ),
        ]
    )
    assert result == expected
def test_find_putative_targets_for_entry_points(rule_runner: RuleRunner) -> None:
    """Scripts with a __main__ guard not covered by an existing pex_binary get
    putative pex_binary targets keyed on their entry point."""
    mains = ("main1.py", "main2.py", "main3.py")
    rule_runner.write_files(
        {
            f"src/python/foo/{name}": textwrap.dedent(
                """
                if __name__ == "__main__":
                    main()
                """
            )
            for name in mains
        }
    )
    rule_runner.write_files(
        {
            "src/python/foo/BUILD": textwrap.dedent(
                """\
                pex_binary(name='main1', entry_point='main1.py')
                pex_binary(name='main2', entry_point='foo.main2')
                """
            ),
            "src/python/foo/__main__.py": "",
        }
    )
    result = rule_runner.request(
        PutativeTargets,
        [
            PutativePythonTargetsRequest(("src/python/foo",)),
            AllOwnedSources(
                [f"src/python/foo/{name}" for name in mains] + ["src/python/foo/__main__.py"]
            ),
        ],
    )
    expected = PutativeTargets(
        [
            PutativeTarget.for_target_type(
                PexBinary,
                "src/python/foo",
                "main3",
                [],
                kwargs={"entry_point": "main3.py"},
            ),
            PutativeTarget.for_target_type(
                PexBinary,
                "src/python/foo",
                "__main__",
                [],
                kwargs={"entry_point": "__main__.py"},
            ),
        ]
    )
    assert result == expected
def test_skip_invalid_requirements(rule_runner: RuleRunner) -> None:
    """Requirement files that fail validation are skipped instead of aborting tailor."""
    rule_runner.set_options(["--no-python-tailor-ignore-solitary-init-files"])
    rule_runner.write_files(
        {
            "3rdparty/requirements-valid.txt": b"FooProject >= 1.2",
            "3rdparty/requirements-invalid.txt": b"FooProject LOLOLOLOL 1.2",
            "pipfile-valid/Pipfile.lock": b"{}",
            "pipfile-invalid/Pipfile.lock": b"FNARB",
            "poetry-valid/pyproject.toml": b"[tool.poetry]",
            "poetry-invalid/pyproject.toml": b"FNARB",
        }
    )
    result = rule_runner.request(
        PutativeTargets,
        [
            PutativePythonTargetsRequest(
                (
                    "3rdparty",
                    "pipfile-valid",
                    "pipfile-invalid",
                    "poetry-valid",
                    "poetry-invalid",
                )
            ),
            AllOwnedSources([]),
        ],
    )
    expected = PutativeTargets(
        [
            PutativeTarget.for_target_type(
                PythonRequirementsTargetGenerator,
                path="3rdparty",
                name="reqs",
                triggering_sources=["3rdparty/requirements-valid.txt"],
                kwargs={"source": "requirements-valid.txt"},
            ),
            PutativeTarget.for_target_type(
                PipenvRequirementsTargetGenerator,
                path="pipfile-valid",
                name="pipenv",
                triggering_sources=["pipfile-valid/Pipfile.lock"],
            ),
            PutativeTarget.for_target_type(
                PoetryRequirementsTargetGenerator,
                path="poetry-valid",
                name="poetry",
                triggering_sources=["poetry-valid/pyproject.toml"],
            ),
        ]
    )
    assert result == expected
def test_find_putative_targets(rule_runner: RuleRunner) -> None:
    """Unowned requirements files and Python sources both yield putative targets."""
    rule_runner.set_options(["--no-python-setup-tailor-ignore-solitary-init-files"])
    rel_paths = (
        "__init__.py",
        "bar/__init__.py",
        "bar/baz1.py",
        "bar/baz1_test.py",
        "bar/baz2.py",
        "bar/baz2_test.py",
        "bar/baz3.py",
    )
    rule_runner.write_files(
        {
            "3rdparty/requirements.txt": "",
            "3rdparty/requirements-test.txt": "",
            **{f"src/python/foo/{rel}": "" for rel in rel_paths},
        }
    )
    result = rule_runner.request(
        PutativeTargets,
        [
            PutativePythonTargetsRequest(PutativeTargetsSearchPaths(("",))),
            AllOwnedSources(
                [
                    "3rdparty/requirements.txt",
                    "src/python/foo/bar/__init__.py",
                    "src/python/foo/bar/baz1.py",
                ]
            ),
        ],
    )
    expected = PutativeTargets(
        [
            PutativeTarget(
                "3rdparty",
                "requirements-test.txt",
                "python_requirements",
                ("3rdparty/requirements-test.txt",),
                ("3rdparty/requirements-test.txt",),
                addressable=False,
                kwargs={"requirements_relpath": "requirements-test.txt"},
            ),
            PutativeTarget.for_target_type(
                PythonLibrary, "src/python/foo", "foo", ["__init__.py"]
            ),
            PutativeTarget.for_target_type(
                PythonLibrary, "src/python/foo/bar", "bar", ["baz2.py", "baz3.py"]
            ),
            PutativeTarget.for_target_type(
                PythonTests,
                "src/python/foo/bar",
                "tests",
                ["baz1_test.py", "baz2_test.py"],
                kwargs={"name": "tests"},
            ),
        ]
    )
    assert result == expected
def test_target_type_with_no_sources_field(rule_runner: RuleRunner) -> None:
    """A sources-less target type is proposed with no sources, and passing explicit
    sources for such a type raises an AssertionError."""
    result = rule_runner.request(PutativeTargets, [MockPutativeFortranModuleRequest(("dir",))])
    assert result == PutativeTargets(
        [PutativeTarget.for_target_type(FortranModule, "dir", "dir", [])]
    )

    with pytest.raises(AssertionError) as excinfo:
        _ = PutativeTarget.for_target_type(FortranModule, "dir", "dir", ["a.f90"])
    expected_msg = (
        "A target of type FortranModule was proposed at address dir:dir with explicit sources a.f90, "
        "but this target type does not have a `source` or `sources` field."
    )
    assert str(excinfo.value) == expected_msg
async def find_putative_go_targets(
    request: PutativeGoTargetsRequest, all_owned_sources: AllOwnedSources
) -> PutativeTargets:
    """Propose `go_mod` and `go_binary` targets for unowned Go files."""
    putative_targets = []

    # Propose a `go_mod` target for every go.mod file that has no owner.
    all_go_mod_files = await Get(Paths, PathGlobs, request.search_paths.path_globs("go.mod"))
    unowned_go_mod_files = set(all_go_mod_files.files) - set(all_owned_sources)
    for dirname, filenames in group_by_dir(unowned_go_mod_files).items():
        putative_targets.append(
            PutativeTarget.for_target_type(
                GoModTarget,
                path=dirname,
                name=os.path.basename(dirname),
                triggering_sources=sorted(filenames),
            )
        )

    # Propose a `go_binary` target for every `package main` directory that is not
    # already the main package of an existing binary target.
    digest_contents = await Get(
        DigestContents, PathGlobs, request.search_paths.path_globs("*.go")
    )
    main_package_dirs = [
        os.path.dirname(fc.path) for fc in digest_contents if has_package_main(fc.content)
    ]
    existing_targets = await Get(
        UnexpandedTargets, AddressSpecs(AscendantAddresses(d) for d in main_package_dirs)
    )
    owned_main_packages = await MultiGet(
        Get(GoBinaryMainPackage, GoBinaryMainPackageRequest(tgt[GoBinaryMainPackageField]))
        for tgt in existing_targets
        if tgt.has_field(GoBinaryMainPackageField)
    )
    unowned_main_package_dirs = set(main_package_dirs) - {
        # We can be confident `go_first_party_package` targets were generated, meaning that the
        # below will get us the full path to the package's directory.
        # TODO: generalize this
        os.path.join(pkg.address.spec_path, pkg.address.generated_name[2:]).rstrip("/")  # type: ignore[index]
        for pkg in owned_main_packages
    }
    putative_targets.extend(
        PutativeTarget.for_target_type(
            target_type=GoBinaryTarget,
            path=main_pkg_dir,
            name="bin",
            triggering_sources=tuple(),
            kwargs={"name": "bin"},
        )
        for main_pkg_dir in unowned_main_package_dirs
    )

    return PutativeTargets(putative_targets)
def test_root_macros_dont_get_named(rule_runner: RuleRunner) -> None:
    """An unaddressable putative target at the build root keeps its empty name."""
    macro = PutativeTarget("", "", "fortran_macro", [], [], addressable=False)
    result = rule_runner.request(UniquelyNamedPutativeTargets, [PutativeTargets([macro])])
    expected = PutativeTargets(
        [
            PutativeTarget(
                "",
                "",
                "fortran_macro",
                [],
                [],
                addressable=False,
            )
        ]
    )
    assert result.putative_targets == expected
async def find_fortran_targets(
    req: PutativeFortranTargetsRequest, all_owned_sources: AllOwnedSources
) -> PutativeTargets:
    """Propose fortran test/library targets for unowned .f90 files, grouped by dir."""
    all_fortran_files = await Get(Paths, PathGlobs, req.path_globs("*.f90"))
    unowned_fortran_files = set(all_fortran_files.files) - set(all_owned_sources)

    # Split unowned files into tests (matching the default tests globs by basename)
    # and library sources.
    tests_filespec = Filespec(includes=list(FortranTestsSources.default))
    test_basenames = set(
        matches_filespec(
            tests_filespec,
            paths=[os.path.basename(path) for path in unowned_fortran_files],
        )
    )
    test_files = {
        path for path in unowned_fortran_files if os.path.basename(path) in test_basenames
    }
    library_files = unowned_fortran_files - test_files

    proposed = []
    for tgt_type, paths in (
        (FortranTestsTarget, test_files),
        (FortranLibraryTarget, library_files),
    ):
        for dirname, filenames in group_by_dir(paths).items():
            proposed.append(
                PutativeTarget.for_target_type(
                    tgt_type,
                    path=dirname,
                    name="tests" if tgt_type is FortranTestsTarget else None,
                    triggering_sources=sorted(filenames),
                )
            )
    return PutativeTargets(proposed)
def test_make_content_str() -> None:
    """make_content_str renders the putative targets after the existing BUILD content."""
    content = make_content_str(
        "fortran_library()\n",
        "    ",
        [
            PutativeTarget.for_target_type(
                FortranTests,
                "path/to",
                "tests",
                ["test1.f90", "test2.f90"],
                kwargs={"name": "tests", "sources": ("test1.f90", "test2.f90")},
            )
        ],
    )
    expected = textwrap.dedent(
        """
        fortran_library()

        fortran_tests(
            name="tests",
            sources=[
                "test1.f90",
                "test2.f90",
            ],
        )
        """
    ).lstrip()
    assert expected == content
def test_find_putative_targets(rule_runner: RuleRunner) -> None:
    """Only directories with unowned .wsdl files receive a putative target."""
    rule_runner.write_files(
        {
            "src/wsdl/simple.wsdl": "",
            "src/wsdl/dir1/hello.wsdl": "",
            "src/wsdl/dir1/world.wsdl": "",
        }
    )
    result = rule_runner.request(
        PutativeTargets,
        [
            PutativeWsdlTargetsRequest(PutativeTargetsSearchPaths(("",))),
            AllOwnedSources(["src/wsdl/simple.wsdl"]),
        ],
    )
    expected = PutativeTargets(
        [
            PutativeTarget.for_target_type(
                WsdlSourcesGeneratorTarget,
                path="src/wsdl/dir1",
                name=None,
                triggering_sources=["hello.wsdl", "world.wsdl"],
            ),
        ]
    )
    assert result == expected
def test_restrict_conflicting_sources(rule_runner: RuleRunner) -> None:
    """Default sources that conflict with existing targets are restricted, and the
    putative target is annotated with explanatory comments."""
    # Use a single write_files() call instead of a create_file() loop, for
    # consistency with the other tests in this file.
    rule_runner.write_files(
        {
            "src/fortran/foo/BUILD": "fortran_library(sources=['bar/baz1.f90'])",
            "src/fortran/foo/bar/BUILD": "fortran_library(sources=['baz2.f90'])",
            "src/fortran/foo/bar/baz1.f90": "",
            "src/fortran/foo/bar/baz2.f90": "",
            "src/fortran/foo/bar/baz3.f90": "",
        }
    )
    ptgt = PutativeTarget(
        "src/fortran/foo/bar",
        "bar0",
        "fortran_library",
        ["baz3.f90"],
        FortranLibrarySources.default,
    )
    dspt = rule_runner.request(DisjointSourcePutativeTarget, [ptgt])
    ptgt = dspt.putative_target
    # Only the non-conflicting source survives, both as owned source and as the
    # explicit `sources=` kwarg to be rendered into the BUILD file.
    assert ("baz3.f90",) == ptgt.owned_sources
    assert ("baz3.f90",) == ptgt.kwargs.get("sources")
    assert (
        "# NOTE: Sources restricted from the default for fortran_library due to conflict with",
        "# - src/fortran/foo",
        "# - src/fortran/foo/bar",
    ) == ptgt.comments
async def find_putative_targets(
    req: PutativeKotlinTargetsRequest,
    all_owned_sources: AllOwnedSources,
    kotlin_subsystem: KotlinSubsystem,
) -> PutativeTargets:
    """Propose targets for unowned .kt files, grouped by directory and target type.

    Respects the subsystem's `tailor_source_targets` switch: when disabled, no
    targets are proposed at all.
    """
    proposed = []
    if kotlin_subsystem.tailor_source_targets:
        all_kotlin_files = await Get(Paths, PathGlobs, req.path_globs("*.kt"))
        unowned_kotlin_files = set(all_kotlin_files.files) - set(all_owned_sources)
        for tgt_type, paths in classify_source_files(unowned_kotlin_files).items():
            for dirname, filenames in group_by_dir(paths).items():
                proposed.append(
                    PutativeTarget.for_target_type(
                        tgt_type,
                        path=dirname,
                        name=None,
                        triggering_sources=sorted(filenames),
                    )
                )
    return PutativeTargets(proposed)
def add_req_targets(files: Iterable[FileContent], alias: str, target_name: str) -> None:
    """Append a putative requirements target for each unowned file in `files`.

    Files that fail validation are skipped with a warning rather than aborting.
    """
    path_to_content = {fc.path: fc.content for fc in files}
    for fp in set(path_to_content) - set(all_owned_sources):
        dirname, filename = os.path.split(fp)
        try:
            validate(fp, path_to_content[fp], alias)
        except Exception as e:
            logger.warning(
                f"An error occurred when validating `{fp}`: {e}.\n\n"
                "You'll need to create targets for its contents manually.\n"
                "To silence this error in future, see "
                "https://www.pantsbuild.org/docs/reference-tailor#section-ignore-paths \n"
            )
            continue
        # Only non-default filenames get an explicit `source=` kwarg for
        # python_requirements.
        if alias == "python_requirements" and filename != "requirements.txt":
            kwargs = {"source": filename}
        else:
            kwargs = {}
        pts.append(
            PutativeTarget(
                path=dirname,
                name=target_name,
                type_alias=alias,
                triggering_sources=[fp],
                owned_sources=[filename],
                kwargs=kwargs,
            )
        )
def test_make_content_str() -> None:
    """make_content_str renders the putative targets after the existing BUILD content."""
    content = make_content_str(
        "fortran_library()\n",
        "    ",
        [
            PutativeTarget.for_target_type(
                FortranTestsTarget,
                path="path/to",
                name="tests",
                triggering_sources=["test1.f90", "test2.f90"],
                kwargs={"sources": ("test1.f90", "test2.f90")},
            )
        ],
    )
    expected = dedent(
        """\
        fortran_library()

        fortran_tests(
            name="tests",
            sources=[
                "test1.f90",
                "test2.f90",
            ],
        )
        """
    )
    assert expected == content
def test_find_putative_targets(rule_runner: RuleRunner) -> None:
    """Unowned .js files yield a javascript_sources putative target per directory."""
    rule_runner.write_files(
        {
            "src/owned/BUILD": "javascript_sources()\n",
            "src/owned/OwnedFile.js": "",
            "src/unowned/UnownedFile1.js": "",
            "src/unowned/UnownedFile2.js": "",
        }
    )
    result = rule_runner.request(
        PutativeTargets,
        [
            PutativeJSTargetsRequest(("src/owned", "src/unowned")),
            AllOwnedSources(["src/owned/OwnedFile.js"]),
        ],
    )
    expected = PutativeTargets(
        [
            PutativeTarget.for_target_type(
                JSSourcesGeneratorTarget,
                "src/unowned",
                "unowned",
                ["UnownedFile1.js", "UnownedFile2.js"],
            ),
        ]
    )
    assert result == expected
def test_root_targets_are_explicitly_named(rule_runner: RuleRunner) -> None:
    """An addressable target at the build root gets the explicit name `root`."""
    rule_runner.write_files({"foo.f90": ""})
    proposed = PutativeTarget(
        "", "", "fortran_library", ["foo.f90"], FortranLibrarySources.default
    )
    result = rule_runner.request(UniquelyNamedPutativeTargets, [PutativeTargets([proposed])])
    expected = PutativeTargets(
        [
            PutativeTarget(
                "",
                "root",
                "fortran_library",
                ["foo.f90"],
                FortranLibrarySources.default,
            )
        ]
    )
    assert result.putative_targets == expected
def test_edit_build_files_without_header_text(rule_runner: RuleRunner) -> None:
    """When the BUILD path is occupied by a directory, a BUILD.pants file is created."""
    rule_runner.create_dir("src/fortran/baz/BUILD")  # NB: A directory, not a file.
    req = EditBuildFilesRequest(
        PutativeTargets(
            [
                PutativeTarget.for_target_type(
                    FortranLibraryTarget, "src/fortran/baz", "baz", ["qux1.f90"]
                ),
            ]
        ),
    )
    edited_build_files = rule_runner.request(EditedBuildFiles, [req])
    assert edited_build_files.created_paths == ("src/fortran/baz/BUILD.pants",)

    contents = rule_runner.request(DigestContents, [edited_build_files.digest])
    expected = [
        FileContent(
            "src/fortran/baz/BUILD.pants",
            dedent(
                """\
                fortran_library()
                """
            ).encode(),
        ),
    ]
    actual = list(contents)
    # Compare field-by-field rather than list == list so that a content mismatch
    # shows a readable string diff of the decoded file bodies.
    assert len(expected) == len(actual)
    for efc, afc in zip(expected, actual):
        assert efc.path == afc.path
        assert efc.content.decode() == afc.content.decode()
        assert efc.is_executable == afc.is_executable