def test_resolve_address() -> None:
    """AddressInput -> Address resolution, covering file addresses, directory
    addresses, explicit target components, and the missing-file error."""
    rule_runner = RuleRunner(rules=[QueryRule(Address, (AddressInput,))])

    def check(given: AddressInput, want: Address) -> None:
        assert rule_runner.request_product(Address, [given]) == want

    rule_runner.create_file("a/b/c.txt")
    check(
        AddressInput("a/b/c.txt"),
        Address("a/b", target_name=None, relative_file_path="c.txt"),
    )
    check(AddressInput("a/b"), Address("a/b", target_name=None, relative_file_path=None))
    check(AddressInput("a/b", target_component="c"), Address("a/b", target_name="c"))
    check(
        AddressInput("a/b/c.txt", target_component="c"),
        Address("a/b", relative_file_path="c.txt", target_name="c"),
    )

    # Top-level addresses will not have a path_component, unless they are a file address.
    rule_runner.create_file("f.txt")
    check(
        AddressInput("f.txt", target_component="original"),
        Address("", relative_file_path="f.txt", target_name="original"),
    )
    check(AddressInput("", target_component="t"), Address("", target_name="t"))

    # A file address whose file is not on disk should fail loudly.
    with pytest.raises(ExecutionError) as exc:
        rule_runner.request_product(Address, [AddressInput("a/b/fake")])
    assert "'a/b/fake' does not exist on disk" in str(exc.value)
def test_non_archive(rule_runner: RuleRunner) -> None:
    """A file that is not an archive passes through extraction unchanged."""
    snapshot = rule_runner.make_snapshot({"test.sh": b"# A shell script"})
    extracted = rule_runner.request_product(
        ExtractedDigest, [MaybeExtractable(snapshot.digest)]
    )
    contents = rule_runner.request_product(DigestContents, [extracted.digest])
    assert contents == DigestContents([FileContent("test.sh", b"# A shell script")])
def test_extract_zip(rule_runner: RuleRunner, compression: int) -> None:
    """A zip archive (under the given compression mode) extracts to its members."""
    buf = BytesIO()
    with zipfile.ZipFile(buf, "w", compression=compression) as zf:
        for name, content in FILES.items():
            zf.writestr(name, content)
    buf.flush()

    snapshot = rule_runner.make_snapshot({"test.zip": buf.getvalue()})
    extracted = rule_runner.request_product(
        ExtractedDigest, [MaybeExtractable(snapshot.digest)]
    )
    contents = rule_runner.request_product(DigestContents, [extracted.digest])
    assert contents == EXPECTED_DIGEST_CONTENTS
def test_extract_tar(rule_runner: RuleRunner, compression: str) -> None:
    """A tar archive (optionally compressed, e.g. gz/xz) extracts to its members."""
    buf = BytesIO()
    mode = f"w:{compression}" if compression else "w"
    with tarfile.open(mode=mode, fileobj=buf) as tf:
        for name, content in FILES.items():
            info = tarfile.TarInfo(name)
            info.size = len(content)
            tf.addfile(info, BytesIO(content))

    # File extension mirrors the compression, e.g. `test.tar.gz` vs plain `test.tar`.
    ext = f"tar.{compression}" if compression else "tar"
    snapshot = rule_runner.make_snapshot({f"test.{ext}": buf.getvalue()})
    extracted = rule_runner.request_product(
        ExtractedDigest, [MaybeExtractable(snapshot.digest)]
    )
    contents = rule_runner.request_product(DigestContents, [extracted.digest])
    assert contents == EXPECTED_DIGEST_CONTENTS
def test_filter_field_sets(rule_runner: RuleRunner) -> None:
    """Field sets with no sources are filtered out; the rest are kept whole."""

    @dataclass(frozen=True)
    class MockFieldSet(FieldSet):
        sources: Sources
        # Another field to demo that we will preserve the whole FieldSet data structure.
        tags: Tags

    rule_runner.create_file("f1.txt")

    addr_with_sources = Address("", target_name="valid")
    fs_with_sources = MockFieldSet(
        addr_with_sources,
        Sources(["f1.txt"], address=addr_with_sources),
        Tags(None, address=addr_with_sources),
    )
    addr_no_sources = Address("", target_name="empty")
    fs_no_sources = MockFieldSet(
        addr_no_sources,
        Sources(None, address=addr_no_sources),
        Tags(None, address=addr_no_sources),
    )

    result = rule_runner.request_product(
        FieldSetsWithSources,
        [
            FieldSetsWithSourcesRequest([fs_with_sources, fs_no_sources]),
            create_options_bootstrapper(),
        ],
    )
    assert tuple(result) == (fs_with_sources,)
def test_source_roots_request() -> None:
    """Each requested file and dir maps to its matching configured source root."""
    rule_runner = RuleRunner(rules=[
        *source_root_rules(),
        QueryRule(SourceRootsResult, (SourceRootsRequest, OptionsBootstrapper)),
    ])
    request = SourceRootsRequest(
        files=(
            PurePath("src/python/foo/bar.py"),
            PurePath("tests/python/foo/bar_test.py"),
        ),
        dirs=(PurePath("src/python/foo"), PurePath("src/python/baz/qux")),
    )
    result = rule_runner.request_product(
        SourceRootsResult,
        [
            request,
            create_options_bootstrapper(
                args=["--source-root-patterns=['src/python','tests/python']"]),
        ],
    )
    expected = {
        PurePath("src/python/foo/bar.py"): SourceRoot("src/python"),
        PurePath("tests/python/foo/bar_test.py"): SourceRoot("tests/python"),
        PurePath("src/python/foo"): SourceRoot("src/python"),
        PurePath("src/python/baz/qux"): SourceRoot("src/python"),
    }
    assert dict(result.path_to_root) == expected
def test_target_adaptor_parsed_correctly(
        target_adaptor_rule_runner: RuleRunner) -> None:
    """A BUILD file target parses into a TargetAdaptor; fields are not validated."""
    target_adaptor_rule_runner.add_to_build_file(
        "helloworld",
        dedent("""\
            mock_tgt(
                fake_field=42,
                dependencies=[
                    # Because we don't follow dependencies or even parse dependencies, this
                    # self-cycle should be fine.
                    "helloworld",
                    ":sibling",
                    "helloworld/util",
                    "helloworld/util:tests",
                ],
            )
            """),
    )
    addr = Address("helloworld")
    adaptor = target_adaptor_rule_runner.request_product(
        TargetAdaptor, [addr, create_options_bootstrapper()])

    assert adaptor.name == "helloworld"
    assert adaptor.type_alias == "mock_tgt"
    assert adaptor.kwargs["dependencies"] == [
        "helloworld",
        ":sibling",
        "helloworld/util",
        "helloworld/util:tests",
    ]
    # NB: TargetAdaptors do not validate what fields are valid. The Target API should error
    # when encountering this, but it's fine at this stage.
    assert adaptor.kwargs["fake_field"] == 42
def test_filter_targets(rule_runner: RuleRunner) -> None:
    """Targets with no sources — or no Sources field at all — are filtered out."""

    class MockTarget(Target):
        alias = "target"
        core_fields = (Sources,)

    class MockTargetWithNoSourcesField(Target):
        alias = "no_sources"
        core_fields = ()

    rule_runner.create_file("f1.txt")
    tgt_with_sources = MockTarget(
        {Sources.alias: ["f1.txt"]}, address=Address("", target_name="valid")
    )
    tgt_empty_sources = MockTarget({}, address=Address("", target_name="empty"))
    tgt_no_field = MockTargetWithNoSourcesField(
        {}, address=Address("", target_name="invalid")
    )

    result = rule_runner.request_product(
        TargetsWithSources,
        [
            TargetsWithSourcesRequest(
                [tgt_with_sources, tgt_empty_sources, tgt_no_field]),
            create_options_bootstrapper(),
        ],
    )
    assert tuple(result) == (tgt_with_sources,)
def test_target_adaptor_not_found(
        target_adaptor_rule_runner: RuleRunner) -> None:
    """Requesting a TargetAdaptor errors helpfully when the target is missing.

    Two failure modes are checked: no BUILD file exists in the directory at
    all, and a BUILD file exists but lacks the requested target (which should
    produce a did-you-mean suggestion).
    """
    bootstrapper = create_options_bootstrapper()
    with pytest.raises(ExecutionError) as exc:
        target_adaptor_rule_runner.request_product(
            TargetAdaptor, [Address("helloworld"), bootstrapper])
    # NOTE(review): the backslash-escaped quotes here match how str(exc)
    # renders the wrapped exception's repr — confirm if the message format changes.
    assert "Directory \\'helloworld\\' does not contain any BUILD files" in str(
        exc)

    target_adaptor_rule_runner.add_to_build_file("helloworld",
                                                 "mock_tgt(name='other_tgt')")
    # re.escape because the expected message contains regex metacharacters
    # and pytest.raises(match=...) treats the pattern as a regex.
    expected_rx_str = re.escape(
        "'helloworld' was not found in namespace 'helloworld'. Did you mean one of:\n :other_tgt"
    )
    with pytest.raises(ExecutionError, match=expected_rx_str):
        target_adaptor_rule_runner.request_product(
            TargetAdaptor, [Address("helloworld"), bootstrapper])
def test_platform_on_local_epr_result() -> None:
    """A locally-executed process reports the current platform on its result."""
    rule_runner = RuleRunner(
        rules=[QueryRule(FallibleProcessResultWithPlatform, (Process,))])
    this_platform = Platform.current
    process = Process(
        argv=("/bin/echo", "test"),
        description="Run some program that will exit cleanly.",
    )

    result = rule_runner.request_product(
        FallibleProcessResultWithPlatform, [process])
    assert result.exit_code == 0
    assert result.platform == this_platform
def resolve_address_specs(
    rule_runner: RuleRunner,
    specs: Iterable[AddressSpec],
    bootstrapper: Optional[OptionsBootstrapper] = None,
) -> Set[AddressWithOrigin]:
    """Resolve `specs` to the set of addresses-with-origins they match.

    Filtering by global options is always enabled; a default bootstrapper is
    created when the caller does not supply one.
    """
    request = AddressSpecs(specs, filter_by_global_options=True)
    matched = rule_runner.request_product(
        AddressesWithOrigins,
        [request, bootstrapper or create_options_bootstrapper()],
    )
    return set(matched)
def assert_sources_resolved(
    rule_runner: RuleRunner,
    sources_fields: Iterable[SourcesField],
    *,
    expected: Iterable[TargetSources],
    expected_unrooted: Iterable[str] = (),
) -> None:
    """Resolve `sources_fields` and assert on both snapshot files and unrooted files."""
    result = rule_runner.request_product(
        SourceFiles,
        [SourceFilesRequest(sources_fields), create_options_bootstrapper()],
    )
    # Flatten and dedupe every expected full path, then compare in sorted order.
    expected_paths = {path for sources in expected for path in sources.full_paths}
    assert list(result.snapshot.files) == sorted(expected_paths)
    assert list(result.unrooted_files) == sorted(expected_unrooted)
def assert_python_requirements(
    rule_runner: RuleRunner,
    build_file_entry: str,
    requirements_txt: str,
    *,
    expected_file_dep: PythonRequirementsFile,
    expected_targets: Iterable[PythonRequirementLibrary],
    requirements_txt_relpath: str = "requirements.txt",
) -> None:
    """Write a BUILD entry plus a requirements file, then check the generated targets."""
    rule_runner.add_to_build_file("", f"{build_file_entry}\n")
    rule_runner.create_file(requirements_txt_relpath, requirements_txt)

    generated = rule_runner.request_product(
        Targets,
        [
            Specs(AddressSpecs([DescendantAddresses("")]), FilesystemSpecs([])),
            create_options_bootstrapper(),
        ],
    )
    assert set(generated) == {expected_file_dep, *expected_targets}
def assert_pipenv_requirements(
    rule_runner: RuleRunner,
    build_file_entry: str,
    pipfile_lock: dict,
    *,
    expected_file_dep: PythonRequirementsFile,
    expected_targets: Iterable[PythonRequirementLibrary],
    pipfile_lock_relpath: str = "Pipfile.lock",
) -> None:
    """Write a BUILD entry plus a serialized Pipfile.lock, then check the targets."""
    rule_runner.add_to_build_file("", f"{build_file_entry}\n")
    rule_runner.create_file(pipfile_lock_relpath, dumps(pipfile_lock))

    generated = rule_runner.request_product(
        Targets,
        [
            Specs(AddressSpecs([DescendantAddresses("")]), FilesystemSpecs([])),
            create_options_bootstrapper(),
        ],
    )
    assert set(generated) == {expected_file_dep, *expected_targets}
def assert_rule_match(rule_runner: RuleRunner,
                      glob: str,
                      paths: Tuple[str, ...],
                      *,
                      should_match: bool) -> None:
    """Assert that `glob` does (or does not) match `paths`, both in-memory and on disk."""
    # Confirm in-memory behavior.
    matched = matches_filespec({"includes": [glob]}, paths=paths)
    if should_match:
        assert matched == paths
    else:
        assert not matched

    # Confirm on-disk behavior. A trailing slash marks a directory path.
    for path in paths:
        if path.endswith("/"):
            rule_runner.create_dir(path)
        else:
            rule_runner.create_file(path)
    snapshot = rule_runner.request_product(Snapshot, [PathGlobs([glob])])
    if should_match:
        assert sorted(snapshot.files) == sorted(paths)
    else:
        assert not snapshot.files
def get_stripped_files(
    rule_runner: RuleRunner,
    request: SourceFiles,
    *,
    args: Optional[List[str]] = None,
) -> List[str]:
    """Run the StrippedSourceFiles rule for `request` and return the file paths.

    If `args` does not already set `--source-root-patterns`, a default set of
    source roots is appended so stripping has roots to work with.
    """
    # Copy so we never mutate a list the caller passed in. (The previous
    # version appended to the caller's list in place.)
    args = list(args or [])
    if not any(arg.startswith("--source-root-patterns") for arg in args):
        source_root_patterns = ["src/python", "src/java", "tests/python"]
        args.append(
            f"--source-root-patterns={json.dumps(source_root_patterns)}")
    result = rule_runner.request_product(
        StrippedSourceFiles,
        [request, create_options_bootstrapper(args=args)],
    )
    return list(result.snapshot.files)
def assert_pants_requirement(
    rule_runner: RuleRunner,
    build_file_entry: str,
    *,
    expected_target_name: str,
    expected_dist: str = "pantsbuild.pants",
    expected_module: str = "pants",
) -> None:
    """Check that a BUILD entry in 3rdparty/python expands to the expected
    PythonRequirementLibrary pinned to the current pants version."""
    rule_runner.add_to_build_file("3rdparty/python", f"{build_file_entry}\n")
    wrapped = rule_runner.request_product(
        WrappedTarget,
        [
            Address("3rdparty/python", target_name=expected_target_name),
            create_options_bootstrapper(),
        ],
    )
    target = wrapped.target

    assert isinstance(target, PythonRequirementLibrary)
    pinned = Requirement.parse(f"{expected_dist}=={pants_version()}")
    assert target[PythonRequirementsField].value == (pinned,)
    assert target[ModuleMappingField].value == FrozenDict(
        {expected_dist: (expected_module,)})
def test_identical_uuids(rule_runner: RuleRunner) -> None:
    """Two requests with the same randomizer value produce the same UUID."""
    first = rule_runner.request_product(UUID, [UUIDRequest(randomizer=0.0)])
    second = rule_runner.request_product(UUID, [UUIDRequest(randomizer=0.0)])
    assert first == second
def test_distinct_uuids(rule_runner: RuleRunner) -> None:
    """Two default requests produce different UUIDs."""
    first = rule_runner.request_product(UUID, [UUIDRequest()])
    second = rule_runner.request_product(UUID, [UUIDRequest()])
    assert first != second