def test_parse_metadata_digest(rule_runner: RuleRunner, chart_contents: str) -> None:
    chart_dict = yaml.safe_load(chart_contents)
    chart_bytes = bytes(chart_contents, "utf-8")

    non_prefixed_digest = rule_runner.request(
        Digest, [CreateDigest([FileContent("Chart.yaml", chart_bytes)])]
    )
    non_prefixed_metadata = rule_runner.request(
        HelmChartMetadata,
        [
            ParseHelmChartMetadataDigest(
                non_prefixed_digest, description_of_origin="test_parse_metadata_digest"
            )
        ],
    )
    assert_metadata(non_prefixed_metadata, chart_dict)

    prefix = "foo"
    prefixed_digest = rule_runner.request(
        Digest, [CreateDigest([FileContent(os.path.join(prefix, "Chart.yml"), chart_bytes)])]
    )
    prefixed_metadata = rule_runner.request(
        HelmChartMetadata,
        [
            ParseHelmChartMetadataDigest(
                prefixed_digest, description_of_origin="test_parse_metadata_digest", prefix="*"
            )
        ],
    )
    assert_metadata(prefixed_metadata, chart_dict)
def test_more_complicated_engine_aware(rule_runner: RuleRunner, run_tracker: RunTracker) -> None:
    tracker = WorkunitTracker()
    handler = StreamingWorkunitHandler(
        rule_runner.scheduler,
        run_tracker=run_tracker,
        callbacks=[tracker.add],
        report_interval_seconds=0.01,
        max_workunit_verbosity=LogLevel.TRACE,
        specs=Specs.empty(),
        options_bootstrapper=create_options_bootstrapper([]),
    )
    with handler.session():
        input_1 = CreateDigest(
            (
                FileContent(path="a.txt", content=b"alpha"),
                FileContent(path="b.txt", content=b"beta"),
            )
        )
        digest_1 = rule_runner.request(Digest, [input_1])
        snapshot_1 = rule_runner.request(Snapshot, [digest_1])

        input_2 = CreateDigest((FileContent(path="g.txt", content=b"gamma"),))
        digest_2 = rule_runner.request(Digest, [input_2])
        snapshot_2 = rule_runner.request(Snapshot, [digest_2])

        input = ComplicatedInput(snapshot_1=snapshot_1, snapshot_2=snapshot_2)
        rule_runner.request(Output, [input])

    finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
    workunit = next(
        item for item in finished if item["name"] == "pants.engine.internals.engine_test.a_rule"
    )

    streaming_workunit_context = handler._context

    artifacts = workunit["artifacts"]
    output_snapshot_1 = artifacts["snapshot_1"]
    output_snapshot_2 = artifacts["snapshot_2"]

    output_contents_list = streaming_workunit_context.snapshots_to_file_contents(
        [output_snapshot_1, output_snapshot_2]
    )
    assert len(output_contents_list) == 2
    assert isinstance(output_contents_list[0], DigestContents)
    assert isinstance(output_contents_list[1], DigestContents)

    digest_contents_1 = output_contents_list[0]
    digest_contents_2 = output_contents_list[1]

    assert len(tuple(x for x in digest_contents_1 if x.content == b"alpha")) == 1
    assert len(tuple(x for x in digest_contents_1 if x.content == b"beta")) == 1
    assert len(tuple(x for x in digest_contents_2 if x.content == b"gamma")) == 1
def test_create_empty_directory(rule_runner: RuleRunner) -> None:
    res = rule_runner.request(Snapshot, [CreateDigest([Directory("a/")])])
    assert res.dirs == ("a",)
    assert not res.files
    assert res.digest != EMPTY_DIGEST

    res = rule_runner.request(
        Snapshot, [CreateDigest([Directory("x/y/z"), Directory("m"), Directory("m/n")])]
    )
    assert res.dirs == ("m", "m/n", "x", "x/y", "x/y/z")
    assert not res.files
    assert res.digest != EMPTY_DIGEST
async def parse_python_imports(request: ParsePythonImportsRequest) -> ParsedPythonImports:
    python_interpreter, script_digest, stripped_sources = await MultiGet(
        Get(PythonExecutable, PexInterpreterConstraints, request.interpreter_constraints),
        Get(Digest, CreateDigest([FileContent("__parse_python_imports.py", _SCRIPT.encode())])),
        Get(StrippedSourceFiles, SourceFilesRequest([request.sources])),
    )
    input_digest = await Get(
        Digest, MergeDigests([script_digest, stripped_sources.snapshot.digest])
    )
    process_result = await Get(
        ProcessResult,
        Process(
            argv=[
                python_interpreter.path,
                "./__parse_python_imports.py",
                *stripped_sources.snapshot.files,
            ],
            input_digest=input_digest,
            description=f"Determine Python imports for {request.sources.address}",
            level=LogLevel.DEBUG,
        ),
    )
    explicit_imports, _, string_imports = process_result.stdout.decode().partition("--")
    return ParsedPythonImports(
        explicit_imports=FrozenOrderedSet(explicit_imports.strip().splitlines()),
        string_imports=FrozenOrderedSet(string_imports.strip().splitlines()),
    )
def test_pex_execution(rule_runner: RuleRunner) -> None:
    sources = rule_runner.request(
        Digest,
        [
            CreateDigest(
                (
                    FileContent("main.py", b'print("from main")'),
                    FileContent("subdir/sub.py", b'print("from sub")'),
                )
            ),
        ],
    )
    pex_output = create_pex_and_get_all_data(rule_runner, entry_point="main", sources=sources)

    pex_files = pex_output["files"]
    assert "pex" not in pex_files
    assert "main.py" in pex_files
    assert "subdir/sub.py" in pex_files

    # We reasonably expect there to be a python interpreter on the test-running process's path.
    env = {"PATH": os.getenv("PATH", "")}

    process = Process(
        argv=("python", "test.pex"),
        env=env,
        input_digest=pex_output["pex"].digest,
        description="Run the pex and make sure it works",
    )
    result = rule_runner.request(ProcessResult, [process])
    assert result.stdout == b"from main\n"
def test_write_digest() -> None:
    rule_runner = RuleRunner()
    workspace = Workspace(rule_runner.scheduler)

    digest = rule_runner.request(
        Digest,
        [CreateDigest([FileContent("a.txt", b"hello"), FileContent("subdir/b.txt", b"goodbye")])],
    )

    path1 = Path(rule_runner.build_root, "a.txt")
    path2 = Path(rule_runner.build_root, "subdir/b.txt")
    assert not path1.is_file()
    assert not path2.is_file()

    workspace.write_digest(digest)
    assert path1.is_file()
    assert path2.is_file()

    workspace.write_digest(digest, path_prefix="prefix")
    # Join the *relative* paths here: joining an already-absolute `path1`/`path2`
    # would discard the `prefix` component and make these assertions vacuous.
    assert Path(rule_runner.build_root, "prefix", "a.txt").is_file()
    assert Path(rule_runner.build_root, "prefix", "subdir/b.txt").is_file()
async def create_archive(request: CreateArchive) -> Digest:
    if request.format == ArchiveFormat.ZIP:
        zip_binary = await Get(ZipBinary, _ZipBinaryRequest())
        argv = zip_binary.create_archive_argv(request)
        env = {}
        input_digest = request.snapshot.digest
    else:
        tar_binary = await Get(TarBinary, _TarBinaryRequest())
        argv = tar_binary.create_archive_argv(request)
        # `tar` expects to find a couple binaries like `gzip` and `xz` by looking on the PATH.
        env = {"PATH": os.pathsep.join(SEARCH_PATHS)}
        # `tar` requires that the output filename's parent directory exists.
        output_dir_digest = await Get(
            Digest, CreateDigest([Directory(os.path.dirname(request.output_filename))])
        )
        input_digest = await Get(
            Digest, MergeDigests([output_dir_digest, request.snapshot.digest])
        )

    result = await Get(
        ProcessResult,
        Process(
            argv=argv,
            env=env,
            input_digest=input_digest,
            description=f"Create {request.output_filename}",
            level=LogLevel.DEBUG,
            output_files=(request.output_filename,),
        ),
    )
    return result.output_digest
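# A minimal sketch of how a caller might use the rule above. Illustrative only:
# `CreateArchive`'s fields (`snapshot`, `output_filename`, `format`) are inferred
# from the rule body, and a real plugin would obtain `snapshot` from its own inputs.
async def example_zip_sources(snapshot: Snapshot) -> Digest:
    # Request a zip archive of the snapshot's files; `ArchiveFormat.ZIP` is the
    # branch exercised first in `create_archive` above.
    return await Get(
        Digest,
        CreateArchive(snapshot=snapshot, output_filename="files.zip", format=ArchiveFormat.ZIP),
    )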
def test_add_prefix(rule_runner: RuleRunner) -> None:
    digest = rule_runner.request(
        Digest,
        [CreateDigest([FileContent("main.ext", b""), FileContent("subdir/sub.ext", b"")])],
    )

    # Two components.
    output_digest = rule_runner.request(Digest, [AddPrefix(digest, "outer_dir/middle_dir")])
    snapshot = rule_runner.request(Snapshot, [output_digest])
    assert sorted(snapshot.files) == [
        "outer_dir/middle_dir/main.ext",
        "outer_dir/middle_dir/subdir/sub.ext",
    ]
    assert sorted(snapshot.dirs) == [
        "outer_dir",
        "outer_dir/middle_dir",
        "outer_dir/middle_dir/subdir",
    ]

    # Empty.
    output_digest = rule_runner.request(Digest, [AddPrefix(digest, "")])
    assert digest == output_digest

    # Illegal.
    with pytest.raises(Exception, match=r"The `prefix` must be relative."):
        rule_runner.request(Digest, [AddPrefix(digest, "../something")])
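# Note: the engine also exposes the inverse operation. A hedged sketch, shown
# here only for contrast with `AddPrefix` (not part of the original test):
#
#   stripped = rule_runner.request(
#       Digest, [RemovePrefix(output_digest, "outer_dir/middle_dir")]
#   )
#   assert stripped == digest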
async def generate_import_config(
    request: GatherImportsRequest, stdlib_imports: GoStdLibImports, goroot: GoRoot
) -> GatheredImports:
    import_config_digests: dict[str, tuple[str, Digest]] = {}
    for pkg in request.packages:
        fp = pkg.object_digest.fingerprint
        prefixed_digest = await Get(Digest, AddPrefix(pkg.object_digest, f"__pkgs__/{fp}"))
        import_config_digests[pkg.import_path] = (fp, prefixed_digest)

    pkg_digests: OrderedSet[Digest] = OrderedSet()

    import_config = ["# import config"]
    for import_path, (fp, digest) in import_config_digests.items():
        pkg_digests.add(digest)
        import_config.append(f"packagefile {import_path}=__pkgs__/{fp}/__pkg__.a")

    if request.include_stdlib:
        pkg_digests.add(goroot.digest)
        import_config.extend(
            f"packagefile {import_path}={os.path.normpath(static_file_path)}"
            for import_path, static_file_path in stdlib_imports.items()
        )

    import_config_content = "\n".join(import_config).encode("utf-8")
    import_config_digest = await Get(
        Digest, CreateDigest([FileContent("./importcfg", import_config_content)])
    )
    pkg_digests.add(import_config_digest)

    digest = await Get(Digest, MergeDigests(pkg_digests))
    return GatheredImports(digest=digest)
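# For reference, the `importcfg` file assembled above looks roughly like this;
# the fingerprint and stdlib path are illustrative values, but the line format
# follows directly from the f-strings in the rule:
#
#   # import config
#   packagefile example.com/foo=__pkgs__/3f5a91c0.../__pkg__.a
#   packagefile fmt=/goroot/pkg/linux_amd64/fmt.a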
def make_interactive_process(self) -> InteractiveProcess:
    digest = self.request_product(
        Digest, [CreateDigest((FileContent(path="program.py", content=b"def test(): pass"),))]
    )
    return InteractiveProcess(["/usr/bin/python", "program.py"], input_digest=digest)
def test_pex_execution(
    rule_runner: RuleRunner, pex_type: type[Pex | VenvPex], internal_only: bool
) -> None:
    sources = rule_runner.request(
        Digest,
        [
            CreateDigest(
                (
                    FileContent("main.py", b'print("from main")'),
                    FileContent("subdir/sub.py", b'print("from sub")'),
                )
            ),
        ],
    )
    pex_data = create_pex_and_get_all_data(
        rule_runner,
        pex_type=pex_type,
        internal_only=internal_only,
        main=EntryPoint("main"),
        sources=sources,
    )

    assert "pex" not in pex_data.files
    assert "main.py" in pex_data.files
    assert "subdir/sub.py" in pex_data.files

    # This should run the Pex using the same interpreter used to create it. We must set the
    # `PATH` so that the shebang works.
    pex_exe = (
        f"./{pex_data.sandbox_path}"
        if pex_data.is_zipapp
        else os.path.join(pex_data.sandbox_path, "__main__.py")
    )
    process = Process(
        argv=(pex_exe,),
        env={"PATH": os.getenv("PATH", "")},
        input_digest=pex_data.pex.digest,
        description="Run the pex and make sure it works",
    )
    result = rule_runner.request(ProcessResult, [process])
    assert result.stdout == b"from main\n"
async def setup_parser(hcl2_parser: TerraformHcl2Parser) -> ParserSetup:
    parser_script_content = pkgutil.get_data("pants.backend.terraform", "hcl2_parser.py")
    if not parser_script_content:
        raise ValueError("Unable to find source to hcl2_parser.py wrapper script.")

    parser_content = FileContent(
        path="__pants_tf_parser.py",
        content=parser_script_content,
        is_executable=True,
    )
    parser_digest = await Get(Digest, CreateDigest([parser_content]))

    parser_pex = await Get(
        VenvPex,
        PexRequest(
            output_filename="tf_parser.pex",
            internal_only=True,
            requirements=hcl2_parser.pex_requirements(),
            interpreter_constraints=hcl2_parser.interpreter_constraints,
            main=EntryPoint(PurePath(parser_content.path).stem),
            sources=parser_digest,
        ),
    )

    return ParserSetup(parser_pex)
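# A hedged sketch of running the parser pex produced above. `VenvPexProcess` is
# the usual way to execute a `VenvPex`, but the `ParserSetup.pex` attribute name
# and the argv convention (one source path per file) are assumptions here.
async def example_parse_tf_sources(parser: ParserSetup, sources: SourceFiles) -> ProcessResult:
    return await Get(
        ProcessResult,
        VenvPexProcess(
            parser.pex,
            argv=sources.snapshot.files,
            input_digest=sources.snapshot.digest,
            description="Parse Terraform files for import discovery.",
        ),
    )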
async def setup_assembly_pre_compilation(
    request: AssemblyPreCompilationRequest,
) -> AssemblyPreCompilation:
    # From Go tooling comments:
    #
    # Supply an empty go_asm.h as if the compiler had been run. -symabis parsing is lax enough
    # that we don't need the actual definitions that would appear in go_asm.h.
    #
    # See https://go-review.googlesource.com/c/go/+/146999/8/src/cmd/go/internal/work/gc.go
    go_asm_h_digest = await Get(Digest, CreateDigest([FileContent("go_asm.h", b"")]))
    symabis_input_digest = await Get(
        Digest, MergeDigests([request.compilation_input, go_asm_h_digest])
    )
    symabis_result = await Get(
        ProcessResult,
        GoSdkProcess(
            input_digest=symabis_input_digest,
            command=(
                "tool",
                "asm",
                "-I",
                "go/pkg/include",
                "-gensymabis",
                "-o",
                "symabis",
                "--",
                *(f"./{request.source_files_subpath}/{name}" for name in request.s_files),
            ),
            description="Generate symabis metadata for assembly files.",
            output_files=("symabis",),
        ),
    )
    merged = await Get(
        Digest,
        MergeDigests([request.compilation_input, symabis_result.output_digest]),
    )

    assembly_results = await MultiGet(
        Get(
            ProcessResult,
            GoSdkProcess(
                input_digest=request.compilation_input,
                command=(
                    "tool",
                    "asm",
                    "-I",
                    "go/pkg/include",
                    "-o",
                    f"./{request.source_files_subpath}/{PurePath(s_file).with_suffix('.o')}",
                    f"./{request.source_files_subpath}/{s_file}",
                ),
                description=f"Assemble {s_file}",
                output_files=(
                    f"./{request.source_files_subpath}/{PurePath(s_file).with_suffix('.o')}",
                ),
            ),
        )
        for s_file in request.s_files
    )
    return AssemblyPreCompilation(
        merged, tuple(result.output_digest for result in assembly_results)
    )
def test_digest_subset_globs(self) -> None:
    subset_snapshot = self.request_product(
        Snapshot,
        [
            DigestSubset(
                self.generate_original_digest(),
                PathGlobs(("a.txt", "c.txt", "subdir2/**")),
            )
        ],
    )
    assert set(subset_snapshot.files) == {
        "a.txt",
        "c.txt",
        "subdir2/a.txt",
        "subdir2/nested_subdir/x.txt",
    }
    assert set(subset_snapshot.dirs) == {"subdir2", "subdir2/nested_subdir"}

    content = b"dummy content"
    subset_input = CreateDigest(
        (
            FileContent(path="a.txt", content=content),
            FileContent(path="c.txt", content=content),
            FileContent(path="subdir2/a.txt", content=content),
            FileContent(path="subdir2/nested_subdir/x.txt", content=content),
        )
    )
    subset_digest = self.request_product(Digest, [subset_input])
    assert subset_snapshot.digest == subset_digest
def test_add_prefix(self) -> None:
    digest = self.request_single_product(
        Digest,
        CreateDigest(
            (
                FileContent(path="main.py", content=b'print("from main")'),
                FileContent(path="subdir/sub.py", content=b'print("from sub")'),
            )
        ),
    )

    # Two components.
    output_digest = self.request_single_product(
        Digest, AddPrefix(digest, "outer_dir/middle_dir")
    )
    snapshot = self.request_single_product(Snapshot, output_digest)
    assert sorted(snapshot.files) == [
        "outer_dir/middle_dir/main.py",
        "outer_dir/middle_dir/subdir/sub.py",
    ]
    assert sorted(snapshot.dirs) == [
        "outer_dir",
        "outer_dir/middle_dir",
        "outer_dir/middle_dir/subdir",
    ]

    # Empty.
    output_digest = self.request_single_product(Digest, AddPrefix(digest, ""))
    assert digest == output_digest

    # Illegal.
    with self.assertRaisesRegex(
        Exception, r"Cannot add component .*ParentDir.* of path prefix `../something`."
    ):
        self.request_single_product(Digest, AddPrefix(digest, "../something"))
async def create_mock_repl_request(repl: MockRepl) -> ReplRequest:
    digest = await Get(Digest, CreateDigest([FileContent("repl.sh", b"exit 0")]))
    return ReplRequest(
        digest=digest,
        args=("/bin/bash", "repl.sh"),
        run_in_workspace=False,
    )
async def bsp_scala_compile_request(
    request: ScalaBSPCompileFieldSet,
    classpath_entry_request: ClasspathEntryRequestFactory,
) -> BSPCompileResult:
    coarsened_targets = await Get(CoarsenedTargets, Addresses([request.source.address]))
    assert len(coarsened_targets) == 1
    coarsened_target = coarsened_targets[0]
    resolve = await Get(CoursierResolveKey, CoarsenedTargets([coarsened_target]))

    result = await Get(
        FallibleClasspathEntry,
        ClasspathEntryRequest,
        classpath_entry_request.for_targets(component=coarsened_target, resolve=resolve),
    )
    _logger.info(f"scala compile result = {result}")
    output_digest = EMPTY_DIGEST
    if result.exit_code == 0 and result.output:
        entries = await Get(DigestEntries, Digest, result.output.digest)
        new_entries = [
            dataclasses.replace(entry, path=os.path.basename(entry.path)) for entry in entries
        ]
        flat_digest = await Get(Digest, CreateDigest(new_entries))
        output_digest = await Get(
            Digest, AddPrefix(flat_digest, f"jvm/resolves/{resolve.name}/lib")
        )

    return BSPCompileResult(
        status=StatusCode.ERROR if result.exit_code != 0 else StatusCode.OK,
        output_digest=output_digest,
    )
def test_add_prefix(self) -> None:
    digest = self.request(
        Digest,
        [
            CreateDigest(
                (
                    FileContent(path="main.py", content=b'print("from main")'),
                    FileContent(path="subdir/sub.py", content=b'print("from sub")'),
                )
            )
        ],
    )

    # Two components.
    output_digest = self.request(Digest, [AddPrefix(digest, "outer_dir/middle_dir")])
    snapshot = self.request(Snapshot, [output_digest])
    assert sorted(snapshot.files) == [
        "outer_dir/middle_dir/main.py",
        "outer_dir/middle_dir/subdir/sub.py",
    ]
    assert sorted(snapshot.dirs) == [
        "outer_dir",
        "outer_dir/middle_dir",
        "outer_dir/middle_dir/subdir",
    ]

    # Empty.
    output_digest = self.request(Digest, [AddPrefix(digest, "")])
    assert digest == output_digest

    # Illegal.
    with self.assertRaisesRegex(Exception, r"The `prefix` must be relative."):
        self.request(Digest, [AddPrefix(digest, "../something")])
def test_output_digest(rule_runner: RuleRunner, working_directory) -> None:
    # Test that the output files are relative to the working directory, both in how
    # they're specified and in their paths within the output_digest.
    input_digest = (
        rule_runner.request(
            Digest,
            [CreateDigest([Directory(working_directory)])],
        )
        if working_directory
        else EMPTY_DIGEST
    )
    process = Process(
        input_digest=input_digest,
        argv=("/bin/bash", "-c", "echo -n 'European Burmese' > roland"),
        description="echo roland",
        output_files=("roland",),
        working_directory=working_directory,
    )
    result = rule_runner.request(ProcessResult, [process])
    assert result.output_digest == Digest(
        fingerprint="63949aa823baf765eff07b946050d76ec0033144c785a94d3ebd82baa931cd16",
        serialized_bytes_length=80,
    )

    digest_contents = rule_runner.request(DigestContents, [result.output_digest])
    assert digest_contents == DigestContents([FileContent("roland", b"European Burmese", False)])
def test_raises_error_if_more_than_one_metadata_file(rule_runner: RuleRunner) -> None:
    digest = rule_runner.request(
        Digest,
        [
            CreateDigest(
                [
                    FileContent("Chart.yaml", HELM_CHART_FILE_V1_FULL.encode()),
                    FileContent("Chart.yml", HELM_CHART_FILE_V2_FULL.encode()),
                    FileContent(
                        os.path.join("foo", "Chart.yaml"), HELM_CHART_FILE_V1_FULL.encode()
                    ),
                ]
            )
        ],
    )

    with pytest.raises(ExecutionError, match="Found more than one Helm chart metadata file at"):
        rule_runner.request(
            HelmChartMetadata,
            [
                ParseHelmChartMetadataDigest(
                    digest,
                    description_of_origin="test_raises_error_if_more_than_one_metadata_file",
                    prefix="**",
                )
            ],
        )
def test_additional_inputs(
    rule_runner: RuleRunner, pex_type: type[Pex | VenvPex], internal_only: bool
) -> None:
    # We use Pex's --sources-directory option to add an extra source file to the PEX.
    # This verifies that the file was indeed provided as additional input to the pex call.
    extra_src_dir = "extra_src"
    data_file = os.path.join("data", "file")
    data = "42"

    additional_inputs = rule_runner.request(
        Digest,
        [
            CreateDigest(
                [FileContent(path=os.path.join(extra_src_dir, data_file), content=data.encode())]
            )
        ],
    )
    additional_pex_args = ("--sources-directory", extra_src_dir)
    pex_data = create_pex_and_get_all_data(
        rule_runner,
        pex_type=pex_type,
        internal_only=internal_only,
        additional_inputs=additional_inputs,
        additional_pex_args=additional_pex_args,
    )
    if pex_data.is_zipapp:
        with zipfile.ZipFile(pex_data.local_path, "r") as zipfp:
            with zipfp.open(data_file, "r") as datafp:
                data_file_content = datafp.read()
    else:
        with open(pex_data.local_path / data_file, "rb") as datafp:
            data_file_content = datafp.read()
    assert data == data_file_content.decode()
async def generate_from_file(request: GoCodegenBuildFilesRequest) -> FallibleBuildGoPackageRequest:
    content = dedent(
        """\
        package gen

        import "fmt"
        import "github.com/google/uuid"

        func Quote(s string) string {
            uuid.SetClockSequence(-1)  // A trivial line to use uuid.
            return fmt.Sprintf(">> %s <<", s)
        }
        """
    )
    digest = await Get(Digest, CreateDigest([FileContent("codegen/f.go", content.encode())]))

    deps = await Get(Addresses, DependenciesRequest(request.target[Dependencies]))
    assert len(deps) == 1
    assert deps[0].generated_name == "github.com/google/uuid"
    thirdparty_dep = await Get(FallibleBuildGoPackageRequest, BuildGoPackageTargetRequest(deps[0]))
    assert thirdparty_dep.request is not None

    return FallibleBuildGoPackageRequest(
        request=BuildGoPackageRequest(
            import_path="codegen.com/gen",
            digest=digest,
            dir_path="codegen",
            go_file_names=("f.go",),
            s_file_names=(),
            direct_dependencies=(thirdparty_dep.request,),
            minimum_go_version=None,
        ),
        import_path="codegen.com/gen",
    )
def test_pex_execution(rule_runner: RuleRunner) -> None:
    sources = rule_runner.request(
        Digest,
        [
            CreateDigest(
                (
                    FileContent("main.py", b'print("from main")'),
                    FileContent("subdir/sub.py", b'print("from sub")'),
                )
            ),
        ],
    )
    pex_output = create_pex_and_get_all_data(rule_runner, main=EntryPoint("main"), sources=sources)

    pex_files = pex_output["files"]
    assert "pex" not in pex_files
    assert "main.py" in pex_files
    assert "subdir/sub.py" in pex_files

    # This should run the Pex using the same interpreter used to create it. We must set the
    # `PATH` so that the shebang works.
    process = Process(
        argv=("./test.pex",),
        env={"PATH": os.getenv("PATH", "")},
        input_digest=pex_output["pex"].digest,
        description="Run the pex and make sure it works",
    )
    result = rule_runner.request(ProcessResult, [process])
    assert result.stdout == b"from main\n"
async def render_war_deployment_descriptor(
    request: RenderWarDeploymentDescriptorRequest,
) -> RenderedWarDeploymentDescriptor:
    descriptor_sources = await Get(
        HydratedSources,
        HydrateSourcesRequest(request.descriptor),
    )

    descriptor_sources_entries = await Get(
        DigestEntries, Digest, descriptor_sources.snapshot.digest
    )
    if len(descriptor_sources_entries) != 1:
        raise AssertionError(
            f"Expected `descriptor` field for {request.descriptor.address} to only refer to one file."
        )
    descriptor_entry = descriptor_sources_entries[0]
    if not isinstance(descriptor_entry, FileEntry):
        raise AssertionError(
            f"Expected `descriptor` field for {request.descriptor.address} to produce a file."
        )

    descriptor_digest = await Get(
        Digest,
        CreateDigest([FileEntry("__war__/WEB-INF/web.xml", descriptor_entry.file_digest)]),
    )

    return RenderedWarDeploymentDescriptor(descriptor_digest)
def test_digest_subset_globs(rule_runner: RuleRunner) -> None:
    subset_snapshot = rule_runner.request(
        Snapshot,
        [
            DigestSubset(
                generate_original_digest(rule_runner),
                PathGlobs(("a.txt", "c.txt", "subdir2/**")),
            )
        ],
    )
    assert set(subset_snapshot.files) == {
        "a.txt",
        "c.txt",
        "subdir2/a.txt",
        "subdir2/nested_subdir/x.txt",
    }
    assert set(subset_snapshot.dirs) == {"subdir2", "subdir2/nested_subdir"}

    expected_files = [
        FileContent(path, b"dummy content")
        for path in [
            "a.txt",
            "c.txt",
            "subdir2/a.txt",
            "subdir2/nested_subdir/x.txt",
        ]
    ]
    subset_digest = rule_runner.request(Digest, [CreateDigest(expected_files)])
    assert subset_snapshot.digest == subset_digest
async def setup_coursier(coursier_binary: CoursierBinary) -> Coursier:
    downloaded_coursier_get = Get(
        DownloadedExternalTool, ExternalToolRequest, coursier_binary.get_request(Platform.current)
    )
    wrapper_scripts_digest_get = Get(
        Digest,
        CreateDigest(
            [
                FileContent(
                    Coursier.wrapper_script,
                    COURSIER_WRAPPER_SCRIPT.encode("utf-8"),
                    is_executable=True,
                ),
                FileContent(
                    Coursier.post_processing_script,
                    COURSIER_POST_PROCESSING_SCRIPT.encode("utf-8"),
                    is_executable=True,
                ),
            ]
        ),
    )

    downloaded_coursier, wrapper_scripts_digest = await MultiGet(
        downloaded_coursier_get, wrapper_scripts_digest_get
    )

    return Coursier(
        coursier=downloaded_coursier,
        digest=await Get(
            Digest,
            MergeDigests(
                [
                    downloaded_coursier.digest,
                    wrapper_scripts_digest,
                ]
            ),
        ),
    )
def test_write_digest_workspace(rule_runner: RuleRunner) -> None:
    workspace = Workspace(rule_runner.scheduler, _enforce_effects=False)
    digest = rule_runner.request(
        Digest,
        [CreateDigest([FileContent("a.txt", b"hello"), FileContent("subdir/b.txt", b"goodbye")])],
    )

    path1 = Path(rule_runner.build_root, "a.txt")
    path2 = Path(rule_runner.build_root, "subdir/b.txt")
    assert not path1.is_file()
    assert not path2.is_file()

    workspace.write_digest(digest)
    assert path1.is_file()
    assert path2.is_file()
    assert path1.read_text() == "hello"
    assert path2.read_text() == "goodbye"

    workspace.write_digest(digest, path_prefix="prefix")
    path1 = Path(rule_runner.build_root, "prefix/a.txt")
    path2 = Path(rule_runner.build_root, "prefix/subdir/b.txt")
    assert path1.is_file()
    assert path2.is_file()
    assert path1.read_text() == "hello"
    assert path2.read_text() == "goodbye"
async def setup_parser(dockerfile_parser: DockerfileParser) -> ParserSetup:
    parser_script_content = pkgutil.get_data(_DOCKERFILE_PACKAGE, _DOCKERFILE_SANDBOX_TOOL)
    if not parser_script_content:
        # Note the `f` prefix: without it, the `{...}` placeholders would be
        # rendered literally in the error message.
        raise ValueError(
            f"Unable to find source to {_DOCKERFILE_SANDBOX_TOOL!r} in {_DOCKERFILE_PACKAGE}."
        )

    parser_content = FileContent(
        path="__pants_df_parser.py",
        content=parser_script_content,
        is_executable=True,
    )
    parser_digest = await Get(Digest, CreateDigest([parser_content]))

    parser_pex = await Get(
        VenvPex,
        PexRequest(
            output_filename="dockerfile_parser.pex",
            internal_only=True,
            requirements=dockerfile_parser.pex_requirements(),
            interpreter_constraints=dockerfile_parser.interpreter_constraints,
            main=EntryPoint(PurePath(parser_content.path).stem),
            sources=parser_digest,
        ),
    )

    return ParserSetup(parser_pex)
def get_digest(rule_runner: RuleRunner, source_files: dict[str, str]) -> Digest:
    files = [FileContent(path, content.encode()) for path, content in source_files.items()]
    return rule_runner.request(Digest, [CreateDigest(files)])
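# Example usage of the helper above (hypothetical paths and contents, shown only
# to illustrate the dict-of-sources convention):
#
#   digest = get_digest(rule_runner, {"src/app.py": "print('hi')", "src/util.py": ""})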
async def parse_python_imports(request: ParsePythonImportsRequest) -> ParsedPythonImports:
    script = _SCRIPT_FORMAT.format(min_dots=request.string_imports_min_dots).encode()
    python_interpreter, script_digest, stripped_sources = await MultiGet(
        Get(PythonExecutable, InterpreterConstraints, request.interpreter_constraints),
        Get(Digest, CreateDigest([FileContent("__parse_python_imports.py", script)])),
        Get(StrippedSourceFiles, SourceFilesRequest([request.source])),
    )

    # We operate on PythonSourceField, which should be one file.
    assert len(stripped_sources.snapshot.files) == 1
    file = stripped_sources.snapshot.files[0]

    input_digest = await Get(
        Digest, MergeDigests([script_digest, stripped_sources.snapshot.digest])
    )
    process_result = await Get(
        ProcessResult,
        Process(
            argv=[
                python_interpreter.path,
                "./__parse_python_imports.py",
                file,
            ],
            input_digest=input_digest,
            description=f"Determine Python imports for {request.source.address}",
            env={"STRING_IMPORTS": "y" if request.string_imports else "n"},
            level=LogLevel.DEBUG,
        ),
    )
    # See above for where we explicitly encoded as utf8. Even though utf8 is the
    # default for decode(), we make that explicit here for emphasis.
    return ParsedPythonImports(process_result.stdout.decode("utf8").strip().splitlines())
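# A hedged sketch of invoking the rule above from dependency-inference code. The
# request fields mirror what the rule reads (`source`, `interpreter_constraints`,
# `string_imports`, `string_imports_min_dots`); the exact constructor shape is an
# assumption for illustration, not confirmed API.
async def example_infer_imports(source_field, constraints) -> ParsedPythonImports:
    return await Get(
        ParsedPythonImports,
        ParsePythonImportsRequest(
            source_field,
            interpreter_constraints=constraints,
            string_imports=True,
            string_imports_min_dots=2,
        ),
    )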