Example #1
    def test_write_digest(self):
        # TODO(#8336): at some point, this test should require that Workspace only be invoked from
        #  an @goal_rule
        workspace = Workspace(self.scheduler)
        digest = self.request_product(
            Digest,
            [
                CreateDigest([
                    FileContent("a.txt", b"hello"),
                    FileContent("subdir/b.txt", b"goodbye")
                ])
            ],
        )

        path1 = Path(self.build_root, "a.txt")
        path2 = Path(self.build_root, "subdir/b.txt")
        assert not path1.is_file()
        assert not path2.is_file()

        workspace.write_digest(digest)
        assert path1.is_file()
        assert path2.is_file()

        workspace.write_digest(digest, path_prefix="prefix")
        assert Path(self.build_root, "prefix", "a.txt").is_file()
        assert Path(self.build_root, "prefix", "subdir/b.txt").is_file()
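Note: pathlib discards earlier components when joined with an absolute path, so the prefixed assertions must be rebuilt from relative parts (path1 and path2 are already absolute). A minimal content check in the same style, a sketch using only the fixtures defined in this test:

    workspace.write_digest(digest, path_prefix="prefix")
    assert Path(self.build_root, "prefix", "a.txt").read_text() == "hello"
    assert Path(self.build_root, "prefix", "subdir/b.txt").read_text() == "goodbye"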
Example #2
async def tailor(
    tailor_subsystem: TailorSubsystem,
    console: Console,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Tailor:
    putative_target_request_types = union_membership[PutativeTargetsRequest]
    putative_targets_results = await MultiGet(
        Get(PutativeTargets, PutativeTargetsRequest, req_type())
        for req_type in putative_target_request_types)
    putative_targets = PutativeTargets.merge(putative_targets_results)
    fixed_names_ptgts = await Get(UniquelyNamedPutativeTargets,
                                  PutativeTargets, putative_targets)
    fixed_sources_ptgts = await MultiGet(
        Get(DisjointSourcePutativeTarget, PutativeTarget, ptgt)
        for ptgt in fixed_names_ptgts.putative_targets)
    ptgts = [dspt.putative_target for dspt in fixed_sources_ptgts]

    if ptgts:
        edited_build_files = await Get(
            EditedBuildFiles,
            EditBuildFilesRequest(PutativeTargets(ptgts),
                                  tailor_subsystem.build_file_indent),
        )
        updated_build_files = set(edited_build_files.updated_paths)
        workspace.write_digest(edited_build_files.digest)
        ptgts_by_build_file = group_by_build_file(ptgts)
        for build_file_path, ptgts in ptgts_by_build_file.items():
            verb = "Updated" if build_file_path in updated_build_files else "Created"
            console.print_stdout(f"{verb} {console.blue(build_file_path)}:")
            for ptgt in ptgts:
                console.print_stdout(
                    f"  - Added {console.green(ptgt.type_alias)} target "
                    f"{console.cyan(ptgt.address.spec)}")
    return Tailor(0)
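The group_by_build_file helper is not part of this excerpt. A plausible equivalent is sketched below; the build_file_path attribute on PutativeTarget is an assumption, not confirmed by the code shown:

from collections import defaultdict

def group_by_build_file(ptgts):
    # Group putative targets by the BUILD file each one would be written to.
    grouped = defaultdict(list)
    for ptgt in ptgts:
        grouped[ptgt.build_file_path].append(ptgt)  # build_file_path: assumed attribute
    return grouped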
Example #3
async def workspace_goal_rule(
    console: Console, workspace: Workspace, digest_request: DigestRequest
) -> WorkspaceGoal:
    snapshot = await Get(Snapshot, CreateDigest, digest_request.create_digest)
    workspace.write_digest(snapshot.digest)
    console.print_stdout(snapshot.files[0], end="")
    return WorkspaceGoal(exit_code=0)
Example #4
async def handle_bsp_scalac_options_request(
    request: HandleScalacOptionsRequest,
    build_root: BuildRoot,
    workspace: Workspace,
) -> HandleScalacOptionsResult:
    targets = await Get(Targets, BuildTargetIdentifier, request.bsp_target_id)
    thirdparty_modules = await Get(
        ThirdpartyModules,
        ThirdpartyModulesRequest(Addresses(tgt.address for tgt in targets)))
    resolve = thirdparty_modules.resolve

    resolve_digest = await Get(
        Digest,
        AddPrefix(thirdparty_modules.merged_digest,
                  f"jvm/resolves/{resolve.name}/lib"))

    workspace.write_digest(resolve_digest, path_prefix=".pants.d/bsp")

    classpath = tuple(
        build_root.pathlib_path.joinpath(
            f".pants.d/bsp/jvm/resolves/{resolve.name}/lib/{filename}"
        ).as_uri()
        for cp_entry in thirdparty_modules.entries.values()
        for filename in cp_entry.filenames)

    return HandleScalacOptionsResult(
        ScalacOptionsItem(
            target=request.bsp_target_id,
            options=(),
            classpath=classpath,
            class_directory=build_root.pathlib_path.joinpath(
                f".pants.d/bsp/{jvm_classes_directory(request.bsp_target_id)}"
            ).as_uri(),
        ))
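A recurring idiom in these examples: AddPrefix rewrites every path inside a digest under a directory, while write_digest's path_prefix shifts the digest again at write time, relative to the build root. The two compose, so the files above land at <build_root>/.pants.d/bsp/jvm/resolves/<resolve>/lib/<file>. Schematically (a sketch reusing the names from this example):

# Digest paths become jvm/resolves/<name>/lib/<file>...
prefixed = await Get(Digest, AddPrefix(thirdparty_modules.merged_digest,
                                       f"jvm/resolves/{resolve.name}/lib"))
# ...and write_digest then places them under .pants.d/bsp/ in the build root.
workspace.write_digest(prefixed, path_prefix=".pants.d/bsp")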
Example #5
async def package_asset(workspace: Workspace, dist_dir: DistDir) -> Package:
    target_roots_to_field_sets = await Get(
        TargetRootsToFieldSets,
        TargetRootsToFieldSetsRequest(
            PackageFieldSet,
            goal_description="the `package` goal",
            no_applicable_targets_behavior=NoApplicableTargetsBehavior.warn,
        ),
    )
    if not target_roots_to_field_sets.field_sets:
        return Package(exit_code=0)

    packages = await MultiGet(
        Get(BuiltPackage, PackageFieldSet, field_set)
        for field_set in target_roots_to_field_sets.field_sets)
    merged_digest = await Get(Digest,
                              MergeDigests(pkg.digest for pkg in packages))
    workspace.write_digest(merged_digest, path_prefix=str(dist_dir.relpath))
    for pkg in packages:
        for artifact in pkg.artifacts:
            msg = []
            if artifact.relpath:
                msg.append(f"Wrote {dist_dir.relpath / artifact.relpath}")
            msg.extend(str(line) for line in artifact.extra_log_lines)
            if msg:
                logger.info("\n".join(msg))
    return Package(exit_code=0)
Example #6
async def coursier_resolve_lockfiles(
    console: Console,
    targets: Targets,
    resolve_subsystem: CoursierResolveSubsystem,
    workspace: Workspace,
) -> CoursierResolve:
    jvm_lockfile_targets = Targets(
        target for target in targets if target.has_field(JvmLockfileSources)
    )
    results = await MultiGet(
        Get(CoursierGenerateLockfileResult, CoursierGenerateLockfileRequest(target=target))
        for target in jvm_lockfile_targets
    )
    # For performance reasons, avoid writing out files to the workspace that haven't changed.
    results_to_write = tuple(result for result in results if result.digest != EMPTY_DIGEST)
    if results_to_write:
        merged_digest = await Get(
            Digest, MergeDigests(result.digest for result in results_to_write)
        )
        workspace.write_digest(merged_digest)
        merged_digest_snapshot = await Get(Snapshot, Digest, merged_digest)
        for path in merged_digest_snapshot.files:
            console.print_stderr(f"Updated lockfile at: {path}")

    return CoursierResolve(exit_code=0)
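The EMPTY_DIGEST comparison is a cheap change-detection guard: an unchanged result carries the empty digest, so filtering first avoids rewriting unchanged files and skips the write entirely when nothing changed. Distilled to its core (a sketch for use inside a rule body; the pants.engine.fs import path is an assumption about the Pants version):

from pants.engine.fs import EMPTY_DIGEST, Digest, MergeDigests

changed = [result.digest for result in results if result.digest != EMPTY_DIGEST]
if changed:
    merged = await Get(Digest, MergeDigests(changed))  # only valid inside an async rule
    workspace.write_digest(merged)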
Example #7
async def export(
    console: Console,
    targets: Targets,
    export_subsystem: ExportSubsystem,
    workspace: Workspace,
    union_membership: UnionMembership,
    build_root: BuildRoot,
    dist_dir: DistDir,
) -> Export:
    request_types = cast("Iterable[type[ExportableDataRequest]]",
                         union_membership.get(ExportableDataRequest))
    requests = tuple(request_type(targets) for request_type in request_types)
    exportables = await MultiGet(
        Get(ExportableData, ExportableDataRequest, request)
        for request in requests)
    prefixed_digests = await MultiGet(
        Get(Digest, AddPrefix(exp.digest, exp.reldir)) for exp in exportables)
    output_dir = os.path.join(str(dist_dir.relpath), "export")
    merged_digest = await Get(Digest, MergeDigests(prefixed_digests))
    dist_digest = await Get(Digest, AddPrefix(merged_digest, output_dir))
    workspace.write_digest(dist_digest)
    for exp in exportables:
        for symlink in exp.symlinks:
            # Note that if symlink.source_path is an abspath, join returns it unchanged.
            source_abspath = os.path.join(build_root.path, symlink.source_path)
            link_abspath = os.path.abspath(
                os.path.join(output_dir, exp.reldir, symlink.link_rel_path))
            absolute_symlink(source_abspath, link_abspath)
        console.print_stdout(
            f"Wrote {exp.description} to {os.path.join(output_dir, exp.reldir)}"
        )
    return Export(exit_code=0)
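absolute_symlink is not defined in this excerpt (in Pants it lives in pants.util.dirutil). A rough standalone equivalent, assuming any existing entry at the link path is a file or symlink (a sketch, not the actual implementation):

import os

def absolute_symlink(source_path: str, link_path: str) -> None:
    # Force-create a symlink at link_path pointing at source_path.
    os.makedirs(os.path.dirname(link_path), exist_ok=True)
    if os.path.lexists(link_path):
        os.remove(link_path)
    os.symlink(source_path, link_path)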
Example #8
async def bsp_compile_request(
    request: CompileParams,
    workspace: Workspace,
) -> CompileResult:
    bsp_targets = await MultiGet(
        Get(BSPBuildTargetInternal, BuildTargetIdentifier, bsp_target_id)
        for bsp_target_id in request.targets
    )

    compile_results = await MultiGet(
        Get(
            BSPCompileResult,
            CompileOneBSPTargetRequest(
                bsp_target=bsp_target,
                origin_id=request.origin_id,
                arguments=request.arguments,
            ),
        )
        for bsp_target in bsp_targets
    )

    output_digest = await Get(Digest, MergeDigests([r.output_digest for r in compile_results]))
    if output_digest != EMPTY_DIGEST:
        workspace.write_digest(output_digest, path_prefix=".pants.d/bsp")

    status_code = StatusCode.OK
    if any(r.status != StatusCode.OK for r in compile_results):
        status_code = StatusCode.ERROR

    return CompileResult(
        origin_id=request.origin_id,
        status_code=status_code.value,
    )
Example #9
async def create_awslambda(
    console: Console,
    awslambda_subsystem: AWSLambdaSubsystem,
    distdir: DistDir,
    workspace: Workspace,
) -> AWSLambdaGoal:
    targets_to_valid_field_sets = await Get(
        TargetsToValidFieldSets,
        TargetsToValidFieldSetsRequest(
            AWSLambdaFieldSet,
            goal_description="the `awslambda` goal",
            error_if_no_valid_targets=True,
        ),
    )
    awslambdas = await MultiGet(
        Get(CreatedAWSLambda, AWSLambdaFieldSet, field_set)
        for field_set in targets_to_valid_field_sets.field_sets)
    merged_digest = await Get(
        Digest, MergeDigests(awslambda.digest for awslambda in awslambdas))
    workspace.write_digest(merged_digest, path_prefix=str(distdir.relpath))
    with awslambda_subsystem.line_oriented(console) as print_stdout:
        for awslambda in awslambdas:
            output_path = distdir.relpath / awslambda.zip_file_relpath
            print_stdout(
                dedent(f"""\
                    Wrote code bundle to {output_path}
                      Runtime: {awslambda.runtime}
                      Handler: {awslambda.handler}
                    """))
    return AWSLambdaGoal(exit_code=0)
Example #10
async def package_asset(workspace: Workspace, dist_dir: DistDir) -> Package:
    target_roots_to_field_sets = await Get(
        TargetRootsToFieldSets,
        TargetRootsToFieldSetsRequest(
            PackageFieldSet,
            goal_description="the `package` goal",
            error_if_no_applicable_targets=True,
        ),
    )
    packages = await MultiGet(
        Get(BuiltPackage, PackageFieldSet, field_set)
        for field_set in target_roots_to_field_sets.field_sets)
    merged_snapshot = await Get(Snapshot,
                                MergeDigests(pkg.digest for pkg in packages))
    workspace.write_digest(merged_snapshot.digest,
                           path_prefix=str(dist_dir.relpath))
    for pkg in packages:
        for artifact in pkg.artifacts:
            msg = ""
            if artifact.relpath:
                msg += f"Wrote {dist_dir.relpath / artifact.relpath}"
            for line in artifact.extra_log_lines:
                msg += f"\n{line}"
            if msg:
                logger.info(msg)
    return Package(exit_code=0)
Example #11
def test_write_digest() -> None:
    rule_runner = RuleRunner()

    workspace = Workspace(rule_runner.scheduler)
    digest = rule_runner.request(
        Digest,
        [
            CreateDigest([
                FileContent("a.txt", b"hello"),
                FileContent("subdir/b.txt", b"goodbye")
            ])
        ],
    )

    path1 = Path(rule_runner.build_root, "a.txt")
    path2 = Path(rule_runner.build_root, "subdir/b.txt")
    assert not path1.is_file()
    assert not path2.is_file()

    workspace.write_digest(digest)
    assert path1.is_file()
    assert path2.is_file()

    workspace.write_digest(digest, path_prefix="prefix")
    assert Path(rule_runner.build_root, "prefix", "a.txt").is_file()
    assert Path(rule_runner.build_root, "prefix", "subdir/b.txt").is_file()
Example #12
def test_write_digest_workspace(rule_runner: RuleRunner) -> None:
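    # _enforce_effects=False lets the test call write_digest directly, outside an
    # @goal_rule, without tripping the engine's side-effect enforcement (an
    # assumption consistent with the TODO in Example #1 above).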
    workspace = Workspace(rule_runner.scheduler, _enforce_effects=False)
    digest = rule_runner.request(
        Digest,
        [CreateDigest([FileContent("a.txt", b"hello"), FileContent("subdir/b.txt", b"goodbye")])],
    )

    path1 = Path(rule_runner.build_root, "a.txt")
    path2 = Path(rule_runner.build_root, "subdir/b.txt")
    assert not path1.is_file()
    assert not path2.is_file()

    workspace.write_digest(digest)
    assert path1.is_file()
    assert path2.is_file()
    assert path1.read_text() == "hello"
    assert path2.read_text() == "goodbye"

    workspace.write_digest(digest, path_prefix="prefix")
    path1 = Path(rule_runner.build_root, "prefix/a.txt")
    path2 = Path(rule_runner.build_root, "prefix/subdir/b.txt")
    assert path1.is_file()
    assert path2.is_file()
    assert path1.read_text() == "hello"
    assert path2.read_text() == "goodbye"
Example #13
async def bsp_resources_request(
    request: ResourcesParams,
    workspace: Workspace,
) -> ResourcesResult:
    bsp_targets = await MultiGet(
        Get(BSPBuildTargetInternal, BuildTargetIdentifier, bsp_target_id)
        for bsp_target_id in request.targets
    )

    resources_results = await MultiGet(
        Get(
            BSPResourcesResult,
            ResourcesForOneBSPTargetRequest(
                bsp_target=bsp_target,
            ),
        )
        for bsp_target in bsp_targets
    )

    # TODO: Need to determine how resources are expected to be exposed. Directories? Individual files?
    # Initially, it looks like loose directories.
    output_digest = await Get(Digest, MergeDigests([r.output_digest for r in resources_results]))
    if output_digest != EMPTY_DIGEST:
        workspace.write_digest(output_digest, path_prefix=".pants.d/bsp")

    return ResourcesResult(
        tuple(
            ResourcesItem(
                target,
                rr.resources,
            )
            for target, rr in zip(request.targets, resources_results)
        )
    )
Example #14
async def run(
    run_subsystem: RunSubsystem,
    global_options: GlobalOptions,
    workspace: Workspace,
    build_root: BuildRoot,
    complete_env: CompleteEnvironment,
) -> Run:
    targets_to_valid_field_sets = await Get(
        TargetRootsToFieldSets,
        TargetRootsToFieldSetsRequest(
            RunFieldSet,
            goal_description="the `run` goal",
            no_applicable_targets_behavior=NoApplicableTargetsBehavior.error,
            expect_single_field_set=True,
        ),
    )
    field_set = targets_to_valid_field_sets.field_sets[0]
    request = await Get(RunRequest, RunFieldSet, field_set)
    wrapped_target = await Get(
        WrappedTarget,
        WrappedTargetRequest(field_set.address,
                             description_of_origin="<infallible>"))
    restartable = wrapped_target.target.get(RestartableField).value
    # Cleanup is the default, so we want to preserve the chroot if either option is off.
    cleanup = run_subsystem.cleanup and global_options.process_cleanup

    with temporary_dir(root_dir=global_options.pants_workdir,
                       cleanup=cleanup) as tmpdir:
        if not cleanup:
            logger.info(f"Preserving running binary chroot {tmpdir}")
        workspace.write_digest(
            request.digest,
            path_prefix=PurePath(tmpdir).relative_to(
                build_root.path).as_posix(),
            # We don't want to influence whether the InteractiveProcess is able to restart. Because
            # we're writing into a temp directory, we can safely mark this side_effecting=False.
            side_effecting=False,
        )

        args = (arg.format(chroot=tmpdir) for arg in request.args)
        env = {
            **complete_env,
            **{
                k: v.format(chroot=tmpdir)
                for k, v in request.extra_env.items()
            }
        }
        result = await Effect(
            InteractiveProcessResult,
            InteractiveProcess(
                argv=(*args, *run_subsystem.args),
                env=env,
                run_in_workspace=True,
                restartable=restartable,
            ),
        )
        exit_code = result.exit_code

    return Run(exit_code)
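Note the {chroot} substitution above: RunRequest args and extra_env values may embed a literal {chroot} placeholder, which str.format resolves to the temporary directory the digest was just written into. Plain-Python illustration (hypothetical values):

args = ["python", "{chroot}/main.py"]   # hypothetical RunRequest args
tmpdir = "/tmp/pants-sandbox-abc123"    # hypothetical chroot path
resolved = [arg.format(chroot=tmpdir) for arg in args]
# resolved == ["python", "/tmp/pants-sandbox-abc123/main.py"]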
Example #15
    def materialize(self, console: Console, workspace: Workspace) -> None:
        if not self.results_file:
            return
        output_path = self.results_file.output_path
        workspace.write_digest(self.results_file.digest,
                               path_prefix=output_path.parent.as_posix())
        console.print_stdout(
            f"Wrote {self.linter_name} report to: {output_path.as_posix()}")
Example #16
    def materialize(self, console: Console, workspace: Workspace) -> Optional[PurePath]:
        workspace.write_digest(
            self.result_snapshot.digest, path_prefix=str(self.directory_to_materialize_to)
        )
        console.print_stderr(
            f"\nWrote {self.report_type} coverage report to `{self.directory_to_materialize_to}`"
        )
        return self.report_file
Example #17
async def export_codegen(
    targets: Targets,
    union_membership: UnionMembership,
    workspace: Workspace,
    dist_dir: DistDir,
    registered_target_types: RegisteredTargetTypes,
) -> ExportCodegen:
    # We run all possible code generators. Running codegen requires specifying the expected
    # output_type, so we must inspect what is possible to generate.
    all_generate_request_types = union_membership.get(GenerateSourcesRequest)
    inputs_to_outputs = {
        req.input: req.output
        for req in all_generate_request_types
    }
    codegen_sources_fields_with_output = []
    for tgt in targets:
        if not tgt.has_field(SourcesField):
            continue
        sources = tgt[SourcesField]
        for input_type in inputs_to_outputs:
            if isinstance(sources, input_type):
                output_type = inputs_to_outputs[input_type]
                codegen_sources_fields_with_output.append(
                    (sources, output_type))

    if not codegen_sources_fields_with_output:
        codegen_targets = sorted({
            tgt_type.alias
            for tgt_type in registered_target_types.types
            for input_sources in inputs_to_outputs.keys()
            if tgt_type.class_has_field(input_sources,
                                        union_membership=union_membership)
        })
        logger.warning(
            "No codegen files/targets matched. All codegen target types: "
            f"{', '.join(codegen_targets)}")
        return ExportCodegen(exit_code=0)

    all_hydrated_sources = await MultiGet(
        Get(
            HydratedSources,
            HydrateSourcesRequest(
                sources_and_output_type[0],
                for_sources_types=(sources_and_output_type[1], ),
                enable_codegen=True,
            ),
        ) for sources_and_output_type in codegen_sources_fields_with_output)

    merged_digest = await Get(
        Digest,
        MergeDigests(hydrated_sources.snapshot.digest
                     for hydrated_sources in all_hydrated_sources),
    )

    dest = str(dist_dir.relpath / "codegen")
    logger.info(f"Writing generated files to {dest}")
    workspace.write_digest(merged_digest, path_prefix=dest)
    return ExportCodegen(exit_code=0)
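The inputs_to_outputs mapping drives dispatch: each GenerateSourcesRequest subclass declares the sources-field type it consumes and the type it produces, and a target participates if its sources field is an instance of a registered input. Illustratively (the request and field classes below are hypothetical; only the input/output attribute names come from the excerpt):

class GenerateFortranFromAvroRequest(GenerateSourcesRequest):  # hypothetical
    input = AvroSourcesField       # hypothetical SourcesField subclass
    output = FortranSourcesField   # hypothetical SourcesField subclass

# inputs_to_outputs would then map AvroSourcesField -> FortranSourcesField, and
# any target whose SourcesField is an AvroSourcesField gets code generated.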
Example #18
async def run_repl(
    console: Console,
    workspace: Workspace,
    repl_subsystem: ReplSubsystem,
    all_specified_addresses: Addresses,
    build_root: BuildRoot,
    union_membership: UnionMembership,
    global_options: GlobalOptions,
    complete_env: CompleteEnvironment,
) -> Repl:
    transitive_targets = await Get(
        TransitiveTargets, TransitiveTargetsRequest(all_specified_addresses))

    # TODO: When we support multiple languages, detect the default repl to use based
    #  on the targets.  For now we default to the python repl.
    repl_shell_name = repl_subsystem.shell or "python"
    implementations = {
        impl.name: impl
        for impl in union_membership[ReplImplementation]
    }
    repl_implementation_cls = implementations.get(repl_shell_name)
    if repl_implementation_cls is None:
        available = sorted(implementations.keys())
        console.print_stderr(
            f"{repr(repl_shell_name)} is not a registered REPL. Available REPLs (which may "
            f"be specified through the option `--repl-shell`): {available}")
        return Repl(-1)

    with temporary_dir(root_dir=global_options.options.pants_workdir,
                       cleanup=False) as tmpdir:
        repl_impl = repl_implementation_cls(
            targets=Targets(transitive_targets.closure), chroot=tmpdir)
        request = await Get(ReplRequest, ReplImplementation, repl_impl)

        workspace.write_digest(
            request.digest,
            path_prefix=PurePath(tmpdir).relative_to(
                build_root.path).as_posix(),
            # We don't want to influence whether the InteractiveProcess is able to restart. Because
            # we're writing into a temp directory, we can safely mark this side_effecting=False.
            side_effecting=False,
        )
        env = {**complete_env, **request.extra_env}
        result = await Effect(
            InteractiveProcessResult,
            InteractiveProcess(
                argv=request.args,
                env=env,
                run_in_workspace=True,
                restartable=repl_subsystem.restartable,
            ),
        )
    return Repl(result.exit_code)
Example #19
async def generate_user_lockfile_goal(
    addresses: Addresses,
    python_setup: PythonSetup,
    workspace: Workspace,
) -> GenerateUserLockfileGoal:
    if python_setup.lockfile is None:
        logger.warning(
            "You ran `./pants generate-user-lockfile`, but `[python].experimental_lockfile` "
            "is not set. Please set this option to the path where you'd like the lockfile for "
            "your code's dependencies to live."
        )
        return GenerateUserLockfileGoal(exit_code=1)

    transitive_targets = await Get(TransitiveTargets, TransitiveTargetsRequest(addresses))
    reqs = PexRequirements.create_from_requirement_fields(
        tgt[PythonRequirementsField]
        # NB: By looking at the dependencies, rather than the closure, we only generate for
        # requirements that are actually used in the project.
        for tgt in transitive_targets.dependencies
        if tgt.has_field(PythonRequirementsField)
    )

    if not reqs:
        logger.warning(
            "No third-party requirements found for the transitive closure, so a lockfile will not "
            "be generated."
        )
        return GenerateUserLockfileGoal(exit_code=0)

    result = await Get(
        PythonLockfile,
        PythonLockfileRequest(
            reqs.req_strings,
            # TODO(#12314): Use interpreter constraints from the transitive closure.
            InterpreterConstraints(python_setup.interpreter_constraints),
            resolve_name="not yet implemented",
            lockfile_dest=python_setup.lockfile,
            _description=(
                f"Generate lockfile for {pluralize(len(reqs.req_strings), 'requirement')}: "
                f"{', '.join(reqs.req_strings)}"
            ),
            # TODO(12382): Make this command actually accurate once we figure out the semantics
            #  for user lockfiles. This is currently misleading.
            _regenerate_command="./pants generate-user-lockfile ::",
        ),
    )
    workspace.write_digest(result.digest)
    logger.info(f"Wrote lockfile to {result.path}")

    return GenerateUserLockfileGoal(exit_code=0)
Example #20
async def bsp_dependency_modules(
        request: DependencyModulesParams,
        workspace: Workspace) -> DependencyModulesResult:
    responses = await MultiGet(
        Get(ResolveOneDependencyModuleResult,
            ResolveOneDependencyModuleRequest(btgt))
        for btgt in request.targets)
    output_digest = await Get(Digest,
                              MergeDigests([r.digest for r in responses]))
    workspace.write_digest(output_digest, path_prefix=".pants.d/bsp")
    return DependencyModulesResult(
        tuple(
            DependencyModulesItem(target=r.bsp_target_id, modules=r.modules)
            for r in responses))
Example #21
async def run(
    run_subsystem: RunSubsystem,
    global_options: GlobalOptions,
    console: Console,
    interactive_runner: InteractiveRunner,
    workspace: Workspace,
    build_root: BuildRoot,
    complete_env: CompleteEnvironment,
) -> Run:
    targets_to_valid_field_sets = await Get(
        TargetRootsToFieldSets,
        TargetRootsToFieldSetsRequest(
            RunFieldSet,
            goal_description="the `run` goal",
            no_applicable_targets_behavior=NoApplicableTargetsBehavior.error,
            expect_single_field_set=True,
        ),
    )
    field_set = targets_to_valid_field_sets.field_sets[0]
    request = await Get(RunRequest, RunFieldSet, field_set)

    with temporary_dir(root_dir=global_options.options.pants_workdir,
                       cleanup=True) as tmpdir:
        workspace.write_digest(request.digest,
                               path_prefix=PurePath(tmpdir).relative_to(
                                   build_root.path).as_posix())

        args = (arg.format(chroot=tmpdir) for arg in request.args)
        env = {
            **complete_env,
            **{
                k: v.format(chroot=tmpdir)
                for k, v in request.extra_env.items()
            }
        }
        try:
            result = interactive_runner.run(
                InteractiveProcess(
                    argv=(*args, *run_subsystem.args),
                    env=env,
                    run_in_workspace=True,
                ))
            exit_code = result.exit_code
        except Exception as e:
            console.print_stderr(
                f"Exception when attempting to run {field_set.address}: {e!r}")
            exit_code = -1

    return Run(exit_code)
Example #22
async def bsp_workspace_build_targets(
    _: WorkspaceBuildTargetsParams,
    bsp_build_targets: BSPBuildTargets,
    workspace: Workspace,
) -> WorkspaceBuildTargetsResult:
    bsp_target_results = await MultiGet(
        Get(GenerateOneBSPBuildTargetResult,
            GenerateOneBSPBuildTargetRequest(target_internal))
        for target_internal in bsp_build_targets.targets_mapping.values())
    digest = await Get(Digest,
                       MergeDigests([r.digest for r in bsp_target_results]))
    if digest != EMPTY_DIGEST:
        workspace.write_digest(digest, path_prefix=".pants.d/bsp")

    return WorkspaceBuildTargetsResult(
        targets=tuple(r.build_target for r in bsp_target_results))
Example #23
async def run_go_resolve(targets: UnexpandedTargets,
                         workspace: Workspace) -> GoResolveGoal:
    # TODO: Use MultiGet to resolve the go_module targets.
    # TODO: Combine all of the go.sum's into a single Digest to write.
    for target in targets:
        if target.has_field(GoModuleSources):
            resolved_go_module = await Get(
                ResolvedGoModule, ResolveGoModuleRequest(target.address))
            # TODO: Only update the files if they actually changed.
            workspace.write_digest(resolved_go_module.digest,
                                   path_prefix=target.address.spec_path)
            logger.info(f"{target.address}: Updated go.mod and go.sum.\n")
        else:
            logger.info(
                f"{target.address}: Skipping because target is not a `go_module`.\n"
            )
    return GoResolveGoal(exit_code=0)
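The two TODOs point at a concurrent rewrite: resolve every go_module target with MultiGet, re-prefix each digest to its target's directory, and issue a single write. A hedged sketch built from the names in this example (AddPrefix and MergeDigests imports assumed from pants.engine.fs):

go_module_targets = [tgt for tgt in targets if tgt.has_field(GoModuleSources)]
resolved = await MultiGet(
    Get(ResolvedGoModule, ResolveGoModuleRequest(tgt.address))
    for tgt in go_module_targets)
prefixed = await MultiGet(
    Get(Digest, AddPrefix(res.digest, tgt.address.spec_path))
    for tgt, res in zip(go_module_targets, resolved))
merged = await Get(Digest, MergeDigests(prefixed))
workspace.write_digest(merged)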
Example #24
async def internal_render_test_lockfile_fixtures(
    rendered_fixtures: RenderedJVMLockfileFixtures,
    workspace: Workspace,
    console: Console,
) -> InternalGenerateTestLockfileFixturesGoal:
    if not rendered_fixtures:
        console.write_stdout("No test lockfile fixtures found.\n")
        return InternalGenerateTestLockfileFixturesGoal(exit_code=0)

    digest_contents = [
        FileContent(rendered_fixture.path, rendered_fixture.content)
        for rendered_fixture in rendered_fixtures
    ]
    snapshot = await Get(Snapshot, CreateDigest(digest_contents))
    console.write_stdout(f"Writing test lockfile fixtures: {snapshot.files}\n")
    workspace.write_digest(snapshot.digest)
    return InternalGenerateTestLockfileFixturesGoal(exit_code=0)
Example #25
async def create_binary(workspace: Workspace, dist_dir: DistDir) -> Binary:
    targets_to_valid_field_sets = await Get(
        TargetsToValidFieldSets,
        TargetsToValidFieldSetsRequest(BinaryFieldSet,
                                       goal_description="the `binary` goal",
                                       error_if_no_valid_targets=True),
    )
    binaries = await MultiGet(
        Get(CreatedBinary, BinaryFieldSet, field_set)
        for field_set in targets_to_valid_field_sets.field_sets)
    merged_snapshot = await Get(
        Snapshot, MergeDigests(binary.digest for binary in binaries))
    workspace.write_digest(merged_snapshot.digest,
                           path_prefix=str(dist_dir.relpath))
    for path in merged_snapshot.files:
        logger.info(f"Wrote {dist_dir.relpath / path}")
    return Binary(exit_code=0)
Example #26
async def handle_bsp_scalac_options_request(
    request: HandleScalacOptionsRequest,
    build_root: BuildRoot,
    workspace: Workspace,
) -> HandleScalacOptionsResult:
    bsp_target = await Get(BSPBuildTargetInternal, BuildTargetIdentifier,
                           request.bsp_target_id)
    targets = await Get(
        Targets,
        AddressSpecs,
        bsp_target.specs.address_specs,
    )
    coarsened_targets = await Get(CoarsenedTargets,
                                  Addresses(tgt.address for tgt in targets))
    resolve = await Get(CoursierResolveKey, CoarsenedTargets,
                        coarsened_targets)
    lockfile = await Get(CoursierResolvedLockfile, CoursierResolveKey, resolve)

    resolve_digest = await Get(
        Digest,
        CreateDigest([
            FileEntry(entry.file_name, entry.file_digest)
            for entry in lockfile.entries
        ]),
    )

    resolve_digest = await Get(
        Digest, AddPrefix(resolve_digest, f"jvm/resolves/{resolve.name}/lib"))

    workspace.write_digest(resolve_digest, path_prefix=".pants.d/bsp")

    classpath = [
        build_root.pathlib_path.joinpath(
            f".pants.d/bsp/jvm/resolves/{resolve.name}/lib/{entry.file_name}"
        ).as_uri()
        for entry in lockfile.entries
    ]

    return HandleScalacOptionsResult(
        ScalacOptionsItem(
            target=request.bsp_target_id,
            options=(),
            classpath=tuple(classpath),
            class_directory=build_root.pathlib_path.joinpath(
                f".pants.d/bsp/jvm/resolves/{resolve.name}/classes").as_uri(),
        ))
Example #27
async def run_repl(
    console: Console,
    workspace: Workspace,
    interactive_runner: InteractiveRunner,
    repl_subsystem: ReplSubsystem,
    all_specified_addresses: Addresses,
    build_root: BuildRoot,
    union_membership: UnionMembership,
    global_options: GlobalOptions,
) -> Repl:
    transitive_targets = await Get(
        TransitiveTargets, TransitiveTargetsRequest(all_specified_addresses))

    # TODO: When we support multiple languages, detect the default repl to use based
    #  on the targets.  For now we default to the python repl.
    repl_shell_name = repl_subsystem.shell or "python"

    implementations: Dict[str, Type[ReplImplementation]] = {
        impl.name: impl
        for impl in union_membership[ReplImplementation]
    }
    repl_implementation_cls = implementations.get(repl_shell_name)
    if repl_implementation_cls is None:
        available = sorted(implementations.keys())
        console.print_stderr(
            f"{repr(repl_shell_name)} is not a registered REPL. Available REPLs (which may "
            f"be specified through the option `--repl-shell`): {available}")
        return Repl(-1)

    with temporary_dir(root_dir=global_options.options.pants_workdir,
                       cleanup=False) as tmpdir:
        repl_impl = repl_implementation_cls(
            targets=Targets(transitive_targets.closure), chroot=tmpdir)
        request = await Get(ReplRequest, ReplImplementation, repl_impl)

        workspace.write_digest(request.digest,
                               path_prefix=PurePath(tmpdir).relative_to(
                                   build_root.path).as_posix())
        result = interactive_runner.run(
            InteractiveProcess(argv=request.args,
                               env=request.extra_env,
                               run_in_workspace=True,
                               hermetic_env=False))
    return Repl(result.exit_code)
Example #28
async def run_repl(
    console: Console,
    workspace: Workspace,
    interactive_runner: InteractiveRunner,
    repl_subsystem: ReplSubsystem,
    transitive_targets: TransitiveTargets,
    build_root: BuildRoot,
    union_membership: UnionMembership,
    global_options: GlobalOptions,
) -> Repl:
    repl_shell_name = repl_subsystem.shell or "python"

    implementations: Dict[str, Type[ReplImplementation]] = {
        impl.name: impl
        for impl in union_membership[ReplImplementation]
    }
    repl_implementation_cls = implementations.get(repl_shell_name)
    if repl_implementation_cls is None:
        available = sorted(implementations.keys())
        console.print_stderr(
            f"{repr(repl_shell_name)} is not a registered REPL. Available REPLs (which may "
            f"be specified through the option `--repl-shell`): {available}")
        return Repl(-1)

    repl_impl = repl_implementation_cls(targets=Targets(
        tgt for tgt in transitive_targets.closure
        if repl_implementation_cls.is_valid(tgt)))
    request = await Get(ReplRequest, ReplImplementation, repl_impl)

    with temporary_dir(root_dir=global_options.options.pants_workdir,
                       cleanup=False) as tmpdir:
        tmpdir_relative_path = PurePath(tmpdir).relative_to(
            build_root.path).as_posix()
        exe_path = PurePath(tmpdir, request.binary_name).as_posix()
        workspace.write_digest(request.digest,
                               path_prefix=tmpdir_relative_path)
        result = interactive_runner.run(
            InteractiveProcess(argv=(exe_path, ),
                               env=request.env,
                               run_in_workspace=True))

    return Repl(result.exit_code)
Example #29
async def export(
    console: Console,
    targets: Targets,
    workspace: Workspace,
    union_membership: UnionMembership,
    build_root: BuildRoot,
    dist_dir: DistDir,
) -> Export:
    request_types = cast("Iterable[type[ExportRequest]]",
                         union_membership.get(ExportRequest))
    requests = tuple(request_type(targets) for request_type in request_types)
    all_results = await MultiGet(
        Get(ExportResults, ExportRequest, request) for request in requests)
    flattened_results = [res for results in all_results for res in results]

    prefixed_digests = await MultiGet(
        Get(Digest, AddPrefix(result.digest, result.reldir))
        for result in flattened_results)
    output_dir = os.path.join(str(dist_dir.relpath), "export")
    merged_digest = await Get(Digest, MergeDigests(prefixed_digests))
    dist_digest = await Get(Digest, AddPrefix(merged_digest, output_dir))
    workspace.write_digest(dist_digest)
    environment = await Get(Environment, EnvironmentRequest(["PATH"]))
    for result in flattened_results:
        digest_root = os.path.join(build_root.path, output_dir, result.reldir)
        for cmd in result.post_processing_cmds:
            argv = tuple(
                arg.format(digest_root=digest_root) for arg in cmd.argv)
            ip = InteractiveProcess(
                argv=argv,
                env={
                    "PATH": environment.get("PATH", ""),
                    **cmd.extra_env
                },
                run_in_workspace=True,
            )
            await Effect(InteractiveProcessResult, InteractiveProcess, ip)

        console.print_stdout(
            f"Wrote {result.description} to {os.path.join(output_dir, result.reldir)}"
        )
    return Export(exit_code=0)
Example #30
async def generate_lockfiles_goal(
    workspace: Workspace,
    union_membership: UnionMembership,
    generate_lockfiles_subsystem: GenerateLockfilesSubsystem,
) -> GenerateLockfilesGoal:
    known_user_resolve_names = await MultiGet(
        Get(KnownUserResolveNames, KnownUserResolveNamesRequest, request())
        for request in union_membership.get(KnownUserResolveNamesRequest)
    )
    requested_user_resolve_names, requested_tool_sentinels = determine_resolves_to_generate(
        known_user_resolve_names,
        union_membership.get(GenerateToolLockfileSentinel),
        set(generate_lockfiles_subsystem.resolve_names),
    )

    all_specified_user_requests = await MultiGet(
        Get(UserGenerateLockfiles, RequestedUserResolveNames, resolve_names)
        for resolve_names in requested_user_resolve_names
    )
    specified_tool_requests = await MultiGet(
        Get(WrappedGenerateLockfile, GenerateToolLockfileSentinel, sentinel())
        for sentinel in requested_tool_sentinels
    )
    applicable_tool_requests = filter_tool_lockfile_requests(
        specified_tool_requests,
        resolve_specified=bool(generate_lockfiles_subsystem.resolve_names),
    )

    results = await MultiGet(
        Get(GenerateLockfileResult, GenerateLockfile, req)
        for req in (
            *(req for reqs in all_specified_user_requests for req in reqs),
            *applicable_tool_requests,
        )
    )

    merged_digest = await Get(Digest, MergeDigests(res.digest for res in results))
    workspace.write_digest(merged_digest)
    for result in results:
        logger.info(f"Wrote lockfile for the resolve `{result.resolve_name}` to {result.path}")

    return GenerateLockfilesGoal(exit_code=0)