Example #1
0
async def fmt(console: Console, targets: HydratedTargets, workspace: Workspace,
              union_membership: UnionMembership) -> Fmt:
    aggregated_results = await MultiGet(
        Get[AggregatedFmtResults](FormatTarget, target.adaptor)
        for target in targets
        if FormatTarget.is_formattable(target.adaptor,
                                       union_membership=union_membership))
    individual_results = [
        result for aggregated_result in aggregated_results
        for result in aggregated_result.results
    ]

    if not individual_results:
        return Fmt(exit_code=0)

    # NB: this will fail if there are any conflicting changes, which we want to happen rather than
    # silently having one result override the other. In practice, this should never happen due
    # to our use of an aggregator rule for each distinct language.
    merged_formatted_digest = await Get[Digest](DirectoriesToMerge(
        tuple(aggregated_result.combined_digest
              for aggregated_result in aggregated_results)))
    workspace.materialize_directory(
        DirectoryToMaterialize(merged_formatted_digest))
    for result in individual_results:
        if result.stdout:
            console.print_stdout(result.stdout)
        if result.stderr:
            console.print_stderr(result.stderr)

    # Since the rules to produce FmtResult should use ExecuteRequest, rather than
    # FallibleExecuteProcessRequest, we assume that there were no failures.
    return Fmt(exit_code=0)
Example #2
0
async def generate_pants_ini(console: Console, workspace: Workspace) -> GeneratePantsIni:
  pants_ini_content = dedent(f"""\
    [GLOBAL]
    pants_version: {pants_version}
    """)

  preexisting_snapshot = await Get[Snapshot](PathGlobs(include=('pants.ini',)))
  if preexisting_snapshot.files:
    console.print_stderr(
      "./pants.ini already exists. This goal is only meant to be run the first time you run Pants "
      "in a project.\n\nTo update config values, please directly modify the file."
    )
    return GeneratePantsIni(exit_code=1)

  console.print_stdout(dedent(f"""\
    Adding sensible defaults to ./pants.ini:
      * Pinning `pants_version` to `{pants_version}`.
    """))

  digest = await Get[Digest](InputFilesContent([
    FileContent(path='pants.ini', content=pants_ini_content.encode())
  ]))
  workspace.materialize_directory(DirectoryToMaterialize(digest))

  console.print_stdout(
    "You may modify these values directly in the file at any time. The ./pants script will detect "
    "any changes the next time you run it.\n\nYou are now ready to use Pants!"
  )
  return GeneratePantsIni(exit_code=0)
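A minimal stdlib-only sketch of the dedent(f"""\ ...) idiom used in the rule above (the version string is a placeholder standing in for the pants_version value the rule references): the backslash after the opening quotes suppresses the leading newline, and textwrap.dedent strips the common indentation so the generated file content starts flush left.

from textwrap import dedent

pants_version = "1.26.0"  # placeholder for the real constant used in the rule above
content = dedent(f"""\
    [GLOBAL]
    pants_version: {pants_version}
    """)
# The generated config has no leading blank line and no indentation.
assert content == "[GLOBAL]\npants_version: 1.26.0\n"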
Example #3
0
async def fmt(console: Console, targets: HydratedTargets, workspace: Workspace,
              union_membership: UnionMembership) -> Fmt:
    results = await MultiGet(
        Get[FmtResult](TargetWithSources, target.adaptor) for target in targets
        if TargetWithSources.is_formattable_and_lintable(
            target.adaptor, union_membership=union_membership))

    if not results:
        return Fmt(exit_code=0)

    # NB: this will fail if there are any conflicting changes, which we want to happen rather than
    # silently having one result override the other.
    # TODO(#8722): how should we handle multiple auto-formatters touching the same files?
    merged_formatted_digest = await Get[Digest](DirectoriesToMerge(
        tuple(result.digest for result in results)))
    workspace.materialize_directory(
        DirectoryToMaterialize(merged_formatted_digest))
    for result in results:
        if result.stdout:
            console.print_stdout(result.stdout)
        if result.stderr:
            console.print_stderr(result.stderr)

    # Since the rules to produce FmtResult should use ExecuteRequest, rather than
    # FallibleExecuteProcessRequest, we assume that there were no failures.
    return Fmt(exit_code=0)
Example #4
0
 def materialize(self, console: Console, workspace: Workspace) -> None:
     workspace.materialize_directory(
         DirectoryToMaterialize(
             self.result_digest, path_prefix=str(self.directory_to_materialize_to),
         )
     )
     console.print_stdout(f"\nWrote coverage report to `{self.directory_to_materialize_to}`")
Example #5
0
async def run_setup_pys(targets: HydratedTargets, options: SetupPyOptions, console: Console,
                        provenance_map: AddressProvenanceMap,
                        distdir: DistDir, workspace: Workspace) -> SetupPy:
  """Run setup.py commands on all exported targets addressed."""
  args = tuple(options.values.args)
  validate_args(args)

  # Get all exported targets, ignoring any non-exported targets that happened to be
  # globbed over, but erroring on any explicitly-requested non-exported targets.

  exported_targets: List[ExportedTarget] = []
  explicit_nonexported_targets: List[HydratedTarget] = []

  for hydrated_target in targets:
    if _is_exported(hydrated_target):
      exported_targets.append(ExportedTarget(hydrated_target))
    elif provenance_map.is_single_address(hydrated_target.address):
      explicit_nonexported_targets.append(hydrated_target)
  if explicit_nonexported_targets:
    raise TargetNotExported(
      'Cannot run setup.py on these targets, because they have no `provides=` clause: '
      f'{", ".join(so.address.reference() for so in explicit_nonexported_targets)}')

  if options.values.transitive:
    # Expand out to all owners of the entire dep closure.
    tht = await Get[TransitiveHydratedTargets](
      BuildFileAddresses([et.hydrated_target.address for et in exported_targets]))
    owners = await MultiGet(
      Get[ExportedTarget](OwnedDependency(ht)) for ht in tht.closure if is_ownable_target(ht)
    )
    exported_targets = list(set(owners))

  chroots = await MultiGet(Get[SetupPyChroot](SetupPyChrootRequest(target))
                           for target in exported_targets)

  if args:
    setup_py_results = await MultiGet(
      Get[RunSetupPyResult](RunSetupPyRequest(exported_target, chroot, tuple(args)))
      for exported_target, chroot in zip(exported_targets, chroots)
    )

    for exported_target, setup_py_result in zip(exported_targets, setup_py_results):
      addr = exported_target.hydrated_target.address.reference()
      console.print_stderr(f'Writing contents of dist dir for {addr} to {distdir.relpath}')
      workspace.materialize_directory(
        DirectoryToMaterialize(setup_py_result.output, path_prefix=str(distdir.relpath))
      )
  else:
    # Just dump the chroot.
    for exported_target, chroot in zip(exported_targets, chroots):
      addr = exported_target.hydrated_target.address.reference()
      provides = exported_target.hydrated_target.adaptor.provides
      setup_py_dir = distdir.relpath / f'{provides.name}-{provides.version}'
      console.print_stderr(f'Writing setup.py chroot for {addr} to {setup_py_dir}')
      workspace.materialize_directory(
        DirectoryToMaterialize(chroot.digest, path_prefix=str(setup_py_dir))
      )

  return SetupPy(0)
Example #6
0
 def materialize(self, console: Console, workspace: Workspace) -> Optional[PurePath]:
     workspace.materialize_directory(
         DirectoryToMaterialize(
             self.result_digest, path_prefix=str(self.directory_to_materialize_to),
         )
     )
     console.print_stderr(f"\nWrote coverage report to `{self.directory_to_materialize_to}`")
     return self.report_file
Example #7
0
async def run_repl(
    console: Console,
    workspace: Workspace,
    runner: InteractiveRunner,
    options: ReplOptions,
    transitive_targets: TransitiveTargets,
    build_root: BuildRoot,
    union_membership: UnionMembership,
    global_options: GlobalOptions,
) -> Repl:

    # We can guarantee that we will only ever enter this `goal_rule` if there exists an implementer
    # of the `ReplImplementation` union because `LegacyGraphSession.run_goal_rules()` will not
    # execute this rule's body if there are no implementations registered.
    membership: Iterable[Type[
        ReplImplementation]] = union_membership.union_rules[ReplImplementation]
    implementations = {impl.name: impl for impl in membership}

    default_repl = "python"
    repl_shell_name = cast(str, options.values.shell or default_repl)

    repl_implementation_cls = implementations.get(repl_shell_name)
    if repl_implementation_cls is None:
        available = sorted(set(implementations.keys()))
        console.write_stdout(
            f"{repl_shell_name} is not an installed REPL program. Available REPLs: {available}"
        )
        return Repl(-1)

    repl_impl = repl_implementation_cls(targets=Targets(
        tgt for tgt in transitive_targets.closure
        if repl_implementation_cls.is_valid(tgt)))
    repl_binary = await Get[ReplBinary](ReplImplementation, repl_impl)

    with temporary_dir(root_dir=global_options.options.pants_workdir,
                       cleanup=False) as tmpdir:
        path_relative_to_build_root = PurePath(tmpdir).relative_to(
            build_root.path).as_posix()
        workspace.materialize_directory(
            DirectoryToMaterialize(repl_binary.digest,
                                   path_prefix=path_relative_to_build_root))

        full_path = PurePath(tmpdir, repl_binary.binary_name).as_posix()
        run_request = InteractiveProcessRequest(
            argv=(full_path, ),
            run_in_workspace=True,
        )

    result = runner.run_local_interactive_process(run_request)
    exit_code = result.process_exit_code

    if exit_code == 0:
        console.write_stdout("REPL exited successfully.")
    else:
        console.write_stdout(f"REPL exited with error: {exit_code}.")

    return Repl(exit_code)
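A stdlib-only sketch of the path handling shared by the repl and run rules above (both paths here are made up for illustration): PurePath.relative_to only succeeds when the first path lies under the second and raises ValueError otherwise, which is why the temporary directory is created under the workdir inside the build root before being used as a materialization prefix.

from pathlib import PurePath

build_root = "/home/user/project"                # hypothetical build root
tmpdir = "/home/user/project/.pants.d/tmp1234"   # hypothetical temp dir created under it

# The relative prefix that would be handed to DirectoryToMaterialize above.
print(PurePath(tmpdir).relative_to(build_root).as_posix())  # .pants.d/tmp1234

try:
    PurePath("/elsewhere/tmp1234").relative_to(build_root)
except ValueError as exc:
    print(f"not a subpath: {exc}")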
Example #8
0
async def run(
    console: Console,
    workspace: Workspace,
    runner: InteractiveRunner,
    build_root: BuildRoot,
    options: RunOptions,
    global_options: GlobalOptions,
) -> Run:
    targets_to_valid_configs = await Get[TargetsToValidConfigurations](
        TargetsToValidConfigurationsRequest(
            BinaryConfiguration,
            goal_description=f"the `{options.name}` goal",
            error_if_no_valid_targets=True,
            expect_single_config=True,
        ))
    config = targets_to_valid_configs.configurations[0]
    binary = await Get[CreatedBinary](BinaryConfiguration, config)

    workdir = global_options.options.pants_workdir
    with temporary_dir(root_dir=workdir, cleanup=True) as tmpdir:
        path_relative_to_build_root = PurePath(tmpdir).relative_to(
            build_root.path).as_posix()
        workspace.materialize_directory(
            DirectoryToMaterialize(binary.digest,
                                   path_prefix=path_relative_to_build_root))

        console.write_stdout(f"Running target: {config.address}\n")
        full_path = PurePath(tmpdir, binary.binary_name).as_posix()
        run_request = InteractiveProcessRequest(
            argv=(full_path, *options.values.args),
            run_in_workspace=True,
        )

        try:
            result = runner.run_local_interactive_process(run_request)
            exit_code = result.process_exit_code
            if result.process_exit_code == 0:
                console.write_stdout(f"{config.address} ran successfully.\n")
            else:
                console.write_stderr(
                    f"{config.address} failed with code {result.process_exit_code}!\n"
                )

        except Exception as e:
            console.write_stderr(
                f"Exception when attempting to run {config.address}: {e!r}\n")
            exit_code = -1

    return Run(exit_code)
Example #9
0
async def workspace_console_rule(
        console: Console, workspace: Workspace,
        msg: MessageToConsoleRule) -> MockWorkspaceGoal:
    digest = await Get(Digest, InputFilesContent, msg.input_files_content)
    output = workspace.materialize_directory(DirectoryToMaterialize(digest))
    console.print_stdout(output.output_paths[0], end='')
    return MockWorkspaceGoal(exit_code=0)
Example #10
0
async def create_awslambda(
    console: Console,
    options: AWSLambdaOptions,
    distdir: DistDir,
    buildroot: BuildRoot,
    workspace: Workspace,
) -> AWSLambdaGoal:
    targets_to_valid_configs = await Get[TargetsToValidConfigurations](
        TargetsToValidConfigurationsRequest(
            AWSLambdaConfiguration,
            goal_description=f"the `{options.name}` goal",
            error_if_no_valid_targets=True,
        ))
    awslambdas = await MultiGet(
        Get[CreatedAWSLambda](AWSLambdaConfiguration, config)
        for config in targets_to_valid_configs.configurations)
    merged_digest = await Get[Digest](DirectoriesToMerge(
        tuple(awslambda.digest for awslambda in awslambdas)))
    result = workspace.materialize_directory(
        DirectoryToMaterialize(merged_digest,
                               path_prefix=str(distdir.relpath)))
    with options.line_oriented(console) as print_stdout:
        for awslambda, path in zip(awslambdas, result.output_paths):
            print_stdout(
                f"Wrote code bundle to {os.path.relpath(path, buildroot.path)}"
            )
            print_stdout(f"  Runtime: {awslambda.runtime}")
            print_stdout(f"  Handler: {awslambda.handler}")
            print_stdout("")
    return AWSLambdaGoal(exit_code=0)
Example #11
0
async def create_binary(
    console: Console,
    workspace: Workspace,
    options: BinaryOptions,
    distdir: DistDir,
    buildroot: BuildRoot,
) -> Binary:
    targets_to_valid_configs = await Get[TargetsToValidConfigurations](
        TargetsToValidConfigurationsRequest(
            BinaryConfiguration,
            goal_description=f"the `{options.name}` goal",
            error_if_no_valid_targets=True,
        ))
    binaries = await MultiGet(
        Get[CreatedBinary](BinaryConfiguration, config)
        for config in targets_to_valid_configs.configurations)
    merged_digest = await Get[Digest](DirectoriesToMerge(
        tuple(binary.digest for binary in binaries)))
    result = workspace.materialize_directory(
        DirectoryToMaterialize(merged_digest,
                               path_prefix=str(distdir.relpath)))
    with options.line_oriented(console) as print_stdout:
        for path in result.output_paths:
            print_stdout(f"Wrote {os.path.relpath(path, buildroot.path)}")
    return Binary(exit_code=0)
Example #12
0
async def create_binary(addresses: BuildFileAddresses, console: Console,
                        workspace: Workspace, options: Binary.Options,
                        options_bootstrapper: OptionsBootstrapper,
                        build_root: BuildRoot) -> Binary:
    with Binary.line_oriented(options, console) as print_stdout:
        global_options = options_bootstrapper.bootstrap_options.for_global_scope()
        pants_distdir = Path(global_options.pants_distdir)
        if not is_child_of(pants_distdir, build_root.pathlib_path):
            console.print_stderr(
                f"When set to an absolute path, `--pants-distdir` must be relative to the build root."
                "You set it to {pants_distdir}. Instead, use a relative path or an absolute path relative to the build root."
            )
            return Binary(exit_code=1)

        relative_distdir = (pants_distdir.relative_to(build_root.pathlib_path)
                            if pants_distdir.is_absolute() else pants_distdir)
        print_stdout(f"Generating binaries in `./{relative_distdir}`")

        binaries = await MultiGet(
            Get[CreatedBinary](Address, address.to_address())
            for address in addresses)
        merged_digest = await Get[Digest](DirectoriesToMerge(
            tuple(binary.digest for binary in binaries)))
        result = workspace.materialize_directory(
            DirectoryToMaterialize(merged_digest,
                                   path_prefix=str(relative_distdir)))
        for path in result.output_paths:
            print_stdout(f"Wrote {path}")
    return Binary(exit_code=0)
Example #13
0
async def create_binary(
    targets_with_origins: TargetsWithOrigins,
    console: Console,
    workspace: Workspace,
    options: BinaryOptions,
    distdir: DistDir,
    buildroot: BuildRoot,
    union_membership: UnionMembership,
    registered_target_types: RegisteredTargetTypes,
) -> Binary:
    valid_config_types_by_target = gather_valid_binary_configuration_types(
        goal_subsytem=options,
        targets_with_origins=targets_with_origins,
        union_membership=union_membership,
        registered_target_types=registered_target_types,
    )
    binaries = await MultiGet(
        Get[CreatedBinary](BinaryConfiguration, valid_config_type.create(
            target))
        for target, valid_config_types in valid_config_types_by_target.items()
        for valid_config_type in valid_config_types)
    merged_digest = await Get[Digest](DirectoriesToMerge(
        tuple(binary.digest for binary in binaries)))
    result = workspace.materialize_directory(
        DirectoryToMaterialize(merged_digest,
                               path_prefix=str(distdir.relpath)))
    with options.line_oriented(console) as print_stdout:
        for path in result.output_paths:
            print_stdout(f"Wrote {os.path.relpath(path, buildroot.path)}")
    return Binary(exit_code=0)
Example #14
0
async def run_repl(
    console: Console,
    workspace: Workspace,
    runner: InteractiveRunner,
    options: ReplOptions,
    transitive_targets: TransitiveTargets,
    build_root: BuildRoot,
    union_membership: UnionMembership,
    global_options: GlobalOptions,
) -> Repl:
    default_repl = "python"
    repl_shell_name = cast(str, options.values.shell) or default_repl

    implementations: Dict[str, Type[ReplImplementation]] = {
        impl.name: impl
        for impl in union_membership[ReplImplementation]
    }
    repl_implementation_cls = implementations.get(repl_shell_name)
    if repl_implementation_cls is None:
        available = sorted(implementations.keys())
        console.print_stderr(
            f"{repr(repl_shell_name)} is not a registered REPL. Available REPLs (which may "
            f"be specified through the option `--repl-shell`): {available}")
        return Repl(-1)

    repl_impl = repl_implementation_cls(targets=Targets(
        tgt for tgt in transitive_targets.closure
        if repl_implementation_cls.is_valid(tgt)))
    repl_binary = await Get[ReplBinary](ReplImplementation, repl_impl)

    with temporary_dir(root_dir=global_options.options.pants_workdir,
                       cleanup=False) as tmpdir:
        path_relative_to_build_root = PurePath(tmpdir).relative_to(
            build_root.path).as_posix()
        workspace.materialize_directory(
            DirectoryToMaterialize(repl_binary.digest,
                                   path_prefix=path_relative_to_build_root))

        full_path = PurePath(tmpdir, repl_binary.binary_name).as_posix()
        run_request = InteractiveProcessRequest(
            argv=(full_path, ),
            run_in_workspace=True,
        )

    result = runner.run_local_interactive_process(run_request)
    return Repl(result.process_exit_code)
Example #15
0
async def run(
    console: Console,
    workspace: Workspace,
    runner: InteractiveRunner,
    build_root: BuildRoot,
    addresses: Addresses,
    options: RunOptions,
) -> Run:
    address = addresses.expect_single()
    binary = await Get[CreatedBinary](Address, address)

    with temporary_dir(root_dir=PurePath(build_root.path,
                                         ".pants.d").as_posix(),
                       cleanup=True) as tmpdir:
        path_relative_to_build_root = PurePath(tmpdir).relative_to(
            build_root.path).as_posix()
        workspace.materialize_directory(
            DirectoryToMaterialize(binary.digest,
                                   path_prefix=path_relative_to_build_root))

        console.write_stdout(f"Running target: {address}\n")
        full_path = PurePath(tmpdir, binary.binary_name).as_posix()
        run_request = InteractiveProcessRequest(
            argv=(full_path, *options.values.args),
            run_in_workspace=True,
        )

        try:
            result = runner.run_local_interactive_process(run_request)
            exit_code = result.process_exit_code
            if result.process_exit_code == 0:
                console.write_stdout(f"{address} ran successfully.\n")
            else:
                console.write_stderr(
                    f"{address} failed with code {result.process_exit_code}!\n"
                )

        except Exception as e:
            console.write_stderr(
                f"Exception when attempting to run {address}: {e!r}\n")
            exit_code = -1

    return Run(exit_code)
Example #16
0
async def run(
    options: RunOptions,
    global_options: GlobalOptions,
    console: Console,
    runner: InteractiveRunner,
    workspace: Workspace,
    build_root: BuildRoot,
) -> Run:
    targets_to_valid_field_sets = await Get[TargetsToValidFieldSets](
        TargetsToValidFieldSetsRequest(
            BinaryFieldSet,
            goal_description=f"the `{options.name}` goal",
            error_if_no_valid_targets=True,
            expect_single_field_set=True,
        ))
    field_set = targets_to_valid_field_sets.field_sets[0]
    binary = await Get[CreatedBinary](BinaryFieldSet, field_set)

    workdir = global_options.options.pants_workdir
    with temporary_dir(root_dir=workdir, cleanup=True) as tmpdir:
        path_relative_to_build_root = PurePath(tmpdir).relative_to(
            build_root.path).as_posix()
        workspace.materialize_directory(
            DirectoryToMaterialize(binary.digest,
                                   path_prefix=path_relative_to_build_root))

        full_path = PurePath(tmpdir, binary.binary_name).as_posix()
        run_request = InteractiveProcessRequest(
            argv=(full_path, *options.values.args),
            run_in_workspace=True,
        )

        try:
            result = runner.run_local_interactive_process(run_request)
            exit_code = result.process_exit_code
        except Exception as e:
            console.print_stderr(
                f"Exception when attempting to run {field_set.address}: {e!r}")
            exit_code = -1

    return Run(exit_code)
Example #17
0
async def run(
    console: Console,
    workspace: Workspace,
    runner: InteractiveRunner,
    build_root: BuildRoot,
    bfa: BuildFileAddress,
) -> Run:
    target = bfa.to_address()
    binary = await Get[CreatedBinary](Address, target)

    with temporary_dir(root_dir=str(Path(build_root.path, ".pants.d")),
                       cleanup=True) as tmpdir:
        path_relative_to_build_root = str(
            Path(tmpdir).relative_to(build_root.path))
        workspace.materialize_directory(
            DirectoryToMaterialize(binary.digest,
                                   path_prefix=path_relative_to_build_root))

        console.write_stdout(f"Running target: {target}\n")
        full_path = str(Path(tmpdir, binary.binary_name))
        run_request = InteractiveProcessRequest(
            argv=(full_path, ),
            run_in_workspace=True,
        )

        try:
            result = runner.run_local_interactive_process(run_request)
            exit_code = result.process_exit_code
            if result.process_exit_code == 0:
                console.write_stdout(f"{target} ran successfully.\n")
            else:
                console.write_stderr(
                    f"{target} failed with code {result.process_exit_code}!\n")

        except Exception as e:
            console.write_stderr(
                f"Exception when attempting to run {target} : {e}\n")
            exit_code = -1

    return Run(exit_code)
Example #18
0
async def create_awslambda(addresses: BuildFileAddresses, console: Console,
                           options: AWSLambdaOptions, distdir: DistDir,
                           workspace: Workspace) -> AWSLambdaGoal:
    with options.line_oriented(console) as print_stdout:
        print_stdout(f"Generating AWS lambdas in `./{distdir.relpath}`")
        awslambdas = await MultiGet(
            Get[CreatedAWSLambda](Address, address.to_address())
            for address in addresses)
        merged_digest = await Get[Digest](DirectoriesToMerge(
            tuple(awslambda.digest for awslambda in awslambdas)))
        result = workspace.materialize_directory(
            DirectoryToMaterialize(merged_digest,
                                   path_prefix=str(distdir.relpath)))
        for path in result.output_paths:
            print_stdout(f"Wrote {path}")
    return AWSLambdaGoal(exit_code=0)
Example #19
0
async def create_binary(workspace: Workspace, dist_dir: DistDir,
                        build_root: BuildRoot) -> Binary:
    targets_to_valid_field_sets = await Get[TargetsToValidFieldSets](
        TargetsToValidFieldSetsRequest(BinaryFieldSet,
                                       goal_description="the `binary` goal",
                                       error_if_no_valid_targets=True))
    binaries = await MultiGet(
        Get[CreatedBinary](BinaryFieldSet, field_set)
        for field_set in targets_to_valid_field_sets.field_sets)
    merged_digest = await Get[Digest](MergeDigests(binary.digest
                                                   for binary in binaries))
    result = workspace.materialize_directory(
        DirectoryToMaterialize(merged_digest,
                               path_prefix=str(dist_dir.relpath)))
    for path in result.output_paths:
        logger.info(f"Wrote {os.path.relpath(path, build_root.path)}")
    return Binary(exit_code=0)
Example #20
0
async def create_awslambda(
    targets: Targets,
    console: Console,
    options: AWSLambdaOptions,
    union_membership: UnionMembership,
    registered_target_types: RegisteredTargetTypes,
    distdir: DistDir,
    buildroot: BuildRoot,
    workspace: Workspace,
) -> AWSLambdaGoal:
    config_types: Iterable[Type[AWSLambdaConfiguration]] = union_membership.union_rules[
        AWSLambdaConfiguration
    ]
    configs = tuple(
        config_type.create(tgt)
        for tgt in targets
        for config_type in config_types
        if config_type.is_valid(tgt)
    )
    if not configs:
        all_valid_target_types = itertools.chain.from_iterable(
            config_type.valid_target_types(
                registered_target_types.types, union_membership=union_membership
            )
            for config_type in config_types
        )
        formatted_target_types = sorted(target_type.alias for target_type in all_valid_target_types)
        raise ValueError(
            f"None of the provided targets work with the goal `{options.name}`. This goal "
            f"works with the following target types: {formatted_target_types}."
        )

    awslambdas = await MultiGet(
        Get[CreatedAWSLambda](AWSLambdaConfiguration, config) for config in configs
    )
    merged_digest = await Get[Digest](
        DirectoriesToMerge(tuple(awslambda.digest for awslambda in awslambdas))
    )
    result = workspace.materialize_directory(
        DirectoryToMaterialize(merged_digest, path_prefix=str(distdir.relpath))
    )
    with options.line_oriented(console) as print_stdout:
        for path in result.output_paths:
            print_stdout(f"Wrote {os.path.relpath(path, buildroot.path)}")
    return AWSLambdaGoal(exit_code=0)
Example #21
0
async def create_binary(
    addresses: Addresses,
    console: Console,
    workspace: Workspace,
    options: BinaryOptions,
    distdir: DistDir,
    buildroot: BuildRoot,
) -> Binary:
    with options.line_oriented(console) as print_stdout:
        binaries = await MultiGet(Get[CreatedBinary](Address, address)
                                  for address in addresses)
        merged_digest = await Get[Digest](DirectoriesToMerge(
            tuple(binary.digest for binary in binaries)))
        result = workspace.materialize_directory(
            DirectoryToMaterialize(merged_digest,
                                   path_prefix=str(distdir.relpath)))
        for path in result.output_paths:
            print_stdout(f"Wrote {os.path.relpath(path, buildroot.path)}")
    return Binary(exit_code=0)
Example #22
0
async def run_tests(
    console: Console,
    options: TestOptions,
    interactive_runner: InteractiveRunner,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Test:
    if options.values.debug:
        targets_to_valid_field_sets = await Get[TargetsToValidFieldSets](
            TargetsToValidFieldSetsRequest(
                TestFieldSet,
                goal_description="`test --debug`",
                error_if_no_valid_targets=True,
                expect_single_field_set=True,
            )
        )
        field_set = targets_to_valid_field_sets.field_sets[0]
        request = await Get[TestDebugRequest](TestFieldSet, field_set)
        debug_result = interactive_runner.run_local_interactive_process(request.ipr)
        return Test(debug_result.process_exit_code)

    targets_to_valid_field_sets = await Get[TargetsToValidFieldSets](
        TargetsToValidFieldSetsRequest(
            TestFieldSet,
            goal_description=f"the `{options.name}` goal",
            error_if_no_valid_targets=False,
        )
    )
    field_sets_with_sources = await Get[FieldSetsWithSources](
        FieldSetsWithSourcesRequest(targets_to_valid_field_sets.field_sets)
    )

    results = await MultiGet(
        Get[AddressAndTestResult](WrappedTestFieldSet(field_set))
        for field_set in field_sets_with_sources
    )

    exit_code = PANTS_SUCCEEDED_EXIT_CODE
    for result in results:
        if result.test_result.status == Status.FAILURE:
            exit_code = PANTS_FAILED_EXIT_CODE
        has_output = result.test_result.stdout or result.test_result.stderr
        if has_output:
            status = (
                console.green("✓")
                if result.test_result.status == Status.SUCCESS
                else console.red("𐄂")
            )
            console.print_stderr(f"{status} {result.address}")
        if result.test_result.stdout:
            console.print_stderr(result.test_result.stdout)
        if result.test_result.stderr:
            console.print_stderr(result.test_result.stderr)
        if has_output and result != results[-1]:
            console.print_stderr("")

    # Print summary
    if len(results) > 1:
        console.print_stderr("")
        for result in results:
            console.print_stderr(
                f"{result.address.reference():80}.....{result.test_result.status.value:>10}"
            )

    for result in results:
        xml_results = result.test_result.xml_results
        if not xml_results:
            continue
        workspace.materialize_directory(DirectoryToMaterialize(xml_results))

    if options.values.use_coverage:
        all_coverage_data: Iterable[CoverageData] = [
            result.test_result.coverage_data
            for result in results
            if result.test_result.coverage_data is not None
        ]

        coverage_types_to_collection_types: Dict[
            Type[CoverageData], Type[CoverageDataCollection]
        ] = {
            collection_cls.element_type: collection_cls
            for collection_cls in union_membership.get(CoverageDataCollection)
        }
        coverage_collections: List[CoverageDataCollection] = []
        for data_cls, data in itertools.groupby(all_coverage_data, lambda data: type(data)):
            collection_cls = coverage_types_to_collection_types[data_cls]
            coverage_collections.append(collection_cls(data))

        coverage_reports = await MultiGet(
            Get[CoverageReport](CoverageDataCollection, coverage_collection)
            for coverage_collection in coverage_collections
        )

        coverage_report_files = []
        for report in coverage_reports:
            report_file = report.materialize(console, workspace)
            if report_file is not None:
                coverage_report_files.append(report_file)

        if coverage_report_files and options.values.open_coverage:
            desktop.ui_open(console, interactive_runner, coverage_report_files)

    return Test(exit_code)
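A stdlib-only sketch of the coverage-grouping step in run_tests above (the coverage classes here are simple stand-ins, not the real Pants types): itertools.groupby only merges adjacent items whose keys compare equal, so coverage data interleaved across types would yield several groups per type unless it were sorted by type first.

import itertools

class PytestCoverageData: ...   # stand-in for one backend's CoverageData subclass
class JUnitCoverageData: ...    # stand-in for another backend's CoverageData subclass

all_coverage_data = [PytestCoverageData(), PytestCoverageData(), JUnitCoverageData()]
for data_cls, group in itertools.groupby(all_coverage_data, key=type):
    print(data_cls.__name__, len(list(group)))
# PytestCoverageData 2
# JUnitCoverageData 1
# Interleaved input would first need e.g. sorted(all_coverage_data, key=lambda d: type(d).__name__).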
Example #23
0
async def fmt(
    console: Console,
    targets_with_origins: TargetsWithOrigins,
    options: FmtOptions,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Fmt:
    language_target_collection_types: Iterable[Type[LanguageFmtTargets]] = (
        union_membership.union_rules[LanguageFmtTargets]
    )

    language_target_collections: Iterable[LanguageFmtTargets] = tuple(
        language_target_collection_type(
            TargetsWithOrigins(
                target_with_origin
                for target_with_origin in targets_with_origins
                if language_target_collection_type.belongs_to_language(target_with_origin.target)
            )
        )
        for language_target_collection_type in language_target_collection_types
    )
    targets_with_sources: Iterable[TargetsWithSources] = await MultiGet(
        Get[TargetsWithSources](
            TargetsWithSourcesRequest(
                target_with_origin.target
                for target_with_origin in language_target_collection.targets_with_origins
            )
        )
        for language_target_collection in language_target_collections
    )
    # NB: We must convert the generic TargetsWithSources objects back into their
    # corresponding LanguageFmtTargets, e.g. back to PythonFmtTargets, in order for the union
    # rule to work.
    valid_language_target_collections: Iterable[LanguageFmtTargets] = tuple(
        language_target_collection_cls(
            TargetsWithOrigins(
                target_with_origin
                for target_with_origin in language_target_collection.targets_with_origins
                if target_with_origin.target in language_targets_with_sources
            )
        )
        for language_target_collection_cls, language_target_collection, language_targets_with_sources in zip(
            language_target_collection_types, language_target_collections, targets_with_sources
        )
        if language_targets_with_sources
    )

    if options.values.per_target_caching:
        per_language_results = await MultiGet(
            Get[LanguageFmtResults](
                LanguageFmtTargets,
                language_target_collection.__class__(TargetsWithOrigins([target_with_origin])),
            )
            for language_target_collection in valid_language_target_collections
            for target_with_origin in language_target_collection.targets_with_origins
        )
    else:
        per_language_results = await MultiGet(
            Get[LanguageFmtResults](LanguageFmtTargets, language_target_collection)
            for language_target_collection in valid_language_target_collections
        )

    individual_results: List[FmtResult] = list(
        itertools.chain.from_iterable(
            language_result.results for language_result in per_language_results
        )
    )

    if not individual_results:
        return Fmt(exit_code=0)

    # NB: this will fail if there are any conflicting changes, which we want to happen rather than
    # silently having one result override the other. In practice, this should never happen because
    # we group each language's formatters into a single combined_digest.
    merged_formatted_digest = await Get[Digest](
        DirectoriesToMerge(
            tuple(language_result.combined_digest for language_result in per_language_results)
        )
    )
    workspace.materialize_directory(DirectoryToMaterialize(merged_formatted_digest))
    for result in individual_results:
        if result.stdout:
            console.print_stdout(result.stdout)
        if result.stderr:
            console.print_stderr(result.stderr)

    # Since the rules to produce FmtResult should use ExecuteRequest, rather than
    # FallibleProcess, we assume that there were no failures.
    return Fmt(exit_code=0)
Example #24
0
async def run(
    console: Console,
    workspace: Workspace,
    runner: InteractiveRunner,
    build_root: BuildRoot,
    targets_with_origins: TargetsWithOrigins,
    options: RunOptions,
    global_options: GlobalOptions,
    union_membership: UnionMembership,
    registered_target_types: RegisteredTargetTypes,
) -> Run:
    valid_config_types_by_target = gather_valid_binary_configuration_types(
        goal_subsytem=options,
        targets_with_origins=targets_with_origins,
        union_membership=union_membership,
        registered_target_types=registered_target_types,
    )

    bulleted_list_sep = "\n  * "

    if len(valid_config_types_by_target) > 1:
        binary_target_addresses = sorted(
            binary_target.address.spec
            for binary_target in valid_config_types_by_target)
        raise ValueError(
            f"The `run` goal only works on one binary target but was given multiple targets that "
            f"can produce a binary:"
            f"{bulleted_list_sep}{bulleted_list_sep.join(binary_target_addresses)}\n\n"
            f"Please select one of these targets to run.")

    target, valid_config_types = list(valid_config_types_by_target.items())[0]
    if len(valid_config_types) > 1:
        possible_config_types = sorted(config_type.__name__
                                       for config_type in valid_config_types)
        # TODO: improve this error message. (It's never actually triggered yet because we only have
        #  Python implemented with V2.) A better error message would explain to users how they can
        #  resolve the issue.
        raise ValueError(
            f"Multiple of the registered binary implementations work for {target.address} "
            f"(target type {repr(target.alias)}).\n\n"
            f"It is ambiguous which implementation to use. Possible implementations:"
            f"{bulleted_list_sep}{bulleted_list_sep.join(possible_config_types)}."
        )
    config_type = valid_config_types[0]

    binary = await Get[CreatedBinary](BinaryConfiguration,
                                      config_type.create(target))

    workdir = global_options.options.pants_workdir

    with temporary_dir(root_dir=workdir, cleanup=True) as tmpdir:
        path_relative_to_build_root = PurePath(tmpdir).relative_to(
            build_root.path).as_posix()
        workspace.materialize_directory(
            DirectoryToMaterialize(binary.digest,
                                   path_prefix=path_relative_to_build_root))

        console.write_stdout(f"Running target: {target.address}\n")
        full_path = PurePath(tmpdir, binary.binary_name).as_posix()
        run_request = InteractiveProcessRequest(
            argv=(full_path, *options.values.args),
            run_in_workspace=True,
        )

        try:
            result = runner.run_local_interactive_process(run_request)
            exit_code = result.process_exit_code
            if result.process_exit_code == 0:
                console.write_stdout(f"{target.address} ran successfully.\n")
            else:
                console.write_stderr(
                    f"{target.address} failed with code {result.process_exit_code}!\n"
                )

        except Exception as e:
            console.write_stderr(
                f"Exception when attempting to run {target.address}: {e!r}\n")
            exit_code = -1

    return Run(exit_code)
Example #25
0
async def run_tests(
    console: Console,
    options: TestOptions,
    runner: InteractiveRunner,
    addresses_with_origins: AddressesWithOrigins,
    workspace: Workspace,
) -> Test:
    if options.values.debug:
        address_with_origin = addresses_with_origins.expect_single()
        addr_debug_request = await Get[AddressAndDebugRequest](
            AddressWithOrigin, address_with_origin
        )
        result = runner.run_local_interactive_process(addr_debug_request.request.ipr)
        return Test(result.process_exit_code)

    results = await MultiGet(
        Get[AddressAndTestResult](AddressWithOrigin, address_with_origin)
        for address_with_origin in addresses_with_origins
    )

    if options.values.run_coverage:
        # TODO: consider warning if a user uses `--coverage` but the language backend does not
        # provide coverage support. This might be too chatty to be worth doing?
        results_with_coverage = [
            x
            for x in results
            if x.test_result is not None and x.test_result.coverage_data is not None
        ]
        coverage_data_collections = itertools.groupby(
            results_with_coverage,
            lambda address_and_test_result: address_and_test_result.test_result.coverage_data.batch_cls,  # type: ignore[union-attr]
        )

        coverage_reports = await MultiGet(
            Get[CoverageReport](
                CoverageDataBatch, coverage_batch_cls(tuple(addresses_and_test_results))  # type: ignore[call-arg]
            )
            for coverage_batch_cls, addresses_and_test_results in coverage_data_collections
        )
        for report in coverage_reports:
            workspace.materialize_directory(
                DirectoryToMaterialize(
                    report.result_digest, path_prefix=str(report.directory_to_materialize_to),
                )
            )
            console.print_stdout(f"Wrote coverage report to `{report.directory_to_materialize_to}`")

    did_any_fail = False
    filtered_results = [(x.address, x.test_result) for x in results if x.test_result is not None]
    for address, test_result in filtered_results:
        if test_result.status == Status.FAILURE:
            did_any_fail = True
        if test_result.stdout:
            console.write_stdout(f"{address.reference()} stdout:\n{test_result.stdout}\n")
        if test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving the
            # two streams.
            console.write_stdout(f"{address.reference()} stderr:\n{test_result.stderr}\n")

    console.write_stdout("\n")

    for address, test_result in filtered_results:
        console.print_stdout(f"{address.reference():80}.....{test_result.status.value:>10}")

    if did_any_fail:
        console.print_stderr(console.red("\nTests failed"))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE

    return Test(exit_code)
Example #26
0
async def run_setup_pys(
    targets_with_origins: TargetsWithOrigins,
    options: SetupPyOptions,
    console: Console,
    python_setup: PythonSetup,
    distdir: DistDir,
    workspace: Workspace,
) -> SetupPy:
    """Run setup.py commands on all exported targets addressed."""
    args = tuple(options.values.args)
    validate_args(args)

    # Get all exported targets, ignoring any non-exported targets that happened to be
    # globbed over, but erroring on any explicitly-requested non-exported targets.

    exported_targets: List[ExportedTarget] = []
    explicit_nonexported_targets: List[Target] = []

    for target_with_origin in targets_with_origins:
        tgt = target_with_origin.target
        if _is_exported(tgt):
            exported_targets.append(ExportedTarget(tgt))
        elif isinstance(target_with_origin.origin, SingleAddress):
            explicit_nonexported_targets.append(tgt)
    if explicit_nonexported_targets:
        raise TargetNotExported(
            "Cannot run setup.py on these targets, because they have no `provides=` clause: "
            f'{", ".join(so.address.reference() for so in explicit_nonexported_targets)}'
        )

    if options.values.transitive:
        # Expand out to all owners of the entire dep closure.
        transitive_targets = await Get[TransitiveTargets](Addresses(
            et.target.address for et in exported_targets))
        owners = await MultiGet(Get[ExportedTarget](OwnedDependency(tgt))
                                for tgt in transitive_targets.closure
                                if is_ownable_target(tgt))
        exported_targets = list(FrozenOrderedSet(owners))

    py2 = is_python2(
        (target_with_origin.target.get(PythonInterpreterCompatibility).value
         for target_with_origin in targets_with_origins),
        python_setup,
    )
    chroots = await MultiGet(
        Get[SetupPyChroot](SetupPyChrootRequest(exported_target, py2))
        for exported_target in exported_targets)

    # If args were provided, run setup.py with them; otherwise just dump the chroots.
    if args:
        setup_py_results = await MultiGet(
            Get[RunSetupPyResult](RunSetupPyRequest(exported_target, chroot,
                                                    tuple(args)))
            for exported_target, chroot in zip(exported_targets, chroots))

        for exported_target, setup_py_result in zip(exported_targets,
                                                    setup_py_results):
            addr = exported_target.target.address.reference()
            console.print_stderr(
                f"Writing dist for {addr} under {distdir.relpath}/.")
            workspace.materialize_directory(
                DirectoryToMaterialize(setup_py_result.output,
                                       path_prefix=str(distdir.relpath)))
    else:
        # Just dump the chroot.
        for exported_target, chroot in zip(exported_targets, chroots):
            addr = exported_target.target.address.reference()
            provides = exported_target.provides
            setup_py_dir = distdir.relpath / f"{provides.name}-{provides.version}"
            console.print_stderr(
                f"Writing setup.py chroot for {addr} to {setup_py_dir}")
            workspace.materialize_directory(
                DirectoryToMaterialize(chroot.digest,
                                       path_prefix=str(setup_py_dir)))

    return SetupPy(0)
Example #27
0
async def fmt(
    console: Console,
    targets_with_origins: HydratedTargetsWithOrigins,
    options: FmtOptions,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Fmt:
    adaptors_with_origins = [
        TargetAdaptorWithOrigin.create(target_with_origin.target.adaptor,
                                       target_with_origin.origin)
        for target_with_origin in targets_with_origins
        if target_with_origin.target.adaptor.has_sources()
    ]

    all_language_formatters: Iterable[Type[
        LanguageFormatters]] = union_membership.union_rules[LanguageFormatters]
    if options.values.per_target_caching:
        per_language_results = await MultiGet(
            Get[LanguageFmtResults](LanguageFormatters,
                                    language_formatters((adaptor_with_origin,)))
            for adaptor_with_origin in adaptors_with_origins
            for language_formatters in all_language_formatters
            if language_formatters.belongs_to_language(adaptor_with_origin))
    else:
        language_formatters_with_valid_targets = {
            language_formatters:
            tuple(adaptor_with_origin
                  for adaptor_with_origin in adaptors_with_origins if
                  language_formatters.belongs_to_language(adaptor_with_origin))
            for language_formatters in all_language_formatters
        }
        per_language_results = await MultiGet(
            Get[LanguageFmtResults](LanguageFormatters,
                                    language_formatters(valid_targets))
            for language_formatters, valid_targets in
            language_formatters_with_valid_targets.items() if valid_targets)

    individual_results: List[FmtResult] = list(
        itertools.chain.from_iterable(
            language_result.results
            for language_result in per_language_results))

    if not individual_results:
        return Fmt(exit_code=0)

    # NB: this will fail if there are any conflicting changes, which we want to happen rather than
    # silently having one result override the other. In practice, this should never happen because
    # we group each language's formatters into a single combined_digest.
    merged_formatted_digest = await Get[Digest](DirectoriesToMerge(
        tuple(language_result.combined_digest
              for language_result in per_language_results)))
    workspace.materialize_directory(
        DirectoryToMaterialize(merged_formatted_digest))
    for result in individual_results:
        if result.stdout:
            console.print_stdout(result.stdout)
        if result.stderr:
            console.print_stderr(result.stderr)

    # Since the rules to produce FmtResult should use ExecuteRequest, rather than
    # FallibleExecuteProcessRequest, we assume that there were no failures.
    return Fmt(exit_code=0)