Code example #1
async def run_tests(
  console: Console, options: TestOptions, runner: InteractiveRunner, addresses: BuildFileAddresses,
) -> Test:
  if options.values.debug:
    address = await Get[BuildFileAddress](BuildFileAddresses, addresses)
    addr_debug_request = await Get[AddressAndDebugRequest](Address, address.to_address())
    result = runner.run_local_interactive_process(addr_debug_request.request.ipr)
    return Test(result.process_exit_code)

  results = await MultiGet(Get[AddressAndTestResult](Address, addr.to_address()) for addr in addresses)
  did_any_fail = False
  filtered_results = [(x.address, x.test_result) for x in results if x.test_result is not None]

  for address, test_result in filtered_results:
    if test_result.status == Status.FAILURE:
      did_any_fail = True
    if test_result.stdout:
      console.write_stdout(f"{address.reference()} stdout:\n{test_result.stdout}\n")
    if test_result.stderr:
      # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving the
      # two streams.
      console.write_stdout(f"{address.reference()} stderr:\n{test_result.stderr}\n")

  console.write_stdout("\n")

  for address, test_result in filtered_results:
    console.print_stdout(f'{address.reference():80}.....{test_result.status.value:>10}')

  if did_any_fail:
    console.print_stderr(console.red('\nTests failed'))
    exit_code = PANTS_FAILED_EXIT_CODE
  else:
    exit_code = PANTS_SUCCEEDED_EXIT_CODE

  return Test(exit_code)
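
Every `test` rule in this collection prints its per-target summary with the same format spec. A runnable illustration of what that spec produces (the address string is hypothetical):

# Pads the address reference to 80 columns, then right-aligns the status in 10.
ref = "src/python/example:tests"
status = "SUCCESS"
print(f"{ref:80}.....{status:>10}")
# Output: the reference padded out to column 80, then "....." and "   SUCCESS".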
Code example #2
File: run.py  Project: tpasternak/pants
def run(console: Console, workspace: Workspace, runner: InteractiveRunner,
        bfa: BuildFileAddress) -> Run:
    target = bfa.to_address()
    binary = yield Get(CreatedBinary, Address, target)

    with temporary_dir(cleanup=True) as tmpdir:
        dirs_to_materialize = (DirectoryToMaterialize(
            path=str(tmpdir), directory_digest=binary.digest), )
        workspace.materialize_directories(dirs_to_materialize)

        console.write_stdout(f"Running target: {target}\n")
        full_path = str(Path(tmpdir, binary.binary_name))
        run_request = InteractiveProcessRequest(
            argv=[full_path],
            run_in_workspace=True,
        )

        try:
            result = runner.run_local_interactive_process(run_request)
            exit_code = result.process_exit_code
            if result.process_exit_code == 0:
                console.write_stdout(f"{target} ran successfully.\n")
            else:
                console.write_stderr(
                    f"{target} failed with code {result.process_exit_code}!\n")

        except Exception as e:
            console.write_stderr(
                f"Exception when attempting to run {target} : {e}\n")
            exit_code = -1

    yield Run(exit_code)
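
Examples #2 and #6 predate `async` rules: the engine drives such generator rules by sending products back in through `yield`, and the final `yield Run(...)` plays the role of `return`. A toy driver showing the mechanism (pure Python, no Pants engine; all names hypothetical):

def toy_rule():
    # The engine answers each yielded request by sending the product back in.
    created = yield "Get(CreatedBinary, ...)"
    return f"ran {created}"

gen = toy_rule()
request = next(gen)        # the rule yields its first request
try:
    gen.send("binary")     # the engine sends the product back
except StopIteration as e:
    print(e.value)         # "ran binary" -- the rule's result rides StopIteration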
Code example #3
async def run_repl(
    console: Console,
    workspace: Workspace,
    runner: InteractiveRunner,
    options: ReplOptions,
    transitive_targets: TransitiveTargets,
    build_root: BuildRoot,
    union_membership: UnionMembership,
    global_options: GlobalOptions,
) -> Repl:

    # We can guarantee that we will only ever enter this `goal_rule` if there exists an implementer
    # of the `ReplImplementation` union because `LegacyGraphSession.run_goal_rules()` will not
    # execute this rule's body if there are no implementations registered.
    membership: Iterable[Type[
        ReplImplementation]] = union_membership.union_rules[ReplImplementation]
    implementations = {impl.name: impl for impl in membership}

    default_repl = "python"
    repl_shell_name = cast(str, options.values.shell or default_repl)

    repl_implementation_cls = implementations.get(repl_shell_name)
    if repl_implementation_cls is None:
        available = sorted(set(implementations.keys()))
        console.write_stdout(
            f"{repl_shell_name} is not an installed REPL program. Available REPLs: {available}"
        )
        return Repl(-1)

    repl_impl = repl_implementation_cls(targets=Targets(
        tgt for tgt in transitive_targets.closure
        if repl_implementation_cls.is_valid(tgt)))
    repl_binary = await Get[ReplBinary](ReplImplementation, repl_impl)

    with temporary_dir(root_dir=global_options.options.pants_workdir,
                       cleanup=False) as tmpdir:
        path_relative_to_build_root = PurePath(tmpdir).relative_to(
            build_root.path).as_posix()
        workspace.materialize_directory(
            DirectoryToMaterialize(repl_binary.digest,
                                   path_prefix=path_relative_to_build_root))

        full_path = PurePath(tmpdir, repl_binary.binary_name).as_posix()
        run_request = InteractiveProcessRequest(
            argv=(full_path, ),
            run_in_workspace=True,
        )

    result = runner.run_local_interactive_process(run_request)
    exit_code = result.process_exit_code

    if exit_code == 0:
        console.write_stdout("REPL exited successfully.")
    else:
        console.write_stdout(f"REPL exited with error: {exit_code}.")

    return Repl(exit_code)
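
Note that this rule (and example #7) dedents `run_local_interactive_process` out of the `with temporary_dir(...)` block, which only works because `cleanup=False` leaves the materialized directory behind; the `cleanup=True` variants (examples #5, #8, #9, #10, #17) must run the process inside the block. A stdlib sketch of the difference:

import os
import tempfile

# cleanup=True analogue: the directory vanishes when the block exits, so any
# process using it must run inside the block.
with tempfile.TemporaryDirectory() as tmpdir:
    path = tmpdir
print(os.path.exists(path))   # False

# cleanup=False analogue: mkdtemp leaves the directory in place, so work can
# continue after the block that created it.
path = tempfile.mkdtemp()
print(os.path.exists(path))   # True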
Code example #4
def test_materialize_input_files(self) -> None:
    program_text = b'#!/usr/bin/python\nprint("hello")'
    binary = self.create_mock_binary(program_text)
    interactive_runner = InteractiveRunner(self.scheduler)
    request = InteractiveProcessRequest(
        argv=("./program.py",), run_in_workspace=False, input_files=binary.digest,
    )
    result = interactive_runner.run_local_interactive_process(request)
    self.assertEqual(result.process_exit_code, 0)
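
The `create_mock_binary` helper is elided from this excerpt. A plausible sketch of its shape, assuming the engine test-base API of this era (`FileContent`, `InputFilesContent`, `request_single_product`); every name below is an assumption, not the actual helper:

# Hypothetical reconstruction -- module paths and signatures are assumptions.
from pants.core.goals.binary import CreatedBinary
from pants.engine.fs import Digest, FileContent, InputFilesContent

def create_mock_binary(self, program_text: bytes) -> CreatedBinary:
    # Capture the script as an executable file in a digest the runner can
    # materialize, so argv=("./program.py",) resolves inside the sandbox.
    input_files_content = InputFilesContent(
        (FileContent(path="program.py", content=program_text, is_executable=True),)
    )
    digest = self.request_single_product(Digest, input_files_content)
    return CreatedBinary(binary_name="program.py", digest=digest)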
Code example #5
async def run(
    console: Console,
    workspace: Workspace,
    runner: InteractiveRunner,
    build_root: BuildRoot,
    options: RunOptions,
    global_options: GlobalOptions,
) -> Run:
    targets_to_valid_configs = await Get[TargetsToValidConfigurations](
        TargetsToValidConfigurationsRequest(
            BinaryConfiguration,
            goal_description=f"the `{options.name}` goal",
            error_if_no_valid_targets=True,
            expect_single_config=True,
        ))
    config = targets_to_valid_configs.configurations[0]
    binary = await Get[CreatedBinary](BinaryConfiguration, config)

    workdir = global_options.options.pants_workdir
    with temporary_dir(root_dir=workdir, cleanup=True) as tmpdir:
        path_relative_to_build_root = PurePath(tmpdir).relative_to(
            build_root.path).as_posix()
        workspace.materialize_directory(
            DirectoryToMaterialize(binary.digest,
                                   path_prefix=path_relative_to_build_root))

        console.write_stdout(f"Running target: {config.address}\n")
        full_path = PurePath(tmpdir, binary.binary_name).as_posix()
        run_request = InteractiveProcessRequest(
            argv=(full_path, *options.values.args),
            run_in_workspace=True,
        )

        try:
            result = runner.run_local_interactive_process(run_request)
            exit_code = result.process_exit_code
            if result.process_exit_code == 0:
                console.write_stdout(f"{config.address} ran successfully.\n")
            else:
                console.write_stderr(
                    f"{config.address} failed with code {result.process_exit_code}!\n"
                )

        except Exception as e:
            console.write_stderr(
                f"Exception when attempting to run {config.address}: {e!r}\n")
            exit_code = -1

    return Run(exit_code)
Code example #6
File: run.py  Project: triplequote/pants
def run(console: Console, runner: InteractiveRunner, build_file_addresses: BuildFileAddresses) -> Run:
  console.write_stdout("Running the `run` goal\n")

  request = InteractiveProcessRequest(
    argv=["/usr/bin/python"],
    env=("TEST_ENV", "TEST"),
    run_in_workspace=False,
  )

  try:
    res = runner.run_local_interactive_process(request)
    print(f"Subprocess exited with result: {res.process_exit_code}")
    yield Run(res.process_exit_code)
  except Exception as e:
    print(f"Exception when running local interactive process: {e}")
    yield Run(-1)
Code example #7
async def run_repl(
    console: Console,
    workspace: Workspace,
    runner: InteractiveRunner,
    options: ReplOptions,
    transitive_targets: TransitiveTargets,
    build_root: BuildRoot,
    union_membership: UnionMembership,
    global_options: GlobalOptions,
) -> Repl:
    default_repl = "python"
    repl_shell_name = cast(str, options.values.shell) or default_repl

    implementations: Dict[str, Type[ReplImplementation]] = {
        impl.name: impl
        for impl in union_membership[ReplImplementation]
    }
    repl_implementation_cls = implementations.get(repl_shell_name)
    if repl_implementation_cls is None:
        available = sorted(implementations.keys())
        console.print_stderr(
            f"{repr(repl_shell_name)} is not a registered REPL. Available REPLs (which may "
            f"be specified through the option `--repl-shell`): {available}")
        return Repl(-1)

    repl_impl = repl_implementation_cls(targets=Targets(
        tgt for tgt in transitive_targets.closure
        if repl_implementation_cls.is_valid(tgt)))
    repl_binary = await Get[ReplBinary](ReplImplementation, repl_impl)

    with temporary_dir(root_dir=global_options.options.pants_workdir,
                       cleanup=False) as tmpdir:
        path_relative_to_build_root = PurePath(tmpdir).relative_to(
            build_root.path).as_posix()
        workspace.materialize_directory(
            DirectoryToMaterialize(repl_binary.digest,
                                   path_prefix=path_relative_to_build_root))

        full_path = PurePath(tmpdir, repl_binary.binary_name).as_posix()
        run_request = InteractiveProcessRequest(
            argv=(full_path, ),
            run_in_workspace=True,
        )

    result = runner.run_local_interactive_process(run_request)
    return Repl(result.process_exit_code)
Code example #8
async def run(
    console: Console,
    workspace: Workspace,
    runner: InteractiveRunner,
    build_root: BuildRoot,
    addresses: Addresses,
    options: RunOptions,
) -> Run:
    address = addresses.expect_single()
    binary = await Get[CreatedBinary](Address, address)

    with temporary_dir(root_dir=PurePath(build_root.path,
                                         ".pants.d").as_posix(),
                       cleanup=True) as tmpdir:
        path_relative_to_build_root = PurePath(tmpdir).relative_to(
            build_root.path).as_posix()
        workspace.materialize_directory(
            DirectoryToMaterialize(binary.digest,
                                   path_prefix=path_relative_to_build_root))

        console.write_stdout(f"Running target: {address}\n")
        full_path = PurePath(tmpdir, binary.binary_name).as_posix()
        run_request = InteractiveProcessRequest(
            argv=(full_path, *options.values.args),
            run_in_workspace=True,
        )

        try:
            result = runner.run_local_interactive_process(run_request)
            exit_code = result.process_exit_code
            if result.process_exit_code == 0:
                console.write_stdout(f"{address} ran successfully.\n")
            else:
                console.write_stderr(
                    f"{address} failed with code {result.process_exit_code}!\n"
                )

        except Exception as e:
            console.write_stderr(
                f"Exception when attempting to run {address}: {e!r}\n")
            exit_code = -1

    return Run(exit_code)
Code example #9
File: run.py  Project: MEDIARITHMICS/pants
async def run(
    options: RunOptions,
    global_options: GlobalOptions,
    console: Console,
    runner: InteractiveRunner,
    workspace: Workspace,
    build_root: BuildRoot,
) -> Run:
    targets_to_valid_field_sets = await Get[TargetsToValidFieldSets](
        TargetsToValidFieldSetsRequest(
            BinaryFieldSet,
            goal_description=f"the `{options.name}` goal",
            error_if_no_valid_targets=True,
            expect_single_field_set=True,
        ))
    field_set = targets_to_valid_field_sets.field_sets[0]
    binary = await Get[CreatedBinary](BinaryFieldSet, field_set)

    workdir = global_options.options.pants_workdir
    with temporary_dir(root_dir=workdir, cleanup=True) as tmpdir:
        path_relative_to_build_root = PurePath(tmpdir).relative_to(
            build_root.path).as_posix()
        workspace.materialize_directory(
            DirectoryToMaterialize(binary.digest,
                                   path_prefix=path_relative_to_build_root))

        full_path = PurePath(tmpdir, binary.binary_name).as_posix()
        run_request = InteractiveProcessRequest(
            argv=(full_path, *options.values.args),
            run_in_workspace=True,
        )

        try:
            result = runner.run_local_interactive_process(run_request)
            exit_code = result.process_exit_code
        except Exception as e:
            console.print_stderr(
                f"Exception when attempting to run {field_set.address}: {e!r}")
            exit_code = -1

    return Run(exit_code)
Code example #10
File: run.py  Project: letisiapangataa/pants
async def run(
    console: Console,
    workspace: Workspace,
    runner: InteractiveRunner,
    build_root: BuildRoot,
    bfa: BuildFileAddress,
) -> Run:
    target = bfa.to_address()
    binary = await Get[CreatedBinary](Address, target)

    with temporary_dir(root_dir=str(Path(build_root.path, ".pants.d")),
                       cleanup=True) as tmpdir:
        path_relative_to_build_root = str(
            Path(tmpdir).relative_to(build_root.path))
        workspace.materialize_directory(
            DirectoryToMaterialize(binary.digest,
                                   path_prefix=path_relative_to_build_root))

        console.write_stdout(f"Running target: {target}\n")
        full_path = str(Path(tmpdir, binary.binary_name))
        run_request = InteractiveProcessRequest(
            argv=(full_path, ),
            run_in_workspace=True,
        )

        try:
            result = runner.run_local_interactive_process(run_request)
            exit_code = result.process_exit_code
            if result.process_exit_code == 0:
                console.write_stdout(f"{target} ran successfully.\n")
            else:
                console.write_stderr(
                    f"{target} failed with code {result.process_exit_code}!\n")

        except Exception as e:
            console.write_stderr(
                f"Exception when attempting to run {target} : {e}\n")
            exit_code = -1

    return Run(exit_code)
Code example #11
async def run_tests(
    console: Console,
    options: TestOptions,
    runner: InteractiveRunner,
    addresses_with_origins: AddressesWithOrigins,
    workspace: Workspace,
) -> Test:
    if options.values.debug:
        address_with_origin = addresses_with_origins.expect_single()
        addr_debug_request = await Get[AddressAndDebugRequest](
            AddressWithOrigin, address_with_origin
        )
        result = runner.run_local_interactive_process(addr_debug_request.request.ipr)
        return Test(result.process_exit_code)

    results = await MultiGet(
        Get[AddressAndTestResult](AddressWithOrigin, address_with_origin)
        for address_with_origin in addresses_with_origins
    )

    if options.values.run_coverage:
        # TODO: consider warning if a user uses `--coverage` but the language backend does not
        # provide coverage support. This might be too chatty to be worth doing?
        results_with_coverage = [
            x
            for x in results
            if x.test_result is not None and x.test_result.coverage_data is not None
        ]
        coverage_data_collections = itertools.groupby(
            results_with_coverage,
            lambda address_and_test_result: address_and_test_result.test_result.coverage_data.batch_cls,  # type: ignore[union-attr]
        )

        coverage_reports = await MultiGet(
            Get[CoverageReport](
                CoverageDataBatch, coverage_batch_cls(tuple(addresses_and_test_results))  # type: ignore[call-arg]
            )
            for coverage_batch_cls, addresses_and_test_results in coverage_data_collections
        )
        for report in coverage_reports:
            workspace.materialize_directory(
                DirectoryToMaterialize(
                    report.result_digest, path_prefix=str(report.directory_to_materialize_to),
                )
            )
            console.print_stdout(f"Wrote coverage report to `{report.directory_to_materialize_to}`")

    did_any_fail = False
    filtered_results = [(x.address, x.test_result) for x in results if x.test_result is not None]
    for address, test_result in filtered_results:
        if test_result.status == Status.FAILURE:
            did_any_fail = True
        if test_result.stdout:
            console.write_stdout(f"{address.reference()} stdout:\n{test_result.stdout}\n")
        if test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving the
            # two streams.
            console.write_stdout(f"{address.reference()} stderr:\n{test_result.stderr}\n")

    console.write_stdout("\n")

    for address, test_result in filtered_results:
        console.print_stdout(f"{address.reference():80}.....{test_result.status.value:>10}")

    if did_any_fail:
        console.print_stderr(console.red("\nTests failed"))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE

    return Test(exit_code)
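
One subtlety in the coverage grouping above (shared with examples #14 and #16): `itertools.groupby` only merges consecutive items with equal keys, so results with the same `batch_cls` are assumed to arrive adjacent. A runnable toy of the pitfall:

import itertools

data = ["a1", "b1", "a2"]
# Unsorted input: the two "a" items land in separate groups.
print([(k, list(g)) for k, g in itertools.groupby(data, key=lambda s: s[0])])
# [('a', ['a1']), ('b', ['b1']), ('a', ['a2'])]

# Sorting by the same key first yields exactly one group per key.
print([(k, list(g)) for k, g in itertools.groupby(sorted(data), key=lambda s: s[0])])
# [('a', ['a1', 'a2']), ('b', ['b1'])]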
Code example #12
File: test.py  Project: briespoke/pants
async def run_tests(
    console: Console,
    options: TestOptions,
    interactive_runner: InteractiveRunner,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Test:
    if options.values.debug:
        targets_to_valid_field_sets = await Get[TargetsToValidFieldSets](
            TargetsToValidFieldSetsRequest(
                TestFieldSet,
                goal_description="`test --debug`",
                error_if_no_valid_targets=True,
                expect_single_field_set=True,
            )
        )
        field_set = targets_to_valid_field_sets.field_sets[0]
        request = await Get[TestDebugRequest](TestFieldSet, field_set)
        debug_result = interactive_runner.run_local_interactive_process(request.ipr)
        return Test(debug_result.process_exit_code)

    targets_to_valid_field_sets = await Get[TargetsToValidFieldSets](
        TargetsToValidFieldSetsRequest(
            TestFieldSet,
            goal_description=f"the `{options.name}` goal",
            error_if_no_valid_targets=False,
        )
    )
    field_sets_with_sources = await Get[FieldSetsWithSources](
        FieldSetsWithSourcesRequest(targets_to_valid_field_sets.field_sets)
    )

    results = await MultiGet(
        Get[AddressAndTestResult](WrappedTestFieldSet(field_set))
        for field_set in field_sets_with_sources
    )

    exit_code = PANTS_SUCCEEDED_EXIT_CODE
    for result in results:
        if result.test_result.status == Status.FAILURE:
            exit_code = PANTS_FAILED_EXIT_CODE
        has_output = result.test_result.stdout or result.test_result.stderr
        if has_output:
            status = (
                console.green("✓")
                if result.test_result.status == Status.SUCCESS
                else console.red("𐄂")
            )
            console.print_stderr(f"{status} {result.address}")
        if result.test_result.stdout:
            console.print_stderr(result.test_result.stdout)
        if result.test_result.stderr:
            console.print_stderr(result.test_result.stderr)
        if has_output and result != results[-1]:
            console.print_stderr("")

    # Print summary
    if len(results) > 1:
        console.print_stderr("")
        for result in results:
            console.print_stderr(
                f"{result.address.reference():80}.....{result.test_result.status.value:>10}"
            )

    for result in results:
        xml_results = result.test_result.xml_results
        if not xml_results:
            continue
        workspace.materialize_directory(DirectoryToMaterialize(xml_results))

    if options.values.use_coverage:
        all_coverage_data: Iterable[CoverageData] = [
            result.test_result.coverage_data
            for result in results
            if result.test_result.coverage_data is not None
        ]

        coverage_types_to_collection_types: Dict[
            Type[CoverageData], Type[CoverageDataCollection]
        ] = {
            collection_cls.element_type: collection_cls
            for collection_cls in union_membership.get(CoverageDataCollection)
        }
        coverage_collections: List[CoverageDataCollection] = []
        for data_cls, data in itertools.groupby(all_coverage_data, lambda data: type(data)):
            collection_cls = coverage_types_to_collection_types[data_cls]
            coverage_collections.append(collection_cls(data))

        coverage_reports = await MultiGet(
            Get[CoverageReport](CoverageDataCollection, coverage_collection)
            for coverage_collection in coverage_collections
        )

        coverage_report_files = []
        for report in coverage_reports:
            report_file = report.materialize(console, workspace)
            if report_file is not None:
                coverage_report_files.append(report_file)

        if coverage_report_files and options.values.open_coverage:
            desktop.ui_open(console, interactive_runner, coverage_report_files)

    return Test(exit_code)
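
The coverage plumbing in this example (and in #13 and #15) maps each `CoverageData` subclass to its collection class via `element_type`, then batches consecutive same-typed results with `groupby`. The same pattern in plain Python (toy classes standing in for the Pants types):

import itertools

class IntBatch(list):
    element_type = int

class StrBatch(list):
    element_type = str

# element_type -> collection class, mirroring coverage_types_to_collection_types.
by_type = {cls.element_type: cls for cls in (IntBatch, StrBatch)}

items = [1, 2, "a", "b"]
batches = [by_type[t](group) for t, group in itertools.groupby(items, key=type)]
print(batches)  # [[1, 2], ['a', 'b']] -- an IntBatch followed by a StrBatch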
Code example #13
async def run_tests(
    console: Console,
    options: TestOptions,
    interactive_runner: InteractiveRunner,
    targets_with_origins: TargetsWithOrigins,
    workspace: Workspace,
    union_membership: UnionMembership,
    registered_target_types: RegisteredTargetTypes,
) -> Test:
    config_types: Iterable[Type[
        TestConfiguration]] = union_membership.union_rules[TestConfiguration]

    if options.values.debug:
        target_with_origin = targets_with_origins.expect_single()
        target = target_with_origin.target
        valid_config_types = [
            config_type for config_type in config_types
            if config_type.is_valid(target)
        ]
        if not valid_config_types:
            all_valid_target_types = itertools.chain.from_iterable(
                config_type.valid_target_types(
                    registered_target_types.types,
                    union_membership=union_membership)
                for config_type in config_types)
            formatted_target_types = sorted(
                target_type.alias for target_type in all_valid_target_types)
            raise ValueError(
                f"The `test` goal only works with the following target types: "
                f"{formatted_target_types}\n\nYou used {target.address} with target "
                f"type {repr(target.alias)}.")
        if len(valid_config_types) > 1:
            possible_config_types = sorted(
                config_type.__name__ for config_type in valid_config_types)
            raise ValueError(
                f"Multiple of the registered test implementations work for {target.address} "
                f"(target type {repr(target.alias)}). It is ambiguous which implementation to use. "
                f"Possible implementations: {possible_config_types}.")
        config_type = valid_config_types[0]
        logger.info(
            f"Starting test in debug mode: {target.address.reference()}")
        request = await Get[TestDebugRequest](
            TestConfiguration, config_type.create(target_with_origin))
        debug_result = interactive_runner.run_local_interactive_process(
            request.ipr)
        return Test(debug_result.process_exit_code)

    configs = tuple(
        config_type.create(target_with_origin)
        for target_with_origin in targets_with_origins
        for config_type in config_types
        if config_type.is_valid(target_with_origin.target))
    configs_with_sources = await Get[ConfigurationsWithSources](
        ConfigurationsWithSourcesRequest(configs))

    results = await MultiGet(
        Get[AddressAndTestResult](WrappedTestConfiguration(config))
        for config in configs_with_sources)

    did_any_fail = False
    for result in results:
        if result.test_result.status == Status.FAILURE:
            did_any_fail = True
        if result.test_result.stdout:
            console.write_stdout(
                f"{result.address.reference()} stdout:\n{result.test_result.stdout}\n"
            )
        if result.test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving
            # the two streams.
            console.write_stdout(
                f"{result.address.reference()} stderr:\n{result.test_result.stderr}\n"
            )

    console.write_stdout("\n")

    for result in results:
        console.print_stdout(
            f"{result.address.reference():80}.....{result.test_result.status.value:>10}"
        )

    if did_any_fail:
        console.print_stderr(console.red("\nTests failed"))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE

    if options.values.run_coverage:
        all_coverage_data: Iterable[CoverageData] = [
            result.test_result.coverage_data for result in results
            if result.test_result.coverage_data is not None
        ]

        coverage_types_to_collection_types: Dict[
            Type[CoverageData], Type[CoverageDataCollection]] = {
                collection_cls.element_type: collection_cls
                for collection_cls in
                union_membership.union_rules[CoverageDataCollection]
            }
        coverage_collections: List[CoverageDataCollection] = []
        for data_cls, data in itertools.groupby(all_coverage_data,
                                                lambda data: type(data)):
            collection_cls = coverage_types_to_collection_types[data_cls]
            coverage_collections.append(collection_cls(data))

        coverage_reports = await MultiGet(
            Get[CoverageReport](CoverageDataCollection, coverage_collection)
            for coverage_collection in coverage_collections)

        coverage_report_files = []
        for report in coverage_reports:
            report_file = report.materialize(console, workspace)
            if report_file is not None:
                coverage_report_files.append(report_file)

        if coverage_report_files and options.values.open_coverage:
            desktop.ui_open(console, interactive_runner, coverage_report_files)

    return Test(exit_code)
Code example #14
async def run_tests(
    console: Console,
    options: TestOptions,
    interactive_runner: InteractiveRunner,
    targets_with_origins: TargetsWithOrigins,
    workspace: Workspace,
    union_membership: UnionMembership,
    registered_target_types: RegisteredTargetTypes,
) -> Test:
    config_types: Iterable[Type[
        TestConfiguration]] = union_membership.union_rules[TestConfiguration]

    if options.values.debug:
        target_with_origin = targets_with_origins.expect_single()
        target = target_with_origin.target
        valid_config_types = [
            config_type for config_type in config_types
            if config_type.is_valid(target)
        ]
        if not valid_config_types:
            all_valid_target_types = itertools.chain.from_iterable(
                config_type.valid_target_types(
                    registered_target_types.types,
                    union_membership=union_membership)
                for config_type in config_types)
            formatted_target_types = sorted(
                target_type.alias for target_type in all_valid_target_types)
            raise ValueError(
                f"The `test` goal only works with the following target types: "
                f"{formatted_target_types}\n\nYou used {target.address} with target "
                f"type {repr(target.alias)}.")
        if len(valid_config_types) > 1:
            possible_config_types = sorted(
                config_type.__name__ for config_type in valid_config_types)
            raise ValueError(
                f"Multiple of the registered test implementations work for {target.address} "
                f"(target type {repr(target.alias)}). It is ambiguous which implementation to use. "
                f"Possible implementations: {possible_config_types}.")
        config_type = valid_config_types[0]
        logger.info(
            f"Starting test in debug mode: {target.address.reference()}")
        request = await Get[TestDebugRequest](
            TestConfiguration, config_type.create(target_with_origin))
        debug_result = interactive_runner.run_local_interactive_process(
            request.ipr)
        return Test(debug_result.process_exit_code)

    # TODO: possibly factor out this filtering out of empty `sources`. We do this at this level of
    #  abstraction, rather than in the test runners, because the test runners often will use
    #  auto-discovery when given no input files.
    configs = tuple(
        config_type.create(target_with_origin)
        for target_with_origin in targets_with_origins
        for config_type in config_types
        if config_type.is_valid(target_with_origin.target))
    all_hydrated_sources = await MultiGet(Get[HydratedSources](
        HydrateSourcesRequest, test_target.sources.request)
                                          for test_target in configs)

    results = await MultiGet(
        Get[AddressAndTestResult](WrappedTestConfiguration(config))
        for config, hydrated_sources in zip(configs, all_hydrated_sources)
        if hydrated_sources.snapshot.files)

    did_any_fail = False
    for result in results:
        if result.test_result.status == Status.FAILURE:
            did_any_fail = True
        if result.test_result.stdout:
            console.write_stdout(
                f"{result.address.reference()} stdout:\n{result.test_result.stdout}\n"
            )
        if result.test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving
            # the two streams.
            console.write_stdout(
                f"{result.address.reference()} stderr:\n{result.test_result.stderr}\n"
            )

    console.write_stdout("\n")

    for result in results:
        console.print_stdout(
            f"{result.address.reference():80}.....{result.test_result.status.value:>10}"
        )

    if did_any_fail:
        console.print_stderr(console.red("\nTests failed"))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE

    if options.values.run_coverage:
        # TODO: consider warning if a user uses `--coverage` but the language backend does not
        # provide coverage support. This might be too chatty to be worth doing?
        results_with_coverage = [
            x for x in results if x.test_result.coverage_data is not None
        ]
        coverage_data_collections = itertools.groupby(
            results_with_coverage,
            lambda address_and_test_result: (
                address_and_test_result.test_result.coverage_data.
                batch_cls  # type: ignore[union-attr]
            ),
        )

        coverage_reports = await MultiGet(Get[CoverageReport](
            CoverageDataBatch,
            coverage_batch_cls(tuple(
                addresses_and_test_results)),  # type: ignore[call-arg]
        ) for coverage_batch_cls, addresses_and_test_results in
                                          coverage_data_collections)

        coverage_report_files = []
        for report in coverage_reports:
            report_file = report.materialize(console, workspace)
            if report_file is not None:
                coverage_report_files.append(report_file)

        if coverage_report_files and options.values.open_coverage:
            desktop.ui_open(console, interactive_runner, coverage_report_files)

    return Test(exit_code)
Code example #15
async def run_tests(
    console: Console,
    options: TestOptions,
    interactive_runner: InteractiveRunner,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Test:
    if options.values.debug:
        targets_to_valid_configs = await Get[TargetsToValidConfigurations](
            TargetsToValidConfigurationsRequest(
                TestConfiguration,
                goal_description="`test --debug`",
                error_if_no_valid_targets=True,
                expect_single_config=True,
            )
        )
        config = targets_to_valid_configs.configurations[0]
        logger.info(f"Starting test in debug mode: {config.address.reference()}")
        request = await Get[TestDebugRequest](TestConfiguration, config)
        debug_result = interactive_runner.run_local_interactive_process(request.ipr)
        return Test(debug_result.process_exit_code)

    targets_to_valid_configs = await Get[TargetsToValidConfigurations](
        TargetsToValidConfigurationsRequest(
            TestConfiguration,
            goal_description=f"the `{options.name}` goal",
            error_if_no_valid_targets=False,
        )
    )
    configs_with_sources = await Get[ConfigurationsWithSources](
        ConfigurationsWithSourcesRequest(targets_to_valid_configs.configurations)
    )

    results = await MultiGet(
        Get[AddressAndTestResult](WrappedTestConfiguration(config))
        for config in configs_with_sources
    )

    did_any_fail = False
    for result in results:
        if result.test_result.status == Status.FAILURE:
            did_any_fail = True
        if result.test_result.stdout:
            console.write_stdout(
                f"{result.address.reference()} stdout:\n{result.test_result.stdout}\n"
            )
        if result.test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving
            # the two streams.
            console.write_stdout(
                f"{result.address.reference()} stderr:\n{result.test_result.stderr}\n"
            )

    console.write_stdout("\n")

    for result in results:
        console.print_stdout(
            f"{result.address.reference():80}.....{result.test_result.status.value:>10}"
        )

    if did_any_fail:
        console.print_stderr(console.red("\nTests failed"))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE

    if options.values.run_coverage:
        all_coverage_data: Iterable[CoverageData] = [
            result.test_result.coverage_data
            for result in results
            if result.test_result.coverage_data is not None
        ]

        coverage_types_to_collection_types: Dict[
            Type[CoverageData], Type[CoverageDataCollection]
        ] = {
            collection_cls.element_type: collection_cls
            for collection_cls in union_membership.union_rules[CoverageDataCollection]
        }
        coverage_collections: List[CoverageDataCollection] = []
        for data_cls, data in itertools.groupby(all_coverage_data, lambda data: type(data)):
            collection_cls = coverage_types_to_collection_types[data_cls]
            coverage_collections.append(collection_cls(data))

        coverage_reports = await MultiGet(
            Get[CoverageReport](CoverageDataCollection, coverage_collection)
            for coverage_collection in coverage_collections
        )

        coverage_report_files = []
        for report in coverage_reports:
            report_file = report.materialize(console, workspace)
            if report_file is not None:
                coverage_report_files.append(report_file)

        if coverage_report_files and options.values.open_coverage:
            desktop.ui_open(console, interactive_runner, coverage_report_files)

    return Test(exit_code)
Code example #16
File: test.py  Project: wisechengyi/pants
async def run_tests(
    console: Console,
    options: TestOptions,
    runner: InteractiveRunner,
    targets_with_origins: HydratedTargetsWithOrigins,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Test:
    test_runners: Iterable[Type[TestRunner]] = union_membership.union_rules[TestRunner]

    if options.values.debug:
        target_with_origin = targets_with_origins.expect_single()
        adaptor_with_origin = TargetAdaptorWithOrigin.create(
            target_with_origin.target.adaptor, target_with_origin.origin
        )
        address = adaptor_with_origin.adaptor.address
        valid_test_runners = [
            test_runner
            for test_runner in test_runners
            if test_runner.is_valid_target(adaptor_with_origin)
        ]
        if not valid_test_runners:
            raise ValueError(f"No valid test runner for {address}.")
        if len(valid_test_runners) > 1:
            raise ValueError(
                f"Multiple possible test runners for {address} "
                f"({', '.join(test_runner.__name__ for test_runner in valid_test_runners)})."
            )
        test_runner = valid_test_runners[0]
        logger.info(f"Starting test in debug mode: {address.reference()}")
        request = await Get[TestDebugRequest](TestRunner, test_runner(adaptor_with_origin))
        debug_result = runner.run_local_interactive_process(request.ipr)
        return Test(debug_result.process_exit_code)

    adaptors_with_origins = tuple(
        TargetAdaptorWithOrigin.create(target_with_origin.target.adaptor, target_with_origin.origin)
        for target_with_origin in targets_with_origins
        if target_with_origin.target.adaptor.has_sources()
    )

    results = await MultiGet(
        Get[AddressAndTestResult](
            WrappedTestRunner, WrappedTestRunner(test_runner(adaptor_with_origin))
        )
        for adaptor_with_origin in adaptors_with_origins
        for test_runner in test_runners
        if test_runner.is_valid_target(adaptor_with_origin)
    )

    did_any_fail = False
    for result in results:
        if result.test_result.status == Status.FAILURE:
            did_any_fail = True
        if result.test_result.stdout:
            console.write_stdout(
                f"{result.address.reference()} stdout:\n{result.test_result.stdout}\n"
            )
        if result.test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving
            # the two streams.
            console.write_stdout(
                f"{result.address.reference()} stderr:\n{result.test_result.stderr}\n"
            )

    console.write_stdout("\n")

    for result in results:
        console.print_stdout(
            f"{result.address.reference():80}.....{result.test_result.status.value:>10}"
        )

    if did_any_fail:
        console.print_stderr(console.red("\nTests failed"))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE

    if options.values.run_coverage:
        # TODO: consider warning if a user uses `--coverage` but the language backend does not
        # provide coverage support. This might be too chatty to be worth doing?
        results_with_coverage = [x for x in results if x.test_result.coverage_data is not None]
        coverage_data_collections = itertools.groupby(
            results_with_coverage,
            lambda address_and_test_result: address_and_test_result.test_result.coverage_data.batch_cls,  # type: ignore[union-attr]
        )

        coverage_reports = await MultiGet(
            Get[CoverageReport](
                CoverageDataBatch, coverage_batch_cls(tuple(addresses_and_test_results))  # type: ignore[call-arg]
            )
            for coverage_batch_cls, addresses_and_test_results in coverage_data_collections
        )

        for report in coverage_reports:
            report.materialize(console, workspace)

    return Test(exit_code)
Code example #17
async def run(
    console: Console,
    workspace: Workspace,
    runner: InteractiveRunner,
    build_root: BuildRoot,
    targets_with_origins: TargetsWithOrigins,
    options: RunOptions,
    global_options: GlobalOptions,
    union_membership: UnionMembership,
    registered_target_types: RegisteredTargetTypes,
) -> Run:
    valid_config_types_by_target = gather_valid_binary_configuration_types(
        goal_subsytem=options,
        targets_with_origins=targets_with_origins,
        union_membership=union_membership,
        registered_target_types=registered_target_types,
    )

    bulleted_list_sep = "\n  * "

    if len(valid_config_types_by_target) > 1:
        binary_target_addresses = sorted(
            binary_target.address.spec
            for binary_target in valid_config_types_by_target)
        raise ValueError(
            f"The `run` goal only works on one binary target but was given multiple targets that "
            f"can produce a binary:"
            f"{bulleted_list_sep}{bulleted_list_sep.join(binary_target_addresses)}\n\n"
            f"Please select one of these targets to run.")

    target, valid_config_types = list(valid_config_types_by_target.items())[0]
    if len(valid_config_types) > 1:
        possible_config_types = sorted(config_type.__name__
                                       for config_type in valid_config_types)
        # TODO: improve this error message. (It's never actually triggered yet because we only have
        #  Python implemented with V2.) A better error message would explain to users how they can
        #  resolve the issue.
        raise ValueError(
            f"Multiple of the registered binary implementations work for {target.address} "
            f"(target type {repr(target.alias)}).\n\n"
            f"It is ambiguous which implementation to use. Possible implementations:"
            f"{bulleted_list_sep}{bulleted_list_sep.join(possible_config_types)}."
        )
    config_type = valid_config_types[0]

    binary = await Get[CreatedBinary](BinaryConfiguration,
                                      config_type.create(target))

    workdir = global_options.options.pants_workdir

    with temporary_dir(root_dir=workdir, cleanup=True) as tmpdir:
        path_relative_to_build_root = PurePath(tmpdir).relative_to(
            build_root.path).as_posix()
        workspace.materialize_directory(
            DirectoryToMaterialize(binary.digest,
                                   path_prefix=path_relative_to_build_root))

        console.write_stdout(f"Running target: {target.address}\n")
        full_path = PurePath(tmpdir, binary.binary_name).as_posix()
        run_request = InteractiveProcessRequest(
            argv=(full_path, *options.values.args),
            run_in_workspace=True,
        )

        try:
            result = runner.run_local_interactive_process(run_request)
            exit_code = result.process_exit_code
            if result.process_exit_code == 0:
                console.write_stdout(f"{target.address} ran successfully.\n")
            else:
                console.write_stderr(
                    f"{target.address} failed with code {result.process_exit_code}!\n"
                )

        except Exception as e:
            console.write_stderr(
                f"Exception when attempting to run {target.address}: {e!r}\n")
            exit_code = -1

    return Run(exit_code)
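
Across all seventeen examples the contract is identical: build an `InteractiveProcessRequest` with an `argv` tuple, hand it to `runner.run_local_interactive_process(...)`, and read `process_exit_code` off the result. A rough stdlib analogue of that contract (runnable; `subprocess` stands in for the Pants runner):

import subprocess

# argv plays the role of InteractiveProcessRequest.argv; the child inherits
# stdin/stdout/stderr, which is what makes the process "interactive".
result = subprocess.run(["/bin/echo", "hello"])
exit_code = result.returncode  # analogue of result.process_exit_code
print(f"Subprocess exited with code {exit_code}")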