Example #1
async def run_tests(
    console: Console,
    test_subsystem: TestSubsystem,
    interactive_runner: InteractiveRunner,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Test:
    if test_subsystem.debug:
        targets_to_valid_field_sets = await Get(
            TargetsToValidFieldSets,
            TargetsToValidFieldSetsRequest(
                TestFieldSet, goal_description="`test --debug`", error_if_no_valid_targets=True
            ),
        )
        debug_requests = await MultiGet(
            Get(TestDebugRequest, TestFieldSet, field_set)
            for field_set in targets_to_valid_field_sets.field_sets
        )
        exit_code = 0
        for debug_request in debug_requests:
            if debug_request.process is None:
                continue
            debug_result = interactive_runner.run(debug_request.process)
            if debug_result.exit_code != 0:
                exit_code = debug_result.exit_code
        return Test(exit_code)

    targets_to_valid_field_sets = await Get(
        TargetsToValidFieldSets,
        TargetsToValidFieldSetsRequest(
            TestFieldSet,
            goal_description=f"the `{test_subsystem.name}` goal",
            error_if_no_valid_targets=False,
        ),
    )
    field_sets_with_sources = await Get(
        FieldSetsWithSources, FieldSetsWithSourcesRequest(targets_to_valid_field_sets.field_sets)
    )

    results = await MultiGet(
        Get(EnrichedTestResult, TestFieldSet, field_set) for field_set in field_sets_with_sources
    )

    # Print summary.
    exit_code = 0
    if results:
        console.print_stderr("")
    for result in sorted(results):
        if result.skipped:
            continue
        if result.exit_code == 0:
            sigil = console.green("✓")
            status = "succeeded"
        else:
            sigil = console.red("𐄂")
            status = "failed"
            exit_code = cast(int, result.exit_code)
        console.print_stderr(f"{sigil} {result.address} {status}.")

    merged_xml_results = await Get(
        Digest, MergeDigests(result.xml_results for result in results if result.xml_results),
    )
    workspace.write_digest(merged_xml_results)

    if test_subsystem.use_coverage:
        all_coverage_data: Iterable[CoverageData] = [
            result.coverage_data for result in results if result.coverage_data is not None
        ]

        coverage_types_to_collection_types: Dict[
            Type[CoverageData], Type[CoverageDataCollection]
        ] = {
            collection_cls.element_type: collection_cls
            for collection_cls in union_membership.get(CoverageDataCollection)
        }
        coverage_collections: List[CoverageDataCollection] = []
        for data_cls, data in itertools.groupby(all_coverage_data, lambda data: type(data)):
            collection_cls = coverage_types_to_collection_types[data_cls]
            coverage_collections.append(collection_cls(data))
        # We can create multiple reports for each coverage data (console, xml and html)
        coverage_reports_collections = await MultiGet(
            Get(CoverageReports, CoverageDataCollection, coverage_collection)
            for coverage_collection in coverage_collections
        )

        coverage_report_files: List[PurePath] = []
        for coverage_reports in coverage_reports_collections:
            report_files = coverage_reports.materialize(console, workspace)
            coverage_report_files.extend(report_files)

        if coverage_report_files and test_subsystem.open_coverage:
            desktop.ui_open(console, interactive_runner, coverage_report_files)

    return Test(exit_code)
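
Note: Examples #1 and #4 build a type-keyed dispatch table from union membership: each registered CoverageDataCollection class names the CoverageData subtype it collects via element_type, and the dict comprehension inverts that into a lookup from data type to collection class. Below is a minimal, standalone sketch of the same pattern; the Pytest* classes are hypothetical stand-ins for Pants' union members, not the real types.

# A minimal sketch of the element_type dispatch pattern used above.
# PytestCoverageData / PytestCoverageDataCollection are hypothetical.
import itertools
from dataclasses import dataclass
from typing import Dict, List, Type

class CoverageData:
    pass

@dataclass(frozen=True)
class PytestCoverageData(CoverageData):
    report: str

class CoverageDataCollection(tuple):
    element_type: Type[CoverageData]

class PytestCoverageDataCollection(CoverageDataCollection):
    element_type = PytestCoverageData

# Invert "collection -> element type" into "element type -> collection",
# exactly as the dict comprehension in the examples does.
registered = [PytestCoverageDataCollection]
by_element_type: Dict[Type[CoverageData], Type[CoverageDataCollection]] = {
    cls.element_type: cls for cls in registered
}

data = [PytestCoverageData("a"), PytestCoverageData("b")]
collections: List[CoverageDataCollection] = [
    by_element_type[data_cls](group)
    for data_cls, group in itertools.groupby(data, key=type)
]
assert isinstance(collections[0], PytestCoverageDataCollection)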
Example #2
async def run_tests(
    console: Console,
    options: TestOptions,
    interactive_runner: InteractiveRunner,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Test:
    if options.values.debug:
        targets_to_valid_configs = await Get[TargetsToValidConfigurations](
            TargetsToValidConfigurationsRequest(
                TestConfiguration,
                goal_description="`test --debug`",
                error_if_no_valid_targets=True,
                expect_single_config=True,
            )
        )
        config = targets_to_valid_configs.configurations[0]
        logger.info(f"Starting test in debug mode: {config.address.reference()}")
        request = await Get[TestDebugRequest](TestConfiguration, config)
        debug_result = interactive_runner.run_local_interactive_process(request.ipr)
        return Test(debug_result.process_exit_code)

    targets_to_valid_configs = await Get[TargetsToValidConfigurations](
        TargetsToValidConfigurationsRequest(
            TestConfiguration,
            goal_description=f"the `{options.name}` goal",
            error_if_no_valid_targets=False,
        )
    )
    configs_with_sources = await Get[ConfigurationsWithSources](
        ConfigurationsWithSourcesRequest(targets_to_valid_configs.configurations)
    )

    results = await MultiGet(
        Get[AddressAndTestResult](WrappedTestConfiguration(config))
        for config in configs_with_sources
    )

    did_any_fail = False
    for result in results:
        if result.test_result.status == Status.FAILURE:
            did_any_fail = True
        if result.test_result.stdout:
            console.write_stdout(
                f"{result.address.reference()} stdout:\n{result.test_result.stdout}\n"
            )
        if result.test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving
            # the two streams.
            console.write_stdout(
                f"{result.address.reference()} stderr:\n{result.test_result.stderr}\n"
            )

    console.write_stdout("\n")

    for result in results:
        console.print_stdout(
            f"{result.address.reference():80}.....{result.test_result.status.value:>10}"
        )

    if did_any_fail:
        console.print_stderr(console.red("\nTests failed"))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE

    if options.values.run_coverage:
        all_coverage_data: Iterable[CoverageData] = [
            result.test_result.coverage_data
            for result in results
            if result.test_result.coverage_data is not None
        ]

        coverage_types_to_collection_types: Dict[
            Type[CoverageData], Type[CoverageDataCollection]
        ] = {
            collection_cls.element_type: collection_cls
            for collection_cls in union_membership.union_rules[CoverageDataCollection]
        }
        coverage_collections: List[CoverageDataCollection] = []
        for data_cls, data in itertools.groupby(all_coverage_data, lambda data: type(data)):
            collection_cls = coverage_types_to_collection_types[data_cls]
            coverage_collections.append(collection_cls(data))

        coverage_reports = await MultiGet(
            Get[CoverageReport](CoverageDataCollection, coverage_collection)
            for coverage_collection in coverage_collections
        )

        coverage_report_files = []
        for report in coverage_reports:
            report_file = report.materialize(console, workspace)
            if report_file is not None:
                coverage_report_files.append(report_file)

        if coverage_report_files and options.values.open_coverage:
            desktop.ui_open(console, interactive_runner, coverage_report_files)

    return Test(exit_code)
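
Note: the examples differ in the shape of the Get calls. Examples #1 and #4 use the positional form Get(OutputType, InputType, input), while the remaining examples use the older indexed form Get[OutputType](InputType, input). In both, MultiGet awaits a whole batch of requests concurrently rather than one at a time. A rough standalone analogy in plain asyncio, with a hypothetical run_test coroutine standing in for the engine resolving each Get:

# A rough asyncio analogy for the MultiGet fan-out above: issue all test
# requests at once and gather the results. run_test is a hypothetical
# stand-in for the engine resolving a Get(...) request.
import asyncio

async def run_test(name: str) -> int:
    await asyncio.sleep(0.01)  # pretend to run the test
    return 0 if name != "bad" else 1

async def main() -> None:
    names = ["a", "b", "bad"]
    # Like `await MultiGet(Get(...) for ...)`: all requests run concurrently.
    exit_codes = await asyncio.gather(*(run_test(n) for n in names))
    print(exit_codes)  # [0, 0, 1]

asyncio.run(main())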
Example #3
File: test.py Project: briespoke/pants
async def run_tests(
    console: Console,
    options: TestOptions,
    interactive_runner: InteractiveRunner,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Test:
    if options.values.debug:
        targets_to_valid_field_sets = await Get[TargetsToValidFieldSets](
            TargetsToValidFieldSetsRequest(
                TestFieldSet,
                goal_description="`test --debug`",
                error_if_no_valid_targets=True,
                expect_single_field_set=True,
            )
        )
        field_set = targets_to_valid_field_sets.field_sets[0]
        request = await Get[TestDebugRequest](TestFieldSet, field_set)
        debug_result = interactive_runner.run_local_interactive_process(request.ipr)
        return Test(debug_result.process_exit_code)

    targets_to_valid_field_sets = await Get[TargetsToValidFieldSets](
        TargetsToValidFieldSetsRequest(
            TestFieldSet,
            goal_description=f"the `{options.name}` goal",
            error_if_no_valid_targets=False,
        )
    )
    field_sets_with_sources = await Get[FieldSetsWithSources](
        FieldSetsWithSourcesRequest(targets_to_valid_field_sets.field_sets)
    )

    results = await MultiGet(
        Get[AddressAndTestResult](WrappedTestFieldSet(field_set))
        for field_set in field_sets_with_sources
    )

    exit_code = PANTS_SUCCEEDED_EXIT_CODE
    for result in results:
        if result.test_result.status == Status.FAILURE:
            exit_code = PANTS_FAILED_EXIT_CODE
        has_output = result.test_result.stdout or result.test_result.stderr
        if has_output:
            status = (
                console.green("✓")
                if result.test_result.status == Status.SUCCESS
                else console.red("𐄂")
            )
            console.print_stderr(f"{status} {result.address}")
        if result.test_result.stdout:
            console.print_stderr(result.test_result.stdout)
        if result.test_result.stderr:
            console.print_stderr(result.test_result.stderr)
        if has_output and result != results[-1]:
            console.print_stderr("")

    # Print summary
    if len(results) > 1:
        console.print_stderr("")
        for result in results:
            console.print_stderr(
                f"{result.address.reference():80}.....{result.test_result.status.value:>10}"
            )

    for result in results:
        xml_results = result.test_result.xml_results
        if not xml_results:
            continue
        workspace.materialize_directory(DirectoryToMaterialize(xml_results))

    if options.values.use_coverage:
        all_coverage_data: Iterable[CoverageData] = [
            result.test_result.coverage_data
            for result in results
            if result.test_result.coverage_data is not None
        ]

        coverage_types_to_collection_types: Dict[
            Type[CoverageData], Type[CoverageDataCollection]
        ] = {
            collection_cls.element_type: collection_cls
            for collection_cls in union_membership.get(CoverageDataCollection)
        }
        coverage_collections: List[CoverageDataCollection] = []
        for data_cls, data in itertools.groupby(all_coverage_data, lambda data: type(data)):
            collection_cls = coverage_types_to_collection_types[data_cls]
            coverage_collections.append(collection_cls(data))

        coverage_reports = await MultiGet(
            Get[CoverageReport](CoverageDataCollection, coverage_collection)
            for coverage_collection in coverage_collections
        )

        coverage_report_files = []
        for report in coverage_reports:
            report_file = report.materialize(console, workspace)
            if report_file is not None:
                coverage_report_files.append(report_file)

        if coverage_report_files and options.values.open_coverage:
            desktop.ui_open(console, interactive_runner, coverage_report_files)

    return Test(exit_code)
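
Note: the summary lines here (and in Examples #2, #5, #6, and #7) rely on f-string alignment: `:80` left-pads the address to 80 columns and `:>10` right-aligns the status in 10. A tiny sketch of the same layout with made-up rows:

# The summary layout used above: left-align the address in 80 columns,
# right-align the status in 10. The addresses and statuses are made up.
rows = [("src/python/app:tests", "SUCCESS"), ("src/python/lib:tests", "FAILURE")]
for address, status in rows:
    print(f"{address:80}.....{status:>10}")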
Example #4
File: test.py Project: Spacerat/pants
async def run_tests(
    console: Console,
    test_subsystem: TestSubsystem,
    interactive_runner: InteractiveRunner,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Test:
    if test_subsystem.debug:
        targets_to_valid_field_sets = await Get(
            TargetsToValidFieldSets,
            TargetsToValidFieldSetsRequest(
                TestFieldSet,
                goal_description="`test --debug`",
                error_if_no_valid_targets=True,
                expect_single_field_set=True,
            ),
        )
        field_set = targets_to_valid_field_sets.field_sets[0]
        request = await Get(TestDebugRequest, TestFieldSet, field_set)
        debug_result = interactive_runner.run(request.process)
        return Test(debug_result.exit_code)

    targets_to_valid_field_sets = await Get(
        TargetsToValidFieldSets,
        TargetsToValidFieldSetsRequest(
            TestFieldSet,
            goal_description=f"the `{test_subsystem.name}` goal",
            error_if_no_valid_targets=False,
        ),
    )
    field_sets_with_sources = await Get(
        FieldSetsWithSources,
        FieldSetsWithSourcesRequest(targets_to_valid_field_sets.field_sets))

    results = await MultiGet(
        Get(AddressAndTestResult, WrappedTestFieldSet(field_set))
        for field_set in field_sets_with_sources)

    # Print details.
    for result in results:
        if test_subsystem.options.output == ShowOutput.NONE or (
                test_subsystem.options.output == ShowOutput.FAILED
                and result.test_result.status == Status.SUCCESS):
            continue
        has_output = result.test_result.stdout or result.test_result.stderr
        if has_output:
            status = (
                console.green("✓")
                if result.test_result.status == Status.SUCCESS
                else console.red("𐄂")
            )
            console.print_stderr(f"{status} {result.address}")
        if result.test_result.stdout:
            console.print_stderr(result.test_result.stdout)
        if result.test_result.stderr:
            console.print_stderr(result.test_result.stderr)
        if has_output and result != results[-1]:
            console.print_stderr("")

    # Print summary
    console.print_stderr("")
    for result in results:
        color = console.green if result.test_result.status == Status.SUCCESS else console.red
        # The right-align logic sees the color control codes as characters, so we have
        # to account for that. In f-strings the alignment field widths must be literals,
        # so we have to indirect via a call to .format().
        right_align = 19 if console.use_colors else 10
        format_str = f"{{addr:80}}.....{{result:>{right_align}}}"
        console.print_stderr(
            format_str.format(addr=result.address.spec,
                              result=color(result.test_result.status.value)))

    merged_xml_results = await Get(
        Digest,
        MergeDigests(result.test_result.xml_results for result in results
                     if result.test_result.xml_results),
    )
    workspace.write_digest(merged_xml_results)

    if test_subsystem.use_coverage:
        all_coverage_data: Iterable[CoverageData] = [
            result.test_result.coverage_data for result in results
            if result.test_result.coverage_data is not None
        ]

        coverage_types_to_collection_types: Dict[
            Type[CoverageData], Type[CoverageDataCollection]
        ] = {
            collection_cls.element_type: collection_cls
            for collection_cls in union_membership.get(CoverageDataCollection)
        }
        coverage_collections: List[CoverageDataCollection] = []
        for data_cls, data in itertools.groupby(all_coverage_data,
                                                lambda data: type(data)):
            collection_cls = coverage_types_to_collection_types[data_cls]
            coverage_collections.append(collection_cls(data))
        # We can create multiple reports for each coverage data (console, xml and html)
        coverage_reports_collections = await MultiGet(
            Get(CoverageReports, CoverageDataCollection, coverage_collection)
            for coverage_collection in coverage_collections)

        coverage_report_files: List[PurePath] = []
        for coverage_reports in coverage_reports_collections:
            report_files = coverage_reports.materialize(console, workspace)
            coverage_report_files.extend(report_files)

        if coverage_report_files and test_subsystem.open_coverage:
            desktop.ui_open(console, interactive_runner, coverage_report_files)

    exit_code = (
        PANTS_FAILED_EXIT_CODE
        if any(res.test_result.status == Status.FAILURE for res in results)
        else PANTS_SUCCEEDED_EXIT_CODE
    )

    return Test(exit_code)
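
Note: Example #4's summary comment explains its magic numbers: ANSI color codes are invisible on screen but still count toward str alignment widths. A typical wrap is "\x1b[32m" (5 characters) to set the color plus "\x1b[0m" (4 characters) to reset, which is exactly the 19 - 10 = 9 extra columns it budgets when colors are on. A standalone sketch of the compensation; recent Pythons also accept a computed width via a nested replacement field, as shown, as an alternative to the .format() indirection:

# Why Example #4 uses a width of 19 when colors are on: the ANSI escape
# sequences are invisible on screen but still count toward str alignment.
GREEN, RESET = "\x1b[32m", "\x1b[0m"

status = "SUCCESS"
colored = f"{GREEN}{status}{RESET}"

assert len(colored) - len(status) == 9  # 5 chars to set color + 4 to reset

plain_width, color_width = 10, 10 + len(GREEN) + len(RESET)  # 10 and 19
print(f"{'src/app:tests':80}.....{colored:>{color_width}}")
print(f"{'src/app:tests':80}.....{status:>{plain_width}}")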
Example #5
async def run_tests(
    console: Console,
    options: TestOptions,
    interactive_runner: InteractiveRunner,
    targets_with_origins: TargetsWithOrigins,
    workspace: Workspace,
    union_membership: UnionMembership,
    registered_target_types: RegisteredTargetTypes,
) -> Test:
    config_types: Iterable[Type[TestConfiguration]] = union_membership.union_rules[
        TestConfiguration
    ]

    if options.values.debug:
        target_with_origin = targets_with_origins.expect_single()
        target = target_with_origin.target
        valid_config_types = [
            config_type for config_type in config_types
            if config_type.is_valid(target)
        ]
        if not valid_config_types:
            all_valid_target_types = itertools.chain.from_iterable(
                config_type.valid_target_types(
                    registered_target_types.types,
                    union_membership=union_membership)
                for config_type in config_types)
            formatted_target_types = sorted(
                target_type.alias for target_type in all_valid_target_types)
            raise ValueError(
                f"The `test` goal only works with the following target types: "
                f"{formatted_target_types}\n\nYou used {target.address} with target "
                f"type {repr(target.alias)}.")
        if len(valid_config_types) > 1:
            possible_config_types = sorted(
                config_type.__name__ for config_type in valid_config_types)
            raise ValueError(
                f"Multiple of the registered test implementations work for {target.address} "
                f"(target type {repr(target.alias)}). It is ambiguous which implementation to use. "
                f"Possible implementations: {possible_config_types}.")
        config_type = valid_config_types[0]
        logger.info(
            f"Starting test in debug mode: {target.address.reference()}")
        request = await Get[TestDebugRequest](
            TestConfiguration, config_type.create(target_with_origin))
        debug_result = interactive_runner.run_local_interactive_process(
            request.ipr)
        return Test(debug_result.process_exit_code)

    configs = tuple(
        config_type.create(target_with_origin)
        for target_with_origin in targets_with_origins
        for config_type in config_types
        if config_type.is_valid(target_with_origin.target))
    configs_with_sources = await Get[ConfigurationsWithSources](
        ConfigurationsWithSourcesRequest(configs))

    results = await MultiGet(
        Get[AddressAndTestResult](WrappedTestConfiguration(config))
        for config in configs_with_sources)

    did_any_fail = False
    for result in results:
        if result.test_result.status == Status.FAILURE:
            did_any_fail = True
        if result.test_result.stdout:
            console.write_stdout(
                f"{result.address.reference()} stdout:\n{result.test_result.stdout}\n"
            )
        if result.test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving
            # the two streams.
            console.write_stdout(
                f"{result.address.reference()} stderr:\n{result.test_result.stderr}\n"
            )

    console.write_stdout("\n")

    for result in results:
        console.print_stdout(
            f"{result.address.reference():80}.....{result.test_result.status.value:>10}"
        )

    if did_any_fail:
        console.print_stderr(console.red("\nTests failed"))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE

    if options.values.run_coverage:
        all_coverage_data: Iterable[CoverageData] = [
            result.test_result.coverage_data for result in results
            if result.test_result.coverage_data is not None
        ]

        coverage_types_to_collection_types: Dict[
            Type[CoverageData], Type[CoverageDataCollection]
        ] = {
            collection_cls.element_type: collection_cls
            for collection_cls in union_membership.union_rules[CoverageDataCollection]
        }
        coverage_collections: List[CoverageDataCollection] = []
        for data_cls, data in itertools.groupby(all_coverage_data,
                                                lambda data: type(data)):
            collection_cls = coverage_types_to_collection_types[data_cls]
            coverage_collections.append(collection_cls(data))

        coverage_reports = await MultiGet(
            Get[CoverageReport](CoverageDataCollection, coverage_collection)
            for coverage_collection in coverage_collections)

        coverage_report_files = []
        for report in coverage_reports:
            report_file = report.materialize(console, workspace)
            if report_file is not None:
                coverage_report_files.append(report_file)

        if coverage_report_files and options.values.open_coverage:
            desktop.ui_open(console, interactive_runner, coverage_report_files)

    return Test(exit_code)
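
Note: the debug branch here (and in Example #7) selects exactly one applicable implementation, raising a descriptive error on zero matches and on ambiguity. The same select-exactly-one shape, reduced to a generic helper; the Runner classes and their handles attribute are hypothetical:

# The select-exactly-one pattern from the debug branch above, reduced to a
# generic helper. The Runner classes here are hypothetical.
from typing import Callable, List, Sequence, TypeVar

T = TypeVar("T")

def select_single(candidates: Sequence[T], is_valid: Callable[[T], bool], subject: str) -> T:
    valid: List[T] = [c for c in candidates if is_valid(c)]
    if not valid:
        raise ValueError(f"No registered implementation works for {subject}.")
    if len(valid) > 1:
        names = sorted(type(c).__name__ for c in valid)
        raise ValueError(f"Ambiguous implementations for {subject}: {names}.")
    return valid[0]

class PytestRunner:
    handles = ".py"

class JunitRunner:
    handles = ".java"

runner = select_single(
    [PytestRunner(), JunitRunner()],
    is_valid=lambda r: "tests.py".endswith(r.handles),
    subject="src/app:tests",
)
assert isinstance(runner, PytestRunner)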
Example #6
File: test.py Project: rahuliyer95/pants
async def run_tests(
    console: Console,
    options: TestOptions,
    runner: InteractiveRunner,
    targets_with_origins: HydratedTargetsWithOrigins,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Test:
    test_runners: Iterable[Type[TestRunner]] = union_membership.union_rules[TestRunner]

    if options.values.debug:
        target_with_origin = targets_with_origins.expect_single()
        adaptor_with_origin = TargetAdaptorWithOrigin.create(
            target_with_origin.target.adaptor, target_with_origin.origin)
        address = adaptor_with_origin.adaptor.address
        valid_test_runners = [
            test_runner for test_runner in test_runners
            if test_runner.is_valid_target(adaptor_with_origin)
        ]
        if not valid_test_runners:
            raise ValueError(f"No valid test runner for {address}.")
        if len(valid_test_runners) > 1:
            raise ValueError(
                f"Multiple possible test runners for {address} "
                f"({', '.join(test_runner.__name__ for test_runner in valid_test_runners)})."
            )
        test_runner = valid_test_runners[0]
        logger.info(f"Starting test in debug mode: {address.reference()}")
        request = await Get[TestDebugRequest](TestRunner,
                                              test_runner(adaptor_with_origin))
        debug_result = runner.run_local_interactive_process(request.ipr)
        return Test(debug_result.process_exit_code)

    adaptors_with_origins = tuple(
        TargetAdaptorWithOrigin.create(target_with_origin.target.adaptor,
                                       target_with_origin.origin)
        for target_with_origin in targets_with_origins
        if target_with_origin.target.adaptor.has_sources())

    results = await MultiGet(
        Get[AddressAndTestResult](
            WrappedTestRunner, WrappedTestRunner(test_runner(adaptor_with_origin))
        )
        for adaptor_with_origin in adaptors_with_origins
        for test_runner in test_runners
        if test_runner.is_valid_target(adaptor_with_origin)
    )

    did_any_fail = False
    for result in results:
        if result.test_result.status == Status.FAILURE:
            did_any_fail = True
        if result.test_result.stdout:
            console.write_stdout(
                f"{result.address.reference()} stdout:\n{result.test_result.stdout}\n"
            )
        if result.test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving
            # the two streams.
            console.write_stdout(
                f"{result.address.reference()} stderr:\n{result.test_result.stderr}\n"
            )

    console.write_stdout("\n")

    for result in results:
        console.print_stdout(
            f"{result.address.reference():80}.....{result.test_result.status.value:>10}"
        )

    if did_any_fail:
        console.print_stderr(console.red("\nTests failed"))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE

    if options.values.run_coverage:
        # TODO: consider warning if a user uses `--coverage` but the language backend does not
        # provide coverage support. This might be too chatty to be worth doing?
        results_with_coverage = [
            x for x in results if x.test_result.coverage_data is not None
        ]
        coverage_data_collections = itertools.groupby(
            results_with_coverage,
            lambda result: result.test_result.coverage_data.batch_cls,  # type: ignore[union-attr]
        )

        coverage_reports = await MultiGet(
            Get[CoverageReport](
                CoverageDataBatch,
                coverage_batch_cls(tuple(addresses_and_test_results)),  # type: ignore[call-arg]
            )
            for coverage_batch_cls, addresses_and_test_results in coverage_data_collections
        )

        coverage_report_files = []
        for report in coverage_reports:
            report_file = report.materialize(console, workspace)
            if report_file is not None:
                coverage_report_files.append(report_file)

        if coverage_report_files and options.values.open_coverage:
            desktop.ui_open(console, runner, coverage_report_files)

    return Test(exit_code)
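
Note: one subtlety shared by the groupby calls in these examples: itertools.groupby only groups consecutive items, so if results of different coverage types interleave, the same batch or collection class is constructed more than once. Sorting by the grouping key first avoids that, as this minimal demonstration shows:

# itertools.groupby groups only consecutive equal keys, so unsorted input
# can split one logical group into several.
import itertools

data = ["py", "java", "py"]

unsorted_groups = [k for k, _ in itertools.groupby(data)]
assert unsorted_groups == ["py", "java", "py"]  # "py" appears twice

sorted_groups = [k for k, _ in itertools.groupby(sorted(data))]
assert sorted_groups == ["java", "py"]  # one group per key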
Example #7
async def run_tests(
    console: Console,
    options: TestOptions,
    interactive_runner: InteractiveRunner,
    targets_with_origins: TargetsWithOrigins,
    workspace: Workspace,
    union_membership: UnionMembership,
    registered_target_types: RegisteredTargetTypes,
) -> Test:
    config_types: Iterable[Type[TestConfiguration]] = union_membership.union_rules[
        TestConfiguration
    ]

    if options.values.debug:
        target_with_origin = targets_with_origins.expect_single()
        target = target_with_origin.target
        valid_config_types = [
            config_type for config_type in config_types
            if config_type.is_valid(target)
        ]
        if not valid_config_types:
            all_valid_target_types = itertools.chain.from_iterable(
                config_type.valid_target_types(
                    registered_target_types.types,
                    union_membership=union_membership)
                for config_type in config_types)
            formatted_target_types = sorted(
                target_type.alias for target_type in all_valid_target_types)
            raise ValueError(
                f"The `test` goal only works with the following target types: "
                f"{formatted_target_types}\n\nYou used {target.address} with target "
                f"type {repr(target.alias)}.")
        if len(valid_config_types) > 1:
            possible_config_types = sorted(
                config_type.__name__ for config_type in valid_config_types)
            raise ValueError(
                f"Multiple of the registered test implementations work for {target.address} "
                f"(target type {repr(target.alias)}). It is ambiguous which implementation to use. "
                f"Possible implementations: {possible_config_types}.")
        config_type = valid_config_types[0]
        logger.info(
            f"Starting test in debug mode: {target.address.reference()}")
        request = await Get[TestDebugRequest](
            TestConfiguration, config_type.create(target_with_origin))
        debug_result = interactive_runner.run_local_interactive_process(
            request.ipr)
        return Test(debug_result.process_exit_code)

    # TODO: possibly factor out this filtering out of empty `sources`. We do this at this level of
    #  abstraction, rather than in the test runners, because the test runners often will use
    #  auto-discovery when given no input files.
    configs = tuple(
        config_type.create(target_with_origin)
        for target_with_origin in targets_with_origins
        for config_type in config_types
        if config_type.is_valid(target_with_origin.target))
    all_hydrated_sources = await MultiGet(
        Get[HydratedSources](HydrateSourcesRequest, test_target.sources.request)
        for test_target in configs
    )

    results = await MultiGet(
        Get[AddressAndTestResult](WrappedTestConfiguration(config))
        for config, hydrated_sources in zip(configs, all_hydrated_sources)
        if hydrated_sources.snapshot.files)

    did_any_fail = False
    for result in results:
        if result.test_result.status == Status.FAILURE:
            did_any_fail = True
        if result.test_result.stdout:
            console.write_stdout(
                f"{result.address.reference()} stdout:\n{result.test_result.stdout}\n"
            )
        if result.test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving
            # the two streams.
            console.write_stdout(
                f"{result.address.reference()} stderr:\n{result.test_result.stderr}\n"
            )

    console.write_stdout("\n")

    for result in results:
        console.print_stdout(
            f"{result.address.reference():80}.....{result.test_result.status.value:>10}"
        )

    if did_any_fail:
        console.print_stderr(console.red("\nTests failed"))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE

    if options.values.run_coverage:
        # TODO: consider warning if a user uses `--coverage` but the language backend does not
        # provide coverage support. This might be too chatty to be worth doing?
        results_with_coverage = [
            x for x in results if x.test_result.coverage_data is not None
        ]
        coverage_data_collections = itertools.groupby(
            results_with_coverage,
            lambda result: result.test_result.coverage_data.batch_cls,  # type: ignore[union-attr]
        )

        coverage_reports = await MultiGet(
            Get[CoverageReport](
                CoverageDataBatch,
                coverage_batch_cls(tuple(addresses_and_test_results)),  # type: ignore[call-arg]
            )
            for coverage_batch_cls, addresses_and_test_results in coverage_data_collections
        )

        coverage_report_files = []
        for report in coverage_reports:
            report_file = report.materialize(console, workspace)
            if report_file is not None:
                coverage_report_files.append(report_file)

        if coverage_report_files and options.values.open_coverage:
            desktop.ui_open(console, interactive_runner, coverage_report_files)

    return Test(exit_code)
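
Note: Example #7 filters out configurations with no sources by hydrating all of them in one MultiGet and then zipping the results back against the inputs in order. The same zip-and-filter shape with plain, made-up data:

# The zip-and-filter shape from Example #7: resolve a parallel list, then
# keep only the inputs whose resolved value is non-empty. Data is made up.
configs = ["app:tests", "lib:tests", "empty:tests"]
hydrated_files = [("test_app.py",), ("test_lib.py",), ()]  # parallel to configs

to_run = [config for config, files in zip(configs, hydrated_files) if files]
assert to_run == ["app:tests", "lib:tests"]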