Example #1
File: test.py  Project: sazlin/pants
def fast_test(console: Console, addresses: BuildFileAddresses) -> Test:
  test_results = yield [Get(TestResult, Address, address.to_address()) for address in addresses]
  did_any_fail = False
  for address, test_result in zip(addresses, test_results):
    if test_result.status == Status.FAILURE:
      did_any_fail = True
    if test_result.stdout:
      console.write_stdout(
        "{} stdout:\n{}\n".format(
          address.reference(),
          console.red(test_result.stdout) if test_result.status == Status.FAILURE else test_result.stdout
        )
      )
    if test_result.stderr:
      # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving the
      # two streams.
      console.write_stdout(
        "{} stderr:\n{}\n".format(
          address.reference(),
          console.red(test_result.stderr) if test_result.status == Status.FAILURE else test_result.stderr
        )
      )

  console.write_stdout("\n")

  for address, test_result in zip(addresses, test_results):
    console.print_stdout('{0:80}.....{1:>10}'.format(address.reference(), test_result.status.value))

  if did_any_fail:
    console.print_stderr(console.red('Tests failed'))
    exit_code = PANTS_FAILED_EXIT_CODE
  else:
    exit_code = PANTS_SUCCEEDED_EXIT_CODE

  yield Test(exit_code)
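
Note: these snapshots assume supporting types from Pants' test goal (Status, TestResult) and the PANTS_*_EXIT_CODE constants. A minimal sketch of what the examples rely on, with field names inferred from the usage above rather than copied from any one Pants release:

from dataclasses import dataclass
from enum import Enum

# Exit codes as used above; the real constants live in the pants.base package.
PANTS_SUCCEEDED_EXIT_CODE = 0
PANTS_FAILED_EXIT_CODE = 1

class Status(Enum):
    SUCCESS = "SUCCESS"
    FAILURE = "FAILURE"

@dataclass(frozen=True)
class TestResult:
    status: Status
    stdout: str
    stderr: str
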
Example #2
async def run_tests(
  console: Console, options: TestOptions, runner: InteractiveRunner, addresses: BuildFileAddresses,
) -> Test:
  if options.values.debug:
    address = await Get[BuildFileAddress](BuildFileAddresses, addresses)
    addr_debug_request = await Get[AddressAndDebugRequest](Address, address.to_address())
    result = runner.run_local_interactive_process(addr_debug_request.request.ipr)
    return Test(result.process_exit_code)

  results = await MultiGet(Get[AddressAndTestResult](Address, addr.to_address()) for addr in addresses)
  did_any_fail = False
  filtered_results = [(x.address, x.test_result) for x in results if x.test_result is not None]

  for address, test_result in filtered_results:
    if test_result.status == Status.FAILURE:
      did_any_fail = True
    if test_result.stdout:
      console.write_stdout(f"{address.reference()} stdout:\n{test_result.stdout}\n")
    if test_result.stderr:
      # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving the
      # two streams.
      console.write_stdout(f"{address.reference()} stderr:\n{test_result.stderr}\n")

  console.write_stdout("\n")

  for address, test_result in filtered_results:
    console.print_stdout(f'{address.reference():80}.....{test_result.status.value:>10}')

  if did_any_fail:
    console.print_stderr(console.red('\nTests failed'))
    exit_code = PANTS_FAILED_EXIT_CODE
  else:
    exit_code = PANTS_SUCCEEDED_EXIT_CODE

  return Test(exit_code)
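
Here `Get` requests a product from the Pants rule engine, and `MultiGet` awaits a whole batch of such requests so the engine can satisfy them concurrently. As a rough analogy only (plain asyncio, not Pants code):

import asyncio

async def square(n: int) -> int:
    await asyncio.sleep(0)  # stand-in for an engine round trip
    return n * n

async def main() -> None:
    # Comparable in spirit to: results = await MultiGet(Get(...) for ...)
    results = await asyncio.gather(*(square(n) for n in range(4)))
    print(results)  # [0, 1, 4, 9]

asyncio.run(main())
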
Example #3
File: test.py  Project: OniOni/pants
async def fast_test(console: Console, addresses: BuildFileAddresses) -> Test:
  results = await MultiGet(Get(AddressAndTestResult, Address, addr.to_address()) for addr in addresses)
  did_any_fail = False
  filtered_results = [(x.address, x.test_result) for x in results if x.test_result is not None]

  for address, test_result in filtered_results:
    if test_result.status == Status.FAILURE:
      did_any_fail = True
    if test_result.stdout:
      console.write_stdout(
        "{} stdout:\n{}\n".format(
          address.reference(),
          (console.red(test_result.stdout) if test_result.status == Status.FAILURE
           else test_result.stdout)
        )
      )
    if test_result.stderr:
      # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving the
      # two streams.
      console.write_stdout(
        "{} stderr:\n{}\n".format(
          address.reference(),
          (console.red(test_result.stderr) if test_result.status == Status.FAILURE
           else test_result.stderr)
        )
      )

  console.write_stdout("\n")

  for address, test_result in filtered_results:
    console.print_stdout('{0:80}.....{1:>10}'.format(
      address.reference(), test_result.status.value))

  if did_any_fail:
    console.print_stderr(console.red('Tests failed'))
    exit_code = PANTS_FAILED_EXIT_CODE
  else:
    exit_code = PANTS_SUCCEEDED_EXIT_CODE

  return Test(exit_code)
Example #4
async def typecheck(
    console: Console, targets: Targets, union_membership: UnionMembership
) -> Typecheck:
    typecheck_request_types = union_membership[TypecheckRequest]
    requests: Iterable[StyleRequest] = tuple(
        typecheck_request_type(
            typecheck_request_type.field_set_type.create(target)
            for target in targets
            if typecheck_request_type.field_set_type.is_applicable(target)
        )
        for typecheck_request_type in typecheck_request_types
    )
    field_sets_with_sources: Iterable[FieldSetsWithSources] = await MultiGet(
        Get(FieldSetsWithSources, FieldSetsWithSourcesRequest(request.field_sets))
        for request in requests
    )
    valid_requests: Iterable[StyleRequest] = tuple(
        request_cls(request)
        for request_cls, request in zip(typecheck_request_types, field_sets_with_sources)
        if request
    )
    all_results = await MultiGet(
        Get(TypecheckResults, TypecheckRequest, request) for request in valid_requests
    )

    exit_code = 0
    if all_results:
        console.print_stderr("")
    for results in sorted(all_results, key=lambda results: results.typechecker_name):
        if results.skipped:
            sigil = console.yellow("-")
            status = "skipped"
        elif results.exit_code == 0:
            sigil = console.green("✓")
            status = "succeeded"
        else:
            sigil = console.red("𐄂")
            status = "failed"
            exit_code = results.exit_code
        console.print_stderr(f"{sigil} {results.typechecker_name} {status}.")

    return Typecheck(exit_code)
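
Rules like `typecheck` above are Pants goal rules: they accept engine-provided parameters such as Console and return a Goal subclass wrapping the exit code. A hypothetical minimal registration, based on the modern plugin API (`@goal_rule` plus `collect_rules`); exact module paths and required class attributes differ across the Pants versions shown on this page:

from pants.engine.console import Console
from pants.engine.goal import Goal, GoalSubsystem
from pants.engine.rules import collect_rules, goal_rule

class HelloSubsystem(GoalSubsystem):
    name = "hello"
    help = "An example goal."

class Hello(Goal):
    subsystem_cls = HelloSubsystem

@goal_rule
async def hello(console: Console) -> Hello:
    console.print_stdout("Hello!")
    return Hello(exit_code=0)

# Exposed from the plugin's register.py so Pants can discover the rule.
def rules():
    return collect_rules()
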
Example #5
async def fmt(
    console: Console,
    targets: Targets,
    fmt_subsystem: FmtSubsystem,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Fmt:
    language_target_collection_types = union_membership[LanguageFmtTargets]
    language_target_collections: Iterable[LanguageFmtTargets] = tuple(
        language_target_collection_type(
            Targets(
                target
                for target in targets
                if language_target_collection_type.belongs_to_language(target)
            )
        )
        for language_target_collection_type in language_target_collection_types
    )
    targets_with_sources: Iterable[TargetsWithSources] = await MultiGet(
        Get(
            TargetsWithSources,
            TargetsWithSourcesRequest(language_target_collection.targets),
        )
        for language_target_collection in language_target_collections
    )
    # NB: We must convert the generic TargetsWithSources objects back into their
    # corresponding LanguageFmtTargets, e.g. back to PythonFmtTargets, in order for the union
    # rule to work.
    valid_language_target_collections: Iterable[LanguageFmtTargets] = tuple(
        language_target_collection_cls(
            Targets(
                target
                for target in language_target_collection.targets
                if target in language_targets_with_sources
            )
        )
        for (
            language_target_collection_cls,
            language_target_collection,
            language_targets_with_sources,
        ) in zip(
            language_target_collection_types,
            language_target_collections,
            targets_with_sources,
        )
        if language_targets_with_sources
    )

    if fmt_subsystem.per_file_caching:
        per_language_results = await MultiGet(
            Get(
                LanguageFmtResults,
                LanguageFmtTargets,
                language_target_collection.__class__(Targets([target])),
            )
            for language_target_collection in valid_language_target_collections
            for target in language_target_collection.targets)
    else:
        per_language_results = await MultiGet(
            Get(LanguageFmtResults, LanguageFmtTargets,
                language_target_collection)
            for language_target_collection in valid_language_target_collections
        )

    individual_results: List[FmtResult] = list(
        itertools.chain.from_iterable(
            language_result.results
            for language_result in per_language_results))

    if not individual_results:
        return Fmt(exit_code=0)

    changed_digests = tuple(
        language_result.output
        for language_result in per_language_results
        if language_result.did_change
    )
    if changed_digests:
        # NB: this will fail if there are any conflicting changes, which we want to happen rather
        # than silently having one result override the other. In practice, this should never
        # happen due to us grouping each language's formatters into a single digest.
        merged_formatted_digest = await Get(Digest, MergeDigests(changed_digests))
        workspace.write_digest(merged_formatted_digest)

    # `individual_results` is non-empty here due to the early return above.
    console.print_stderr("")

    # We group all results for the same formatter so that we can give one final status in the
    # summary. This is only relevant if there were multiple results because of
    # `--per-file-caching`.
    formatter_to_results = defaultdict(set)
    for result in individual_results:
        formatter_to_results[result.formatter_name].add(result)

    for formatter, results in sorted(formatter_to_results.items()):
        if any(result.did_change for result in results):
            sigil = console.red("𐄂")
            status = "made changes"
        elif all(result.skipped for result in results):
            sigil = console.yellow("-")
            status = "skipped"
        else:
            sigil = console.green("✓")
            status = "made no changes"
        console.print_stderr(f"{sigil} {formatter} {status}.")

    # Since the rules to produce FmtResult should use ExecuteRequest, rather than
    # FallibleProcess, we assume that there were no failures.
    return Fmt(exit_code=0)
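
The summary loop above folds many per-file `FmtResult`s into one line per formatter. The same grouping pattern in isolation, on toy data (the tuples here are stand-ins, not Pants types):

from collections import defaultdict

results = [("black", "made changes"), ("isort", "skipped"), ("black", "made no changes")]
formatter_to_results = defaultdict(set)
for formatter_name, outcome in results:
    formatter_to_results[formatter_name].add(outcome)

for formatter_name, outcomes in sorted(formatter_to_results.items()):
    print(formatter_name, sorted(outcomes))
# black ['made changes', 'made no changes']
# isort ['skipped']
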
Example #6
async def run_tests(
    console: Console,
    test_subsystem: TestSubsystem,
    interactive_runner: InteractiveRunner,
    workspace: Workspace,
    union_membership: UnionMembership,
    dist_dir: DistDir,
) -> Test:
    if test_subsystem.debug:
        targets_to_valid_field_sets = await Get(
            TargetRootsToFieldSets,
            TargetRootsToFieldSetsRequest(
                TestFieldSet,
                goal_description="`test --debug`",
                no_applicable_targets_behavior=NoApplicableTargetsBehavior.error,
            ),
        )
        debug_requests = await MultiGet(
            Get(TestDebugRequest, TestFieldSet, field_set)
            for field_set in targets_to_valid_field_sets.field_sets
        )
        exit_code = 0
        for debug_request in debug_requests:
            if debug_request.process is None:
                continue
            debug_result = interactive_runner.run(debug_request.process)
            if debug_result.exit_code != 0:
                exit_code = debug_result.exit_code
        return Test(exit_code)

    targets_to_valid_field_sets = await Get(
        TargetRootsToFieldSets,
        TargetRootsToFieldSetsRequest(
            TestFieldSet,
            goal_description=f"the `{test_subsystem.name}` goal",
            no_applicable_targets_behavior=NoApplicableTargetsBehavior.warn,
        ),
    )
    field_sets_with_sources = await Get(
        FieldSetsWithSources, FieldSetsWithSourcesRequest(targets_to_valid_field_sets.field_sets)
    )

    results = await MultiGet(
        Get(EnrichedTestResult, TestFieldSet, field_set) for field_set in field_sets_with_sources
    )

    # Print summary.
    exit_code = 0
    if results:
        console.print_stderr("")
    for result in sorted(results):
        if result.skipped:
            continue
        if result.exit_code == 0:
            sigil = console.green("✓")
            status = "succeeded"
        else:
            sigil = console.red("𐄂")
            status = "failed"
            exit_code = cast(int, result.exit_code)
        console.print_stderr(f"{sigil} {result.address} {status}.")
        if result.extra_output and result.extra_output.files:
            workspace.write_digest(
                result.extra_output.digest,
                path_prefix=str(dist_dir.relpath / "test" / result.address.path_safe_spec),
            )

    merged_xml_results = await Get(
        Digest,
        MergeDigests(result.xml_results.digest for result in results if result.xml_results),
    )
    workspace.write_digest(merged_xml_results)

    if test_subsystem.use_coverage:
        # NB: We must pre-sort the data for itertools.groupby() to work properly, using the same
        # key function for both. However, `type` objects aren't orderable, so we key on `str()`.
        all_coverage_data = sorted(
            (result.coverage_data for result in results if result.coverage_data is not None),
            key=lambda cov_data: str(type(cov_data)),
        )

        coverage_types_to_collection_types: Dict[
            Type[CoverageData], Type[CoverageDataCollection]
        ] = {
            collection_cls.element_type: collection_cls
            for collection_cls in union_membership.get(CoverageDataCollection)
        }
        coverage_collections: List[CoverageDataCollection] = []
        for data_cls, data in itertools.groupby(all_coverage_data, lambda data: type(data)):
            collection_cls = coverage_types_to_collection_types[data_cls]
            coverage_collections.append(collection_cls(data))
        # We can create multiple reports for each coverage data (console, xml and html)
        coverage_reports_collections = await MultiGet(
            Get(CoverageReports, CoverageDataCollection, coverage_collection)
            for coverage_collection in coverage_collections
        )

        coverage_report_files: List[PurePath] = []
        for coverage_reports in coverage_reports_collections:
            report_files = coverage_reports.materialize(console, workspace)
            coverage_report_files.extend(report_files)

        if coverage_report_files and test_subsystem.open_coverage:
            open_files = await Get(
                OpenFiles, OpenFilesRequest(coverage_report_files, error_if_open_not_found=False)
            )
            for process in open_files.processes:
                interactive_runner.run(process)

    return Test(exit_code)
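
The pre-sort before `itertools.groupby()` in this example matters because `groupby()` only merges runs of consecutive equal keys; a standalone demonstration:

import itertools

data = ["py", "jvm", "py"]
unsorted_groups = [(key, list(group)) for key, group in itertools.groupby(data)]
print(unsorted_groups)  # [('py', ['py']), ('jvm', ['jvm']), ('py', ['py'])]

sorted_groups = [(key, list(group)) for key, group in itertools.groupby(sorted(data))]
print(sorted_groups)    # [('jvm', ['jvm']), ('py', ['py', 'py'])]
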
Example #7
async def lint(
    console: Console,
    workspace: Workspace,
    targets: Targets,
    lint_subsystem: LintSubsystem,
    union_membership: UnionMembership,
) -> Lint:
    request_types = union_membership[LintRequest]
    requests: Iterable[StyleRequest] = tuple(
        request_type(
            request_type.field_set_type.create(target)
            for target in targets
            if request_type.field_set_type.is_applicable(target)
        )
        for request_type in request_types
    )
    field_sets_with_sources: Iterable[FieldSetsWithSources] = await MultiGet(
        Get(FieldSetsWithSources, FieldSetsWithSourcesRequest(request.field_sets))
        for request in requests
    )
    valid_requests: Iterable[StyleRequest] = tuple(
        request_cls(request)
        for request_cls, request in zip(request_types, field_sets_with_sources)
        if request
    )

    if lint_subsystem.per_file_caching:
        all_per_file_results = await MultiGet(
            Get(LintResults, LintRequest, request.__class__([field_set]))
            for request in valid_requests
            for field_set in request.field_sets
        )

        def key_fn(results: LintResults):
            return results.linter_name

        # NB: We must pre-sort the data for itertools.groupby() to work properly.
        sorted_all_per_file_results = sorted(all_per_file_results, key=key_fn)
        # We consolidate all results for each linter into a single `LintResults`.
        all_results = tuple(
            LintResults(
                itertools.chain.from_iterable(
                    per_file_results.results for per_file_results in all_linter_results
                ),
                linter_name=linter_name,
            )
            for linter_name, all_linter_results in itertools.groupby(
                sorted_all_per_file_results, key=key_fn
            )
        )
    else:
        all_results = await MultiGet(
            Get(LintResults, LintRequest, lint_request) for lint_request in valid_requests
        )

    all_results = tuple(sorted(all_results, key=lambda results: results.linter_name))

    reports = list(itertools.chain.from_iterable(results.reports for results in all_results))
    if reports:
        # TODO(#10532): Tolerate when a linter has multiple reports.
        linters_with_multiple_reports = [
            results.linter_name for results in all_results if len(results.reports) > 1
        ]
        if linters_with_multiple_reports:
            if lint_subsystem.per_file_caching:
                suggestion = "Try running without `--lint-per-file-caching` set."
            else:
                suggestion = (
                    "The linters likely partitioned the input targets, such as grouping by Python "
                    "interpreter compatibility. Try running on fewer targets or unset "
                    "`--lint-reports-dir`."
                )
            raise InvalidLinterReportsError(
                "Multiple reports would have been written for these linters: "
                f"{linters_with_multiple_reports}. The option `--lint-reports-dir` only works if "
                f"each linter has a single result. {suggestion}"
            )
        merged_reports = await Get(Digest, MergeDigests(report.digest for report in reports))
        workspace.write_digest(merged_reports)
        logger.info(f"Wrote lint result files to {lint_subsystem.reports_dir}.")

    exit_code = 0
    if all_results:
        console.print_stderr("")
    for results in all_results:
        if results.skipped:
            sigil = console.yellow("-")
            status = "skipped"
        elif results.exit_code == 0:
            sigil = console.green("✓")
            status = "succeeded"
        else:
            sigil = console.red("𐄂")
            status = "failed"
            exit_code = results.exit_code
        console.print_stderr(f"{sigil} {results.linter_name} {status}.")

    return Lint(exit_code)
Example #8
async def run_tests(
    console: Console,
    test_subsystem: TestSubsystem,
    interactive_runner: InteractiveRunner,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Test:
    if test_subsystem.debug:
        targets_to_valid_field_sets = await Get(
            TargetRootsToFieldSets,
            TargetRootsToFieldSetsRequest(
                TestFieldSet, goal_description="`test --debug`", error_if_no_applicable_targets=True
            ),
        )
        debug_requests = await MultiGet(
            Get(TestDebugRequest, TestFieldSet, field_set)
            for field_set in targets_to_valid_field_sets.field_sets
        )
        exit_code = 0
        for debug_request in debug_requests:
            if debug_request.process is None:
                continue
            debug_result = interactive_runner.run(debug_request.process)
            if debug_result.exit_code != 0:
                exit_code = debug_result.exit_code
        return Test(exit_code)

    targets_to_valid_field_sets = await Get(
        TargetRootsToFieldSets,
        TargetRootsToFieldSetsRequest(
            TestFieldSet,
            goal_description=f"the `{test_subsystem.name}` goal",
            error_if_no_applicable_targets=False,
        ),
    )
    field_sets_with_sources = await Get(
        FieldSetsWithSources, FieldSetsWithSourcesRequest(targets_to_valid_field_sets.field_sets)
    )

    results = await MultiGet(
        Get(EnrichedTestResult, TestFieldSet, field_set) for field_set in field_sets_with_sources
    )

    # Print summary.
    exit_code = 0
    if results:
        console.print_stderr("")
    for result in sorted(results):
        if result.skipped:
            continue
        if result.exit_code == 0:
            sigil = console.green("✓")
            status = "succeeded"
        else:
            sigil = console.red("𐄂")
            status = "failed"
            exit_code = cast(int, result.exit_code)
        console.print_stderr(f"{sigil} {result.address} {status}.")

    merged_xml_results = await Get(
        Digest,
        MergeDigests(result.xml_results.digest for result in results if result.xml_results),
    )
    workspace.write_digest(merged_xml_results)

    if test_subsystem.use_coverage:
        # NB: itertools.groupby() below only merges consecutive equal keys, so pre-sort;
        # `type` objects aren't orderable, so sort by their `str()`.
        all_coverage_data: Iterable[CoverageData] = sorted(
            (result.coverage_data for result in results if result.coverage_data is not None),
            key=lambda cov_data: str(type(cov_data)),
        )

        coverage_types_to_collection_types: Dict[
            Type[CoverageData], Type[CoverageDataCollection]
        ] = {
            collection_cls.element_type: collection_cls
            for collection_cls in union_membership.get(CoverageDataCollection)
        }
        coverage_collections: List[CoverageDataCollection] = []
        for data_cls, data in itertools.groupby(all_coverage_data, lambda data: type(data)):
            collection_cls = coverage_types_to_collection_types[data_cls]
            coverage_collections.append(collection_cls(data))
        # We can create multiple reports for each coverage data (console, xml and html)
        coverage_reports_collections = await MultiGet(
            Get(CoverageReports, CoverageDataCollection, coverage_collection)
            for coverage_collection in coverage_collections
        )

        coverage_report_files: List[PurePath] = []
        for coverage_reports in coverage_reports_collections:
            report_files = coverage_reports.materialize(console, workspace)
            coverage_report_files.extend(report_files)

        if coverage_report_files and test_subsystem.open_coverage:
            open_files = await Get(
                OpenFiles, OpenFilesRequest(coverage_report_files, error_if_open_not_found=False)
            )
            for process in open_files.processes:
                interactive_runner.run(process)

    return Test(exit_code)
Example #9
async def run_tests(
    console: Console,
    options: TestOptions,
    interactive_runner: InteractiveRunner,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Test:
    if options.values.debug:
        targets_to_valid_configs = await Get[TargetsToValidConfigurations](
            TargetsToValidConfigurationsRequest(
                TestConfiguration,
                goal_description="`test --debug`",
                error_if_no_valid_targets=True,
                expect_single_config=True,
            )
        )
        config = targets_to_valid_configs.configurations[0]
        logger.info(f"Starting test in debug mode: {config.address.reference()}")
        request = await Get[TestDebugRequest](TestConfiguration, config)
        debug_result = interactive_runner.run_local_interactive_process(request.ipr)
        return Test(debug_result.process_exit_code)

    targets_to_valid_configs = await Get[TargetsToValidConfigurations](
        TargetsToValidConfigurationsRequest(
            TestConfiguration,
            goal_description=f"the `{options.name}` goal",
            error_if_no_valid_targets=False,
        )
    )
    configs_with_sources = await Get[ConfigurationsWithSources](
        ConfigurationsWithSourcesRequest(targets_to_valid_configs.configurations)
    )

    results = await MultiGet(
        Get[AddressAndTestResult](WrappedTestConfiguration(config))
        for config in configs_with_sources
    )

    did_any_fail = False
    for result in results:
        if result.test_result.status == Status.FAILURE:
            did_any_fail = True
        if result.test_result.stdout:
            console.write_stdout(
                f"{result.address.reference()} stdout:\n{result.test_result.stdout}\n"
            )
        if result.test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving
            # the two streams.
            console.write_stdout(
                f"{result.address.reference()} stderr:\n{result.test_result.stderr}\n"
            )

    console.write_stdout("\n")

    for result in results:
        console.print_stdout(
            f"{result.address.reference():80}.....{result.test_result.status.value:>10}"
        )

    if did_any_fail:
        console.print_stderr(console.red("\nTests failed"))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE

    if options.values.run_coverage:
        # NB: itertools.groupby() below only merges consecutive equal keys, so pre-sort;
        # `type` objects aren't orderable, so sort by their `str()`.
        all_coverage_data: Iterable[CoverageData] = sorted(
            (
                result.test_result.coverage_data
                for result in results
                if result.test_result.coverage_data is not None
            ),
            key=lambda cov_data: str(type(cov_data)),
        )

        coverage_types_to_collection_types: Dict[
            Type[CoverageData], Type[CoverageDataCollection]
        ] = {
            collection_cls.element_type: collection_cls
            for collection_cls in union_membership.union_rules[CoverageDataCollection]
        }
        coverage_collections: List[CoverageDataCollection] = []
        for data_cls, data in itertools.groupby(all_coverage_data, lambda data: type(data)):
            collection_cls = coverage_types_to_collection_types[data_cls]
            coverage_collections.append(collection_cls(data))

        coverage_reports = await MultiGet(
            Get[CoverageReport](CoverageDataCollection, coverage_collection)
            for coverage_collection in coverage_collections
        )

        coverage_report_files = []
        for report in coverage_reports:
            report_file = report.materialize(console, workspace)
            if report_file is not None:
                coverage_report_files.append(report_file)

        if coverage_report_files and options.values.open_coverage:
            desktop.ui_open(console, interactive_runner, coverage_report_files)

    return Test(exit_code)
Example #10
async def run_tests(
    console: Console,
    options: TestOptions,
    runner: InteractiveRunner,
    addresses_with_origins: AddressesWithOrigins,
    workspace: Workspace,
) -> Test:
    if options.values.debug:
        address_with_origin = addresses_with_origins.expect_single()
        addr_debug_request = await Get[AddressAndDebugRequest](
            AddressWithOrigin, address_with_origin
        )
        result = runner.run_local_interactive_process(addr_debug_request.request.ipr)
        return Test(result.process_exit_code)

    results = await MultiGet(
        Get[AddressAndTestResult](AddressWithOrigin, address_with_origin)
        for address_with_origin in addresses_with_origins
    )

    if options.values.run_coverage:
        # TODO: consider warning if a user uses `--coverage` but the language backend does not
        # provide coverage support. This might be too chatty to be worth doing?
        results_with_coverage = [
            x
            for x in results
            if x.test_result is not None and x.test_result.coverage_data is not None
        ]
        def coverage_batch_key(address_and_test_result):
            return address_and_test_result.test_result.coverage_data.batch_cls  # type: ignore[union-attr]

        # NB: itertools.groupby() only merges consecutive equal keys, so pre-sort by the
        # same key (stringified, since classes aren't orderable).
        coverage_data_collections = itertools.groupby(
            sorted(results_with_coverage, key=lambda x: str(coverage_batch_key(x))),
            coverage_batch_key,
        )

        coverage_reports = await MultiGet(
            Get[CoverageReport](
                CoverageDataBatch, coverage_batch_cls(tuple(addresses_and_test_results))  # type: ignore[call-arg]
            )
            for coverage_batch_cls, addresses_and_test_results in coverage_data_collections
        )
        for report in coverage_reports:
            workspace.materialize_directory(
                DirectoryToMaterialize(
                    report.result_digest, path_prefix=str(report.directory_to_materialize_to),
                )
            )
            console.print_stdout(f"Wrote coverage report to `{report.directory_to_materialize_to}`")

    did_any_fail = False
    filtered_results = [(x.address, x.test_result) for x in results if x.test_result is not None]
    for address, test_result in filtered_results:
        if test_result.status == Status.FAILURE:
            did_any_fail = True
        if test_result.stdout:
            console.write_stdout(f"{address.reference()} stdout:\n{test_result.stdout}\n")
        if test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving the
            # two streams.
            console.write_stdout(f"{address.reference()} stderr:\n{test_result.stderr}\n")

    console.write_stdout("\n")

    for address, test_result in filtered_results:
        console.print_stdout(f"{address.reference():80}.....{test_result.status.value:>10}")

    if did_any_fail:
        console.print_stderr(console.red("\nTests failed"))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE

    return Test(exit_code)
Example #11
async def run_tests(
    console: Console,
    options: TestOptions,
    interactive_runner: InteractiveRunner,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Test:
    if options.values.debug:
        targets_to_valid_field_sets = await Get[TargetsToValidFieldSets](
            TargetsToValidFieldSetsRequest(
                TestFieldSet,
                goal_description="`test --debug`",
                error_if_no_valid_targets=True,
                expect_single_field_set=True,
            )
        )
        field_set = targets_to_valid_field_sets.field_sets[0]
        request = await Get[TestDebugRequest](TestFieldSet, field_set)
        debug_result = interactive_runner.run_local_interactive_process(request.ipr)
        return Test(debug_result.process_exit_code)

    targets_to_valid_field_sets = await Get[TargetsToValidFieldSets](
        TargetsToValidFieldSetsRequest(
            TestFieldSet,
            goal_description=f"the `{options.name}` goal",
            error_if_no_valid_targets=False,
        )
    )
    field_sets_with_sources = await Get[FieldSetsWithSources](
        FieldSetsWithSourcesRequest(targets_to_valid_field_sets.field_sets)
    )

    results = await MultiGet(
        Get[AddressAndTestResult](WrappedTestFieldSet(field_set))
        for field_set in field_sets_with_sources
    )

    exit_code = PANTS_SUCCEEDED_EXIT_CODE
    for result in results:
        if result.test_result.status == Status.FAILURE:
            exit_code = PANTS_FAILED_EXIT_CODE
        has_output = result.test_result.stdout or result.test_result.stderr
        if has_output:
            status = (
                console.green("✓")
                if result.test_result.status == Status.SUCCESS
                else console.red("𐄂")
            )
            console.print_stderr(f"{status} {result.address}")
        if result.test_result.stdout:
            console.print_stderr(result.test_result.stdout)
        if result.test_result.stderr:
            console.print_stderr(result.test_result.stderr)
        if has_output and result != results[-1]:
            console.print_stderr("")

    # Print summary
    if len(results) > 1:
        console.print_stderr("")
        for result in results:
            console.print_stderr(
                f"{result.address.reference():80}.....{result.test_result.status.value:>10}"
            )

    for result in results:
        xml_results = result.test_result.xml_results
        if not xml_results:
            continue
        workspace.materialize_directory(DirectoryToMaterialize(xml_results))

    if options.values.use_coverage:
        # NB: itertools.groupby() below only merges consecutive equal keys, so pre-sort;
        # `type` objects aren't orderable, so sort by their `str()`.
        all_coverage_data: Iterable[CoverageData] = sorted(
            (
                result.test_result.coverage_data
                for result in results
                if result.test_result.coverage_data is not None
            ),
            key=lambda cov_data: str(type(cov_data)),
        )

        coverage_types_to_collection_types: Dict[
            Type[CoverageData], Type[CoverageDataCollection]
        ] = {
            collection_cls.element_type: collection_cls
            for collection_cls in union_membership.get(CoverageDataCollection)
        }
        coverage_collections: List[CoverageDataCollection] = []
        for data_cls, data in itertools.groupby(all_coverage_data, lambda data: type(data)):
            collection_cls = coverage_types_to_collection_types[data_cls]
            coverage_collections.append(collection_cls(data))

        coverage_reports = await MultiGet(
            Get[CoverageReport](CoverageDataCollection, coverage_collection)
            for coverage_collection in coverage_collections
        )

        coverage_report_files = []
        for report in coverage_reports:
            report_file = report.materialize(console, workspace)
            if report_file is not None:
                coverage_report_files.append(report_file)

        if coverage_report_files and options.values.open_coverage:
            desktop.ui_open(console, interactive_runner, coverage_report_files)

    return Test(exit_code)
Example #12
async def run_tests(
    console: Console,
    options: TestOptions,
    runner: InteractiveRunner,
    targets_with_origins: HydratedTargetsWithOrigins,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Test:
    test_runners: Iterable[Type[TestRunner]] = union_membership.union_rules[TestRunner]

    if options.values.debug:
        target_with_origin = targets_with_origins.expect_single()
        adaptor_with_origin = TargetAdaptorWithOrigin.create(
            target_with_origin.target.adaptor, target_with_origin.origin
        )
        address = adaptor_with_origin.adaptor.address
        valid_test_runners = [
            test_runner
            for test_runner in test_runners
            if test_runner.is_valid_target(adaptor_with_origin)
        ]
        if not valid_test_runners:
            raise ValueError(f"No valid test runner for {address}.")
        if len(valid_test_runners) > 1:
            raise ValueError(
                f"Multiple possible test runners for {address} "
                f"({', '.join(test_runner.__name__ for test_runner in valid_test_runners)})."
            )
        test_runner = valid_test_runners[0]
        logger.info(f"Starting test in debug mode: {address.reference()}")
        request = await Get[TestDebugRequest](TestRunner, test_runner(adaptor_with_origin))
        debug_result = runner.run_local_interactive_process(request.ipr)
        return Test(debug_result.process_exit_code)

    adaptors_with_origins = tuple(
        TargetAdaptorWithOrigin.create(target_with_origin.target.adaptor, target_with_origin.origin)
        for target_with_origin in targets_with_origins
        if target_with_origin.target.adaptor.has_sources()
    )

    results = await MultiGet(
        Get[AddressAndTestResult](
            WrappedTestRunner, WrappedTestRunner(test_runner(adaptor_with_origin))
        )
        for adaptor_with_origin in adaptors_with_origins
        for test_runner in test_runners
        if test_runner.is_valid_target(adaptor_with_origin)
    )

    did_any_fail = False
    for result in results:
        if result.test_result.status == Status.FAILURE:
            did_any_fail = True
        if result.test_result.stdout:
            console.write_stdout(
                f"{result.address.reference()} stdout:\n{result.test_result.stdout}\n"
            )
        if result.test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving
            # the two streams.
            console.write_stdout(
                f"{result.address.reference()} stderr:\n{result.test_result.stderr}\n"
            )

    console.write_stdout("\n")

    for result in results:
        console.print_stdout(
            f"{result.address.reference():80}.....{result.test_result.status.value:>10}"
        )

    if did_any_fail:
        console.print_stderr(console.red("\nTests failed"))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE

    if options.values.run_coverage:
        # TODO: consider warning if a user uses `--coverage` but the language backend does not
        # provide coverage support. This might be too chatty to be worth doing?
        results_with_coverage = [x for x in results if x.test_result.coverage_data is not None]
        def coverage_batch_key(address_and_test_result):
            return address_and_test_result.test_result.coverage_data.batch_cls  # type: ignore[union-attr]

        # NB: itertools.groupby() only merges consecutive equal keys, so pre-sort by the
        # same key (stringified, since classes aren't orderable).
        coverage_data_collections = itertools.groupby(
            sorted(results_with_coverage, key=lambda x: str(coverage_batch_key(x))),
            coverage_batch_key,
        )

        coverage_reports = await MultiGet(
            Get[CoverageReport](
                CoverageDataBatch, coverage_batch_cls(tuple(addresses_and_test_results))  # type: ignore[call-arg]
            )
            for coverage_batch_cls, addresses_and_test_results in coverage_data_collections
        )

        for report in coverage_reports:
            report.materialize(console, workspace)

    return Test(exit_code)
Example #13
async def run_tests(
    console: Console,
    test_subsystem: TestSubsystem,
    interactive_runner: InteractiveRunner,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Test:
    if test_subsystem.debug:
        targets_to_valid_field_sets = await Get(
            TargetsToValidFieldSets,
            TargetsToValidFieldSetsRequest(
                TestFieldSet,
                goal_description="`test --debug`",
                error_if_no_valid_targets=True,
                expect_single_field_set=True,
            ),
        )
        field_set = targets_to_valid_field_sets.field_sets[0]
        request = await Get(TestDebugRequest, TestFieldSet, field_set)
        debug_result = interactive_runner.run(request.process)
        return Test(debug_result.exit_code)

    targets_to_valid_field_sets = await Get(
        TargetsToValidFieldSets,
        TargetsToValidFieldSetsRequest(
            TestFieldSet,
            goal_description=f"the `{test_subsystem.name}` goal",
            error_if_no_valid_targets=False,
        ),
    )
    field_sets_with_sources = await Get(
        FieldSetsWithSources,
        FieldSetsWithSourcesRequest(targets_to_valid_field_sets.field_sets))

    results = await MultiGet(
        Get(AddressAndTestResult, WrappedTestFieldSet(field_set))
        for field_set in field_sets_with_sources)

    # Print details.
    for result in results:
        if test_subsystem.options.output == ShowOutput.NONE or (
                test_subsystem.options.output == ShowOutput.FAILED
                and result.test_result.status == Status.SUCCESS):
            continue
        has_output = result.test_result.stdout or result.test_result.stderr
        if has_output:
            status = (
                console.green("✓")
                if result.test_result.status == Status.SUCCESS
                else console.red("𐄂")
            )
            console.print_stderr(f"{status} {result.address}")
        if result.test_result.stdout:
            console.print_stderr(result.test_result.stdout)
        if result.test_result.stderr:
            console.print_stderr(result.test_result.stderr)
        if has_output and result != results[-1]:
            console.print_stderr("")

    # Print summary
    console.print_stderr("")
    for result in results:
        color = console.green if result.test_result.status == Status.SUCCESS else console.red
        # The right-align logic sees the color control codes as characters, so we have
        # to account for that by widening the field when colors are enabled. F-strings
        # support nested width fields, so no `.format()` indirection is needed.
        right_align = 19 if console.use_colors else 10
        console.print_stderr(
            f"{result.address.spec:80}....."
            f"{color(result.test_result.status.value):>{right_align}}"
        )

    merged_xml_results = await Get(
        Digest,
        MergeDigests(result.test_result.xml_results for result in results
                     if result.test_result.xml_results),
    )
    workspace.write_digest(merged_xml_results)

    if test_subsystem.use_coverage:
        # NB: itertools.groupby() below only merges consecutive equal keys, so pre-sort;
        # `type` objects aren't orderable, so sort by their `str()`.
        all_coverage_data: Iterable[CoverageData] = sorted(
            (
                result.test_result.coverage_data
                for result in results
                if result.test_result.coverage_data is not None
            ),
            key=lambda cov_data: str(type(cov_data)),
        )

        coverage_types_to_collection_types: Dict[
            Type[CoverageData], Type[CoverageDataCollection]
        ] = {
            collection_cls.element_type: collection_cls
            for collection_cls in union_membership.get(CoverageDataCollection)
        }
        coverage_collections: List[CoverageDataCollection] = []
        for data_cls, data in itertools.groupby(all_coverage_data, lambda data: type(data)):
            collection_cls = coverage_types_to_collection_types[data_cls]
            coverage_collections.append(collection_cls(data))
        # We can create multiple reports for each coverage data (console, xml and html)
        coverage_reports_collections = await MultiGet(
            Get(CoverageReports, CoverageDataCollection, coverage_collection)
            for coverage_collection in coverage_collections
        )

        coverage_report_files: List[PurePath] = []
        for coverage_reports in coverage_reports_collections:
            report_files = coverage_reports.materialize(console, workspace)
            coverage_report_files.extend(report_files)

        if coverage_report_files and test_subsystem.open_coverage:
            desktop.ui_open(console, interactive_runner, coverage_report_files)

    exit_code = (
        PANTS_FAILED_EXIT_CODE
        if any(res.test_result.status == Status.FAILURE for res in results)
        else PANTS_SUCCEEDED_EXIT_CODE
    )

    return Test(exit_code)
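
The width arithmetic in the summary loop above (19 vs. 10) exists because format-spec alignment counts the invisible ANSI escape codes as ordinary characters. A standalone illustration:

GREEN = "\x1b[32m"  # 5 characters of escape code
RESET = "\x1b[0m"   # 4 characters of escape code

plain = "SUCCESS"
colored = f"{GREEN}{plain}{RESET}"
print(len(colored) - len(plain))  # 9 -- hence 19 = 10 + 9 in the code above

print(f"|{plain:>10}|")    # right-aligned within 10 visible columns
print(f"|{colored:>19}|")  # needs a width of 19 to occupy the same 10 visible columns
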
Example #14
async def run_tests(
    console: Console,
    options: TestOptions,
    interactive_runner: InteractiveRunner,
    targets_with_origins: TargetsWithOrigins,
    workspace: Workspace,
    union_membership: UnionMembership,
    registered_target_types: RegisteredTargetTypes,
) -> Test:
    config_types: Iterable[Type[TestConfiguration]] = union_membership.union_rules[
        TestConfiguration
    ]

    if options.values.debug:
        target_with_origin = targets_with_origins.expect_single()
        target = target_with_origin.target
        valid_config_types = [
            config_type for config_type in config_types
            if config_type.is_valid(target)
        ]
        if not valid_config_types:
            all_valid_target_types = itertools.chain.from_iterable(
                config_type.valid_target_types(
                    registered_target_types.types, union_membership=union_membership
                )
                for config_type in config_types
            )
            formatted_target_types = sorted(
                target_type.alias for target_type in all_valid_target_types
            )
            raise ValueError(
                f"The `test` goal only works with the following target types: "
                f"{formatted_target_types}\n\nYou used {target.address} with target "
                f"type {repr(target.alias)}.")
        if len(valid_config_types) > 1:
            possible_config_types = sorted(
                config_type.__name__ for config_type in valid_config_types)
            raise ValueError(
                f"Multiple of the registered test implementations work for {target.address} "
                f"(target type {repr(target.alias)}). It is ambiguous which implementation to use. "
                f"Possible implementations: {possible_config_types}.")
        config_type = valid_config_types[0]
        logger.info(
            f"Starting test in debug mode: {target.address.reference()}")
        request = await Get[TestDebugRequest](
            TestConfiguration, config_type.create(target_with_origin))
        debug_result = interactive_runner.run_local_interactive_process(
            request.ipr)
        return Test(debug_result.process_exit_code)

    configs = tuple(
        config_type.create(target_with_origin)
        for target_with_origin in targets_with_origins
        for config_type in config_types
        if config_type.is_valid(target_with_origin.target))
    configs_with_sources = await Get[ConfigurationsWithSources](
        ConfigurationsWithSourcesRequest(configs))

    results = await MultiGet(
        Get[AddressAndTestResult](WrappedTestConfiguration(config))
        for config in configs_with_sources)

    did_any_fail = False
    for result in results:
        if result.test_result.status == Status.FAILURE:
            did_any_fail = True
        if result.test_result.stdout:
            console.write_stdout(
                f"{result.address.reference()} stdout:\n{result.test_result.stdout}\n"
            )
        if result.test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving
            # the two streams.
            console.write_stdout(
                f"{result.address.reference()} stderr:\n{result.test_result.stderr}\n"
            )

    console.write_stdout("\n")

    for result in results:
        console.print_stdout(
            f"{result.address.reference():80}.....{result.test_result.status.value:>10}"
        )

    if did_any_fail:
        console.print_stderr(console.red("\nTests failed"))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE

    if options.values.run_coverage:
        # NB: itertools.groupby() below only merges consecutive equal keys, so pre-sort;
        # `type` objects aren't orderable, so sort by their `str()`.
        all_coverage_data: Iterable[CoverageData] = sorted(
            (
                result.test_result.coverage_data
                for result in results
                if result.test_result.coverage_data is not None
            ),
            key=lambda cov_data: str(type(cov_data)),
        )

        coverage_types_to_collection_types: Dict[
            Type[CoverageData], Type[CoverageDataCollection]
        ] = {
            collection_cls.element_type: collection_cls
            for collection_cls in union_membership.union_rules[CoverageDataCollection]
        }
        coverage_collections: List[CoverageDataCollection] = []
        for data_cls, data in itertools.groupby(all_coverage_data, lambda data: type(data)):
            collection_cls = coverage_types_to_collection_types[data_cls]
            coverage_collections.append(collection_cls(data))

        coverage_reports = await MultiGet(
            Get[CoverageReport](CoverageDataCollection, coverage_collection)
            for coverage_collection in coverage_collections)

        coverage_report_files = []
        for report in coverage_reports:
            report_file = report.materialize(console, workspace)
            if report_file is not None:
                coverage_report_files.append(report_file)

        if coverage_report_files and options.values.open_coverage:
            desktop.ui_open(console, interactive_runner, coverage_report_files)

    return Test(exit_code)
Example #15
async def run_tests(
    console: Console,
    options: TestOptions,
    interactive_runner: InteractiveRunner,
    targets_with_origins: TargetsWithOrigins,
    workspace: Workspace,
    union_membership: UnionMembership,
    registered_target_types: RegisteredTargetTypes,
) -> Test:
    config_types: Iterable[Type[TestConfiguration]] = union_membership.union_rules[
        TestConfiguration
    ]

    if options.values.debug:
        target_with_origin = targets_with_origins.expect_single()
        target = target_with_origin.target
        valid_config_types = [
            config_type for config_type in config_types
            if config_type.is_valid(target)
        ]
        if not valid_config_types:
            all_valid_target_types = itertools.chain.from_iterable(
                config_type.valid_target_types(
                    registered_target_types.types, union_membership=union_membership
                )
                for config_type in config_types
            )
            formatted_target_types = sorted(
                target_type.alias for target_type in all_valid_target_types
            )
            raise ValueError(
                f"The `test` goal only works with the following target types: "
                f"{formatted_target_types}\n\nYou used {target.address} with target "
                f"type {repr(target.alias)}.")
        if len(valid_config_types) > 1:
            possible_config_types = sorted(
                config_type.__name__ for config_type in valid_config_types)
            raise ValueError(
                f"Multiple of the registered test implementations work for {target.address} "
                f"(target type {repr(target.alias)}). It is ambiguous which implementation to use. "
                f"Possible implementations: {possible_config_types}.")
        config_type = valid_config_types[0]
        logger.info(
            f"Starting test in debug mode: {target.address.reference()}")
        request = await Get[TestDebugRequest](
            TestConfiguration, config_type.create(target_with_origin))
        debug_result = interactive_runner.run_local_interactive_process(
            request.ipr)
        return Test(debug_result.process_exit_code)

    # TODO: possibly factor out this filtering out of empty `sources`. We do this at this level of
    #  abstraction, rather than in the test runners, because the test runners often will use
    #  auto-discovery when given no input files.
    configs = tuple(
        config_type.create(target_with_origin)
        for target_with_origin in targets_with_origins
        for config_type in config_types
        if config_type.is_valid(target_with_origin.target))
    all_hydrated_sources = await MultiGet(
        Get[HydratedSources](HydrateSourcesRequest, test_target.sources.request)
        for test_target in configs
    )

    results = await MultiGet(
        Get[AddressAndTestResult](WrappedTestConfiguration(config))
        for config, hydrated_sources in zip(configs, all_hydrated_sources)
        if hydrated_sources.snapshot.files)

    did_any_fail = False
    for result in results:
        if result.test_result.status == Status.FAILURE:
            did_any_fail = True
        if result.test_result.stdout:
            console.write_stdout(
                f"{result.address.reference()} stdout:\n{result.test_result.stdout}\n"
            )
        if result.test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving
            # the two streams.
            console.write_stdout(
                f"{result.address.reference()} stderr:\n{result.test_result.stderr}\n"
            )

    console.write_stdout("\n")

    for result in results:
        console.print_stdout(
            f"{result.address.reference():80}.....{result.test_result.status.value:>10}"
        )

    if did_any_fail:
        console.print_stderr(console.red("\nTests failed"))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE

    if options.values.run_coverage:
        # TODO: consider warning if a user uses `--coverage` but the language backend does not
        # provide coverage support. This might be too chatty to be worth doing?
        results_with_coverage = [
            x for x in results if x.test_result.coverage_data is not None
        ]
        def coverage_batch_key(address_and_test_result):
            return address_and_test_result.test_result.coverage_data.batch_cls  # type: ignore[union-attr]

        # NB: itertools.groupby() only merges consecutive equal keys, so pre-sort by the
        # same key (stringified, since classes aren't orderable).
        coverage_data_collections = itertools.groupby(
            sorted(results_with_coverage, key=lambda x: str(coverage_batch_key(x))),
            coverage_batch_key,
        )

        coverage_reports = await MultiGet(
            Get[CoverageReport](
                CoverageDataBatch,
                coverage_batch_cls(tuple(addresses_and_test_results)),  # type: ignore[call-arg]
            )
            for coverage_batch_cls, addresses_and_test_results in coverage_data_collections
        )

        coverage_report_files = []
        for report in coverage_reports:
            report_file = report.materialize(console, workspace)
            if report_file is not None:
                coverage_report_files.append(report_file)

        if coverage_report_files and options.values.open_coverage:
            desktop.ui_open(console, interactive_runner, coverage_report_files)

    return Test(exit_code)