def run_typecheck_rule(
    *,
    request_types: List[Type[TypecheckRequest]],
    targets: List[TargetWithOrigin],
    include_sources: bool = True,
) -> Tuple[int, str]:
    """Drive the `typecheck` goal rule with mocked engine requests.

    Returns the goal's exit code along with everything written to stderr.
    """
    console = MockConsole(use_colors=False)
    membership = UnionMembership({TypecheckRequest: request_types})

    def canned_results(field_set_collection):
        # Each mock request type carries its own canned results.
        return field_set_collection.typecheck_results

    def source_filter(field_sets):
        # Pretend no field set has sources when include_sources is False.
        return FieldSetsWithSources(field_sets if include_sources else ())

    outcome: Typecheck = run_rule(
        typecheck,
        rule_args=[console, TargetsWithOrigins(targets), membership],
        mock_gets=[
            MockGet(
                product_type=TypecheckResults,
                subject_type=TypecheckRequest,
                mock=canned_results,
            ),
            MockGet(
                product_type=FieldSetsWithSources,
                subject_type=FieldSetsWithSourcesRequest,
                mock=source_filter,
            ),
        ],
        union_membership=membership,
    )
    # The typecheck goal reports exclusively via stderr.
    assert not console.stdout.getvalue()
    return outcome.exit_code, console.stderr.getvalue()
async def resolve_targets_with_origins(
    addresses_with_origins: AddressesWithOrigins,
) -> TargetsWithOrigins:
    """Hydrate each address into a target, preserving its origin spec.

    Base targets are additionally expanded into their generated subtargets
    (when any exist), so the result may contain more entries than the input.
    """
    # TODO: See `resolve_targets`.
    targets_with_origins = await MultiGet(
        Get(TargetWithOrigin, AddressWithOrigin, address_with_origin)
        for address_with_origin in addresses_with_origins
    )

    # Split out and expand any base targets.
    # TODO: Should recursively expand alias targets here as well.
    other_targets_with_origins = []
    base_targets_with_origins = []
    for to in targets_with_origins:
        if to.target.address.is_base_target:
            base_targets_with_origins.append(to)
        else:
            other_targets_with_origins.append(to)

    base_targets_subtargets = await MultiGet(
        Get(Subtargets, Address, to.target.address) for to in base_targets_with_origins
    )
    # Zip the subtargets back to the base targets and replace them while maintaining origins.
    # NB: If a target had no subtargets, we use the base.
    expanded_targets_with_origins = set(other_targets_with_origins)
    expanded_targets_with_origins.update(
        TargetWithOrigin(target, bto.origin)
        for bto, subtargets in zip(base_targets_with_origins, base_targets_subtargets)
        for target in (subtargets.subtargets if subtargets.subtargets else [bto.target])
    )
    return TargetsWithOrigins(expanded_targets_with_origins)
def run_lint_rule(
    *,
    config_collection_types: List[Type[LinterConfigurations]],
    targets: List[TargetWithOrigin],
    per_target_caching: bool,
    include_sources: bool = True,
) -> Tuple[int, str]:
    """Drive the `lint` goal rule with mocked engine requests.

    Returns the goal's exit code along with everything written to stdout.
    """
    console = MockConsole(use_colors=False)
    membership = UnionMembership({LinterConfigurations: config_collection_types})

    def canned_lint_result(config_collection):
        # Each mock configuration collection carries its own canned result.
        return config_collection.lint_result

    def source_filter(configs):
        # Pretend every config lacks sources when include_sources is False.
        return ConfigurationsWithSources(configs if include_sources else ())

    outcome: Lint = run_rule(
        lint,
        rule_args=[
            console,
            TargetsWithOrigins(targets),
            create_goal_subsystem(LintOptions, per_target_caching=per_target_caching),
            membership,
        ],
        mock_gets=[
            MockGet(
                product_type=LintResult,
                subject_type=LinterConfigurations,
                mock=canned_lint_result,
            ),
            MockGet(
                product_type=ConfigurationsWithSources,
                subject_type=ConfigurationsWithSourcesRequest,
                mock=source_filter,
            ),
        ],
        union_membership=membership,
    )
    return outcome.exit_code, console.stdout.getvalue()
async def find_valid_field_sets_for_target_roots(
    request: TargetRootsToFieldSetsRequest,
    targets_with_origins: TargetsWithOrigins,
    union_membership: UnionMembership,
    registered_target_types: RegisteredTargetTypes,
) -> TargetRootsToFieldSets:
    """Compute which field sets of the requested superclass apply to each target root.

    Raises if no targets are applicable (when the request demands it), or if
    `expect_single_field_set` is set and more than one target or field set matches.
    """
    field_sets_per_target = await Get(
        FieldSetsPerTarget,
        FieldSetsPerTargetRequest(
            request.field_set_superclass, (two.target for two in targets_with_origins)
        ),
    )
    # Keep only targets for which at least one field set applies.
    targets_to_valid_field_sets = {}
    for tgt_with_origin, field_sets in zip(
        targets_with_origins, field_sets_per_target.collection
    ):
        if field_sets:
            targets_to_valid_field_sets[tgt_with_origin] = field_sets
    if request.error_if_no_applicable_targets and not targets_to_valid_field_sets:
        raise NoApplicableTargetsException.create_from_field_sets(
            TargetsWithOrigins(targets_with_origins),
            field_set_types=union_membership.union_rules[request.field_set_superclass],
            goal_description=request.goal_description,
            union_membership=union_membership,
            registered_target_types=registered_target_types,
        )
    result = TargetRootsToFieldSets(targets_to_valid_field_sets)
    if not request.expect_single_field_set:
        return result
    # With expect_single_field_set, exactly one target and one field set may match.
    if len(result.targets) > 1:
        raise TooManyTargetsException(result.targets, goal_description=request.goal_description)
    if len(result.field_sets) > 1:
        raise AmbiguousImplementationsException(
            result.targets[0], result.field_sets, goal_description=request.goal_description
        )
    return result
async def resolve_targets_with_origins(
    addresses_with_origins: AddressesWithOrigins,
) -> TargetsWithOrigins:
    """Hydrate each AddressWithOrigin into a TargetWithOrigin, preserving order."""
    targets_with_origins = await MultiGet(
        Get[TargetWithOrigin](AddressWithOrigin, address_with_origin)
        for address_with_origin in addresses_with_origins
    )
    return TargetsWithOrigins(targets_with_origins)
def single_target_run(
    self, *, console: MockConsole, program_text: bytes, address_spec: str,
) -> Run:
    """Execute the `run` goal rule against a single mocked binary target.

    The binary creation step is mocked to return `self.create_mock_binary`.
    """

    class TestBinaryConfiguration(BinaryConfiguration):
        required_fields = ()

    class TestBinaryTarget(Target):
        alias = "binary"
        core_fields = ()

    address = Address.parse(address_spec)
    target_with_origin = TargetWithOrigin(
        target=TestBinaryTarget(unhydrated_values={}, address=address),
        origin=SingleAddress(address.spec_path, address.target_name),
    )
    raw_result = run_rule(
        run,
        rule_args=[
            console,
            Workspace(self.scheduler),
            InteractiveRunner(self.scheduler),
            BuildRoot(),
            TargetsWithOrigins([target_with_origin]),
            create_goal_subsystem(RunOptions, args=[]),
            create_subsystem(GlobalOptions, pants_workdir=self.pants_workdir),
            UnionMembership(
                union_rules={BinaryConfiguration: OrderedSet([TestBinaryConfiguration])}
            ),
            RegisteredTargetTypes.create([TestBinaryTarget]),
        ],
        mock_gets=[
            MockGet(
                product_type=CreatedBinary,
                subject_type=TestBinaryConfiguration,
                mock=lambda _: self.create_mock_binary(program_text),
            ),
        ],
    )
    return cast(Run, raw_result)
def run_black_and_isort(
    self,
    source_files: List[FileContent],
    *,
    name: str,
    extra_args: Optional[List[str]] = None,
) -> LanguageFmtResults:
    """Materialize the given sources and format them with both black and isort."""
    for file_content in source_files:
        self.create_file(file_content.path, file_content.content.decode())
    library = PythonLibrary({}, address=Address.parse(f"test:{name}"))
    fmt_targets = PythonFmtTargets(
        TargetsWithOrigins(
            [TargetWithOrigin(library, SingleAddress(directory="test", name=name))]
        )
    )
    bootstrap_args = [
        "--backend-packages2=['pants.backend.python.lint.black', 'pants.backend.python.lint.isort']",
        *(extra_args or []),
    ]
    return self.request_single_product(
        LanguageFmtResults,
        Params(fmt_targets, create_options_bootstrapper(args=bootstrap_args)),
    )
def find_valid_field_sets(
    superclass: Type,
    targets_with_origins: Iterable[TargetWithOrigin],
    *,
    error_if_no_valid_targets: bool = False,
    expect_single_config: bool = False,
) -> TargetsToValidFieldSets:
    """Request TargetsToValidFieldSets for the given targets via the scheduler.

    NOTE(review): `self` is referenced but is not a parameter, so this function
    must be defined where `self` is in scope (e.g. nested inside a test
    method) — confirm against the enclosing context.
    """
    return self.request_single_product(
        TargetsToValidFieldSets,
        Params(
            TargetsToValidFieldSetsRequest(
                superclass,
                goal_description="fake",
                error_if_no_valid_targets=error_if_no_valid_targets,
                expect_single_field_set=expect_single_config,
            ),
            TargetsWithOrigins(targets_with_origins),
        ),
    )
def run_fmt_rule(
    self,
    *,
    language_target_collection_types: List[Type[LanguageFmtTargets]],
    targets: List[TargetWithOrigin],
    result_digest: Digest,
    per_target_caching: bool,
    include_sources: bool = True,
) -> str:
    """Drive the `fmt` goal rule with mocked formatters.

    Asserts a successful, stdout-silent run and returns captured stderr.
    """
    console = MockConsole(use_colors=False)
    membership = UnionMembership({LanguageFmtTargets: language_target_collection_types})

    def canned_language_results(language_targets_collection):
        # Each mock collection produces its own canned results for the digest.
        return language_targets_collection.language_fmt_results(result_digest)

    def source_filter(tgts):
        # Pretend every target lacks sources when include_sources is False.
        return TargetsWithSources(tgts if include_sources else ())

    outcome: Fmt = run_rule(
        fmt,
        rule_args=[
            console,
            TargetsWithOrigins(targets),
            create_goal_subsystem(FmtOptions, per_target_caching=per_target_caching),
            Workspace(self.scheduler),
            membership,
        ],
        mock_gets=[
            MockGet(
                product_type=LanguageFmtResults,
                subject_type=LanguageFmtTargets,
                mock=canned_language_results,
            ),
            MockGet(
                product_type=TargetsWithSources,
                subject_type=TargetsWithSourcesRequest,
                mock=source_filter,
            ),
            MockGet(
                product_type=Digest,
                subject_type=MergeDigests,
                mock=lambda _: result_digest,
            ),
        ],
        union_membership=membership,
    )
    assert outcome.exit_code == 0
    assert not console.stdout.getvalue()
    return cast(str, console.stderr.getvalue())
def find_valid_field_sets(
    request: TargetsToValidFieldSetsRequest,
    targets_with_origins: TargetsWithOrigins,
    union_membership: UnionMembership,
    registered_target_types: RegisteredTargetTypes,
) -> TargetsToValidFieldSets:
    """Map each target to the field sets of the requested superclass that apply to it.

    Raises if no targets are applicable (when the request demands it), or if
    `expect_single_field_set` is set and more than one target or field set matches.
    """
    field_set_types: Iterable[
        Union[Type[FieldSet], Type[FieldSetWithOrigin]]
    ] = union_membership.union_rules[request.field_set_superclass]

    targets_to_valid_field_sets = {}
    for tgt_with_origin in targets_with_origins:
        applicable = []
        for field_set_type in field_set_types:
            if not field_set_type.is_valid(tgt_with_origin.target):
                continue
            # Origin-aware field set types receive the full TargetWithOrigin;
            # plain field set types only get the target itself.
            if issubclass(field_set_type, FieldSetWithOrigin):
                applicable.append(field_set_type.create(tgt_with_origin))
            else:
                applicable.append(field_set_type.create(tgt_with_origin.target))
        if applicable:
            targets_to_valid_field_sets[tgt_with_origin] = applicable

    if request.error_if_no_valid_targets and not targets_to_valid_field_sets:
        raise NoValidTargetsException.create_from_field_sets(
            TargetsWithOrigins(targets_with_origins),
            field_set_types=field_set_types,
            goal_description=request.goal_description,
            union_membership=union_membership,
            registered_target_types=registered_target_types,
        )

    result = TargetsToValidFieldSets(targets_to_valid_field_sets)
    if not request.expect_single_field_set:
        return result
    if len(result.targets) > 1:
        raise TooManyTargetsException(result.targets, goal_description=request.goal_description)
    if len(result.field_sets) > 1:
        raise AmbiguousImplementationsException(
            result.targets[0], result.field_sets, goal_description=request.goal_description
        )
    return result
async def fmt(
    console: Console,
    targets_with_origins: TargetsWithOrigins,
    options: FmtOptions,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Fmt:
    """The `fmt` goal: partition targets by language, run each language's formatters,
    merge the formatted digests, and write the result back to the build root."""
    language_target_collection_types: Iterable[Type[LanguageFmtTargets]] = (
        union_membership.union_rules[LanguageFmtTargets]
    )
    # Partition the target roots into one collection per registered language.
    language_target_collections: Iterable[LanguageFmtTargets] = tuple(
        language_target_collection_type(
            TargetsWithOrigins(
                target_with_origin
                for target_with_origin in targets_with_origins
                if language_target_collection_type.belongs_to_language(target_with_origin.target)
            )
        )
        for language_target_collection_type in language_target_collection_types
    )
    # Determine, per language, which targets actually have sources.
    targets_with_sources: Iterable[TargetsWithSources] = await MultiGet(
        Get[TargetsWithSources](
            TargetsWithSourcesRequest(
                target_with_origin.target
                for target_with_origin in language_target_collection.targets_with_origins
            )
        )
        for language_target_collection in language_target_collections
    )
    # NB: We must convert back the generic TargetsWithSources objects back into their
    # corresponding LanguageFmtTargets, e.g. back to PythonFmtTargets, in order for the union
    # rule to work.
    valid_language_target_collections: Iterable[LanguageFmtTargets] = tuple(
        language_target_collection_cls(
            TargetsWithOrigins(
                target_with_origin
                for target_with_origin in language_target_collection.targets_with_origins
                if target_with_origin.target in language_targets_with_sources
            )
        )
        for language_target_collection_cls, language_target_collection, language_targets_with_sources in zip(
            language_target_collection_types, language_target_collections, targets_with_sources
        )
        if language_targets_with_sources
    )
    if options.values.per_target_caching:
        # One formatting request per individual target so each caches independently.
        per_language_results = await MultiGet(
            Get[LanguageFmtResults](
                LanguageFmtTargets,
                language_target_collection.__class__(TargetsWithOrigins([target_with_origin])),
            )
            for language_target_collection in valid_language_target_collections
            for target_with_origin in language_target_collection.targets_with_origins
        )
    else:
        per_language_results = await MultiGet(
            Get[LanguageFmtResults](LanguageFmtTargets, language_target_collection)
            for language_target_collection in valid_language_target_collections
        )
    individual_results: List[FmtResult] = list(
        itertools.chain.from_iterable(
            language_result.results for language_result in per_language_results
        )
    )
    if not individual_results:
        return Fmt(exit_code=0)
    # NB: this will fail if there are any conflicting changes, which we want to happen rather than
    # silently having one result override the other. In practicality, this should never happen due
    # to us grouping each language's formatters into a single combined_digest.
    merged_formatted_digest = await Get[Digest](
        DirectoriesToMerge(
            tuple(language_result.combined_digest for language_result in per_language_results)
        )
    )
    workspace.materialize_directory(DirectoryToMaterialize(merged_formatted_digest))
    for result in individual_results:
        if result.stdout:
            console.print_stdout(result.stdout)
        if result.stderr:
            console.print_stderr(result.stderr)
    # Since the rules to produce FmtResult should use ExecuteRequest, rather than
    # FallibleProcess, we assume that there were no failures.
    return Fmt(exit_code=0)
async def run_tests(
    console: Console,
    options: TestOptions,
    interactive_runner: InteractiveRunner,
    targets_with_origins: TargetsWithOrigins,
    workspace: Workspace,
    union_membership: UnionMembership,
    registered_target_types: RegisteredTargetTypes,
) -> Test:
    """The `test` goal: run every applicable test implementation and report results.

    In `--debug` mode a single target runs interactively in the foreground;
    otherwise all valid configurations run via the engine, results are printed,
    and coverage reports are optionally produced and opened.
    """
    config_types: Iterable[Type[TestConfiguration]] = union_membership.union_rules[TestConfiguration]
    if options.values.debug:
        # Debug mode supports exactly one target root.
        target_with_origin = targets_with_origins.expect_single()
        target = target_with_origin.target
        valid_config_types = [
            config_type for config_type in config_types if config_type.is_valid(target)
        ]
        if not valid_config_types:
            # Build a helpful error listing every target type any test backend supports.
            all_valid_target_types = itertools.chain.from_iterable(
                config_type.valid_target_types(
                    registered_target_types.types, union_membership=union_membership
                )
                for config_type in config_types
            )
            formatted_target_types = sorted(
                target_type.alias for target_type in all_valid_target_types
            )
            raise ValueError(
                f"The `test` goal only works with the following target types: "
                f"{formatted_target_types}\n\nYou used {target.address} with target "
                f"type {repr(target.alias)}."
            )
        if len(valid_config_types) > 1:
            possible_config_types = sorted(
                config_type.__name__ for config_type in valid_config_types
            )
            raise ValueError(
                f"Multiple of the registered test implementations work for {target.address} "
                f"(target type {repr(target.alias)}). It is ambiguous which implementation to use. "
                f"Possible implementations: {possible_config_types}."
            )
        config_type = valid_config_types[0]
        logger.info(f"Starting test in debug mode: {target.address.reference()}")
        request = await Get[TestDebugRequest](
            TestConfiguration, config_type.create(target_with_origin)
        )
        # Run the test process in the foreground, attached to the user's terminal.
        debug_result = interactive_runner.run_local_interactive_process(request.ipr)
        return Test(debug_result.process_exit_code)
    # Create one configuration per (target, applicable config type) pair.
    configs = tuple(
        config_type.create(target_with_origin)
        for target_with_origin in targets_with_origins
        for config_type in config_types
        if config_type.is_valid(target_with_origin.target)
    )
    # Drop configurations whose targets have no sources.
    configs_with_sources = await Get[ConfigurationsWithSources](
        ConfigurationsWithSourcesRequest(configs)
    )
    results = await MultiGet(
        Get[AddressAndTestResult](WrappedTestConfiguration(config))
        for config in configs_with_sources
    )
    did_any_fail = False
    for result in results:
        if result.test_result.status == Status.FAILURE:
            did_any_fail = True
        if result.test_result.stdout:
            console.write_stdout(
                f"{result.address.reference()} stdout:\n{result.test_result.stdout}\n"
            )
        if result.test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving
            # the two streams.
            console.write_stdout(
                f"{result.address.reference()} stderr:\n{result.test_result.stderr}\n"
            )
    console.write_stdout("\n")
    # One aligned summary line per test result.
    for result in results:
        console.print_stdout(
            f"{result.address.reference():80}.....{result.test_result.status.value:>10}"
        )
    if did_any_fail:
        console.print_stderr(console.red("\nTests failed"))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE
    if options.values.run_coverage:
        all_coverage_data: Iterable[CoverageData] = [
            result.test_result.coverage_data
            for result in results
            if result.test_result.coverage_data is not None
        ]
        # Map each concrete CoverageData type to the collection type that merges it.
        coverage_types_to_collection_types: Dict[
            Type[CoverageData], Type[CoverageDataCollection]
        ] = {
            collection_cls.element_type: collection_cls
            for collection_cls in union_membership.union_rules[CoverageDataCollection]
        }
        coverage_collections: List[CoverageDataCollection] = []
        # NOTE(review): itertools.groupby only groups *adjacent* items; this assumes
        # all_coverage_data is already ordered by type — confirm, otherwise a type
        # could yield multiple groups and the dict-keyed collections would be split.
        for data_cls, data in itertools.groupby(all_coverage_data, lambda data: type(data)):
            collection_cls = coverage_types_to_collection_types[data_cls]
            coverage_collections.append(collection_cls(data))
        coverage_reports = await MultiGet(
            Get[CoverageReport](CoverageDataCollection, coverage_collection)
            for coverage_collection in coverage_collections
        )
        coverage_report_files = []
        for report in coverage_reports:
            report_file = report.materialize(console, workspace)
            if report_file is not None:
                coverage_report_files.append(report_file)
        if coverage_report_files and options.values.open_coverage:
            desktop.ui_open(console, interactive_runner, coverage_report_files)
    return Test(exit_code)
def run_test_rule(
    self,
    *,
    config: Type[TestConfiguration],
    targets: List[TargetWithOrigin],
    debug: bool = False,
    include_sources: bool = True,
) -> Tuple[int, str]:
    """Drive the `test` goal rule with mocked engine requests.

    Returns the goal's exit code along with everything written to stdout.
    """
    console = MockConsole(use_colors=False)
    union_membership = UnionMembership({TestConfiguration: OrderedSet([config])})

    def coordinator_stub(
        wrapped_config: WrappedTestConfiguration,
    ) -> AddressAndTestResult:
        # Each mock configuration carries its own canned test result.
        cfg = wrapped_config.config
        return AddressAndTestResult(
            address=cfg.address,
            test_result=cfg.test_result,  # type: ignore[attr-defined]
        )

    def hydrated_sources_stub(_) -> HydratedSources:
        files = cast(Tuple[str, ...], ("test.hs",) if include_sources else ())
        return HydratedSources(
            Snapshot(directory_digest=EMPTY_DIRECTORY_DIGEST, files=files, dirs=()),
            filespec={"globs": []},
        )

    outcome: Test = run_rule(
        run_tests,
        rule_args=[
            console,
            MockOptions(debug=debug, run_coverage=False),
            InteractiveRunner(self.scheduler),
            TargetsWithOrigins(targets),
            Workspace(self.scheduler),
            union_membership,
            RegisteredTargetTypes.create([MockTarget]),
        ],
        mock_gets=[
            MockGet(
                product_type=AddressAndTestResult,
                subject_type=WrappedTestConfiguration,
                mock=coordinator_stub,
            ),
            MockGet(
                product_type=TestDebugRequest,
                subject_type=TestConfiguration,
                mock=lambda _: TestDebugRequest(self.make_ipr()),
            ),
            MockGet(
                product_type=HydratedSources,
                subject_type=HydrateSourcesRequest,
                mock=hydrated_sources_stub,
            ),
            MockGet(
                product_type=CoverageReport,
                subject_type=CoverageDataBatch,
                mock=lambda _: FilesystemCoverageReport(
                    result_digest=EMPTY_DIRECTORY_DIGEST,
                    directory_to_materialize_to=PurePath("mockety/mock"),
                    report_file=None,
                ),
            ),
        ],
        union_membership=union_membership,
    )
    return outcome.exit_code, console.stdout.getvalue()
async def run_tests(
    console: Console,
    options: TestOptions,
    interactive_runner: InteractiveRunner,
    targets_with_origins: TargetsWithOrigins,
    workspace: Workspace,
    union_membership: UnionMembership,
    registered_target_types: RegisteredTargetTypes,
) -> Test:
    """The `test` goal: run every applicable test implementation and report results.

    In `--debug` mode a single target runs interactively in the foreground;
    otherwise all valid configurations with non-empty sources run via the
    engine, results are printed, and coverage reports are optionally produced.
    """
    config_types: Iterable[Type[TestConfiguration]] = union_membership.union_rules[TestConfiguration]
    if options.values.debug:
        # Debug mode supports exactly one target root.
        target_with_origin = targets_with_origins.expect_single()
        target = target_with_origin.target
        valid_config_types = [
            config_type for config_type in config_types if config_type.is_valid(target)
        ]
        if not valid_config_types:
            # Build a helpful error listing every target type any test backend supports.
            all_valid_target_types = itertools.chain.from_iterable(
                config_type.valid_target_types(
                    registered_target_types.types, union_membership=union_membership
                )
                for config_type in config_types
            )
            formatted_target_types = sorted(
                target_type.alias for target_type in all_valid_target_types
            )
            raise ValueError(
                f"The `test` goal only works with the following target types: "
                f"{formatted_target_types}\n\nYou used {target.address} with target "
                f"type {repr(target.alias)}."
            )
        if len(valid_config_types) > 1:
            possible_config_types = sorted(
                config_type.__name__ for config_type in valid_config_types
            )
            raise ValueError(
                f"Multiple of the registered test implementations work for {target.address} "
                f"(target type {repr(target.alias)}). It is ambiguous which implementation to use. "
                f"Possible implementations: {possible_config_types}."
            )
        config_type = valid_config_types[0]
        logger.info(f"Starting test in debug mode: {target.address.reference()}")
        request = await Get[TestDebugRequest](
            TestConfiguration, config_type.create(target_with_origin)
        )
        # Run the test process in the foreground, attached to the user's terminal.
        debug_result = interactive_runner.run_local_interactive_process(request.ipr)
        return Test(debug_result.process_exit_code)
    # TODO: possibly factor out this filtering out of empty `sources`. We do this at this level of
    # abstraction, rather than in the test runners, because the test runners often will use
    # auto-discovery when given no input files.
    configs = tuple(
        config_type.create(target_with_origin)
        for target_with_origin in targets_with_origins
        for config_type in config_types
        if config_type.is_valid(target_with_origin.target)
    )
    all_hydrated_sources = await MultiGet(
        Get[HydratedSources](HydrateSourcesRequest, test_target.sources.request)
        for test_target in configs
    )
    # Only run configurations whose hydrated sources are non-empty.
    results = await MultiGet(
        Get[AddressAndTestResult](WrappedTestConfiguration(config))
        for config, hydrated_sources in zip(configs, all_hydrated_sources)
        if hydrated_sources.snapshot.files
    )
    did_any_fail = False
    for result in results:
        if result.test_result.status == Status.FAILURE:
            did_any_fail = True
        if result.test_result.stdout:
            console.write_stdout(
                f"{result.address.reference()} stdout:\n{result.test_result.stdout}\n"
            )
        if result.test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving
            # the two streams.
            console.write_stdout(
                f"{result.address.reference()} stderr:\n{result.test_result.stderr}\n"
            )
    console.write_stdout("\n")
    # One aligned summary line per test result.
    for result in results:
        console.print_stdout(
            f"{result.address.reference():80}.....{result.test_result.status.value:>10}"
        )
    if did_any_fail:
        console.print_stderr(console.red("\nTests failed"))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE
    if options.values.run_coverage:
        # TODO: consider warning if a user uses `--coverage` but the language backend does not
        # provide coverage support. This might be too chatty to be worth doing?
        results_with_coverage = [
            x for x in results if x.test_result.coverage_data is not None
        ]
        # NOTE(review): itertools.groupby only groups *adjacent* items; this assumes
        # results_with_coverage is already ordered by batch_cls — confirm, otherwise one
        # batch class could yield multiple groups and thus multiple reports.
        coverage_data_collections = itertools.groupby(
            results_with_coverage,
            lambda address_and_test_result: (
                address_and_test_result.test_result.coverage_data.batch_cls  # type: ignore[union-attr]
            ),
        )
        coverage_reports = await MultiGet(
            Get[CoverageReport](
                CoverageDataBatch,
                coverage_batch_cls(tuple(addresses_and_test_results)),  # type: ignore[call-arg]
            )
            for coverage_batch_cls, addresses_and_test_results in coverage_data_collections
        )
        coverage_report_files = []
        for report in coverage_reports:
            report_file = report.materialize(console, workspace)
            if report_file is not None:
                coverage_report_files.append(report_file)
        if coverage_report_files and options.values.open_coverage:
            desktop.ui_open(console, interactive_runner, coverage_report_files)
    return Test(exit_code)