# Resolve each first-party and third-party Go package target and print its debug representation.
async def run_go_pkg_debug(targets: UnexpandedTargets, console: Console) -> GoPkgDebugGoal:
    first_party_package_targets = [tgt for tgt in targets if is_first_party_package_target(tgt)]
    first_party_requests = [
        Get(ResolvedGoPackage, ResolveGoPackageRequest(address=tgt.address))
        for tgt in first_party_package_targets
    ]

    third_party_package_targets = [tgt for tgt in targets if is_third_party_package_target(tgt)]
    third_party_requests = [
        Get(ResolvedGoPackage, ResolveExternalGoPackageRequest(address=tgt.address))
        for tgt in third_party_package_targets
    ]

    resolved_packages = await MultiGet([*first_party_requests, *third_party_requests])  # type: ignore
    for package in resolved_packages:
        console.write_stdout(str(package) + "\n")
    return GoPkgDebugGoal(exit_code=0)

def run(console: Console, workspace: Workspace, runner: InteractiveRunner,
        bfa: BuildFileAddress) -> Run:
    target = bfa.to_address()
    binary = yield Get(CreatedBinary, Address, target)

    with temporary_dir(cleanup=True) as tmpdir:
        dirs_to_materialize = (
            DirectoryToMaterialize(path=str(tmpdir), directory_digest=binary.digest),
        )
        workspace.materialize_directories(dirs_to_materialize)

        console.write_stdout(f"Running target: {target}\n")
        full_path = str(Path(tmpdir, binary.binary_name))
        run_request = InteractiveProcessRequest(
            argv=[full_path],
            run_in_workspace=True,
        )

        try:
            result = runner.run_local_interactive_process(run_request)
            exit_code = result.process_exit_code
            if result.process_exit_code == 0:
                console.write_stdout(f"{target} ran successfully.\n")
            else:
                console.write_stderr(f"{target} failed with code {result.process_exit_code}!\n")
        except Exception as e:
            console.write_stderr(f"Exception when attempting to run {target} : {e}\n")
            exit_code = -1

    yield Run(exit_code)

async def run(
    console: Console,
    workspace: Workspace,
    runner: InteractiveRunner,
    build_root: BuildRoot,
    options: RunOptions,
    global_options: GlobalOptions,
) -> Run:
    targets_to_valid_configs = await Get[TargetsToValidConfigurations](
        TargetsToValidConfigurationsRequest(
            BinaryConfiguration,
            goal_description=f"the `{options.name}` goal",
            error_if_no_valid_targets=True,
            expect_single_config=True,
        )
    )
    config = targets_to_valid_configs.configurations[0]
    binary = await Get[CreatedBinary](BinaryConfiguration, config)

    workdir = global_options.options.pants_workdir
    with temporary_dir(root_dir=workdir, cleanup=True) as tmpdir:
        path_relative_to_build_root = PurePath(tmpdir).relative_to(build_root.path).as_posix()
        workspace.materialize_directory(
            DirectoryToMaterialize(binary.digest, path_prefix=path_relative_to_build_root)
        )

        console.write_stdout(f"Running target: {config.address}\n")
        full_path = PurePath(tmpdir, binary.binary_name).as_posix()
        run_request = InteractiveProcessRequest(
            argv=(full_path, *options.values.args),
            run_in_workspace=True,
        )

        try:
            result = runner.run_local_interactive_process(run_request)
            exit_code = result.process_exit_code
            if result.process_exit_code == 0:
                console.write_stdout(f"{config.address} ran successfully.\n")
            else:
                console.write_stderr(
                    f"{config.address} failed with code {result.process_exit_code}!\n"
                )
        except Exception as e:
            console.write_stderr(f"Exception when attempting to run {config.address}: {e!r}\n")
            exit_code = -1

    return Run(exit_code)

# Gather Java source dependency analysis for every applicable target and dump it as JSON.
async def dump_java_source_analysis(targets: Targets, console: Console) -> DumpJavaSourceAnalysis:
    java_source_field_sets = [
        JavaFieldSet.create(tgt) for tgt in targets if JavaFieldSet.is_applicable(tgt)
    ]
    java_source_analysis = await MultiGet(
        Get(JavaSourceDependencyAnalysis, SourceFilesRequest([fs.sources]))
        for fs in java_source_field_sets
    )
    java_source_analysis_json = [
        {"address": str(fs.address), **analysis.to_debug_json_dict()}
        for (fs, analysis) in zip(java_source_field_sets, java_source_analysis)
    ]
    console.write_stdout(json.dumps(java_source_analysis_json))
    return DumpJavaSourceAnalysis(exit_code=0)

def run(console: Console, runner: InteractiveRunner,
        build_file_addresses: BuildFileAddresses) -> Run:
    console.write_stdout("Running the `run` goal\n")

    request = InteractiveProcessRequest(
        argv=["/usr/bin/python"],
        env=("TEST_ENV", "TEST"),
        run_in_workspace=False,
    )

    try:
        res = runner.run_local_interactive_process(request)
        print(f"Subprocess exited with result: {res.process_exit_code}")
        yield Run(res.process_exit_code)
    except Exception as e:
        print(f"Exception when running local interactive process: {e}")
        yield Run(-1)

# Write any rendered JVM lockfile test fixtures back into the workspace.
async def internal_render_test_lockfile_fixtures(
    rendered_fixtures: RenderedJVMLockfileFixtures,
    workspace: Workspace,
    console: Console,
) -> InternalGenerateTestLockfileFixturesGoal:
    if not rendered_fixtures:
        console.write_stdout("No test lockfile fixtures found.\n")
        return InternalGenerateTestLockfileFixturesGoal(exit_code=0)

    digest_contents = [
        FileContent(rendered_fixture.path, rendered_fixture.content)
        for rendered_fixture in rendered_fixtures
    ]
    snapshot = await Get(Snapshot, CreateDigest(digest_contents))
    console.write_stdout(f"Writing test lockfile fixtures: {snapshot.files}\n")
    workspace.write_digest(snapshot.digest)
    return InternalGenerateTestLockfileFixturesGoal(exit_code=0)

async def run(
    console: Console,
    workspace: Workspace,
    runner: InteractiveRunner,
    build_root: BuildRoot,
    addresses: Addresses,
    options: RunOptions,
) -> Run:
    address = addresses.expect_single()
    binary = await Get[CreatedBinary](Address, address)

    with temporary_dir(
        root_dir=PurePath(build_root.path, ".pants.d").as_posix(), cleanup=True
    ) as tmpdir:
        path_relative_to_build_root = PurePath(tmpdir).relative_to(build_root.path).as_posix()
        workspace.materialize_directory(
            DirectoryToMaterialize(binary.digest, path_prefix=path_relative_to_build_root)
        )

        console.write_stdout(f"Running target: {address}\n")
        full_path = PurePath(tmpdir, binary.binary_name).as_posix()
        run_request = InteractiveProcessRequest(
            argv=(full_path, *options.values.args),
            run_in_workspace=True,
        )

        try:
            result = runner.run_local_interactive_process(run_request)
            exit_code = result.process_exit_code
            if result.process_exit_code == 0:
                console.write_stdout(f"{address} ran successfully.\n")
            else:
                console.write_stderr(f"{address} failed with code {result.process_exit_code}!\n")
        except Exception as e:
            console.write_stderr(f"Exception when attempting to run {address}: {e!r}\n")
            exit_code = -1

    return Run(exit_code)

def fast_test(console: Console, addresses: BuildFileAddresses) -> Test:
    test_results = yield [
        Get(TestResult, Address, address.to_address()) for address in addresses
    ]

    did_any_fail = False
    for address, test_result in zip(addresses, test_results):
        if test_result.status == Status.FAILURE:
            did_any_fail = True
        if test_result.stdout:
            console.write_stdout(
                "{} stdout:\n{}\n".format(
                    address.reference(),
                    console.red(test_result.stdout)
                    if test_result.status == Status.FAILURE
                    else test_result.stdout,
                )
            )
        if test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving the
            # two streams.
            console.write_stdout(
                "{} stderr:\n{}\n".format(
                    address.reference(),
                    console.red(test_result.stderr)
                    if test_result.status == Status.FAILURE
                    else test_result.stderr,
                )
            )
        console.write_stdout("\n")

    for address, test_result in zip(addresses, test_results):
        console.print_stdout(
            '{0:80}.....{1:>10}'.format(address.reference(), test_result.status.value)
        )

    if did_any_fail:
        console.print_stderr(console.red('Tests failed'))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE

    yield Test(exit_code)

async def run(
    console: Console,
    workspace: Workspace,
    runner: InteractiveRunner,
    build_root: BuildRoot,
    bfa: BuildFileAddress,
) -> Run:
    target = bfa.to_address()
    binary = await Get[CreatedBinary](Address, target)

    with temporary_dir(
        root_dir=str(Path(build_root.path, ".pants.d")), cleanup=True
    ) as tmpdir:
        path_relative_to_build_root = str(Path(tmpdir).relative_to(build_root.path))
        workspace.materialize_directory(
            DirectoryToMaterialize(binary.digest, path_prefix=path_relative_to_build_root)
        )

        console.write_stdout(f"Running target: {target}\n")
        full_path = str(Path(tmpdir, binary.binary_name))
        run_request = InteractiveProcessRequest(
            argv=(full_path,),
            run_in_workspace=True,
        )

        try:
            result = runner.run_local_interactive_process(run_request)
            exit_code = result.process_exit_code
            if result.process_exit_code == 0:
                console.write_stdout(f"{target} ran successfully.\n")
            else:
                console.write_stderr(f"{target} failed with code {result.process_exit_code}!\n")
        except Exception as e:
            console.write_stderr(f"Exception when attempting to run {target} : {e}\n")
            exit_code = -1

    return Run(exit_code)

async def run_tests(
    console: Console,
    options: TestOptions,
    runner: InteractiveRunner,
    addresses: BuildFileAddresses,
) -> Test:
    if options.values.debug:
        address = await Get[BuildFileAddress](BuildFileAddresses, addresses)
        addr_debug_request = await Get[AddressAndDebugRequest](Address, address.to_address())
        result = runner.run_local_interactive_process(addr_debug_request.request.ipr)
        return Test(result.process_exit_code)

    results = await MultiGet(
        Get[AddressAndTestResult](Address, addr.to_address()) for addr in addresses
    )

    did_any_fail = False
    filtered_results = [(x.address, x.test_result) for x in results if x.test_result is not None]

    for address, test_result in filtered_results:
        if test_result.status == Status.FAILURE:
            did_any_fail = True
        if test_result.stdout:
            console.write_stdout(f"{address.reference()} stdout:\n{test_result.stdout}\n")
        if test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving the
            # two streams.
            console.write_stdout(f"{address.reference()} stderr:\n{test_result.stderr}\n")
        console.write_stdout("\n")

    for address, test_result in filtered_results:
        console.print_stdout(f'{address.reference():80}.....{test_result.status.value:>10}')

    if did_any_fail:
        console.print_stderr(console.red('\nTests failed'))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE

    return Test(exit_code)

async def check_default_tools(
    console: Console,
    real_opts: _Options,
) -> CheckDefaultTools:
    # The real options know about all the registered tools.
    for scope, si in real_opts.options.known_scope_to_info.items():
        if si.subsystem_cls and issubclass(si.subsystem_cls, ExternalTool):
            tool_cls = si.subsystem_cls
            console.print_stdout(f"Checking {console.cyan(tool_cls.name)}:")
            for known_version in tool_cls.default_known_versions:
                ver, plat_val, sha256, length = tool_cls.split_known_version_str(known_version)
                # Note that we don't want to use the real option values here - we want to
                # verify that the *defaults* aren't broken. However the get_request_for() method
                # requires an instance (since it can consult option values, including custom
                # options for specific tools, that we don't know about), so we construct a
                # default one, but we force the --version to the one we're checking (which will
                # typically be the same as the default version, but doesn't have to be, if the
                # tool provides default_known_versions for versions other than default_version).
                args = ("./pants", f"--{scope}-version={ver}")
                blank_opts = await Get(
                    _Options,
                    SessionValues(
                        {
                            OptionsBootstrapper: OptionsBootstrapper(
                                tuple(), ("./pants",), args, _ChainedConfig(tuple()), CliAlias()
                            )
                        }
                    ),
                )
                instance = tool_cls(blank_opts.options.for_scope(scope))
                req = instance.get_request_for(plat_val, sha256, length)
                console.write_stdout(f" version {ver} for {plat_val}... ")
                # TODO: We'd like to run all the requests concurrently, but since we can't catch
                # engine exceptions, we wouldn't have an easy way to output which one failed.
                await Get(DownloadedExternalTool, ExternalToolRequest, req)
                console.print_stdout(console.sigil_succeeded())
    return CheckDefaultTools(exit_code=0)

async def run_repl(
    console: Console,
    workspace: Workspace,
    runner: InteractiveRunner,
    options: ReplOptions,
    transitive_targets: TransitiveTargets,
    build_root: BuildRoot,
    union_membership: UnionMembership,
    global_options: GlobalOptions,
) -> Repl:
    # We can guarantee that we will only even enter this `goal_rule` if there exists an implementer
    # of the `ReplImplementation` union because `LegacyGraphSession.run_goal_rules()` will not
    # execute this rule's body if there are no implementations registered.
    membership: Iterable[Type[ReplImplementation]] = union_membership.union_rules[
        ReplImplementation
    ]
    implementations = {impl.name: impl for impl in membership}

    default_repl = "python"
    repl_shell_name = cast(str, options.values.shell or default_repl)
    repl_implementation_cls = implementations.get(repl_shell_name)
    if repl_implementation_cls is None:
        available = sorted(set(implementations.keys()))
        console.write_stdout(
            f"{repl_shell_name} is not an installed REPL program. Available REPLs: {available}"
        )
        return Repl(-1)

    repl_impl = repl_implementation_cls(
        targets=Targets(
            tgt for tgt in transitive_targets.closure if repl_implementation_cls.is_valid(tgt)
        )
    )
    repl_binary = await Get[ReplBinary](ReplImplementation, repl_impl)

    with temporary_dir(root_dir=global_options.options.pants_workdir, cleanup=False) as tmpdir:
        path_relative_to_build_root = PurePath(tmpdir).relative_to(build_root.path).as_posix()
        workspace.materialize_directory(
            DirectoryToMaterialize(repl_binary.digest, path_prefix=path_relative_to_build_root)
        )

        full_path = PurePath(tmpdir, repl_binary.binary_name).as_posix()
        run_request = InteractiveProcessRequest(
            argv=(full_path,),
            run_in_workspace=True,
        )

        result = runner.run_local_interactive_process(run_request)
        exit_code = result.process_exit_code

    if exit_code == 0:
        console.write_stdout("REPL exited successfully.")
    else:
        console.write_stdout(f"REPL exited with error: {exit_code}.")

    return Repl(exit_code)

async def fast_test(console: Console, addresses: BuildFileAddresses) -> Test:
    results = await MultiGet(
        Get(AddressAndTestResult, Address, addr.to_address()) for addr in addresses
    )

    did_any_fail = False
    filtered_results = [(x.address, x.test_result) for x in results if x.test_result is not None]

    for address, test_result in filtered_results:
        if test_result.status == Status.FAILURE:
            did_any_fail = True
        if test_result.stdout:
            console.write_stdout(
                "{} stdout:\n{}\n".format(
                    address.reference(),
                    (
                        console.red(test_result.stdout)
                        if test_result.status == Status.FAILURE
                        else test_result.stdout
                    ),
                )
            )
        if test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving the
            # two streams.
            console.write_stdout(
                "{} stderr:\n{}\n".format(
                    address.reference(),
                    (
                        console.red(test_result.stderr)
                        if test_result.status == Status.FAILURE
                        else test_result.stderr
                    ),
                )
            )
        console.write_stdout("\n")

    for address, test_result in filtered_results:
        console.print_stdout(
            '{0:80}.....{1:>10}'.format(address.reference(), test_result.status.value)
        )

    if did_any_fail:
        console.print_stderr(console.red('Tests failed'))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE

    return Test(exit_code)

async def run_tests(
    console: Console,
    options: TestOptions,
    runner: InteractiveRunner,
    addresses_with_origins: AddressesWithOrigins,
    workspace: Workspace,
) -> Test:
    if options.values.debug:
        address_with_origin = addresses_with_origins.expect_single()
        addr_debug_request = await Get[AddressAndDebugRequest](
            AddressWithOrigin, address_with_origin
        )
        result = runner.run_local_interactive_process(addr_debug_request.request.ipr)
        return Test(result.process_exit_code)

    results = await MultiGet(
        Get[AddressAndTestResult](AddressWithOrigin, address_with_origin)
        for address_with_origin in addresses_with_origins
    )

    if options.values.run_coverage:
        # TODO: consider warning if a user uses `--coverage` but the language backend does not
        # provide coverage support. This might be too chatty to be worth doing?
        results_with_coverage = [
            x
            for x in results
            if x.test_result is not None and x.test_result.coverage_data is not None
        ]
        coverage_data_collections = itertools.groupby(
            results_with_coverage,
            lambda address_and_test_result: address_and_test_result.test_result.coverage_data.batch_cls,  # type: ignore[union-attr]
        )
        coverage_reports = await MultiGet(
            Get[CoverageReport](
                CoverageDataBatch,
                coverage_batch_cls(tuple(addresses_and_test_results)),  # type: ignore[call-arg]
            )
            for coverage_batch_cls, addresses_and_test_results in coverage_data_collections
        )
        for report in coverage_reports:
            workspace.materialize_directory(
                DirectoryToMaterialize(
                    report.result_digest,
                    path_prefix=str(report.directory_to_materialize_to),
                )
            )
            console.print_stdout(
                f"Wrote coverage report to `{report.directory_to_materialize_to}`"
            )

    did_any_fail = False
    filtered_results = [(x.address, x.test_result) for x in results if x.test_result is not None]

    for address, test_result in filtered_results:
        if test_result.status == Status.FAILURE:
            did_any_fail = True
        if test_result.stdout:
            console.write_stdout(f"{address.reference()} stdout:\n{test_result.stdout}\n")
        if test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving the
            # two streams.
            console.write_stdout(f"{address.reference()} stderr:\n{test_result.stderr}\n")
        console.write_stdout("\n")

    for address, test_result in filtered_results:
        console.print_stdout(f"{address.reference():80}.....{test_result.status.value:>10}")

    if did_any_fail:
        console.print_stderr(console.red("\nTests failed"))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE

    return Test(exit_code)

async def run_tests(
    console: Console,
    options: TestOptions,
    interactive_runner: InteractiveRunner,
    targets_with_origins: TargetsWithOrigins,
    workspace: Workspace,
    union_membership: UnionMembership,
    registered_target_types: RegisteredTargetTypes,
) -> Test:
    config_types: Iterable[Type[TestConfiguration]] = union_membership.union_rules[
        TestConfiguration
    ]

    if options.values.debug:
        target_with_origin = targets_with_origins.expect_single()
        target = target_with_origin.target
        valid_config_types = [
            config_type for config_type in config_types if config_type.is_valid(target)
        ]
        if not valid_config_types:
            all_valid_target_types = itertools.chain.from_iterable(
                config_type.valid_target_types(
                    registered_target_types.types, union_membership=union_membership
                )
                for config_type in config_types
            )
            formatted_target_types = sorted(
                target_type.alias for target_type in all_valid_target_types
            )
            raise ValueError(
                f"The `test` goal only works with the following target types: "
                f"{formatted_target_types}\n\nYou used {target.address} with target "
                f"type {repr(target.alias)}."
            )
        if len(valid_config_types) > 1:
            possible_config_types = sorted(
                config_type.__name__ for config_type in valid_config_types
            )
            raise ValueError(
                f"Multiple of the registered test implementations work for {target.address} "
                f"(target type {repr(target.alias)}). It is ambiguous which implementation to use. "
                f"Possible implementations: {possible_config_types}."
            )
        config_type = valid_config_types[0]
        logger.info(f"Starting test in debug mode: {target.address.reference()}")
        request = await Get[TestDebugRequest](
            TestConfiguration, config_type.create(target_with_origin)
        )
        debug_result = interactive_runner.run_local_interactive_process(request.ipr)
        return Test(debug_result.process_exit_code)

    # TODO: possibly factor out this filtering out of empty `sources`. We do this at this level of
    # abstraction, rather than in the test runners, because the test runners often will use
    # auto-discovery when given no input files.
    configs = tuple(
        config_type.create(target_with_origin)
        for target_with_origin in targets_with_origins
        for config_type in config_types
        if config_type.is_valid(target_with_origin.target)
    )
    all_hydrated_sources = await MultiGet(
        Get[HydratedSources](HydrateSourcesRequest, test_target.sources.request)
        for test_target in configs
    )

    results = await MultiGet(
        Get[AddressAndTestResult](WrappedTestConfiguration(config))
        for config, hydrated_sources in zip(configs, all_hydrated_sources)
        if hydrated_sources.snapshot.files
    )

    did_any_fail = False
    for result in results:
        if result.test_result.status == Status.FAILURE:
            did_any_fail = True
        if result.test_result.stdout:
            console.write_stdout(
                f"{result.address.reference()} stdout:\n{result.test_result.stdout}\n"
            )
        if result.test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving
            # the two streams.
            console.write_stdout(
                f"{result.address.reference()} stderr:\n{result.test_result.stderr}\n"
            )
        console.write_stdout("\n")

    for result in results:
        console.print_stdout(
            f"{result.address.reference():80}.....{result.test_result.status.value:>10}"
        )

    if did_any_fail:
        console.print_stderr(console.red("\nTests failed"))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE

    if options.values.run_coverage:
        # TODO: consider warning if a user uses `--coverage` but the language backend does not
        # provide coverage support. This might be too chatty to be worth doing?
        results_with_coverage = [x for x in results if x.test_result.coverage_data is not None]
        coverage_data_collections = itertools.groupby(
            results_with_coverage,
            lambda address_and_test_result: (
                address_and_test_result.test_result.coverage_data.batch_cls  # type: ignore[union-attr]
            ),
        )
        coverage_reports = await MultiGet(
            Get[CoverageReport](
                CoverageDataBatch,
                coverage_batch_cls(tuple(addresses_and_test_results)),  # type: ignore[call-arg]
            )
            for coverage_batch_cls, addresses_and_test_results in coverage_data_collections
        )

        coverage_report_files = []
        for report in coverage_reports:
            report_file = report.materialize(console, workspace)
            if report_file is not None:
                coverage_report_files.append(report_file)

        if coverage_report_files and options.values.open_coverage:
            desktop.ui_open(console, interactive_runner, coverage_report_files)

    return Test(exit_code)

async def run_tests(
    console: Console,
    options: TestOptions,
    interactive_runner: InteractiveRunner,
    targets_with_origins: TargetsWithOrigins,
    workspace: Workspace,
    union_membership: UnionMembership,
    registered_target_types: RegisteredTargetTypes,
) -> Test:
    config_types: Iterable[Type[TestConfiguration]] = union_membership.union_rules[
        TestConfiguration
    ]

    if options.values.debug:
        target_with_origin = targets_with_origins.expect_single()
        target = target_with_origin.target
        valid_config_types = [
            config_type for config_type in config_types if config_type.is_valid(target)
        ]
        if not valid_config_types:
            all_valid_target_types = itertools.chain.from_iterable(
                config_type.valid_target_types(
                    registered_target_types.types, union_membership=union_membership
                )
                for config_type in config_types
            )
            formatted_target_types = sorted(
                target_type.alias for target_type in all_valid_target_types
            )
            raise ValueError(
                f"The `test` goal only works with the following target types: "
                f"{formatted_target_types}\n\nYou used {target.address} with target "
                f"type {repr(target.alias)}."
            )
        if len(valid_config_types) > 1:
            possible_config_types = sorted(
                config_type.__name__ for config_type in valid_config_types
            )
            raise ValueError(
                f"Multiple of the registered test implementations work for {target.address} "
                f"(target type {repr(target.alias)}). It is ambiguous which implementation to use. "
                f"Possible implementations: {possible_config_types}."
            )
        config_type = valid_config_types[0]
        logger.info(f"Starting test in debug mode: {target.address.reference()}")
        request = await Get[TestDebugRequest](
            TestConfiguration, config_type.create(target_with_origin)
        )
        debug_result = interactive_runner.run_local_interactive_process(request.ipr)
        return Test(debug_result.process_exit_code)

    configs = tuple(
        config_type.create(target_with_origin)
        for target_with_origin in targets_with_origins
        for config_type in config_types
        if config_type.is_valid(target_with_origin.target)
    )
    configs_with_sources = await Get[ConfigurationsWithSources](
        ConfigurationsWithSourcesRequest(configs)
    )

    results = await MultiGet(
        Get[AddressAndTestResult](WrappedTestConfiguration(config))
        for config in configs_with_sources
    )

    did_any_fail = False
    for result in results:
        if result.test_result.status == Status.FAILURE:
            did_any_fail = True
        if result.test_result.stdout:
            console.write_stdout(
                f"{result.address.reference()} stdout:\n{result.test_result.stdout}\n"
            )
        if result.test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving
            # the two streams.
            console.write_stdout(
                f"{result.address.reference()} stderr:\n{result.test_result.stderr}\n"
            )
        console.write_stdout("\n")

    for result in results:
        console.print_stdout(
            f"{result.address.reference():80}.....{result.test_result.status.value:>10}"
        )

    if did_any_fail:
        console.print_stderr(console.red("\nTests failed"))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE

    if options.values.run_coverage:
        all_coverage_data: Iterable[CoverageData] = [
            result.test_result.coverage_data
            for result in results
            if result.test_result.coverage_data is not None
        ]

        coverage_types_to_collection_types: Dict[
            Type[CoverageData], Type[CoverageDataCollection]
        ] = {
            collection_cls.element_type: collection_cls
            for collection_cls in union_membership.union_rules[CoverageDataCollection]
        }
        coverage_collections: List[CoverageDataCollection] = []
        for data_cls, data in itertools.groupby(all_coverage_data, lambda data: type(data)):
            collection_cls = coverage_types_to_collection_types[data_cls]
            coverage_collections.append(collection_cls(data))

        coverage_reports = await MultiGet(
            Get[CoverageReport](CoverageDataCollection, coverage_collection)
            for coverage_collection in coverage_collections
        )

        coverage_report_files = []
        for report in coverage_reports:
            report_file = report.materialize(console, workspace)
            if report_file is not None:
                coverage_report_files.append(report_file)

        if coverage_report_files and options.values.open_coverage:
            desktop.ui_open(console, interactive_runner, coverage_report_files)

    return Test(exit_code)

async def run_tests(
    console: Console,
    options: TestOptions,
    interactive_runner: InteractiveRunner,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Test:
    if options.values.debug:
        targets_to_valid_configs = await Get[TargetsToValidConfigurations](
            TargetsToValidConfigurationsRequest(
                TestConfiguration,
                goal_description="`test --debug`",
                error_if_no_valid_targets=True,
                expect_single_config=True,
            )
        )
        config = targets_to_valid_configs.configurations[0]
        logger.info(f"Starting test in debug mode: {config.address.reference()}")
        request = await Get[TestDebugRequest](TestConfiguration, config)
        debug_result = interactive_runner.run_local_interactive_process(request.ipr)
        return Test(debug_result.process_exit_code)

    targets_to_valid_configs = await Get[TargetsToValidConfigurations](
        TargetsToValidConfigurationsRequest(
            TestConfiguration,
            goal_description=f"the `{options.name}` goal",
            error_if_no_valid_targets=False,
        )
    )
    configs_with_sources = await Get[ConfigurationsWithSources](
        ConfigurationsWithSourcesRequest(targets_to_valid_configs.configurations)
    )

    results = await MultiGet(
        Get[AddressAndTestResult](WrappedTestConfiguration(config))
        for config in configs_with_sources
    )

    did_any_fail = False
    for result in results:
        if result.test_result.status == Status.FAILURE:
            did_any_fail = True
        if result.test_result.stdout:
            console.write_stdout(
                f"{result.address.reference()} stdout:\n{result.test_result.stdout}\n"
            )
        if result.test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving
            # the two streams.
            console.write_stdout(
                f"{result.address.reference()} stderr:\n{result.test_result.stderr}\n"
            )
        console.write_stdout("\n")

    for result in results:
        console.print_stdout(
            f"{result.address.reference():80}.....{result.test_result.status.value:>10}"
        )

    if did_any_fail:
        console.print_stderr(console.red("\nTests failed"))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE

    if options.values.run_coverage:
        all_coverage_data: Iterable[CoverageData] = [
            result.test_result.coverage_data
            for result in results
            if result.test_result.coverage_data is not None
        ]

        coverage_types_to_collection_types: Dict[
            Type[CoverageData], Type[CoverageDataCollection]
        ] = {
            collection_cls.element_type: collection_cls
            for collection_cls in union_membership.union_rules[CoverageDataCollection]
        }
        coverage_collections: List[CoverageDataCollection] = []
        for data_cls, data in itertools.groupby(all_coverage_data, lambda data: type(data)):
            collection_cls = coverage_types_to_collection_types[data_cls]
            coverage_collections.append(collection_cls(data))

        coverage_reports = await MultiGet(
            Get[CoverageReport](CoverageDataCollection, coverage_collection)
            for coverage_collection in coverage_collections
        )

        coverage_report_files = []
        for report in coverage_reports:
            report_file = report.materialize(console, workspace)
            if report_file is not None:
                coverage_report_files.append(report_file)

        if coverage_report_files and options.values.open_coverage:
            desktop.ui_open(console, interactive_runner, coverage_report_files)

    return Test(exit_code)

# Dump the first-party symbol mapping used for dependency inference as JSON on stdout.
async def dump_dep_inference_data(
    console: Console, first_party_dep_map: FirstPartySymbolMapping
) -> DumpFirstPartyDepMap:
    console.write_stdout(json.dumps(first_party_dep_map.symbols.to_json_dict()))
    return DumpFirstPartyDepMap(exit_code=0)

async def run_tests(
    console: Console,
    options: TestOptions,
    runner: InteractiveRunner,
    targets_with_origins: HydratedTargetsWithOrigins,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Test:
    test_runners: Iterable[Type[TestRunner]] = union_membership.union_rules[TestRunner]

    if options.values.debug:
        target_with_origin = targets_with_origins.expect_single()
        adaptor_with_origin = TargetAdaptorWithOrigin.create(
            target_with_origin.target.adaptor, target_with_origin.origin
        )
        address = adaptor_with_origin.adaptor.address
        valid_test_runners = [
            test_runner
            for test_runner in test_runners
            if test_runner.is_valid_target(adaptor_with_origin)
        ]
        if not valid_test_runners:
            raise ValueError(f"No valid test runner for {address}.")
        if len(valid_test_runners) > 1:
            raise ValueError(
                f"Multiple possible test runners for {address} "
                f"({', '.join(test_runner.__name__ for test_runner in valid_test_runners)})."
            )
        test_runner = valid_test_runners[0]
        logger.info(f"Starting test in debug mode: {address.reference()}")
        request = await Get[TestDebugRequest](TestRunner, test_runner(adaptor_with_origin))
        debug_result = runner.run_local_interactive_process(request.ipr)
        return Test(debug_result.process_exit_code)

    adaptors_with_origins = tuple(
        TargetAdaptorWithOrigin.create(target_with_origin.target.adaptor, target_with_origin.origin)
        for target_with_origin in targets_with_origins
        if target_with_origin.target.adaptor.has_sources()
    )

    results = await MultiGet(
        Get[AddressAndTestResult](
            WrappedTestRunner, WrappedTestRunner(test_runner(adaptor_with_origin))
        )
        for adaptor_with_origin in adaptors_with_origins
        for test_runner in test_runners
        if test_runner.is_valid_target(adaptor_with_origin)
    )

    did_any_fail = False
    for result in results:
        if result.test_result.status == Status.FAILURE:
            did_any_fail = True
        if result.test_result.stdout:
            console.write_stdout(
                f"{result.address.reference()} stdout:\n{result.test_result.stdout}\n"
            )
        if result.test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues interleaving
            # the two streams.
            console.write_stdout(
                f"{result.address.reference()} stderr:\n{result.test_result.stderr}\n"
            )
        console.write_stdout("\n")

    for result in results:
        console.print_stdout(
            f"{result.address.reference():80}.....{result.test_result.status.value:>10}"
        )

    if did_any_fail:
        console.print_stderr(console.red("\nTests failed"))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE

    if options.values.run_coverage:
        # TODO: consider warning if a user uses `--coverage` but the language backend does not
        # provide coverage support. This might be too chatty to be worth doing?
        results_with_coverage = [x for x in results if x.test_result.coverage_data is not None]
        coverage_data_collections = itertools.groupby(
            results_with_coverage,
            lambda address_and_test_result: address_and_test_result.test_result.coverage_data.batch_cls,  # type: ignore[union-attr]
        )
        coverage_reports = await MultiGet(
            Get[CoverageReport](
                CoverageDataBatch,
                coverage_batch_cls(tuple(addresses_and_test_results)),  # type: ignore[call-arg]
            )
            for coverage_batch_cls, addresses_and_test_results in coverage_data_collections
        )
        for report in coverage_reports:
            report.materialize(console, workspace)

    return Test(exit_code)

async def run(
    console: Console,
    workspace: Workspace,
    runner: InteractiveRunner,
    build_root: BuildRoot,
    targets_with_origins: TargetsWithOrigins,
    options: RunOptions,
    global_options: GlobalOptions,
    union_membership: UnionMembership,
    registered_target_types: RegisteredTargetTypes,
) -> Run:
    valid_config_types_by_target = gather_valid_binary_configuration_types(
        goal_subsytem=options,
        targets_with_origins=targets_with_origins,
        union_membership=union_membership,
        registered_target_types=registered_target_types,
    )

    bulleted_list_sep = "\n * "

    if len(valid_config_types_by_target) > 1:
        binary_target_addresses = sorted(
            binary_target.address.spec for binary_target in valid_config_types_by_target
        )
        raise ValueError(
            f"The `run` goal only works on one binary target but was given multiple targets that "
            f"can produce a binary:"
            f"{bulleted_list_sep}{bulleted_list_sep.join(binary_target_addresses)}\n\n"
            f"Please select one of these targets to run."
        )

    target, valid_config_types = list(valid_config_types_by_target.items())[0]
    if len(valid_config_types) > 1:
        possible_config_types = sorted(
            config_type.__name__ for config_type in valid_config_types
        )
        # TODO: improve this error message. (It's never actually triggered yet because we only have
        # Python implemented with V2.) A better error message would explain to users how they can
        # resolve the issue.
        raise ValueError(
            f"Multiple of the registered binary implementations work for {target.address} "
            f"(target type {repr(target.alias)}).\n\n"
            f"It is ambiguous which implementation to use. Possible implementations:"
            f"{bulleted_list_sep}{bulleted_list_sep.join(possible_config_types)}."
        )
    config_type = valid_config_types[0]

    binary = await Get[CreatedBinary](BinaryConfiguration, config_type.create(target))

    workdir = global_options.options.pants_workdir
    with temporary_dir(root_dir=workdir, cleanup=True) as tmpdir:
        path_relative_to_build_root = PurePath(tmpdir).relative_to(build_root.path).as_posix()
        workspace.materialize_directory(
            DirectoryToMaterialize(binary.digest, path_prefix=path_relative_to_build_root)
        )

        console.write_stdout(f"Running target: {target.address}\n")
        full_path = PurePath(tmpdir, binary.binary_name).as_posix()
        run_request = InteractiveProcessRequest(
            argv=(full_path, *options.values.args),
            run_in_workspace=True,
        )

        try:
            result = runner.run_local_interactive_process(run_request)
            exit_code = result.process_exit_code
            if result.process_exit_code == 0:
                console.write_stdout(f"{target.address} ran successfully.\n")
            else:
                console.write_stderr(
                    f"{target.address} failed with code {result.process_exit_code}!\n"
                )
        except Exception as e:
            console.write_stderr(f"Exception when attempting to run {target.address}: {e!r}\n")
            exit_code = -1

    return Run(exit_code)