Example #1
def test_invalid_variable() -> None:
    pants_env = CompleteEnvironment()

    with pytest.raises(ValueError) as exc:
        pants_env.get_subset(["3INVALID=doesn't matter"])
    assert (
        "An invalid variable was requested via the --test-extra-env-var mechanism: 3INVALID"
        in str(exc))
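A hedged companion sketch of the happy path that this test guards: get_subset appears to accept both bare names and NAME=value assignments (inferred from the examples on this page; the values below are illustrative).

def test_valid_variable_sketch() -> None:
    pants_env = CompleteEnvironment({"HOME": "/home/alice"})
    # Bare names are looked up in the captured environment; NAME=value entries
    # are set explicitly.
    subset = pants_env.get_subset(["HOME", "EXTRA=1"])
    assert dict(subset) == {"HOME": "/home/alice", "EXTRA": "1"}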
Example #2
def create_dynamic_remote_options(
    *,
    initial_headers: dict[str, str] | None = None,
    address: str | None = "grpc://fake.url:10",
    token_path: str | None = None,
    plugin: str | None = None,
) -> DynamicRemoteOptions:
    if initial_headers is None:
        initial_headers = {}
    args = [
        "--remote-cache-read",
        f"--remote-execution-address={address}",
        f"--remote-store-address={address}",
        f"--remote-store-headers={initial_headers}",
        f"--remote-execution-headers={initial_headers}",
        "--remote-instance-name=main",
    ]
    if token_path:
        args.append(f"--remote-oauth-bearer-token-path={token_path}")
    if plugin:
        args.append(f"--remote-auth-plugin={plugin}")
    ob = create_options_bootstrapper(args)
    env = CompleteEnvironment({})
    _build_config, options = OptionsInitializer(ob).build_config_and_options(ob, env, raise_=False)
    return DynamicRemoteOptions.from_options(options, env)[0]
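A hedged usage sketch of the helper above; the asserted field name is an assumption based on the --remote-instance-name flag it always passes.

def test_dynamic_remote_options_sketch() -> None:
    opts = create_dynamic_remote_options(initial_headers={"x-source": "test"})
    # The helper hard-codes --remote-instance-name=main.
    assert opts.instance_name == "main"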
Example #3
def create_execution_options(
    *,
    initial_headers: dict[str, str],
    token_path: str | None = None,
    plugin: str | None = None,
    remote_store_address: str = "grpc://fake.url:10",
    remote_execution_address: str = "grpc://fake.url:10",
    local_only: bool = False,
) -> ExecutionOptions:
    args = [
        "--remote-cache-read",
        f"--remote-execution-address={remote_execution_address}",
        f"--remote-store-address={remote_store_address}",
        f"--remote-store-headers={initial_headers}",
        f"--remote-execution-headers={initial_headers}",
        "--remote-instance-name=main",
    ]
    if token_path:
        args.append(f"--remote-oauth-bearer-token-path={token_path}")
    if plugin:
        args.append(f"--remote-auth-plugin={plugin}")
    ob = create_options_bootstrapper(args)
    env = CompleteEnvironment({})
    _build_config, options = OptionsInitializer(
        ob, env).build_config_and_options(ob, env, raise_=False)
    return ExecutionOptions.from_options(options, env, local_only=local_only)
Example #4
    def set_options(
        self,
        args: Iterable[str],
        *,
        env: Mapping[str, str] | None = None,
        env_inherit: set[str] | None = None,
    ) -> None:
        """Update the engine session with new options and/or environment variables.

        The environment variables will be used to set the `CompleteEnvironment`, which is the
        environment variables captured by the parent Pants process. Some rules use this to be able
        to read arbitrary env vars. Any environment variables that start with `PANTS_` will also be
        used to set options.

        Environment variables listed in `env_inherit` and not in `env` will be inherited from the
        test runner's environment (os.environ).

        This will override any previously configured values.
        """
        env = {
            **{
                k: os.environ[k]
                for k in (env_inherit or set()) if k in os.environ
            },
            **(env or {}),
        }
        self.options_bootstrapper = create_options_bootstrapper(args=args,
                                                                env=env)
        self.environment = CompleteEnvironment(env)
        self._set_new_session(self.scheduler.scheduler)
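A hedged call-site sketch (option and variable names are illustrative): PANTS_-prefixed env vars set options, while env_inherit pulls values from the test runner's own environment.

rule_runner.set_options(
    ["--backend-packages=[]"],
    env={"PANTS_LEVEL": "debug"},  # equivalent to passing --level=debug
    env_inherit={"PATH", "HOME"},  # copied from os.environ if present
)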
Example #5
def launch_new_pantsd_instance():
    """An external entrypoint that spawns a new pantsd instance."""

    options_bootstrapper = OptionsBootstrapper.create(env=os.environ,
                                                      args=sys.argv,
                                                      allow_pantsrc=True)
    env = CompleteEnvironment(os.environ)
    daemon = PantsDaemon.create(options_bootstrapper, env)
    daemon.run_sync()
Example #6
    def test_global_options_validation(self):
        # Specify an invalid combination of options.
        ob = OptionsBootstrapper.create(
            env={}, args=["--backend-packages=[]", "--remote-execution"], allow_pantsrc=False
        )
        env = CompleteEnvironment({})
        with self.assertRaises(OptionsError) as exc:
            OptionsInitializer(ob, env).build_config_and_options(ob, env, raise_=True)
        self.assertIn("The `--remote-execution` option requires", str(exc.exception))
Example #7
File: test.py Project: rhysyngsun/pants
def get_filtered_environment(
    test_subsystem: TestSubsystem, complete_env: CompleteEnvironment
) -> TestExtraEnv:
    env = (
        complete_env.get_subset(test_subsystem.extra_env_vars)
        if test_subsystem.extra_env_vars
        else FrozenDict({})
    )
    return TestExtraEnv(env)
Example #8
    def test_invalid_version(self) -> None:
        options_bootstrapper = OptionsBootstrapper.create(
            env={},
            args=["--backend-packages=[]", "--pants-version=99.99.9999"],
            allow_pantsrc=False,
        )

        env = CompleteEnvironment({})
        with self.assertRaises(ExecutionError):
            OptionsInitializer(options_bootstrapper).build_config_and_options(
                options_bootstrapper, env, raise_=True)
Example #9
    def single_daemonized_run(
        self,
        args: Tuple[str, ...],
        env: Dict[str, str],
        cancellation_latch: PySessionCancellationLatch,
    ) -> ExitCode:
        """Run a single daemonized run of Pants.

        All aspects of the `sys` global should already have been replaced in `__call__`, so this
        method should not need any special handling for the fact that it's running in a proxied
        environment.
        """

        try:
            logger.debug("Connected to pantsd")
            # Capture the client's start time, which we propagate here in order to get an accurate
            # view of total time.
            env_start_time = env.get("PANTSD_RUNTRACKER_CLIENT_START_TIME", None)
            if not env_start_time:
                # NB: We warn rather than erroring here because it eases use of non-Pants nailgun
                # clients for testing.
                logger.warning(
                    "No start time was reported by the client! Metrics may be inaccurate."
                )
            start_time = float(env_start_time) if env_start_time else time.time()

            options_bootstrapper = OptionsBootstrapper.create(
                env=env, args=args, allow_pantsrc=True
            )

            # Run using the pre-warmed Session.
            complete_env = CompleteEnvironment(env)
            scheduler, options_initializer = self._core.prepare(options_bootstrapper, complete_env)
            runner = LocalPantsRunner.create(
                complete_env,
                options_bootstrapper,
                scheduler=scheduler,
                options_initializer=options_initializer,
                cancellation_latch=cancellation_latch,
            )
            return runner.run(start_time)
        except Exception as e:
            logger.exception(e)
            return PANTS_FAILED_EXIT_CODE
        except KeyboardInterrupt:
            print("Interrupted by user.\n", file=sys.stderr)
            return PANTS_FAILED_EXIT_CODE
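A hedged client-side sketch of where PANTSD_RUNTRACKER_CLIENT_START_TIME comes from; the connecting client is not part of this example.

# The client records its own start time so that pantsd metrics cover the whole
# run rather than just the daemonized portion.
client_env = {**os.environ, "PANTSD_RUNTRACKER_CLIENT_START_TIME": str(time.time())}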
Example #10
    def run(self, start_time: float) -> ExitCode:
        self.scrub_pythonpath()

        options_bootstrapper = OptionsBootstrapper.create(env=self.env,
                                                          args=self.args,
                                                          allow_pantsrc=True)
        with warnings.catch_warnings(record=True):
            bootstrap_options = options_bootstrapper.bootstrap_options
            global_bootstrap_options = bootstrap_options.for_global_scope()

        # We enable logging here, and everything before it will be routed through regular
        # Python logging.
        stdin_fileno = sys.stdin.fileno()
        stdout_fileno = sys.stdout.fileno()
        stderr_fileno = sys.stderr.fileno()
        with initialize_stdio(global_bootstrap_options), stdio_destination(
                stdin_fileno=stdin_fileno,
                stdout_fileno=stdout_fileno,
                stderr_fileno=stderr_fileno,
        ):
            # N.B. We inline imports to speed up the python thin client run, and avoids importing
            # engine types until after the runner has had a chance to set PANTS_BIN_NAME.

            if self._should_run_with_pantsd(global_bootstrap_options):
                from pants.bin.remote_pants_runner import RemotePantsRunner

                try:
                    remote_runner = RemotePantsRunner(self.args, self.env,
                                                      options_bootstrapper)
                    return remote_runner.run(start_time)
                except RemotePantsRunner.Fallback as e:
                    logger.warning(
                        f"Client exception: {e!r}, falling back to non-daemon mode"
                    )

            from pants.bin.local_pants_runner import LocalPantsRunner

            # We only install signal handling via ExceptionSink if the run will execute in this process.
            ExceptionSink.install(
                log_location=init_workdir(global_bootstrap_options),
                pantsd_instance=False)
            runner = LocalPantsRunner.create(
                env=CompleteEnvironment(self.env),
                options_bootstrapper=options_bootstrapper)
            return runner.run(start_time)
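A hedged sketch of how this runner is typically driven from an entry point; the constructor signature is assumed from the attributes used above (self.args, self.env).

runner = PantsRunner(args=sys.argv, env=os.environ)
sys.exit(runner.run(start_time=time.time()))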
Example #11
def test_prepare_scheduler():
    # A core with no services.
    def create_services(bootstrap_options, legacy_graph_scheduler):
        return PantsServices()

    env = CompleteEnvironment({})
    core = PantsDaemonCore(create_options_bootstrapper([]), env,
                           PyExecutor(2, 4), create_services)

    first_scheduler, first_options_initializer = core.prepare(
        create_options_bootstrapper(["-ldebug"]),
        env,
    )
    second_scheduler, second_options_initializer = core.prepare(
        create_options_bootstrapper(["-lwarn"]),
        env,
    )
    assert first_scheduler is not second_scheduler
    assert first_options_initializer is second_options_initializer
Example #12
    def create(
        cls,
        env: CompleteEnvironment,
        options_bootstrapper: OptionsBootstrapper,
        options_initializer: Optional[OptionsInitializer] = None,
        scheduler: Optional[GraphScheduler] = None,
        cancellation_latch: Optional[PySessionCancellationLatch] = None,
    ) -> LocalPantsRunner:
        """Creates a new LocalPantsRunner instance by parsing options.

        By the time this method runs, logging will already have been initialized in either
        PantsRunner or DaemonPantsRunner.

        :param env: The environment for this run.
        :param options_bootstrapper: The OptionsBootstrapper instance to reuse.
        :param scheduler: If being called from the daemon, a warmed scheduler to use.
        """
        options_initializer = options_initializer or OptionsInitializer(options_bootstrapper, env)
        build_config, options = options_initializer.build_config_and_options(
            options_bootstrapper, env, raise_=True
        )

        run_tracker = RunTracker(options)
        union_membership = UnionMembership.from_rules(build_config.union_rules)

        # If we're running with the daemon, we'll be handed a warmed Scheduler, which we use
        # to initialize a session here.
        graph_session = cls._init_graph_session(
            options_initializer,
            options_bootstrapper,
            build_config,
            env,
            run_tracker.run_id,
            options,
            scheduler,
            cancellation_latch,
        )

        # Option values are usually computed lazily on demand, but command line options are
        # eagerly computed for validation.
        with options_initializer.handle_unknown_flags(options_bootstrapper, env, raise_=True):
            for scope in options.scope_to_flags.keys():
                options.for_scope(scope)

        # Verify configs.
        global_bootstrap_options = options_bootstrapper.bootstrap_options.for_global_scope()
        if global_bootstrap_options.verify_config:
            options.verify_configs(options_bootstrapper.config)

        specs = calculate_specs(
            options_bootstrapper=options_bootstrapper,
            options=options,
            build_root=get_buildroot(),
            session=graph_session.scheduler_session,
        )

        profile_path = env.get("PANTS_PROFILE")

        return cls(
            options=options,
            options_bootstrapper=options_bootstrapper,
            build_config=build_config,
            run_tracker=run_tracker,
            specs=specs,
            graph_session=graph_session,
            union_membership=union_membership,
            profile_path=profile_path,
        )
Example #13
@contextmanager
def plugin_resolution(rule_runner: RuleRunner,
                      *,
                      interpreter=None,
                      chroot=None,
                      plugins=None,
                      sdist=True):
    @contextmanager
    def provide_chroot(existing):
        if existing:
            yield existing, False
        else:
            with temporary_dir() as new_chroot:
                yield new_chroot, True

    interpreter_constraints = (PexInterpreterConstraints([
        f"=={interpreter.identity.version_str}"
    ]) if interpreter else PexInterpreterConstraints([">=3.7"]))

    with provide_chroot(chroot) as (root_dir, create_artifacts):
        env: Dict[str, str] = {}
        repo_dir = None
        if plugins:
            repo_dir = os.path.join(root_dir, "repo")
            env.update(
                PANTS_PYTHON_REPOS_REPOS=f"['file://{repo_dir}']",
                PANTS_PYTHON_REPOS_INDEXES="[]",
                PANTS_PYTHON_SETUP_RESOLVER_CACHE_TTL="1",
            )
            plugin_list = []
            for plugin in plugins:
                version = None
                if isinstance(plugin, tuple):
                    plugin, version = plugin
                plugin_list.append(
                    f"{plugin}=={version}" if version else plugin)
                if create_artifacts:
                    setup_py_args = [
                        "sdist" if sdist else "bdist_wheel", "--dist-dir",
                        "dist/"
                    ]
                    _run_setup_py(
                        rule_runner,
                        plugin,
                        interpreter_constraints,
                        version,
                        setup_py_args,
                        repo_dir,
                    )
            env["PANTS_PLUGINS"] = f"[{','.join(map(repr, plugin_list))}]"

        configpath = os.path.join(root_dir, "pants.toml")
        if create_artifacts:
            touch(configpath)
        args = [f"--pants-config-files=['{configpath}']"]

        options_bootstrapper = OptionsBootstrapper.create(env=env,
                                                          args=args,
                                                          allow_pantsrc=False)
        complete_env = CompleteEnvironment({
            **{
                k: os.environ[k]
                for k in ["PATH", "HOME", "PYENV_ROOT"] if k in os.environ
            },
            **env
        })
        bootstrap_scheduler = create_bootstrap_scheduler(
            options_bootstrapper, complete_env)
        plugin_resolver = PluginResolver(
            bootstrap_scheduler,
            interpreter_constraints=interpreter_constraints)
        cache_dir = options_bootstrapper.bootstrap_options.for_global_scope().named_caches_dir

        working_set = plugin_resolver.resolve(options_bootstrapper,
                                              complete_env,
                                              WorkingSet(entries=[]))
        for dist in working_set:
            assert (Path(os.path.realpath(cache_dir))
                    in Path(os.path.realpath(dist.location)).parents)

        yield working_set, root_dir, repo_dir
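A hedged consumer sketch (plugin name and version are illustrative): with the @contextmanager decorator in place, tests unpack the yielded tuple in a with block.

def test_plugins_sketch(rule_runner: RuleRunner) -> None:
    with plugin_resolution(rule_runner, plugins=[("demo-plugin", "1.2.3")]) as (
        working_set,
        _root_dir,
        _repo_dir,
    ):
        assert any(dist.project_name == "demo-plugin" for dist in working_set)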
Example #14
@contextmanager
def plugin_resolution(
    rule_runner: RuleRunner,
    *,
    interpreter: PythonInterpreter | None = None,
    chroot: str | None = None,
    plugins: Sequence[Plugin] = (),
    sdist: bool = True,
    working_set_entries: Sequence[Distribution] = (),
    use_pypi: bool = False,
):
    @contextmanager
    def provide_chroot(existing):
        if existing:
            yield existing, False
        else:
            with temporary_dir() as new_chroot:
                yield new_chroot, True

    # Default to resolving with whatever we're currently running with.
    interpreter_constraints = (InterpreterConstraints(
        [f"=={interpreter.identity.version_str}"]) if interpreter else None)
    artifact_interpreter_constraints = interpreter_constraints or InterpreterConstraints(
        [f"=={'.'.join(map(str, sys.version_info[:3]))}"])

    with provide_chroot(chroot) as (root_dir, create_artifacts):
        env: Dict[str, str] = {}
        repo_dir = None
        if plugins:
            repo_dir = os.path.join(root_dir, "repo")
            env.update(
                PANTS_PYTHON_REPOS_REPOS=f"['file://{repo_dir}']",
                PANTS_PYTHON_RESOLVER_CACHE_TTL="1",
            )
            if not use_pypi:
                env.update(PANTS_PYTHON_REPOS_INDEXES="[]")
            plugin_list = []
            for plugin in plugins:
                version = plugin.version
                plugin_list.append(
                    f"{plugin.name}=={version}" if version else plugin.name)
                if create_artifacts:
                    setup_py_args = [
                        "sdist" if sdist else "bdist_wheel", "--dist-dir",
                        "dist/"
                    ]
                    _run_setup_py(
                        rule_runner,
                        plugin.name,
                        artifact_interpreter_constraints,
                        version,
                        plugin.install_requires,
                        setup_py_args,
                        repo_dir,
                    )
            env["PANTS_PLUGINS"] = f"[{','.join(map(repr, plugin_list))}]"

        configpath = os.path.join(root_dir, "pants.toml")
        if create_artifacts:
            touch(configpath)
        args = [f"--pants-config-files=['{configpath}']"]

        options_bootstrapper = OptionsBootstrapper.create(env=env,
                                                          args=args,
                                                          allow_pantsrc=False)
        complete_env = CompleteEnvironment({
            **{
                k: os.environ[k]
                for k in ["PATH", "HOME", "PYENV_ROOT"] if k in os.environ
            },
            **env
        })
        bootstrap_scheduler = create_bootstrap_scheduler(options_bootstrapper)
        cache_dir = options_bootstrapper.bootstrap_options.for_global_scope().named_caches_dir

        input_working_set = WorkingSet(entries=[])
        for dist in working_set_entries:
            input_working_set.add(dist)
        plugin_resolver = PluginResolver(bootstrap_scheduler,
                                         interpreter_constraints,
                                         input_working_set)
        working_set = plugin_resolver.resolve(
            options_bootstrapper,
            complete_env,
        )
        for dist in working_set:
            assert (Path(os.path.realpath(cache_dir))
                    in Path(os.path.realpath(dist.location)).parents)

        yield working_set, root_dir, repo_dir
Example #15
def test_complete_environment(input_strs: List[str],
                              expected: Dict[str, str]) -> None:
    pants_env = CompleteEnvironment({"A": "a", "B": "b", "C": "c"})

    subset = pants_env.get_subset(input_strs)
    assert dict(subset) == expected
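The parametrize decorator that drives this test is not shown; a hedged reconstruction with illustrative cases (not copied from the source):

@pytest.mark.parametrize(
    "input_strs,expected",
    [
        ([], {}),
        (["A"], {"A": "a"}),
        (["A", "C"], {"A": "a", "C": "c"}),
        (["A=override"], {"A": "override"}),
    ],
)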
Example #16
async def setup_pytest_for_target(
    request: TestSetupRequest,
    pytest: PyTest,
    test_subsystem: TestSubsystem,
    python_setup: PythonSetup,
    coverage_config: CoverageConfig,
    coverage_subsystem: CoverageSubsystem,
    test_extra_env: TestExtraEnv,
    global_options: GlobalOptions,
    complete_env: CompleteEnvironment,
) -> TestSetup:
    transitive_targets, plugin_setups = await MultiGet(
        Get(TransitiveTargets,
            TransitiveTargetsRequest([request.field_set.address])),
        Get(AllPytestPluginSetups,
            AllPytestPluginSetupsRequest(request.field_set.address)),
    )
    all_targets = transitive_targets.closure

    interpreter_constraints = InterpreterConstraints.create_from_targets(
        all_targets, python_setup)

    requirements_pex_get = Get(
        Pex,
        PexFromTargetsRequest,
        PexFromTargetsRequest.for_requirements(
            [request.field_set.address],
            internal_only=True,
            resolve_and_lockfile=request.field_set.resolve.resolve_and_lockfile(python_setup),
        ),
    )
    pytest_pex_get = Get(
        Pex,
        PexRequest(
            output_filename="pytest.pex",
            requirements=pytest.pex_requirements(),
            interpreter_constraints=interpreter_constraints,
            internal_only=True,
        ),
    )

    # Ensure that the empty extra output dir exists.
    extra_output_directory_digest_get = Get(
        Digest, CreateDigest([Directory(_EXTRA_OUTPUT_DIR)]))

    prepared_sources_get = Get(
        PythonSourceFiles,
        PythonSourceFilesRequest(all_targets, include_files=True))

    # Get the file names for the test_target so that we can specify to Pytest precisely which files
    # to test, rather than using auto-discovery.
    field_set_source_files_get = Get(
        SourceFiles, SourceFilesRequest([request.field_set.sources]))

    (
        pytest_pex,
        requirements_pex,
        prepared_sources,
        field_set_source_files,
        extra_output_directory_digest,
    ) = await MultiGet(
        pytest_pex_get,
        requirements_pex_get,
        prepared_sources_get,
        field_set_source_files_get,
        extra_output_directory_digest_get,
    )

    local_dists = await Get(
        LocalDistsPex,
        LocalDistsPexRequest(
            [request.field_set.address],
            interpreter_constraints=interpreter_constraints,
            sources=prepared_sources,
        ),
    )

    pytest_runner_pex_get = Get(
        VenvPex,
        PexRequest(
            output_filename="pytest_runner.pex",
            interpreter_constraints=interpreter_constraints,
            main=pytest.main,
            internal_only=True,
            pex_path=[pytest_pex, requirements_pex, local_dists.pex],
        ),
    )
    config_files_get = Get(
        ConfigFiles,
        ConfigFilesRequest,
        pytest.config_request(field_set_source_files.snapshot.dirs),
    )
    pytest_runner_pex, config_files = await MultiGet(pytest_runner_pex_get,
                                                     config_files_get)

    input_digest = await Get(
        Digest,
        MergeDigests((
            coverage_config.digest,
            local_dists.remaining_sources.source_files.snapshot.digest,
            config_files.snapshot.digest,
            extra_output_directory_digest,
            *(plugin_setup.digest for plugin_setup in plugin_setups),
        )),
    )

    add_opts = [f"--color={'yes' if global_options.options.colors else 'no'}"]
    output_files = []

    results_file_name = None
    if pytest.options.junit_xml_dir and not request.is_debug:
        results_file_name = f"{request.field_set.address.path_safe_spec}.xml"
        add_opts.extend((f"--junitxml={results_file_name}", "-o",
                         f"junit_family={pytest.options.junit_family}"))
        output_files.append(results_file_name)

    coverage_args = []
    if test_subsystem.use_coverage and not request.is_debug:
        pytest.validate_pytest_cov_included()
        output_files.append(".coverage")

        if coverage_subsystem.filter:
            cov_args = [f"--cov={morf}" for morf in coverage_subsystem.filter]
        else:
            # N.B.: Passing `--cov=` or `--cov=.` to communicate "record coverage for all sources"
            # fails in certain contexts as detailed in:
            #   https://github.com/pantsbuild/pants/issues/12390
            # Instead we focus coverage on just the directories containing python source files
            # materialized to the Process chroot.
            cov_args = [
                f"--cov={source_root}"
                for source_root in prepared_sources.source_roots
            ]

        coverage_args = [
            "--cov-report=",  # Turn off output.
            f"--cov-config={coverage_config.path}",
            *cov_args,
        ]

    extra_env = {
        "PYTEST_ADDOPTS": " ".join(add_opts),
        "PEX_EXTRA_SYS_PATH": ":".join(prepared_sources.source_roots),
        **test_extra_env.env,
        # NOTE: `complete_env` intentionally after `test_extra_env` to allow overriding within
        # `python_tests`
        **complete_env.get_subset(request.field_set.extra_env_vars.value or ()),
    }

    # Cache test runs only if they are successful, or not at all if `--test-force`.
    cache_scope = (ProcessCacheScope.PER_SESSION
                   if test_subsystem.force else ProcessCacheScope.SUCCESSFUL)
    process = await Get(
        Process,
        VenvPexProcess(
            pytest_runner_pex,
            argv=(*pytest.options.args, *coverage_args,
                  *field_set_source_files.files),
            extra_env=extra_env,
            input_digest=input_digest,
            output_directories=(_EXTRA_OUTPUT_DIR, ),
            output_files=output_files,
            timeout_seconds=request.field_set.timeout.calculate_from_global_options(pytest),
            execution_slot_variable=pytest.options.execution_slot_var,
            description=f"Run Pytest for {request.field_set.address}",
            level=LogLevel.DEBUG,
            cache_scope=cache_scope,
        ),
    )
    return TestSetup(process, results_file_name=results_file_name)
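A hedged BUILD-file sketch of what feeds request.field_set.extra_env_vars above (target name and values are illustrative):

python_tests(
    name="tests",
    # Bare names are copied from the captured environment; NAME=value entries
    # are set explicitly. These land after test_extra_env in extra_env above,
    # so they win on key conflicts.
    extra_env_vars=["DB_HOST=localhost", "CI"],
)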
Example #17
    def __init__(
            self,
            *,
            rules: Iterable | None = None,
            target_types: Iterable[type[Target]] | None = None,
            objects: dict[str, Any] | None = None,
            context_aware_object_factories: dict[str, Any] | None = None,
            isolated_local_store: bool = False,
            preserve_tmpdirs: bool = False,
            ca_certs_path: str | None = None,
            bootstrap_args: Iterable[str] = (),
    ) -> None:

        bootstrap_args = [*bootstrap_args]

        root_dir: Path | None = None
        if preserve_tmpdirs:
            root_dir = Path(mkdtemp(prefix="RuleRunner."))
            print(
                f"Preserving rule runner temporary directories at {root_dir}.",
                file=sys.stderr)
            bootstrap_args.extend([
                "--no-process-execution-local-cleanup",
                f"--local-execution-root-dir={root_dir}",
            ])
            build_root = (root_dir / "BUILD_ROOT").resolve()
            build_root.mkdir()
            self.build_root = str(build_root)
        else:
            self.build_root = os.path.realpath(
                safe_mkdtemp(prefix="_BUILD_ROOT"))

        safe_mkdir(self.pants_workdir)
        BuildRoot().path = self.build_root

        # TODO: Redesign rule registration for tests to be more ergonomic and to make this less
        #  special-cased.
        all_rules = (
            *(rules or ()),
            *source_root.rules(),
            QueryRule(WrappedTarget, [Address]),
            QueryRule(UnionMembership, []),
        )
        build_config_builder = BuildConfiguration.Builder()
        build_config_builder.register_aliases(
            BuildFileAliases(
                objects=objects,
                context_aware_object_factories=context_aware_object_factories))
        build_config_builder.register_rules("_dummy_for_test_", all_rules)
        build_config_builder.register_target_types("_dummy_for_test_",
                                                   target_types or ())
        self.build_config = build_config_builder.create()

        self.environment = CompleteEnvironment({})
        self.options_bootstrapper = create_options_bootstrapper(
            args=bootstrap_args)
        options = self.options_bootstrapper.full_options(self.build_config)
        global_options = self.options_bootstrapper.bootstrap_options.for_global_scope()

        dynamic_remote_options, _ = DynamicRemoteOptions.from_options(
            options, self.environment)
        local_store_options = LocalStoreOptions.from_options(global_options)
        if isolated_local_store:
            if root_dir:
                lmdb_store_dir = root_dir / "lmdb_store"
                lmdb_store_dir.mkdir()
                store_dir = str(lmdb_store_dir)
            else:
                store_dir = safe_mkdtemp(prefix="lmdb_store.")
            local_store_options = dataclasses.replace(local_store_options,
                                                      store_dir=store_dir)

        local_execution_root_dir = global_options.local_execution_root_dir
        named_caches_dir = global_options.named_caches_dir

        graph_session = EngineInitializer.setup_graph_extended(
            pants_ignore_patterns=GlobalOptions.compute_pants_ignore(
                self.build_root, global_options),
            use_gitignore=False,
            local_store_options=local_store_options,
            local_execution_root_dir=local_execution_root_dir,
            named_caches_dir=named_caches_dir,
            build_root=self.build_root,
            build_configuration=self.build_config,
            executor=_EXECUTOR,
            execution_options=ExecutionOptions.from_options(
                global_options, dynamic_remote_options),
            ca_certs_path=ca_certs_path,
            engine_visualize_to=None,
        ).new_session(
            build_id="buildid_for_test",
            session_values=SessionValues({
                OptionsBootstrapper: self.options_bootstrapper,
                CompleteEnvironment: self.environment,
            }),
        )
        self.scheduler = graph_session.scheduler_session
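A hedged construction sketch: every keyword below maps to a parameter of the __init__ shown above (values are illustrative).

rule_runner = RuleRunner(
    isolated_local_store=True,
    bootstrap_args=["--level=debug"],
)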
Example #18
    def __init__(
        self,
        *,
        rules: Iterable | None = None,
        target_types: Iterable[type[Target]] | None = None,
        objects: dict[str, Any] | None = None,
        context_aware_object_factories: dict[str, Any] | None = None,
        isolated_local_store: bool = False,
        ca_certs_path: str | None = None,
    ) -> None:
        self.build_root = os.path.realpath(mkdtemp(suffix="_BUILD_ROOT"))
        safe_mkdir(self.build_root, clean=True)
        safe_mkdir(self.pants_workdir)
        BuildRoot().path = self.build_root

        # TODO: Redesign rule registration for tests to be more ergonomic and to make this less
        #  special-cased.
        all_rules = (
            *(rules or ()),
            *source_root.rules(),
            QueryRule(WrappedTarget, [Address]),
            QueryRule(UnionMembership, []),
        )
        build_config_builder = BuildConfiguration.Builder()
        build_config_builder.register_aliases(
            BuildFileAliases(
                objects=objects,
                context_aware_object_factories=context_aware_object_factories))
        build_config_builder.register_rules(all_rules)
        build_config_builder.register_target_types(target_types or ())
        self.build_config = build_config_builder.create()

        self.environment = CompleteEnvironment({})
        self.options_bootstrapper = create_options_bootstrapper()
        options = self.options_bootstrapper.full_options(self.build_config)
        global_options = self.options_bootstrapper.bootstrap_options.for_global_scope()
        local_store_dir = (os.path.realpath(safe_mkdtemp())
                           if isolated_local_store else
                           global_options.local_store_dir)
        local_execution_root_dir = global_options.local_execution_root_dir
        named_caches_dir = global_options.named_caches_dir

        graph_session = EngineInitializer.setup_graph_extended(
            pants_ignore_patterns=GlobalOptions.compute_pants_ignore(
                self.build_root, global_options),
            use_gitignore=False,
            local_store_dir=local_store_dir,
            local_execution_root_dir=local_execution_root_dir,
            named_caches_dir=named_caches_dir,
            native=Native(),
            build_root=self.build_root,
            build_configuration=self.build_config,
            executor=_EXECUTOR,
            execution_options=ExecutionOptions.from_options(
                options, self.environment),
            ca_certs_path=ca_certs_path,
            native_engine_visualize_to=None,
        ).new_session(
            build_id="buildid_for_test",
            session_values=SessionValues({
                OptionsBootstrapper: self.options_bootstrapper,
                CompleteEnvironment: self.environment,
            }),
        )
        self.scheduler = graph_session.scheduler_session