Example #1
async def make_process_uncacheable(uncacheable_process: UncacheableProcess) -> Process:
    uuid = await Get(
        UUID, UUIDRequest, UUIDRequest.scoped(cast(UUIDScope, uncacheable_process.scope.value))
    )

    process = uncacheable_process.process
    env = dict(process.env)

    # This is a slightly hacky way to force the process to run: since the env var
    #  value is unique, this input combination will never have been seen before,
    #  and therefore never cached. The two downsides are:
    #  1. This leaks into the process' environment, albeit with a funky var name that is
    #     unlikely to cause problems in practice.
    #  2. This run will be cached even though it can never be re-used.
    # TODO: A more principled way of forcing rules to run?
    env["__PANTS_FORCE_PROCESS_RUN__"] = str(uuid)

    return dataclasses.replace(process, env=FrozenDict(env))
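For orientation, the sketch below reconstructs the request type this rule consumes, inferred only from how the rule uses it: a `.process` field, and a `.scope` field whose `.value` casts to `UUIDScope`. The name `ProcessScope`, its members, the default, and the import paths are assumptions rather than verified Pants API.

# A minimal sketch, not the actual Pants definition: everything here is
# inferred from the rule above, and import paths may differ across releases.
from dataclasses import dataclass
from enum import Enum

from pants.engine.internals.uuid import UUIDScope  # assumed module path
from pants.engine.process import Process


class ProcessScope(Enum):  # hypothetical name
    # Each member's .value is a UUIDScope, matching the rule's
    # `uncacheable_process.scope.value` access.
    PER_CALL = UUIDScope.PER_CALL
    PER_SESSION = UUIDScope.PER_SESSION


@dataclass(frozen=True)
class UncacheableProcess:
    process: Process
    scope: ProcessScope = ProcessScope.PER_SESSION  # assumed default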
Example #2
async def setup_shunit2_for_target(
    request: TestSetupRequest,
    bash_program: BashProgram,
    bash_setup: BashSetup,
    test_subsystem: TestSubsystem,
) -> TestSetup:
    # Because shunit2 is a simple Bash file, we download it using `DownloadFile`. Normally, we
    # would install the test runner through `ExternalTool`. See
    # https://www.pantsbuild.org/v2.0/docs/rules-api-installing-tools and
    # https://www.pantsbuild.org/v2.0/docs/rules-api-file-system.
    shunit2_script_request = Get(
        Digest,
        DownloadFile(
            url="https://raw.githubusercontent.com/kward/shunit2/b9102bb763cc603b3115ed30a5648bf950548097/shunit2",
            expected_digest=Digest(
                "1f11477b7948150d1ca50cdd41d89be4ed2acd137e26d2e0fe23966d0e272cc5",
                40987,
            ),
        ),
    )

    transitive_targets_request = Get(
        TransitiveTargets,
        TransitiveTargetsRequest([request.field_set.address]))

    shunit2_script, transitive_targets = await MultiGet(
        shunit2_script_request, transitive_targets_request)

    # We need to include all relevant transitive dependencies in the environment. We also get the
    # test's sources so that we can check that it has `source ./shunit2` at the bottom of it.
    #
    # Because we might modify the test files, we leave the tests out of
    # `dependencies_source_files_request` by using `transitive_targets.dependencies` instead of
    # `transitive_targets.closure`. This makes sure that we don't accidentally include the
    # unmodified test files and the modified test files in the same input. See
    # https://www.pantsbuild.org/v2.0/docs/rules-api-and-target-api.
    dependencies_source_files_request = Get(
        SourceFiles,
        SourceFilesRequest(
            (tgt.get(Sources) for tgt in transitive_targets.dependencies),
            for_sources_types=(BashSources, FilesSources, ResourcesSources),
        ),
    )
    test_source_files_request = Get(
        SourceFiles, SourceFilesRequest([request.field_set.sources]))
    dependencies_source_files, test_source_files = await MultiGet(
        dependencies_source_files_request, test_source_files_request)

    # To check if the test files already have `source ./shunit2` in them, we need to look at the
    # actual file content. We use `DigestContents` for this, and then use `CreateDigest` to create
    # a digest of the (possibly) updated test files. See
    # https://www.pantsbuild.org/v2.0/docs/rules-api-file-system.
    #
    # Most test runners don't modify their test files like we do here, so most test runners can
    # skip this step.
    test_files_content = await Get(DigestContents, Digest,
                                   test_source_files.snapshot.digest)
    updated_test_files_content = []
    for file_content in test_files_content:
        if (b"source ./shunit2" in file_content.content
                or b". ./shunit2" in file_content.content):
            updated_test_files_content.append(file_content)
        else:
            updated_file_content = FileContent(
                path=file_content.path,
                content=file_content.content + b"\nsource ./shunit2\n",
            )
            updated_test_files_content.append(updated_file_content)
    updated_test_source_files = await Get(
        Digest, CreateDigest(updated_test_files_content))

    # The Process needs a single input `Digest`, so we merge everything together. See
    # https://www.pantsbuild.org/v2.0/docs/rules-api-file-system.
    input_digest = await Get(
        Digest,
        MergeDigests([
            shunit2_script,
            updated_test_source_files,
            dependencies_source_files.snapshot.digest,
        ]),
    )

    # We must check if `test --force` was used, and if so, invalidate the cache by
    # mixing a randomly generated UUID into the environment.
    extra_env = {}
    if test_subsystem.force and not request.is_debug:
        uuid = await Get(UUID, UUIDRequest())
        extra_env["__PANTS_FORCE_TEST_RUN__"] = str(uuid)

    process = Process(
        argv=[bash_program.exe, *test_source_files.snapshot.files],
        input_digest=input_digest,
        description=f"Run shunit2 on {request.field_set.address}.",
        level=LogLevel.DEBUG,
        env=bash_setup.env_dict,
        timeout_seconds=request.field_set.timeout.value,
    )
    return TestSetup(process)
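One practical note on the `DownloadFile` call at the top of this rule: the `expected_digest` pairs the SHA-256 of the downloaded file's bytes with its size in bytes. A quick local sketch for deriving both fields, assuming the file has already been fetched to disk:

# Compute the two Digest fields for a DownloadFile call, assuming the file was
# fetched locally first (e.g. with curl -O <url>).
import hashlib
from pathlib import Path

data = Path("shunit2").read_bytes()
print(hashlib.sha256(data).hexdigest())  # fingerprint, e.g. "1f11477b79..."
print(len(data))                         # length in bytes, e.g. 40987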
Example #3
def test_identical_uuids(self):
    uuid1 = self.request_single_product(UUID, UUIDRequest(randomizer=0.0))
    uuid2 = self.request_single_product(UUID, UUIDRequest(randomizer=0.0))
    assert uuid1 == uuid2
Example #4
def test_distinct_uuids(self):
    uuid1 = self.request_single_product(UUID, UUIDRequest())
    uuid2 = self.request_single_product(UUID, UUIDRequest())
    assert uuid1 != uuid2
Example #5
def test_identical_uuids(rule_runner: RuleRunner) -> None:
    uuid1 = rule_runner.request_product(UUID, [UUIDRequest(randomizer=0.0)])
    uuid2 = rule_runner.request_product(UUID, [UUIDRequest(randomizer=0.0)])
    assert uuid1 == uuid2
Example #6
def test_distinct_uuids(rule_runner: RuleRunner) -> None:
    uuid1 = rule_runner.request_product(UUID, [UUIDRequest()])
    uuid2 = rule_runner.request_product(UUID, [UUIDRequest()])
    assert uuid1 != uuid2
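The four tests above hinge on one mechanism: pinning `randomizer=0.0` makes two requests equal, while the default makes every request distinct. A simplified, hypothetical model of why that yields identical vs. distinct UUIDs (the real engine memoizes rule results per session, keyed on the request instance):

# A simplified model, not the real engine: rule results are memoized per
# session, keyed on the (frozen, hashable) request.
from dataclasses import dataclass, field
from random import random
from uuid import UUID, uuid4


@dataclass(frozen=True)
class UUIDRequest:
    # Defaults to a fresh random float, so two default requests are distinct
    # memo keys; pinning the randomizer makes the keys equal.
    randomizer: float = field(default_factory=random)


_memo: dict[UUIDRequest, UUID] = {}


def request_uuid(request: UUIDRequest) -> UUID:
    # Stand-in for the engine's memoization of the UUID-generating rule.
    return _memo.setdefault(request, uuid4())


assert request_uuid(UUIDRequest(randomizer=0.0)) == request_uuid(UUIDRequest(randomizer=0.0))
assert request_uuid(UUIDRequest()) != request_uuid(UUIDRequest())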
Example #7
async def run_python_test(
    field_set: PythonTestFieldSet,
    setup: TestTargetSetup,
    global_options: GlobalOptions,
    test_subsystem: TestSubsystem,
) -> TestResult:
    if field_set.is_conftest():
        return TestResult.skip(field_set.address)

    add_opts = [f"--color={'yes' if global_options.options.colors else 'no'}"]

    output_files = []
    # Configure generation of JUnit-compatible test report.
    test_results_file = None
    if setup.xml_dir:
        test_results_file = f"{field_set.address.path_safe_spec}.xml"
        add_opts.extend((f"--junitxml={test_results_file}", "-o",
                         f"junit_family={setup.junit_family}"))
        output_files.append(test_results_file)

    # Configure generation of a coverage report.
    if test_subsystem.use_coverage:
        output_files.append(".coverage")

    env = {
        "PYTEST_ADDOPTS": " ".join(add_opts),
        "PEX_EXTRA_SYS_PATH": ":".join(setup.source_roots)
    }

    if test_subsystem.force:
        # This is a slightly hacky way to force the process to run: since the env var
        #  value is unique, this input combination will never have been seen before,
        #  and therefore never cached. The two downsides are:
        #  1. This leaks into the test's environment, albeit with a funky var name that is
        #     unlikely to cause problems in practice.
        #  2. This run will be cached even though it can never be re-used.
        # TODO: A more principled way of forcing rules to run?
        uuid = await Get(UUID, UUIDRequest())
        env["__PANTS_FORCE_TEST_RUN__"] = str(uuid)

    result = await Get(
        FallibleProcessResult,
        PexProcess(
            setup.test_runner_pex,
            argv=setup.args,
            input_digest=setup.input_digest,
            output_files=tuple(output_files) if output_files else None,
            description=f"Run Pytest for {field_set.address}",
            timeout_seconds=setup.timeout_seconds,
            extra_env=env,
            execution_slot_variable=setup.execution_slot_variable,
            level=LogLevel.DEBUG,
        ),
    )

    coverage_data = None
    if test_subsystem.use_coverage:
        coverage_snapshot = await Get(
            Snapshot,
            DigestSubset(result.output_digest, PathGlobs([".coverage"])))
        if coverage_snapshot.files == (".coverage",):
            coverage_data = PytestCoverageData(field_set.address,
                                               coverage_snapshot.digest)
        else:
            logger.warning(
                f"Failed to generate coverage data for {field_set.address}.")

    xml_results_digest = None
    if test_results_file:
        xml_results_snapshot = await Get(
            Snapshot,
            DigestSubset(result.output_digest, PathGlobs([test_results_file])))
        if xml_results_snapshot.files == (test_results_file,):
            xml_results_digest = await Get(
                Digest,
                AddPrefix(xml_results_snapshot.digest,
                          setup.xml_dir),  # type: ignore[arg-type]
            )
        else:
            logger.warning(
                f"Failed to generate JUnit XML data for {field_set.address}.")

    return TestResult.from_fallible_process_result(
        result,
        coverage_data=coverage_data,
        xml_results=xml_results_digest,
        address=field_set.address,
    )
Example #8
async def setup_pytest_for_target(
    request: TestSetupRequest,
    pytest: PyTest,
    test_subsystem: TestSubsystem,
    python_setup: PythonSetup,
    coverage_config: CoverageConfig,
    coverage_subsystem: CoverageSubsystem,
    global_options: GlobalOptions,
) -> TestSetup:
    test_addresses = Addresses((request.field_set.address,))

    transitive_targets = await Get(TransitiveTargets, Addresses,
                                   test_addresses)
    all_targets = transitive_targets.closure

    interpreter_constraints = PexInterpreterConstraints.create_from_compatibility_fields(
        (tgt[PythonInterpreterCompatibility] for tgt in all_targets
         if tgt.has_field(PythonInterpreterCompatibility)),
        python_setup,
    )

    # NB: We set `--not-zip-safe` because Pytest plugin discovery, which uses
    # `importlib_metadata` and thus `zipp`, does not play nicely when doing import magic directly
    # from zip files. `zipp` has pathologically bad behavior with large zipfiles.
    # TODO: this does have a performance cost as the pex must now be expanded to disk. Long term,
    # it would be better to fix `zipp` (whose fix would then need to be used by importlib_metadata
    # and then by Pytest). See https://github.com/jaraco/zipp/pull/26.
    additional_args_for_pytest = ("--not-zip-safe",)

    pytest_pex_request = Get(
        Pex,
        PexRequest(
            output_filename="pytest.pex",
            requirements=PexRequirements(pytest.get_requirement_strings()),
            interpreter_constraints=interpreter_constraints,
            additional_args=additional_args_for_pytest,
            internal_only=True,
        ),
    )

    # Defaults to zip_safe=False.
    requirements_pex_request = Get(
        Pex,
        PexFromTargetsRequest,
        PexFromTargetsRequest.for_requirements(test_addresses,
                                               internal_only=True),
    )

    test_runner_pex_request = Get(
        Pex,
        PexRequest(
            interpreter_constraints=interpreter_constraints,
            output_filename="test_runner.pex",
            entry_point="pytest:main",
            additional_args=(
                "--pex-path",
                # TODO(John Sirois): Support shading python binaries:
                #   https://github.com/pantsbuild/pants/issues/9206
                # Right now any pytest transitive requirements will shadow corresponding user
                # requirements which will lead to problems when APIs that are used by either
                # `pytest:main` or the tests themselves break between the two versions.
                ":".join((
                    pytest_pex_request.subject.output_filename,
                    requirements_pex_request.subject.output_filename,
                )),
            ),
            internal_only=True,
        ),
    )

    prepared_sources_request = Get(
        PythonSourceFiles,
        PythonSourceFilesRequest(all_targets, include_files=True))

    # Get the file names for the test target so that we can tell Pytest precisely which files
    # to test, rather than using auto-discovery.
    field_set_source_files_request = Get(
        SourceFiles, SourceFilesRequest([request.field_set.sources]))

    (
        pytest_pex,
        requirements_pex,
        test_runner_pex,
        prepared_sources,
        field_set_source_files,
    ) = await MultiGet(
        pytest_pex_request,
        requirements_pex_request,
        test_runner_pex_request,
        prepared_sources_request,
        field_set_source_files_request,
    )

    input_digest = await Get(
        Digest,
        MergeDigests((
            coverage_config.digest,
            prepared_sources.source_files.snapshot.digest,
            requirements_pex.digest,
            pytest_pex.digest,
            test_runner_pex.digest,
        )),
    )

    add_opts = [f"--color={'yes' if global_options.options.colors else 'no'}"]
    output_files = []

    results_file_name = None
    if pytest.options.junit_xml_dir and not request.is_debug:
        results_file_name = f"{request.field_set.address.path_safe_spec}.xml"
        add_opts.extend((f"--junitxml={results_file_name}", "-o",
                         f"junit_family={pytest.options.junit_family}"))
        output_files.append(results_file_name)

    coverage_args = []
    if test_subsystem.use_coverage and not request.is_debug:
        output_files.append(".coverage")
        cov_paths = coverage_subsystem.filter if coverage_subsystem.filter else (".",)
        coverage_args = [
            "--cov-report=",  # Turn off output.
            *itertools.chain.from_iterable(["--cov", cov_path]
                                           for cov_path in cov_paths),
        ]

    extra_env = {
        "PYTEST_ADDOPTS": " ".join(add_opts),
        "PEX_EXTRA_SYS_PATH": ":".join(prepared_sources.source_roots),
    }

    if test_subsystem.force and not request.is_debug:
        # This is a slightly hacky way to force the process to run: since the env var
        #  value is unique, this input combination will never have been seen before,
        #  and therefore never cached. The two downsides are:
        #  1. This leaks into the test's environment, albeit with a funky var name that is
        #     unlikely to cause problems in practice.
        #  2. This run will be cached even though it can never be re-used.
        # TODO: A more principled way of forcing rules to run?
        uuid = await Get(UUID, UUIDRequest())
        extra_env["__PANTS_FORCE_TEST_RUN__"] = str(uuid)

    process = await Get(
        Process,
        PexProcess(
            test_runner_pex,
            argv=(*pytest.options.args, *coverage_args,
                  *field_set_source_files.files),
            extra_env=extra_env,
            input_digest=input_digest,
            output_files=output_files,
            timeout_seconds=request.field_set.timeout.calculate_from_global_options(pytest),
            execution_slot_variable=pytest.options.execution_slot_var,
            description=f"Run Pytest for {request.field_set.address}",
            level=LogLevel.DEBUG,
        ),
    )
    return TestSetup(process, results_file_name=results_file_name)
Example #9
def test_identical_uuids_session_scope(rule_runner: RuleRunner) -> None:
    uuid1 = rule_runner.request(UUID,
                                [UUIDRequest.scoped(UUIDScope.PER_SESSION)])
    uuid2 = rule_runner.request(UUID,
                                [UUIDRequest.scoped(UUIDScope.PER_SESSION)])
    assert uuid1 == uuid2
Example #10
def test_distinct_uuids_call_scope(rule_runner: RuleRunner) -> None:
    uuid1 = rule_runner.request(UUID, [UUIDRequest()])
    uuid2 = rule_runner.request(UUID, [UUIDRequest(scope="bob")])
    uuid3 = rule_runner.request(UUID, [UUIDRequest.scoped(UUIDScope.PER_CALL)])
    uuid4 = rule_runner.request(UUID, [UUIDRequest.scoped(UUIDScope.PER_CALL)])
    assert uuid1 != uuid2 != uuid3 != uuid4
Example #11
def test_identical_uuids_same_scope(rule_runner: RuleRunner) -> None:
    uuid1 = rule_runner.request(UUID, [UUIDRequest(scope="this")])
    uuid2 = rule_runner.request(UUID, [UUIDRequest(scope="this")])
    assert uuid1 == uuid2
Example #12
def test_distinct_uuids_different_scopes(rule_runner: RuleRunner) -> None:
    uuid1 = rule_runner.request(UUID, [UUIDRequest(scope="this")])
    uuid2 = rule_runner.request(UUID, [UUIDRequest(scope="that")])
    assert uuid1 != uuid2
Example #13
def test_distinct_uuids_default_scope(rule_runner: RuleRunner) -> None:
    uuid1 = rule_runner.request(UUID, [UUIDRequest()])
    uuid2 = rule_runner.request(UUID, [UUIDRequest()])
    assert uuid1 != uuid2
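Taken together, the five scope tests pin down the contract: a default request behaves like `PER_CALL` and is fresh every time; equal string scopes, or `PER_SESSION`, share one UUID within a session; distinct scopes get distinct UUIDs. A simplified, hypothetical model of that contract (the enum values are placeholders; the real engine implements this via rule memoization):

# A simplified model of the observable contract, not the real implementation.
from enum import Enum
from uuid import UUID, uuid4


class UUIDScope(Enum):
    PER_CALL = "call"        # placeholder value
    PER_SESSION = "session"  # placeholder value


_session_memo: dict[str, UUID] = {}


def request_uuid(scope: str = UUIDScope.PER_CALL.value) -> UUID:
    if scope == UUIDScope.PER_CALL.value:
        return uuid4()  # never memoized: fresh on every call
    return _session_memo.setdefault(scope, uuid4())


assert request_uuid("this") == request_uuid("this")  # same scope: identical
assert request_uuid("this") != request_uuid("that")  # different scopes: distinct
assert request_uuid() != request_uuid()              # default acts as PER_CALL
assert request_uuid(UUIDScope.PER_SESSION.value) == request_uuid(UUIDScope.PER_SESSION.value)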