Example #1
    def __new__(cls, *args, **kwargs):
      # TODO: Ideally we could execute this exactly once per `cls` but it should be a
      # relatively cheap check.
      if not hasattr(cls.__eq__, '_eq_override_canary'):
        raise cls.make_type_error('Should not override __eq__.')

      try:
        this_object = super(DataType, cls).__new__(cls, *args, **kwargs)
      except TypeError as e:
        raise cls.make_type_error(
          "error in namedtuple() base constructor: {}".format(e))

      # TODO: Make this kind of exception pattern (filter for errors then display them all at once)
      # more ergonomic.
      type_failure_msgs = []
      for field_name, field_constraint in fields_with_constraints.items():
        field_value = getattr(this_object, field_name)
        try:
          field_constraint.validate_satisfied_by(field_value)
        except TypeConstraintError as e:
          type_failure_msgs.append(
            "field '{}' was invalid: {}".format(field_name, e))
      if type_failure_msgs:
        raise cls.make_type_error(
          '{} type checking constructor arguments:\n{}'
          .format(pluralize(len(type_failure_msgs), 'error'),
                  '\n'.join(type_failure_msgs)))

      return this_object
Example #2
    def _raise_on_error(self, throws: list[Throw]) -> NoReturn:
        exception_noun = pluralize(len(throws), "Exception")

        if self._scheduler.include_trace_on_error:
            throw = throws[0]
            etb = throw.engine_traceback
            python_traceback_str = throw.python_traceback or ""
            engine_traceback_str = ""
            others_msg = f"\n(and {len(throws) - 1} more)" if len(
                throws) > 1 else ""
            if etb:
                sep = "\n  in "
                engine_traceback_str = "Engine traceback:" + sep + sep.join(
                    reversed(etb)) + "\n"
            raise ExecutionError(
                f"{exception_noun} encountered:\n\n"
                f"{engine_traceback_str}"
                f"{python_traceback_str}"
                f"{others_msg}",
                wrapped_exceptions=tuple(t.exc for t in throws),
            )
        else:
            exception_strs = "\n  ".join(
                f"{type(t.exc).__name__}: {str(t.exc)}" for t in throws)
            raise ExecutionError(
                f"{exception_noun} encountered:\n\n"
                f"  {exception_strs}\n",
                wrapped_exceptions=tuple(t.exc for t in throws),
            )
Example #3
        def __new__(cls, *args, **kwargs):
            # TODO: Ideally we could execute this exactly once per `cls` but it should be a
            # relatively cheap check.
            if not hasattr(cls.__eq__, '_eq_override_canary'):
                raise cls.make_type_error('Should not override __eq__.')

            try:
                this_object = super(DataType,
                                    cls).__new__(cls, *args, **kwargs)
            except TypeError as e:
                raise cls.make_type_error(
                    "error in namedtuple() base constructor: {}".format(e))

            # TODO: Make this kind of exception pattern (filter for errors then display them all at once)
            # more ergonomic.
            type_failure_msgs = []
            for field_name, field_constraint in fields_with_constraints.items(
            ):
                field_value = getattr(this_object, field_name)
                try:
                    field_constraint.validate_satisfied_by(field_value)
                except TypeConstraintError as e:
                    type_failure_msgs.append(
                        "field '{}' was invalid: {}".format(field_name, e))
            if type_failure_msgs:
                raise cls.make_type_error(
                    '{} type checking constructor arguments:\n{}'.format(
                        pluralize(len(type_failure_msgs), 'error'),
                        '\n'.join(type_failure_msgs)))

            return this_object
Example #4
    def __new__(cls, *args, **kwargs):
      # TODO: Ideally we could execute this exactly once per `cls` but it should be a
      # relatively cheap check.
      if not hasattr(cls.__eq__, '_eq_override_canary'):
        raise cls.make_type_error('Should not override __eq__.')

      try:
        this_object = super().__new__(cls, *args, **kwargs)
      except TypeError as e:
        raise cls.make_type_error(
          f"error in namedtuple() base constructor: {e}")

      # TODO: Make this kind of exception pattern (filter for errors then display them all at once)
      # more ergonomic.
      type_failure_msgs = []
      for field_name, field_constraint in fields_with_constraints.items():
        # TODO: figure out how to disallow users from accessing datatype fields by index!
        # TODO: getattr() with a specific `field_name` against a `namedtuple` is apparently
        # converted into a __getitem__() call with the argument being the integer index of the field
        # with that name -- this indirection is not shown in the stack trace when overriding
        # __getitem__() to raise on `int` inputs. See https://stackoverflow.com/a/6738724 for the
        # greater context of how `namedtuple` differs from other "normal" python classes.
        field_value = getattr(this_object, field_name)
        try:
          field_constraint.validate_satisfied_by(field_value)
        except TypeConstraintError as e:
          type_failure_msgs.append(
            f"field '{field_name}' was invalid: {e}")
      if type_failure_msgs:
        raise cls.make_type_error(
          '{} type checking constructor arguments:\n{}'
          .format(pluralize(len(type_failure_msgs), 'error'),
                  '\n'.join(type_failure_msgs)))

      return this_object
Example #5
  def console_output(self, ignored_targets):
    self.validate_target_roots()
    from_target = self.target_roots[0]
    to_target = self.target_roots[1]

    paths = list(find_paths_breadth_first(from_target, to_target, self.log))
    yield 'Found {}'.format(pluralize(len(paths), 'path'))
    if paths:
      yield ''
      for path in paths:
        yield '\t{}'.format(format_path(path))
Example #6
 def test_pluralize(self) -> None:
   self.assertEqual('1 bat', pluralize(1, 'bat'))
   self.assertEqual('1 boss', pluralize(1, 'boss'))
   self.assertEqual('2 bats', pluralize(2, 'bat'))
   self.assertEqual('2 bosses', pluralize(2, 'boss'))
   self.assertEqual('0 bats', pluralize(0, 'bat'))
   self.assertEqual('0 bosses', pluralize(0, 'boss'))
Example #7
 def test_pluralize(self):
     self.assertEquals("1 bat", pluralize(1, "bat"))
     self.assertEquals("1 boss", pluralize(1, "boss"))
     self.assertEquals("2 bats", pluralize(2, "bat"))
     self.assertEquals("2 bosses", pluralize(2, "boss"))
     self.assertEquals("0 bats", pluralize(0, "bat"))
     self.assertEquals("0 bosses", pluralize(0, "boss"))
Example #8
 def test_pluralize(self) -> None:
     self.assertEqual("1 bat", pluralize(1, "bat"))
     self.assertEqual("1 boss", pluralize(1, "boss"))
     self.assertEqual("2 bats", pluralize(2, "bat"))
     self.assertEqual("2 bosses", pluralize(2, "boss"))
     self.assertEqual("0 bats", pluralize(0, "bat"))
     self.assertEqual("0 bosses", pluralize(0, "boss"))
Example #9
 def test_pluralize(self):
   self.assertEqual('1 bat', pluralize(1, 'bat'))
   self.assertEqual('1 boss', pluralize(1, 'boss'))
   self.assertEqual('2 bats', pluralize(2, 'bat'))
   self.assertEqual('2 bosses', pluralize(2, 'boss'))
   self.assertEqual('0 bats', pluralize(0, 'bat'))
   self.assertEqual('0 bosses', pluralize(0, 'boss'))
Example #10
def test_pluralize() -> None:
    assert "1 bat" == pluralize(1, "bat")
    assert "1 boss" == pluralize(1, "boss")
    assert "2 bats" == pluralize(2, "bat")
    assert "2 bosses" == pluralize(2, "boss")
    assert "0 bats" == pluralize(0, "bat")
    assert "0 bosses" == pluralize(0, "boss")
Example #11
 def _trace_on_error(self, unique_exceptions, request):
     exception_noun = pluralize(len(unique_exceptions), 'Exception')
     if self._scheduler.include_trace_on_error:
         cumulative_trace = '\n'.join(self.trace(request))
         raise ExecutionError(
             '{} encountered:\n{}'.format(exception_noun, cumulative_trace),
             unique_exceptions,
         )
     else:
         raise ExecutionError(
             '{} encountered:\n  {}'.format(
                 exception_noun,
                 '\n  '.join('{}: {}'.format(type(t).__name__, str(t))
                             for t in unique_exceptions)),
             unique_exceptions)
Example #12
 def _trace_on_error(self, unique_exceptions, request):
   exception_noun = pluralize(len(unique_exceptions), 'Exception')
   if self._scheduler.include_trace_on_error:
     cumulative_trace = '\n'.join(self.trace(request))
     raise ExecutionError(
       '{} encountered:\n{}'.format(exception_noun, cumulative_trace),
       unique_exceptions,
     )
   else:
     raise ExecutionError(
       '{} encountered:\n  {}'.format(
         exception_noun,
         '\n  '.join('{}: {}'.format(type(t).__name__, str(t)) for t in unique_exceptions)),
       unique_exceptions
     )
Example #13
    def products_request(self, products, subjects):
        """Executes a request for multiple products for some subjects, and returns the products.

    :param list products: A list of product types for the request.
    :param list subjects: A list of subjects for the request.
    :returns: A dict from product type to lists of products each with length matching len(subjects).
    """
        request = self.execution_request(products, subjects)
        result = self.execute(request)
        if result.error:
            raise result.error

        # State validation.
        unknown_state_types = tuple(
            type(state) for _, state in result.root_products
            if type(state) not in (Throw, Return))
        if unknown_state_types:
            State.raise_unrecognized(unknown_state_types)

        # Throw handling.
        # TODO: See https://github.com/pantsbuild/pants/issues/3912
        throw_root_states = tuple(state for root, state in result.root_products
                                  if type(state) is Throw)
        if throw_root_states:
            unique_exceptions = tuple({t.exc for t in throw_root_states})
            exception_noun = pluralize(len(unique_exceptions), 'Exception')

            if self._scheduler.include_trace_on_error:
                cumulative_trace = '\n'.join(self.trace(request))
                raise ExecutionError(
                    '{} encountered:\n{}'.format(exception_noun,
                                                 cumulative_trace),
                    unique_exceptions,
                )
            else:
                raise ExecutionError(
                    '{} encountered:\n  {}'.format(
                        exception_noun,
                        '\n  '.join('{}: {}'.format(type(t).__name__, str(t))
                                    for t in unique_exceptions)),
                    unique_exceptions)

        # Everything is a Return: we rely on the fact that roots are ordered to preserve subject
        # order in output lists.
        product_results = defaultdict(list)
        for (_, product), state in result.root_products:
            product_results[product].append(state.value)
        return product_results
Example #14
    def validate_snapshot(self, snapshot: Snapshot) -> None:
        """Perform any additional validation on the resulting snapshot, e.g. ensuring that certain
        banned files are not used.

        To enforce that the resulting files end in certain extensions, such as `.py` or `.java`, set
        the class property `expected_file_extensions`.

        To enforce that there are only a certain number of resulting files, such as binary targets
        checking for only 0-1 sources, set the class property `expected_num_files`.
        """
        if self.expected_file_extensions is not None:
            bad_files = [
                fp for fp in snapshot.files
                if not PurePath(fp).suffix in self.expected_file_extensions
            ]
            if bad_files:
                expected = (f"one of {sorted(self.expected_file_extensions)}"
                            if len(self.expected_file_extensions) > 1 else
                            repr(self.expected_file_extensions[0]))
                raise InvalidFieldException(
                    f"The {repr(self.alias)} field in target {self.address} must only contain "
                    f"files that end in {expected}, but it had these files: {sorted(bad_files)}."
                )
        if self.expected_num_files is not None:
            num_files = len(snapshot.files)
            is_bad_num_files = (num_files not in self.expected_num_files
                                if isinstance(self.expected_num_files, range)
                                else num_files != self.expected_num_files)
            if is_bad_num_files:
                if isinstance(self.expected_num_files, range):
                    if len(self.expected_num_files) == 2:
                        expected_str = (" or ".join(
                            str(n)
                            for n in self.expected_num_files) + " files")
                    else:
                        expected_str = f"a number of files in the range `{self.expected_num_files}`"
                else:
                    expected_str = pluralize(self.expected_num_files, "file")
                raise InvalidFieldException(
                    f"The {repr(self.alias)} field in target {self.address} must have "
                    f"{expected_str}, but it had {pluralize(num_files, 'file')}."
                )
Example #15
def items_to_report_element(items, item_type):
  """Converts an iterable of items to a (message, detail) pair.

  - items: a list of items (e.g., Target instances) that can be str()-ed.
  - item_type: a string describing the type of item (e.g., 'target').

  Returns (message, detail) where message is the count of items (e.g., '26 targets')
  and detail is the text representation of the list of items, one per line. If there are
  no items, only the message is returned.

  The return value can be used as an argument to Report.log().

  This is useful when we want to say "N targets" or "K sources"
  and allow the user to see which ones by clicking on that text.
  """
  n = len(items)
  text = pluralize(n, item_type)
  if n == 0:
    return text
  else:
    detail = '\n'.join(str(x) for x in items)
    return text, detail
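
For illustration, a call with two items returns the (message, detail) pair, while an empty list returns only the message (the target specs here are hypothetical):

items_to_report_element(['src/python/foo:foo', 'src/python/bar:bar'], 'target')
# -> ('2 targets', 'src/python/foo:foo\nsrc/python/bar:bar')
items_to_report_element([], 'source')
# -> '0 sources'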
Example #16
    def run_tests(self, fail_fast, test_targets, output_dir, coverage, complete_test_registry):
        test_registry = complete_test_registry.filter(test_targets)
        if test_registry.empty:
            return TestResult.successful

        coverage.instrument(output_dir)

        def parse_error_handler(parse_error):
            # Just log and move on since the result is only used to characterize failures, and raising
            # an error here would just distract from the underlying test failures.
            self.context.log.error(
                "Error parsing test result file {path}: {cause}".format(
                    path=parse_error.xml_path, cause=parse_error.cause
                )
            )

        # The 'instrument_classpath' product below will be `None` if not set, and we'll default
        # back to runtime_classpath
        classpath_product = self.context.products.get_data("instrument_classpath")

        result = 0
        for batch_id, (properties, batch) in enumerate(self._iter_batches(test_registry)):
            (
                workdir,
                platform,
                target_jvm_options,
                target_env_vars,
                concurrency,
                threads,
            ) = properties

            batch_output_dir = output_dir
            if self._batched:
                batch_output_dir = os.path.join(batch_output_dir, f"batch-{batch_id}")

            run_modifications = coverage.run_modifications(batch_output_dir)
            self.context.log.debug(f"run_modifications: {run_modifications}")

            extra_jvm_options = run_modifications.extra_jvm_options

            # Batches of test classes will likely exist within the same targets: dedupe them.
            relevant_targets = {test_registry.get_owning_target(t) for t in batch}

            complete_classpath = OrderedSet()
            complete_classpath.update(run_modifications.classpath_prepend)
            complete_classpath.update(JUnit.global_instance().runner_classpath(self.context))
            complete_classpath.update(
                self.classpath(relevant_targets, classpath_product=classpath_product)
            )

            distribution = self.preferred_jvm_distribution([platform], self._strict_jvm_version)

            # Override cmdline args with values from junit_test() target that specify concurrency:
            args = self._args(fail_fast, batch_output_dir) + ["-xmlreport"]

            if concurrency is not None:
                args = remove_arg(args, "-default-parallel")
                if concurrency == JUnitTests.CONCURRENCY_SERIAL:
                    args = ensure_arg(args, "-default-concurrency", param="SERIAL")
                elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES:
                    args = ensure_arg(args, "-default-concurrency", param="PARALLEL_CLASSES")
                elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_METHODS:
                    args = ensure_arg(args, "-default-concurrency", param="PARALLEL_METHODS")
                elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES_AND_METHODS:
                    args = ensure_arg(
                        args, "-default-concurrency", param="PARALLEL_CLASSES_AND_METHODS"
                    )

            if threads is not None:
                args = remove_arg(args, "-parallel-threads", has_param=True)
                args += ["-parallel-threads", str(threads)]

            batch_test_specs = [test.render_test_spec() for test in batch]
            with argfile.safe_args(batch_test_specs, self.get_options()) as batch_tests:
                with self.chroot(relevant_targets, workdir) as chroot:
                    self.context.log.debug(f"CWD = {chroot}")
                    self.context.log.debug(f"platform = {platform}")
                    with environment_as(**dict(target_env_vars)):
                        subprocess_result = self.spawn_and_wait(
                            relevant_targets,
                            executor=SubprocessExecutor(distribution),
                            distribution=distribution,
                            classpath=complete_classpath,
                            main=JUnit.RUNNER_MAIN,
                            jvm_options=self.jvm_options
                            + list(platform.jvm_options)
                            + extra_jvm_options
                            + list(target_jvm_options),
                            args=args + batch_tests,
                            workunit_factory=self.context.new_workunit,
                            workunit_name="run",
                            workunit_labels=[WorkUnitLabel.TEST],
                            cwd=chroot,
                            synthetic_jar_dir=batch_output_dir,
                            create_synthetic_jar=self.synthetic_classpath,
                        )
                        self.context.log.debug(
                            "JUnit subprocess exited with result ({})".format(subprocess_result)
                        )
                        result += abs(subprocess_result)

                tests_info = self.parse_test_info(
                    batch_output_dir, parse_error_handler, ["classname"]
                )
                for test_name, test_info in tests_info.items():
                    test_item = Test(test_info["classname"], test_name)
                    test_target = test_registry.get_owning_target(test_item)
                    self.report_all_info_for_single_test(
                        self.options_scope, test_target, test_name, test_info
                    )

                if result != 0 and fail_fast:
                    break

        if result == 0:
            return TestResult.successful

        # NB: If the TestRegistry fails to find the owning target of a failed test, the target key in
        # this dictionary will be None: helper methods in this block account for that.
        target_to_failed_test = parse_failed_targets(test_registry, output_dir, parse_error_handler)

        def sort_owning_target(t):
            return t.address.spec if t else ""

        failed_targets = sorted(target_to_failed_test, key=sort_owning_target)
        error_message_lines = []
        if self._failure_summary:

            def render_owning_target(t):
                return t.address.reference() if t else "<Unknown Target>"

            for target in failed_targets:
                error_message_lines.append(f"\n{(' ' * 4)}{render_owning_target(target)}")
                for test in sorted(target_to_failed_test[target]):
                    error_message_lines.append(f"{' ' * 8}{test.classname}#{test.methodname}")
        error_message_lines.append(
            "\njava {main} ... exited non-zero ({code}); {failed} failed {targets}.".format(
                main=JUnit.RUNNER_MAIN,
                code=result,
                failed=len(failed_targets),
                targets=pluralize(len(failed_targets), "target"),
            )
        )
        return TestResult(
            msg="\n".join(error_message_lines), rc=result, failed_targets=failed_targets
        )
Example #17
    def _run_tests(self,
                   tests_to_targets,
                   extra_jvm_options=None,
                   classpath_prepend=(),
                   classpath_append=()):
        extra_jvm_options = extra_jvm_options or []

        tests_by_properties = self._tests_by_properties(
            tests_to_targets, self._infer_workdir,
            lambda target: target.test_platform)

        # the below will be None if not set, and we'll default back to runtime_classpath
        classpath_product = self._context.products.get_data(
            'instrument_classpath')

        result = 0
        for (workdir, platform), tests in tests_by_properties.items():
            for batch in self._partition(tests):
                # Batches of test classes will likely exist within the same targets: dedupe them.
                relevant_targets = set(map(tests_to_targets.get, batch))
                classpath = self._task_exports.classpath(
                    relevant_targets,
                    classpath_prefix=self._task_exports.tool_classpath(
                        'junit'),
                    classpath_product=classpath_product)
                complete_classpath = OrderedSet()
                complete_classpath.update(classpath_prepend)
                complete_classpath.update(classpath)
                complete_classpath.update(classpath_append)
                distribution = self.preferred_jvm_distribution([platform])
                with binary_util.safe_args(
                        batch, self._task_exports.task_options) as batch_tests:
                    self._context.log.debug('CWD = {}'.format(workdir))
                    self._context.log.debug('platform = {}'.format(platform))
                    result += abs(
                        distribution.execute_java(
                            classpath=complete_classpath,
                            main=JUnitRun._MAIN,
                            jvm_options=self._task_exports.jvm_options +
                            extra_jvm_options,
                            args=self._args + batch_tests + [u'-xmlreport'],
                            workunit_factory=self._context.new_workunit,
                            workunit_name='run',
                            workunit_labels=[WorkUnitLabel.TEST],
                            cwd=workdir,
                        ))

                    if result != 0 and self._fail_fast:
                        break

        if result != 0:
            failed_targets_and_tests = self._get_failed_targets(
                tests_to_targets)
            failed_targets = sorted(failed_targets_and_tests,
                                    key=lambda target: target.address.spec)
            error_message_lines = []
            if self._failure_summary:
                for target in failed_targets:
                    error_message_lines.append('\n{0}{1}'.format(
                        ' ' * 4, target.address.spec))
                    for test in sorted(failed_targets_and_tests[target]):
                        error_message_lines.append('{0}{1}'.format(
                            ' ' * 8, test))
            error_message_lines.append(
                '\njava {main} ... exited non-zero ({code}); {failed} failed {targets}.'
                .format(main=JUnitRun._MAIN,
                        code=result,
                        failed=len(failed_targets),
                        targets=pluralize(len(failed_targets), 'target')))
            raise TestFailedTaskError('\n'.join(error_message_lines),
                                      failed_targets=list(failed_targets))
Example #18
  def _run_tests(self, tests_to_targets):

    if self._coverage:
      extra_jvm_options = self._coverage.extra_jvm_options
      classpath_prepend = self._coverage.classpath_prepend
      classpath_append = self._coverage.classpath_append
    else:
      extra_jvm_options = []
      classpath_prepend = ()
      classpath_append = ()

    tests_by_properties = self._tests_by_properties(
      tests_to_targets,
      self._infer_workdir,
      lambda target: target.test_platform,
      lambda target: target.payload.extra_jvm_options,
      lambda target: target.payload.extra_env_vars,
    )

    # the below will be None if not set, and we'll default back to runtime_classpath
    classpath_product = self.context.products.get_data('instrument_classpath')

    result = 0
    for (workdir, platform, target_jvm_options, target_env_vars), tests in tests_by_properties.items():
      for batch in self._partition(tests):
        # Batches of test classes will likely exist within the same targets: dedupe them.
        relevant_targets = set(map(tests_to_targets.get, batch))
        complete_classpath = OrderedSet()
        complete_classpath.update(classpath_prepend)
        complete_classpath.update(self.tool_classpath('junit'))
        complete_classpath.update(self.classpath(relevant_targets,
                                                 classpath_product=classpath_product))
        complete_classpath.update(classpath_append)
        distribution = self.preferred_jvm_distribution([platform])
        with binary_util.safe_args(batch, self.get_options()) as batch_tests:
          self.context.log.debug('CWD = {}'.format(workdir))
          self.context.log.debug('platform = {}'.format(platform))
          with environment_as(**dict(target_env_vars)):
            result += abs(self._spawn_and_wait(
              executor=SubprocessExecutor(distribution),
              distribution=distribution,
              classpath=complete_classpath,
              main=JUnitRun._MAIN,
              jvm_options=self.jvm_options + extra_jvm_options + list(target_jvm_options),
              args=self._args + batch_tests + [u'-xmlreport'],
              workunit_factory=self.context.new_workunit,
              workunit_name='run',
              workunit_labels=[WorkUnitLabel.TEST],
              cwd=workdir,
              synthetic_jar_dir=self.workdir,
              create_synthetic_jar=self.synthetic_classpath,
            ))

          if result != 0 and self._fail_fast:
            break

    if result != 0:
      failed_targets_and_tests = self._get_failed_targets(tests_to_targets)
      failed_targets = sorted(failed_targets_and_tests, key=lambda target: target.address.spec)
      error_message_lines = []
      if self._failure_summary:
        for target in failed_targets:
          error_message_lines.append('\n{0}{1}'.format(' '*4, target.address.spec))
          for test in sorted(failed_targets_and_tests[target]):
            error_message_lines.append('{0}{1}'.format(' '*8, test))
      error_message_lines.append(
        '\njava {main} ... exited non-zero ({code}); {failed} failed {targets}.'
          .format(main=JUnitRun._MAIN, code=result, failed=len(failed_targets),
                  targets=pluralize(len(failed_targets), 'target'))
      )
      raise TestFailedTaskError('\n'.join(error_message_lines), failed_targets=list(failed_targets))
Example #19
async def resolve_target_parametrizations(
    request: _TargetParametrizationsRequest,
    registered_target_types: RegisteredTargetTypes,
    union_membership: UnionMembership,
    target_types_to_generate_requests: TargetTypesToGenerateTargetsRequests,
    unmatched_build_file_globs: UnmatchedBuildFileGlobs,
) -> _TargetParametrizations:
    address = request.address

    target_adaptor = await Get(TargetAdaptor, Address, address)
    target_type = registered_target_types.aliases_to_types.get(
        target_adaptor.type_alias, None)
    if target_type is None:
        raise UnrecognizedTargetTypeException(target_adaptor.type_alias,
                                              registered_target_types, address)
    if (target_type.deprecated_alias is not None
            and target_type.deprecated_alias == target_adaptor.type_alias
            and not address.is_generated_target):
        warn_deprecated_target_type(target_type)

    target = None
    parametrizations: list[_TargetParametrization] = []
    generate_request: type[GenerateTargetsRequest] | None = None
    if issubclass(target_type, TargetGenerator):
        generate_request = target_types_to_generate_requests.request_for(
            target_type)
    if generate_request:
        # Split out the `propagated_fields` before construction.
        generator_fields = dict(target_adaptor.kwargs)
        template_fields = {}
        if issubclass(target_type, TargetGenerator):
            copied_fields = (
                *target_type.copied_fields,
                *target_type._find_plugin_fields(union_membership),
            )
            for field_type in copied_fields:
                field_value = generator_fields.get(field_type.alias, None)
                if field_value is not None:
                    template_fields[field_type.alias] = field_value
            for field_type in target_type.moved_fields:
                field_value = generator_fields.pop(field_type.alias, None)
                if field_value is not None:
                    template_fields[field_type.alias] = field_value

        generator_fields_parametrized = {
            name
            for name, field in generator_fields.items()
            if isinstance(field, Parametrize)
        }
        if generator_fields_parametrized:
            noun = pluralize(len(generator_fields_parametrized),
                             "field",
                             include_count=False)
            raise ValueError(
                f"Only fields which will be moved to generated targets may be parametrized, "
                f"so target generator {address} (with type {target_type.alias}) cannot "
                f"parametrize the {generator_fields_parametrized} {noun}.")

        base_generator = target_type(
            generator_fields,
            address,
            name_explicitly_set=target_adaptor.name_explicitly_set,
            union_membership=union_membership,
        )

        overrides = {}
        if base_generator.has_field(OverridesField):
            overrides_field = base_generator[OverridesField]
            overrides_flattened = overrides_field.flatten()
            if issubclass(target_type, TargetFilesGenerator):
                override_globs = OverridesField.to_path_globs(
                    address, overrides_flattened, unmatched_build_file_globs)
                override_paths = await MultiGet(
                    Get(Paths, PathGlobs, path_globs)
                    for path_globs in override_globs)
                overrides = OverridesField.flatten_paths(
                    address,
                    zip(override_paths, override_globs,
                        overrides_flattened.values()),
                )
            else:
                overrides = overrides_field.flatten()

        generators = [(
            target_type(
                generator_fields,
                address,
                name_explicitly_set=target_adaptor.name is not None,
                union_membership=union_membership,
            ),
            template,
        ) for address, template in Parametrize.expand(address, template_fields)
                      ]
        all_generated = await MultiGet(
            Get(
                GeneratedTargets,
                GenerateTargetsRequest,
                generate_request(
                    generator,
                    template_address=generator.address,
                    template=template,
                    overrides={
                        name: dict(
                            Parametrize.expand(generator.address, override))
                        for name, override in overrides.items()
                    },
                ),
            ) for generator, template in generators)
        parametrizations.extend(
            _TargetParametrization(generator, generated_batch)
            for generated_batch, (generator,
                                  _) in zip(all_generated, generators))
    else:
        first, *rest = Parametrize.expand(address, target_adaptor.kwargs)
        if rest:
            # The target was parametrized, and so the original Target does not exist.
            generated = FrozenDict((
                parameterized_address,
                target_type(
                    parameterized_fields,
                    parameterized_address,
                    name_explicitly_set=target_adaptor.name_explicitly_set,
                    union_membership=union_membership,
                ),
            ) for parameterized_address, parameterized_fields in (first,
                                                                  *rest))
            parametrizations.append(_TargetParametrization(None, generated))
        else:
            # The target was not parametrized.
            target = target_type(
                target_adaptor.kwargs,
                address,
                name_explicitly_set=target_adaptor.name_explicitly_set,
                union_membership=union_membership,
            )
            parametrizations.append(
                _TargetParametrization(target, FrozenDict()))

    # TODO: Move to Target constructor.
    for field_type in target.field_types if target else ():
        if (field_type.deprecated_alias is not None
                and field_type.deprecated_alias in target_adaptor.kwargs):
            warn_deprecated_field_type(field_type)

    return _TargetParametrizations(parametrizations)
Example #20
    def product_request(self, product, subjects):
        """Executes a request for a single product for some subjects, and returns the products.

        :param class product: A product type for the request.
        :param list subjects: A list of subjects or Params instances for the request.
        :returns: A list of the requested products, with length matching len(subjects).
        """
        request = None
        raised_exception = None
        try:
            request = self.execution_request([product], subjects)
        except:  # noqa: T803
            # If there are any exceptions during CFFI extern method calls, we want to return an error with
            # them and whatever failure results from it. This typically results from unhashable types.
            if self._scheduler._native._peek_cffi_extern_method_runtime_exceptions():
                raised_exception = sys.exc_info()[0:3]
            else:
                # Otherwise, this is likely an exception coming from somewhere else, and we don't want to
                # swallow that, so re-raise.
                raise

        # We still want to raise whenever there are any exceptions in any CFFI extern methods, even if
        # that didn't lead to an exception in generating the execution request for some reason, so we
        # check the extern exceptions list again.
        internal_errors = self._scheduler._native.consume_cffi_extern_method_runtime_exceptions()
        if internal_errors:
            error_tracebacks = [
                "".join(
                    traceback.format_exception(
                        etype=error_info.exc_type,
                        value=error_info.exc_value,
                        tb=error_info.traceback,
                    )
                )
                for error_info in internal_errors
            ]

            raised_exception_message = None
            if raised_exception:
                exc_type, exc_value, tb = raised_exception
                raised_exception_message = dedent(
                    """\
                    The engine execution request raised this error, which is probably due to the errors in the
                    CFFI extern methods listed above, as CFFI externs return None upon error:
                    {}
                    """
                ).format(
                    "".join(traceback.format_exception(etype=exc_type, value=exc_value, tb=tb))
                )

            raise ExecutionError(
                dedent(
                    """\
                    {error_description} raised in CFFI extern methods:
                    {joined_tracebacks}{raised_exception_message}
                    """
                ).format(
                    error_description=pluralize(len(internal_errors), "Exception"),
                    joined_tracebacks="\n+++++++++\n".join(
                        formatted_tb for formatted_tb in error_tracebacks
                    ),
                    raised_exception_message=(
                        "\n\n{}".format(raised_exception_message)
                        if raised_exception_message
                        else ""
                    ),
                )
            )

        returns, throws = self.execute(request)

        # Throw handling.
        if throws:
            unique_exceptions = tuple({t.exc for _, t in throws})
            self._trace_on_error(unique_exceptions, request)

        # Everything is a Return: we rely on the fact that roots are ordered to preserve subject
        # order in output lists.
        return [ret.value for _, ret in returns]
Example #21
  def _run_tests(self, test_registry, output_dir, coverage=None):
    if coverage:
      extra_jvm_options = coverage.extra_jvm_options
      classpath_prepend = coverage.classpath_prepend
      classpath_append = coverage.classpath_append
    else:
      extra_jvm_options = []
      classpath_prepend = ()
      classpath_append = ()

    tests_by_properties = test_registry.index(
        lambda tgt: tgt.cwd if tgt.cwd is not None else self._working_dir,
        lambda tgt: tgt.test_platform,
        lambda tgt: tgt.payload.extra_jvm_options,
        lambda tgt: tgt.payload.extra_env_vars,
        lambda tgt: tgt.concurrency,
        lambda tgt: tgt.threads)

    # the below will be None if not set, and we'll default back to runtime_classpath
    classpath_product = self.context.products.get_data('instrument_classpath')

    result = 0
    for properties, tests in tests_by_properties.items():
      (workdir, platform, target_jvm_options, target_env_vars, concurrency, threads) = properties
      for batch in self._partition(tests):
        # Batches of test classes will likely exist within the same targets: dedupe them.
        relevant_targets = {test_registry.get_owning_target(t) for t in batch}
        complete_classpath = OrderedSet()
        complete_classpath.update(classpath_prepend)
        complete_classpath.update(JUnit.global_instance().runner_classpath(self.context))
        complete_classpath.update(self.classpath(relevant_targets,
                                                 classpath_product=classpath_product))
        complete_classpath.update(classpath_append)
        distribution = JvmPlatform.preferred_jvm_distribution([platform], self._strict_jvm_version)

        # Override cmdline args with values from junit_test() target that specify concurrency:
        args = self._args(output_dir) + [u'-xmlreport']

        if concurrency is not None:
          args = remove_arg(args, '-default-parallel')
          if concurrency == JUnitTests.CONCURRENCY_SERIAL:
            args = ensure_arg(args, '-default-concurrency', param='SERIAL')
          elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES:
            args = ensure_arg(args, '-default-concurrency', param='PARALLEL_CLASSES')
          elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_METHODS:
            args = ensure_arg(args, '-default-concurrency', param='PARALLEL_METHODS')
          elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES_AND_METHODS:
            args = ensure_arg(args, '-default-concurrency', param='PARALLEL_CLASSES_AND_METHODS')

        if threads is not None:
          args = remove_arg(args, '-parallel-threads', has_param=True)
          args += ['-parallel-threads', str(threads)]

        batch_test_specs = [test.render_test_spec() for test in batch]
        with argfile.safe_args(batch_test_specs, self.get_options()) as batch_tests:
          self.context.log.debug('CWD = {}'.format(workdir))
          self.context.log.debug('platform = {}'.format(platform))
          with environment_as(**dict(target_env_vars)):
            result += abs(self._spawn_and_wait(
              executor=SubprocessExecutor(distribution),
              distribution=distribution,
              classpath=complete_classpath,
              main=JUnit.RUNNER_MAIN,
              jvm_options=self.jvm_options + extra_jvm_options + list(target_jvm_options),
              args=args + batch_tests,
              workunit_factory=self.context.new_workunit,
              workunit_name='run',
              workunit_labels=[WorkUnitLabel.TEST],
              cwd=workdir,
              synthetic_jar_dir=output_dir,
              create_synthetic_jar=self.synthetic_classpath,
            ))

          if result != 0 and self._fail_fast:
            break

    if result != 0:
      def error_handler(parse_error):
        # Just log and move on since the result is only used to characterize failures, and raising
        # an error here would just distract from the underlying test failures.
        self.context.log.error('Error parsing test result file {path}: {cause}'
                               .format(path=parse_error.junit_xml_path, cause=parse_error.cause))

      target_to_failed_test = parse_failed_targets(test_registry, output_dir, error_handler)
      failed_targets = sorted(target_to_failed_test, key=lambda t: t.address.spec)
      error_message_lines = []
      if self._failure_summary:
        for target in failed_targets:
          error_message_lines.append('\n{indent}{address}'.format(indent=' ' * 4,
                                                                  address=target.address.spec))
          for test in sorted(target_to_failed_test[target]):
            error_message_lines.append('{indent}{classname}#{methodname}'
                                       .format(indent=' ' * 8,
                                               classname=test.classname,
                                               methodname=test.methodname))
      error_message_lines.append(
        '\njava {main} ... exited non-zero ({code}); {failed} failed {targets}.'
          .format(main=JUnit.RUNNER_MAIN, code=result, failed=len(failed_targets),
                  targets=pluralize(len(failed_targets), 'target'))
      )
      raise TestFailedTaskError('\n'.join(error_message_lines), failed_targets=list(failed_targets))
Example #22
  def run_tests(self, fail_fast, test_targets, output_dir, coverage):
    test_registry = self._collect_test_targets(test_targets)
    if test_registry.empty:
      return TestResult.rc(0)

    coverage.instrument(output_dir)

    def parse_error_handler(parse_error):
      # Just log and move on since the result is only used to characterize failures, and raising
      # an error here would just distract from the underlying test failures.
      self.context.log.error('Error parsing test result file {path}: {cause}'
                             .format(path=parse_error.xml_path, cause=parse_error.cause))

    # The 'instrument_classpath' product below will be `None` if not set, and we'll default
    # back to runtime_classpath
    classpath_product = self.context.products.get_data('instrument_classpath')

    result = 0
    for batch_id, (properties, batch) in enumerate(self._iter_batches(test_registry)):
      (workdir, platform, target_jvm_options, target_env_vars, concurrency, threads) = properties

      batch_output_dir = output_dir
      if self._batched:
        batch_output_dir = os.path.join(batch_output_dir, 'batch-{}'.format(batch_id))

      run_modifications = coverage.run_modifications(batch_output_dir)

      extra_jvm_options = run_modifications.extra_jvm_options

      # Batches of test classes will likely exist within the same targets: dedupe them.
      relevant_targets = {test_registry.get_owning_target(t) for t in batch}

      complete_classpath = OrderedSet()
      complete_classpath.update(run_modifications.classpath_prepend)
      complete_classpath.update(JUnit.global_instance().runner_classpath(self.context))
      complete_classpath.update(self.classpath(relevant_targets,
                                               classpath_product=classpath_product))

      distribution = JvmPlatform.preferred_jvm_distribution([platform], self._strict_jvm_version)

      # Override cmdline args with values from junit_test() target that specify concurrency:
      args = self._args(fail_fast, batch_output_dir) + [u'-xmlreport']

      if concurrency is not None:
        args = remove_arg(args, '-default-parallel')
        if concurrency == JUnitTests.CONCURRENCY_SERIAL:
          args = ensure_arg(args, '-default-concurrency', param='SERIAL')
        elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES:
          args = ensure_arg(args, '-default-concurrency', param='PARALLEL_CLASSES')
        elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_METHODS:
          args = ensure_arg(args, '-default-concurrency', param='PARALLEL_METHODS')
        elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES_AND_METHODS:
          args = ensure_arg(args, '-default-concurrency', param='PARALLEL_CLASSES_AND_METHODS')

      if threads is not None:
        args = remove_arg(args, '-parallel-threads', has_param=True)
        args += ['-parallel-threads', str(threads)]

      batch_test_specs = [test.render_test_spec() for test in batch]
      with argfile.safe_args(batch_test_specs, self.get_options()) as batch_tests:
        with self._chroot(relevant_targets, workdir) as chroot:
          self.context.log.debug('CWD = {}'.format(chroot))
          self.context.log.debug('platform = {}'.format(platform))
          with environment_as(**dict(target_env_vars)):
            subprocess_result = self._spawn_and_wait(
              executor=SubprocessExecutor(distribution),
              distribution=distribution,
              classpath=complete_classpath,
              main=JUnit.RUNNER_MAIN,
              jvm_options=self.jvm_options + extra_jvm_options + list(target_jvm_options),
              args=args + batch_tests,
              workunit_factory=self.context.new_workunit,
              workunit_name='run',
              workunit_labels=[WorkUnitLabel.TEST],
              cwd=chroot,
              synthetic_jar_dir=batch_output_dir,
              create_synthetic_jar=self.synthetic_classpath,
            )
            self.context.log.debug('JUnit subprocess exited with result ({})'
                                   .format(subprocess_result))
            result += abs(subprocess_result)

        tests_info = self.parse_test_info(batch_output_dir, parse_error_handler, ['classname'])
        for test_name, test_info in tests_info.items():
          test_item = Test(test_info['classname'], test_name)
          test_target = test_registry.get_owning_target(test_item)
          self.report_all_info_for_single_test(self.options_scope, test_target,
                                               test_name, test_info)

        if result != 0 and fail_fast:
          break

    if result == 0:
      return TestResult.rc(0)

    target_to_failed_test = parse_failed_targets(test_registry, output_dir, parse_error_handler)

    def sort_owning_target(t):
      return t.address.spec if t else ''

    failed_targets = sorted(target_to_failed_test, key=sort_owning_target)
    error_message_lines = []
    if self._failure_summary:
      def render_owning_target(t):
        return t.address.reference() if t else '<Unknown Target>'

      for target in failed_targets:
        error_message_lines.append('\n{indent}{owner}'.format(indent=' ' * 4,
                                                              owner=render_owning_target(target)))
        for test in sorted(target_to_failed_test[target]):
          error_message_lines.append('{indent}{classname}#{methodname}'
                                     .format(indent=' ' * 8,
                                             classname=test.classname,
                                             methodname=test.methodname))
    error_message_lines.append(
      '\njava {main} ... exited non-zero ({code}); {failed} failed {targets}.'
        .format(main=JUnit.RUNNER_MAIN, code=result, failed=len(failed_targets),
                targets=pluralize(len(failed_targets), 'target'))
    )
    return TestResult(msg='\n'.join(error_message_lines), rc=result, failed_targets=failed_targets)
Example #23
  def _run_tests(self, test_registry, output_dir, coverage=None):

    tests_by_properties = test_registry.index(
      lambda tgt: tgt.cwd if tgt.cwd is not None else self._working_dir,
      lambda tgt: tgt.platform)

    # the below will be None if not set, and we'll default back to runtime_classpath
    classpath_product = self.context.products.get_data('instrument_classpath')

    result = 0
    base_args = self.get_options().args
    for properties, tests in tests_by_properties.items():
      (cwd, platform) = properties
      for batch in self._partition(tests, test_registry):
        # Batches of test classes will likely exist within the same targets: dedupe them.
        relevant_targets = {test_registry.get_owning_target(t) for t in batch}
        if len(relevant_targets) > 1:
          raise "oops, should have only had one target"
        complete_classpath = OrderedSet()
        # TODO: Include specs2 on the classpath, in case the target doesn't
        complete_classpath.update(self.classpath(relevant_targets,
                                                 classpath_product=classpath_product))
        distribution = JvmPlatform.preferred_jvm_distribution([platform], self._strict_jvm_version)

        target_dir = list(relevant_targets)[0].address.spec_path
        args = base_args[:]
        opts = self.get_options()
        if opts.example:
          args.extend(["ex", opts.example])
        if opts.sequential:
          args.extend(['sequential', 'true'])
        if opts.show_times:
          args.extend(['showtimes', 'true'])
        file_pattern = opts.file_pattern or Specs2Run.TEST_CLASS_PATTERN
        if '(' not in file_pattern or ')' not in file_pattern:
          raise TaskError("Test regex must have a group.")
        args.extend([
          "junitxml",
          "console",
          "filesrunner.basepath", target_dir,
          "filesrunner.pattern", file_pattern,
          "junit.outdir", self.junit_xml_dir
        ])
        self.context.log.debug('CWD = {}'.format(cwd))
        self.context.log.debug('platform = {}'.format(platform))
        self.context.log.debug('targets = {}'.format(relevant_targets))
        self.context.log.debug('args = {}'.format(" ".join(args)))
        result += abs(self._spawn_and_wait(
          executor=SubprocessExecutor(distribution),
          distribution=distribution,
          classpath=complete_classpath,
          main=Specs2Run.SPECS2_MAIN,
          jvm_options=self.jvm_options,
          args=args,
          workunit_factory=self.context.new_workunit,
          workunit_name='run',
          workunit_labels=[WorkUnitLabel.TEST],
          cwd=cwd,
          synthetic_jar_dir=output_dir,
          create_synthetic_jar=self.synthetic_classpath,
        ))

        if result != 0 and self._fail_fast:
          break

    if result != 0:
      failed_targets_and_tests = self._get_failed_targets(test_registry, output_dir)
      failed_targets = sorted(failed_targets_and_tests, key=lambda target: target.address.spec)
      error_message_lines = []
      if self._failure_summary:
        for target in failed_targets:
          error_message_lines.append('\n{0}{1}'.format(' '*4, target.address.spec))
          for test in sorted(failed_targets_and_tests[target]):
            error_message_lines.append('{0}{1}'.format(' '*8, test))
      error_message_lines.append(
        '\njava {main} ... exited non-zero ({code}); {failed} failed {targets}.'
          .format(main=Specs2Run.SPECS2_MAIN, code=result, failed=len(failed_targets),
                  targets=pluralize(len(failed_targets), 'target'))
      )
      raise TestFailedTaskError('\n'.join(error_message_lines), failed_targets=list(failed_targets))
Example #24
    def run_tests(self, fail_fast, test_targets, output_dir, coverage):
        test_registry = self._collect_test_targets(test_targets)
        if test_registry.empty:
            return TestResult.rc(0)

        coverage.instrument(output_dir)

        def parse_error_handler(parse_error):
            # Just log and move on since the result is only used to characterize failures, and raising
            # an error here would just distract from the underlying test failures.
            self.context.log.error(
                'Error parsing test result file {path}: {cause}'.format(
                    path=parse_error.xml_path, cause=parse_error.cause))

        # The 'instrument_classpath' product below will be `None` if not set, and we'll default
        # back to runtime_classpath
        classpath_product = self.context.products.get_data(
            'instrument_classpath')

        result = 0
        for batch_id, (properties,
                       batch) in enumerate(self._iter_batches(test_registry)):
            (workdir, platform, target_jvm_options, target_env_vars,
             concurrency, threads) = properties

            batch_output_dir = output_dir
            if self._batched:
                batch_output_dir = os.path.join(batch_output_dir,
                                                'batch-{}'.format(batch_id))

            run_modifications = coverage.run_modifications(batch_output_dir)

            extra_jvm_options = run_modifications.extra_jvm_options

            # Batches of test classes will likely exist within the same targets: dedupe them.
            relevant_targets = {
                test_registry.get_owning_target(t)
                for t in batch
            }

            complete_classpath = OrderedSet()
            complete_classpath.update(run_modifications.classpath_prepend)
            complete_classpath.update(JUnit.global_instance().runner_classpath(
                self.context))
            complete_classpath.update(
                self.classpath(relevant_targets,
                               classpath_product=classpath_product))

            distribution = JvmPlatform.preferred_jvm_distribution(
                [platform], self._strict_jvm_version)

            # Override cmdline args with values from junit_test() target that specify concurrency:
            args = self._args(fail_fast, batch_output_dir) + [u'-xmlreport']

            if concurrency is not None:
                args = remove_arg(args, '-default-parallel')
                if concurrency == JUnitTests.CONCURRENCY_SERIAL:
                    args = ensure_arg(args,
                                      '-default-concurrency',
                                      param='SERIAL')
                elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES:
                    args = ensure_arg(args,
                                      '-default-concurrency',
                                      param='PARALLEL_CLASSES')
                elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_METHODS:
                    args = ensure_arg(args,
                                      '-default-concurrency',
                                      param='PARALLEL_METHODS')
                elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES_AND_METHODS:
                    args = ensure_arg(args,
                                      '-default-concurrency',
                                      param='PARALLEL_CLASSES_AND_METHODS')

            if threads is not None:
                args = remove_arg(args, '-parallel-threads', has_param=True)
                args += ['-parallel-threads', str(threads)]

            batch_test_specs = [test.render_test_spec() for test in batch]
            with argfile.safe_args(batch_test_specs,
                                   self.get_options()) as batch_tests:
                with self._chroot(relevant_targets, workdir) as chroot:
                    self.context.log.debug('CWD = {}'.format(chroot))
                    self.context.log.debug('platform = {}'.format(platform))
                    with environment_as(**dict(target_env_vars)):
                        subprocess_result = self._spawn_and_wait(
                            executor=SubprocessExecutor(distribution),
                            distribution=distribution,
                            classpath=complete_classpath,
                            main=JUnit.RUNNER_MAIN,
                            jvm_options=self.jvm_options + extra_jvm_options +
                            list(target_jvm_options),
                            args=args + batch_tests,
                            workunit_factory=self.context.new_workunit,
                            workunit_name='run',
                            workunit_labels=[WorkUnitLabel.TEST],
                            cwd=chroot,
                            synthetic_jar_dir=batch_output_dir,
                            create_synthetic_jar=self.synthetic_classpath,
                        )
                        self.context.log.debug(
                            'JUnit subprocess exited with result ({})'.format(
                                subprocess_result))
                        result += abs(subprocess_result)

                tests_info = self.parse_test_info(batch_output_dir,
                                                  parse_error_handler,
                                                  ['classname'])
                for test_name, test_info in tests_info.items():
                    test_item = Test(test_info['classname'], test_name)
                    test_target = test_registry.get_owning_target(test_item)
                    self.report_all_info_for_single_test(
                        self.options_scope, test_target, test_name, test_info)

                if result != 0 and fail_fast:
                    break

        if result == 0:
            return TestResult.rc(0)

        target_to_failed_test = parse_failed_targets(test_registry, output_dir,
                                                     parse_error_handler)

        def sort_owning_target(t):
            return t.address.spec if t else None

        failed_targets = sorted(target_to_failed_test, key=sort_owning_target)
        error_message_lines = []
        if self._failure_summary:

            def render_owning_target(t):
                return t.address.reference() if t else '<Unknown Target>'

            for target in failed_targets:
                error_message_lines.append('\n{indent}{owner}'.format(
                    indent=' ' * 4, owner=render_owning_target(target)))
                for test in sorted(target_to_failed_test[target]):
                    error_message_lines.append(
                        '{indent}{classname}#{methodname}'.format(
                            indent=' ' * 8,
                            classname=test.classname,
                            methodname=test.methodname))
        error_message_lines.append(
            '\njava {main} ... exited non-zero ({code}); {failed} failed {targets}.'
            .format(main=JUnit.RUNNER_MAIN,
                    code=result,
                    failed=len(failed_targets),
                    targets=pluralize(len(failed_targets), 'target')))
        return TestResult(msg='\n'.join(error_message_lines),
                          rc=result,
                          failed_targets=failed_targets)
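
The concurrency handling above rewrites the runner arguments with remove_arg and ensure_arg. The helpers below are rough stand-ins whose behavior is inferred from those call sites; they are not the real pants implementations.

# Illustrative stand-ins for the argument-editing helpers used above; behavior is
# inferred from the call sites (e.g. remove_arg(args, '-parallel-threads', has_param=True)).
def remove_arg(args, flag, has_param=False):
  """Return a copy of args without `flag` (and its following value when has_param)."""
  result = []
  skip_next = False
  for arg in args:
    if skip_next:
      skip_next = False
      continue
    if arg == flag:
      skip_next = has_param
      continue
    result.append(arg)
  return result

def ensure_arg(args, flag, param=None):
  """Return a copy of args guaranteed to contain `flag` (followed by `param`, if given)."""
  args = remove_arg(args, flag, has_param=param is not None)
  return args + ([flag, param] if param is not None else [flag])

# e.g. ensure_arg(['-xmlreport'], '-default-concurrency', param='SERIAL')
#   -> ['-xmlreport', '-default-concurrency', 'SERIAL']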
Example #26
0
    def _run_tests(self, tests_to_targets, extra_jvm_options=None, classpath_prepend=(), classpath_append=()):
        extra_jvm_options = extra_jvm_options or []

        tests_by_properties = self._tests_by_properties(
            tests_to_targets, self._infer_workdir, lambda target: target.test_platform
        )

        # The 'instrument_classpath' product below will be None if not set; we default back to runtime_classpath.
        classpath_product = self._context.products.get_data("instrument_classpath")

        result = 0
        for (workdir, platform), tests in tests_by_properties.items():
            for batch in self._partition(tests):
                # Batches of test classes will likely exist within the same targets: dedupe them.
                relevant_targets = set(map(tests_to_targets.get, batch))
                classpath = self._task_exports.classpath(
                    relevant_targets,
                    classpath_prefix=self._task_exports.tool_classpath("junit"),
                    classpath_product=classpath_product,
                )
                complete_classpath = OrderedSet()
                complete_classpath.update(classpath_prepend)
                complete_classpath.update(classpath)
                complete_classpath.update(classpath_append)
                distribution = self.preferred_jvm_distribution([platform])
                with binary_util.safe_args(batch, self._task_exports.task_options) as batch_tests:
                    self._context.log.debug("CWD = {}".format(workdir))
                    self._context.log.debug("platform = {}".format(platform))
                    result += abs(
                        distribution.execute_java(
                            classpath=complete_classpath,
                            main=JUnitRun._MAIN,
                            jvm_options=self._task_exports.jvm_options + extra_jvm_options,
                            args=self._args + batch_tests + ["-xmlreport"],
                            workunit_factory=self._context.new_workunit,
                            workunit_name="run",
                            workunit_labels=[WorkUnitLabel.TEST],
                            cwd=workdir,
                        )
                    )

                    if result != 0 and self._fail_fast:
                        break

        if result != 0:
            failed_targets_and_tests = self._get_failed_targets(tests_to_targets)
            failed_targets = sorted(failed_targets_and_tests, key=lambda target: target.address.spec)
            error_message_lines = []
            if self._failure_summary:
                for target in failed_targets:
                    error_message_lines.append("\n{0}{1}".format(" " * 4, target.address.spec))
                    for test in sorted(failed_targets_and_tests[target]):
                        error_message_lines.append("{0}{1}".format(" " * 8, test))
            error_message_lines.append(
                "\njava {main} ... exited non-zero ({code}); {failed} failed {targets}.".format(
                    main=JUnitRun._MAIN,
                    code=result,
                    failed=len(failed_targets),
                    targets=pluralize(len(failed_targets), "target"),
                )
            )
            raise TestFailedTaskError("\n".join(error_message_lines), failed_targets=list(failed_targets))
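
Each variant layers the final classpath as prepend, then the JUnit tool/runner jars, then the targets' own classpath, then an append, relying on OrderedSet so earlier entries win and duplicates drop out. Here is a small plain-Python sketch of that ordering behavior; it is illustrative only, since the code above imports a real OrderedSet.

# Illustrative sketch of the ordered, de-duplicating classpath assembly above.
# dict preserves insertion order in Python 3.7+, giving the same first-wins semantics.
def layered_classpath(prepend, tool_classpath, target_classpath, append):
  """Concatenate the classpath layers, keeping the first occurrence of each entry."""
  ordered = dict.fromkeys(list(prepend) + list(tool_classpath) +
                          list(target_classpath) + list(append))
  return list(ordered)

# e.g. layered_classpath(['cov.jar'], ['junit.jar'], ['a.jar', 'junit.jar'], ['extra.jar'])
#   -> ['cov.jar', 'junit.jar', 'a.jar', 'extra.jar']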
Example #27
0
    def _run_tests(self, test_registry, output_dir, coverage=None):
        if coverage:
            extra_jvm_options = coverage.extra_jvm_options
            classpath_prepend = coverage.classpath_prepend
            classpath_append = coverage.classpath_append
        else:
            extra_jvm_options = []
            classpath_prepend = ()
            classpath_append = ()

        tests_by_properties = test_registry.index(
            lambda tgt: tgt.cwd if tgt.cwd is not None else self._working_dir,
            lambda tgt: tgt.test_platform,
            lambda tgt: tgt.payload.extra_jvm_options,
            lambda tgt: tgt.payload.extra_env_vars,
            lambda tgt: tgt.concurrency, lambda tgt: tgt.threads)

        # The 'instrument_classpath' product below will be None if not set; we default back to runtime_classpath.
        classpath_product = self.context.products.get_data(
            'instrument_classpath')

        result = 0
        for properties, tests in tests_by_properties.items():
            (workdir, platform, target_jvm_options, target_env_vars,
             concurrency, threads) = properties
            for batch in self._partition(tests):
                # Batches of test classes will likely exist within the same targets: dedupe them.
                relevant_targets = {
                    test_registry.get_owning_target(t)
                    for t in batch
                }
                complete_classpath = OrderedSet()
                complete_classpath.update(classpath_prepend)
                complete_classpath.update(
                    JUnit.global_instance().runner_classpath(self.context))
                complete_classpath.update(
                    self.classpath(relevant_targets,
                                   classpath_product=classpath_product))
                complete_classpath.update(classpath_append)
                distribution = JvmPlatform.preferred_jvm_distribution(
                    [platform], self._strict_jvm_version)

                # Override cmdline args with values from junit_test() target that specify concurrency:
                args = self._args(output_dir) + [u'-xmlreport']

                if concurrency is not None:
                    args = remove_arg(args, '-default-parallel')
                    if concurrency == JUnitTests.CONCURRENCY_SERIAL:
                        args = ensure_arg(args,
                                          '-default-concurrency',
                                          param='SERIAL')
                    elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES:
                        args = ensure_arg(args,
                                          '-default-concurrency',
                                          param='PARALLEL_CLASSES')
                    elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_METHODS:
                        args = ensure_arg(args,
                                          '-default-concurrency',
                                          param='PARALLEL_METHODS')
                    elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES_AND_METHODS:
                        args = ensure_arg(args,
                                          '-default-concurrency',
                                          param='PARALLEL_CLASSES_AND_METHODS')

                if threads is not None:
                    args = remove_arg(args,
                                      '-parallel-threads',
                                      has_param=True)
                    args += ['-parallel-threads', str(threads)]

                batch_test_specs = [test.render_test_spec() for test in batch]
                with argfile.safe_args(batch_test_specs,
                                       self.get_options()) as batch_tests:
                    self.context.log.debug('CWD = {}'.format(workdir))
                    self.context.log.debug('platform = {}'.format(platform))
                    with environment_as(**dict(target_env_vars)):
                        result += abs(
                            self._spawn_and_wait(
                                executor=SubprocessExecutor(distribution),
                                distribution=distribution,
                                classpath=complete_classpath,
                                main=JUnit.RUNNER_MAIN,
                                jvm_options=self.jvm_options +
                                extra_jvm_options + list(target_jvm_options),
                                args=args + batch_tests,
                                workunit_factory=self.context.new_workunit,
                                workunit_name='run',
                                workunit_labels=[WorkUnitLabel.TEST],
                                cwd=workdir,
                                synthetic_jar_dir=output_dir,
                                create_synthetic_jar=self.synthetic_classpath,
                            ))

                    if result != 0 and self._fail_fast:
                        break

        if result != 0:

            def error_handler(parse_error):
                # Just log and move on since the result is only used to characterize failures, and raising
                # an error here would just distract from the underlying test failures.
                self.context.log.error(
                    'Error parsing test result file {path}: {cause}'.format(
                        path=parse_error.junit_xml_path,
                        cause=parse_error.cause))

            target_to_failed_test = parse_failed_targets(
                test_registry, output_dir, error_handler)
            failed_targets = sorted(target_to_failed_test,
                                    key=lambda t: t.address.spec)
            error_message_lines = []
            if self._failure_summary:
                for target in failed_targets:
                    error_message_lines.append('\n{indent}{address}'.format(
                        indent=' ' * 4, address=target.address.spec))
                    for test in sorted(target_to_failed_test[target]):
                        error_message_lines.append(
                            '{indent}{classname}#{methodname}'.format(
                                indent=' ' * 8,
                                classname=test.classname,
                                methodname=test.methodname))
            error_message_lines.append(
                '\njava {main} ... exited non-zero ({code}); {failed} failed {targets}.'
                .format(main=JUnit.RUNNER_MAIN,
                        code=result,
                        failed=len(failed_targets),
                        targets=pluralize(len(failed_targets), 'target')))
            raise ErrorWhileTesting('\n'.join(error_message_lines),
                                    failed_targets=list(failed_targets))
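
The run above applies per-target environment variables through environment_as(**dict(target_env_vars)). Below is a minimal sketch of the set-then-restore contract that usage implies; it is an assumption about the shape, not the pants implementation.

import os
from contextlib import contextmanager

# Illustrative sketch of the temporary-environment contract implied by the
# `with environment_as(**dict(target_env_vars)):` blocks above.
@contextmanager
def environment_as(**env_vars):
  """Set the given environment variables for the duration of the block, then restore them."""
  saved = {key: os.environ.get(key) for key in env_vars}
  try:
    os.environ.update({key: str(value) for key, value in env_vars.items()})
    yield
  finally:
    for key, original in saved.items():
      if original is None:
        os.environ.pop(key, None)
      else:
        os.environ[key] = original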
Example #28
0
  def _run_tests(self, tests_to_targets):
    if self._coverage:
      extra_jvm_options = self._coverage.extra_jvm_options
      classpath_prepend = self._coverage.classpath_prepend
      classpath_append = self._coverage.classpath_append
    else:
      extra_jvm_options = []
      classpath_prepend = ()
      classpath_append = ()

    tests_by_properties = self._tests_by_properties(
      tests_to_targets,
      self._infer_workdir,
      lambda target: target.test_platform,
      lambda target: target.payload.extra_jvm_options,
      lambda target: target.payload.extra_env_vars,
      lambda target: target.concurrency,
      lambda target: target.threads
    )

    # The 'instrument_classpath' product below will be None if not set; we default back to runtime_classpath.
    classpath_product = self.context.products.get_data('instrument_classpath')

    result = 0
    for properties, tests in tests_by_properties.items():
      (workdir, platform, target_jvm_options, target_env_vars, concurrency, threads) = properties
      for batch in self._partition(tests):
        # Batches of test classes will likely exist within the same targets: dedupe them.
        relevant_targets = set(map(tests_to_targets.get, batch))
        complete_classpath = OrderedSet()
        complete_classpath.update(classpath_prepend)
        complete_classpath.update(self.tool_classpath('junit'))
        complete_classpath.update(self.classpath(relevant_targets,
                                                 classpath_product=classpath_product))
        complete_classpath.update(classpath_append)
        distribution = JvmPlatform.preferred_jvm_distribution([platform], self._strict_jvm_version)

        # Override cmdline args with values from junit_test() target that specify concurrency:
        args = self._args + [u'-xmlreport']

        if concurrency is not None:
          args = remove_arg(args, '-default-parallel')
          if concurrency == junit_tests.CONCURRENCY_SERIAL:
            args = ensure_arg(args, '-default-concurrency', param='SERIAL')
          elif concurrency == junit_tests.CONCURRENCY_PARALLEL_CLASSES:
            args = ensure_arg(args, '-default-concurrency', param='PARALLEL_CLASSES')
          elif concurrency == junit_tests.CONCURRENCY_PARALLEL_METHODS:
            args = ensure_arg(args, '-default-concurrency', param='PARALLEL_METHODS')
          elif concurrency == junit_tests.CONCURRENCY_PARALLEL_CLASSES_AND_METHODS:
            args = ensure_arg(args, '-default-concurrency', param='PARALLEL_CLASSES_AND_METHODS')

        if threads is not None:
          args = remove_arg(args, '-parallel-threads', has_param=True)
          args += ['-parallel-threads', str(threads)]

        with argfile.safe_args(batch, self.get_options()) as batch_tests:
          self.context.log.debug('CWD = {}'.format(workdir))
          self.context.log.debug('platform = {}'.format(platform))
          with environment_as(**dict(target_env_vars)):
            result += abs(self._spawn_and_wait(
              executor=SubprocessExecutor(distribution),
              distribution=distribution,
              classpath=complete_classpath,
              main=JUnitRun._MAIN,
              jvm_options=self.jvm_options + extra_jvm_options + list(target_jvm_options),
              args=args + batch_tests,
              workunit_factory=self.context.new_workunit,
              workunit_name='run',
              workunit_labels=[WorkUnitLabel.TEST],
              cwd=workdir,
              synthetic_jar_dir=self.workdir,
              create_synthetic_jar=self.synthetic_classpath,
            ))

          if result != 0 and self._fail_fast:
            break

    if result != 0:
      failed_targets_and_tests = self._get_failed_targets(tests_to_targets)
      failed_targets = sorted(failed_targets_and_tests, key=lambda target: target.address.spec)
      error_message_lines = []
      if self._failure_summary:
        for target in failed_targets:
          error_message_lines.append('\n{0}{1}'.format(' '*4, target.address.spec))
          for test in sorted(failed_targets_and_tests[target]):
            error_message_lines.append('{0}{1}'.format(' '*8, test))
      error_message_lines.append(
        '\njava {main} ... exited non-zero ({code}); {failed} failed {targets}.'
          .format(main=JUnitRun._MAIN, code=result, failed=len(failed_targets),
                  targets=pluralize(len(failed_targets), 'target'))
      )
      raise TestFailedTaskError('\n'.join(error_message_lines), failed_targets=list(failed_targets))
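
All of these variants fold per-batch JVM exit codes into one result with result += abs(...) and honor fail-fast by breaking out of the batch loop. A stripped-down sketch of that aggregation follows, where run_batch is a placeholder for the JVM invocation.

# Stripped-down sketch of the exit-code aggregation and fail-fast behavior above.
# `run_batch` is a placeholder callable returning the subprocess exit code for one batch.
def run_batches(batches, run_batch, fail_fast=False):
  """Return the accumulated (always non-negative) exit code across batches."""
  result = 0
  for batch in batches:
    result += abs(run_batch(batch))
    if result != 0 and fail_fast:
      break  # stop running further batches once anything has failed
  return result

# e.g. run_batches([['TestA'], ['TestB']], lambda batch: 0) -> 0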
Example #29
0
  def _run_tests(self, tests_to_targets):
    if self._coverage:
      extra_jvm_options = self._coverage.extra_jvm_options
      classpath_prepend = self._coverage.classpath_prepend
      classpath_append = self._coverage.classpath_append
    else:
      extra_jvm_options = []
      classpath_prepend = ()
      classpath_append = ()

    tests_by_properties = self._tests_by_properties(
      tests_to_targets,
      self._infer_workdir,
      lambda target: target.test_platform,
      lambda target: target.payload.extra_jvm_options,
      lambda target: target.payload.extra_env_vars,
      lambda target: target.concurrency,
      lambda target: target.threads
    )

    # The 'instrument_classpath' product below will be None if not set; we default back to runtime_classpath.
    classpath_product = self.context.products.get_data('instrument_classpath')

    result = 0
    for properties, tests in tests_by_properties.items():
      (workdir, platform, target_jvm_options, target_env_vars, concurrency, threads) = properties
      for batch in self._partition(tests):
        # Batches of test classes will likely exist within the same targets: dedupe them.
        relevant_targets = set(map(tests_to_targets.get, batch))
        complete_classpath = OrderedSet()
        complete_classpath.update(classpath_prepend)
        complete_classpath.update(self.tool_classpath('junit'))
        complete_classpath.update(self.classpath(relevant_targets,
                                                 classpath_product=classpath_product))
        complete_classpath.update(classpath_append)
        distribution = JvmPlatform.preferred_jvm_distribution([platform], self._strict_jvm_version)

        # Override cmdline args with values from junit_test() target that specify concurrency:
        args = self._args + [u'-xmlreport']

        # TODO(zundel): Combine these into a single -concurrency choices-style argument
        if concurrency == junit_tests.CONCURRENCY_SERIAL:
          args = remove_arg(args, '-default-parallel')
        elif concurrency == junit_tests.CONCURRENCY_PARALLEL_CLASSES:
          args = ensure_arg(args, '-default-parallel')
        elif concurrency == junit_tests.CONCURRENCY_PARALLEL_METHODS:
          self.context.log.warn('Not implemented: parallel_methods')
        elif concurrency == junit_tests.CONCURRENCY_PARALLEL_BOTH:
          self.context.log.warn('specifying {} is experimental.'.format(concurrency))
          args = ensure_arg(args, '-default-parallel')
          args = ensure_arg(args, '-parallel-methods')
        if threads is not None:
          args = remove_arg(args, '-parallel-threads', has_param=True)
          args += ['-parallel-threads', str(threads)]

        with binary_util.safe_args(batch, self.get_options()) as batch_tests:
          self.context.log.debug('CWD = {}'.format(workdir))
          self.context.log.debug('platform = {}'.format(platform))
          with environment_as(**dict(target_env_vars)):
            result += abs(self._spawn_and_wait(
              executor=SubprocessExecutor(distribution),
              distribution=distribution,
              classpath=complete_classpath,
              main=JUnitRun._MAIN,
              jvm_options=self.jvm_options + extra_jvm_options + list(target_jvm_options),
              args=args + batch_tests,
              workunit_factory=self.context.new_workunit,
              workunit_name='run',
              workunit_labels=[WorkUnitLabel.TEST],
              cwd=workdir,
              synthetic_jar_dir=self.workdir,
              create_synthetic_jar=self.synthetic_classpath,
            ))

          if result != 0 and self._fail_fast:
            break

    if result != 0:
      failed_targets_and_tests = self._get_failed_targets(tests_to_targets)
      failed_targets = sorted(failed_targets_and_tests, key=lambda target: target.address.spec)
      error_message_lines = []
      if self._failure_summary:
        for target in failed_targets:
          error_message_lines.append('\n{0}{1}'.format(' '*4, target.address.spec))
          for test in sorted(failed_targets_and_tests[target]):
            error_message_lines.append('{0}{1}'.format(' '*8, test))
      error_message_lines.append(
        '\njava {main} ... exited non-zero ({code}); {failed} failed {targets}.'
          .format(main=JUnitRun._MAIN, code=result, failed=len(failed_targets),
                  targets=pluralize(len(failed_targets), 'target'))
      )
      raise TestFailedTaskError('\n'.join(error_message_lines), failed_targets=list(failed_targets))
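
The TODO(zundel) in this last variant suggests replacing the if/elif chain with a single concurrency choice that maps to runner flags. One possible table-driven sketch of that idea follows; the string keys merely mirror the concurrency modes above, and none of this is existing pants code.

# Sketch of the TODO(zundel) above: drive the flag edits from one table instead of an
# if/elif chain. The string keys stand in for the junit_tests concurrency constants.
CONCURRENCY_FLAGS = {
  'SERIAL': [],
  'PARALLEL_CLASSES': ['-default-parallel'],
  'PARALLEL_BOTH': ['-default-parallel', '-parallel-methods'],
}

def apply_concurrency(args, concurrency):
  """Return args rewritten for the requested concurrency mode."""
  args = [a for a in args if a not in ('-default-parallel', '-parallel-methods')]
  return args + CONCURRENCY_FLAGS.get(concurrency, [])

# e.g. apply_concurrency(['-xmlreport'], 'PARALLEL_BOTH')
#   -> ['-xmlreport', '-default-parallel', '-parallel-methods']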