Example #1
import unittest

# Assumes `JUnitTest` is importable from the code under test.
class TestTest(unittest.TestCase):
  def setUp(self):
    self.class_test = JUnitTest('class')
    self.method_test = JUnitTest('class', 'method')

  def test_no_method_normalization(self):
    def test_normalization(variant):
      self.assertEqual(variant, self.class_test)
      self.assertIsNone(variant.methodname)

    test_normalization(JUnitTest('class', ''))
    test_normalization(JUnitTest('class', None))
    test_normalization(JUnitTest('class'))

  def test_enclosing(self):
    self.assertIs(self.class_test, self.class_test.enclosing())
    self.assertEqual(self.class_test, self.method_test.enclosing())

  def test_render_test_spec(self):
    self.assertEqual('class', self.class_test.render_test_spec())
    self.assertEqual('class#method', self.method_test.render_test_spec())
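
A minimal sketch of a JUnitTest value type that would satisfy these assertions, inferred purely from the test case above (the names and the normalization behavior come from the assertions; the real class may carry more state):

class JUnitTest(object):
  """Sketch only: just enough structure to satisfy the test case above."""

  def __init__(self, classname, methodname=None):
    self.classname = classname
    # Normalize '' and None to None, per test_no_method_normalization.
    self.methodname = methodname or None

  def __eq__(self, other):
    return (isinstance(other, JUnitTest) and
            (self.classname, self.methodname) == (other.classname, other.methodname))

  def __hash__(self):
    return hash((self.classname, self.methodname))

  def enclosing(self):
    # A class-level test encloses itself; a method-level test is enclosed by
    # its class-level test, per test_enclosing.
    return self if self.methodname is None else JUnitTest(self.classname)

  def render_test_spec(self):
    # 'class' or 'class#method', per test_render_test_spec.
    if self.methodname is None:
      return self.classname
    return '{}#{}'.format(self.classname, self.methodname)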
Example #2
    def _parse(self, test_spec_str):
        """Parses a test specification string into an object that can yield corresponding tests.

    Tests can be specified in one of four forms:

    * [classname]
    * [classname]#[methodname]
    * [fully qualified classname]#[methodname]
    * [fully qualified classname]#[methodname]

    :param string test_spec: A test specification.
    :returns: A Test object.
    :rtype: :class:`Test`
    """
        components = test_spec_str.split('#', 2)
        classname = components[0]
        methodname = components[1] if len(components) == 2 else None

        return Test(classname=classname, methodname=methodname)
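
To make the accepted forms concrete, here is a small standalone check mirroring the split logic above; plain tuples stand in for the Test object:

# Standalone mirror of the split in _parse above; tuples stand in for Test.
def parse(test_spec_str):
    components = test_spec_str.split('#', 2)
    classname = components[0]
    methodname = components[1] if len(components) == 2 else None
    return classname, methodname

assert parse('FooTest') == ('FooTest', None)
assert parse('FooTest#testBar') == ('FooTest', 'testBar')
assert parse('org.example.FooTest') == ('org.example.FooTest', None)
assert parse('org.example.FooTest#testBar') == ('org.example.FooTest', 'testBar')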
Example #3
    def _get_failed_targets_from_junitxml(self, junitxml, targets):
        # Note that unlike in Java, we can't easily map targets to test classnames up-front.
        # Instead we grab the classnames seen in practice and work backwards to the targets.

        # First map the dotted paths of the modules to their respective targets.
        pex_src_root = os.path.relpath(
            self.context.products.get_data(
                GatherSources.PYTHON_SOURCES).path(), get_buildroot())
        dotted_module_path_to_target = {}
        for target in targets:
            for src in target.sources_relative_to_source_root():
                pex_src = os.path.join(pex_src_root, src)
                dotted_path = os.path.splitext(pex_src)[0].replace(
                    os.path.sep, '.')
                dotted_module_path_to_target[dotted_path] = target

        # Now grab the classnames from the xml file.
        xml = XmlParser.from_file(junitxml)
        classname_and_names = (
            (testcase.getAttribute('classname'), testcase.getAttribute('name'))
            for testcase in xml.parsed.getElementsByTagName('testcase'))

        # Now find which module each classname belongs to, and map it to its target.
        test_target_pairs = []
        for classname, name in classname_and_names:
            # If the classname is empty, it means that there was an error in the module body,
            # outside any class or method body.  In this case the module name in its entirety
            # ends up in the 'name' attribute.
            dotted_path = classname or name
            while dotted_path and dotted_path not in dotted_module_path_to_target:
                dotted_path = dotted_path.rpartition('.')[0]
            if dotted_path:
                target = dotted_module_path_to_target[dotted_path]
                test_target_pairs.append((Test(classname), target))

        # Now parse the junit xml the usual way.
        def error_handler(e):
            raise TaskError(e)

        failed_targets_map = parse_failed_targets(
            TestRegistry(test_target_pairs), junitxml, error_handler)
        return failed_targets_map.keys()
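
The rpartition loop above resolves a test's dotted classname back to its owning module by stripping trailing components until it hits a known module path. The same logic in isolation, with a hypothetical module map:

# The backtracking resolution used above, isolated, with a hypothetical map.
dotted_module_path_to_target = {'tests.foo_test': '<target for tests/foo_test.py>'}

def resolve(dotted_path):
    # Drop trailing dotted components until the remainder names a known module.
    while dotted_path and dotted_path not in dotted_module_path_to_target:
        dotted_path = dotted_path.rpartition('.')[0]
    return dotted_module_path_to_target.get(dotted_path)

assert resolve('tests.foo_test.FooTest') == '<target for tests/foo_test.py>'
assert resolve('tests.foo_test') == '<target for tests/foo_test.py>'
assert resolve('some.unknown.module') is None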
Example #4
    def run_tests(self, fail_fast, test_targets, output_dir, coverage):
        test_registry = self._collect_test_targets(test_targets)
        if test_registry.empty:
            return TestResult.rc(0)

        coverage.instrument(output_dir)

        def parse_error_handler(parse_error):
            # Just log and move on since the result is only used to characterize failures, and raising
            # an error here would just distract from the underlying test failures.
            self.context.log.error(
                'Error parsing test result file {path}: {cause}'.format(
                    path=parse_error.xml_path, cause=parse_error.cause))

        # The 'instrument_classpath' product below will be `None` if not set, and we'll default
        # back to 'runtime_classpath'.
        classpath_product = self.context.products.get_data(
            'instrument_classpath')

        result = 0
        for batch_id, (properties,
                       batch) in enumerate(self._iter_batches(test_registry)):
            (workdir, platform, target_jvm_options, target_env_vars,
             concurrency, threads) = properties

            batch_output_dir = output_dir
            if self._batched:
                batch_output_dir = os.path.join(batch_output_dir,
                                                'batch-{}'.format(batch_id))

            run_modifications = coverage.run_modifications(batch_output_dir)

            extra_jvm_options = run_modifications.extra_jvm_options

            # Batches of test classes will likely exist within the same targets: dedupe them.
            relevant_targets = {
                test_registry.get_owning_target(t)
                for t in batch
            }

            complete_classpath = OrderedSet()
            complete_classpath.update(run_modifications.classpath_prepend)
            complete_classpath.update(JUnit.global_instance().runner_classpath(
                self.context))
            complete_classpath.update(
                self.classpath(relevant_targets,
                               classpath_product=classpath_product))

            distribution = JvmPlatform.preferred_jvm_distribution(
                [platform], self._strict_jvm_version)

            # Override cmdline args with values from junit_test() target that specify concurrency:
            args = self._args(fail_fast, batch_output_dir) + [u'-xmlreport']

            if concurrency is not None:
                args = remove_arg(args, '-default-parallel')
                if concurrency == JUnitTests.CONCURRENCY_SERIAL:
                    args = ensure_arg(args,
                                      '-default-concurrency',
                                      param='SERIAL')
                elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES:
                    args = ensure_arg(args,
                                      '-default-concurrency',
                                      param='PARALLEL_CLASSES')
                elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_METHODS:
                    args = ensure_arg(args,
                                      '-default-concurrency',
                                      param='PARALLEL_METHODS')
                elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES_AND_METHODS:
                    args = ensure_arg(args,
                                      '-default-concurrency',
                                      param='PARALLEL_CLASSES_AND_METHODS')

            if threads is not None:
                args = remove_arg(args, '-parallel-threads', has_param=True)
                args += ['-parallel-threads', str(threads)]

            batch_test_specs = [test.render_test_spec() for test in batch]
            with argfile.safe_args(batch_test_specs,
                                   self.get_options()) as batch_tests:
                with self._chroot(relevant_targets, workdir) as chroot:
                    self.context.log.debug('CWD = {}'.format(chroot))
                    self.context.log.debug('platform = {}'.format(platform))
                    with environment_as(**dict(target_env_vars)):
                        subprocess_result = self._spawn_and_wait(
                            executor=SubprocessExecutor(distribution),
                            distribution=distribution,
                            classpath=complete_classpath,
                            main=JUnit.RUNNER_MAIN,
                            jvm_options=self.jvm_options + extra_jvm_options +
                            list(target_jvm_options),
                            args=args + batch_tests,
                            workunit_factory=self.context.new_workunit,
                            workunit_name='run',
                            workunit_labels=[WorkUnitLabel.TEST],
                            cwd=chroot,
                            synthetic_jar_dir=batch_output_dir,
                            create_synthetic_jar=self.synthetic_classpath,
                        )
                        self.context.log.debug(
                            'JUnit subprocess exited with result ({})'.format(
                                subprocess_result))
                        result += abs(subprocess_result)

                tests_info = self.parse_test_info(batch_output_dir,
                                                  parse_error_handler,
                                                  ['classname'])
                for test_name, test_info in tests_info.items():
                    test_item = Test(test_info['classname'], test_name)
                    test_target = test_registry.get_owning_target(test_item)
                    self.report_all_info_for_single_test(
                        self.options_scope, test_target, test_name, test_info)

                if result != 0 and fail_fast:
                    break

        if result == 0:
            return TestResult.rc(0)

        target_to_failed_test = parse_failed_targets(test_registry, output_dir,
                                                     parse_error_handler)

        def sort_owning_target(t):
            return t.address.spec if t else None

        failed_targets = sorted(target_to_failed_test, key=sort_owning_target)
        error_message_lines = []
        if self._failure_summary:

            def render_owning_target(t):
                return t.address.reference() if t else '<Unknown Target>'

            for target in failed_targets:
                error_message_lines.append('\n{indent}{owner}'.format(
                    indent=' ' * 4, owner=render_owning_target(target)))
                for test in sorted(target_to_failed_test[target]):
                    error_message_lines.append(
                        '{indent}{classname}#{methodname}'.format(
                            indent=' ' * 8,
                            classname=test.classname,
                            methodname=test.methodname))
        error_message_lines.append(
            '\njava {main} ... exited non-zero ({code}); {failed} failed {targets}.'
            .format(main=JUnit.RUNNER_MAIN,
                    code=result,
                    failed=len(failed_targets),
                    targets=pluralize(len(failed_targets), 'target')))
        return TestResult(msg='\n'.join(error_message_lines),
                          rc=result,
                          failed_targets=failed_targets)
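
The concurrency handling above leans on two small argument-list helpers, remove_arg and ensure_arg. Their semantics can be sketched from the call sites alone; this is an inference, not the project's actual implementation:

# Plausible behavior of the helpers used above, inferred from their call
# sites only; a sketch, not the project's real implementation.
def remove_arg(args, arg, has_param=False):
    # Drop the first occurrence of the flag, and its value when it takes one.
    if arg in args:
        i = args.index(arg)
        del args[i:i + (2 if has_param else 1)]
    return args

def ensure_arg(args, arg, param=None):
    # Re-add the flag (with its parameter, if any) so exactly one copy remains.
    args = remove_arg(args, arg, has_param=param is not None)
    args.append(arg)
    if param is not None:
        args.append(param)
    return args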
Example #5
def iter_possible_tests(self, context):
    yield Test(classname=self._classname, methodname=self._methodname)
Example #6
def iter_possible_tests(self, context):
    for classname in self._classnames_from_source_file(context):
        # Tack the methodname onto all classes in the source file, as we
        # can't know which method the user intended.
        yield Test(classname=classname, methodname=self._methodname)
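
Both variants, this one and the single-class one in the previous example, expose the same iter_possible_tests protocol, so a caller can stay agnostic about what kind of spec it holds. A hypothetical driver:

# Hypothetical driver: walk every candidate test across a mixed list of spec
# objects, regardless of whether each spec named a class or a source file.
def iter_all_possible_tests(specs, context):
    for spec in specs:
        for test in spec.iter_possible_tests(context):
            yield test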
Example #7
def setUp(self):
    self.class_test = JUnitTest('class')
    self.method_test = JUnitTest('class', 'method')
Example #8
    def run_tests(self, fail_fast, test_targets, output_dir, coverage, complete_test_registry):
        test_registry = complete_test_registry.filter(test_targets)
        if test_registry.empty:
            return TestResult.successful

        coverage.instrument(output_dir)

        def parse_error_handler(parse_error):
            # Just log and move on since the result is only used to characterize failures, and raising
            # an error here would just distract from the underlying test failures.
            self.context.log.error(
                "Error parsing test result file {path}: {cause}".format(
                    path=parse_error.xml_path, cause=parse_error.cause
                )
            )

        # The 'instrument_classpath' product below will be `None` if not set, and we'll default
        # back to 'runtime_classpath'.
        classpath_product = self.context.products.get_data("instrument_classpath")

        result = 0
        for batch_id, (properties, batch) in enumerate(self._iter_batches(test_registry)):
            (
                workdir,
                platform,
                target_jvm_options,
                target_env_vars,
                concurrency,
                threads,
            ) = properties

            batch_output_dir = output_dir
            if self._batched:
                batch_output_dir = os.path.join(batch_output_dir, f"batch-{batch_id}")

            run_modifications = coverage.run_modifications(batch_output_dir)
            self.context.log.debug(f"run_modifications: {run_modifications}")

            extra_jvm_options = run_modifications.extra_jvm_options

            # Batches of test classes will likely exist within the same targets: dedupe them.
            relevant_targets = {test_registry.get_owning_target(t) for t in batch}

            complete_classpath = OrderedSet()
            complete_classpath.update(run_modifications.classpath_prepend)
            complete_classpath.update(JUnit.global_instance().runner_classpath(self.context))
            complete_classpath.update(
                self.classpath(relevant_targets, classpath_product=classpath_product)
            )

            distribution = self.preferred_jvm_distribution([platform], self._strict_jvm_version)

            # Override cmdline args with values from junit_test() target that specify concurrency:
            args = self._args(fail_fast, batch_output_dir) + ["-xmlreport"]

            if concurrency is not None:
                args = remove_arg(args, "-default-parallel")
                if concurrency == JUnitTests.CONCURRENCY_SERIAL:
                    args = ensure_arg(args, "-default-concurrency", param="SERIAL")
                elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES:
                    args = ensure_arg(args, "-default-concurrency", param="PARALLEL_CLASSES")
                elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_METHODS:
                    args = ensure_arg(args, "-default-concurrency", param="PARALLEL_METHODS")
                elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES_AND_METHODS:
                    args = ensure_arg(
                        args, "-default-concurrency", param="PARALLEL_CLASSES_AND_METHODS"
                    )

            if threads is not None:
                args = remove_arg(args, "-parallel-threads", has_param=True)
                args += ["-parallel-threads", str(threads)]

            batch_test_specs = [test.render_test_spec() for test in batch]
            with argfile.safe_args(batch_test_specs, self.get_options()) as batch_tests:
                with self.chroot(relevant_targets, workdir) as chroot:
                    self.context.log.debug(f"CWD = {chroot}")
                    self.context.log.debug(f"platform = {platform}")
                    with environment_as(**dict(target_env_vars)):
                        subprocess_result = self.spawn_and_wait(
                            relevant_targets,
                            executor=SubprocessExecutor(distribution),
                            distribution=distribution,
                            classpath=complete_classpath,
                            main=JUnit.RUNNER_MAIN,
                            jvm_options=self.jvm_options
                            + list(platform.jvm_options)
                            + extra_jvm_options
                            + list(target_jvm_options),
                            args=args + batch_tests,
                            workunit_factory=self.context.new_workunit,
                            workunit_name="run",
                            workunit_labels=[WorkUnitLabel.TEST],
                            cwd=chroot,
                            synthetic_jar_dir=batch_output_dir,
                            create_synthetic_jar=self.synthetic_classpath,
                        )
                        self.context.log.debug(
                            "JUnit subprocess exited with result ({})".format(subprocess_result)
                        )
                        result += abs(subprocess_result)

                tests_info = self.parse_test_info(
                    batch_output_dir, parse_error_handler, ["classname"]
                )
                for test_name, test_info in tests_info.items():
                    test_item = Test(test_info["classname"], test_name)
                    test_target = test_registry.get_owning_target(test_item)
                    self.report_all_info_for_single_test(
                        self.options_scope, test_target, test_name, test_info
                    )

                if result != 0 and fail_fast:
                    break

        if result == 0:
            return TestResult.successful

        # NB: If the TestRegistry fails to find the owning target of a failed test, the target key in
        # this dictionary will be None: helper methods in this block account for that.
        target_to_failed_test = parse_failed_targets(test_registry, output_dir, parse_error_handler)

        def sort_owning_target(t):
            return t.address.spec if t else ""

        failed_targets = sorted(target_to_failed_test, key=sort_owning_target)
        error_message_lines = []
        if self._failure_summary:

            def render_owning_target(t):
                return t.address.reference() if t else "<Unknown Target>"

            for target in failed_targets:
                error_message_lines.append(f"\n{(' ' * 4)}{render_owning_target(target)}")
                for test in sorted(target_to_failed_test[target]):
                    error_message_lines.append(f"{' ' * 8}{test.classname}#{test.methodname}")
        error_message_lines.append(
            "\njava {main} ... exited non-zero ({code}); {failed} failed {targets}.".format(
                main=JUnit.RUNNER_MAIN,
                code=result,
                failed=len(failed_targets),
                targets=pluralize(len(failed_targets), "target"),
            )
        )
        return TestResult(
            msg="\n".join(error_message_lines), rc=result, failed_targets=failed_targets
        )
Example #9
def setUp(self):
  self.class_test = JUnitTest('class')
  self.method_test = JUnitTest('class', 'method')
Example #10
def setUp(self):
    self.class_test = JUnitTest("class")
    self.method_test = JUnitTest("class", "method")