Example 1
 def test_parse_failed_targets_error_raise(self):
   registry = TestRegistry({})
   with temporary_dir() as junit_xml_dir:
     junit_xml_file = os.path.join(junit_xml_dir, 'TEST-bad.xml')
     with open(junit_xml_file, 'w') as fp:
       fp.write('<invalid></xml>')
     with self.assertRaises(ParseError) as exc:
       parse_failed_targets(registry, junit_xml_dir, self._raise_handler)
     self.assertEqual(junit_xml_file, exc.exception.junit_xml_path)
     self.assertIsInstance(exc.exception.cause, XmlParser.XmlError)
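
These tests exercise the error-handler protocol of parse_failed_targets: the handler is invoked with a parse-error object that carries the offending XML path and the underlying cause. A minimal sketch of the raise-style handler, assuming the attribute names (junit_xml_path, cause) implied by the assertions above rather than the real pants implementation (some snippets below expose the path as xml_path instead):

class ParseError(Exception):
    # Hypothetical shape, inferred from the assertions above.
    def __init__(self, junit_xml_path, cause):
        super(ParseError, self).__init__(
            'Error parsing {}: {}'.format(junit_xml_path, cause))
        self.junit_xml_path = junit_xml_path
        self.cause = cause


def raise_handler(error):
    # A raise-style handler aborts parsing at the first malformed XML file.
    raise error
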
Example 2
    def test_parse_failed_targets_error_continue(self):
        registry = RegistryOfTests({})
        with temporary_dir() as junit_xml_dir:
            bad_file1 = os.path.join(junit_xml_dir, 'TEST-bad1.xml')
            with open(bad_file1, 'w') as fp:
                fp.write('<testsuite failures="nan" errors="0"/>')
            with open(os.path.join(junit_xml_dir, 'TEST-good.xml'), 'w') as fp:
                fp.write("""
        <testsuite failures="0" errors="1">
          <testcase classname="org.pantsbuild.Error" name="testError">
            <error/>
          </testcase>
        </testsuite>
        """)
            bad_file2 = os.path.join(junit_xml_dir, 'TEST-bad2.xml')
            with open(bad_file2, 'w') as fp:
                fp.write('<invalid></xml>')

            collect_handler = self.CollectHandler()
            failed_targets = parse_failed_targets(registry, junit_xml_dir,
                                                  collect_handler)
            self.assertEqual(2, len(collect_handler.errors))
            self.assertEqual({bad_file1, bad_file2},
                             {e.xml_path
                              for e in collect_handler.errors})

            self.assertEqual(
                {None: {JUnitTest('org.pantsbuild.Error', 'testError')}},
                failed_targets)
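
The CollectHandler used here accumulates parse errors instead of aborting, which is what lets the well-formed XML file still contribute to failed_targets. A plausible sketch (the real class lives on the test case; only the errors attribute is assumed from the assertions above):

class CollectHandler(object):
    # A continue-style handler: record each parse error and keep going, so
    # good XML files in the same directory are still processed.
    def __init__(self):
        self.errors = []

    def __call__(self, error):
        self.errors.append(error)
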
Example 3
    def test_parse_failed_targets_nominal(self):
        registry = RegistryOfTests({
            JUnitTest('org.pantsbuild.Failure'):
            'Bob',
            JUnitTest('org.pantsbuild.Error'):
            'Jane',
            JUnitTest('org.pantsbuild.AnotherError'):
            'Bob',
            JUnitTest('org.pantsbuild.subpackage.AnotherFailure'):
            'Mary'
        })

        with temporary_dir() as junit_xml_dir:
            with open(os.path.join(junit_xml_dir, 'TEST-a.xml'), 'w') as fp:
                fp.write("""
        <testsuite failures="1" errors="1">
          <testcase classname="org.pantsbuild.Green" name="testOK"/>
          <testcase classname="org.pantsbuild.Failure" name="testFailure">
            <failure/>
          </testcase>
          <testcase classname="org.pantsbuild.Error" name="testError">
            <error/>
          </testcase>
        </testsuite>
        """)
            with open(os.path.join(junit_xml_dir, 'TEST-b.xml'), 'w') as fp:
                fp.write("""
        <testsuite failures="0" errors="1">
          <testcase classname="org.pantsbuild.AnotherError" name="testAnotherError">
            <error/>
          </testcase>
        </testsuite>
        """)
            with open(os.path.join(junit_xml_dir, 'random.xml'), 'w') as fp:
                fp.write('<invalid></xml>')
            with safe_open(os.path.join(junit_xml_dir, 'subdir', 'TEST-c.xml'),
                           'w') as fp:
                fp.write("""
        <testsuite failures="1" errors="0">
          <testcase classname="org.pantsbuild.subpackage.AnotherFailure" name="testAnotherFailue">
            <failure/>
          </testcase>
        </testsuite>
        """)

            failed_targets = parse_failed_targets(registry, junit_xml_dir,
                                                  self._raise_handler)
            self.assertEqual(
                {
                    'Bob': {
                        JUnitTest('org.pantsbuild.Failure', 'testFailure'),
                        JUnitTest('org.pantsbuild.AnotherError',
                                  'testAnotherError')
                    },
                    'Jane': {JUnitTest('org.pantsbuild.Error', 'testError')},
                    'Mary': {
                        JUnitTest('org.pantsbuild.subpackage.AnotherFailure',
                                  'testAnotherFailure')
                    }
                }, failed_targets)
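
The mapping returned by parse_failed_targets is keyed by owning target (here the placeholder owners 'Bob', 'Jane', 'Mary'), each with a set of failed tests. A consumer might render it like this, assuming JUnitTest exposes the classname and methodname attributes the runner code further below relies on:

for owner, tests in sorted(failed_targets.items()):
    print('{}:'.format(owner))
    for test in sorted(tests, key=lambda t: (t.classname, t.methodname)):
        print('  {}#{}'.format(test.classname, test.methodname))
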
Example 4
    def test_parse_failed_targets_no_files(self):
        registry = RegistryOfTests({})
        with temporary_dir() as junit_xml_dir:
            failed_targets = parse_failed_targets(registry, junit_xml_dir,
                                                  self._raise_handler)

            self.assertEqual({}, failed_targets)
Example 5
  def test_parse_failed_targets_nominal(self):
    registry = RegistryOfTests({JUnitTest('org.pantsbuild.Failure'): 'Bob',
                                JUnitTest('org.pantsbuild.Error'): 'Jane',
                                JUnitTest('org.pantsbuild.AnotherError'): 'Bob',
                                JUnitTest('org.pantsbuild.subpackage.AnotherFailure'): 'Mary'})

    with temporary_dir() as junit_xml_dir:
      with open(os.path.join(junit_xml_dir, 'TEST-a.xml'), 'w') as fp:
        fp.write("""
        <testsuite failures="1" errors="1">
          <testcase classname="org.pantsbuild.Green" name="testOK"/>
          <testcase classname="org.pantsbuild.Failure" name="testFailure">
            <failure/>
          </testcase>
          <testcase classname="org.pantsbuild.Error" name="testError">
            <error/>
          </testcase>
        </testsuite>
        """)
      with open(os.path.join(junit_xml_dir, 'TEST-b.xml'), 'w') as fp:
        fp.write("""
        <testsuite failures="0" errors="1">
          <testcase classname="org.pantsbuild.AnotherError" name="testAnotherError">
            <error/>
          </testcase>
        </testsuite>
        """)
      with open(os.path.join(junit_xml_dir, 'random.xml'), 'w') as fp:
        fp.write('<invalid></xml>')
      with safe_open(os.path.join(junit_xml_dir, 'subdir', 'TEST-c.xml'), 'w') as fp:
        fp.write("""
        <testsuite failures="1" errors="0">
          <testcase classname="org.pantsbuild.subpackage.AnotherFailure" name="testAnotherFailue">
            <failure/>
          </testcase>
        </testsuite>
        """)

      failed_targets = parse_failed_targets(registry, junit_xml_dir, self._raise_handler)
      self.assertEqual({'Bob': {JUnitTest('org.pantsbuild.Failure', 'testFailure'),
                                JUnitTest('org.pantsbuild.AnotherError', 'testAnotherError')},
                        'Jane': {JUnitTest('org.pantsbuild.Error', 'testError')},
                        'Mary': {JUnitTest('org.pantsbuild.subpackage.AnotherFailure',
                                           'testAnotherFailure')}},
                       failed_targets)
Example 6
    def _get_failed_targets_from_junitxml(self, junitxml, targets):
        # Note that unlike in Java, we can't easily map targets to test classnames up-front.
        # Instead we grab the classnames seen in practice and work backwards to the targets.

        # First map the dotted paths of the modules to their respective targets.
        pex_src_root = os.path.relpath(
            self.context.products.get_data(
                GatherSources.PYTHON_SOURCES).path(), get_buildroot())
        dotted_module_path_to_target = {}
        for target in targets:
            for src in target.sources_relative_to_source_root():
                pex_src = os.path.join(pex_src_root, src)
                dotted_path = os.path.splitext(pex_src)[0].replace(
                    os.path.sep, '.')
                dotted_module_path_to_target[dotted_path] = target

        # Now grab the classnames from the xml file.
        xml = XmlParser.from_file(junitxml)
        classname_and_names = (
            (testcase.getAttribute('classname'), testcase.getAttribute('name'))
            for testcase in xml.parsed.getElementsByTagName('testcase'))

        # Now find which module each classname belongs to, and map it to its target.
        test_target_pairs = []
        for classname, name in classname_and_names:
            # If the classname is empty, it means that there was an error in the module body,
            # outside any class or method body.  In this case the module name in its entirety
            # ends up in the 'name' attribute.
            dotted_path = classname or name
            while dotted_path and dotted_path not in dotted_module_path_to_target:
                dotted_path = dotted_path.rpartition('.')[0]
            if dotted_path:
                target = dotted_module_path_to_target[dotted_path]
                test_target_pairs.append((Test(classname), target))

        # Now parse the junit xml the usual way.
        def error_handler(e):
            raise TaskError(e)

        failed_targets_map = parse_failed_targets(
            TestRegistry(test_target_pairs), junitxml, error_handler)
        return failed_targets_map.keys()
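
The while loop above resolves a dotted test classname back to its owning module by stripping trailing components until a registered module path matches. A small self-contained illustration of that resolution step:

def resolve_owner(dotted_path, dotted_module_path_to_target):
    # Walk 'pkg.module.TestClass' -> 'pkg.module' -> 'pkg' until a registered
    # module path is found; return None when no prefix is registered.
    while dotted_path and dotted_path not in dotted_module_path_to_target:
        dotted_path = dotted_path.rpartition('.')[0]
    return dotted_module_path_to_target.get(dotted_path)

# E.g. with {'tests.test_widget': widget_target}, the classname
# 'tests.test_widget.TestWidget' resolves to widget_target.
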
Example 7
  def test_parse_failed_targets_error_continue(self):
    registry = TestRegistry({})
    with temporary_dir() as junit_xml_dir:
      bad_file1 = os.path.join(junit_xml_dir, 'TEST-bad1.xml')
      with open(bad_file1, 'w') as fp:
        fp.write('<testsuite failures="nan" errors="0"/>')
      with open(os.path.join(junit_xml_dir, 'TEST-good.xml'), 'w') as fp:
        fp.write("""
        <testsuite failures="0" errors="1">
          <testcase classname="org.pantsbuild.Error" name="testError">
            <error/>
          </testcase>
        </testsuite>
        """)
      bad_file2 = os.path.join(junit_xml_dir, 'TEST-bad2.xml')
      with open(bad_file2, 'w') as fp:
        fp.write('<invalid></xml>')

      collect_handler = self.CollectHandler()
      failed_targets = parse_failed_targets(registry, junit_xml_dir, collect_handler)
      self.assertEqual(2, len(collect_handler.errors))
      self.assertEqual({bad_file1, bad_file2}, {e.junit_xml_path for e in collect_handler.errors})

      self.assertEqual({None: {JUnitTest('org.pantsbuild.Error', 'testError')}}, failed_targets)
Example 8
    def run_tests(self, fail_fast, test_targets, output_dir, coverage):
        test_registry = self._collect_test_targets(test_targets)
        if test_registry.empty:
            return TestResult.rc(0)

        coverage.instrument(output_dir)

        def parse_error_handler(parse_error):
            # Just log and move on since the result is only used to characterize failures, and raising
            # an error here would just distract from the underlying test failures.
            self.context.log.error(
                'Error parsing test result file {path}: {cause}'.format(
                    path=parse_error.xml_path, cause=parse_error.cause))

        # The 'instrument_classpath' product below will be `None` if not set, and we'll default
        # back to runtime_classpath.
        classpath_product = self.context.products.get_data(
            'instrument_classpath')

        result = 0
        for batch_id, (properties,
                       batch) in enumerate(self._iter_batches(test_registry)):
            (workdir, platform, target_jvm_options, target_env_vars,
             concurrency, threads) = properties

            batch_output_dir = output_dir
            if self._batched:
                batch_output_dir = os.path.join(batch_output_dir,
                                                'batch-{}'.format(batch_id))

            run_modifications = coverage.run_modifications(batch_output_dir)

            extra_jvm_options = run_modifications.extra_jvm_options

            # Batches of test classes will likely exist within the same targets: dedupe them.
            relevant_targets = {
                test_registry.get_owning_target(t)
                for t in batch
            }

            complete_classpath = OrderedSet()
            complete_classpath.update(run_modifications.classpath_prepend)
            complete_classpath.update(JUnit.global_instance().runner_classpath(
                self.context))
            complete_classpath.update(
                self.classpath(relevant_targets,
                               classpath_product=classpath_product))

            distribution = JvmPlatform.preferred_jvm_distribution(
                [platform], self._strict_jvm_version)

            # Override cmdline args with values from junit_test() target that specify concurrency:
            args = self._args(fail_fast, batch_output_dir) + [u'-xmlreport']

            if concurrency is not None:
                args = remove_arg(args, '-default-parallel')
                if concurrency == JUnitTests.CONCURRENCY_SERIAL:
                    args = ensure_arg(args,
                                      '-default-concurrency',
                                      param='SERIAL')
                elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES:
                    args = ensure_arg(args,
                                      '-default-concurrency',
                                      param='PARALLEL_CLASSES')
                elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_METHODS:
                    args = ensure_arg(args,
                                      '-default-concurrency',
                                      param='PARALLEL_METHODS')
                elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES_AND_METHODS:
                    args = ensure_arg(args,
                                      '-default-concurrency',
                                      param='PARALLEL_CLASSES_AND_METHODS')

            if threads is not None:
                args = remove_arg(args, '-parallel-threads', has_param=True)
                args += ['-parallel-threads', str(threads)]

            batch_test_specs = [test.render_test_spec() for test in batch]
            with argfile.safe_args(batch_test_specs,
                                   self.get_options()) as batch_tests:
                with self._chroot(relevant_targets, workdir) as chroot:
                    self.context.log.debug('CWD = {}'.format(chroot))
                    self.context.log.debug('platform = {}'.format(platform))
                    with environment_as(**dict(target_env_vars)):
                        subprocess_result = self._spawn_and_wait(
                            executor=SubprocessExecutor(distribution),
                            distribution=distribution,
                            classpath=complete_classpath,
                            main=JUnit.RUNNER_MAIN,
                            jvm_options=self.jvm_options + extra_jvm_options +
                            list(target_jvm_options),
                            args=args + batch_tests,
                            workunit_factory=self.context.new_workunit,
                            workunit_name='run',
                            workunit_labels=[WorkUnitLabel.TEST],
                            cwd=chroot,
                            synthetic_jar_dir=batch_output_dir,
                            create_synthetic_jar=self.synthetic_classpath,
                        )
                        self.context.log.debug(
                            'JUnit subprocess exited with result ({})'.format(
                                subprocess_result))
                        result += abs(subprocess_result)

                tests_info = self.parse_test_info(batch_output_dir,
                                                  parse_error_handler,
                                                  ['classname'])
                for test_name, test_info in tests_info.items():
                    test_item = Test(test_info['classname'], test_name)
                    test_target = test_registry.get_owning_target(test_item)
                    self.report_all_info_for_single_test(
                        self.options_scope, test_target, test_name, test_info)

                if result != 0 and fail_fast:
                    break

        if result == 0:
            return TestResult.rc(0)

        target_to_failed_test = parse_failed_targets(test_registry, output_dir,
                                                     parse_error_handler)

        def sort_owning_target(t):
            return t.address.spec if t else None

        failed_targets = sorted(target_to_failed_test, key=sort_owning_target)
        error_message_lines = []
        if self._failure_summary:

            def render_owning_target(t):
                return t.address.reference() if t else '<Unknown Target>'

            for target in failed_targets:
                error_message_lines.append('\n{indent}{owner}'.format(
                    indent=' ' * 4, owner=render_owning_target(target)))
                for test in sorted(target_to_failed_test[target]):
                    error_message_lines.append(
                        '{indent}{classname}#{methodname}'.format(
                            indent=' ' * 8,
                            classname=test.classname,
                            methodname=test.methodname))
        error_message_lines.append(
            '\njava {main} ... exited non-zero ({code}); {failed} failed {targets}.'
            .format(main=JUnit.RUNNER_MAIN,
                    code=result,
                    failed=len(failed_targets),
                    targets=pluralize(len(failed_targets), 'target')))
        return TestResult(msg='\n'.join(error_message_lines),
                          rc=result,
                          failed_targets=failed_targets)
Example 9
  def run_tests(self, fail_fast, test_targets, output_dir, coverage):
    test_registry = self._collect_test_targets(test_targets)
    if test_registry.empty:
      return TestResult.rc(0)

    coverage.instrument(output_dir)

    def parse_error_handler(parse_error):
      # Just log and move on since the result is only used to characterize failures, and raising
      # an error here would just distract from the underlying test failures.
      self.context.log.error('Error parsing test result file {path}: {cause}'
                             .format(path=parse_error.xml_path, cause=parse_error.cause))

    # The 'instrument_classpath' product below will be `None` if not set, and we'll default
    # back to runtime_classpath.
    classpath_product = self.context.products.get_data('instrument_classpath')

    result = 0
    for batch_id, (properties, batch) in enumerate(self._iter_batches(test_registry)):
      (workdir, platform, target_jvm_options, target_env_vars, concurrency, threads) = properties

      batch_output_dir = output_dir
      if self._batched:
        batch_output_dir = os.path.join(batch_output_dir, 'batch-{}'.format(batch_id))

      run_modifications = coverage.run_modifications(batch_output_dir)

      extra_jvm_options = run_modifications.extra_jvm_options

      # Batches of test classes will likely exist within the same targets: dedupe them.
      relevant_targets = {test_registry.get_owning_target(t) for t in batch}

      complete_classpath = OrderedSet()
      complete_classpath.update(run_modifications.classpath_prepend)
      complete_classpath.update(JUnit.global_instance().runner_classpath(self.context))
      complete_classpath.update(self.classpath(relevant_targets,
                                               classpath_product=classpath_product))

      distribution = JvmPlatform.preferred_jvm_distribution([platform], self._strict_jvm_version)

      # Override cmdline args with values from junit_test() target that specify concurrency:
      args = self._args(fail_fast, batch_output_dir) + [u'-xmlreport']

      if concurrency is not None:
        args = remove_arg(args, '-default-parallel')
        if concurrency == JUnitTests.CONCURRENCY_SERIAL:
          args = ensure_arg(args, '-default-concurrency', param='SERIAL')
        elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES:
          args = ensure_arg(args, '-default-concurrency', param='PARALLEL_CLASSES')
        elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_METHODS:
          args = ensure_arg(args, '-default-concurrency', param='PARALLEL_METHODS')
        elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES_AND_METHODS:
          args = ensure_arg(args, '-default-concurrency', param='PARALLEL_CLASSES_AND_METHODS')

      if threads is not None:
        args = remove_arg(args, '-parallel-threads', has_param=True)
        args += ['-parallel-threads', str(threads)]

      batch_test_specs = [test.render_test_spec() for test in batch]
      with argfile.safe_args(batch_test_specs, self.get_options()) as batch_tests:
        with self._chroot(relevant_targets, workdir) as chroot:
          self.context.log.debug('CWD = {}'.format(chroot))
          self.context.log.debug('platform = {}'.format(platform))
          with environment_as(**dict(target_env_vars)):
            subprocess_result = self._spawn_and_wait(
              executor=SubprocessExecutor(distribution),
              distribution=distribution,
              classpath=complete_classpath,
              main=JUnit.RUNNER_MAIN,
              jvm_options=self.jvm_options + extra_jvm_options + list(target_jvm_options),
              args=args + batch_tests,
              workunit_factory=self.context.new_workunit,
              workunit_name='run',
              workunit_labels=[WorkUnitLabel.TEST],
              cwd=chroot,
              synthetic_jar_dir=batch_output_dir,
              create_synthetic_jar=self.synthetic_classpath,
            )
            self.context.log.debug('JUnit subprocess exited with result ({})'
                                   .format(subprocess_result))
            result += abs(subprocess_result)

        tests_info = self.parse_test_info(batch_output_dir, parse_error_handler, ['classname'])
        for test_name, test_info in tests_info.items():
          test_item = Test(test_info['classname'], test_name)
          test_target = test_registry.get_owning_target(test_item)
          self.report_all_info_for_single_test(self.options_scope, test_target,
                                               test_name, test_info)

        if result != 0 and fail_fast:
          break

    if result == 0:
      return TestResult.rc(0)

    target_to_failed_test = parse_failed_targets(test_registry, output_dir, parse_error_handler)

    def sort_owning_target(t):
      return t.address.spec if t else None

    failed_targets = sorted(target_to_failed_test, key=sort_owning_target)
    error_message_lines = []
    if self._failure_summary:
      def render_owning_target(t):
        return t.address.reference() if t else '<Unknown Target>'

      for target in failed_targets:
        error_message_lines.append('\n{indent}{owner}'.format(indent=' ' * 4,
                                                              owner=render_owning_target(target)))
        for test in sorted(target_to_failed_test[target]):
          error_message_lines.append('{indent}{classname}#{methodname}'
                                     .format(indent=' ' * 8,
                                             classname=test.classname,
                                             methodname=test.methodname))
    error_message_lines.append(
      '\njava {main} ... exited non-zero ({code}); {failed} failed {targets}.'
        .format(main=JUnit.RUNNER_MAIN, code=result, failed=len(failed_targets),
                targets=pluralize(len(failed_targets), 'target'))
    )
    return TestResult(msg='\n'.join(error_message_lines), rc=result, failed_targets=failed_targets)
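
remove_arg and ensure_arg edit the runner's flag list rather than string-munging a command line. Their behavior, as used at these call sites, can be approximated as follows (a sketch inferred from the usage above, not the actual pants utility code):

def remove_arg(args, arg, has_param=False):
    # Return a copy of args with every occurrence of `arg` dropped, along
    # with its following parameter when has_param=True.
    result, skip_next = [], False
    for a in args:
        if skip_next:
            skip_next = False
        elif a == arg:
            skip_next = has_param
        else:
            result.append(a)
    return result


def ensure_arg(args, arg, param=None):
    # Guarantee `arg` (with `param`, if given) appears exactly once.
    result = remove_arg(args, arg, has_param=param is not None)
    result.append(arg)
    if param is not None:
        result.append(param)
    return result
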
Example 10
    def _run_tests(self, test_registry, output_dir, coverage=None):
        if coverage:
            extra_jvm_options = coverage.extra_jvm_options
            classpath_prepend = coverage.classpath_prepend
            classpath_append = coverage.classpath_append
        else:
            extra_jvm_options = []
            classpath_prepend = ()
            classpath_append = ()

        tests_by_properties = test_registry.index(
            lambda tgt: tgt.cwd if tgt.cwd is not None else self._working_dir,
            lambda tgt: tgt.test_platform,
            lambda tgt: tgt.payload.extra_jvm_options,
            lambda tgt: tgt.payload.extra_env_vars,
            lambda tgt: tgt.concurrency, lambda tgt: tgt.threads)

        # the below will be None if not set, and we'll default back to runtime_classpath
        classpath_product = self.context.products.get_data(
            'instrument_classpath')

        result = 0
        for properties, tests in tests_by_properties.items():
            (workdir, platform, target_jvm_options, target_env_vars,
             concurrency, threads) = properties
            for batch in self._partition(tests):
                # Batches of test classes will likely exist within the same targets: dedupe them.
                relevant_targets = {
                    test_registry.get_owning_target(t)
                    for t in batch
                }
                complete_classpath = OrderedSet()
                complete_classpath.update(classpath_prepend)
                complete_classpath.update(
                    JUnit.global_instance().runner_classpath(self.context))
                complete_classpath.update(
                    self.classpath(relevant_targets,
                                   classpath_product=classpath_product))
                complete_classpath.update(classpath_append)
                distribution = JvmPlatform.preferred_jvm_distribution(
                    [platform], self._strict_jvm_version)

                # Override cmdline args with values from junit_test() target that specify concurrency:
                args = self._args(output_dir) + [u'-xmlreport']

                if concurrency is not None:
                    args = remove_arg(args, '-default-parallel')
                    if concurrency == JUnitTests.CONCURRENCY_SERIAL:
                        args = ensure_arg(args,
                                          '-default-concurrency',
                                          param='SERIAL')
                    elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES:
                        args = ensure_arg(args,
                                          '-default-concurrency',
                                          param='PARALLEL_CLASSES')
                    elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_METHODS:
                        args = ensure_arg(args,
                                          '-default-concurrency',
                                          param='PARALLEL_METHODS')
                    elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES_AND_METHODS:
                        args = ensure_arg(args,
                                          '-default-concurrency',
                                          param='PARALLEL_CLASSES_AND_METHODS')

                if threads is not None:
                    args = remove_arg(args,
                                      '-parallel-threads',
                                      has_param=True)
                    args += ['-parallel-threads', str(threads)]

                batch_test_specs = [test.render_test_spec() for test in batch]
                with argfile.safe_args(batch_test_specs,
                                       self.get_options()) as batch_tests:
                    self.context.log.debug('CWD = {}'.format(workdir))
                    self.context.log.debug('platform = {}'.format(platform))
                    with environment_as(**dict(target_env_vars)):
                        result += abs(
                            self._spawn_and_wait(
                                executor=SubprocessExecutor(distribution),
                                distribution=distribution,
                                classpath=complete_classpath,
                                main=JUnit.RUNNER_MAIN,
                                jvm_options=self.jvm_options +
                                extra_jvm_options + list(target_jvm_options),
                                args=args + batch_tests,
                                workunit_factory=self.context.new_workunit,
                                workunit_name='run',
                                workunit_labels=[WorkUnitLabel.TEST],
                                cwd=workdir,
                                synthetic_jar_dir=output_dir,
                                create_synthetic_jar=self.synthetic_classpath,
                            ))

                    if result != 0 and self._fail_fast:
                        break

        if result != 0:

            def error_handler(parse_error):
                # Just log and move on since the result is only used to characterize failures, and raising
                # an error here would just distract from the underlying test failures.
                self.context.log.error(
                    'Error parsing test result file {path}: {cause}'.format(
                        path=parse_error.junit_xml_path,
                        cause=parse_error.cause))

            target_to_failed_test = parse_failed_targets(
                test_registry, output_dir, error_handler)
            failed_targets = sorted(target_to_failed_test,
                                    key=lambda t: t.address.spec)
            error_message_lines = []
            if self._failure_summary:
                for target in failed_targets:
                    error_message_lines.append('\n{indent}{address}'.format(
                        indent=' ' * 4, address=target.address.spec))
                    for test in sorted(target_to_failed_test[target]):
                        error_message_lines.append(
                            '{indent}{classname}#{methodname}'.format(
                                indent=' ' * 8,
                                classname=test.classname,
                                methodname=test.methodname))
            error_message_lines.append(
                '\njava {main} ... exited non-zero ({code}); {failed} failed {targets}.'
                .format(main=JUnit.RUNNER_MAIN,
                        code=result,
                        failed=len(failed_targets),
                        targets=pluralize(len(failed_targets), 'target')))
            raise ErrorWhileTesting('\n'.join(error_message_lines),
                                    failed_targets=list(failed_targets))
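
test_registry.index(...) above groups the registered tests by a tuple of per-target properties so that each group can share a single JVM invocation. A rough sketch of that grouping, assuming the registry holds a test-to-owning-target mapping:

from collections import defaultdict

def index(tests_to_targets, *indexers):
    # Key each test by the tuple of values the indexer lambdas extract from
    # its owning target, mirroring the (workdir, platform, ...) batching above.
    grouped = defaultdict(list)
    for test, target in tests_to_targets.items():
        grouped[tuple(indexer(target) for indexer in indexers)].append(test)
    return dict(grouped)
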
Example 11
    def run_tests(self, fail_fast, test_targets, output_dir, coverage, complete_test_registry):
        test_registry = complete_test_registry.filter(test_targets)
        if test_registry.empty:
            return TestResult.successful

        coverage.instrument(output_dir)

        def parse_error_handler(parse_error):
            # Just log and move on since the result is only used to characterize failures, and raising
            # an error here would just distract from the underlying test failures.
            self.context.log.error(
                "Error parsing test result file {path}: {cause}".format(
                    path=parse_error.xml_path, cause=parse_error.cause
                )
            )

        # The 'instrument_classpath' product below will be `None` if not set, and we'll default
        # back to runtime_classpath.
        classpath_product = self.context.products.get_data("instrument_classpath")

        result = 0
        for batch_id, (properties, batch) in enumerate(self._iter_batches(test_registry)):
            (
                workdir,
                platform,
                target_jvm_options,
                target_env_vars,
                concurrency,
                threads,
            ) = properties

            batch_output_dir = output_dir
            if self._batched:
                batch_output_dir = os.path.join(batch_output_dir, f"batch-{batch_id}")

            run_modifications = coverage.run_modifications(batch_output_dir)
            self.context.log.debug(f"run_modifications: {run_modifications}")

            extra_jvm_options = run_modifications.extra_jvm_options

            # Batches of test classes will likely exist within the same targets: dedupe them.
            relevant_targets = {test_registry.get_owning_target(t) for t in batch}

            complete_classpath = OrderedSet()
            complete_classpath.update(run_modifications.classpath_prepend)
            complete_classpath.update(JUnit.global_instance().runner_classpath(self.context))
            complete_classpath.update(
                self.classpath(relevant_targets, classpath_product=classpath_product)
            )

            distribution = self.preferred_jvm_distribution([platform], self._strict_jvm_version)

            # Override cmdline args with values from junit_test() target that specify concurrency:
            args = self._args(fail_fast, batch_output_dir) + ["-xmlreport"]

            if concurrency is not None:
                args = remove_arg(args, "-default-parallel")
                if concurrency == JUnitTests.CONCURRENCY_SERIAL:
                    args = ensure_arg(args, "-default-concurrency", param="SERIAL")
                elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES:
                    args = ensure_arg(args, "-default-concurrency", param="PARALLEL_CLASSES")
                elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_METHODS:
                    args = ensure_arg(args, "-default-concurrency", param="PARALLEL_METHODS")
                elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES_AND_METHODS:
                    args = ensure_arg(
                        args, "-default-concurrency", param="PARALLEL_CLASSES_AND_METHODS"
                    )

            if threads is not None:
                args = remove_arg(args, "-parallel-threads", has_param=True)
                args += ["-parallel-threads", str(threads)]

            batch_test_specs = [test.render_test_spec() for test in batch]
            with argfile.safe_args(batch_test_specs, self.get_options()) as batch_tests:
                with self.chroot(relevant_targets, workdir) as chroot:
                    self.context.log.debug(f"CWD = {chroot}")
                    self.context.log.debug(f"platform = {platform}")
                    with environment_as(**dict(target_env_vars)):
                        subprocess_result = self.spawn_and_wait(
                            relevant_targets,
                            executor=SubprocessExecutor(distribution),
                            distribution=distribution,
                            classpath=complete_classpath,
                            main=JUnit.RUNNER_MAIN,
                            jvm_options=self.jvm_options
                            + list(platform.jvm_options)
                            + extra_jvm_options
                            + list(target_jvm_options),
                            args=args + batch_tests,
                            workunit_factory=self.context.new_workunit,
                            workunit_name="run",
                            workunit_labels=[WorkUnitLabel.TEST],
                            cwd=chroot,
                            synthetic_jar_dir=batch_output_dir,
                            create_synthetic_jar=self.synthetic_classpath,
                        )
                        self.context.log.debug(
                            "JUnit subprocess exited with result ({})".format(subprocess_result)
                        )
                        result += abs(subprocess_result)

                tests_info = self.parse_test_info(
                    batch_output_dir, parse_error_handler, ["classname"]
                )
                for test_name, test_info in tests_info.items():
                    test_item = Test(test_info["classname"], test_name)
                    test_target = test_registry.get_owning_target(test_item)
                    self.report_all_info_for_single_test(
                        self.options_scope, test_target, test_name, test_info
                    )

                if result != 0 and fail_fast:
                    break

        if result == 0:
            return TestResult.successful

        # NB: If the TestRegistry fails to find the owning target of a failed test, the target key in
        # this dictionary will be None: helper methods in this block account for that.
        target_to_failed_test = parse_failed_targets(test_registry, output_dir, parse_error_handler)

        def sort_owning_target(t):
            return t.address.spec if t else ""

        failed_targets = sorted(target_to_failed_test, key=sort_owning_target)
        error_message_lines = []
        if self._failure_summary:

            def render_owning_target(t):
                return t.address.reference() if t else "<Unknown Target>"

            for target in failed_targets:
                error_message_lines.append(f"\n{(' ' * 4)}{render_owning_target(target)}")
                for test in sorted(target_to_failed_test[target]):
                    error_message_lines.append(f"{' ' * 8}{test.classname}#{test.methodname}")
        error_message_lines.append(
            "\njava {main} ... exited non-zero ({code}); {failed} failed {targets}.".format(
                main=JUnit.RUNNER_MAIN,
                code=result,
                failed=len(failed_targets),
                targets=pluralize(len(failed_targets), "target"),
            )
        )
        return TestResult(
            msg="\n".join(error_message_lines), rc=result, failed_targets=failed_targets
        )
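
Note one deliberate change from Examples 8 and 9: there sort_owning_target returns None for tests with no owning target, which Python 3 cannot compare against strings; this version returns "" so unowned failures simply sort first. A two-line demonstration of the failure mode:

try:
    sorted([None, 'a'])  # mixing None and str sort keys raises TypeError
except TypeError as e:
    print('mixed-key sort fails: {}'.format(e))
print(sorted(['b', None, 'a'], key=lambda t: t if t else ''))  # [None, 'a', 'b']
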
Example 12
  def test_parse_failed_targets_no_files(self):
    registry = TestRegistry({})
    with temporary_dir() as junit_xml_dir:
      failed_targets = parse_failed_targets(registry, junit_xml_dir, self._raise_handler)

      self.assertEqual({}, failed_targets)
Example 13
  def _run_tests(self, test_registry, output_dir, coverage=None):
    if coverage:
      extra_jvm_options = coverage.extra_jvm_options
      classpath_prepend = coverage.classpath_prepend
      classpath_append = coverage.classpath_append
    else:
      extra_jvm_options = []
      classpath_prepend = ()
      classpath_append = ()

    tests_by_properties = test_registry.index(
        lambda tgt: tgt.cwd if tgt.cwd is not None else self._working_dir,
        lambda tgt: tgt.test_platform,
        lambda tgt: tgt.payload.extra_jvm_options,
        lambda tgt: tgt.payload.extra_env_vars,
        lambda tgt: tgt.concurrency,
        lambda tgt: tgt.threads)

    # the below will be None if not set, and we'll default back to runtime_classpath
    classpath_product = self.context.products.get_data('instrument_classpath')

    result = 0
    for properties, tests in tests_by_properties.items():
      (workdir, platform, target_jvm_options, target_env_vars, concurrency, threads) = properties
      for batch in self._partition(tests):
        # Batches of test classes will likely exist within the same targets: dedupe them.
        relevant_targets = {test_registry.get_owning_target(t) for t in batch}
        complete_classpath = OrderedSet()
        complete_classpath.update(classpath_prepend)
        complete_classpath.update(JUnit.global_instance().runner_classpath(self.context))
        complete_classpath.update(self.classpath(relevant_targets,
                                                 classpath_product=classpath_product))
        complete_classpath.update(classpath_append)
        distribution = JvmPlatform.preferred_jvm_distribution([platform], self._strict_jvm_version)

        # Override cmdline args with values from junit_test() target that specify concurrency:
        args = self._args(output_dir) + [u'-xmlreport']

        if concurrency is not None:
          args = remove_arg(args, '-default-parallel')
          if concurrency == JUnitTests.CONCURRENCY_SERIAL:
            args = ensure_arg(args, '-default-concurrency', param='SERIAL')
          elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES:
            args = ensure_arg(args, '-default-concurrency', param='PARALLEL_CLASSES')
          elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_METHODS:
            args = ensure_arg(args, '-default-concurrency', param='PARALLEL_METHODS')
          elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES_AND_METHODS:
            args = ensure_arg(args, '-default-concurrency', param='PARALLEL_CLASSES_AND_METHODS')

        if threads is not None:
          args = remove_arg(args, '-parallel-threads', has_param=True)
          args += ['-parallel-threads', str(threads)]

        batch_test_specs = [test.render_test_spec() for test in batch]
        with argfile.safe_args(batch_test_specs, self.get_options()) as batch_tests:
          self.context.log.debug('CWD = {}'.format(workdir))
          self.context.log.debug('platform = {}'.format(platform))
          with environment_as(**dict(target_env_vars)):
            result += abs(self._spawn_and_wait(
              executor=SubprocessExecutor(distribution),
              distribution=distribution,
              classpath=complete_classpath,
              main=JUnit.RUNNER_MAIN,
              jvm_options=self.jvm_options + extra_jvm_options + list(target_jvm_options),
              args=args + batch_tests,
              workunit_factory=self.context.new_workunit,
              workunit_name='run',
              workunit_labels=[WorkUnitLabel.TEST],
              cwd=workdir,
              synthetic_jar_dir=output_dir,
              create_synthetic_jar=self.synthetic_classpath,
            ))

          if result != 0 and self._fail_fast:
            break

    if result != 0:
      def error_handler(parse_error):
        # Just log and move on since the result is only used to characterize failures, and raising
        # an error here would just distract from the underlying test failures.
        self.context.log.error('Error parsing test result file {path}: {cause}'
                               .format(path=parse_error.junit_xml_path, cause=parse_error.cause))

      target_to_failed_test = parse_failed_targets(test_registry, output_dir, error_handler)
      failed_targets = sorted(target_to_failed_test, key=lambda t: t.address.spec)
      error_message_lines = []
      if self._failure_summary:
        for target in failed_targets:
          error_message_lines.append('\n{indent}{address}'.format(indent=' ' * 4,
                                                                  address=target.address.spec))
          for test in sorted(target_to_failed_test[target]):
            error_message_lines.append('{indent}{classname}#{methodname}'
                                       .format(indent=' ' * 8,
                                               classname=test.classname,
                                               methodname=test.methodname))
      error_message_lines.append(
        '\njava {main} ... exited non-zero ({code}); {failed} failed {targets}.'
          .format(main=JUnit.RUNNER_MAIN, code=result, failed=len(failed_targets),
                  targets=pluralize(len(failed_targets), 'target'))
      )
      raise TestFailedTaskError('\n'.join(error_message_lines), failed_targets=list(failed_targets))
Example 14
    def test_parse_failed_targets_nominal(self):
        registry = RegistryOfTests({
            JUnitTest("org.pantsbuild.Failure"):
            "Bob",
            JUnitTest("org.pantsbuild.Error"):
            "Jane",
            JUnitTest("org.pantsbuild.AnotherError"):
            "Bob",
            JUnitTest("org.pantsbuild.subpackage.AnotherFailure"):
            "Mary",
        })

        with temporary_dir() as junit_xml_dir:
            with open(os.path.join(junit_xml_dir, "TEST-a.xml"), "w") as fp:
                fp.write("""
                    <testsuite failures="1" errors="1">
                      <testcase classname="org.pantsbuild.Green" name="testOK"/>
                      <testcase classname="org.pantsbuild.Failure" name="testFailure">
                        <failure/>
                      </testcase>
                      <testcase classname="org.pantsbuild.Error" name="testError">
                        <error/>
                      </testcase>
                    </testsuite>
                    """)
            with open(os.path.join(junit_xml_dir, "TEST-b.xml"), "w") as fp:
                fp.write("""
                    <testsuite failures="0" errors="1">
                      <testcase classname="org.pantsbuild.AnotherError" name="testAnotherError">
                        <error/>
                      </testcase>
                    </testsuite>
                    """)
            with open(os.path.join(junit_xml_dir, "random.xml"), "w") as fp:
                fp.write("<invalid></xml>")
            with safe_open(os.path.join(junit_xml_dir, "subdir", "TEST-c.xml"),
                           "w") as fp:
                fp.write("""
                    <testsuite failures="1" errors="0">
                      <testcase classname="org.pantsbuild.subpackage.AnotherFailure" name="testAnotherFailure">
                        <failure/>
                      </testcase>
                    </testsuite>
                    """)

            failed_targets = parse_failed_targets(registry, junit_xml_dir,
                                                  self._raise_handler)
            self.assertEqual(
                {
                    "Bob": {
                        JUnitTest("org.pantsbuild.Failure", "testFailure"),
                        JUnitTest("org.pantsbuild.AnotherError",
                                  "testAnotherError"),
                    },
                    "Jane": {JUnitTest("org.pantsbuild.Error", "testError")},
                    "Mary": {
                        JUnitTest("org.pantsbuild.subpackage.AnotherFailure",
                                  "testAnotherFailure")
                    },
                },
                failed_targets,
            )