Example #1
  def _simulate_proc(self, variants):
    """Expects the list of instantiated test variants to load into the
    VariantProc."""
    variants_mapping = {self.test: variants}

    # Creates a Variant processor containing the possible types of test
    # variants.
    self.variant_proc = VariantProc(variants=["to_filter", "to_load"])
    self.variant_proc._variant_gens = {
      "fake_suite": FakeVariantGen(variants)}

    # FakeFilter drops tests matching the predicate; only the rest get loaded.
    self.fake_filter = FakeFilter(
      filter_predicate=(lambda t: t.procid == "to_filter"))

    # FakeResultObserver verifies that VariantProc calls result_for correctly.
    self.fake_result_observer = FakeResultObserver()

    # Links the processors together to form a test processing pipeline.
    self.variant_proc._prev_proc = self.fake_result_observer
    self.fake_filter._prev_proc = self.variant_proc
    self.variant_proc._next_proc = self.fake_filter

    # Injects the test into the VariantProc
    is_loaded = self.variant_proc.next_test(self.test)

    # Verifies that next_test's return value is consistent with what
    # FakeFilter actually loaded.
    loaded_variants = list(self.fake_filter.loaded)
    self.assertEqual(is_loaded, any(loaded_variants))
    return self.fake_filter.loaded, self.fake_filter.call_counter
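The fakes used above (FakeTest, FakeVariantGen, FakeFilter, FakeResultObserver) are not shown in this example. A minimal sketch of the two instrumented ones, inferred from how they are used in these examples and purely hypothetical, might look like this:

class FakeFilter(object):
  # Hypothetical: drops tests matching the predicate, records the rest.
  def __init__(self, filter_predicate):
    self.filter_predicate = filter_predicate
    self.loaded = set()        # tests that made it through the filter
    self.call_counter = 0      # number of next_test calls observed

  def next_test(self, test):
    self.call_counter += 1
    if self.filter_predicate(test):
      return False             # filtered out, i.e. not loaded
    self.loaded.add(test)
    return True                # loaded

class FakeResultObserver(object):
  # Hypothetical: collects the (test, result) pairs sent back upstream.
  def __init__(self):
    self.results = set()

  def result_for(self, test, result):
    self.results.add((test, result))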
Example #2
    def _run_test_procs(self, suites, options, progress_indicator, context,
                        outproc_factory):
      jobs = options.j

      print '>>> Running with test processors'
      loader = LoadProc()
      results = ResultsTracker(count_subtests=False)
      indicators = progress_indicator.ToProgressIndicatorProcs()

      procs = [
        loader,
        VariantProc(VARIANTS),
        StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
        results,
      ] + indicators

      if context.rerun_failures_count:
        procs.append(RerunProc(
            context.rerun_failures_count,
            context.rerun_failures_max
        ))

      execproc = ExecutionProc(jobs, context)
      procs.append(execproc)

      for i in xrange(0, len(procs) - 1):
        procs[i].connect_to(procs[i + 1])

      tests = [t for s in suites for t in s.tests]
      tests.sort(key=lambda t: t.is_slow, reverse=True)
      loader.load_tests(tests)
      for indicator in indicators:
        indicator.starting()
      execproc.start()
      for indicator in indicators:
        indicator.finished()

      if results.failed:
        return 1
      if results.remaining:
        return 2
      return 0
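The wiring loop above assumes each processor exposes connect_to plus the next_test/result_for protocol. A plausible base-class sketch, assuming connect_to merely sets the _prev_proc/_next_proc links that the unit tests elsewhere on this page assign by hand (an illustration, not the real TestProc):

class TestProc(object):
  def __init__(self):
    self._prev_proc = None
    self._next_proc = None

  def connect_to(self, next_proc):
    # Chain this processor to the next one in the pipeline.
    self._next_proc = next_proc
    next_proc._prev_proc = self

  def next_test(self, test):
    # Default behaviour: pass the test downstream unchanged.
    return self._next_proc.next_test(test)

  def result_for(self, test, result):
    # Default behaviour: pass the result back upstream.
    self._prev_proc.result_for(test, result)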
Example #3
    def _do_execute(self, tests, args, options):
        jobs = options.j

        print('>>> Running with test processors')
        loader = LoadProc(tests)
        results = self._create_result_tracker(options)
        indicators = self._create_progress_indicators(
            tests.test_count_estimate, options)

        outproc_factory = None
        if self.build_config.predictable:
            outproc_factory = predictable.get_outproc
        execproc = ExecutionProc(jobs, outproc_factory)
        sigproc = self._create_signal_proc()

        procs = [
            loader,
            NameFilterProc(args) if args else None,
            StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
            VariantProc(self._variants),
            StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
            self._create_predictable_filter(),
            self._create_shard_proc(options),
            self._create_seed_proc(options),
            self._create_sequence_proc(options),
            sigproc,
        ] + indicators + [
            results,
            self._create_timeout_proc(options),
            self._create_rerun_proc(options),
            execproc,
        ]

        self._prepare_procs(procs)

        loader.load_initial_tests(initial_batch_size=options.j * 2)

        # This starts up worker processes and blocks until all tests are
        # processed.
        execproc.run()

        for indicator in indicators:
            indicator.finished()

        if tests.test_count_estimate:
            percentage = float(results.total) / tests.test_count_estimate * 100
        else:
            percentage = 0

        print(('>>> %d base tests produced %d (%d%s)'
               ' non-filtered tests') %
              (tests.test_count_estimate, results.total, percentage, '%'))

        print('>>> %d tests ran' % (results.total - results.remaining))

        exit_code = utils.EXIT_CODE_PASS
        if results.failed:
            exit_code = utils.EXIT_CODE_FAILURES
        if not results.total:
            exit_code = utils.EXIT_CODE_NO_TESTS

        if options.time:
            self._print_durations(options)

        # Indicate if a SIGINT or SIGTERM happened.
        return max(exit_code, sigproc.exit_code)
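This version delegates the wiring to self._prepare_procs instead of spelling out the loop. A guess at what that helper might do, based on the inline filter(None, ...) plus connect_to pattern in the other examples (hypothetical sketch, not the actual implementation):

    def _prepare_procs(self, procs):
        # Drop the None placeholders left by the optional _create_* helpers,
        # then chain each processor to its neighbour.
        procs = [p for p in procs if p]
        for prev_proc, next_proc in zip(procs, procs[1:]):
            prev_proc.connect_to(next_proc)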
Example #4
    def _do_execute(self, tests, args, options):
        jobs = options.j

        print '>>> Running with test processors'
        loader = LoadProc()
        tests_counter = TestsCounter()
        results = ResultsTracker()
        indicators = self._create_progress_indicators(options)

        outproc_factory = None
        if self.build_config.predictable:
            outproc_factory = predictable.get_outproc
        execproc = ExecutionProc(jobs, outproc_factory)
        sigproc = self._create_signal_proc()

        procs = [
            loader,
            NameFilterProc(args) if args else None,
            StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
            self._create_shard_proc(options),
            tests_counter,
            VariantProc(self._variants),
            StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
            self._create_predictable_filter(),
            self._create_seed_proc(options),
            sigproc,
        ] + indicators + [
            results,
            self._create_timeout_proc(options),
            self._create_rerun_proc(options),
            execproc,
        ]

        self._prepare_procs(procs)
        tests.sort(key=lambda t: t.is_slow, reverse=True)

        loader.load_tests(tests)

        print '>>> Running %d base tests' % tests_counter.total
        tests_counter.remove_from_chain()

        # This starts up worker processes and blocks until all tests are
        # processed.
        execproc.run()

        for indicator in indicators:
            indicator.finished()

        print '>>> %d tests ran' % (results.total - results.remaining)

        exit_code = utils.EXIT_CODE_PASS
        if results.failed:
            exit_code = utils.EXIT_CODE_FAILURES
        if not results.total:
            exit_code = utils.EXIT_CODE_NO_TESTS

        # Indicate if a SIGINT or SIGTERM happened.
        exit_code = max(exit_code, sigproc.exit_code)

        if exit_code == utils.EXIT_CODE_FAILURES and options.json_test_results:
            print(
                "Force exit code 0 after failures. Json test results file "
                "generated with failure information.")
            exit_code = utils.EXIT_CODE_PASS
        return exit_code
Example #5
    def _run_test_procs(self, suites, args, options, progress_indicator,
                        context):
        jobs = options.j

        print '>>> Running with test processors'
        loader = LoadProc()
        tests_counter = TestsCounter()
        results = ResultsTracker()
        indicators = progress_indicator.ToProgressIndicatorProcs()

        outproc_factory = None
        if self.build_config.predictable:
            outproc_factory = predictable.get_outproc
        execproc = ExecutionProc(jobs, context, outproc_factory)

        procs = [
            loader,
            NameFilterProc(args) if args else None,
            StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
            self._create_shard_proc(options),
            tests_counter,
            VariantProc(self._variants),
            StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
        ] + indicators + [
            results,
            self._create_rerun_proc(context),
            execproc,
        ]

        procs = filter(None, procs)

        for i in xrange(0, len(procs) - 1):
            procs[i].connect_to(procs[i + 1])

        tests = [t for s in suites for t in s.tests]
        tests.sort(key=lambda t: t.is_slow, reverse=True)

        loader.setup()
        loader.load_tests(tests)

        print '>>> Running %d base tests' % tests_counter.total
        tests_counter.remove_from_chain()

        execproc.start()

        for indicator in indicators:
            indicator.finished()

        print '>>> %d tests ran' % results.total

        exit_code = 0
        if results.failed:
            exit_code = 1
        if results.remaining:
            exit_code = 2
        if not results.total:
            exit_code = 3

        if exit_code == 1 and options.json_test_results:
            print(
                "Force exit code 0 after failures. Json test results file "
                "generated with failure information.")
            exit_code = 0
        return exit_code
Example #6
class TestVariantProcLoading(unittest.TestCase):
  def setUp(self):
    self.test = FakeTest("test")

  def _simulate_proc(self, variants):
    """Expects the list of instantiated test variants to load into the
    VariantProc."""
    variants_mapping = {self.test: variants}

    # Creates a Variant processor containing the possible types of test
    # variants.
    self.variant_proc = VariantProc(variants=["to_filter", "to_load"])
    self.variant_proc._variant_gens = {
      "fake_suite": FakeVariantGen(variants)}

    # FakeFilter drops tests matching the predicate; only the rest get loaded.
    self.fake_filter = FakeFilter(
      filter_predicate=(lambda t: t.procid == "to_filter"))

    # FakeResultObserver verifies that VariantProc calls result_for correctly.
    self.fake_result_observer = FakeResultObserver()

    # Links the processors together to form a test processing pipeline.
    self.variant_proc._prev_proc = self.fake_result_observer
    self.fake_filter._prev_proc = self.variant_proc
    self.variant_proc._next_proc = self.fake_filter

    # Injects the test into the VariantProc
    is_loaded = self.variant_proc.next_test(self.test)

    # Verifies that next_test's return value is consistent with what
    # FakeFilter actually loaded.
    loaded_variants = list(self.fake_filter.loaded)
    self.assertEqual(is_loaded, any(loaded_variants))
    return self.fake_filter.loaded, self.fake_filter.call_counter

  def test_filters_first_two_variants(self):
    variants = [
      FakeTest('to_filter'),
      FakeTest('to_filter'),
      FakeTest('to_load'),
      FakeTest('to_load'),
    ]
    expected_load_results = {variants[2]}

    load_results, call_count = self._simulate_proc(variants)

    self.assertSetEqual(expected_load_results, load_results)
    self.assertEqual(call_count, 3)

  def test_stops_loading_after_first_successful_load(self):
    variants = [
      FakeTest('to_load'),
      FakeTest('to_load'),
      FakeTest('to_filter'),
    ]
    expected_load_results = {variants[0]}

    loaded_tests, call_count = self._simulate_proc(variants)

    self.assertSetEqual(expected_load_results, loaded_tests)
    self.assertEqual(call_count, 1)

  def test_return_result_when_out_of_variants(self):
    variants = [
      FakeTest('to_filter'),
      FakeTest('to_load'),
    ]

    self._simulate_proc(variants)

    self.variant_proc.result_for(variants[1], None)

    expected_results = {(self.test, None)}

    self.assertSetEqual(expected_results, self.fake_result_observer.results)

  def test_return_result_after_running_variants(self):
    variants = [
      FakeTest('to_filter'),
      FakeTest('to_load'),
      FakeTest('to_load'),
    ]

    self._simulate_proc(variants)
    self.variant_proc.result_for(variants[1], None)

    self.assertSetEqual(set(variants[1:]), self.fake_filter.loaded)

    self.variant_proc.result_for(variants[2], None)

    expected_results = {(self.test, None)}
    self.assertSetEqual(expected_results, self.fake_result_observer.results)
Example #7
class TestVariantProcLoading(unittest.TestCase):
    def setUp(self):
        self.test = FakeTest("test")

    def _simulate_proc(self, variants):
        """Expects the list of instantiated test variants to load into the
    VariantProc."""
        variants_mapping = {self.test: variants}

        # Creates a Variant processor containing the possible types of test
        # variants.
        self.variant_proc = VariantProc(variants=["to_filter", "to_load"])
        self.variant_proc._variant_gens = {
            "fake_suite": FakeVariantGen(variants)
        }

        # FakeFilter drops tests matching the predicate; only the rest get loaded.
        self.fake_filter = FakeFilter(
            filter_predicate=(lambda t: t.procid == "to_filter"))

        # FakeResultObserver verifies that VariantProc calls result_for correctly.
        self.fake_result_observer = FakeResultObserver()

        # Links the processors together to form a test processing pipeline.
        self.variant_proc._prev_proc = self.fake_result_observer
        self.fake_filter._prev_proc = self.variant_proc
        self.variant_proc._next_proc = self.fake_filter

        # Injects the test into the VariantProc
        is_loaded = self.variant_proc.next_test(self.test)

        # Verifies that next_test's return value is consistent with what
        # FakeFilter actually loaded.
        loaded_variants = list(self.fake_filter.loaded)
        self.assertEqual(is_loaded, any(loaded_variants))
        return self.fake_filter.loaded, self.fake_filter.call_counter

    def test_filters_first_two_variants(self):
        variants = [
            FakeTest('to_filter'),
            FakeTest('to_filter'),
            FakeTest('to_load'),
            FakeTest('to_load'),
        ]
        expected_load_results = {variants[2]}

        load_results, call_count = self._simulate_proc(variants)

        self.assertSetEqual(expected_load_results, load_results)
        self.assertEqual(call_count, 3)

    def test_stops_loading_after_first_successful_load(self):
        variants = [
            FakeTest('to_load'),
            FakeTest('to_load'),
            FakeTest('to_filter'),
        ]
        expected_load_results = {variants[0]}

        loaded_tests, call_count = self._simulate_proc(variants)

        self.assertSetEqual(expected_load_results, loaded_tests)
        self.assertEqual(call_count, 1)

    def test_return_result_when_out_of_variants(self):
        variants = [
            FakeTest('to_filter'),
            FakeTest('to_load'),
        ]

        self._simulate_proc(variants)

        self.variant_proc.result_for(variants[1], None)

        expected_results = {(self.test, None)}

        self.assertSetEqual(expected_results,
                            self.fake_result_observer.results)

    def test_return_result_after_running_variants(self):
        variants = [
            FakeTest('to_filter'),
            FakeTest('to_load'),
            FakeTest('to_load'),
        ]

        self._simulate_proc(variants)
        self.variant_proc.result_for(variants[1], None)

        self.assertSetEqual(set(variants[1:]), self.fake_filter.loaded)

        self.variant_proc.result_for(variants[2], None)

        expected_results = {(self.test, None)}
        self.assertSetEqual(expected_results,
                            self.fake_result_observer.results)
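To run these tests standalone, the standard unittest entry point suffices, assuming VariantProc and the fake helpers are importable from the same module:

if __name__ == '__main__':
    unittest.main()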