Example #1
    def _do_execute(self, tests, args, options):
        jobs = options.j

        print '>>> Running with test processors'
        loader = LoadProc(tests)
        results = self._create_result_tracker(options)
        indicators = self._create_progress_indicators(
            tests.test_count_estimate, options)

        outproc_factory = None
        if self.build_config.predictable:
            outproc_factory = predictable.get_outproc
        execproc = ExecutionProc(jobs, outproc_factory)
        sigproc = self._create_signal_proc()

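        # Pipeline of test processors. Order matters: the status-file filter runs
        # both before and after variant expansion, and None entries are dropped
        # when the processors are wired together.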
        procs = [
            loader,
            NameFilterProc(args) if args else None,
            StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
            VariantProc(self._variants),
            StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
            self._create_predictable_filter(),
            self._create_shard_proc(options),
            self._create_seed_proc(options),
            sigproc,
        ] + indicators + [
            results,
            self._create_timeout_proc(options),
            self._create_rerun_proc(options),
            execproc,
        ]

        self._prepare_procs(procs)

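        # Seed the pipeline with two tests per worker slot to start with.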
        loader.load_initial_tests(initial_batch_size=options.j * 2)

        # This starts up worker processes and blocks until all tests are
        # processed.
        execproc.run()

        for indicator in indicators:
            indicator.finished()

        print '>>> %d tests ran' % (results.total - results.remaining)

        exit_code = utils.EXIT_CODE_PASS
        if results.failed:
            exit_code = utils.EXIT_CODE_FAILURES
        if not results.total:
            exit_code = utils.EXIT_CODE_NO_TESTS

        # Indicate if a SIGINT or SIGTERM happened.
        exit_code = max(exit_code, sigproc.exit_code)

        if exit_code == utils.EXIT_CODE_FAILURES and options.json_test_results:
            print(
                "Force exit code 0 after failures. Json test results file "
                "generated with failure information.")
            exit_code = utils.EXIT_CODE_PASS
        return exit_code
Example #2
    def _run_test_procs(self, suites, args, options, progress_indicator,
                        context):
        jobs = options.j

        print '>>> Running with test processors'
        loader = LoadProc()
        tests_counter = TestsCounter()
        results = ResultsTracker()
        indicators = progress_indicator.ToProgressIndicatorProcs()
        execproc = ExecutionProc(jobs, context)

        procs = [
            loader,
            NameFilterProc(args) if args else None,
            StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
            self._create_shard_proc(options),
            tests_counter,
            VariantProc(VARIANTS),
            StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
        ] + indicators + [
            results,
            self._create_rerun_proc(context),
            execproc,
        ]

        procs = filter(None, procs)

        for i in xrange(0, len(procs) - 1):
            procs[i].connect_to(procs[i + 1])

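        # Flatten all suites into a single test list and put slow tests first.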
        tests = [t for s in suites for t in s.tests]
        tests.sort(key=lambda t: t.is_slow, reverse=True)

        loader.setup()
        loader.load_tests(tests)

        print '>>> Running %d base tests' % tests_counter.total
        tests_counter.remove_from_chain()

        execproc.start()

        for indicator in indicators:
            indicator.finished()

        print '>>> %d tests ran' % results.total

        exit_code = 0
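        # 1: failures, 2: tests left unprocessed, 3: no tests ran at all.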
        if results.failed:
            exit_code = 1
        if results.remaining:
            exit_code = 2
        if not results.total:
            exit_code = 3

        if exit_code == 1 and options.json_test_results:
            print(
                "Force exit code 0 after failures. Json test results file "
                "generated with failure information.")
            exit_code = 0
        return exit_code
Example #3
    def _do_execute(self, tests, args, options):
        jobs = options.j

        print '>>> Running with test processors'
        loader = LoadProc()
        tests_counter = TestsCounter()
        results = ResultsTracker()
        indicators = self._create_progress_indicators(options)

        outproc_factory = None
        if self.build_config.predictable:
            outproc_factory = predictable.get_outproc
        execproc = ExecutionProc(jobs, outproc_factory)

        procs = [
            loader,
            NameFilterProc(args) if args else None,
            StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
            self._create_shard_proc(options),
            tests_counter,
            VariantProc(self._variants),
            StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
            self._create_predictable_filter(),
            self._create_seed_proc(options),
            self._create_signal_proc(),
        ] + indicators + [
            results,
            self._create_timeout_proc(options),
            self._create_rerun_proc(options),
            execproc,
        ]

        self._prepare_procs(procs)
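        # Schedule slow tests first so they start as early as possible.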
        tests.sort(key=lambda t: t.is_slow, reverse=True)

        loader.load_tests(tests)

        print '>>> Running %d base tests' % tests_counter.total
        tests_counter.remove_from_chain()

        execproc.start()

        for indicator in indicators:
            indicator.finished()

        print '>>> %d tests ran' % (results.total - results.remaining)

        exit_code = 0
        if results.failed:
            exit_code = 1
        if not results.total:
            exit_code = 3

        if exit_code == 1 and options.json_test_results:
            print(
                "Force exit code 0 after failures. Json test results file "
                "generated with failure information.")
            exit_code = 0
        return exit_code
Example #4
  def _do_execute(self, suites, args, options):
    print(">>> Running tests for %s.%s" % (self.build_config.arch,
                                           self.mode_name))

    ctx = self._create_context(options)
    self._setup_suites(options, suites)
    tests = self._load_tests(options, suites, ctx)
    progress_indicator = progress.IndicatorNotifier()
    progress_indicator.Register(
        progress.PROGRESS_INDICATORS[options.progress]())
    if options.json_test_results:
      progress_indicator.Register(progress.JsonTestProgressIndicator(
          options.json_test_results,
          self.build_config.arch,
          self.mode_options.execution_mode,
          ctx.random_seed))

    loader = LoadProc()
    fuzzer_rng = random.Random(options.fuzzer_random_seed)

    combiner = self._create_combiner(fuzzer_rng, options)
    results = ResultsTracker()
    execproc = ExecutionProc(options.j, ctx)
    indicators = progress_indicator.ToProgressIndicatorProcs()
    procs = [
      loader,
      NameFilterProc(args) if args else None,
      StatusFileFilterProc(None, None),
      # TODO(majeski): Improve sharding when combiner is present. Maybe select
      # different random seeds for shards instead of splitting tests.
      self._create_shard_proc(options),
      combiner,
      self._create_fuzzer(fuzzer_rng, options)
    ] + indicators + [
      results,
      self._create_timeout_proc(options),
      self._create_rerun_proc(options),
      execproc,
    ]
    self._prepare_procs(procs)
    loader.load_tests(tests)

    # TODO(majeski): maybe some notification from loader would be better?
    if combiner:
      combiner.generate_initial_tests(options.j * 4)
    execproc.start()

    for indicator in indicators:
      indicator.finished()

    print '>>> %d tests ran' % results.total
    if results.failed:
      print '>>> %d tests failed' % results.failed

    if results.failed:
      return 1
    if results.remaining:
      return 2
    return 0
Example #5
  def _do_execute(self, suites, args, options):
    print(">>> Running tests for %s.%s" % (self.build_config.arch,
                                           self.mode_name))

    ctx = self._create_context(options)
    tests = self._load_tests(options, suites, ctx)
    progress_indicator = progress.IndicatorNotifier()
    progress_indicator.Register(
        progress.PROGRESS_INDICATORS[options.progress]())
    if options.json_test_results:
      progress_indicator.Register(progress.JsonTestProgressIndicator(
          options.json_test_results,
          self.build_config.arch,
          self.mode_options.execution_mode,
          ctx.random_seed))

    loader = LoadProc()
    fuzzer_rng = random.Random(options.fuzzer_random_seed)
    fuzzer_proc = fuzzer.FuzzerProc(
        fuzzer_rng,
        options.tests_count,
        self._create_fuzzer_configs(options),
        options.total_timeout_sec,
    )

    results = ResultsTracker()
    execproc = ExecutionProc(options.j, ctx)
    indicators = progress_indicator.ToProgressIndicatorProcs()
    procs = [
      loader,
      NameFilterProc(args) if args else None,
      StatusFileFilterProc(None, None),
      self._create_shard_proc(options),
      fuzzer_proc,
    ] + indicators + [
      results,
      self._create_rerun_proc(options),
      execproc,
    ]
    self._prepare_procs(procs)
    loader.load_tests(tests)
    execproc.start()

    for indicator in indicators:
      indicator.finished()

    print '>>> %d tests ran' % results.total
    if results.failed:
      print '>>> %d tests failed' % results.failed

    if results.failed:
      return 1
    if results.remaining:
      return 2
    return 0
Example #6
    def _run_test_procs(self, suites, options, progress_indicator, context,
                        outproc_factory):
        jobs = options.j

        print '>>> Running with test processors'
        procs = []
        indicators = progress_indicator.ToProgressIndicatorProcs()

        # TODO(majeski): Implement all indicators and remove this filter.
        indicators = filter(None, indicators)

        loader = LoadProc()
        procs.append(loader)

        results = ResultsTracker(count_subtests=False)

        procs.append(
            StatusFileFilterProc(options.slow_tests, options.pass_fail_tests))

        procs.append(results)

        procs += indicators

        if context.rerun_failures_count:
            procs.append(
                RerunProc(context.rerun_failures_count,
                          context.rerun_failures_max))

        execproc = ExecutionProc(jobs, context)
        procs.append(execproc)

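        # Connect each processor to the next one to form the processing chain.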
        for i in xrange(0, len(procs) - 1):
            procs[i].connect_to(procs[i + 1])

        tests = [t for s in suites for t in s.tests]
        tests.sort(key=lambda t: t.is_slow, reverse=True)
        loader.load_tests(tests)
        for indicator in indicators:
            indicator.starting()
        execproc.start()
        for indicator in indicators:
            indicator.finished()

        if results.failed:
            return 1
        if results.remaining:
            return 2
        return 0
Example #7
    def _do_execute(self, tests, args, options):
        loader = LoadProc(tests)
        fuzzer_rng = random.Random(options.fuzzer_random_seed)

        combiner = self._create_combiner(fuzzer_rng, options)
        results = self._create_result_tracker(options)
        execproc = ExecutionProc(options.j)
        sigproc = self._create_signal_proc()
        indicators = self._create_progress_indicators(
            tests.test_count_estimate, options)
        procs = [
            loader,
            NameFilterProc(args) if args else None,
            StatusFileFilterProc(None, None),
            # TODO(majeski): Improve sharding when combiner is present. Maybe select
            # different random seeds for shards instead of splitting tests.
            self._create_shard_proc(options),
            ForgiveTimeoutProc(),
            combiner,
            self._create_fuzzer(fuzzer_rng, options),
            sigproc,
        ] + indicators + [
            results,
            self._create_timeout_proc(options),
            self._create_rerun_proc(options),
            execproc,
        ]
        self._prepare_procs(procs)
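        # An infinite initial batch size loads the entire test set up front
        # instead of streaming it in smaller batches.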
        loader.load_initial_tests(initial_batch_size=float('inf'))

        # TODO(majeski): maybe some notification from loader would be better?
        if combiner:
            combiner.generate_initial_tests(options.j * 4)

        # This starts up worker processes and blocks until all tests are
        # processed.
        execproc.run()

        for indicator in indicators:
            indicator.finished()

        print('>>> %d tests ran' % results.total)
        if results.failed:
            return utils.EXIT_CODE_FAILURES

        # Indicate if a SIGINT or SIGTERM happened.
        return sigproc.exit_code
Example #8
  def _do_execute(self, tests, args, options):
    loader = LoadProc(tests)
    fuzzer_rng = random.Random(options.fuzzer_random_seed)

    combiner = self._create_combiner(fuzzer_rng, options)
    results = self._create_result_tracker(options)
    execproc = ExecutionProc(options.j)
    sigproc = self._create_signal_proc()
    indicators = self._create_progress_indicators(
      tests.test_count_estimate, options)
    procs = [
      loader,
      NameFilterProc(args) if args else None,
      StatusFileFilterProc(None, None),
      # TODO(majeski): Improve sharding when combiner is present. Maybe select
      # different random seeds for shards instead of splitting tests.
      self._create_shard_proc(options),
      ForgiveTimeoutProc(),
      combiner,
      self._create_fuzzer(fuzzer_rng, options),
      sigproc,
    ] + indicators + [
      results,
      self._create_timeout_proc(options),
      self._create_rerun_proc(options),
      execproc,
    ]
    self._prepare_procs(procs)
    loader.load_initial_tests(initial_batch_size=float('inf'))

    # TODO(majeski): maybe some notification from loader would be better?
    if combiner:
      combiner.generate_initial_tests(options.j * 4)

    # This starts up worker processes and blocks until all tests are
    # processed.
    execproc.run()

    for indicator in indicators:
      indicator.finished()

    print('>>> %d tests ran' % results.total)
    if results.failed:
      return utils.EXIT_CODE_FAILURES

    # Indicate if a SIGINT or SIGTERM happened.
    return sigproc.exit_code
Example #9
  def _do_execute(self, tests, args, options):
    loader = LoadProc()
    fuzzer_rng = random.Random(options.fuzzer_random_seed)

    combiner = self._create_combiner(fuzzer_rng, options)
    results = ResultsTracker()
    execproc = ExecutionProc(options.j)
    indicators = self._create_progress_indicators(options)
    procs = [
      loader,
      NameFilterProc(args) if args else None,
      StatusFileFilterProc(None, None),
      # TODO(majeski): Improve sharding when combiner is present. Maybe select
      # different random seeds for shards instead of splitting tests.
      self._create_shard_proc(options),
      ForgiveTimeoutProc(),
      combiner,
      self._create_fuzzer(fuzzer_rng, options),
      self._create_signal_proc(),
    ] + indicators + [
      results,
      self._create_timeout_proc(options),
      self._create_rerun_proc(options),
      execproc,
    ]
    self._prepare_procs(procs)
    loader.load_tests(tests)

    # TODO(majeski): maybe some notification from loader would be better?
    if combiner:
      combiner.generate_initial_tests(options.j * 4)
    execproc.start()

    for indicator in indicators:
      indicator.finished()

    print '>>> %d tests ran' % results.total
    if results.failed:
      print '>>> %d tests failed' % results.failed

    if results.failed:
      return 1
    return 0
Example #10
    def _run_test_procs(self, suites, options, progress_indicator, context,
                        outproc_factory):
      jobs = options.j

      print '>>> Running with test processors'
      loader = LoadProc()
      results = ResultsTracker(count_subtests=False)
      indicators = progress_indicator.ToProgressIndicatorProcs()

      procs = [
        loader,
        VariantProc(VARIANTS),
        StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
        results,
      ] + indicators

      if context.rerun_failures_count:
        procs.append(RerunProc(
            context.rerun_failures_count,
            context.rerun_failures_max
        ))

      execproc = ExecutionProc(jobs, context)
      procs.append(execproc)

      for i in xrange(0, len(procs) - 1):
        procs[i].connect_to(procs[i + 1])

      tests = [t for s in suites for t in s.tests]
      tests.sort(key=lambda t: t.is_slow, reverse=True)
      loader.load_tests(tests)
      for indicator in indicators:
        indicator.starting()
      execproc.start()
      for indicator in indicators:
        indicator.finished()

      if results.failed:
        return 1
      if results.remaining:
        return 2
      return 0
Example #11
    def _do_execute(self, tests, args, options):
        jobs = options.j

        print('>>> Running with test processors')
        loader = LoadProc(tests)
        results = self._create_result_tracker(options)
        indicators = self._create_progress_indicators(
            tests.test_count_estimate, options)

        outproc_factory = None
        if self.build_config.predictable:
            outproc_factory = predictable.get_outproc
        execproc = ExecutionProc(jobs, outproc_factory)
        sigproc = self._create_signal_proc()

        procs = [
            loader,
            NameFilterProc(args) if args else None,
            StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
            VariantProc(self._variants),
            StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
            self._create_predictable_filter(),
            self._create_shard_proc(options),
            self._create_seed_proc(options),
            self._create_sequence_proc(options),
            sigproc,
        ] + indicators + [
            results,
            self._create_timeout_proc(options),
            self._create_rerun_proc(options),
            execproc,
        ]

        self._prepare_procs(procs)

        loader.load_initial_tests(initial_batch_size=options.j * 2)

        # This starts up worker processes and blocks until all tests are
        # processed.
        execproc.run()

        for indicator in indicators:
            indicator.finished()

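        # Avoid division by zero when no test count estimate is available.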
        if tests.test_count_estimate:
            percentage = float(results.total) / tests.test_count_estimate * 100
        else:
            percentage = 0

        print(('>>> %d base tests produced %d (%d%s)'
               ' non-filtered tests') %
              (tests.test_count_estimate, results.total, percentage, '%'))

        print('>>> %d tests ran' % (results.total - results.remaining))

        exit_code = utils.EXIT_CODE_PASS
        if results.failed:
            exit_code = utils.EXIT_CODE_FAILURES
        if not results.total:
            exit_code = utils.EXIT_CODE_NO_TESTS

        if options.time:
            self._print_durations(options)

        # Indicate if a SIGINT or SIGTERM happened.
        return max(exit_code, sigproc.exit_code)
Example #12
    def _do_execute(self, tests, args, options):
      jobs = options.j

      print '>>> Running with test processors'
      loader = LoadProc()
      tests_counter = TestsCounter()
      results = self._create_result_tracker(options)
      indicators = self._create_progress_indicators(options)

      outproc_factory = None
      if self.build_config.predictable:
        outproc_factory = predictable.get_outproc
      execproc = ExecutionProc(jobs, outproc_factory)
      sigproc = self._create_signal_proc()

      procs = [
        loader,
        NameFilterProc(args) if args else None,
        StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
        self._create_shard_proc(options),
        tests_counter,
        VariantProc(self._variants),
        StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
        self._create_predictable_filter(),
        self._create_seed_proc(options),
        sigproc,
      ] + indicators + [
        results,
        self._create_timeout_proc(options),
        self._create_rerun_proc(options),
        execproc,
      ]

      self._prepare_procs(procs)
      tests.sort(key=lambda t: t.is_slow, reverse=True)

      loader.load_tests(tests)

      print '>>> Running %d base tests' % tests_counter.total
      tests_counter.remove_from_chain()

      # This starts up worker processes and blocks until all tests are
      # processed.
      execproc.run()

      for indicator in indicators:
        indicator.finished()

      print '>>> %d tests ran' % (results.total - results.remaining)

      exit_code = utils.EXIT_CODE_PASS
      if results.failed:
        exit_code = utils.EXIT_CODE_FAILURES
      if not results.total:
        exit_code = utils.EXIT_CODE_NO_TESTS

      # Indicate if a SIGINT or SIGTERM happened.
      exit_code = max(exit_code, sigproc.exit_code)

      if exit_code == utils.EXIT_CODE_FAILURES and options.json_test_results:
        print("Force exit code 0 after failures. Json test results file "
              "generated with failure information.")
        exit_code = utils.EXIT_CODE_PASS
      return exit_code
Example #13
    def _do_execute(self, tests, args, options):
      jobs = options.j

      print('>>> Running with test processors')
      loader = LoadProc(tests)
      results = self._create_result_tracker(options)
      indicators = self._create_progress_indicators(
          tests.test_count_estimate, options)

      outproc_factory = None
      if self.build_config.predictable:
        outproc_factory = predictable.get_outproc
      execproc = ExecutionProc(jobs, outproc_factory)
      sigproc = self._create_signal_proc()

      procs = [
        loader,
        NameFilterProc(args) if args else None,
        StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
        VariantProc(self._variants),
        StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
        self._create_predictable_filter(),
        self._create_shard_proc(options),
        self._create_seed_proc(options),
        sigproc,
      ] + indicators + [
        results,
        self._create_timeout_proc(options),
        self._create_rerun_proc(options),
        execproc,
      ]

      self._prepare_procs(procs)

      loader.load_initial_tests(initial_batch_size=options.j*2)

      # This starts up worker processes and blocks until all tests are
      # processed.
      execproc.run()

      for indicator in indicators:
        indicator.finished()


      if tests.test_count_estimate:
        percentage = float(results.total) / tests.test_count_estimate * 100
      else:
        percentage = 0

      print(('>>> %d base tests produced %d (%d%s)'
             ' non-filtered tests') %
            (tests.test_count_estimate, results.total, percentage, '%'))

      print('>>> %d tests ran' % (results.total - results.remaining))

      exit_code = utils.EXIT_CODE_PASS
      if results.failed:
        exit_code = utils.EXIT_CODE_FAILURES
      if not results.total:
        exit_code = utils.EXIT_CODE_NO_TESTS

      # Indicate if a SIGINT or SIGTERM happened.
      return max(exit_code, sigproc.exit_code)