Example #1
def CalculateRunParametersForSwarmingTask(flakiness, error):
    """Calculates and returns the iterations and timeout for swarming tasks

  Args:
    flakiness (Flakiness): A structure representing flakiness thus far.
    error (SwarmingError): The error of the previously-run swarming
        task at commit_position. Should be None if no error was encountered.

  Returns:
      ((int) iterations, (int) timeout) Tuple containing the iterations to run
          for this swarming task, and the timeout for that task.
  """
    assert flakiness, 'Cannot calculate parameters for nonexistent Flakiness'

    timeout_per_test = _EstimateSwarmingIterationTimeout(flakiness)
    iterations_for_task = _CalculateNumberOfIterationsToRunWithinTimeout(
        timeout_per_test)
    time_for_task_seconds = _EstimateTimeoutForTask(timeout_per_test,
                                                    iterations_for_task)

    if error and error.code == swarming_task_error.TIMED_OUT:
        # If the previous run timed out, run a smaller, fixed number of
        # iterations so the next attempt is more likely to finish.
        iterations_for_task = waterfall_config.GetCheckFlakeSettings().get(
            'iterations_to_run_after_timeout',
            flake_constants.DEFAULT_ITERATIONS_TO_RUN_AFTER_TIMEOUT)
    else:
        # If the calculated number of iterations is too many, reduce it rather
        # than increasing the timeout, to minimize the load on swarming.
        iterations_for_task = (
            _GetMaximumIterationsPerSwarmingTask(iterations_for_task))

    return iterations_for_task, time_for_task_seconds
Example #2
def testGetCheckFlakeSettings(self):
    self.assertEqual(
        {
            'swarming_rerun': {
                'lower_flake_threshold': 0.02,
                'upper_flake_threshold': 0.98,
                'max_flake_in_a_row': 4,
                'max_stable_in_a_row': 4,
                'iterations_to_rerun': 100,
                'max_build_numbers_to_look_back': 1000,
                'use_nearby_neighbor': True,
                'max_dive_in_a_row': 4,
                'dive_rate_threshold': 0.4,
                'max_iterations_to_rerun': 800,
            },
            'try_job_rerun': {
                'lower_flake_threshold': 0.02,
                'upper_flake_threshold': 0.98,
                'max_flake_in_a_row': 1,
                'max_stable_in_a_row': 0,
                'iterations_to_rerun': 100
            },
            'update_monorail_bug': False,
            'minimum_confidence_score_to_run_tryjobs': 0.6
        }, waterfall_config.GetCheckFlakeSettings())
Example #3
def _GetHardTimeoutSeconds(master_name, builder_name, reference_build_number,
                           step_name, iterations_to_rerun):
    flake_settings = waterfall_config.GetCheckFlakeSettings()
    flake_swarming_settings = flake_settings.get('swarming_rerun', {})
    reference_task = WfSwarmingTask.Get(master_name, builder_name,
                                        reference_build_number, step_name)

    if _CanEstimateExecutionTimeFromReferenceSwarmingTask(reference_task):
        delta = reference_task.completed_time - reference_task.started_time
        execution_time = delta.total_seconds()
        number_of_tests = len(reference_task.tests_statuses)
        number_of_iterations = reference_task.parameters['iterations_to_rerun']
        time_per_test_per_iteration = (
            execution_time / (number_of_iterations * number_of_tests))
        estimated_execution_time = (time_per_test_per_iteration *
                                    iterations_to_rerun)
    else:
        # Use default settings if the reference task is unavailable or malformed.
        estimated_execution_time = flake_swarming_settings.get(
            'default_per_iteration_timeout_seconds', 60) * iterations_to_rerun

    # To account for variance and pending time, use a factor of 2x estimated
    # execution time.
    estimated_time_needed = estimated_execution_time * 2

    return min(max(estimated_time_needed, _ONE_HOUR_IN_SECONDS),
               _MAX_TIMEOUT_SECONDS)
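
Traced with hypothetical numbers, the estimate-then-clamp flow above looks like the sketch below. _ONE_HOUR_IN_SECONDS and _MAX_TIMEOUT_SECONDS are module constants not shown here, so 3600 seconds and a 3-hour cap are assumed purely for illustration:

# Hypothetical reference task: 600s of execution across 10 tests and 100
# iterations, rerun with iterations_to_rerun = 400.
time_per_test_per_iteration = 600.0 / (100 * 10)              # 0.6s
estimated_execution_time = time_per_test_per_iteration * 400  # 240s
estimated_time_needed = estimated_execution_time * 2          # 480s

# Clamped into [_ONE_HOUR_IN_SECONDS, _MAX_TIMEOUT_SECONDS] (assumed values):
print(min(max(estimated_time_needed, 3600), 3 * 3600))        # 3600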
Example #4
def IsStableDefaultThresholds(pass_rate):
  """Override for IsStable that uses the default thresholds."""
  flake_settings = waterfall_config.GetCheckFlakeSettings()
  lower_flake_threshold = flake_settings.get(
      'lower_flake_threshold', flake_constants.DEFAULT_LOWER_FLAKE_THRESHOLD)
  upper_flake_threshold = flake_settings.get(
      'upper_flake_threshold', flake_constants.DEFAULT_UPPER_FLAKE_THRESHOLD)
  return IsStable(pass_rate, lower_flake_threshold, upper_flake_threshold)
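
As a usage sketch, assuming the flat-config defaults seen in the settings test of Example #8 below (lower_flake_threshold of 1e-7, upper_flake_threshold of 0.9999999), and assuming IsStable deems rates at or beyond those thresholds stable:

# Hypothetical calls; IsStable itself is defined elsewhere in this module.
IsStableDefaultThresholds(1.0)  # True  -- at or above the upper threshold
IsStableDefaultThresholds(0.0)  # True  -- at or below the lower threshold
IsStableDefaultThresholds(0.5)  # False -- between the thresholds, i.e. flaky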
Example #5
    def RunImpl(self, build_key):
        """Triggers flake analyses for flaky tests found by CI failure analysis."""
        master_name, builder_name, build_number = build_key.GetParts()
        flake_settings = waterfall_config.GetCheckFlakeSettings()
        throttled = flake_settings.get('throttle_flake_analyses', True)

        analysis = WfAnalysis.Get(master_name, builder_name, build_number)

        if not analysis or not analysis.flaky_tests:
            return

        analysis_counts = defaultdict(lambda: defaultdict(int))
        for step, flaky_tests in analysis.flaky_tests.iteritems():
            logging.info('%s/%s/%s/%s has %s flaky tests.', master_name,
                         builder_name, build_number, step, len(flaky_tests))

            for test_name in flaky_tests:
                # TODO(crbug.com/904050): Deprecate FlakeAnalysisRequest in favor of
                # Flake.
                flake = flake_util.GetFlake(_LUCI_PROJECT, step, test_name,
                                            master_name, builder_name,
                                            build_number)
                request = FlakeAnalysisRequest.Create(test_name, False, None)
                request.AddBuildStep(master_name, builder_name, build_number,
                                     step, time_util.GetUTCNow())
                request.flake_key = flake.key
                scheduled = flake_analysis_service.ScheduleAnalysisForFlake(
                    request, '*****@*****.**',
                    False, triggering_sources.FINDIT_PIPELINE)
                if scheduled:  # pragma: no branch
                    analysis_counts[step]['analyzed'] += 1
                    logging.info(
                        'A flake analysis has been triggered for %s/%s', step,
                        test_name)
                    if throttled and len(flaky_tests) > 1:
                        logging.info(
                            'Throttling is enabled, skipping %d tests.',
                            len(flaky_tests) - 1)
                        analysis_counts[step]['throttled'] = len(
                            flaky_tests) - 1
                        break  # If we're throttled, stop after the first.
                else:
                    analysis_counts[step]['error'] += 1

        for step, step_counts in analysis_counts.iteritems():
            # Collects metrics.
            step_metadata = step_util.GetStepMetadata(master_name,
                                                      builder_name,
                                                      build_number, step)
            canonical_step_name = step_metadata.get(
                'canonical_step_name') or 'Unknown'
            isolate_target_name = step_metadata.get(
                'isolate_target_name') or 'Unknown'

            for operation, count in step_counts.iteritems():
                monitoring.OnFlakeIdentified(canonical_step_name,
                                             isolate_target_name, operation,
                                             count)
Example #6
def HasSufficientInformation(overall_pass_rate, total_iterations,
                             partial_pass_rate, partial_iterations):
  """Determines whether a pass rate is enough for an analysis to proceed.

  Args:
    overall_pass_rate (float): Overall pass rate with the partial pass rate
        factored in.
    total_iterations (int): Overall number of iterations run, with the partial
        iterations factored in.
    partial_pass_rate (float): A subset of the pass rate to test if the overall
        pass rate has converged. Should be from the most recent sampling.
    partial_iterations (int): A subset of all iterations to test if the overall
        pass rate has converged. Should be from the most recent sampling.

  Returns:
    Whether the overall pass rate with number of iterations is sufficient to
        proceed.
  """
  flake_settings = waterfall_config.GetCheckFlakeSettings()
  minimum_iterations = flake_settings.get(
      'minimum_iterations_required_for_convergence',
      flake_constants.MINIMUM_ITERATIONS_REQUIRED_FOR_CONVERGENCE)

  if overall_pass_rate is None or not total_iterations:
    return False

  if MinimumIterationsReached(total_iterations):
    # The test is already flaky beyond reasonable doubt.
    if not IsStableDefaultThresholds(overall_pass_rate):
      return True

    # The test is stable thus far. Check for convergence.
    return HasPassRateConverged(overall_pass_rate, total_iterations,
                                partial_pass_rate, partial_iterations)

  # For cases with few iterations, check if the test is flaky or stable by
  # checking its theoretical pass rate padded up to the minimum required
  # iterations with both passes and fails. Only if it is flaky with both
  # theoretical values can it safely be deemed flaky. For example, if exactly
  # 2 iterations have been run, with 1 passing and 1 failing, then the pass
  # rate is 50%. However, because there are only 2 iterations, calling this
  # flaky immediately may rest on too small a sample. Assume 100 iterations
  # are needed for confidence, then attempt to pad 99 failures to give a
  # theoretical minimum pass rate of 1% (which is flaky), and 99 passes to
  # give a maximum pass rate of 100% (which is stable). Because there is a
  # discrepancy, the original 1 pass in 2 iterations is considered
  # insufficient information.
  overall_pass_count = float(overall_pass_rate * total_iterations)
  theoretical_minimum_pass_rate = overall_pass_count / minimum_iterations
  theoretical_maximum_pass_rate = (
      (overall_pass_count + minimum_iterations - total_iterations) /
      minimum_iterations)

  return (not IsStableDefaultThresholds(theoretical_minimum_pass_rate) and
          not IsStableDefaultThresholds(theoretical_maximum_pass_rate))
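
The padding arithmetic in the comment above can be traced by hand. A minimal sketch using its hypothetical numbers (1 pass out of 2 iterations, 100 iterations required for confidence):

overall_pass_rate = 0.5
total_iterations = 2
minimum_iterations = 100

overall_pass_count = overall_pass_rate * total_iterations  # 1.0 pass

# Pad the remaining iterations first with all failures, then with all passes.
theoretical_minimum_pass_rate = overall_pass_count / minimum_iterations  # 0.01
theoretical_maximum_pass_rate = (
    (overall_pass_count + minimum_iterations - total_iterations) /
    minimum_iterations)                                                  # 0.99

# One padded rate looks flaky while the other looks stable, so 1 pass out of
# 2 iterations is insufficient information to proceed.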
Example #7
def MinimumIterationsReached(iterations):
  """Determines if a minimum number of iterations has been met.

  Args:
    iterations (int): The number of iterations a data point has already run.

  Returns:
    True if the data point has at least a minimum number of iterations.
  """
  minimum_iterations = waterfall_config.GetCheckFlakeSettings().get(
      'minimum_iterations_required_for_convergence',
      flake_constants.MINIMUM_ITERATIONS_REQUIRED_FOR_CONVERGENCE)

  return iterations >= minimum_iterations
Example #8
def testGetCheckFlakeSettings(self):
    self.assertEqual(
        {
            'iterations_to_run_after_timeout': 10,
            'lower_flake_threshold': 1e-7,
            'max_commit_positions_to_look_back': 5000,
            'max_iterations_per_task': 200,
            'max_iterations_to_rerun': 400,
            'per_iteration_timeout_seconds': 60,
            'swarming_task_cushion': 2,
            'swarming_task_retries_per_build': 3,
            'throttle_flake_analyses': False,
            'timeout_per_swarming_task_seconds': 3600,
            'timeout_per_test_seconds': 180,
            'upper_flake_threshold': 0.9999999
        }, waterfall_config.GetCheckFlakeSettings())
Example #9
def IsStableFailing(pass_rate):
  """Determines whether a pass rate is failing stably.

  Args:
    pass_rate (float): The pass rate to check.

  Returns:
    Whether or not |pass_rate| is stably failing, e.g. 0%.
  """
  assert pass_rate is not None, 'Usage: pass_rate cannot be None'

  flake_settings = waterfall_config.GetCheckFlakeSettings()
  lower_flake_threshold = flake_settings.get(
      'lower_flake_threshold', flake_constants.DEFAULT_LOWER_FLAKE_THRESHOLD)

  # Nonexistent tests are considered stable passing.
  return not TestDoesNotExist(pass_rate) and pass_rate < lower_flake_threshold
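
A usage sketch, assuming the 1e-7 default lower threshold from the settings test in Example #8 and assuming TestDoesNotExist is False for an ordinary 0% pass rate:

IsStableFailing(0.0)   # True  -- fails every iteration
IsStableFailing(0.5)   # False -- flaky rather than stably failing
IsStableFailing(None)  # AssertionError -- pass_rate must not be None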
Example #10
def _EstimateTimeoutForTask(estimated_timeout_per_test,
                            estimated_iterations_for_task):
    """Returns the timeout for a swarming task.

  Returns either timeout_per_test * iterations_for_task or the default timeout
  for a swarming task (whichever is greater).

  Args:
    estimated_timeout_per_test (int): timeout per test derived from existing
        data points.
    estimated_iterations_for_task (int): total number of iterations for the
        task derived from existing data points.
  Returns:
    (int): The estimated timeout for a swarming task.
  """
    minimum_timeout = waterfall_config.GetCheckFlakeSettings().get(
        'timeout_per_swarming_task_seconds',
        flake_constants.DEFAULT_TIMEOUT_PER_SWARMING_TASK_SECONDS)

    return max(estimated_timeout_per_test * estimated_iterations_for_task,
               minimum_timeout)
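
With the 3600-second default task timeout from the settings test in Example #8, small estimates are floored at the default and only long-running suites exceed it:

# Hypothetical estimates (seconds per test times iterations per task).
print(max(2 * 20, 3600))    # 3600 -- the default timeout wins
print(max(180 * 30, 3600))  # 5400 -- a long suite raises the timeout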
Example #11
def GetEarliestCommitPosition(lower_bound, upper_bound):
    """Determines the earliest commit position to analyze.

  Args:
    lower_bound (int): The lowest commit position to analyze, or None if no
        pre-determined lower bound is specified.
    upper_bound (int): The highest commit position to analyze.

  Returns:
    (int): The lowest commit position to analyze.
  """
    assert upper_bound > lower_bound

    if lower_bound is not None:
        return lower_bound

    config_settings = waterfall_config.GetCheckFlakeSettings()
    max_commit_positions_to_look_back = config_settings.get(
        'max_commit_positions_to_look_back',
        flake_constants.DEFAULT_MAX_COMMIT_POSITIONS_TO_LOOK_BACK)

    return max(0, upper_bound - max_commit_positions_to_look_back)
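
A usage sketch, assuming the 5000-commit default look-back window from the settings test in Example #8:

GetEarliestCommitPosition(1000, 2000)  # 1000 -- explicit lower bound wins
GetEarliestCommitPosition(None, 8000)  # 3000 -- max(0, 8000 - 5000)
GetEarliestCommitPosition(None, 2000)  # 0    -- clamped at zero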
Example #12
def _CalculateNumberOfIterationsToRunWithinTimeout(estimated_timeout_per_test):
    """Calculates the number of iterations that will run in one swarming task.

  Uses the total iterations, target timeout, and the timeout per test to
  calculate the appropriate amount of test iterations to run.

  Args:
    estimated_timeout_per_test (int): Time, in seconds, that each test will
    is estimated to take. Can be 0 if unknown, and a default used instead.

  Returns:
    (int) Number of iterations to perform in one swarming task.
  """
    timeout_per_test = (estimated_timeout_per_test
                        or flake_constants.DEFAULT_TIMEOUT_PER_TEST_SECONDS)
    timeout_per_swarming_task = waterfall_config.GetCheckFlakeSettings().get(
        'timeout_per_swarming_task_seconds',
        flake_constants.DEFAULT_TIMEOUT_PER_SWARMING_TASK_SECONDS)

    iterations = timeout_per_swarming_task / timeout_per_test

    # Always run at least 1 iteration.
    return max(1, iterations)
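
Under Python 2 the division above floors to an integer. With the defaults from the settings test in Example #8 (a 3600-second task timeout and 180 seconds per test):

timeout_per_swarming_task = 3600  # seconds per task
timeout_per_test = 180            # seconds per test
iterations = timeout_per_swarming_task / timeout_per_test  # floor division: 20
print(max(1, iterations))  # 20 iterations per task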
Example #13
def _EstimateSwarmingIterationTimeout(flakiness):
    """Estimates a timeout per iteration based on previous data points.

  Uses the amount of time previous data points at this build number took to
  estimate a timeout for an iteration.

  Args:
    flakiness (Flakiness): Information representing the flakiness and other
        metadata at a commit position.

  Return:
    (int) Timeout for one iteration in seconds.
  """
    check_flake_settings = waterfall_config.GetCheckFlakeSettings()

    if not flakiness.total_test_run_seconds or not flakiness.iterations:
        # There is insufficient data to calculate a timeout, possibly due to
        # an error. Fall back to safe values.
        return check_flake_settings.get(
            'timeout_per_test_seconds',
            flake_constants.DEFAULT_TIMEOUT_PER_TEST_SECONDS)

    # Compute the average time per iteration from the data gathered so far.
    time_per_iteration = (float(flakiness.total_test_run_seconds) /
                          float(flakiness.iterations))

    # Add padding for a more pessimistic per-test estimated time. For example,
    # if 60 iterations were measured to have run in 60 seconds, theoretically
    # one iteration should take 1 second. However, there can be some variance
    # in the run time, so to be safe we multiply that theoretical time by a
    # constant factor (e.g. 2) to give an adjusted time of 2 seconds per test,
    # triggering tasks with a more conservative number of iterations.
    cushion_multiplier = check_flake_settings.get(
        'swarming_task_cushion',
        flake_constants.SWARMING_TASK_CUSHION_MULTIPLIER)

    return int(round(cushion_multiplier * time_per_iteration))
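
Tracing the comment's own numbers through this function, with the cushion multiplier of 2 from the settings test in Example #8:

total_test_run_seconds = 60  # hypothetical data point
iterations = 60
cushion_multiplier = 2       # 'swarming_task_cushion'

time_per_iteration = float(total_test_run_seconds) / float(iterations)  # 1.0
print(int(round(cushion_multiplier * time_per_iteration)))              # 2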
Example #14
    def run(self,
            master_name,
            builder_name,
            triggering_build_number,
            current_build_number,
            step_name,
            test_name,
            version_number,
            step_metadata=None,
            use_nearby_neighbor=False,
            manually_triggered=False):
        # Get the MasterFlakeAnalysis version corresponding to these parameters.
        analysis = MasterFlakeAnalysis.GetVersion(master_name,
                                                  builder_name,
                                                  triggering_build_number,
                                                  step_name,
                                                  test_name,
                                                  version=version_number)

        flake_swarming_task = FlakeSwarmingTask.Get(master_name, builder_name,
                                                    current_build_number,
                                                    step_name, test_name)

        # Don't call another pipeline if we fail.
        if flake_swarming_task.status == analysis_status.ERROR:
            # Report the last flake swarming task's error that it encountered.
            # TODO(lijeffrey): Another neighboring swarming task may be needed in this
            # one's place instead of failing altogether.
            error = flake_swarming_task.error or {
                'error': 'Swarming task failed',
                'message':
                'The last swarming task did not complete as expected'
            }

            _UpdateAnalysisStatusUponCompletion(analysis, None,
                                                analysis_status.ERROR, error)
            logging.error('Error in Swarming task')
            yield UpdateFlakeBugPipeline(analysis.key.urlsafe())
            return

        if not analysis.algorithm_parameters:
            # Fall back to the global check-flake settings as this analysis'
            # algorithm parameters.
            flake_settings = waterfall_config.GetCheckFlakeSettings()
            analysis.algorithm_parameters = flake_settings
            analysis.put()
        algorithm_settings = analysis.algorithm_parameters.get(
            'swarming_rerun')

        data_points = _NormalizeDataPoints(analysis.data_points)
        # Figure out what build_number to trigger a swarming rerun on next, if any.
        next_build_number, suspected_build, iterations_to_rerun = (
            lookback_algorithm.GetNextRunPointNumber(data_points,
                                                     algorithm_settings))
        if iterations_to_rerun:
            # Need to rerun the first build with more iterations.
            _UpdateIterationsToRerun(analysis, iterations_to_rerun)
            _RemoveRerunBuildDataPoint(analysis, next_build_number)
            analysis.put()

        max_build_numbers_to_look_back = algorithm_settings.get(
            'max_build_numbers_to_look_back', _DEFAULT_MAX_BUILD_NUMBERS)
        last_build_number = max(
            0, triggering_build_number - max_build_numbers_to_look_back)

        if ((next_build_number < last_build_number
             or next_build_number >= triggering_build_number)
                and not iterations_to_rerun):  # Finished.
            build_confidence_score = None
            if suspected_build is not None:
                # Use steppiness as the confidence score.
                build_confidence_score = confidence.SteppinessForBuild(
                    analysis.data_points, suspected_build)

            # Update suspected build and the confidence score.
            _UpdateAnalysisStatusUponCompletion(
                analysis,
                suspected_build,
                analysis_status.COMPLETED,
                None,
                build_confidence_score=build_confidence_score)

            if build_confidence_score is None:
                logging.info(
                    ('Skipping try jobs due to no suspected flake build being '
                     'identified'))
            elif not _HasSufficientConfidenceToRunTryJobs(analysis):
                logging.info(
                    ('Skipping try jobs due to insufficient confidence in '
                     'suspected build'))
            else:
                # Hook up with try-jobs. Based on analysis of historical data, 60%
                # confidence could filter out almost all false positives.
                suspected_build_point = analysis.GetDataPointOfSuspectedBuild()
                assert suspected_build_point

                blamed_cls, lower_bound = _GetFullBlamedCLsAndLowerBound(
                    suspected_build_point, analysis.data_points)

                if blamed_cls:
                    if len(blamed_cls) > 1:
                        logging.info(
                            'Running try-jobs against commits in regressions')
                        start_commit_position = suspected_build_point.commit_position - 1
                        start_revision = blamed_cls[start_commit_position]
                        build_info = build_util.GetBuildInfo(
                            master_name, builder_name, triggering_build_number)
                        parent_mastername = build_info.parent_mastername or master_name
                        parent_buildername = build_info.parent_buildername or builder_name
                        cache_name = swarming_util.GetCacheName(
                            parent_mastername, parent_buildername)
                        dimensions = waterfall_config.GetTrybotDimensions(
                            parent_mastername, parent_buildername)
                        yield RecursiveFlakeTryJobPipeline(
                            analysis.key.urlsafe(), start_commit_position,
                            start_revision, lower_bound, cache_name,
                            dimensions)
                        return  # No update to bug yet.
                    else:
                        logging.info(
                            'Single commit in the blame list of suspected build'
                        )
                        culprit_confidence_score = confidence.SteppinessForCommitPosition(
                            analysis.data_points,
                            suspected_build_point.commit_position)
                        culprit = recursive_flake_try_job_pipeline.CreateCulprit(
                            suspected_build_point.git_hash,
                            suspected_build_point.commit_position,
                            culprit_confidence_score)
                        UpdateAnalysisUponCompletion(analysis, culprit,
                                                     analysis_status.COMPLETED,
                                                     None)
                else:
                    logging.error(
                        'Cannot run flake try jobs against empty blame list')
                    error = {
                        'error': 'Could not start try jobs',
                        'message': 'Empty blame list'
                    }
                    UpdateAnalysisUponCompletion(analysis, None,
                                                 analysis_status.ERROR, error)

            yield UpdateFlakeBugPipeline(analysis.key.urlsafe())
            return

        pipeline_job = RecursiveFlakePipeline(
            master_name,
            builder_name,
            next_build_number,
            step_name,
            test_name,
            version_number,
            triggering_build_number,
            step_metadata=step_metadata,
            manually_triggered=manually_triggered,
            use_nearby_neighbor=use_nearby_neighbor,
            step_size=(current_build_number - next_build_number))
        # Disable attribute 'target' defined outside __init__ pylint warning,
        # because pipeline generates its own __init__ based on run function.
        pipeline_job.target = (  # pylint: disable=W0201
            appengine_util.GetTargetNameForModule(constants.WATERFALL_BACKEND))
        pipeline_job.start(
            queue_name=self.queue_name or constants.DEFAULT_QUEUE)
Example #15
def ShouldThrottleAnalysis():
    """Determines whether to throttle an analysis based on config."""
    flake_settings = waterfall_config.GetCheckFlakeSettings()
    return flake_settings.get('throttle_flake_analyses', True)
Example #16
def ScheduleAnalysisIfNeeded(
        normalized_test,
        original_test,
        bug_id=None,
        allow_new_analysis=False,
        force=False,
        manually_triggered=False,
        user_email=None,
        triggering_source=triggering_sources.FINDIT_PIPELINE,
        queue_name=constants.DEFAULT_QUEUE):
    """Schedules an analysis if needed and returns the MasterFlakeAnalysis.

  When the build failure was already analyzed and a new analysis is scheduled,
  the returned WfAnalysis will still have the result of last completed analysis.

  Args:
    normalized_test (TestInfo): Info of the normalized flaky test after mapping
       a CQ trybot step to a Waterfall buildbot step, striping prefix "PRE_"
       from a gtest, etc.
    original_test (TestInfo): Info of the original flaky test.
    bug_id (int): The monorail bug id to update when analysis is done.
    allow_new_analysis (bool): Indicate whether a new analysis is allowed.
    force (bool): Indicate whether to force a rerun of current analysis.
    manually_triggered (bool): True if the analysis was requested manually,
      such as by a Chromium sheriff.
    user_email (str): The email of the user requesting the analysis.
    triggering_source (int): From where this analysis was triggered, such as
      through Findit pipeline, UI, or through Findit API.
    queue_name (str): The App Engine queue to run the analysis.

  Returns:
    A MasterFlakeAnalysis instance.
    None if no analysis was scheduled and the user has no permission to.
  """
    flake_settings = waterfall_config.GetCheckFlakeSettings()
    use_nearby_neighbor = flake_settings.get('swarming_rerun',
                                             {}).get('use_nearby_neighbor',
                                                     False)

    need_new_analysis, analysis = _NeedANewAnalysis(
        normalized_test,
        original_test,
        flake_settings,
        bug_id=bug_id,
        allow_new_analysis=allow_new_analysis,
        force=force,
        user_email=user_email,
        triggering_source=triggering_source)

    if need_new_analysis:
        # _NeedANewAnalysis just created master_flake_analysis. Use the latest
        # version number and pass that along to the other pipelines for updating
        # results and data.
        logging.info(
            'A new master flake analysis was successfully saved for %s (%s) and '
            'will be captured in version %s', repr(normalized_test),
            repr(original_test), analysis.version_number)

        step_metadata = buildbot.GetStepLog(normalized_test.master_name,
                                            normalized_test.builder_name,
                                            normalized_test.build_number,
                                            normalized_test.step_name,
                                            HttpClientAppengine(),
                                            'step_metadata')

        pipeline_job = RecursiveFlakePipeline(
            normalized_test.master_name,
            normalized_test.builder_name,
            normalized_test.build_number,
            normalized_test.step_name,
            normalized_test.test_name,
            analysis.version_number,
            triggering_build_number=normalized_test.build_number,
            step_metadata=step_metadata,
            manually_triggered=manually_triggered,
            use_nearby_neighbor=use_nearby_neighbor)
        pipeline_job.target = appengine_util.GetTargetNameForModule(
            constants.WATERFALL_BACKEND)
        pipeline_job.start(queue_name=queue_name)

    return analysis
Example #17
def _GetMaximumIterationsPerSwarmingTask(requested_iterations_for_task):
    """Returns the maximum iterations not to exceed per swarming task."""
    max_iterations_per_task = waterfall_config.GetCheckFlakeSettings().get(
        'max_iterations_per_task', flake_constants.MAX_ITERATIONS_PER_TASK)

    return min(max_iterations_per_task, requested_iterations_for_task)
Example #18
def _GetIterationsToRerun(self):
    flake_settings = waterfall_config.GetCheckFlakeSettings()
    swarming_rerun_settings = flake_settings.get('swarming_rerun', {})
    return swarming_rerun_settings.get('iterations_to_rerun', 100)
Example #19
def _GetMaximumIterationsToRun():
    return waterfall_config.GetCheckFlakeSettings().get(
        'max_iterations_to_rerun',
        flake_constants.DEFAULT_MAX_ITERATIONS_TO_RERUN)
Example #20
def _GetMaximumSwarmingTaskRetries():
    return waterfall_config.GetCheckFlakeSettings().get(
        'maximum_swarming_task_retries_per_flakiness',
        flake_constants.DEFAULT_MAX_SWARMING_TASK_RETRIES_PER_DATA_POINT)