Code Example #1
  @classmethod
  def FromSerializable(cls, data):
    """Deserializes the given data into a SwarmingTaskRequest.

    Because Swarming frequently adds new fields to task requests, maintaining
    a strict 1:1 mapping between Findit and Swarming is not feasible. Instead,
    when deserializing a swarming task request, only consider the fields that
    are necessary.

    Args:
      data (dict): The dict mapping from defined attributes to their values.

    Returns:
      An instance of the given class with attributes set to the given data.
    """
    properties = data.get('properties', {})
    inputs_ref = properties.get('inputs_ref', {})
    cipd_input = properties.get('cipd_input', {})

    return SwarmingTaskRequest(
        created_ts=data.get('created_ts'),
        expiration_secs=str(data.get('expiration_secs')),
        name=data.get('name'),
        parent_task_id=data.get('parent_task_id'),
        priority=str(data.get('priority')),
        properties=SwarmingTaskProperties(
            caches=properties.get('caches'),
            command=properties.get('command'),
            dimensions=properties.get('dimensions') or [],
            env=properties.get('env') or [],
            env_prefixes=properties.get('env_prefixes') or [],
            execution_timeout_secs=str(
                properties.get('execution_timeout_secs')),
            extra_args=ListOfBasestring.FromSerializable(
                properties.get('extra_args') or []),
            grace_period_secs=str(properties.get('grace_period_secs')),
            io_timeout_secs=str(properties.get('io_timeout_secs')),
            idempotent=properties.get('idempotent'),
            inputs_ref=SwarmingTaskInputsRef(
                isolated=inputs_ref.get('isolated'),
                isolatedserver=inputs_ref.get('isolatedserver'),
                namespace=inputs_ref.get('namespace')),
            cipd_input=CIPDInput(
                packages=CIPDPackages.FromSerializable(
                    cipd_input.get('packages')),
                client_package=CIPDClientPackage(
                    version=cipd_input.get('client_package', {}).get('version'),
                    package_name=cipd_input.get('client_package',
                                                {}).get('package_name'),
                ),
                server=cipd_input.get('server'),
            ),
        ),
        pubsub_auth_token=data.get('pubsub_auth_token'),
        pubsub_topic=data.get('pubsub_topic'),
        pubsub_userdata=data.get('pubsub_userdata'),
        service_account=data.get('service_account'),
        tags=ListOfBasestring.FromSerializable(data.get('tags') or []),
        user=data.get('user'))
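A minimal usage sketch for the deserializer above, assuming the Findit model classes shown are importable; the raw dict below is an invented, trimmed-down Swarming payload. Keys the method does not read, such as 'brand_new_swarming_field', are dropped by design:

  raw = {
      'name': 'ref_task',
      'priority': 25,
      'expiration_secs': 3600,
      'properties': {
          'inputs_ref': {'isolated': 'sha1', 'isolatedserver': 'server'},
      },
      'brand_new_swarming_field': 'ignored',  # Unmapped keys are ignored.
  }
  request = SwarmingTaskRequest.FromSerializable(raw)
  assert request.properties.inputs_ref.isolated == 'sha1'
  assert request.priority == '25'  # Numeric fields are normalized to strings.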
Code Example #2
  def testAnalyzeFlakePipelineStartTaskAfterDelay(self, mocked_delay, _):
    analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
    # Random date in the past, for coverage.
    analysis.request_time = datetime.datetime(2015, 1, 1, 1, 1, 1)
    analysis.Save()

    start_commit_position = 1000
    start_revision = 'r1000'
    delay = 60

    step_metadata = StepMetadata(
        canonical_step_name='s',
        dimensions=None,
        full_step_name='s',
        patched=False,
        swarm_task_ids=None,
        waterfall_buildername='b',
        waterfall_mastername='w',
        isolate_target_name='s')

    mocked_delay.return_value = delay

    analyze_flake_input = AnalyzeFlakeInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        analyze_commit_position_parameters=NextCommitPositionOutput(
            next_commit_id=CommitID(
                commit_position=start_commit_position, revision=start_revision),
            culprit_commit_id=None),
        commit_position_range=IntRange(lower=None, upper=None),
        dimensions=ListOfBasestring.FromSerializable(['os:testOS']),
        manually_triggered=False,
        rerun=False,
        retries=0,
        step_metadata=step_metadata)

    expected_retried_analyze_flake_input = AnalyzeFlakeInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        analyze_commit_position_parameters=NextCommitPositionOutput(
            next_commit_id=CommitID(
                commit_position=start_commit_position, revision=start_revision),
            culprit_commit_id=None),
        commit_position_range=IntRange(lower=None, upper=None),
        dimensions=ListOfBasestring.FromSerializable(['os:testOS']),
        manually_triggered=False,
        rerun=False,
        retries=1,
        step_metadata=step_metadata)

    self.MockAsynchronousPipeline(DelayPipeline, delay, delay)

    self.MockGeneratorPipeline(RecursiveAnalyzeFlakePipeline,
                               expected_retried_analyze_flake_input, None)

    pipeline_job = AnalyzeFlakePipeline(analyze_flake_input)
    pipeline_job.start()
    self.execute_queued_tasks()
Code Example #3
    def testUpdateAnalysisDataPointsExistingDataPointWithErrorSalvagable(
            self, _):
        commit_position = 1000
        revision = 'r1000'
        iterations = 100
        pass_count = 50
        completed_time = datetime(2018, 1, 1, 0, 1, 0)
        error = SwarmingTaskError(code=1, message='m')
        started_time = datetime(2018, 1, 1, 0, 0, 0)
        task_id_1 = 'task_1'
        task_id_2 = 'task_2'
        build_url = 'url'
        try_job_url = None

        swarming_task_output = FlakeSwarmingTaskOutput(
            completed_time=completed_time,
            error=error,
            iterations=iterations,
            pass_count=pass_count,
            started_time=started_time,
            task_id=task_id_2)

        initial_flakiness = Flakiness(
            build_number=None,
            build_url=build_url,
            commit_position=commit_position,
            total_test_run_seconds=60,
            error=None,
            failed_swarming_task_attempts=0,
            iterations=50,
            pass_rate=0.5,
            revision=revision,
            try_job_url=try_job_url,
            task_ids=ListOfBasestring.FromSerializable([task_id_1]))

        expected_flakiness = Flakiness(
            build_number=None,
            build_url=build_url,
            commit_position=commit_position,
            total_test_run_seconds=120,
            error=None,  # Only set error if no more retries.
            failed_swarming_task_attempts=0,  # Task was salvaged.
            iterations=150,
            pass_rate=0.5,
            revision=revision,
            try_job_url=try_job_url,
            task_ids=ListOfBasestring.FromSerializable([task_id_1, task_id_2]))

        resulting_flakiness = flakiness_util.UpdateFlakiness(
            initial_flakiness, swarming_task_output)

        self.assertEqual(expected_flakiness, resulting_flakiness)
Code Example #4
    def testUpdateExistingFlakinessWithErrorWithSuccessfulRun(self, _):
        commit_position = 1000
        revision = 'r1000'
        iterations = 10
        pass_count = 5
        completed_time = datetime(2018, 1, 1, 0, 1, 0)
        started_time = datetime(2018, 1, 1, 0, 0, 0)
        task_id_1 = 'task_1'
        task_id_2 = 'task_2'
        build_url = 'url'
        try_job_url = None

        swarming_task_output = FlakeSwarmingTaskOutput(
            completed_time=completed_time,
            error=None,
            iterations=iterations,
            pass_count=pass_count,
            started_time=started_time,
            task_id=task_id_2)

        # Simulate first run failing.
        initial_flakiness = Flakiness(
            build_number=None,
            build_url=build_url,
            commit_position=commit_position,
            total_test_run_seconds=60,
            error=None,
            failed_swarming_task_attempts=1,
            iterations=0,
            pass_rate=None,
            revision=revision,
            try_job_url=try_job_url,
            task_ids=ListOfBasestring.FromSerializable([task_id_1]))

        expected_flakiness = Flakiness(
            build_number=None,
            build_url=build_url,
            commit_position=commit_position,
            total_test_run_seconds=120,  # 60s initial + 60s from the new task.
            error=None,
            failed_swarming_task_attempts=1,
            iterations=10,
            pass_rate=0.5,
            revision=revision,
            try_job_url=try_job_url,
            task_ids=ListOfBasestring.FromSerializable([task_id_1, task_id_2]))

        resulting_flakiness = flakiness_util.UpdateFlakiness(
            initial_flakiness, swarming_task_output)

        self.assertEqual(expected_flakiness, resulting_flakiness)
Code Example #5
    def testUpdateAnalysisDataPointsExistingDataPointNoError(self):
        commit_position = 1000
        revision = 'r1000'
        iterations = 100
        pass_count = 60
        failed_swarming_task_attempts = 2
        completed_time = datetime(2018, 1, 1, 1, 0, 0)
        error = None
        started_time = datetime(2018, 1, 1, 0, 0, 0)
        task_id = 'task_2'
        build_url = None
        try_job_url = 'url'

        initial_flakiness = Flakiness(
            build_number=None,
            build_url=build_url,
            commit_position=commit_position,
            total_test_run_seconds=1800,
            error=None,
            failed_swarming_task_attempts=failed_swarming_task_attempts,
            iterations=iterations,
            pass_rate=0.5,
            revision=revision,
            try_job_url=try_job_url,
            task_ids=ListOfBasestring.FromSerializable(['task_1']))

        swarming_task_output = FlakeSwarmingTaskOutput(
            completed_time=completed_time,
            error=error,
            iterations=iterations,
            pass_count=pass_count,
            started_time=started_time,
            task_id=task_id)

        resulting_flakiness = flakiness_util.UpdateFlakiness(
            initial_flakiness, swarming_task_output)

        expected_flakiness = Flakiness(
            build_number=None,
            build_url=build_url,
            commit_position=commit_position,
            total_test_run_seconds=5400,
            error=None,
            failed_swarming_task_attempts=failed_swarming_task_attempts,
            iterations=200,
            pass_rate=0.55,
            revision=revision,
            task_ids=ListOfBasestring.FromSerializable(['task_1', 'task_2']),
            try_job_url=try_job_url)

        self.assertEqual(expected_flakiness, resulting_flakiness)
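The expected values in the UpdateFlakiness tests above imply the aggregation arithmetic: iterations and elapsed seconds accumulate, and the pass rate becomes the iteration-weighted mean. A hedged numeric sketch mirroring the test directly above (variable names invented):

  old_iterations, old_pass_rate = 100, 0.5    # Flakiness so far: 50 passes.
  new_iterations, new_pass_count = 100, 60    # Incoming swarming task output.
  total_iterations = old_iterations + new_iterations
  aggregated_pass_rate = (old_pass_rate * old_iterations +
                          new_pass_count) / float(total_iterations)
  assert total_iterations == 200
  assert aggregated_pass_rate == 0.55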
Code Example #6
    def testUpdateFlakinessNewFlakinessNoError(self):
        commit_position = 1000
        completed_time = datetime(2018, 1, 1, 0, 1, 0)
        error = None
        iterations = 100
        pass_count = 50
        revision = 'r1000'
        started_time = datetime(2018, 1, 1, 0, 0, 0)
        task_id = 'task_id'
        build_url = None
        try_job_url = 'url'

        swarming_task_output = FlakeSwarmingTaskOutput(
            completed_time=completed_time,
            error=error,
            iterations=iterations,
            pass_count=pass_count,
            started_time=started_time,
            task_id=task_id)

        initial_flakiness = Flakiness(
            build_number=None,
            build_url=build_url,
            commit_position=commit_position,
            total_test_run_seconds=None,
            error=None,
            failed_swarming_task_attempts=0,
            iterations=None,
            pass_rate=None,
            revision=revision,
            try_job_url=try_job_url,
            task_ids=ListOfBasestring.FromSerializable([]))

        expected_flakiness = Flakiness(
            build_number=None,
            build_url=build_url,
            commit_position=commit_position,
            total_test_run_seconds=60,
            error=None,
            failed_swarming_task_attempts=0,
            iterations=iterations,
            pass_rate=0.5,
            revision=revision,
            try_job_url=try_job_url,
            task_ids=ListOfBasestring.FromSerializable([task_id]))

        resulting_flakiness = flakiness_util.UpdateFlakiness(
            initial_flakiness, swarming_task_output)

        self.assertEqual(expected_flakiness, resulting_flakiness)
Code Example #7
    def testUpdateFlakinessWithErrorUnsalvagable(self, _):
        commit_position = 1000
        completed_time = datetime(2018, 1, 1, 1, 0, 0)
        error = SwarmingTaskError(code=1, message='message')
        iterations = None
        pass_count = None
        revision = 'r1000'
        started_time = datetime(2018, 1, 1, 0, 0, 0)
        task_id = 'task_id'
        build_url = 'url'
        try_job_url = None

        swarming_task_output = FlakeSwarmingTaskOutput(
            completed_time=completed_time,
            error=error,
            iterations=iterations,
            pass_count=pass_count,
            started_time=started_time,
            task_id=task_id)

        flakiness_to_update = Flakiness(
            build_number=None,
            build_url=build_url,
            commit_position=commit_position,
            total_test_run_seconds=0,
            error=None,
            failed_swarming_task_attempts=0,
            iterations=0,
            pass_rate=None,
            revision=revision,
            try_job_url=try_job_url,
            task_ids=ListOfBasestring.FromSerializable([]))

        expected_flakiness = Flakiness(
            build_number=None,
            build_url=build_url,
            commit_position=commit_position,
            total_test_run_seconds=0,
            error=None,
            failed_swarming_task_attempts=1,
            iterations=0,
            pass_rate=None,
            revision=revision,
            try_job_url=try_job_url,
            task_ids=ListOfBasestring.FromSerializable([task_id]))

        resulting_flakiness = flakiness_util.UpdateFlakiness(
            flakiness_to_update, swarming_task_output)

        self.assertEqual(expected_flakiness, resulting_flakiness)
Code Example #8
    def testUpdateFlakeAnalysisDataPointsPipelineTooManyErrors(
            self, _, mocked_error_reporting):
        analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
        analysis.Save()

        flakiness = Flakiness(build_number=None,
                              build_url='url',
                              commit_position=1000,
                              total_test_run_seconds=100,
                              error=None,
                              failed_swarming_task_attempts=3,
                              iterations=50,
                              pass_rate=0.5,
                              revision='r1000',
                              try_job_url=None,
                              task_ids=ListOfBasestring.FromSerializable(
                                  ['task_id']))

        update_data_points_input = UpdateFlakeAnalysisDataPointsInput(
            analysis_urlsafe_key=analysis.key.urlsafe(), flakiness=flakiness)

        pipeline_job = UpdateFlakeAnalysisDataPointsPipeline(
            update_data_points_input)
        pipeline_job.start()
        self.execute_queued_tasks()

        self.assertTrue(mocked_error_reporting.called)
Code Example #9
  def testDetermineApproximatePassRateConverged(self, *_):
    master_name = 'm'
    builder_name = 'b'
    reference_build_number = 123
    step_name = 's'
    test_name = 't'
    commit_position = 1000
    incoming_pass_count = 15
    iterations = 30
    incoming_pass_rate = 0.5
    isolate_sha = 'sha1'
    revision = 'r1000'
    started_time = datetime(2018, 1, 1, 0, 0, 0)
    completed_time = datetime(2018, 1, 1, 1, 0, 0)
    build_url = 'url'
    try_job_url = None

    isolate_sha_output = GetIsolateShaOutput(
        build_number=None,
        build_url=None,
        isolate_sha=isolate_sha,
        try_job_url='url')

    flake_swarming_task_output = FlakeSwarmingTaskOutput(
        error=None,
        pass_count=incoming_pass_count,
        iterations=iterations,
        started_time=started_time,
        completed_time=completed_time,
        task_id='task_id')

    flakiness_thus_far = Flakiness(
        build_number=None,
        build_url=build_url,
        commit_position=commit_position,
        total_test_run_seconds=3600,
        error=None,
        failed_swarming_task_attempts=0,
        iterations=iterations,
        pass_rate=incoming_pass_rate,
        revision=revision,
        try_job_url=try_job_url,
        task_ids=ListOfBasestring.FromSerializable(['task_id_1']))

    determine_approximate_pass_rate_input = DetermineApproximatePassRateInput(
        builder_name=builder_name,
        commit_position=commit_position,
        get_isolate_sha_output=isolate_sha_output,
        flakiness_thus_far=flakiness_thus_far,
        previous_swarming_task_output=flake_swarming_task_output,
        master_name=master_name,
        reference_build_number=reference_build_number,
        revision=revision,
        step_name=step_name,
        test_name=test_name)

    pipeline_job = DetermineApproximatePassRatePipeline(
        determine_approximate_pass_rate_input)
    pipeline_job.start()
    self.execute_queued_tasks()
Code Example #10
  def testAnalyzeFlakePipelineAnalysisFinishedNoFindings(self):
    analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
    analysis.Save()

    analyze_flake_input = AnalyzeFlakeInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        analyze_commit_position_parameters=NextCommitPositionOutput(
            next_commit_id=None, culprit_commit_id=None),
        commit_position_range=IntRange(lower=None, upper=None),
        dimensions=ListOfBasestring.FromSerializable([]),
        manually_triggered=False,
        rerun=False,
        retries=0,
        step_metadata=None)

    expected_report_event_input = ReportEventInput(
        analysis_urlsafe_key=analysis.key.urlsafe())
    self.MockGeneratorPipeline(ReportAnalysisEventPipeline,
                               expected_report_event_input, None)

    pipeline_job = AnalyzeFlakePipeline(analyze_flake_input)
    pipeline_job.start()
    self.execute_queued_tasks()

    self.assertIsNone(analysis.culprit_urlsafe_key)
    self.assertEqual(analysis_status.COMPLETED, analysis.status)
    self.assertEqual(result_status.NOT_FOUND_UNTRIAGED, analysis.result_status)
Code Example #11
    def testIdentifyCulpritForTestTryJobSuccess(self, mock_fn, mock_fn2):
        master_name = 'm'
        builder_name = 'b'
        build_number = 1

        repo_name = 'chromium'
        revision = 'rev2'

        culprit = WfSuspectedCL.Create(repo_name, revision, 100)
        culprit.put()

        culprits_result = {
            'rev1': {
                'revision': 'rev1',
                'repo_name': 'chromium',
                'commit_position': 1,
                'url': 'url_1'
            },
            'rev2': {
                'revision': revision,
                'commit_position': 2,
                'url': 'url_2',
                'repo_name': repo_name
            }
        }

        culprit_map = {'step': {'test1': 'rev1', 'test2': 'rev2'}}
        mock_fn.return_value = (
            culprits_result, ListOfBasestring.FromSerializable([]),
            FailureToCulpritMap.FromSerializable(culprit_map))

        culprits = DictOfBasestring()
        culprits['rev2'] = culprit.key.urlsafe()
        mock_fn2.return_value = culprits

        self.MockGeneratorPipeline(
            pipeline_class=RevertAndNotifyTestCulpritPipeline,
            expected_input=CulpritActionParameters(
                build_key=BuildKey(master_name=master_name,
                                   builder_name=builder_name,
                                   build_number=build_number),
                culprits=culprits,
                heuristic_cls=ListOfBasestring(),
                failure_to_culprit_map=FailureToCulpritMap.FromSerializable(
                    culprit_map)),
            mocked_output=False)

        parameters = IdentifyTestTryJobCulpritParameters(
            build_key=BuildKey(master_name=master_name,
                               builder_name=builder_name,
                               build_number=build_number),
            result=TestTryJobResult.FromSerializable({}))
        pipeline = IdentifyTestTryJobCulpritPipeline(parameters)
        pipeline.start()
        self.execute_queued_tasks()
        mock_fn.assert_called_once_with(parameters)
Code Example #12
  def testDetermineApproximatePassRatePipelineWrapper(self):
    master_name = 'm'
    builder_name = 'b'
    reference_build_number = 123
    step_name = 's'
    test_name = 't'
    commit_position = 1000
    incoming_pass_rate = 0.5
    isolate_sha = 'sha1'
    revision = 'r1000'
    build_url = None
    try_job_url = 'url'

    isolate_sha_output = GetIsolateShaOutput(
        build_number=None,
        build_url=build_url,
        isolate_sha=isolate_sha,
        try_job_url=try_job_url)

    flakiness_thus_far = Flakiness(
        build_number=None,
        build_url=build_url,
        commit_position=commit_position,
        total_test_run_seconds=60,
        error=None,
        failed_swarming_task_attempts=0,
        iterations=10,
        pass_rate=incoming_pass_rate,
        revision=revision,
        try_job_url=try_job_url,
        task_ids=ListOfBasestring.FromSerializable(['task_id_1']))

    determine_approximate_pass_rate_input = DetermineApproximatePassRateInput(
        builder_name=builder_name,
        commit_position=commit_position,
        get_isolate_sha_output=isolate_sha_output,
        flakiness_thus_far=flakiness_thus_far,
        master_name=master_name,
        previous_swarming_task_output=None,
        reference_build_number=reference_build_number,
        revision=revision,
        step_name=step_name,
        test_name=test_name)

    self.MockGeneratorPipeline(DetermineApproximatePassRatePipeline,
                               determine_approximate_pass_rate_input, None)

    pipeline_job = DetermineApproximatePassRatePipelineWrapper(
        determine_approximate_pass_rate_input)
    pipeline_job.start()
    self.execute_queued_tasks()
Code Example #13
  def testRerunAnalysisWithAnalyzeFlakePipeline(
      self, mocked_analysis, mocked_pipeline, mocked_need_analysis,
      mocked_build_info, mock_dimensions, *_):
    buildbucket_id = 'id'
    mock_dimensions.return_value = ['os:Mac', 'cpu:x86']
    start_commit_position = 1000
    start_build_info = BuildInfo('m', 'b 1', 123)
    start_build_info.commit_position = start_commit_position
    start_build_info.chromium_revision = 'r1000'
    start_build_info.buildbucket_id = buildbucket_id
    mocked_build_info.return_value = start_build_info
    mocked_analysis.pipeline_status_path.return_value = 'status'
    mocked_analysis.key.urlsafe.return_value = 'urlsafe_key'
    mocked_need_analysis.return_value = (True, mocked_analysis)
    test = TestInfo('m', 'b 1', 123, 's', 't')
    manually_triggered = False
    flake = Flake.Create('chromium', 's', 't', 'l')

    analysis = initialize_flake_pipeline.ScheduleAnalysisIfNeeded(
        test,
        test,
        flake.key,
        bug_id=None,
        allow_new_analysis=True,
        manually_triggered=manually_triggered,
        force=True,
        queue_name=constants.DEFAULT_QUEUE)

    self.assertIsNotNone(analysis)
    self.assertEqual(buildbucket_id, analysis.build_id)
    self.assertEqual(buildbucket_id, analysis.original_build_id)

    analyze_flake_input = AnalyzeFlakeInput(
        analysis_urlsafe_key='urlsafe_key',
        analyze_commit_position_parameters=NextCommitPositionOutput(
            culprit_commit_id=None,
            next_commit_id=CommitID(
                commit_position=start_commit_position,
                revision=start_build_info.chromium_revision)),
        commit_position_range=IntRange(lower=None, upper=start_commit_position),
        dimensions=ListOfBasestring.FromSerializable(
            ['os:Mac', 'cpu:x86', 'pool:luci.chromium.findit']),
        manually_triggered=manually_triggered,
        rerun=True,
        retries=0,
        step_metadata=StepMetadata.FromSerializable({}))

    mocked_pipeline.assert_has_calls([
        mock.call(analyze_flake_input),
        mock.call().start(queue_name=constants.DEFAULT_QUEUE)
    ])
Code Example #14
    def testUpdateFlakeAnalysisDataPointsPipeline(self, mocked_change_log):
        analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
        analysis.Save()

        commit_position = 1000
        pass_rate = 0.5
        revision = 'r1000'
        expected_time = datetime(2018, 9, 18, 0, 0, 0)
        committer = Contributor(name='name', email='email', time=expected_time)
        change_log = ChangeLog(None, committer, revision, None, None, None,
                               None, None)
        mocked_change_log.return_value = change_log

        flakiness = Flakiness(build_number=123,
                              build_url='url',
                              commit_position=commit_position,
                              total_test_run_seconds=100,
                              error=None,
                              failed_swarming_task_attempts=0,
                              iterations=50,
                              pass_rate=pass_rate,
                              revision=revision,
                              try_job_url=None,
                              task_ids=ListOfBasestring.FromSerializable(
                                  ['task_id']))

        expected_data_point = DataPoint.Create(build_number=123,
                                               build_url='url',
                                               commit_position=commit_position,
                                               elapsed_seconds=100,
                                               error=None,
                                               failed_swarming_task_attempts=0,
                                               iterations=50,
                                               pass_rate=pass_rate,
                                               git_hash=revision,
                                               try_job_url=None,
                                               task_ids=['task_id'],
                                               commit_timestamp=expected_time)

        update_data_points_input = UpdateFlakeAnalysisDataPointsInput(
            analysis_urlsafe_key=analysis.key.urlsafe(), flakiness=flakiness)

        pipeline_job = UpdateFlakeAnalysisDataPointsPipeline(
            update_data_points_input)
        pipeline_job.start()
        self.execute_queued_tasks()

        self.assertEqual(1, len(analysis.data_points))
        self.assertEqual(expected_data_point, analysis.data_points[0])
Code Example #15
    def testUpdateFlakiness(self):
        flakiness = Flakiness(build_number=None,
                              build_url='url',
                              commit_position=1000,
                              total_test_run_seconds=0,
                              error=None,
                              failed_swarming_task_attempts=0,
                              iterations=0,
                              pass_rate=None,
                              revision='r1000',
                              try_job_url=None,
                              task_ids=ListOfBasestring.FromSerializable([]))

        self.assertEqual(flakiness,
                         flakiness_util.UpdateFlakiness(flakiness, None))
Code Example #16
  def testOnFinalizedNoError(self):
    analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
    analysis.Save()

    analyze_flake_input = AnalyzeFlakeInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        analyze_commit_position_parameters=NextCommitPositionOutput(
            next_commit_id=CommitID(commit_position=1000, revision='rev'),
            culprit_commit_id=None),
        commit_position_range=IntRange(lower=None, upper=None),
        dimensions=ListOfBasestring.FromSerializable(['os:testOS']),
        manually_triggered=False,
        rerun=False,
        retries=0,
        step_metadata=None)

    pipeline_job = AnalyzeFlakePipeline(analyze_flake_input)
    pipeline_job.OnFinalized(analyze_flake_input)
    self.assertEqual(analysis_status.COMPLETED, analysis.status)
Code Example #17
  def testRecursiveAnalyzeFlakePipeline(self):
    analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
    analysis.Save()

    analyze_flake_input = AnalyzeFlakeInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        analyze_commit_position_parameters=NextCommitPositionOutput(
            next_commit_id=CommitID(commit_position=1000, revision='rev'),
            culprit_commit_id=None),
        commit_position_range=IntRange(lower=None, upper=None),
        dimensions=ListOfBasestring.FromSerializable([]),
        manually_triggered=False,
        rerun=False,
        retries=0,
        step_metadata=None)

    self.MockGeneratorPipeline(AnalyzeFlakePipeline, analyze_flake_input, None)

    pipeline_job = RecursiveAnalyzeFlakePipeline(analyze_flake_input)
    pipeline_job.start()
    self.execute_queued_tasks()
Code Example #18
    def testConvertFlakinessToDataPoint(self):
        build_url = 'url'
        commit_position = 1000
        total_test_run_seconds = 60
        failed_swarming_task_attempts = 0
        iterations = 10
        pass_rate = 0.3
        revision = 'r1000'
        try_job_url = None
        task_id = 'task_id'

        flakiness = Flakiness(
            build_number=None,
            build_url=build_url,
            commit_position=commit_position,
            total_test_run_seconds=total_test_run_seconds,
            error=None,
            failed_swarming_task_attempts=failed_swarming_task_attempts,
            iterations=iterations,
            pass_rate=pass_rate,
            revision=revision,
            try_job_url=try_job_url,
            task_ids=ListOfBasestring.FromSerializable([task_id]))

        expected_data_point = DataPoint.Create(
            build_url=build_url,
            commit_position=commit_position,
            elapsed_seconds=total_test_run_seconds,
            failed_swarming_task_attempts=failed_swarming_task_attempts,
            iterations=iterations,
            pass_rate=pass_rate,
            git_hash=revision,
            try_job_url=try_job_url,
            task_ids=[task_id])

        data_point = data_point_util.ConvertFlakinessToDataPoint(flakiness)
        self.assertEqual(expected_data_point, data_point)
Code Example #19
def CreateNewSwarmingTaskRequestTemplate(runner_id, ref_task_id, ref_request,
                                         master_name, builder_name, step_name,
                                         tests, iterations):
    """Returns a SwarmingTaskRequest instance to run the given tests only.

  Args:
    ref_task_id (str): Id of the referred swarming task.
    ref_request (SwarmingTaskRequest): Request of the referred swarming task.
    master_name (str): The name of the main waterfall master for a build.
    builder_name (str): The name of the main waterfall builder for a build.
    step_name (str): The name of a failed step in the build.
    tests (list): A list of tests in the step that we want to rerun in task.
    iterations (int): Number of iterations each test should run.
  """
    # Make a copy of the referred request and drop or overwrite some fields.
    new_request = copy.deepcopy(ref_request)
    new_request.name = 'findit/ref_task_id/%s/%s' % (
        ref_task_id, time_util.GetUTCNow().strftime('%Y-%m-%d %H:%M:%S %f'))
    new_request.parent_task_id = ''
    new_request.user = ''

    _UpdateRequestWithPubSubCallback(new_request, runner_id)

    # To force a fresh re-run and ignore cached result of any equivalent run.
    new_request.properties.idempotent = False

    # Set the gtest_filter to run the given tests only.
    # Remove existing test filter first.
    new_request.properties.extra_args = ListOfBasestring.FromSerializable(
        [a for a in new_request.properties.extra_args if not _IsTestFilter(a)])

    new_request.properties.extra_args.append(
        '--isolated-script-test-filter=%s' % '::'.join(tests))

    new_request.properties.extra_args.append(
        '--isolated-script-test-repeat=%s' % iterations)

    new_request.properties.extra_args.append(
        '--isolated-script-test-launcher-retry-limit=0')

    # Also rerun disabled tests. Scenario: the test was disabled before Findit
    # runs any analysis. One possible case:
    #   1. A gtest became flaky on CQ, but Findit was not automatically
    #      triggered to run any analysis because:
    #      * the test is not flaky enough
    #      * chromium-try-flakes has filed/updated too many bugs
    #   2. The test got disabled, but no culprit was identified.
    #   3. Some developer starts the investigation and requests Findit to
    #      analyze the flaky test.
    #   4. Findit picks the latest Waterfall build of the matching configuration
    #      for the CQ build in which the flaky test is found.
    #   5. In the picked Waterfall build, the test is already disabled.
    #
    # Note: test runner on Android ignores this flag because it is not supported
    # yet even though it exists.
    new_request.properties.extra_args.append(
        '--isolated-script-test-also-run-disabled-tests')

    # Remove the env setting for sharding.
    sharding_settings = ['GTEST_SHARD_INDEX', 'GTEST_TOTAL_SHARDS']
    new_request.properties.env = [
        e for e in new_request.properties.env
        if e['key'] not in sharding_settings
    ]

    # Reset tags for searching and monitoring.
    ref_name = swarming_util.GetTagValue(ref_request.tags, 'name')
    new_request.tags = ListOfBasestring()
    new_request.tags.append('ref_master:%s' % master_name)
    new_request.tags.append('ref_buildername:%s' % builder_name)

    new_request.tags.append('ref_stepname:%s' % step_name)
    new_request.tags.append('ref_name:%s' % ref_name)
    new_request.tags.extend(
        ['findit:1', 'project:Chromium', 'purpose:post-commit'])

    # Use a priority much lower than CQ for now (CQ's priority is 30).
    # Later we might use a higher priority -- a lower value here.
    # Note: the smaller the value, the higher the priority.
    swarming_settings = waterfall_config.GetSwarmingSettings()
    request_expiration_hours = swarming_settings.get(
        'request_expiration_hours')
    new_request.priority = str(
        max(100, swarming_settings.get('default_request_priority')))
    new_request.expiration_secs = str(request_expiration_hours * 60 * 60)

    return new_request
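A hypothetical invocation of the template builder above; all ids and names are placeholders, and ref_request would typically come from SwarmingTaskRequest.FromSerializable as in Code Example #1:

  new_request = CreateNewSwarmingTaskRequestTemplate(
      runner_id='pipeline-id',      # Placeholder runner/pipeline id.
      ref_task_id='ref-task-id',    # Placeholder referred task id.
      ref_request=ref_request,      # A deserialized SwarmingTaskRequest.
      master_name='m',
      builder_name='b',
      step_name='browser_tests',
      tests=['Suite.TestA', 'Suite.TestB'],
      iterations=100)
  # The returned request reruns only the two given tests, 100 times each,
  # with test-launcher retries and Swarming result caching disabled.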
Code Example #20
    def testTriggerSwarmingTask(self, mocked_request, mocked_trigger,
                                mocked_reference_info):
        master_name = 'm'
        builder_name = 'b'
        build_number = 123
        step_name = 's'
        test_name = 't'
        isolate_sha = 'sha1'
        iterations = 50
        timeout_seconds = 3600
        runner_id = 'pipeline_id'
        ref_task_id = 'ref_task_id'
        ref_request = SwarmingTaskRequest.FromSerializable(
            _SAMPLE_REQUEST_JSON)
        task_id = 'task_id'

        request = SwarmingTaskRequest(
            created_ts=None,
            name='findit/ref_task_id/ref_task_id/2018-03-15 00:00:00 000000',
            tags=ListOfBasestring.FromSerializable([
                'ref_master:m',
                'ref_buildername:b',
                'ref_buildnumber:4',
                'ref_stepname:s',
                'ref_name:test',
                'purpose:identify-regression-range',
            ]),
            pubsub_topic='projects/app-id/topics/swarming',
            properties=SwarmingTaskProperties(
                dimensions=[{
                    'value': 'v',
                    'key': 'k'
                }],
                idempotent=False,
                inputs_ref=SwarmingTaskInputsRef(
                    isolatedserver='isolatedserver',
                    namespace=None,
                    isolated='sha1'),
                extra_args=ListOfBasestring.FromSerializable([
                    '--flag=value', '--gtest_filter=a.b:a.c',
                    '--gtest_repeat=50', '--test-launcher-retry-limit=0',
                    '--gtest_also_run_disabled_tests'
                ]),
                io_timeout_secs='1200',
                command='cmd',
                env=[{
                    'value': '1',
                    'key': 'a'
                }],
                execution_timeout_secs='3600',
                env_prefixes=[],
                grace_period_secs='30',
                caches=None),
            priority='25',
            parent_task_id='',
            user='',
            service_account=None,
            pubsub_userdata='{"runner_id": "runner_id"}',
            expiration_secs='3600',
            pubsub_auth_token='auth_token')

        mocked_reference_info.return_value = (ref_task_id, ref_request)
        mocked_request.return_value = request
        mocked_trigger.return_value = (task_id, None)

        self.assertEqual(
            task_id,
            flake_swarming.TriggerSwarmingTask(master_name, builder_name,
                                               build_number, step_name,
                                               test_name, isolate_sha,
                                               iterations, timeout_seconds,
                                               runner_id))

        mocked_request.assert_called_once_with(runner_id, ref_task_id,
                                               ref_request, master_name,
                                               builder_name, step_name,
                                               test_name, isolate_sha,
                                               iterations, timeout_seconds)
Code Example #21
def ScheduleAnalysisIfNeeded(
        normalized_test,
        original_test,
        flake_key,
        bug_id=None,
        allow_new_analysis=False,
        force=False,
        manually_triggered=False,
        user_email=None,
        triggering_source=triggering_sources.FINDIT_PIPELINE,
        queue_name=constants.DEFAULT_QUEUE):
    """Schedules an analysis if needed and returns the MasterFlakeAnalysis.

  When the build failure was already analyzed and a new analysis is scheduled,
  the returned WfAnalysis will still have the result of last completed analysis.

  Args:
    normalized_test (TestInfo): Info of the normalized flaky test after mapping
      a CQ trybot step to a Waterfall buildbot step, striping prefix "PRE_"
      from a gtest, etc.
    original_test (TestInfo): Info of the original flaky test.
    flake_key (ndb.Key): The key to the Flake responsible for triggering this
      analysis.
    bug_id (int): The monorail bug id to update when analysis is done.
    allow_new_analysis (bool): Indicate whether a new analysis is allowed.
    force (bool): Indicate whether to force a rerun of current analysis.
    manually_triggered (bool): True if the analysis was requested manually,
      such as by a Chromium sheriff.
    user_email (str): The email of the user requesting the analysis.
    triggering_source (int): From where this analysis was triggered, such as
      through Findit pipeline, UI, or through Findit API.
    queue_name (str): The App Engine queue to run the analysis.

  Returns:
    A MasterFlakeAnalysis instance.
    None if no analysis was scheduled and the user has no permission to.
  """
    need_new_analysis, analysis = _NeedANewAnalysis(
        normalized_test,
        original_test,
        flake_key,
        bug_id=bug_id,
        allow_new_analysis=allow_new_analysis,
        force=force,
        user_email=user_email,
        triggering_source=triggering_source)

    if need_new_analysis:
        # _NeedANewAnalysis just created master_flake_analysis. Use the latest
        # version number and pass that along to the other pipelines for updating
        # results and data.
        logging.info(
            'A new master flake analysis was successfully saved for %s (%s) and '
            'will be captured in version %s', repr(normalized_test),
            repr(original_test), analysis.version_number)

        step_metadata = (step_util.LegacyGetStepMetadata(
            normalized_test.master_name, normalized_test.builder_name,
            normalized_test.build_number,
            normalized_test.step_name) or step_util.LegacyGetStepMetadata(
                original_test.master_name, original_test.builder_name,
                original_test.build_number, original_test.step_name))

        logging.info('Initializing flake analysis pipeline for key: %s',
                     analysis.key)

        starting_build_info = build_util.GetBuildInfo(
            normalized_test.master_name, normalized_test.builder_name,
            normalized_test.build_number)

        original_build_info = build_util.GetBuildInfo(
            original_test.master_name, original_test.builder_name,
            original_test.build_number)

        assert starting_build_info, (
            'Failed to get starting build for flake analysis')
        starting_commit_position = starting_build_info.commit_position

        assert starting_commit_position is not None, (
            'Cannot analyze flake without a starting commit position')

        assert original_build_info, 'Failed to get original build info'

        # Get the dimensions of the bot for when try jobs are needed to compile.
        dimensions = try_job_service.GetDimensionsFromBuildInfo(
            starting_build_info)

        analyze_flake_input = AnalyzeFlakeInput(
            analysis_urlsafe_key=analysis.key.urlsafe(),
            analyze_commit_position_parameters=NextCommitPositionOutput(
                culprit_commit_id=None,
                next_commit_id=CommitID(
                    commit_position=starting_commit_position,
                    revision=starting_build_info.chromium_revision)),
            commit_position_range=IntRange(lower=None,
                                           upper=starting_commit_position),
            dimensions=ListOfBasestring.FromSerializable(dimensions),
            manually_triggered=manually_triggered,
            retries=0,
            rerun=force,
            step_metadata=StepMetadata.FromSerializable(step_metadata))

        pipeline_job = AnalyzeFlakePipeline(analyze_flake_input)

        pipeline_job.target = appengine_util.GetTargetNameForModule(
            constants.WATERFALL_BACKEND)
        pipeline_job.start(queue_name=queue_name)
        analysis.pipeline_status_path = pipeline_job.pipeline_status_path
        analysis.root_pipeline_id = pipeline_job.root_pipeline_id
        analysis.build_id = starting_build_info.buildbucket_id
        analysis.original_build_id = original_build_info.buildbucket_id
        analysis.put()
        analysis.LogInfo((
            'A flake analysis was scheduled using commit-based pipelines with '
            'path {}').format(pipeline_job.pipeline_status_path))
    else:
        logging.info(
            'A flake analysis is not necessary for build %s, %s, %s, %s',
            normalized_test.master_name, normalized_test.builder_name,
            normalized_test.build_number, normalized_test.step_name)

    return analysis
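A minimal call sketch, mirroring the invocation in Code Example #13; the TestInfo and Flake values are placeholders:

  test = TestInfo('m', 'b', 123, 's', 't')
  flake = Flake.Create('chromium', 's', 't', 'l')
  # Returns a MasterFlakeAnalysis, or None if the caller may not schedule one.
  analysis = ScheduleAnalysisIfNeeded(
      test, test, flake.key, bug_id=None, allow_new_analysis=True,
      force=False, queue_name=constants.DEFAULT_QUEUE)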
Code Example #22
    def RunImpl(self, parameters):
        """Pipeline to find the true pass rate of a test at a commit position."""
        master_name = parameters.master_name
        builder_name = parameters.builder_name
        reference_build_number = parameters.reference_build_number
        step_name = parameters.step_name
        test_name = parameters.test_name
        commit_position = parameters.commit_position
        get_isolate_sha_output = parameters.get_isolate_sha_output
        build_url = get_isolate_sha_output.build_url
        try_job_url = get_isolate_sha_output.try_job_url
        flakiness_thus_far = parameters.flakiness_thus_far
        previous_swarming_task_output = parameters.previous_swarming_task_output

        # Extract pass rate and iterations already-completed up to this point.
        if previous_swarming_task_output:
            assert flakiness_thus_far, (
                'Previous swarming task output not captured properly')
            error = previous_swarming_task_output.error
            pass_rate_at_commit_position = flakiness_thus_far.pass_rate
            previous_pass_count = previous_swarming_task_output.pass_count
            previous_iterations = previous_swarming_task_output.iterations
            # Use true division: both counts may be ints.
            previous_pass_rate = (
                float(previous_pass_count) / previous_iterations
                if previous_iterations else None)
        else:
            error = None
            pass_rate_at_commit_position = None
            previous_iterations = 0
            previous_pass_count = 0
            previous_pass_rate = None

            # Create a fresh Flakiness instance to aggregate swarming rerun data.
            flakiness_thus_far = Flakiness(
                build_number=get_isolate_sha_output.build_number,
                build_url=build_url,
                commit_position=commit_position,
                total_test_run_seconds=0,
                error=None,
                failed_swarming_task_attempts=0,
                iterations=0,
                pass_rate=None,
                revision=parameters.revision,
                task_ids=ListOfBasestring.FromSerializable([]),
                try_job_url=try_job_url)

        # Bail out if there were too many errors.
        if (error and flakiness_util.MaximumSwarmingTaskRetriesReached(
                flakiness_thus_far)):
            logging.error(
                'Swarming task ended in error after %d failed attempts. Giving '
                'up' % flakiness_thus_far.failed_swarming_task_attempts)
            flakiness_thus_far.error = error
            yield AggregateFlakinessPipeline(
                self.CreateInputObjectInstance(
                    AggregateFlakinessInput,
                    flakiness_thus_far=flakiness_thus_far,
                    incoming_swarming_task_output=None))
            return

        # Move on if the maximum number of iterations has been reached or exceeded.
        if flakiness_util.MaximumIterationsReached(flakiness_thus_far):
            logging.info('Max iterations reached for commit_position %d' %
                         commit_position)
            yield AggregateFlakinessPipeline(
                self.CreateInputObjectInstance(
                    AggregateFlakinessInput,
                    flakiness_thus_far=flakiness_thus_far,
                    incoming_swarming_task_output=None))
            return

        # Move on if the test doesn't exist.
        if pass_rate_util.TestDoesNotExist(pass_rate_at_commit_position):
            logging.info('No test found at commit position %d' %
                         commit_position)
            yield AggregateFlakinessPipeline(
                self.CreateInputObjectInstance(
                    AggregateFlakinessInput,
                    flakiness_thus_far=flakiness_thus_far,
                    incoming_swarming_task_output=None))
            return

        # Move on if there is sufficient information about the pass rate.
        if pass_rate_util.HasSufficientInformation(
                pass_rate_at_commit_position, flakiness_thus_far.iterations,
                previous_pass_rate, previous_iterations):
            logging.info(
                'There is sufficient information for commit position %d with pass '
                'rate %s after %d iterations' %
                (commit_position, pass_rate_at_commit_position,
                 flakiness_thus_far.iterations))
            yield AggregateFlakinessPipeline(
                self.CreateInputObjectInstance(
                    AggregateFlakinessInput,
                    flakiness_thus_far=flakiness_thus_far,
                    incoming_swarming_task_output=None))
            return

        # Another swarming task is needed. Determine parameters for it to run.
        iterations_for_task, time_for_task_seconds = (
            run_swarming_util.CalculateRunParametersForSwarmingTask(
                flakiness_thus_far, error))

        # Run swarming task, update data points with results, and recurse.
        with pipeline.InOrder():
            swarming_task_output = yield RunFlakeSwarmingTaskPipeline(
                self.CreateInputObjectInstance(
                    RunFlakeSwarmingTaskInput,
                    master_name=master_name,
                    builder_name=builder_name,
                    reference_build_number=reference_build_number,
                    step_name=step_name,
                    test_name=test_name,
                    commit_position=commit_position,
                    isolate_sha=get_isolate_sha_output.isolate_sha,
                    iterations=iterations_for_task,
                    timeout_seconds=time_for_task_seconds))

            aggregated_flakiness = yield AggregateFlakinessPipeline(
                self.CreateInputObjectInstance(
                    AggregateFlakinessInput,
                    flakiness_thus_far=flakiness_thus_far,
                    incoming_swarming_task_output=swarming_task_output))

            yield DetermineApproximatePassRatePipelineWrapper(
                self.CreateInputObjectInstance(
                    DetermineApproximatePassRateInput,
                    builder_name=parameters.builder_name,
                    commit_position=commit_position,
                    flakiness_thus_far=aggregated_flakiness,
                    get_isolate_sha_output=get_isolate_sha_output,
                    master_name=parameters.master_name,
                    previous_swarming_task_output=swarming_task_output,
                    reference_build_number=parameters.reference_build_number,
                    revision=parameters.revision,
                    step_name=parameters.step_name,
                    test_name=parameters.test_name))
Code Example #23
    def testGetIsolateShaForCommitPositionPipelineFallbackBuildLevel(
            self, _, mocked_reference_build, mocked_url, mocked_build_info):
        master_name = 'm'
        builder_name = 'b'
        build_number = 100
        step_name = 's'
        test_name = 't'
        requested_commit_position = 1000
        requested_revision = 'r1000'
        expected_sha = 'sha1'
        build_url = 'url'
        isolate_target_name = 'browser_tests'
        step_metadata = StepMetadata(canonical_step_name=None,
                                     dimensions=None,
                                     full_step_name=None,
                                     isolate_target_name=isolate_target_name,
                                     patched=True,
                                     swarm_task_ids=None,
                                     waterfall_buildername=None,
                                     waterfall_mastername=None)

        mocked_url.return_value = build_url

        expected_output = GetIsolateShaOutput(isolate_sha=expected_sha,
                                              build_number=build_number,
                                              build_url=build_url,
                                              try_job_url=None)

        build = BuildInfo(master_name, builder_name, build_number)
        build.commit_position = requested_commit_position
        mocked_build_info.return_value = (None, build)
        mocked_reference_build.return_value = build

        analysis = MasterFlakeAnalysis.Create(master_name, builder_name,
                                              build_number, step_name,
                                              test_name)
        analysis.Save()

        get_build_sha_parameters = GetIsolateShaForBuildParameters(
            master_name=master_name,
            builder_name=builder_name,
            build_number=build_number,
            url=build_url,
            step_name=step_name)

        get_sha_input = GetIsolateShaForCommitPositionParameters(
            analysis_urlsafe_key=unicode(analysis.key.urlsafe()),
            commit_position=requested_commit_position,
            dimensions=ListOfBasestring.FromSerializable([]),
            revision=requested_revision,
            step_metadata=step_metadata,
            upper_bound_build_number=analysis.build_number)

        self.MockSynchronousPipeline(GetIsolateShaForBuildPipeline,
                                     get_build_sha_parameters, expected_output)

        pipeline_job = GetIsolateShaForCommitPositionPipeline(get_sha_input)
        pipeline_job.start()
        self.execute_queued_tasks()

        pipeline_job = pipelines.pipeline.Pipeline.from_id(
            pipeline_job.pipeline_id)
        pipeline_output = pipeline_job.outputs.default.value

        self.assertEqual(expected_output.ToSerializable(), pipeline_output)
Code Example #24
  def testDetermineApproximatePassRateNotYetConverged(self, *_):
    master_name = 'm'
    builder_name = 'b'
    reference_build_number = 123
    step_name = 's'
    test_name = 't'
    commit_position = 1000
    incoming_pass_count = 15
    iterations_completed = 30
    expected_iterations = 15
    incoming_pass_rate = 0.5
    isolate_sha = 'sha1'
    revision = 'r1000'
    timeout_seconds = 3600
    started_time = datetime(2018, 1, 1, 0, 0, 0)
    completed_time = datetime(2018, 1, 1, 1, 0, 0)
    build_url = None
    try_job_url = 'url'

    isolate_sha_output = GetIsolateShaOutput(
        build_number=None,
        build_url=build_url,
        isolate_sha=isolate_sha,
        try_job_url=try_job_url)

    flakiness_thus_far = Flakiness(
        build_number=None,
        build_url=build_url,
        commit_position=commit_position,
        total_test_run_seconds=timeout_seconds,
        error=None,
        failed_swarming_task_attempts=0,
        iterations=iterations_completed,
        pass_rate=incoming_pass_rate,
        revision=revision,
        try_job_url=try_job_url,
        task_ids=ListOfBasestring.FromSerializable(['task_id_1']))

    incoming_flake_swarming_task_output = FlakeSwarmingTaskOutput(
        error=None,
        pass_count=incoming_pass_count,
        iterations=iterations_completed,
        started_time=started_time,
        completed_time=completed_time,
        task_id='task_id_2')

    expected_aggregate_flakiness_input = AggregateFlakinessInput(
        flakiness_thus_far=flakiness_thus_far,
        incoming_swarming_task_output=incoming_flake_swarming_task_output)

    expected_aggregate_flakiness_output = Flakiness(
        build_number=None,
        build_url=build_url,
        commit_position=commit_position,
        total_test_run_seconds=timeout_seconds,
        error=None,
        failed_swarming_task_attempts=0,
        iterations=45,
        pass_rate=0.5,
        revision=revision,
        try_job_url=try_job_url,
        task_ids=ListOfBasestring.FromSerializable(['task_id_1']))

    determine_approximate_pass_rate_input = DetermineApproximatePassRateInput(
        builder_name=builder_name,
        commit_position=commit_position,
        flakiness_thus_far=flakiness_thus_far,
        get_isolate_sha_output=isolate_sha_output,
        master_name=master_name,
        previous_swarming_task_output=incoming_flake_swarming_task_output,
        reference_build_number=reference_build_number,
        revision=revision,
        step_name=step_name,
        test_name=test_name)

    flake_swarming_task_input = RunFlakeSwarmingTaskInput(
        builder_name=builder_name,
        commit_position=commit_position,
        isolate_sha=isolate_sha,
        iterations=expected_iterations,
        master_name=master_name,
        reference_build_number=reference_build_number,
        step_name=step_name,
        test_name=test_name,
        timeout_seconds=timeout_seconds)

    recursive_input = DetermineApproximatePassRateInput(
        builder_name=builder_name,
        commit_position=commit_position,
        flakiness_thus_far=expected_aggregate_flakiness_output,
        get_isolate_sha_output=isolate_sha_output,
        master_name=master_name,
        previous_swarming_task_output=incoming_flake_swarming_task_output,
        reference_build_number=reference_build_number,
        revision=revision,
        step_name=step_name,
        test_name=test_name)

    self.MockAsynchronousPipeline(RunFlakeSwarmingTaskPipeline,
                                  flake_swarming_task_input,
                                  incoming_flake_swarming_task_output)
    self.MockSynchronousPipeline(AggregateFlakinessPipeline,
                                 expected_aggregate_flakiness_input,
                                 expected_aggregate_flakiness_output)
    self.MockGeneratorPipeline(DetermineApproximatePassRatePipelineWrapper,
                               recursive_input, None)

    pipeline_job = DetermineApproximatePassRatePipeline(
        determine_approximate_pass_rate_input)
    pipeline_job.start()
    self.execute_queued_tasks()
Code example #25
  def testDetermineApproximatePassRateMaximumRetriesPerSwarmingTaskReached(
      self, _):
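    """Tests that the swarming task's error is surfaced in the output once
    the maximum retries per swarming task is reached."""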
    master_name = 'm'
    builder_name = 'b'
    reference_build_number = 123
    step_name = 's'
    test_name = 't'
    commit_position = 1000
    incoming_pass_count = 15
    iterations = 30
    incoming_pass_rate = float(incoming_pass_count) / iterations
    isolate_sha = 'sha1'
    revision = 'r1000'
    task_id = 'task_id_2'
    started_time = datetime(2018, 1, 1, 0, 0, 0)
    completed_time = datetime(2018, 1, 1, 1, 0, 0)
    build_url = 'url'
    try_job_url = None
    swarming_task_error = SwarmingTaskError(code=1, message='error')

    isolate_sha_output = GetIsolateShaOutput(
        build_number=None,
        build_url=build_url,
        isolate_sha=isolate_sha,
        try_job_url=try_job_url)

    flakiness_thus_far = Flakiness(
        build_number=None,
        build_url=build_url,
        commit_position=commit_position,
        total_test_run_seconds=3600,
        error=None,
        failed_swarming_task_attempts=0,
        iterations=iterations,
        pass_rate=incoming_pass_rate,
        revision=revision,
        try_job_url=try_job_url,
        task_ids=ListOfBasestring.FromSerializable(['task_id_1']))

    expected_flakiness_thus_far = Flakiness(
        build_number=None,
        build_url=build_url,
        commit_position=commit_position,
        total_test_run_seconds=3600,
        error=swarming_task_error,
        failed_swarming_task_attempts=0,
        iterations=iterations,
        pass_rate=incoming_pass_rate,
        revision=revision,
        try_job_url=try_job_url,
        task_ids=ListOfBasestring.FromSerializable(['task_id_1']))

    incoming_flake_swarming_task_output = FlakeSwarmingTaskOutput(
        completed_time=completed_time,
        error=swarming_task_error,
        pass_count=incoming_pass_count,
        iterations=iterations,
        started_time=started_time,
        task_id=task_id)

    determine_approximate_pass_rate_input = DetermineApproximatePassRateInput(
        builder_name=builder_name,
        commit_position=commit_position,
        flakiness_thus_far=flakiness_thus_far,
        get_isolate_sha_output=isolate_sha_output,
        master_name=master_name,
        previous_swarming_task_output=incoming_flake_swarming_task_output,
        reference_build_number=reference_build_number,
        revision=revision,
        step_name=step_name,
        test_name=test_name)

    pipeline_job = DetermineApproximatePassRatePipeline(
        determine_approximate_pass_rate_input)
    pipeline_job.start()
    self.execute_queued_tasks()

    pipeline_job = pipelines.pipeline.Pipeline.from_id(
        pipeline_job.pipeline_id)
    self.assertEqual(expected_flakiness_thus_far.ToSerializable(),
                     pipeline_job.outputs.default.value)
Code example #26
  def testDetermineApproximatePassRateTestDoesNotExist(self, *_):
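    """Tests that flakiness data is left unchanged when the test does not
    exist at the analyzed commit position."""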
    master_name = 'm'
    builder_name = 'b'
    reference_build_number = 123
    step_name = 's'
    test_name = 't'
    commit_position = 1000
    incoming_pass_count = 0
    iterations = 10
    incoming_pass_rate = flake_constants.PASS_RATE_TEST_NOT_FOUND
    isolate_sha = 'sha1'
    revision = 'r1000'
    task_id = 'task_id'
    started_time = datetime(2018, 1, 1, 0, 0, 0)
    completed_time = datetime(2018, 1, 1, 1, 0, 0)
    build_url = 'url'
    try_job_url = None

    flakiness_thus_far = Flakiness(
        build_number=None,
        build_url=build_url,
        commit_position=commit_position,
        total_test_run_seconds=3600,
        error=None,
        failed_swarming_task_attempts=0,
        iterations=iterations,
        pass_rate=incoming_pass_rate,
        revision=revision,
        try_job_url=try_job_url,
        task_ids=ListOfBasestring.FromSerializable(['task_id_1']))

    isolate_sha_output = GetIsolateShaOutput(
        build_number=None,
        build_url=build_url,
        isolate_sha=isolate_sha,
        try_job_url=try_job_url)

    flake_swarming_task_output = FlakeSwarmingTaskOutput(
        error=None,
        pass_count=incoming_pass_count,
        iterations=iterations,
        task_id=task_id,
        started_time=started_time,
        completed_time=completed_time)

    determine_approximate_pass_rate_input = DetermineApproximatePassRateInput(
        builder_name=builder_name,
        commit_position=commit_position,
        flakiness_thus_far=flakiness_thus_far,
        get_isolate_sha_output=isolate_sha_output,
        master_name=master_name,
        previous_swarming_task_output=flake_swarming_task_output,
        reference_build_number=reference_build_number,
        revision=revision,
        step_name=step_name,
        test_name=test_name)

    pipeline_job = DetermineApproximatePassRatePipeline(
        determine_approximate_pass_rate_input)
    pipeline_job.start()
    self.execute_queued_tasks()

    pipeline_job = pipelines.pipeline.Pipeline.from_id(
        pipeline_job.pipeline_id)
    self.assertEqual(flakiness_thus_far.ToSerializable(),
                     pipeline_job.outputs.default.value)
Code example #27
    def testCreateNewSwarmingTaskRequest(self, mocked_template):
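        """Tests assembling a new swarming task request from a reference
        task's request template."""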
        mocked_template.return_value = SwarmingTaskRequest.FromSerializable(
            _SAMPLE_REQUEST_JSON)

        runner_id = 'pipeline_id'
        ref_task_id = 'ref_task_id'
        ref_request = SwarmingTaskRequest.GetSwarmingTaskRequestTemplate()
        master_name = 'm'
        builder_name = 'b'
        step_name = 's'
        test_name = 't'
        iterations = 50
        timeout_seconds = 3600
        isolate_sha = 'sha1'

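        # The expected request mirrors the mocked _SAMPLE_REQUEST_JSON
        # template, with flake-specific tags, gtest flags and timeouts set.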
        expected_request = SwarmingTaskRequest(
            created_ts=None,
            name='findit/ref_task_id/ref_task_id/2018-03-15 00:00:00 000000',
            tags=ListOfBasestring.FromSerializable([
                'ref_master:m',
                'ref_buildername:b',
                'ref_buildnumber:4',
                'ref_stepname:s',
                'ref_name:test',
                'purpose:identify-regression-range',
            ]),
            pubsub_topic='projects/app-id/topics/swarming',
            properties=SwarmingTaskProperties(
                dimensions=[{
                    'value': 'v',
                    'key': 'k'
                }],
                idempotent=False,
                inputs_ref=SwarmingTaskInputsRef(
                    isolatedserver='isolatedserver',
                    namespace=None,
                    isolated='sha1'),
                extra_args=ListOfBasestring.FromSerializable([
                    '--flag=value', '--gtest_filter=a.b:a.c',
                    '--gtest_repeat=50', '--test-launcher-retry-limit=0',
                    '--gtest_also_run_disabled_tests'
                ]),
                io_timeout_secs='1200',
                command='cmd',
                env=[{
                    'value': '1',
                    'key': 'a'
                }],
                execution_timeout_secs='3600',
                env_prefixes=[],
                grace_period_secs='30',
                caches=None,
                cipd_input=swarming_task_request.CIPDInput(
                    packages=swarming_task_request.CIPDPackages(),
                    client_package=swarming_task_request.CIPDClientPackage(
                        version=None,
                        package_name=None,
                    ),
                    server=None)),
            priority='25',
            parent_task_id='',
            user='',
            service_account=None,
            pubsub_userdata='{"runner_id": "runner_id"}',
            expiration_secs='3600',
            pubsub_auth_token='auth_token')

        self.assertEqual(
            expected_request,
            flake_swarming.CreateNewSwarmingTaskRequest(
                runner_id, ref_task_id, ref_request, master_name, builder_name,
                step_name, test_name, isolate_sha, iterations,
                timeout_seconds))
Code example #28
    def testGetIsolateShaForCommitPositionPipelineMatchingTarget(
            self, mocked_reference_build):
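        """Tests resolving the isolate sha from an existing IsolatedTarget
        that matches the requested commit position, with no try job."""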
        master_name = 'm'
        builder_name = 'b'
        parent_mastername = 'p_m'
        parent_buildername = 'p_b'
        build_number = 100
        build_id = 123
        test_name = 't'
        requested_commit_position = 1000
        requested_revision = 'r1000'
        expected_sha = 'sha1'
        build_url = 'url'
        luci_name = 'chromium'
        bucket_name = 'ci'
        gitiles_host = 'chromium.googlesource.com'
        gitiles_project = 'chromium/src'
        gitiles_ref = 'refs/heads/master'
        gerrit_patch = ''
        isolate_target_name = 'browser_tests'
        step_name = 's'
        isolated_hash = 'isolated_hash'

        expected_output = GetIsolateShaOutput(isolate_sha=expected_sha,
                                              build_number=None,
                                              build_url=build_url,
                                              try_job_url=None)

        analysis = MasterFlakeAnalysis.Create(master_name, builder_name,
                                              build_number, step_name,
                                              test_name)
        analysis.Save()

        build = BuildInfo(master_name, builder_name, build_number)
        build.commit_position = requested_commit_position
        build.parent_mastername = parent_mastername
        build.parent_buildername = parent_buildername
        mocked_reference_build.return_value = build

        isolated_target = IsolatedTarget.Create(
            build_id, luci_name, bucket_name, parent_mastername,
            parent_buildername, gitiles_host, gitiles_project, gitiles_ref,
            gerrit_patch, isolate_target_name, isolated_hash,
            requested_commit_position, requested_revision)
        isolated_target.put()

        step_metadata = StepMetadata(canonical_step_name=None,
                                     dimensions=None,
                                     full_step_name=None,
                                     isolate_target_name=isolate_target_name,
                                     patched=True,
                                     swarm_task_ids=None,
                                     waterfall_buildername=None,
                                     waterfall_mastername=None)

        get_sha_input = GetIsolateShaForCommitPositionParameters(
            analysis_urlsafe_key=unicode(analysis.key.urlsafe()),
            commit_position=requested_commit_position,
            dimensions=ListOfBasestring.FromSerializable([]),
            revision=requested_revision,
            step_metadata=step_metadata,
            upper_bound_build_number=analysis.build_number)

        get_sha_for_target_input = GetIsolateShaForTargetInput(
            isolated_target_urlsafe_key=isolated_target.key.urlsafe())

        self.MockSynchronousPipeline(GetIsolateShaForTargetPipeline,
                                     get_sha_for_target_input, expected_output)

        pipeline_job = GetIsolateShaForCommitPositionPipeline(get_sha_input)
        pipeline_job.start()
        self.execute_queued_tasks()

        pipeline_job = pipelines.pipeline.Pipeline.from_id(
            pipeline_job.pipeline_id)
        pipeline_output = pipeline_job.outputs.default.value

        self.assertEqual(expected_output.ToSerializable(), pipeline_output)
Code example #29
    def testFromSerializable(self):
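        """Tests deserializing a swarming task request, dropping unknown
        fields and converting numeric fields to strings."""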
        data = {
            'expiration_secs': '50',
            'name': 'a swarming task',
            'parent_task_id': 'parent task id',
            'priority': '150',
            'tags': ['a'],
            'user': '******',
            'some_unused_field': 'blabla',
            'pubsub_topic': 'topic',
            'pubsub_auth_token': 'token',
            'pubsub_userdata': 'data',
            'properties': {
                'command': 'path/to/binary',
                'unused_property': 'blabla',
                'dimensions': [
                    {
                        'key': 'cpu',
                        'value': 'x86-64',
                    },
                ],
                'env': [
                    {
                        'key': 'name',
                        'value': '1',
                    },
                ],
                'execution_timeout_secs': 10,
                'grace_period_secs': 5,
                'extra_args': ['--arg=value'],
                'idempotent': True,
                'inputs_ref': {
                    'namespace': 'default-gzip',
                    'isolated': 'a-hash',
                    'random_field': 'blabla'
                },
                'io_timeout_secs': 10,
            },
        }

        expected_request = SwarmingTaskRequest(
            created_ts=None,
            expiration_secs='50',
            name='a swarming task',
            parent_task_id='parent task id',
            priority='150',
            properties=SwarmingTaskProperties(
                caches=None,
                command='path/to/binary',
                dimensions=[
                    {
                        'key': 'cpu',
                        'value': 'x86-64',
                    },
                ],
                env=[
                    {
                        'key': 'name',
                        'value': '1',
                    },
                ],
                env_prefixes=[],
                execution_timeout_secs='10',
                extra_args=ListOfBasestring.FromSerializable(['--arg=value']),
                grace_period_secs='5',
                io_timeout_secs='10',
                idempotent=True,
                inputs_ref=SwarmingTaskInputsRef(isolated='a-hash',
                                                 isolatedserver=None,
                                                 namespace='default-gzip')),
            pubsub_auth_token='token',
            pubsub_topic='topic',
            pubsub_userdata='data',
            service_account=None,
            tags=ListOfBasestring.FromSerializable(['a']),
            user='******')

        self.assertEqual(expected_request,
                         SwarmingTaskRequest.FromSerializable(data))
Code example #30
    def testGetIsolateShaForCommitPositionPipelineCommitLevel(
            self, mocked_reference_build, mocked_cache, mocked_dimensions):
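        """Tests falling back to a flake try job to compile at the exact
        revision when no isolated build covers the commit position."""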
        master_name = 'm'
        builder_name = 'b'
        build_number = 100
        step_name = 's'
        test_name = 't'
        dimensions = ['dimensions']
        requested_commit_position = 1000
        containing_build_commit_position = 1001
        containing_build_revision = 'r1001'
        requested_revision = 'r1000'
        expected_sha = 'sha1'
        cache_name = 'cache'
        try_job_id = 'try_job_id'
        url = 'url'
        isolate_target_name = 'browser_tests'
        step_metadata = StepMetadata(canonical_step_name=None,
                                     dimensions=None,
                                     full_step_name=None,
                                     isolate_target_name=isolate_target_name,
                                     patched=True,
                                     swarm_task_ids=None,
                                     waterfall_buildername=None,
                                     waterfall_mastername=None)
        build_id = 100
        luci_name = 'chromium'
        bucket_name = 'ci'
        gitiles_host = 'chromium.googlesource.com'
        gitiles_project = 'chromium/src'
        gitiles_ref = 'refs/heads/master'
        gerrit_patch = ''
        isolated_hash = 'isolated_hash'

        isolated_target = IsolatedTarget.Create(
            build_id, luci_name, bucket_name, master_name, builder_name,
            gitiles_host, gitiles_project, gitiles_ref, gerrit_patch,
            isolate_target_name, isolated_hash,
            containing_build_commit_position, containing_build_revision)
        isolated_target.put()

        mocked_cache.return_value = cache_name
        mocked_dimensions.return_value = dimensions
        expected_isolated_tests = IsolatedTests()
        expected_isolated_tests[isolate_target_name] = expected_sha

        build = BuildInfo(master_name, builder_name, build_number)
        build.commit_position = containing_build_commit_position
        mocked_reference_build.return_value = build
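        # The reference build's commit position (1001) differs from the
        # requested one (1000), so no existing isolate can be reused as-is.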

        analysis = MasterFlakeAnalysis.Create(master_name, builder_name,
                                              build_number, step_name,
                                              test_name)
        analysis.Save()

        try_job = FlakeTryJob.Create(master_name, builder_name, step_name,
                                     test_name, requested_revision)
        try_job.put()

        run_flake_try_job_parameters = RunFlakeTryJobParameters(
            analysis_urlsafe_key=analysis.key.urlsafe(),
            revision=requested_revision,
            flake_cache_name=cache_name,
            isolate_target_name=isolate_target_name,
            dimensions=ListOfBasestring.FromSerializable(dimensions),
            urlsafe_try_job_key=try_job.key.urlsafe())

        get_sha_input = GetIsolateShaForCommitPositionParameters(
            analysis_urlsafe_key=unicode(analysis.key.urlsafe()),
            commit_position=requested_commit_position,
            revision=requested_revision,
            dimensions=ListOfBasestring.FromSerializable(dimensions),
            step_metadata=step_metadata,
            upper_bound_build_number=analysis.build_number)

        expected_try_job_report = FlakeTryJobReport(
            isolated_tests=expected_isolated_tests,
            last_checked_out_revision=None,
            previously_cached_revision=None,
            previously_checked_out_revision=None,
            metadata=None)

        expected_try_job_result = FlakeTryJobResult(
            report=expected_try_job_report, url=url, try_job_id=try_job_id)

        get_isolate_sha_for_try_job_pipeline = GetIsolateShaForTryJobParameters(
            try_job_result=expected_try_job_result, step_name=step_name)

        self.MockAsynchronousPipeline(RunFlakeTryJobPipeline,
                                      run_flake_try_job_parameters,
                                      expected_try_job_report)

        self.MockSynchronousPipeline(GetIsolateShaForTryJobPipeline,
                                     get_isolate_sha_for_try_job_pipeline,
                                     expected_sha)

        pipeline_job = GetIsolateShaForCommitPositionPipeline(get_sha_input)
        pipeline_job.start()
        self.execute_queued_tasks()