def testSendNotificationLatestBuildPassed(self, mocked_pipeline, _):
        """Tests that no culprit action is taken when conditions don't hold.

        Runs RevertAndNotifyTestCulpritPipeline end-to-end on a single
        heuristic culprit and asserts the mocked downstream pipeline was
        never invoked.  NOTE(review): per the test name the decorator
        presumably mocks the latest-build-passed check — confirm against
        the decorator outside this snippet.
        """
        master_name = 'm'
        builder_name = 'b'
        build_number = 124

        # Map failing revision 'r1' to a mock culprit entity urlsafe key.
        cl_key = 'mockurlsafekey'
        culprits = DictOfBasestring()
        culprits['r1'] = cl_key
        heuristic_cls = ListOfBasestring()
        heuristic_cls.append(cl_key)
        # Failure 'step1'/'t1' is attributed to revision 'r1'.
        failure_to_culprit_map = FailureToCulpritMap.FromSerializable(
            {'step1': {
                't1': 'r1'
            }})

        pipeline = RevertAndNotifyTestCulpritPipeline(
            CulpritActionParameters(
                build_key=BuildKey(master_name=master_name,
                                   builder_name=builder_name,
                                   build_number=build_number),
                culprits=culprits,
                heuristic_cls=heuristic_cls,
                failure_to_culprit_map=failure_to_culprit_map))
        pipeline.start(queue_name=constants.DEFAULT_QUEUE)
        self.execute_queued_tasks()
        # The mocked downstream pipeline must not have been triggered.
        self.assertFalse(mocked_pipeline.called)
    def setUp(self):
        """Creates a WfSuspectedCL culprit and CulpritActionParameters fixture.

        Stores the culprit's urlsafe key in self.culprit and the
        deserialized parameters in self.parameters for use by the tests.
        """
        super(TestCulpritActionTest, self).setUp()

        repo_name = 'chromium'
        revision = 'rev1'

        # put() persists the entity and returns its key; keep the urlsafe
        # string form, which is what the parameters below carry.
        self.culprit = WfSuspectedCL.Create(repo_name, revision,
                                            100).put().urlsafe()

        # NOTE(review): culprit_dict is built but not used below —
        # parameters_dict uses a plain dict literal instead; confirm this
        # local is intentional leftover.
        culprit_dict = DictOfBasestring()
        culprit_dict[revision] = self.culprit

        heuristic_cls = ListOfBasestring()
        heuristic_cls.append(self.culprit)

        parameters_dict = {
            'build_key': {
                'master_name': 'm',
                'builder_name': 'b',
                'build_number': 123
            },
            'culprits': {
                'rev1': self.culprit
            },
            'heuristic_cls': heuristic_cls
        }
        self.parameters = CulpritActionParameters.FromSerializable(
            parameters_dict)
Beispiel #3
0
 def GetSwarmingTaskRequestTemplate():
   """Returns a template SwarmingTaskRequest object with default values."""
   # Numeric fields (timeouts, priority, grace period) are kept as strings,
   # matching the string form used elsewhere for Swarming request payloads.
   return SwarmingTaskRequest(
       created_ts=None,
       expiration_secs='3600',
       name='',
       parent_task_id='',
       priority='150',
       properties=SwarmingTaskProperties(
           caches=[],
           command=None,
           dimensions=[],
           env=[],
           env_prefixes=[],
           execution_timeout_secs='3600',
           extra_args=ListOfBasestring(),
           grace_period_secs='30',
           io_timeout_secs='1200',
           idempotent=True,
           # inputs_ref/cipd_input are present but empty; callers are
           # expected to fill them in for a concrete task.
           inputs_ref=SwarmingTaskInputsRef(
               isolated=None, isolatedserver=None, namespace=None),
           cipd_input=CIPDInput(
               packages=CIPDPackages(),
               client_package=CIPDClientPackage(
                   version=None,
                   package_name=None,
               ),
               server=None),
       ),
       pubsub_auth_token=None,
       pubsub_topic=None,
       pubsub_userdata=None,
       service_account=None,
       tags=ListOfBasestring(),
       user='')
    def testSendNotificationForTestCulpritNoRevert(self, *_):
        """Tests that a culprit notification is sent when revert is skipped.

        Mocks SendNotificationForCulpritPipeline with the exact input it is
        expected to receive (revert_status=SKIPPED, force_notify=True); the
        mock framework verifies the pipeline runs with that input.
        """
        master_name = 'm'
        builder_name = 'b'
        build_number = 124

        # Map failing revision 'r1' to a mock culprit entity urlsafe key.
        cl_key = 'mockurlsafekey'
        culprits = DictOfBasestring()
        culprits['r1'] = cl_key
        heuristic_cls = ListOfBasestring()
        heuristic_cls.append(cl_key)

        failure_to_culprit_map = FailureToCulpritMap.FromSerializable(
            {'step1': {
                't1': 'r1'
            }})

        input_object = SendNotificationForCulpritParameters(
            cl_key=cl_key,
            force_notify=True,
            revert_status=services_constants.SKIPPED,
            failure_type=failure_type.TEST)
        self.MockSynchronousPipeline(SendNotificationForCulpritPipeline,
                                     input_object, True)

        pipeline = RevertAndNotifyTestCulpritPipeline(
            CulpritActionParameters(
                build_key=BuildKey(master_name=master_name,
                                   builder_name=builder_name,
                                   build_number=build_number),
                culprits=culprits,
                heuristic_cls=heuristic_cls,
                failure_to_culprit_map=failure_to_culprit_map))
        pipeline.start(queue_name=constants.DEFAULT_QUEUE)
        self.execute_queued_tasks()
Beispiel #5
0
    def testGetSwarmingTaskRequestTemplate(self):
        """Tests the default-valued SwarmingTaskRequest template.

        NOTE(review): the expected request here does not set cipd_input on
        SwarmingTaskProperties — confirm equality relies on the property's
        default value.
        """
        expected_request = SwarmingTaskRequest(
            created_ts=None,
            expiration_secs='3600',
            name='',
            parent_task_id='',
            priority='150',
            properties=SwarmingTaskProperties(caches=[],
                                              command=None,
                                              dimensions=[],
                                              env=[],
                                              env_prefixes=[],
                                              execution_timeout_secs='3600',
                                              extra_args=ListOfBasestring(),
                                              grace_period_secs='30',
                                              io_timeout_secs='1200',
                                              idempotent=True,
                                              inputs_ref=SwarmingTaskInputsRef(
                                                  isolated=None,
                                                  isolatedserver=None,
                                                  namespace=None)),
            pubsub_auth_token=None,
            pubsub_topic=None,
            pubsub_userdata=None,
            service_account=None,
            tags=ListOfBasestring(),
            user='')

        self.assertEqual(expected_request,
                         SwarmingTaskRequest.GetSwarmingTaskRequestTemplate())
    def testCanAutoCreateRevert(self, _):
        """Tests that a heuristic test culprit qualifies for auto-revert."""
        repo_name = 'chromium'
        revision = 'rev1'

        # Persist a suspected CL with a TEST failure type and a prior
        # revert-created timestamp.
        culprit = WfSuspectedCL.Create(repo_name, revision, 123)
        culprit.failure_type.append(failure_type.TEST)
        culprit.revert_created_time = datetime(2018, 2, 14, 12, 0, 0)
        culprit.put()

        culprit_dict = DictOfBasestring()
        culprit_dict[revision] = culprit.key.urlsafe()

        # The culprit must be in heuristic_cls for auto-revert eligibility.
        heuristic_cls = ListOfBasestring()
        heuristic_cls.append(culprit.key.urlsafe())

        parameters_dict = {
            'build_key': {
                'master_name': 'm',
                'builder_name': 'b',
                'build_number': 123
            },
            'culprits': {
                'rev1': culprit.key.urlsafe()
            },
            'heuristic_cls': heuristic_cls
        }

        parameters = CulpritActionParameters.FromSerializable(parameters_dict)

        self.assertTrue(
            test_culprit_action.CanAutoCreateRevert(culprit.key.urlsafe(),
                                                    parameters))
Beispiel #7
0
  def FromSerializable(cls, data):
    """Deserializes the given data into a SwarmingTaskRequest.

      Because Swarming frequently adds new fields to task requests, maintaining
      a strict 1:1 mapping between Findit and Swarming is not feasible. Instead
      when deserializing a swarming task request, only consider the fields that
      are necessary.

    Args:
      data (dict): The dict mapping from defined attributes to their values.

    Returns:
      An instance of the given class with attributes set to the given data.
    """
    # Nested sections default to empty dicts so the .get() chains below
    # never raise on a missing section.
    properties = data.get('properties', {})
    inputs_ref = properties.get('inputs_ref', {})
    cipd_input = properties.get('cipd_input', {})

    # NOTE(review): str() on a missing numeric field yields the string
    # 'None' rather than None — confirm upstream always supplies
    # expiration_secs/priority/timeout fields.
    # NOTE(review): constructs SwarmingTaskRequest explicitly rather than
    # cls, so subclasses deserialize to the base class — confirm intended.
    return SwarmingTaskRequest(
        created_ts=data.get('created_ts'),
        expiration_secs=str(data.get('expiration_secs')),
        name=data.get('name'),
        parent_task_id=data.get('parent_task_id'),
        priority=str(data.get('priority')),
        properties=SwarmingTaskProperties(
            caches=properties.get('caches'),
            command=properties.get('command'),
            # List-valued fields fall back to [] when absent or falsy.
            dimensions=properties.get('dimensions') or [],
            env=properties.get('env') or [],
            env_prefixes=properties.get('env_prefixes') or [],
            execution_timeout_secs=str(
                properties.get('execution_timeout_secs')),
            extra_args=ListOfBasestring.FromSerializable(
                properties.get('extra_args') or []),
            grace_period_secs=str(properties.get('grace_period_secs')),
            io_timeout_secs=str(properties.get('io_timeout_secs')),
            idempotent=properties.get('idempotent'),
            inputs_ref=SwarmingTaskInputsRef(
                isolated=inputs_ref.get('isolated'),
                isolatedserver=inputs_ref.get('isolatedserver'),
                namespace=inputs_ref.get('namespace')),
            cipd_input=CIPDInput(
                packages=CIPDPackages.FromSerializable(
                    cipd_input.get('packages')),
                client_package=CIPDClientPackage(
                    version=cipd_input.get('client_package', {}).get('version'),
                    package_name=cipd_input.get('client_package',
                                                {}).get('package_name'),
                ),
                server=cipd_input.get('server'),
            ),
        ),
        pubsub_auth_token=data.get('pubsub_auth_token'),
        pubsub_topic=data.get('pubsub_topic'),
        pubsub_userdata=data.get('pubsub_userdata'),
        service_account=data.get('service_account'),
        tags=ListOfBasestring.FromSerializable(data.get('tags') or []),
        user=data.get('user'))
    def testIdentifyCulpritForCompileTryJobSuccess(self, mock_fn):
        """Tests identifying a compile culprit from a successful try-job.

        The try-job result marks 'rev2' as the culprit; the test mocks
        culprit analysis to return it and verifies the revert pipeline is
        spawned with the matching CulpritActionParameters.
        """
        master_name = 'm'
        builder_name = 'b'
        build_number = 1
        try_job_id = '1'

        # Try-job report: rev1 passed, rev2 failed, so rev2 is the culprit.
        compile_result = {
            'report': {
                'result': {
                    'rev1': 'passed',
                    'rev2': 'failed'
                },
                'culprit': 'rev2'
            },
            'try_job_id': try_job_id,
        }

        repo_name = 'chromium'
        revision = 'rev2'

        culprit = WfSuspectedCL.Create(repo_name, revision, 100)
        culprit.put()

        culprits_result = {
            'rev2': {
                'revision': revision,
                'commit_position': 2,
                'url': 'url_2',
                'repo_name': repo_name
            }
        }
        # Mocked analysis returns the culprit details and no heuristic CLs.
        mock_fn.return_value = culprits_result, ListOfBasestring()

        culprits = DictOfBasestring()
        culprits['rev2'] = culprit.key.urlsafe()

        self.MockGeneratorPipeline(pipeline_class=revert_pipeline.
                                   RevertAndNotifyCompileCulpritPipeline,
                                   expected_input=CulpritActionParameters(
                                       build_key=BuildKey(
                                           master_name=master_name,
                                           builder_name=builder_name,
                                           build_number=build_number),
                                       culprits=culprits,
                                       heuristic_cls=ListOfBasestring(),
                                       failure_to_culprit_map=None),
                                   mocked_output=False)

        pipeline_input = IdentifyCompileTryJobCulpritParameters(
            build_key=BuildKey(master_name=master_name,
                               builder_name=builder_name,
                               build_number=build_number),
            result=CompileTryJobResult.FromSerializable(compile_result))
        pipeline = culprit_pipeline.IdentifyCompileTryJobCulpritPipeline(
            pipeline_input)
        pipeline.start()
        self.execute_queued_tasks()
Beispiel #9
0
  def testAnalyzeFlakePipelineStartTaskAfterDelay(self, mocked_delay, _):
    """Tests that the flake analysis retries via a delay pipeline.

    With a mocked 60s delay, AnalyzeFlakePipeline is expected to run a
    DelayPipeline and then recurse with retries incremented from 0 to 1;
    the recursive input is otherwise identical to the original.
    """
    analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
    # Random date in the past, for coverage.
    analysis.request_time = datetime.datetime(2015, 1, 1, 1, 1, 1)
    analysis.Save()

    start_commit_position = 1000
    start_revision = 'r1000'
    delay = 60

    step_metadata = StepMetadata(
        canonical_step_name='s',
        dimensions=None,
        full_step_name='s',
        patched=False,
        swarm_task_ids=None,
        waterfall_buildername='b',
        waterfall_mastername='w',
        isolate_target_name='s')

    mocked_delay.return_value = delay

    analyze_flake_input = AnalyzeFlakeInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        analyze_commit_position_parameters=NextCommitPositionOutput(
            next_commit_id=CommitID(
                commit_position=start_commit_position, revision=start_revision),
            culprit_commit_id=None),
        commit_position_range=IntRange(lower=None, upper=None),
        dimensions=ListOfBasestring.FromSerializable(['os:testOS']),
        manually_triggered=False,
        rerun=False,
        retries=0,
        step_metadata=step_metadata)

    # Same input, with retries bumped to 1 for the recursive run.
    expected_retried_analyze_flake_input = AnalyzeFlakeInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        analyze_commit_position_parameters=NextCommitPositionOutput(
            next_commit_id=CommitID(
                commit_position=start_commit_position, revision=start_revision),
            culprit_commit_id=None),
        commit_position_range=IntRange(lower=None, upper=None),
        dimensions=ListOfBasestring.FromSerializable(['os:testOS']),
        manually_triggered=False,
        rerun=False,
        retries=1,
        step_metadata=step_metadata)

    self.MockAsynchronousPipeline(DelayPipeline, delay, delay)

    self.MockGeneratorPipeline(RecursiveAnalyzeFlakePipeline,
                               expected_retried_analyze_flake_input, None)

    pipeline_job = AnalyzeFlakePipeline(analyze_flake_input)
    pipeline_job.start()
    self.execute_queued_tasks()
    def testIdentifyCulpritForTestTryJobSuccess(self, mock_fn, mock_fn2):
        """Tests identifying a test culprit from a successful try-job.

        Mocks culprit analysis (mock_fn) and culprit selection (mock_fn2),
        then verifies RevertAndNotifyTestCulpritPipeline is spawned with
        the matching CulpritActionParameters and that the analysis mock is
        called exactly once with the pipeline parameters.
        """
        master_name = 'm'
        builder_name = 'b'
        build_number = 1

        repo_name = 'chromium'
        revision = 'rev2'

        culprit = WfSuspectedCL.Create(repo_name, revision, 100)
        culprit.put()

        culprits_result = {
            'rev1': {
                'revision': 'rev1',
                'repo_name': 'chromium',
                'commit_position': 1,
                'url': 'url_1'
            },
            'rev2': {
                'revision': revision,
                'commit_position': 2,
                'url': 'url_2',
                'repo_name': repo_name
            }
        }

        culprit_map = {'step': {'test1': 'rev1', 'test2': 'rev2'}}
        # Mocked analysis: culprit details, no heuristic CLs, and the
        # failure-to-culprit mapping above.
        mock_fn.return_value = culprits_result, ListOfBasestring.FromSerializable(
            []), FailureToCulpritMap.FromSerializable(culprit_map)

        # Mocked selection narrows the culprits to just rev2.
        culprits = DictOfBasestring()
        culprits['rev2'] = culprit.key.urlsafe()
        mock_fn2.return_value = culprits

        self.MockGeneratorPipeline(
            pipeline_class=RevertAndNotifyTestCulpritPipeline,
            expected_input=CulpritActionParameters(
                build_key=BuildKey(master_name=master_name,
                                   builder_name=builder_name,
                                   build_number=build_number),
                culprits=culprits,
                heuristic_cls=ListOfBasestring(),
                failure_to_culprit_map=FailureToCulpritMap.FromSerializable(
                    culprit_map)),
            mocked_output=False)

        parameters = IdentifyTestTryJobCulpritParameters(
            build_key=BuildKey(master_name=master_name,
                               builder_name=builder_name,
                               build_number=build_number),
            result=TestTryJobResult.FromSerializable({}))
        pipeline = IdentifyTestTryJobCulpritPipeline(parameters)
        pipeline.start()
        self.execute_queued_tasks()
        mock_fn.assert_called_once_with(parameters)
Beispiel #11
0
    def testUpdateAnalysisDataPointsExistingDataPointWithErrorSalvagable(
            self, _):
        """Tests merging an errored-but-salvageable swarming task's results.

        The task reports an error but still produced 100 iterations with 50
        passes, so its data is folded into the existing flakiness: counts
        and durations accumulate and no failed attempt is recorded.
        """
        commit_position = 1000
        revision = 'r1000'
        iterations = 100
        pass_count = 50
        completed_time = datetime(2018, 1, 1, 0, 1, 0)
        error = SwarmingTaskError(code=1, message='m')
        started_time = datetime(2018, 1, 1, 0, 0, 0)
        task_id_1 = 'task_1'
        task_id_2 = 'task_2'
        build_url = 'url'
        try_job_url = None

        # Errored task that still carries usable iteration data.
        swarming_task_output = FlakeSwarmingTaskOutput(
            completed_time=completed_time,
            error=error,
            iterations=iterations,
            pass_count=pass_count,
            started_time=started_time,
            task_id=task_id_2)

        initial_flakiness = Flakiness(
            build_number=None,
            build_url=build_url,
            commit_position=commit_position,
            total_test_run_seconds=60,
            error=None,
            failed_swarming_task_attempts=0,
            iterations=50,
            pass_rate=0.5,
            revision=revision,
            try_job_url=try_job_url,
            task_ids=ListOfBasestring.FromSerializable([task_id_1]))

        expected_flakiness = Flakiness(
            build_number=None,
            build_url=build_url,
            commit_position=commit_position,
            total_test_run_seconds=120,
            error=None,  # Only set error if no more retries.
            failed_swarming_task_attempts=0,  # Task was salvaged.
            iterations=150,
            pass_rate=0.5,
            revision=revision,
            try_job_url=try_job_url,
            task_ids=ListOfBasestring.FromSerializable([task_id_1, task_id_2]))

        resulting_flakiness = flakiness_util.UpdateFlakiness(
            initial_flakiness, swarming_task_output)

        self.assertEqual(expected_flakiness, resulting_flakiness)
Beispiel #12
0
    def testUpdateAnalysisDataPointsExistingDataPointNoError(self):
        """Tests merging an error-free swarming run into existing data."""
        # Existing data point: 100 iterations over 1800s at a 0.5 pass
        # rate, accumulated from one prior task with two failed attempts.
        initial_flakiness = Flakiness(
            build_number=None,
            build_url=None,
            commit_position=1000,
            total_test_run_seconds=1800,
            error=None,
            failed_swarming_task_attempts=2,
            iterations=100,
            pass_rate=0.5,
            revision='r1000',
            try_job_url='url',
            task_ids=ListOfBasestring.FromSerializable(['task_1']))

        # New task: 100 more iterations (60 passing) taking one hour.
        swarming_task_output = FlakeSwarmingTaskOutput(
            completed_time=datetime(2018, 1, 1, 1, 0, 0),
            error=None,
            iterations=100,
            pass_count=60,
            started_time=datetime(2018, 1, 1, 0, 0, 0),
            task_id='task_2')

        # Durations and iteration counts accumulate; the combined pass
        # rate is (50 + 60) / 200 = 0.55 and the task id list grows.
        expected_flakiness = Flakiness(
            build_number=None,
            build_url=None,
            commit_position=1000,
            total_test_run_seconds=5400,
            error=None,
            failed_swarming_task_attempts=2,
            iterations=200,
            pass_rate=0.55,
            revision='r1000',
            task_ids=ListOfBasestring.FromSerializable(['task_1', 'task_2']),
            try_job_url='url')

        self.assertEqual(
            expected_flakiness,
            flakiness_util.UpdateFlakiness(initial_flakiness,
                                           swarming_task_output))
Beispiel #13
0
    def testUpdateExistingFlakinessWithErrorWithSuccessfulRun(self, _):
        """Tests a successful run merged into a previously-failed flakiness.

        The prior attempt produced no iterations; the new error-free task
        supplies 10 iterations with 5 passes, which become the pass rate.
        """
        commit_position = 1000
        revision = 'r1000'
        iterations = 10
        pass_count = 5
        completed_time = datetime(2018, 1, 1, 0, 1, 0)
        started_time = datetime(2018, 1, 1, 0, 0, 0)
        task_id_1 = 'task_1'
        task_id_2 = 'task_2'
        build_url = 'url'
        try_job_url = None

        swarming_task_output = FlakeSwarmingTaskOutput(
            completed_time=completed_time,
            error=None,
            iterations=iterations,
            pass_count=pass_count,
            started_time=started_time,
            task_id=task_id_2)

        # Simulate first run failing.
        initial_flakiness = Flakiness(
            build_number=None,
            build_url=build_url,
            commit_position=commit_position,
            total_test_run_seconds=60,
            error=None,
            failed_swarming_task_attempts=1,
            iterations=0,
            pass_rate=None,
            revision=revision,
            try_job_url=try_job_url,
            task_ids=ListOfBasestring.FromSerializable([task_id_1]))

        expected_flakiness = Flakiness(
            build_number=None,
            build_url=build_url,
            commit_position=commit_position,
            total_test_run_seconds=120,  # No change due to unrecoverable error.
            error=None,
            failed_swarming_task_attempts=1,
            iterations=10,
            pass_rate=0.5,
            revision=revision,
            try_job_url=try_job_url,
            task_ids=ListOfBasestring.FromSerializable([task_id_1, task_id_2]))

        resulting_flakiness = flakiness_util.UpdateFlakiness(
            initial_flakiness, swarming_task_output)

        self.assertEqual(expected_flakiness, resulting_flakiness)
    def testSendNotificationToConfirmRevert(self, *_):
        """Tests the full compile-culprit action chain when Findit reverts.

        Mocks four pipelines in order — revert creation (CREATED_BY_FINDIT),
        revert submission (COMMITTED), IRC notification, and culprit
        notification — then runs RevertAndNotifyCompileCulpritPipeline; the
        mock framework verifies each pipeline receives the expected input.
        """
        master_name = 'm'
        builder_name = 'b'
        build_number = 124
        build_key = 'm/b/124'

        cl_key = 'mockurlsafekey'
        culprits = DictOfBasestring()
        culprits['r1'] = cl_key
        heuristic_cls = ListOfBasestring()
        heuristic_cls.append(cl_key)

        self.MockSynchronousPipeline(
            CreateRevertCLPipeline,
            CreateRevertCLParameters(cl_key=cl_key,
                                     build_key=build_key,
                                     failure_type=failure_type.COMPILE),
            services_constants.CREATED_BY_FINDIT)
        self.MockSynchronousPipeline(
            SubmitRevertCLPipeline,
            SubmitRevertCLParameters(
                cl_key=cl_key,
                revert_status=services_constants.CREATED_BY_FINDIT,
                failure_type=failure_type.COMPILE),
            services_constants.COMMITTED)
        self.MockSynchronousPipeline(
            SendNotificationToIrcPipeline,
            SendNotificationToIrcParameters(
                cl_key=cl_key,
                revert_status=services_constants.CREATED_BY_FINDIT,
                commit_status=services_constants.COMMITTED,
                failure_type=failure_type.COMPILE), True)
        self.MockSynchronousPipeline(
            SendNotificationForCulpritPipeline,
            SendNotificationForCulpritParameters(
                cl_key=cl_key,
                force_notify=True,
                revert_status=services_constants.CREATED_BY_FINDIT,
                failure_type=failure_type.COMPILE), True)

        pipeline = wrapper_pipeline.RevertAndNotifyCompileCulpritPipeline(
            CulpritActionParameters(build_key=BuildKey(
                master_name=master_name,
                builder_name=builder_name,
                build_number=build_number),
                                    culprits=culprits,
                                    heuristic_cls=heuristic_cls,
                                    failure_to_culprit_map=None))
        pipeline.start(queue_name=constants.DEFAULT_QUEUE)
        self.execute_queued_tasks()
Beispiel #15
0
    def testUpdateFlakinessWithErrorUnsalvagable(self, _):
        """Tests an errored task with no usable data counting as a failure."""
        # The task errored out before producing any iteration results.
        swarming_task_output = FlakeSwarmingTaskOutput(
            completed_time=datetime(2018, 1, 1, 1, 0, 0),
            error=SwarmingTaskError(code=1, message='message'),
            iterations=None,
            pass_count=None,
            started_time=datetime(2018, 1, 1, 0, 0, 0),
            task_id='task_id')

        # Fresh flakiness entry with nothing accumulated yet.
        flakiness_to_update = Flakiness(
            build_number=None,
            build_url='url',
            commit_position=1000,
            total_test_run_seconds=0,
            error=None,
            failed_swarming_task_attempts=0,
            iterations=0,
            pass_rate=None,
            revision='r1000',
            try_job_url=None,
            task_ids=ListOfBasestring.FromSerializable([]))

        # Only the failed-attempt counter and the task id list change.
        expected_flakiness = Flakiness(
            build_number=None,
            build_url='url',
            commit_position=1000,
            total_test_run_seconds=0,
            error=None,
            failed_swarming_task_attempts=1,
            iterations=0,
            pass_rate=None,
            revision='r1000',
            try_job_url=None,
            task_ids=ListOfBasestring.FromSerializable(['task_id']))

        self.assertEqual(
            expected_flakiness,
            flakiness_util.UpdateFlakiness(flakiness_to_update,
                                           swarming_task_output))
Beispiel #16
0
    def testUpdateFlakinessNewFlakinessNoError(self):
        """Tests populating an empty flakiness from a clean swarming run."""
        # A one-minute, error-free task: 100 iterations with 50 passes.
        swarming_task_output = FlakeSwarmingTaskOutput(
            completed_time=datetime(2018, 1, 1, 0, 1, 0),
            error=None,
            iterations=100,
            pass_count=50,
            started_time=datetime(2018, 1, 1, 0, 0, 0),
            task_id='task_id')

        # Brand-new flakiness entry with no data recorded yet.
        initial_flakiness = Flakiness(
            build_number=None,
            build_url=None,
            commit_position=1000,
            total_test_run_seconds=None,
            error=None,
            failed_swarming_task_attempts=0,
            iterations=None,
            pass_rate=None,
            revision='r1000',
            try_job_url='url',
            task_ids=ListOfBasestring.FromSerializable([]))

        # The run's duration, iterations, pass rate (50/100 = 0.5) and
        # task id are all taken directly from the task output.
        expected_flakiness = Flakiness(
            build_number=None,
            build_url=None,
            commit_position=1000,
            total_test_run_seconds=60,
            error=None,
            failed_swarming_task_attempts=0,
            iterations=100,
            pass_rate=0.5,
            revision='r1000',
            try_job_url='url',
            task_ids=ListOfBasestring.FromSerializable(['task_id']))

        self.assertEqual(
            expected_flakiness,
            flakiness_util.UpdateFlakiness(initial_flakiness,
                                           swarming_task_output))
Beispiel #17
0
def GetHeuristicSuspectedCLs(master_name, builder_name, build_number):
    """Gets revisions of suspected cls found by heuristic approach.

    Returns a ListOfBasestring of urlsafe keys for each WfSuspectedCL that
    exists for the analysis' suspected CLs; missing culprits are logged and
    skipped. An empty list is returned when there is no analysis or it has
    no suspected CLs.
    """
    suspects = ListOfBasestring()
    analysis = WfAnalysis.Get(master_name, builder_name, build_number)
    if not analysis or not analysis.suspected_cls:
        return suspects

    for suspected in analysis.suspected_cls:
        repo_name = suspected['repo_name']
        revision = suspected['revision']
        culprit = WfSuspectedCL.Get(repo_name, revision)
        if culprit:
            suspects.append(culprit.key.urlsafe())
        else:  # pragma: no cover
            # Data inconsistency: the analysis references a CL that was never
            # stored as a WfSuspectedCL entity.
            logging.warning('No culprit found for repo_name %s and revision %s',
                            repo_name, revision)

    return suspects
    def testGetCulpritsShouldTakeActions(self, _):
        """Only 'r1' is expected back given the decorator-mocked dependency."""
        culprit_keys = DictOfBasestring()
        culprit_keys['r1'] = 'mockurlsafekey'
        culprit_keys['r2'] = 'mockurlsafekey2'

        # r1 is blamed for step1/test1; r2 for step1/test2 and step2/test1.
        culprit_map = FailureToCulpritMap.FromSerializable({
            'step1': {
                'test1': 'r1',
                'test2': 'r2'
            },
            'step2': {
                'test1': 'r2'
            }
        })

        params = CulpritActionParameters(
            build_key=BuildKey(
                master_name='m', builder_name='b', build_number=124),
            culprits=culprit_keys,
            heuristic_cls=ListOfBasestring(),
            failure_to_culprit_map=culprit_map)

        self.assertEqual(
            {'r1'},
            test_culprit_action.GetCulpritsShouldTakeActions(params))
# Beispiel (example) #19 — scraped-example separator
  def testDetermineApproximatePassRateConverged(self, *_):
    """Runs the pipeline when the incoming pass rate matches accumulated data.

    The accumulated flakiness and the latest swarming task both show a 0.5
    pass rate (per the test name, the rate has converged). Collaborators are
    mocked via decorators outside this view; the test passes if the pipeline
    runs to completion — there are no explicit assertions here.
    """
    master_name = 'm'
    builder_name = 'b'
    reference_build_number = 123
    step_name = 's'
    test_name = 't'
    commit_position = 1000
    incoming_pass_count = 15
    iterations = 30
    incoming_pass_rate = 0.5
    isolate_sha = 'sha1'
    revision = 'r1000'
    started_time = datetime(2018, 1, 1, 0, 0, 0)
    completed_time = datetime(2018, 1, 1, 1, 0, 0)
    build_url = 'url'
    try_job_url = None

    # Isolate came from a try job (build_number/build_url are None).
    isolate_sha_output = GetIsolateShaOutput(
        build_number=None,
        build_url=None,
        isolate_sha=isolate_sha,
        try_job_url='url')

    # Latest completed swarming task: 15/30 passes, i.e. a 0.5 pass rate.
    flake_swarming_task_output = FlakeSwarmingTaskOutput(
        error=None,
        pass_count=incoming_pass_count,
        iterations=iterations,
        started_time=started_time,
        completed_time=completed_time,
        task_id='task_id')

    # Accumulated flakiness so far, already at the same 0.5 pass rate.
    flakiness_thus_far = Flakiness(
        build_number=None,
        build_url=build_url,
        commit_position=commit_position,
        total_test_run_seconds=3600,
        error=None,
        failed_swarming_task_attempts=0,
        iterations=iterations,
        pass_rate=incoming_pass_rate,
        revision=revision,
        try_job_url=try_job_url,
        task_ids=ListOfBasestring.FromSerializable(['task_id_1']))

    determine_approximate_pass_rate_input = DetermineApproximatePassRateInput(
        builder_name=builder_name,
        commit_position=commit_position,
        get_isolate_sha_output=isolate_sha_output,
        flakiness_thus_far=flakiness_thus_far,
        previous_swarming_task_output=flake_swarming_task_output,
        master_name=master_name,
        reference_build_number=reference_build_number,
        revision=revision,
        step_name=step_name,
        test_name=test_name)

    pipeline_job = DetermineApproximatePassRatePipeline(
        determine_approximate_pass_rate_input)
    pipeline_job.start()
    self.execute_queued_tasks()
# Beispiel (example) #20 — scraped-example separator
  def testAnalyzeFlakePipelineAnalysisFinishedNoFindings(self):
    """An analysis ending with no culprit is COMPLETED and NOT_FOUND."""
    analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
    analysis.Save()

    # No next commit to check and no culprit found: the analysis is done.
    pipeline_input = AnalyzeFlakeInput(
        analysis_urlsafe_key=analysis.key.urlsafe(),
        analyze_commit_position_parameters=NextCommitPositionOutput(
            next_commit_id=None, culprit_commit_id=None),
        commit_position_range=IntRange(lower=None, upper=None),
        dimensions=ListOfBasestring.FromSerializable([]),
        manually_triggered=False,
        rerun=False,
        retries=0,
        step_metadata=None)

    # Only the analysis-event reporting pipeline should be spawned.
    self.MockGeneratorPipeline(
        ReportAnalysisEventPipeline,
        ReportEventInput(analysis_urlsafe_key=analysis.key.urlsafe()), None)

    pipeline_job = AnalyzeFlakePipeline(pipeline_input)
    pipeline_job.start()
    self.execute_queued_tasks()

    self.assertIsNone(analysis.culprit_urlsafe_key)
    self.assertEqual(analysis_status.COMPLETED, analysis.status)
    self.assertEqual(result_status.NOT_FOUND_UNTRIAGED, analysis.result_status)
    def testReturnNoneIfNoTryJob(self):
        """A try job with a None result should still complete cleanly.

        Creates an empty WfTryJob, runs the identify-culprit pipeline with
        result=None, and verifies the try job ends COMPLETED with empty
        test_results while the downstream revert/notify pipeline is spawned
        with empty culprit data.
        """
        master_name = 'm'
        builder_name = 'b'
        build_number = 8

        # A try job entity with no results attached.
        WfTryJob.Create(master_name, builder_name, build_number).put()

        # With no culprit data, the downstream pipeline is expected to run
        # with empty culprits/heuristic cls and no failure map.
        self.MockGeneratorPipeline(
            pipeline_class=RevertAndNotifyTestCulpritPipeline,
            expected_input=CulpritActionParameters(
                build_key=BuildKey(master_name=master_name,
                                   builder_name=builder_name,
                                   build_number=build_number),
                culprits=DictOfBasestring(),
                heuristic_cls=ListOfBasestring(),
                failure_to_culprit_map=None),
            mocked_output=False)
        parameters = IdentifyTestTryJobCulpritParameters(build_key=BuildKey(
            master_name=master_name,
            builder_name=builder_name,
            build_number=build_number),
                                                         result=None)
        pipeline = IdentifyTestTryJobCulpritPipeline(parameters)
        pipeline.start()
        self.execute_queued_tasks()

        # The try job entity should be marked complete with no test results.
        try_job = WfTryJob.Get(master_name, builder_name, build_number)
        self.assertEqual(try_job.test_results, [])
        self.assertEqual(try_job.status, analysis_status.COMPLETED)
# Beispiel (example) #22 — scraped-example separator
    def testUpdateFlakeAnalysisDataPointsPipelineTooManyErrors(
            self, _, mocked_error_reporting):
        """Error reporting fires when flakiness has 3 failed swarming attempts."""
        analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
        analysis.Save()

        # Flakiness whose swarming tasks have failed three times.
        flakiness = Flakiness(
            build_number=None,
            build_url='url',
            commit_position=1000,
            total_test_run_seconds=100,
            error=None,
            failed_swarming_task_attempts=3,
            iterations=50,
            pass_rate=0.5,
            revision='r1000',
            try_job_url=None,
            task_ids=ListOfBasestring.FromSerializable(['task_id']))

        pipeline_input = UpdateFlakeAnalysisDataPointsInput(
            analysis_urlsafe_key=analysis.key.urlsafe(), flakiness=flakiness)

        pipeline_job = UpdateFlakeAnalysisDataPointsPipeline(pipeline_input)
        pipeline_job.start()
        self.execute_queued_tasks()

        # The decorator-mocked error-reporting hook must have been invoked.
        self.assertTrue(mocked_error_reporting.called)
# Beispiel (example) #23 — scraped-example separator
    def testRunImplRetryUponFailure(self, mocked_schedule, mocked_save,
                                    mocked_pipeline_id, _):
        """RunImpl should raise pipeline.Retry when scheduling fails.

        The decorator-supplied schedule mock is presumably configured to fail
        (outside this view); RunImpl is expected to attempt scheduling exactly
        once with the pipeline id, raise pipeline.Retry, and save nothing.
        """
        master_name = 'm'
        builder_name = 'b'
        build_number = 1
        step_name = 's'
        test_name = 't'
        isolate_target_name = 'target'
        revision = 'r1000'

        analysis = MasterFlakeAnalysis.Create(master_name, builder_name,
                                              build_number, step_name,
                                              test_name)
        analysis.Save()

        try_job = FlakeTryJob.Create(master_name, builder_name, step_name,
                                     test_name, revision)
        try_job.put()

        pipeline_input = RunFlakeTryJobParameters(
            analysis_urlsafe_key=analysis.key.urlsafe(),
            flake_cache_name=None,
            dimensions=ListOfBasestring(),
            revision=revision,
            isolate_target_name=isolate_target_name,
            urlsafe_try_job_key=try_job.key.urlsafe())

        # pipeline_id is patched as a property; mock its getter so RunImpl
        # sees a fixed id.
        mocked_pipeline_id.__get__ = mock.Mock(return_value='pipeline-id')
        pipeline_job = RunFlakeTryJobPipeline(pipeline_input)

        with self.assertRaises(pipeline.Retry):
            pipeline_job.RunImpl(pipeline_input)

        # Scheduling was attempted exactly once and no result was saved.
        mocked_schedule.assert_called_once_with(pipeline_input, 'pipeline-id')
        self.assertFalse(mocked_save.called)
# Beispiel (example) #24 — scraped-example separator
    def testCallbackImplFailedRun(self, mocked_state_changed):
        """CallbackImpl should surface an error when the try-job update fails.

        The mocked state-change handler is presumably configured (via
        decorator, outside this view) to fail; CallbackImpl is then expected
        to return an (error message, None) tuple after forwarding the parsed
        build JSON to the handler.
        """
        master_name = 'm'
        builder_name = 'b'
        build_number = 1
        step_name = 's'
        test_name = 't'
        revision = 'r1000'
        isolate_target_name = 'target'
        try_job_id = 'try_job_id'

        analysis = MasterFlakeAnalysis.Create(master_name, builder_name,
                                              build_number, step_name,
                                              test_name)
        analysis.Save()

        try_job = FlakeTryJob.Create(master_name, builder_name, step_name,
                                     test_name, revision)
        try_job.put()

        pipeline_input = RunFlakeTryJobParameters(
            analysis_urlsafe_key=analysis.key.urlsafe(),
            flake_cache_name=None,
            dimensions=ListOfBasestring(),
            revision=revision,
            isolate_target_name=isolate_target_name,
            urlsafe_try_job_key=try_job.key.urlsafe())

        pipeline_job = RunFlakeTryJobPipeline(pipeline_input)
        # Callback payload carries the try job id and the raw build JSON.
        returned_value = pipeline_job.CallbackImpl(pipeline_input, {
            'try_job_id': try_job_id,
            'build_json': '{"k":"v"}'
        })
        self.assertEqual(('Error updating try job result: m', None),
                         returned_value)
        # The build JSON string is decoded before being handed to the handler.
        mocked_state_changed.assert_called_once_with(try_job_id, {'k': 'v'})
# Beispiel (example) #25 — scraped-example separator
    def testRunImplTriggerSameJobTwice(self, mocked_schedule, _):
        """RunImpl should not re-schedule a try job that is already running.

        A decorator-supplied mock (not visible here) presumably reports the
        job as already triggered, so the schedule mock must never be called.
        """
        master_name = 'm'
        builder_name = 'b'
        build_number = 1
        step_name = 's'
        test_name = 't'
        revision = 'r1000'

        analysis = MasterFlakeAnalysis.Create(master_name, builder_name,
                                              build_number, step_name,
                                              test_name)
        analysis.Save()

        try_job = FlakeTryJob.Create(master_name, builder_name, step_name,
                                     test_name, revision)
        try_job.put()

        pipeline_input = RunFlakeTryJobParameters(
            analysis_urlsafe_key=analysis.key.urlsafe(),
            flake_cache_name=None,
            dimensions=ListOfBasestring(),
            revision=revision,
            isolate_target_name='target',
            urlsafe_try_job_key=try_job.key.urlsafe())

        try_job_pipeline = RunFlakeTryJobPipeline(pipeline_input)
        try_job_pipeline.RunImpl(pipeline_input)

        # Scheduling must be skipped for an already-triggered job.
        self.assertFalse(mocked_schedule.called)
# Beispiel (example) #26 — scraped-example separator
    def testScheduleFlakeTryJobRaise(self, *_):
        """Scheduling failures should surface as exceptions.RetryException."""
        analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
        analysis.Save()

        try_job = FlakeTryJob.Create('m', 'b', 's', 't', 'r1000')
        try_job.put()

        request = RunFlakeTryJobParameters(
            analysis_urlsafe_key=analysis.key.urlsafe(),
            revision='r1000',
            flake_cache_name=None,
            isolate_target_name='target',
            dimensions=ListOfBasestring())

        # The decorator-mocked collaborators make scheduling fail; the
        # failure must be converted into a RetryException.
        with self.assertRaises(exceptions.RetryException):
            flake_try_job.ScheduleFlakeTryJob(request, 'pipeline')
    def testCreatedRevertButNotCommitted(self, *_):
        """Notification is sent when Findit created an uncommitted revert.

        Mocks revert creation to return CREATED_BY_FINDIT, then expects the
        culprit-notification pipeline to be invoked with that revert status
        and force_notify=True.
        """
        master_name = 'm'
        builder_name = 'b'
        build_number = 124
        build_key = 'm/b/124'

        cl_key = 'mockurlsafekey'
        culprits = DictOfBasestring()
        culprits['r1'] = cl_key
        heuristic_cls = ListOfBasestring()
        heuristic_cls.append(cl_key)
        failure_to_culprit_map = FailureToCulpritMap.FromSerializable(
            {'step1': {
                't1': 'r1'
            }})

        # Revert creation reports that Findit created the revert CL.
        self.MockSynchronousPipeline(
            CreateRevertCLPipeline,
            CreateRevertCLParameters(cl_key=cl_key,
                                     build_key=build_key,
                                     failure_type=failure_type.TEST),
            services_constants.CREATED_BY_FINDIT)
        # The culprit must then be notified about the created revert.
        self.MockSynchronousPipeline(
            SendNotificationForCulpritPipeline,
            SendNotificationForCulpritParameters(
                cl_key=cl_key,
                force_notify=True,
                revert_status=services_constants.CREATED_BY_FINDIT,
                failure_type=failure_type.TEST), True)

        pipeline = RevertAndNotifyTestCulpritPipeline(
            CulpritActionParameters(
                build_key=BuildKey(master_name=master_name,
                                   builder_name=builder_name,
                                   build_number=build_number),
                culprits=culprits,
                heuristic_cls=heuristic_cls,
                failure_to_culprit_map=failure_to_culprit_map))
        pipeline.start(queue_name=constants.DEFAULT_QUEUE)
        self.execute_queued_tasks()
  def testRerunAnalysisWithAnalyzeFlakePipeline(
      self, mocked_analysis, mocked_pipeline, mocked_need_analysis,
      mocked_build_info, mock_dimensions, *_):
    """Forcing a rerun should start AnalyzeFlakePipeline from the start build.

    Verifies that ScheduleAnalysisIfNeeded with force=True records the start
    build's buildbucket id on the analysis and starts the pipeline with
    rerun=True, the mocked dimensions plus the Findit pool dimension, and the
    commit range capped at the start build's commit position.
    """
    buildbucket_id = 'id'
    mock_dimensions.return_value = ['os:Mac', 'cpu:x86']
    start_commit_position = 1000
    # Reference build the rerun analysis should start from.
    start_build_info = BuildInfo('m', 'b 1', 123)
    start_build_info.commit_position = start_commit_position
    start_build_info.chromium_revision = 'r1000'
    start_build_info.buildbucket_id = buildbucket_id
    mocked_build_info.return_value = start_build_info
    mocked_analysis.pipeline_status_path.return_value = 'status'
    mocked_analysis.key.urlsafe.return_value = 'urlsafe_key'
    mocked_need_analysis.return_value = (True, mocked_analysis)
    test = TestInfo('m', 'b 1', 123, 's', 't')
    manually_triggered = False
    flake = Flake.Create('chromium', 's', 't', 'l')

    analysis = initialize_flake_pipeline.ScheduleAnalysisIfNeeded(
        test,
        test,
        flake.key,
        bug_id=None,
        allow_new_analysis=True,
        manually_triggered=manually_triggered,
        force=True,
        queue_name=constants.DEFAULT_QUEUE)

    # The start build's buildbucket id is recorded on the analysis.
    self.assertIsNotNone(analysis)
    self.assertEqual(buildbucket_id, analysis.build_id)
    self.assertEqual(buildbucket_id, analysis.original_build_id)

    # The exact pipeline input the rerun is expected to be started with.
    analyze_flake_input = AnalyzeFlakeInput(
        analysis_urlsafe_key='urlsafe_key',
        analyze_commit_position_parameters=NextCommitPositionOutput(
            culprit_commit_id=None,
            next_commit_id=CommitID(
                commit_position=start_commit_position,
                revision=start_build_info.chromium_revision)),
        commit_position_range=IntRange(lower=None, upper=start_commit_position),
        dimensions=ListOfBasestring.FromSerializable(
            ['os:Mac', 'cpu:x86', 'pool:luci.chromium.findit']),
        manually_triggered=manually_triggered,
        rerun=True,
        retries=0,
        step_metadata=StepMetadata.FromSerializable({}))

    mocked_pipeline.assert_has_calls([
        mock.call(analyze_flake_input),
        mock.call().start(queue_name=constants.DEFAULT_QUEUE)
    ])
# Beispiel (example) #29 — scraped-example separator
  def testDetermineApproximatePassRatePipelineWrapper(self):
    """The wrapper should delegate to DetermineApproximatePassRatePipeline.

    Verifies the wrapped pipeline is spawned with the wrapper's input passed
    through unchanged.
    """
    master_name = 'm'
    builder_name = 'b'
    reference_build_number = 123
    step_name = 's'
    test_name = 't'
    commit_position = 1000
    incoming_pass_rate = 0.5
    isolate_sha = 'sha1'
    revision = 'r1000'
    build_url = None
    try_job_url = 'url'

    isolate_sha_output = GetIsolateShaOutput(
        build_number=None,
        build_url=build_url,
        isolate_sha=isolate_sha,
        try_job_url=try_job_url)

    flakiness_thus_far = Flakiness(
        build_number=None,
        build_url=build_url,
        commit_position=commit_position,
        total_test_run_seconds=60,
        error=None,
        failed_swarming_task_attempts=0,
        iterations=10,
        pass_rate=incoming_pass_rate,
        revision=revision,
        try_job_url=try_job_url,
        task_ids=ListOfBasestring.FromSerializable(['task_id_1']))

    determine_approximate_pass_rate_input = DetermineApproximatePassRateInput(
        builder_name=builder_name,
        commit_position=commit_position,
        get_isolate_sha_output=isolate_sha_output,
        flakiness_thus_far=flakiness_thus_far,
        master_name=master_name,
        previous_swarming_task_output=None,
        reference_build_number=reference_build_number,
        revision=revision,
        step_name=step_name,
        test_name=test_name)

    # The real pipeline must receive the identical input object.
    self.MockGeneratorPipeline(DetermineApproximatePassRatePipeline,
                               determine_approximate_pass_rate_input, None)

    pipeline_job = DetermineApproximatePassRatePipelineWrapper(
        determine_approximate_pass_rate_input)
    pipeline_job.start()
    self.execute_queued_tasks()
 def testShouldTakeActionsOnCulprit(self, _):
     """A build with identified culprits should trigger culprit actions."""
     culprit_keys = DictOfBasestring()
     culprit_keys['r1'] = 'mockurlsafekey'

     params = CulpritActionParameters(
         build_key=BuildKey(
             master_name='m', builder_name='b', build_number=124),
         culprits=culprit_keys,
         heuristic_cls=ListOfBasestring())

     self.assertTrue(
         compile_culprit_action.ShouldTakeActionsOnCulprit(params))