def testNotUpdateBug(self, issue_tracker):
  """The bug-update pipeline skips every analysis that should not update."""
  # Case 1: analysis has not finished yet.
  analysis_not_completed = MasterFlakeAnalysis.Create('m', 'b', 1, 's', 't')
  analysis_not_completed.status = analysis_status.RUNNING

  # Case 2: completed, but no bug attached.
  analysis_without_bug = MasterFlakeAnalysis.Create('m', 'b', 1, 's', 't')
  analysis_without_bug.status = analysis_status.COMPLETED

  # Case 3: configuration explicitly disables monorail updates.
  analysis_config_not_to_update = MasterFlakeAnalysis.Create(
      'm', 'b', 1, 's', 't')
  analysis_config_not_to_update.status = analysis_status.COMPLETED
  analysis_config_not_to_update.bug_id = 123
  analysis_config_not_to_update.data_points = [
      DataPoint(), DataPoint(), DataPoint()
  ]
  analysis_config_not_to_update.algorithm_parameters = {
      'update_monorail_bug': False,
  }

  # Case 4: updates enabled, but only one data point was collected.
  analysis_without_enough_data_points = MasterFlakeAnalysis.Create(
      'm', 'b', 1, 's', 't')
  analysis_without_enough_data_points.status = analysis_status.COMPLETED
  analysis_without_enough_data_points.bug_id = 123
  analysis_without_enough_data_points.algorithm_parameters = {
      'update_monorail_bug': True,
  }
  analysis_without_enough_data_points.data_points = [DataPoint()]

  all_cases = (analysis_not_completed,
               analysis_without_bug,
               analysis_config_not_to_update,
               analysis_without_enough_data_points)
  for analysis in all_cases:
    analysis.put()
    pipeline = update_flake_bug_pipeline.UpdateFlakeBugPipeline()
    self.assertFalse(pipeline.run(analysis.key.urlsafe()))
  # No case above should have touched the issue tracker.
  issue_tracker.assert_not_called()
def testObscureMasterFlakeAnalysis(self):
  """Emails in old analyses are obscured; recent analyses are untouched."""
  self.mock_current_user(user_email='*****@*****.**', is_admin=True)
  # Fixed: the original used leading-zero literals (05) for month/day,
  # which is octal-style syntax rejected by Python 3; decimal 5 is the
  # same value.
  mocked_utcnow = datetime(2017, 5, 5, 22, 50, 10)
  self.MockUTCNow(mocked_utcnow)

  valid_record_time = obscure_emails._TimeBeforeNow(days=1)
  valid_request_time = obscure_emails._TimeBeforeNow(days=5)
  invalid_record_time = obscure_emails._TimeBeforeNow(
      days=obscure_emails._TRIAGE_RECORD_RENTENSION_DAYS + 10)
  invalid_request_time = obscure_emails._TimeBeforeNow(
      days=obscure_emails._REQUEST_RECORD_RENTENSION_DAYS + 10)

  # Old enough that both the triage record and the request qualify for
  # obscuring.
  old_analysis = MasterFlakeAnalysis.Create('m', 'b', 1, 's', 't')
  old_analysis.triage_history.append(
      TriageResult(user_name='*****@*****.**'))
  old_analysis.triage_email_obscured = False
  old_analysis.triage_record_last_add = invalid_record_time
  old_analysis.triggering_user_email = '*****@*****.**'
  old_analysis.triggering_user_email_obscured = False
  old_analysis.request_time = invalid_request_time
  old_analysis.Save()

  # Recent analysis whose emails must be preserved.
  recent_analysis = MasterFlakeAnalysis.Create('m', 'b', 1000, 's', 't')
  recent_analysis.triage_history.append(
      TriageResult(user_name='*****@*****.**'))
  recent_analysis.triage_email_obscured = False
  recent_analysis.triage_record_last_add = valid_record_time
  recent_analysis.triggering_user_email = '*****@*****.**'
  recent_analysis.triggering_user_email_obscured = False
  recent_analysis.request_time = valid_request_time
  recent_analysis.Save()

  response = self.test_app.get('/obscure-emails', params={'format': 'json'})
  expected_response = {
      'failure_triage_count': 0,
      'flake_triage_count': 1,
      'flake_request_aggregated_count': 0,
      'flake_request_count': 1,
  }
  self.assertEqual(expected_response, response.json_body)

  old_analysis = MasterFlakeAnalysis.GetVersion('m', 'b', 1, 's', 't')
  self.assertEqual('*****@*****.**',
                   old_analysis.triage_history[0].user_name)
  self.assertTrue(old_analysis.triage_email_obscured)
  self.assertEqual('*****@*****.**', old_analysis.triggering_user_email)
  self.assertTrue(old_analysis.triggering_user_email_obscured)

  recent_analysis = MasterFlakeAnalysis.GetVersion('m', 'b', 1000, 's', 't')
  self.assertEqual('*****@*****.**',
                   recent_analysis.triage_history[0].user_name)
  self.assertFalse(recent_analysis.triage_email_obscured)
  self.assertEqual('*****@*****.**', recent_analysis.triggering_user_email)
  self.assertFalse(recent_analysis.triggering_user_email_obscured)
def testRecursiveFlakeTryJobPipelineAbortedNoUpdateCompletedTryJob(
    self, _):
  """An unexpected abort must not clobber a completed try job's status."""
  master_name = 'm'
  builder_name = 'b'
  master_build_number = 100
  step_name = 's'
  test_name = 't'
  revision = 'rev'
  commit_position = 1
  lower_boundary_commit_position = 0

  analysis = MasterFlakeAnalysis.Create(
      master_name, builder_name, master_build_number, step_name, test_name)
  analysis.status = analysis_status.COMPLETED
  analysis.Save()

  try_job = FlakeTryJob.Create(
      master_name, builder_name, step_name, test_name, revision)
  try_job.status = analysis_status.COMPLETED
  try_job.put()

  pipeline = RecursiveFlakeTryJobPipeline(
      analysis.key.urlsafe(), commit_position, revision,
      lower_boundary_commit_position, _DEFAULT_CACHE_NAME, None)
  pipeline._LogUnexpectedAbort()

  # The try job already finished, so its status stays COMPLETED.
  self.assertEqual(analysis_status.COMPLETED, try_job.status)
def testGenerateCommentWithSuspectedBuildHighConfidence(self):
  """The generated bug comment names the suspected build number."""
  analysis = MasterFlakeAnalysis.Create('m', 'b', 1, 's', 't')
  analysis.status = analysis_status.COMPLETED
  analysis.suspected_flake_build_number = 120
  analysis.confidence_in_suspected_build = 0.6641
  comment = update_flake_bug_pipeline._GenerateComment(analysis)
  self.assertIn('started in build 120', comment, comment)
def testGetLastAttemptedTryJobDetails(self):
  """Details of the last attempted try job include its url and status."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 123
  step_name = 's'
  test_name = 't'
  revision = 'r1'
  try_job_id = '12345'
  try_job_url = 'url'
  status = analysis_status.RUNNING

  analysis = MasterFlakeAnalysis.Create(
      master_name, builder_name, build_number, step_name, test_name)
  analysis.last_attempted_revision = revision
  analysis.put()

  try_job = FlakeTryJob.Create(
      master_name, builder_name, step_name, test_name, revision)
  try_job.try_job_ids = [try_job_id]
  try_job.status = status
  try_job.put()

  try_job_data = FlakeTryJobData.Create(try_job_id)
  try_job_data.try_job_key = try_job.key
  try_job_data.try_job_url = try_job_url
  try_job_data.put()

  expected_details = {
      'url': try_job_url,
      'status': analysis_status.STATUS_TO_DESCRIPTION.get(status)
  }
  self.assertEqual(expected_details,
                   check_flake._GetLastAttemptedTryJobDetails(analysis))
def testCheckTestsRunStatuses(self, mocked_fn, _):
  """_CheckTestsRunStatuses parses a failure log into per-test statuses."""
  # NOTE(review): BuildInfo is built as (master, build_number, build_number)
  # rather than (master, builder, build_number) — confirm this is intended.
  build_info = BuildInfo(self.master_name, self.build_number,
                         self.build_number)
  build_info.commit_position = 12345
  build_info.chromium_revision = 'a1b2c3d4'
  mocked_fn.return_value = build_info

  analysis = MasterFlakeAnalysis.Create(
      self.master_name, self.builder_name, self.build_number,
      self.step_name, self.test_name)
  analysis.Save()

  task = FlakeSwarmingTask.Create(
      self.master_name, self.builder_name, self.build_number,
      self.step_name, self.test_name)
  task.put()

  call_params = ProcessFlakeSwarmingTaskResultPipeline._GetArgs(
      self.pipeline, self.master_name, self.builder_name, self.build_number,
      self.step_name, self.build_number, self.test_name,
      self.version_number)
  tests_statuses = (
      ProcessFlakeSwarmingTaskResultPipeline._CheckTestsRunStatuses(
          self.pipeline, base_test._SAMPLE_FAILURE_LOG, *call_params))
  self.assertEqual(base_test._EXPECTED_TESTS_STATUS, tests_statuses)
def testMonitorSwarmingTaskBuildException(self, mocked_fn, _):
  """A build exception marks the task SKIPPED with an invalid data point."""
  task_id = NO_TASK_EXCEPTION

  # NOTE(review): BuildInfo is built as (master, build_number, build_number)
  # rather than (master, builder, build_number) — confirm this is intended.
  build_info = BuildInfo(self.master_name, self.build_number,
                         self.build_number)
  build_info.commit_position = 12345
  build_info.chromium_revision = 'a1b2c3d4'
  mocked_fn.return_value = build_info

  task = FlakeSwarmingTask.Create(
      self.master_name, self.builder_name, self.build_number,
      self.step_name, self.test_name)
  task.task_id = 'task_id'
  task.put()

  analysis = MasterFlakeAnalysis.Create(
      self.master_name, self.builder_name, self.build_number,
      self.step_name, self.test_name)
  analysis.Save()

  pipeline = ProcessFlakeSwarmingTaskResultPipeline()
  pipeline.start_test()
  pipeline.run(self.master_name, self.builder_name, self.build_number,
               self.step_name, task_id, self.build_number, self.test_name,
               1)

  self.assertIsNone(task.task_id)
  self.assertEqual(analysis_status.SKIPPED, task.status)
  # The appended data point records the failure: pass rate -1, no artifact.
  self.assertEqual(-1, analysis.data_points[-1].pass_rate)
  self.assertFalse(analysis.data_points[-1].has_valid_artifact)
def testGetSuspectedFlakeInfo(self):
  """Suspected-flake info is extracted from the matching data point."""
  analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
  analysis.suspected_flake_build_number = 123

  point = DataPoint()
  point.build_number = 123
  point.pass_rate = 0.9
  point.commit_position = 2
  point.git_hash = 'git_hash_2'
  point.previous_build_commit_position = 1
  point.previous_build_git_hash = 'git_hash_1'
  analysis.data_points.append(point)
  analysis.confidence_in_suspected_build = 0
  analysis.Save()

  self.assertEqual(
      {
          'confidence': 0,
          'build_number': analysis.suspected_flake_build_number,
          'commit_position': 2,
          'git_hash': 'git_hash_2',
          'lower_bound_commit_position': 1,
          'lower_bound_git_hash': 'git_hash_1',
          'triage_result': 0
      },
      check_flake._GetSuspectedFlakeInfo(analysis))
def testGetTriageHistory(self, *_):
  """The flake page exposes triage history with the relevant fields."""
  master_name = 'm'
  builder_name = 'b'
  build_number = '123'
  step_name = 's'
  test_name = 't'
  suspected_flake_build_number = 123
  triage_result = 2
  user_name = 'test'

  analysis = MasterFlakeAnalysis.Create(
      master_name, builder_name, build_number, step_name, test_name)
  analysis.status = analysis_status.COMPLETED
  analysis.suspected_flake_build_number = 100
  analysis.Save()
  # Fixed: pass the user_name variable instead of a duplicated 'test'
  # literal, so the assertion below stays in sync with the recorded value.
  analysis.UpdateTriageResult(
      triage_result, {'build_number': suspected_flake_build_number},
      user_name)

  response = self.test_app.get('/waterfall/flake', params={
      'url': buildbot.CreateBuildUrl(master_name, builder_name,
                                     build_number),
      'step_name': step_name,
      'test_name': test_name,
      'format': 'json'})

  # Because TriagedResult uses auto_now=True, a direct dict comparison will
  # always fail. Instead only compare the relevant fields for
  # triage_history.
  triage_history = response.json_body.get('triage_history')
  self.assertEqual(len(triage_history), 1)
  self.assertEqual(triage_history[0].get('triage_result'), 'Correct')
  self.assertEqual(triage_history[0].get('user_name'), user_name)
  self.assertEqual(
      triage_history[0].get('suspect_info', {}).get('build_number'),
      suspected_flake_build_number)
def testGenerateCommentWithCulprit(self):
  """The generated bug comment names the culprit and its confidence."""
  analysis = MasterFlakeAnalysis.Create('m', 'b', 1, 's', 't')
  analysis.status = analysis_status.COMPLETED
  analysis.culprit = FlakeCulprit.Create('c', 'r', 123, 'http://', 0.6713)
  comment = update_flake_bug_pipeline._GenerateComment(analysis)
  self.assertIn('culprit r123 with confidence 67.1%', comment, comment)
def _CreateAndSaveMasterFlakeAnalysis(
    self, master_name, builder_name, build_number, step_name, test_name,
    status):
  """Creates a MasterFlakeAnalysis with the given status and saves it."""
  analysis = MasterFlakeAnalysis.Create(
      master_name, builder_name, build_number, step_name, test_name)
  analysis.status = status
  analysis.Save()
def testNextCommitPositionNewlyAddedFlakyTest(self, mocked_fn):
  """A test flaky from its first revision yields that revision as culprit."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 100
  step_name = 's'
  test_name = 't'
  git_hash = 'r100'
  try_job_id = '123'
  revision = 'r100'
  commit_position = 100
  url = 'url'

  change_log = ChangeLog(None, None, revision, commit_position, None, None,
                         url, None)
  mocked_fn.return_value = change_log

  try_job = FlakeTryJob.Create(
      master_name, builder_name, step_name, test_name, revision)
  try_job.try_job_ids.append(try_job_id)
  try_job.put()

  try_job_data = FlakeTryJobData.Create(try_job_id)
  try_job_data.try_job_key = try_job.key
  try_job_data.put()

  analysis = MasterFlakeAnalysis.Create(
      master_name, builder_name, build_number, step_name, test_name)
  analysis.status = analysis_status.COMPLETED
  analysis.try_job_status = analysis_status.RUNNING
  # r99 did not exist (pass_rate -1), so r100 introduced the flaky test.
  analysis.data_points = [
      _GenerateDataPoint(pass_rate=0.9,
                         commit_position=commit_position,
                         build_number=12345,
                         previous_build_commit_position=98,
                         blame_list=['r99', 'r100']),
      _GenerateDataPoint(pass_rate=-1,
                         commit_position=99,
                         try_job_url='id1')
  ]
  analysis.suspected_flake_build_number = 12345
  analysis.algorithm_parameters = DEFAULT_CONFIG_DATA[
      'check_flake_settings']
  analysis.Save()

  self.MockPipeline(
      recursive_flake_try_job_pipeline.RecursiveFlakeTryJobPipeline,
      '',
      expected_args=[])

  pipeline = NextCommitPositionPipeline(
      analysis.key.urlsafe(), try_job.key.urlsafe(), 98,
      _DEFAULT_CACHE_NAME, None)
  pipeline.start(queue_name=constants.DEFAULT_QUEUE)
  self.execute_queued_tasks()

  culprit = analysis.culprit
  self.assertEqual(git_hash, culprit.revision)
  self.assertEqual(100, culprit.commit_position)
def testFindMatchingAnalysisForConfiguration(self, _):
  """A request resolves its analysis by master/builder configuration."""
  request = FlakeAnalysisRequest.Create('test', False, 123)

  analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 'test')
  analysis.Save()

  request.analyses.append(analysis.key)
  request.Save()

  self.assertEqual(
      analysis, request.FindMatchingAnalysisForConfiguration('m', 'b'))
def testShouldUpdateBugForAnalysisFalse(self):
  """No bug update when the config disables monorail updates."""
  analysis = MasterFlakeAnalysis.Create('m', 'b', 1, 's', 't')
  analysis.status = analysis_status.COMPLETED
  analysis.bug_id = 123
  analysis.data_points = [DataPoint(), DataPoint(), DataPoint()]
  analysis.suspected_flake_build_number = 1
  # Everything else qualifies, but the flag is off.
  analysis.algorithm_parameters = {'update_monorail_bug': False}
  self.assertFalse(
      update_flake_bug_pipeline._ShouldUpdateBugForAnalysis(analysis))
def testUpdateAnalysisUponCompletionError(self):
  """On ERROR the error is recorded and last-attempted state is kept."""
  analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
  analysis.last_attempted_revision = 'a1b2c3d4'
  # Fixed typo in the fixture error message ('errror' -> 'error'); the
  # assertions only check that *an* error was recorded.
  UpdateAnalysisUponCompletion(
      analysis, None, analysis_status.ERROR, {'error': 'error'})
  self.assertIsNotNone(analysis.error)
  # On error, last_attempted_revision is preserved for retry/debugging.
  self.assertEqual('a1b2c3d4', analysis.last_attempted_revision)
  self.assertIsNone(analysis.culprit)
  self.assertEqual(analysis_status.ERROR, analysis.try_job_status)
  self.assertIsNone(analysis.result_status)
def testNextCommitPositionPipeline(self):
  """The pipeline schedules a try job at the bisected commit position."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 100
  step_name = 's'
  test_name = 't'
  revision = 'r99'
  try_job_id = '123'

  try_job = FlakeTryJob.Create(
      master_name, builder_name, step_name, test_name, revision)
  try_job.try_job_ids.append(try_job_id)
  try_job.put()

  try_job_data = FlakeTryJobData.Create(try_job_id)
  try_job_data.try_job_key = try_job.key
  try_job_data.put()

  analysis = MasterFlakeAnalysis.Create(
      master_name, builder_name, build_number, step_name, test_name)
  analysis.status = analysis_status.COMPLETED
  analysis.try_job_status = analysis_status.RUNNING
  analysis.data_points = [
      _GenerateDataPoint(
          pass_rate=0.9,
          commit_position=100,
          build_number=12345,
          previous_build_commit_position=90,
          blame_list=[
              'r91', 'r92', 'r93', 'r94', 'r95', 'r96', 'r97', 'r98',
              'r99', 'r100'
          ]),
      _GenerateDataPoint(pass_rate=0.9, commit_position=99, try_job_url='u')
  ]
  analysis.suspected_flake_build_number = 12345
  analysis.algorithm_parameters = DEFAULT_CONFIG_DATA[
      'check_flake_settings']
  analysis.Save()

  # The next run should bisect to commit position 97 (revision r97).
  self.MockPipeline(
      recursive_flake_try_job_pipeline.RecursiveFlakeTryJobPipeline,
      '',
      expected_args=[
          analysis.key.urlsafe(), 97, 'r97', 90, _DEFAULT_CACHE_NAME, None
      ],
      expected_kwargs={})

  pipeline = NextCommitPositionPipeline(
      analysis.key.urlsafe(), try_job.key.urlsafe(), 90,
      _DEFAULT_CACHE_NAME, None)
  pipeline.start(queue_name=constants.DEFAULT_QUEUE)
  self.execute_queued_tasks()
def testGetLastAttemptedSwarmingTask(self):
  """Last attempted swarming task details expose task id and build."""
  analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
  analysis.last_attempted_swarming_task_id = 'a1b2c3d4'
  analysis.last_attempted_build_number = 122
  self.assertEqual(
      {
          'task_id': 'a1b2c3d4',
          'build_number': 122
      },
      check_flake._GetLastAttemptedSwarmingTaskDetails(analysis))
def testUpdateAnalysisUponCompletionNotFound(self):
  """Completion with no culprit resets state and marks result NOT_FOUND."""
  analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
  analysis.last_attempted_revision = 'a1b2c3d4'

  UpdateAnalysisUponCompletion(
      analysis, None, analysis_status.COMPLETED, None)

  self.assertIsNone(analysis.error)
  # Last-attempted bookkeeping is cleared on successful completion.
  self.assertIsNone(analysis.last_attempted_revision)
  self.assertIsNone(analysis.last_attempted_swarming_task_id)
  self.assertIsNone(analysis.culprit)
  self.assertEqual(analysis_status.COMPLETED, analysis.try_job_status)
  self.assertEqual(result_status.NOT_FOUND_UNTRIAGED,
                   analysis.result_status)
def testGetDataPointOfSuspectedBuildNoDataPoint(self):
  """No data point is returned when none matches the suspected build."""
  # This scenario should not happen in practice.
  expected_build_number = 123
  unexpected_build_number = 124

  point = DataPoint()
  point.build_number = expected_build_number

  analysis = MasterFlakeAnalysis.Create('m', 'b', 125, 's', 't')
  analysis.suspected_flake_build_number = unexpected_build_number
  analysis.data_points.append(point)

  self.assertIsNone(analysis.GetDataPointOfSuspectedBuild())
def testGetErrorMessage(self):
  """error_message reflects the 'message' field of the stored error."""
  cases = [
      (None, None),
      ('error', {
          'message': 'error',
          'code': 'code'
      }),
  ]
  for expected_message, stored_error in cases:
    analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
    analysis.error = stored_error
    self.assertEqual(expected_message, analysis.error_message)
def testGetDataPointOfSuspectedBuild(self):
  """The data point matching the suspected build number is returned."""
  expected_build_number = 123

  point = DataPoint()
  point.build_number = expected_build_number

  analysis = MasterFlakeAnalysis.Create('m', 'b', 125, 's', 't')
  analysis.suspected_flake_build_number = expected_build_number
  analysis.data_points.append(point)

  suspected_point = analysis.GetDataPointOfSuspectedBuild()
  self.assertEqual(expected_build_number, suspected_point.build_number)
def testGetIterationsToRerun(self):
  """iterations_to_rerun reads from algorithm_parameters, default -1."""
  cases = [
      (-1, {}),
      (5, {
          'key': 'value',
          'iterations_to_rerun': 5
      }),
  ]
  for expected_iterations, params in cases:
    analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
    analysis.algorithm_parameters = params
    self.assertEqual(expected_iterations, analysis.iterations_to_rerun)
def testUpdateAnalysisUponCompletionFound(self):
  """Completion with a culprit records it and marks result FOUND."""
  analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
  analysis.last_attempted_revision = 'a1b2c3d4'
  culprit = FlakeCulprit.Create('repo_name', 'a1b2c3d4', 12345, 'url')

  UpdateAnalysisUponCompletion(
      analysis, culprit, analysis_status.COMPLETED, None)

  self.assertIsNone(analysis.error)
  # Last-attempted bookkeeping is cleared on successful completion.
  self.assertIsNone(analysis.last_attempted_revision)
  self.assertIsNone(analysis.last_attempted_swarming_task_id)
  self.assertEqual(culprit.revision, analysis.culprit.revision)
  self.assertEqual(analysis_status.COMPLETED, analysis.try_job_status)
  self.assertEqual(result_status.FOUND_UNTRIAGED, analysis.result_status)
def testGetCoordinatesData(self):
  """Coordinates data is emitted in commit-position order with bounds."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 123
  step_name = 's'
  test_name = 't'
  success_rate = .9
  try_job_url = 'try_job_url'

  analysis = MasterFlakeAnalysis.Create(
      master_name, builder_name, build_number, step_name, test_name)

  first_point = DataPoint()
  first_point.build_number = build_number
  first_point.pass_rate = success_rate
  first_point.commit_position = 5
  first_point.git_hash = 'git_hash_5'
  first_point.previous_build_commit_position = 4
  first_point.previous_build_git_hash = 'git_hash_4'
  first_point.try_job_url = try_job_url
  analysis.data_points.append(first_point)

  second_point = DataPoint()
  second_point.build_number = build_number - 3
  second_point.pass_rate = success_rate
  second_point.commit_position = 2
  second_point.git_hash = 'git_hash_2'
  second_point.previous_build_commit_position = 1
  second_point.previous_build_git_hash = 'git_hash_1'
  second_point.try_job_url = try_job_url
  analysis.data_points.append(second_point)
  analysis.Save()

  # The later point's lower bound comes from the earlier point's position.
  expected_result = [
      {
          'commit_position': 2,
          'pass_rate': success_rate,
          'task_id': None,
          'build_number': build_number - 3,
          'git_hash': 'git_hash_2',
          'try_job_url': try_job_url
      },
      {
          'commit_position': 5,
          'pass_rate': success_rate,
          'task_id': None,
          'build_number': build_number,
          'git_hash': 'git_hash_5',
          'lower_bound_commit_position': 2,
          'lower_bound_git_hash': 'git_hash_2',
          'try_job_url': try_job_url
      }
  ]
  self.assertEqual(expected_result,
                   check_flake._GetCoordinatesData(analysis))
def testShouldUpdateBugForAnalysis(self):
  """A completed, confident analysis with a bug qualifies for updates."""
  analysis = MasterFlakeAnalysis.Create('m', 'b', 1, 's', 't')
  analysis.status = analysis_status.COMPLETED
  analysis.bug_id = 123
  analysis.data_points = [DataPoint(), DataPoint(), DataPoint()]
  analysis.suspected_flake_build_number = 1
  analysis.algorithm_parameters = {
      'update_monorail_bug': True,
      'minimum_confidence_score_to_run_tryjobs': 0.6
  }
  # Confidence 0.7 exceeds the 0.6 minimum configured above.
  analysis.confidence_in_suspected_build = 0.7
  self.assertTrue(
      update_flake_bug_pipeline._ShouldUpdateBugForAnalysis(analysis))
def testMonitorSwarmingTaskTimeOut(self, mocked_fn, _):
  """A timed-out swarming task ends in ERROR with empty test statuses."""
  build_info = BuildInfo(self.master_name, self.builder_name,
                         self.build_number)
  build_info.commit_position = 12345
  build_info.chromium_revision = 'a1b2c3d4'
  mocked_fn.return_value = build_info

  # Override swarming config settings to force a timeout.
  override_swarming_settings = {'task_timeout_hours': -1}
  self.UpdateUnitTestConfigSettings('swarming_settings',
                                    override_swarming_settings)

  task = FlakeSwarmingTask.Create(
      self.master_name, self.builder_name, self.build_number,
      self.step_name, self.test_name)
  task.task_id = 'task_id1'
  task.put()

  analysis = MasterFlakeAnalysis.Create(
      self.master_name, self.builder_name, self.build_number,
      self.step_name, self.test_name)
  analysis.Save()

  pipeline = ProcessFlakeSwarmingTaskResultPipeline(
      self.master_name, self.builder_name, self.build_number,
      self.step_name, self.build_number, self.test_name, 1,
      task_id='task_id1')
  pipeline.start_test()
  pipeline.run(self.master_name, self.builder_name, self.build_number,
               self.step_name, 'task_id1', self.build_number,
               self.test_name, 1)
  pipeline.callback(callback_params=pipeline.last_params)

  # Reload from ID to get all internal properties in sync.
  pipeline = ProcessFlakeSwarmingTaskResultPipeline.from_id(
      pipeline.pipeline_id)
  pipeline.finalized()
  step_name, task_info = pipeline.outputs.default.value

  self.assertEqual('abc_tests', task_info)
  self.assertEqual(self.step_name, step_name)

  task = FlakeSwarmingTask.Get(
      self.master_name, self.builder_name, self.build_number,
      self.step_name, self.test_name)
  self.assertEqual(analysis_status.ERROR, task.status)
  self.assertEqual({}, task.tests_statuses)
def testProcessFlakeTryJobResultPipeline(self):
  """Try-job results are converted into a data point on the analysis."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 100
  step_name = 's'
  test_name = 't'
  revision = 'r4'
  commit_position = 4
  try_job_id = 'try_job_id'
  url = 'url'

  # 20 passes out of 100 runs -> expected pass rate 0.2.
  try_job_result = {
      'report': {
          'result': {
              revision: {
                  step_name: {
                      'status': 'failed',
                      'failures': [test_name],
                      'valid': True,
                      'pass_fail_counts': {
                          test_name: {
                              'pass_count': 20,
                              'fail_count': 80
                          }
                      }
                  }
              }
          }
      }
  }

  analysis = MasterFlakeAnalysis.Create(
      master_name, builder_name, build_number, step_name, test_name)
  analysis.Save()

  try_job = FlakeTryJob.Create(
      master_name, builder_name, step_name, test_name, revision)
  try_job.flake_results = [{
      'url': url,
      'report': try_job_result,
      'try_job_id': try_job_id
  }]
  try_job.try_job_ids = [try_job_id]
  try_job.put()

  ProcessFlakeTryJobResultPipeline().run(
      revision, commit_position, try_job_result, try_job.key.urlsafe(),
      analysis.key.urlsafe())

  resulting_data_point = analysis.data_points[-1]
  self.assertEqual(0.2, resulting_data_point.pass_rate)
  self.assertEqual(commit_position, resulting_data_point.commit_position)
  self.assertEqual(url, resulting_data_point.try_job_url)
def testPost(self, _):
  """The regression-range analysis endpoint accepts a valid request."""
  # NOTE(review): despite the name testPost, this issues a GET — confirm
  # whether the handler under test is meant to be exercised via POST.
  analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
  analysis.put()

  self.mock_current_user(user_email='*****@*****.**')

  response = self.test_app.get(
      '/waterfall/analyze_regression_range',
      params={
          'lower_bound_commit_position': 1,
          'upper_bound_commit_position': 2,
          'iterations_to_rerun': 100,
          'key': analysis.key.urlsafe()
      })
  self.assertEqual(200, response.status_int)
def testGetLastAttemptedTryJobDetailsNoTryJobID(self):
  """No details are returned when the try job has no try job ids yet."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 123
  step_name = 's'
  test_name = 't'
  revision = 'r1'

  analysis = MasterFlakeAnalysis.Create(
      master_name, builder_name, build_number, step_name, test_name)
  analysis.last_attempted_revision = revision

  try_job = FlakeTryJob.Create(
      master_name, builder_name, step_name, test_name, revision)
  try_job.put()

  self.assertEqual(
      {}, check_flake._GetLastAttemptedTryJobDetails(analysis))
def testGetTryJobDataPointsNoTryJobsYet(self):
  """Before any try jobs run, only the suspected build's point appears."""
  suspected_flake_build_number = 12345
  analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
  analysis.suspected_flake_build_number = suspected_flake_build_number
  analysis.data_points = [
      _GenerateDataPoint(pass_rate=0.8,
                         commit_position=100,
                         build_number=suspected_flake_build_number)
  ]

  normalized = _GetNormalizedTryJobDataPoints(analysis)

  self.assertEqual(len(normalized), 1)
  self.assertEqual(normalized[0].run_point_number, 100)
  self.assertEqual(normalized[0].pass_rate, 0.8)