def testFirstFailureLastPassUpdating(self):
  """last pass always should just be updated once."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 100
  self._CreateAndSaveWfAnanlysis(master_name, builder_name, build_number,
                                 wf_analysis_status.ANALYZING)

  # Setup build data for builds:
  # 100: net_unitests failed, unit_tests failed.
  # 99: net_unitests passed, unit_tests failed.
  # 98: net_unitests passed, unit_tests failed.
  # 97: net_unitests failed, unit_tests failed.
  # 96: net_unitests passed, unit_tests passed.
  for i in range(5):
    self._MockUrlfetchWithBuildData(master_name, builder_name, 100 - i)

  pipeline = DetectFirstFailurePipeline()
  failure_info = pipeline.run(master_name, builder_name, build_number)

  expected_failed_steps = {
      'net_unittests': {
          'last_pass': 99,
          'current_failure': 100,
          'first_failure': 100
      },
      'unit_tests': {
          'last_pass': 96,
          'current_failure': 100,
          'first_failure': 97
      }
  }
  self.assertEqual(expected_failed_steps, failure_info['failed_steps'])
def testAnalyzeSwarmingTestResultsInitiateLastPassForTests(self):
  """Populates per-test first failures from one build's swarming results."""
  json_data = json.loads(
      self._GetSwarmingData('isolated-plain', 'm_b_223_abc_test.json'))

  step = WfStep.Create('m', 'b', 223, 'abc_test')
  step.isolated = True
  step.put()

  failed_step = {
      'current_failure': 223,
      'first_failure': 221,
      'tests': {}
  }

  pipeline = DetectFirstFailurePipeline()
  pipeline._InitiateTestLevelFirstFailureAndSaveLog(
      json_data, step, failed_step)

  expected_failed_step = {
      'current_failure': 223,
      'first_failure': 221,
      'tests': {
          'Unittest2.Subtest1': {
              'current_failure': 223,
              'first_failure': 223
          },
          'Unittest3.Subtest2': {
              'current_failure': 223,
              'first_failure': 223,
          }
      }
  }
  self.assertEqual(expected_failed_step, failed_step)
def testFirstFailureLastPassUpdating(self):
  """last pass always should just be updated once."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 100
  self._CreateAndSaveWfAnanlysis(
      master_name, builder_name, build_number, wf_analysis_status.ANALYZING)

  # Setup build data for builds:
  # 100: net_unitests failed, unit_tests failed.
  # 99: net_unitests passed, unit_tests failed.
  # 98: net_unitests passed, unit_tests failed.
  # 97: net_unitests failed, unit_tests failed.
  # 96: net_unitests passed, unit_tests passed.
  for i in range(5):
    self._MockUrlfetchWithBuildData(master_name, builder_name, 100 - i)

  pipeline = DetectFirstFailurePipeline()
  failure_info = pipeline.run(master_name, builder_name, build_number)

  expected_failed_steps = {
      'net_unittests': {
          'last_pass': 99,
          'current_failure': 100,
          'first_failure': 100
      },
      'unit_tests': {
          'last_pass': 96,
          'current_failure': 100,
          'first_failure': 97
      }
  }
  self.assertEqual(expected_failed_steps, failure_info['failed_steps'])
def testStopLookingBackIfFindTheFirstBuild(self):
  """Analysis must stop at build 0 even if failures never passed before."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 2
  self._CreateAndSaveWfAnanlysis(
      master_name, builder_name, build_number, wf_analysis_status.ANALYZING)

  # Setup build data for builds:
  self._MockUrlfetchWithBuildData(master_name, builder_name, 2)
  self._MockUrlfetchWithBuildData(master_name, builder_name, 1)
  self._MockUrlfetchWithBuildData(master_name, builder_name, 0)

  pipeline = DetectFirstFailurePipeline()
  failure_info = pipeline.run(master_name, builder_name, build_number)

  expected_failed_steps = {
      'a_tests': {
          'current_failure': 2,
          'first_failure': 0
      },
      'unit_tests': {
          'current_failure': 2,
          'first_failure': 0
      }
  }
  self.assertEqual(expected_failed_steps, failure_info['failed_steps'])
def testStopLookingBackIfFindTheFirstBuild(self):
  """Analysis must stop at build 0 even if failures never passed before."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 2
  self._CreateAndSaveWfAnanlysis(master_name, builder_name, build_number,
                                 wf_analysis_status.ANALYZING)

  # Setup build data for builds:
  self._MockUrlfetchWithBuildData(master_name, builder_name, 2)
  self._MockUrlfetchWithBuildData(master_name, builder_name, 1)
  self._MockUrlfetchWithBuildData(master_name, builder_name, 0)

  pipeline = DetectFirstFailurePipeline()
  failure_info = pipeline.run(master_name, builder_name, build_number)

  expected_failed_steps = {
      'a_tests': {
          'current_failure': 2,
          'first_failure': 0
      },
      'unit_tests': {
          'current_failure': 2,
          'first_failure': 0
      }
  }
  self.assertEqual(expected_failed_steps, failure_info['failed_steps'])
def testStopLookingBackIfFindTheFirstBuild(self, mock_fn):
  """Analysis must stop at build 0 even if failures never passed before."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 2
  self._CreateAndSaveWfAnanlysis(master_name, builder_name, build_number,
                                 analysis_status.RUNNING)

  # Setup build data for builds:
  mock_fn.side_effect = [
      self._GetBuildData(master_name, builder_name, 2),
      self._GetBuildData(master_name, builder_name, 1),
      self._GetBuildData(master_name, builder_name, 0)
  ]

  pipeline = DetectFirstFailurePipeline()
  failure_info = pipeline.run(master_name, builder_name, build_number)

  expected_failed_steps = {
      'a_tests': {
          'current_failure': 2,
          'first_failure': 0
      },
      'unit_tests': {
          'current_failure': 2,
          'first_failure': 0
      }
  }
  self.assertEqual(expected_failed_steps, failure_info['failed_steps'])
def testStopLookingBackIfAllFailedStepsPassedInLastBuild(self, mock_fn):
  """Analysis stops once every failed step passed in the previous build."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 124
  self._CreateAndSaveWfAnanlysis(master_name, builder_name, build_number,
                                 analysis_status.RUNNING)

  # Setup build data for builds:
  mock_fn.side_effect = [
      self._GetBuildData(master_name, builder_name, 124),
      self._GetBuildData(master_name, builder_name, 123)
  ]

  pipeline = DetectFirstFailurePipeline()
  failure_info = pipeline.run(master_name, builder_name, build_number)

  expected_failed_steps = {
      'a': {
          'last_pass': 123,
          'current_failure': 124,
          'first_failure': 124
      }
  }
  self.assertEqual(expected_failed_steps, failure_info['failed_steps'])
def testStopLookingBackIfAllFailedStepsPassedInLastBuild(self):
  """Analysis stops once every failed step passed in the previous build."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 124
  self._CreateAndSaveWfAnanlysis(
      master_name, builder_name, build_number, wf_analysis_status.ANALYZING)

  # Setup build data for builds:
  self._MockUrlfetchWithBuildData(master_name, builder_name, 124)
  self._MockUrlfetchWithBuildData(
      master_name, builder_name, 123, build_data=None, archive=True)
  # Build 122 must never be fetched once 123 shows all steps passing.
  self._MockUrlfetchWithBuildData(
      master_name, builder_name, 122, build_data='Blow up if used!')

  pipeline = DetectFirstFailurePipeline()
  failure_info = pipeline.run(master_name, builder_name, build_number)

  expected_failed_steps = {
      'a': {
          'last_pass': 123,
          'current_failure': 124,
          'first_failure': 124
      }
  }
  self.assertEqual(expected_failed_steps, failure_info['failed_steps'])
def testStopLookingBackIfAllFailedStepsPassedInLastBuild(self):
  """Analysis stops once every failed step passed in the previous build."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 124
  self._CreateAndSaveWfAnanlysis(master_name, builder_name, build_number,
                                 wf_analysis_status.ANALYZING)

  # Setup build data for builds:
  self._MockUrlfetchWithBuildData(master_name, builder_name, 124)
  self._MockUrlfetchWithBuildData(master_name, builder_name, 123,
                                  build_data=None, archive=True)
  # Build 122 must never be fetched once 123 shows all steps passing.
  self._MockUrlfetchWithBuildData(master_name, builder_name, 122,
                                  build_data='Blow up if used!')

  pipeline = DetectFirstFailurePipeline()
  failure_info = pipeline.run(master_name, builder_name, build_number)

  expected_failed_steps = {
      'a': {
          'last_pass': 123,
          'current_failure': 124,
          'first_failure': 124
      }
  }
  self.assertEqual(expected_failed_steps, failure_info['failed_steps'])
def testCheckFirstKnownFailureForSwarmingTestsFoundFlaky(self):
  """Flaky swarming results should leave the failed steps unchanged."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 221
  step_name = 'abc_test'
  failed_steps = {
      'abc_test': {
          'current_failure': 221,
          'first_failure': 221,
          'list_isolated_data': [{
              'isolatedserver': 'https://isolateserver.appspot.com',
              'namespace': 'default-gzip',
              'digest': 'isolatedhashabctest-223'
          }]
      }
  }
  builds = {
      '221': {
          'blame_list': ['commit1'],
          'chromium_revision': 'commit1'
      },
      '222': {
          'blame_list': ['commit2'],
          'chromium_revision': 'commit2'
      },
      '223': {
          'blame_list': ['commit3', 'commit4'],
          'chromium_revision': 'commit4'
      }
  }
  expected_failed_steps = failed_steps

  step = WfStep.Create(master_name, builder_name, build_number, step_name)
  step.isolated = True
  step.put()

  def MockGetIsolatedDataForFailedBuild(*_):
    return True

  self.mock(swarming_util, 'GetIsolatedDataForFailedBuild',
            MockGetIsolatedDataForFailedBuild)

  def MockRetrieveShardedTestResultsFromIsolatedServer(*_):
    return json.loads(
        self._GetSwarmingData('isolated-plain',
                              'm_b_223_abc_test_flaky.json'))

  self.mock(swarming_util, 'RetrieveShardedTestResultsFromIsolatedServer',
            MockRetrieveShardedTestResultsFromIsolatedServer)

  pipeline = DetectFirstFailurePipeline()
  pipeline._CheckFirstKnownFailureForSwarmingTests(
      master_name, builder_name, build_number, failed_steps, builds)

  self.assertEqual(expected_failed_steps, failed_steps)
def testBuildDataNeedUpdating(self):
  """Build data needs re-fetching when absent or incomplete and stale."""
  build = WfBuild.Create('m', 'b', 1)
  pipeline = DetectFirstFailurePipeline('m', 'b', 1)

  # Build data is not available.
  self.assertTrue(pipeline._BuildDataNeedUpdating(build))

  # Build was not completed and data is not recent.
  build.data = 'dummy'
  build.completed = False
  build.last_crawled_time = self._TimeBeforeNowBySeconds(360)
  self.assertTrue(pipeline._BuildDataNeedUpdating(build))
def testCheckFirstKnownFailureForSwarmingTestsFoundFlaky(
    self, mock_module):
  """Flaky swarming results should leave the failed steps unchanged."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 221
  step_name = 'abc_test'
  failed_steps = {
      'abc_test': {
          'current_failure': 221,
          'first_failure': 221,
          'list_isolated_data': [{
              'isolatedserver': 'https://isolateserver.appspot.com',
              'namespace': 'default-gzip',
              'digest': 'isolatedhashabctest-223'
          }]
      }
  }
  builds = {
      '221': {
          'blame_list': ['commit1'],
          'chromium_revision': 'commit1'
      },
      '222': {
          'blame_list': ['commit2'],
          'chromium_revision': 'commit2'
      },
      '223': {
          'blame_list': ['commit3', 'commit4'],
          'chromium_revision': 'commit4'
      }
  }
  expected_failed_steps = failed_steps

  step = WfStep.Create(master_name, builder_name, build_number, step_name)
  step.isolated = True
  step.put()

  mock_module.GetIsolatedDataForFailedBuild.return_value = True
  mock_module.RetrieveShardedTestResultsFromIsolatedServer.return_value = (
      json.loads(
          self._GetSwarmingData('isolated-plain',
                                'm_b_223_abc_test_flaky.json')))

  pipeline = DetectFirstFailurePipeline()
  pipeline._CheckFirstKnownFailureForSwarmingTests(
      master_name, builder_name, build_number, failed_steps, builds)

  self.assertEqual(expected_failed_steps, failed_steps)
def testGetBuildeDataFromBuildMaster(self):
  """_DownloadBuildData should store the payload fetched from the master.

  Bug fix: the mocked build data did not match the asserted expected value
  ('Test get build data' vs 'Test get build data from build master'), so the
  final assertEqual could never pass. The mock payload now matches.
  """
  master_name = 'm'
  builder_name = 'b'
  build_number = 123
  # This exact payload is what _DownloadBuildData must place in build.data.
  self._MockUrlfetchWithBuildData(
      master_name, builder_name, 123,
      build_data='Test get build data from build master')

  pipeline = DetectFirstFailurePipeline()
  build = pipeline._DownloadBuildData(master_name, builder_name,
                                      build_number)

  expected_build_data = 'Test get build data from build master'
  self.assertIsNotNone(build)
  self.assertEqual(expected_build_data, build.data)
def run(self, master_name, builder_name, build_number, build_completed,
        force):
  """Schedules the full failure-analysis pipeline chain for a build.

  Args:
    master_name: Name of the build master.
    builder_name: Name of the builder.
    build_number: Number of the failed build.
    build_completed: Whether the build has finished.
    force: Whether to force a rerun of downstream analyses.
  """
  self._ResetAnalysis(master_name, builder_name, build_number)

  # The yield statements below return PipelineFutures, which allow subsequent
  # pipelines to refer to previous output values.
  # https://github.com/GoogleCloudPlatform/appengine-pipelines/wiki/Python

  # Heuristic Approach.
  failure_info = yield DetectFirstFailurePipeline(
      master_name, builder_name, build_number)
  change_logs = yield PullChangelogPipeline(failure_info)
  deps_info = yield ExtractDEPSInfoPipeline(failure_info, change_logs)
  signals = yield ExtractSignalPipeline(failure_info)
  heuristic_result = yield IdentifyCulpritPipeline(
      failure_info, change_logs, deps_info, signals, build_completed)

  # Try job approach.
  with pipeline.InOrder():
    # Swarming rerun.
    # Triggers swarming tasks when first time test failure happens.
    # This pipeline will run before build completes.
    yield TriggerSwarmingTasksPipeline(master_name, builder_name,
                                       build_number, failure_info, force)

    # Checks if first time failures happen and starts a try job if yes.
    yield StartTryJobOnDemandPipeline(master_name, builder_name,
                                      build_number, failure_info, signals,
                                      heuristic_result, build_completed,
                                      force)

    # Trigger flake analysis on flaky tests, if any.
    yield TriggerFlakeAnalysesPipeline(master_name, builder_name,
                                       build_number)
def testAnalyzeSuccessfulBuild(self, mock_fn):
  """A successful build yields failure_info with failed == False."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 121
  self._CreateAndSaveWfAnanlysis(master_name, builder_name, build_number,
                                 analysis_status.RUNNING)

  # Setup build data for builds:
  mock_fn.return_value = self._GetBuildData(master_name, builder_name, 121)

  pipeline = DetectFirstFailurePipeline()
  failure_info = pipeline.run(master_name, builder_name, build_number)

  self.assertFalse(failure_info['failed'])
def testAnalyzeInfraExceptionBuild(self, mock_fn):
  """An infra exception build should be classified as failure_type.INFRA."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 120
  self._CreateAndSaveWfAnanlysis(master_name, builder_name, build_number,
                                 analysis_status.RUNNING)

  # Setup build data for builds:
  mock_fn.return_value = self._GetBuildData(master_name, builder_name, 120)

  pipeline = DetectFirstFailurePipeline()
  failure_info = pipeline.run(master_name, builder_name, build_number)

  self.assertEqual(failure_info['failure_type'], failure_type.INFRA)
def testAnalyzeSuccessfulBuild(self):
  """A successful build yields failure_info with failed == False."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 121
  self._CreateAndSaveWfAnanlysis(
      master_name, builder_name, build_number, wf_analysis_status.ANALYZING)

  # Setup build data for builds:
  self._MockUrlfetchWithBuildData(master_name, builder_name, 121)
  # Build 120 must never be fetched because 121 succeeded.
  self._MockUrlfetchWithBuildData(
      master_name, builder_name, 120, build_data='Blow up if used!')

  pipeline = DetectFirstFailurePipeline()
  failure_info = pipeline.run(master_name, builder_name, build_number)

  self.assertFalse(failure_info['failed'])
def testLookBackUntilGreenBuild(self):
  """Walks back through builds until each failed step has a green build."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 123
  self._CreateAndSaveWfAnanlysis(master_name, builder_name, build_number,
                                 wf_analysis_status.ANALYZING)

  # Setup build data for builds:
  # 123: mock urlfetch to ensure it is fetched.
  self._MockUrlfetchWithBuildData(master_name, builder_name, 123)
  # 122: mock a build in datastore to ensure it is not fetched again.
  build = WfBuild.Create(master_name, builder_name, 122)
  build.data = self._GetBuildData(master_name, builder_name, 122)
  build.completed = True
  build.put()
  self._MockUrlfetchWithBuildData(master_name, builder_name, 122,
                                  build_data='Blow up if used!')
  # 121: mock a build in datastore to ensure it is updated.
  build = WfBuild.Create(master_name, builder_name, 121)
  build.data = 'Blow up if used!'
  build.last_crawled_time = self._TimeBeforeNowBySeconds(7200)
  build.completed = False
  build.put()
  self._MockUrlfetchWithBuildData(master_name, builder_name, 121)

  pipeline = DetectFirstFailurePipeline()
  failure_info = pipeline.run(master_name, builder_name, build_number)

  expected_failed_steps = {
      'net_unittests': {
          'last_pass': 122,
          'current_failure': 123,
          'first_failure': 123
      },
      'unit_tests': {
          'last_pass': 121,
          'current_failure': 123,
          'first_failure': 122
      }
  }
  self.assertEqual(expected_failed_steps, failure_info['failed_steps'])
def testRunPipelineForCompileFailure(self):
  """A compile failure produces normalized failure_info of type COMPILE."""

  def _MockExtractBuildInfo(*_):
    build_info = BuildInfo('m', 'b', 25409)
    build_info.failed_steps = {
        'compile': {
            'last_pass': '******',
            'current_failure': '25409',
            'first_failure': '25409'
        }
    }
    return build_info

  self.mock(DetectFirstFailurePipeline, '_ExtractBuildInfo',
            _MockExtractBuildInfo)

  self._CreateAndSaveWfAnanlysis('m', 'b', 25409, analysis_status.RUNNING)

  pipeline = DetectFirstFailurePipeline()
  failure_info = pipeline.run('m', 'b', 25409)

  expected_failure_info = {
      'failed': True,
      'master_name': 'm',
      'builder_name': 'b',
      'build_number': 25409,
      'chromium_revision': None,
      'builds': {
          25409: {
              'blame_list': [],
              'chromium_revision': None
          }
      },
      'failed_steps': {
          'compile': {
              'current_failure': 25409,
              'first_failure': 25409
          }
      },
      'failure_type': failure_type.COMPILE,
      'parent_mastername': None,
      'parent_buildername': None,
  }
  self.assertEqual(failure_info, expected_failure_info)
def testLookBackUntilGreenBuild(self):
  """Walks back through builds until each failed step has a green build."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 123
  self._CreateAndSaveWfAnanlysis(
      master_name, builder_name, build_number, wf_analysis_status.ANALYZING)

  # Setup build data for builds:
  # 123: mock urlfetch to ensure it is fetched.
  self._MockUrlfetchWithBuildData(master_name, builder_name, 123)
  # 122: mock a build in datastore to ensure it is not fetched again.
  build = WfBuild.Create(master_name, builder_name, 122)
  build.data = self._GetBuildData(master_name, builder_name, 122)
  build.completed = True
  build.put()
  self._MockUrlfetchWithBuildData(
      master_name, builder_name, 122, build_data='Blow up if used!')
  # 121: mock a build in datastore to ensure it is updated.
  build = WfBuild.Create(master_name, builder_name, 121)
  build.data = 'Blow up if used!'
  build.last_crawled_time = self._TimeBeforeNowBySeconds(7200)
  build.completed = False
  build.put()
  self._MockUrlfetchWithBuildData(master_name, builder_name, 121)

  pipeline = DetectFirstFailurePipeline()
  failure_info = pipeline.run(master_name, builder_name, build_number)

  expected_failed_steps = {
      'net_unittests': {
          'last_pass': 122,
          'current_failure': 123,
          'first_failure': 123
      },
      'unit_tests': {
          'last_pass': 121,
          'current_failure': 123,
          'first_failure': 122
      }
  }
  self.assertEqual(expected_failed_steps, failure_info['failed_steps'])
def testAnalyzeSuccessfulBuild(self):
  """A successful build yields failure_info with failed == False."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 121
  self._CreateAndSaveWfAnanlysis(master_name, builder_name, build_number,
                                 wf_analysis_status.ANALYZING)

  # Setup build data for builds:
  self._MockUrlfetchWithBuildData(master_name, builder_name, 121)
  # Build 120 must never be fetched because 121 succeeded.
  self._MockUrlfetchWithBuildData(master_name, builder_name, 120,
                                  build_data='Blow up if used!')

  pipeline = DetectFirstFailurePipeline()
  failure_info = pipeline.run(master_name, builder_name, build_number)

  self.assertFalse(failure_info['failed'])
def testUpdateFirstFailureOnTestLevelFlaky(self):
  """A flaky earlier run resets first_failure/last_pass for the step."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 223
  step_name = 'abc_test'
  failed_step = {
      'current_failure': 223,
      'first_failure': 221,
      'tests': {
          'Unittest2.Subtest1': {
              'current_failure': 223,
              'first_failure': 223,
              'last_pass': 223,
              'base_test_name': 'Unittest2.Subtest1'
          }
      }
  }
  step = WfStep.Create(master_name, builder_name, 222, step_name)
  step.isolated = True
  step.log_data = 'flaky'
  step.put()

  pipeline = DetectFirstFailurePipeline()
  pipeline._UpdateFirstFailureOnTestLevel(master_name, builder_name,
                                          build_number, step_name,
                                          failed_step,
                                          HttpClientAppengine())

  expected_failed_step = {
      'current_failure': 223,
      'first_failure': 223,
      'last_pass': 222,
      'tests': {
          'Unittest2.Subtest1': {
              'current_failure': 223,
              'first_failure': 223,
              'last_pass': 222,
              'base_test_name': 'Unittest2.Subtest1'
          }
      }
  }
  self.assertEqual(expected_failed_step, failed_step)
def run(self, master_name, builder_name, build_number):
  """Schedules the heuristic failure-analysis pipeline chain for a build.

  Args:
    master_name: Name of the build master.
    builder_name: Name of the builder.
    build_number: Number of the failed build.
  """
  self._ResetAnalysis(master_name, builder_name, build_number)

  # The yield statements below return PipelineFutures, which allow subsequent
  # pipelines to refer to previous output values.
  # https://github.com/GoogleCloudPlatform/appengine-pipelines/wiki/Python
  failure_info = yield DetectFirstFailurePipeline(
      master_name, builder_name, build_number)
  change_logs = yield PullChangelogPipeline(failure_info)
  deps_info = yield ExtractDEPSInfoPipeline(failure_info, change_logs)
  signals = yield ExtractSignalPipeline(failure_info)
  yield IdentifyCulpritPipeline(failure_info, change_logs, deps_info,
                                signals)
def testUpdateFirstFailureOnTestLevelFlaky(self):
  """A flaky earlier run resets first_failure/last_pass for the step."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 223
  step_name = 'abc_test'
  failed_step = {
      'current_failure': 223,
      'first_failure': 221,
      'tests': {
          'Unittest2.Subtest1': {
              'current_failure': 223,
              'first_failure': 223,
              'last_pass': 223
          }
      }
  }
  step = WfStep.Create(master_name, builder_name, 222, step_name)
  step.isolated = True
  step.log_data = 'flaky'
  step.put()

  pipeline = DetectFirstFailurePipeline()
  pipeline._UpdateFirstFailureOnTestLevel(
      master_name, builder_name, build_number, step_name, failed_step,
      HttpClient())

  expected_failed_step = {
      'current_failure': 223,
      'first_failure': 223,
      'last_pass': 222,
      'tests': {
          'Unittest2.Subtest1': {
              'current_failure': 223,
              'first_failure': 223,
              'last_pass': 222
          }
      }
  }
  self.assertEqual(expected_failed_step, failed_step)
def testFirstFailureLastPassUpdating(self, mock_fn):
  """last pass always should just be updated once."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 100
  self._CreateAndSaveWfAnanlysis(master_name, builder_name, build_number,
                                 analysis_status.RUNNING)

  # Setup build data for builds:
  # 100: net_unitests failed, unit_tests failed.
  # 99: net_unitests passed, unit_tests failed.
  # 98: net_unitests passed, unit_tests failed.
  # 97: net_unitests failed, unit_tests failed.
  # 96: net_unitests passed, unit_tests passed.
  side_effects = []
  for i in range(5):
    side_effects.append(
        self._GetBuildData(master_name, builder_name, 100 - i))
  mock_fn.side_effect = side_effects

  pipeline = DetectFirstFailurePipeline()
  failure_info = pipeline.run(master_name, builder_name, build_number)

  expected_failed_steps = {
      'net_unittests': {
          'last_pass': 99,
          'current_failure': 100,
          'first_failure': 100
      },
      'unit_tests': {
          'last_pass': 96,
          'current_failure': 100,
          'first_failure': 97
      }
  }
  self.assertEqual(expected_failed_steps, failure_info['failed_steps'])
def testAnalyzeSwarmingTestResultsInitiateLastPassForTests(self):
  """Populates per-test first failures from one build's swarming results."""
  json_data = json.loads(
      self._GetSwarmingData('isolated-plain', 'm_b_223_abc_test.json'))

  step = WfStep.Create('m', 'b', 223, 'abc_test')
  step.isolated = True
  step.put()

  failed_step = {
      'current_failure': 223,
      'first_failure': 221,
      'tests': {}
  }

  pipeline = DetectFirstFailurePipeline()
  pipeline._InitiateTestLevelFirstFailureAndSaveLog(
      json_data, step, failed_step)

  expected_failed_step = {
      'current_failure': 223,
      'first_failure': 221,
      'tests': {
          'Unittest2.Subtest1': {
              'current_failure': 223,
              'first_failure': 223,
              'base_test_name': 'Unittest2.Subtest1'
          },
          'Unittest3.Subtest2': {
              'current_failure': 223,
              'first_failure': 223,
              'base_test_name': 'Unittest3.Subtest2'
          }
      }
  }
  self.assertEqual(expected_failed_step, failed_step)
def testUpdateFirstFailureOnTestLevelThenUpdateStepLevel(self):
  """Per-test first failures roll up into the step-level failure info."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 223
  step_name = 'abc_test'
  failed_step = {
      'current_failure': 223,
      'first_failure': 221,
      'tests': {
          'Unittest2.Subtest1': {
              'current_failure': 223,
              'first_failure': 223,
              'last_pass': 223
          },
          'Unittest3.Subtest2': {
              'current_failure': 223,
              'first_failure': 223
          }
      }
  }

  for n in xrange(222, 220, -1):
    # Mock retrieving data from swarming server for a single step.
    self._MockUrlFetchWithSwarmingData(
        master_name, builder_name, n, 'abc_test')

    # Mock retrieving hash to output.json from isolated server.
    isolated_data = {
        'isolatedserver': 'https://isolateserver.appspot.com',
        'namespace': {
            'namespace': 'default-gzip'
        },
        'digest': 'isolatedhashabctest-%d' % n
    }
    self._MockUrlfetchWithIsolatedData(isolated_data, build_number=n)
    # Mock retrieving url to output.json from isolated server.
    file_hash_data = {
        'isolatedserver': 'https://isolateserver.appspot.com',
        'namespace': {
            'namespace': 'default-gzip'
        },
        'digest': 'abctestoutputjsonhash-%d' % n
    }
    self._MockUrlfetchWithIsolatedData(file_hash_data, build_number=n)

    # Mock downloading output.json from isolated server.
    self._MockUrlfetchWithIsolatedData(
        None,
        ('https://isolateserver.storage.googleapis.com/default-gzip/'
         'm_b_%d_abc_test' % n),
        '%s_%s_%d_%s.json' % (master_name, builder_name, n, 'abc_test'))

  pipeline = DetectFirstFailurePipeline()
  pipeline._UpdateFirstFailureOnTestLevel(
      master_name, builder_name, build_number, step_name, failed_step,
      HttpClient())

  expected_failed_step = {
      'current_failure': 223,
      'first_failure': 221,
      'tests': {
          'Unittest2.Subtest1': {
              'current_failure': 223,
              'first_failure': 222,
              'last_pass': 221
          },
          'Unittest3.Subtest2': {
              'current_failure': 223,
              'first_failure': 221
          }
      }
  }
  self.assertEqual(expected_failed_step, failed_step)
def testTestLevelFailedInfo(self, mock_fn):
  """End-to-end run collects test-level failure info and per-build logs."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 223
  self._CreateAndSaveWfAnanlysis(master_name, builder_name, build_number,
                                 analysis_status.RUNNING)

  # Mock data for retrieving data from swarming server for a build.
  self._MockUrlFetchWithSwarmingData(master_name, builder_name, 223)
  mock_fn.side_effect = [
      self._GetBuildData(master_name, builder_name, 223),
      self._GetBuildData(master_name, builder_name, 222),
      self._GetBuildData(master_name, builder_name, 221),
      self._GetBuildData(master_name, builder_name, 220)
  ]

  for n in xrange(223, 219, -1):  # pragma: no branch.
    # Setup build data for builds:
    if n == 220:
      break

    # Mock data for retrieving data from swarming server for a single step.
    self._MockUrlFetchWithSwarmingData(master_name, builder_name, n,
                                       'abc_test')

    # Mock data for retrieving hash to output.json from isolated server.
    isolated_data = {
        'isolatedserver': 'https://isolateserver.appspot.com',
        'namespace': {
            'namespace': 'default-gzip'
        },
        'digest': 'isolatedhashabctest-%d' % n
    }
    self._MockUrlfetchWithIsolatedData(isolated_data, build_number=n)
    # Mock data for retrieving url to output.json from isolated server.
    file_hash_data = {
        'isolatedserver': 'https://isolateserver.appspot.com',
        'namespace': {
            'namespace': 'default-gzip'
        },
        'digest': 'abctestoutputjsonhash-%d' % n
    }
    self._MockUrlfetchWithIsolatedData(file_hash_data, build_number=n)

    # Mock data for downloading output.json from isolated server.
    self._MockUrlfetchWithIsolatedData(
        None,
        ('https://isolateserver.storage.googleapis.com/default-gzip/'
         'm_b_%d_abc_test' % n),
        '%s_%s_%d_%s.json' % (master_name, builder_name, n, 'abc_test'))

  step_221 = WfStep.Create(master_name, builder_name, 221, 'abc_test')
  step_221.isolated = True
  step_221.log_data = (
      '{"Unittest3.Subtest3": "YS9iL3UzczIuY2M6MTEwOiBGYWlsdXJlCg=="}')
  step_221.put()

  pipeline = DetectFirstFailurePipeline()
  failure_info = pipeline.run(master_name, builder_name, build_number)

  expected_failed_steps = {
      'abc_test': {
          'current_failure': 223,
          'first_failure': 222,
          'last_pass': 221,
          'list_isolated_data': [{
              'isolatedserver': 'https://isolateserver.appspot.com',
              'namespace': 'default-gzip',
              'digest': 'isolatedhashabctest-223'
          }],
          'tests': {
              'Unittest2.Subtest1': {
                  'current_failure': 223,
                  'first_failure': 222,
                  'last_pass': 221,
                  'base_test_name': 'Unittest2.Subtest1'
              },
              'Unittest3.Subtest2': {
                  'current_failure': 223,
                  'first_failure': 222,
                  'last_pass': 221,
                  'base_test_name': 'Unittest3.Subtest2'
              }
          }
      }
  }

  expected_step_log_data = {
      223: ('{"Unittest2.Subtest1": "RVJST1I6eF90ZXN0LmNjOjEyMzRcbmEvYi91Mn'
            'MxLmNjOjU2NzogRmFpbHVyZVxuRVJST1I6WzJdOiAyNTk0NzM1MDAwIGJvZ28tb'
            'Wljcm9zZWNvbmRzXG5FUlJPUjp4X3Rlc3QuY2M6MTIzNAphL2IvdTJzMS5jYzo1'
            'Njc6IEZhaWx1cmUK", '
            '"Unittest3.Subtest2": "YS9iL3UzczIuY2M6MTEwOiBGYWlsdXJlCg=="}'),
      222: ('{"Unittest2.Subtest1": "RVJST1I6eF90ZXN0LmNjOjEyMzRcbmEvYi91Mn'
            'MxLmNjOjU2NzogRmFpbHVyZVxuRVJST1I6WzJdOiAyNTk0NzM1MDAwIGJvZ28tb'
            'Wljcm9zZWNvbmRzXG5FUlJPUjp4X3Rlc3QuY2M6MTIzNAphL2IvdTJzMS5jYzo1'
            'Njc6IEZhaWx1cmUK", '
            '"Unittest3.Subtest2": "YS9iL3UzczIuY2M6MTEwOiBGYWlsdXJlCg=="}'),
      221: '{"Unittest3.Subtest3": "YS9iL3UzczIuY2M6MTEwOiBGYWlsdXJlCg=="}'
  }

  for n in xrange(223, 220, -1):
    step = WfStep.Get(master_name, builder_name, n, 'abc_test')
    self.assertIsNotNone(step)
    self.assertTrue(step.isolated)
    self.assertEqual(expected_step_log_data[n], step.log_data)

  self.assertEqual(expected_failed_steps, failure_info['failed_steps'])
def testUpdateFailureInfoBuildsUpdateBuilds(self):
  """Builds not covered by any failure range are dropped from builds."""
  failed_steps = {
      'compile': {
          'current_failure': 223,
          'first_failure': 222,
          'last_pass': 221
      },
      'abc_test': {
          'current_failure': 223,
          'first_failure': 222,
          'last_pass': 221,
          'list_isolated_data': [{
              'isolatedserver': 'https://isolateserver.appspot.com',
              'namespace': 'default-gzip',
              'digest': 'isolatedhashabctest-223'
          }],
          'tests': {
              'Unittest2.Subtest1': {
                  'current_failure': 223,
                  'first_failure': 222,
                  'last_pass': 221,
                  'base_test_name': 'Unittest2.Subtest1'
              },
              'Unittest3.Subtest2': {
                  'current_failure': 223,
                  'first_failure': 222,
                  'last_pass': 221,
                  'base_test_name': 'Unittest3.Subtest2'
              }
          }
      }
  }

  builds = {
      '220': {
          'blame_list': ['commit0'],
          'chromium_revision': 'commit0'
      },
      '221': {
          'blame_list': ['commit1'],
          'chromium_revision': 'commit1'
      },
      '222': {
          'blame_list': ['commit2'],
          'chromium_revision': 'commit2'
      },
      '223': {
          'blame_list': ['commit3', 'commit4'],
          'chromium_revision': 'commit4'
      }
  }

  pipeline = DetectFirstFailurePipeline()
  pipeline._UpdateFailureInfoBuilds(failed_steps, builds)

  # Build 220 precedes every step's last_pass, so it should be removed.
  expected_builds = {
      '221': {
          'blame_list': ['commit1'],
          'chromium_revision': 'commit1'
      },
      '222': {
          'blame_list': ['commit2'],
          'chromium_revision': 'commit2'
      },
      '223': {
          'blame_list': ['commit3', 'commit4'],
          'chromium_revision': 'commit4'
      }
  }
  self.assertEqual(builds, expected_builds)
def testUpdateFirstFailureOnTestLevelThenUpdateStepLevel(self):
  """Per-test first failures roll up into the step-level failure info."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 223
  step_name = 'abc_test'
  failed_step = {
      'current_failure': 223,
      'first_failure': 221,
      'tests': {
          'Unittest2.Subtest1': {
              'current_failure': 223,
              'first_failure': 223,
              'last_pass': 223,
              'base_test_name': 'Unittest2.Subtest1'
          },
          'Unittest3.Subtest2': {
              'current_failure': 223,
              'first_failure': 223,
              'base_test_name': 'Unittest3.Subtest2'
          }
      }
  }

  for n in xrange(222, 220, -1):
    # Mock retrieving data from swarming server for a single step.
    self._MockUrlFetchWithSwarmingData(master_name, builder_name, n,
                                       'abc_test')

    # Mock retrieving hash to output.json from isolated server.
    isolated_data = {
        'isolatedserver': 'https://isolateserver.appspot.com',
        'namespace': {
            'namespace': 'default-gzip'
        },
        'digest': 'isolatedhashabctest-%d' % n
    }
    self._MockUrlfetchWithIsolatedData(isolated_data, build_number=n)
    # Mock retrieving url to output.json from isolated server.
    file_hash_data = {
        'isolatedserver': 'https://isolateserver.appspot.com',
        'namespace': {
            'namespace': 'default-gzip'
        },
        'digest': 'abctestoutputjsonhash-%d' % n
    }
    self._MockUrlfetchWithIsolatedData(file_hash_data, build_number=n)

    # Mock downloading output.json from isolated server.
    self._MockUrlfetchWithIsolatedData(
        None,
        ('https://isolateserver.storage.googleapis.com/default-gzip/'
         'm_b_%d_abc_test' % n),
        '%s_%s_%d_%s.json' % (master_name, builder_name, n, 'abc_test'))

  pipeline = DetectFirstFailurePipeline()
  pipeline._UpdateFirstFailureOnTestLevel(master_name, builder_name,
                                          build_number, step_name,
                                          failed_step,
                                          HttpClientAppengine())

  expected_failed_step = {
      'current_failure': 223,
      'first_failure': 221,
      'tests': {
          'Unittest2.Subtest1': {
              'current_failure': 223,
              'first_failure': 222,
              'last_pass': 221,
              'base_test_name': 'Unittest2.Subtest1'
          },
          'Unittest3.Subtest2': {
              'current_failure': 223,
              'first_failure': 221,
              'base_test_name': 'Unittest3.Subtest2'
          }
      }
  }
  self.assertEqual(expected_failed_step, failed_step)
def testUpdateFailureInfoBuildsUpdateBuilds(self):
  """Builds not covered by any failure range are dropped from builds."""
  failed_steps = {
      'compile': {
          'current_failure': 223,
          'first_failure': 222,
          'last_pass': 221
      },
      'abc_test': {
          'current_failure': 223,
          'first_failure': 222,
          'last_pass': 221,
          'list_isolated_data': [{
              'isolatedserver': 'https://isolateserver.appspot.com',
              'namespace': 'default-gzip',
              'digest': 'isolatedhashabctest-223'
          }],
          'tests': {
              'Unittest2.Subtest1': {
                  'current_failure': 223,
                  'first_failure': 222,
                  'last_pass': 221
              },
              'Unittest3.Subtest2': {
                  'current_failure': 223,
                  'first_failure': 222,
                  'last_pass': 221
              }
          }
      }
  }

  builds = {
      '220': {
          'blame_list': ['commit0'],
          'chromium_revision': 'commit0'
      },
      '221': {
          'blame_list': ['commit1'],
          'chromium_revision': 'commit1'
      },
      '222': {
          'blame_list': ['commit2'],
          'chromium_revision': 'commit2'
      },
      '223': {
          'blame_list': ['commit3', 'commit4'],
          'chromium_revision': 'commit4'
      }
  }

  pipeline = DetectFirstFailurePipeline()
  pipeline._UpdateFailureInfoBuilds(failed_steps, builds)

  # Build 220 precedes every step's last_pass, so it should be removed.
  expected_builds = {
      '221': {
          'blame_list': ['commit1'],
          'chromium_revision': 'commit1'
      },
      '222': {
          'blame_list': ['commit2'],
          'chromium_revision': 'commit2'
      },
      '223': {
          'blame_list': ['commit3', 'commit4'],
          'chromium_revision': 'commit4'
      }
  }
  self.assertEqual(builds, expected_builds)
def testTestLevelFailedInfo(self):
  """End-to-end run collects test-level failure info and per-build logs."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 223
  self._CreateAndSaveWfAnanlysis(
      master_name, builder_name, build_number, analysis_status.RUNNING)

  # Mock data for retrieving data from swarming server for a build.
  self._MockUrlFetchWithSwarmingData(master_name, builder_name, 223)

  for n in xrange(223, 219, -1):  # pragma: no cover
    # Setup build data for builds:
    self._MockUrlfetchWithBuildData(master_name, builder_name, n)

    if n == 220:
      break

    # Mock data for retrieving data from swarming server for a single step.
    self._MockUrlFetchWithSwarmingData(
        master_name, builder_name, n, 'abc_test')

    # Mock data for retrieving hash to output.json from isolated server.
    isolated_data = {
        'isolatedserver': 'https://isolateserver.appspot.com',
        'namespace': {
            'namespace': 'default-gzip'
        },
        'digest': 'isolatedhashabctest-%d' % n
    }
    self._MockUrlfetchWithIsolatedData(isolated_data, build_number=n)
    # Mock data for retrieving url to output.json from isolated server.
    file_hash_data = {
        'isolatedserver': 'https://isolateserver.appspot.com',
        'namespace': {
            'namespace': 'default-gzip'
        },
        'digest': 'abctestoutputjsonhash-%d' % n
    }
    self._MockUrlfetchWithIsolatedData(file_hash_data, build_number=n)

    # Mock data for downloading output.json from isolated server.
    self._MockUrlfetchWithIsolatedData(
        None,
        ('https://isolateserver.storage.googleapis.com/default-gzip/'
         'm_b_%d_abc_test' % n),
        '%s_%s_%d_%s.json' % (master_name, builder_name, n, 'abc_test'))

  step_221 = WfStep.Create(master_name, builder_name, 221, 'abc_test')
  step_221.isolated = True
  step_221.log_data = (
      '{"Unittest3.Subtest3": "YS9iL3UzczIuY2M6MTEwOiBGYWlsdXJlCg=="}')
  step_221.put()

  pipeline = DetectFirstFailurePipeline()
  failure_info = pipeline.run(master_name, builder_name, build_number)

  expected_failed_steps = {
      'compile': {
          'current_failure': 223,
          'first_failure': 221,
          'last_pass': 220
      },
      'abc_test': {
          'current_failure': 223,
          'first_failure': 222,
          'last_pass': 221,
          'list_isolated_data': [{
              'isolatedserver': 'https://isolateserver.appspot.com',
              'namespace': 'default-gzip',
              'digest': 'isolatedhashabctest-223'
          }],
          'tests': {
              'Unittest2.Subtest1': {
                  'current_failure': 223,
                  'first_failure': 222,
                  'last_pass': 221
              },
              'Unittest3.Subtest2': {
                  'current_failure': 223,
                  'first_failure': 222,
                  'last_pass': 221
              }
          }
      }
  }

  expected_step_log_data = {
      223: ('{"Unittest2.Subtest1": "RVJST1I6eF90ZXN0LmNjOjEyMzRcbmEvYi91Mn'
            'MxLmNjOjU2NzogRmFpbHVyZVxuRVJST1I6WzJdOiAyNTk0NzM1MDAwIGJvZ28tb'
            'Wljcm9zZWNvbmRzXG5FUlJPUjp4X3Rlc3QuY2M6MTIzNAphL2IvdTJzMS5jYzo1'
            'Njc6IEZhaWx1cmUK", '
            '"Unittest3.Subtest2": "YS9iL3UzczIuY2M6MTEwOiBGYWlsdXJlCg=="}'),
      222: ('{"Unittest2.Subtest1": "RVJST1I6eF90ZXN0LmNjOjEyMzRcbmEvYi91Mn'
            'MxLmNjOjU2NzogRmFpbHVyZVxuRVJST1I6WzJdOiAyNTk0NzM1MDAwIGJvZ28tb'
            'Wljcm9zZWNvbmRzXG5FUlJPUjp4X3Rlc3QuY2M6MTIzNAphL2IvdTJzMS5jYzo1'
            'Njc6IEZhaWx1cmUK", '
            '"Unittest3.Subtest2": "YS9iL3UzczIuY2M6MTEwOiBGYWlsdXJlCg=="}'),
      221: '{"Unittest3.Subtest3": "YS9iL3UzczIuY2M6MTEwOiBGYWlsdXJlCg=="}'
  }

  for n in xrange(223, 220, -1):
    step = WfStep.Get(master_name, builder_name, n, 'abc_test')
    self.assertIsNotNone(step)
    self.assertTrue(step.isolated)
    self.assertEqual(expected_step_log_data[n], step.log_data)

  self.assertEqual(expected_failed_steps, failure_info['failed_steps'])