def HandleGet(self):
  """Fetch the log of a failed step as a JSON result."""
  url = self.request.get('url', '')
  step_info = buildbot.ParseStepUrl(url)
  if not step_info:
    return BaseHandler.CreateError(
        'Url "%s" is not pointing to a step.' % url, 501)
  master_name, builder_name, build_number, step_name = step_info

  step = WfStep.Get(master_name, builder_name, build_number, step_name)
  if not step:
    return BaseHandler.CreateError('No failure log available.', 404)

  failure_log = self._GetFormattedJsonLogIfSwarming(step)

  data = {
      'master_name': master_name,
      'builder_name': builder_name,
      'build_number': build_number,
      'step_name': step_name,
      'step_logs': failure_log,
  }
  return {'template': 'failure_log.html', 'data': data}
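# Usage sketch (not from the Findit codebase): the handler above assumes
# buildbot.ParseStepUrl returns a (master, builder, build_number, step)
# tuple for a valid step URL and a falsy value otherwise. The URL below is
# illustrative only.
step_info = buildbot.ParseStepUrl(
    'https://build.chromium.org/p/chromium.linux/builders/Linux%20Tests/'
    'builds/123/steps/browser_tests')
# Expected: ('chromium.linux', 'Linux Tests', 123, 'browser_tests').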
def testGetSignalFromStepLogFlaky(self):
  master_name = 'm'
  builder_name = 'b'
  build_number = 124
  step_name = 'abc_test'

  failure_info = {
      'master_name': 'm',
      'builder_name': 'b',
      'build_number': 124,
      'failed': True,
      'chromium_revision': 'a_git_hash',
      'failed_steps': {
          'abc_test': {
              'last_pass': 123,
              'current_failure': 124,
              'first_failure': 124,
          }
      }
  }

  self.MockGetStdiolog(master_name, builder_name, build_number, step_name)
  self.MockGetGtestJsonResult()

  pipeline = ExtractSignalPipeline()
  signals = pipeline.run(failure_info)

  step = WfStep.Get(master_name, builder_name, build_number, step_name)
  self.assertIsNotNone(step)
  self.assertIsNotNone(step.log_data)
  self.assertEqual('flaky', step.log_data)
  self.assertEqual({}, signals['abc_test']['files'])
def _GetSameStepFromBuild(self, master_name, builder_name, build_number,
                          step_name, http_client):
  """Downloads swarming test results for a step from a previous build."""
  step = WfStep.Get(master_name, builder_name, build_number, step_name)

  if step and step.isolated and step.log_data:
    # Test-level log has been saved for this step.
    return step

  # Sends a request to the swarming server for isolated data.
  step_isolated_data = swarming_util.GetIsolatedDataForStep(
      master_name, builder_name, build_number, step_name, http_client)
  if not step_isolated_data:  # pragma: no cover
    return None

  result_log = swarming_util.RetrieveShardedTestResultsFromIsolatedServer(
      step_isolated_data, http_client)
  if (not result_log or not result_log.get('per_iteration_data') or
      result_log['per_iteration_data'] == 'invalid'):  # pragma: no cover
    return None

  step = WfStep.Create(master_name, builder_name, build_number, step_name)
  step.isolated = True
  self._InitiateTestLevelFirstFailureAndSaveLog(result_log, step)

  return step
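# A minimal, hypothetical sketch of how a caller might use
# _GetSameStepFromBuild to walk backwards through earlier builds until no
# isolated data is available; current_build and last_pass are assumed to be
# known from the failure info.
for build in xrange(current_build - 1, last_pass, -1):
  step = self._GetSameStepFromBuild(master_name, builder_name, build,
                                    step_name, http_client)
  if not step:
    break  # No saved or downloadable data for this build; stop walking back.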
def testGetSignalFromStepLog(self, _):
  master_name = 'm'
  builder_name = 'b'
  build_number = 123
  step_name = 'abc_test'

  # Mock both stdio log and gtest json results to test whether Findit will
  # go to the step log first when both logs exist. (The stdio log appears to
  # be mocked via the test's patch decorator, consumed as the unused `_`
  # argument.)
  self.MockGetGtestJsonResult()
  self._CreateAndSaveWfAnanlysis(master_name, builder_name, build_number)

  pipeline = ExtractSignalPipeline(FAILURE_INFO)
  signals = pipeline.run(FAILURE_INFO)

  step = WfStep.Get(master_name, builder_name, build_number, step_name)

  expected_files = {'a/b/u2s1.cc': [567], 'a/b/u3s2.cc': [110]}

  self.assertIsNotNone(step)
  self.assertIsNotNone(step.log_data)
  self.assertEqual(expected_files, signals['abc_test']['files'])
def _GetTestLevelLogForAStep(master_name, builder_name, build_number,
                             step_name, http_client):
  """Downloads swarming test results for a step from a build and returns logs
  for failed tests.

  Returns:
    A dict of failure logs for each failed test.
  """
  step = WfStep.Get(master_name, builder_name, build_number, step_name)

  if (step and step.isolated and step.log_data and
      step.log_data != constants.TOO_LARGE_LOG):
    # Test-level log has been saved for this step.
    try:
      if step.log_data == constants.FLAKY_FAILURE_LOG:
        return {}
      return json.loads(step.log_data)
    except ValueError:
      logging.error(
          'log_data %s of step %s/%s/%d/%s is not json loadable.' %
          (step.log_data, master_name, builder_name, build_number, step_name))
      return None

  # Sends a request to the swarming server for isolated data.
  step_isolated_data = swarming.GetIsolatedDataForStep(
      master_name, builder_name, build_number, step_name, http_client)
  if not step_isolated_data:
    logging.warning(
        'Failed to get step_isolated_data for build %s/%s/%d/%s.' %
        (master_name, builder_name, build_number, step_name))
    return None

  result_log = swarmed_test_util.RetrieveShardedTestResultsFromIsolatedServer(
      step_isolated_data, http_client)
  test_results = test_results_util.GetTestResultObject(result_log)
  if not test_results:
    logging.warning(
        'Failed to get swarming test results for build %s/%s/%d/%s.' %
        (master_name, builder_name, build_number, step_name))
    return None

  failed_test_log, _ = (
      test_results_service.GetFailedTestsInformationFromTestResult(
          test_results))
  return failed_test_log
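# A minimal sketch of consuming _GetTestLevelLogForAStep's return value.
# Per-test logs are stored base64-encoded (see the base64.b64decode calls in
# ExtractSignalsForTestFailure below), so decode before inspecting them.
# http_client is assumed to be an instance of Findit's HTTP client.
failed_test_log = _GetTestLevelLogForAStep(
    'chromium.linux', 'Linux Tests', 123, 'browser_tests', http_client)
for test_name, encoded_log in (failed_test_log or {}).iteritems():
  print test_name
  print base64.b64decode(encoded_log)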
def _StartTestLevelCheckForFirstFailure(self, master_name, builder_name,
                                        build_number, step_name, failed_step,
                                        http_client):
  """Downloads test results and initiates first failure info at test level."""
  list_isolated_data = failed_step['list_isolated_data']
  result_log = swarming_util.RetrieveShardedTestResultsFromIsolatedServer(
      list_isolated_data, http_client)

  if (not result_log or not result_log.get('per_iteration_data') or
      result_log['per_iteration_data'] == 'invalid'):  # pragma: no cover
    return False

  step = WfStep.Get(master_name, builder_name, build_number, step_name)

  return self._InitiateTestLevelFirstFailureAndSaveLog(
      result_log, step, failed_step)
def _SaveIsolatedResultToStep(master_name, builder_name, build_number,
                              step_name, failed_test_log):
  """Parses the json data and saves all the reliable failures to the step."""
  step = (
      WfStep.Get(master_name, builder_name, build_number, step_name) or
      WfStep.Create(master_name, builder_name, build_number, step_name))

  step.isolated = True
  step.log_data = (
      json.dumps(failed_test_log)
      if failed_test_log else constants.FLAKY_FAILURE_LOG)

  try:
    step.put()
  except (BadRequestError, RequestTooLargeError) as e:
    # The serialized log is too large for datastore; save a marker instead
    # and retry the put.
    step.isolated = True
    step.log_data = constants.TOO_LARGE_LOG
    logging.warning(
        'Failed to save data in %s/%s/%d/%s: %s' %
        (master_name, builder_name, build_number, step_name, e.message))
    step.put()
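# A hedged usage sketch of the function above: an empty failed_test_log is
# treated as "no reliable failures", so the step is marked flaky. Names
# below are illustrative.
ci_test_failure._SaveIsolatedResultToStep('m', 'b', 1, 'a_test', {})
step = WfStep.Get('m', 'b', 1, 'a_test')
assert step.isolated
assert step.log_data == constants.FLAKY_FAILURE_LOG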
def testSaveLogToStepLogTooBig(self):
  master_name = 'm'
  builder_name = 'b'
  build_number = 250
  step_name = 'atest'

  original_step_put = WfStep.put
  calls = []

  def MockNdbTransaction(func, **options):
    # `func` is the WfStep entity (the `self` of the mocked put). Fail the
    # first put() with BadRequestError, then fall back to the original
    # implementation so the retry with TOO_LARGE_LOG succeeds.
    if len(calls) < 1:
      calls.append(1)
      raise datastore_errors.BadRequestError('log_data is too long')
    return original_step_put(func, **options)

  self.mock(WfStep, 'put', MockNdbTransaction)

  ci_test_failure._SaveIsolatedResultToStep(master_name, builder_name,
                                            build_number, step_name, {})
  step = WfStep.Get(master_name, builder_name, build_number, step_name)
  self.assertEqual(step.log_data, constants.TOO_LARGE_LOG)
def testGetSignalFromStepLogInvalid(self):
  master_name = 'm'
  builder_name = 'b'
  build_number = 125
  step_name = 'abc_test'

  failure_info = {
      'master_name': 'm',
      'builder_name': 'b',
      'build_number': 125,
      'failed': True,
      'chromium_revision': 'a_git_hash',
      'failed_steps': {
          'abc_test': {
              'last_pass': 124,
              'current_failure': 125,
              'first_failure': 125,
          }
      }
  }

  self.MockGetStdiolog(master_name, builder_name, build_number, step_name)
  self.MockGetGtestJsonResult()

  pipeline = ExtractSignalPipeline()
  signals = pipeline.run(failure_info)

  step = WfStep.Get(master_name, builder_name, build_number, step_name)

  expected_files = {
      'content/common/gpu/media/v4l2_video_encode_accelerator.cc': [306]
  }

  self.assertIsNotNone(step)
  self.assertIsNotNone(step.log_data)
  self.assertEqual(expected_files, signals['abc_test']['files'])
def ExtractSignalsForCompileFailure(failure_info, http_client):
  signals = {}

  master_name = failure_info.master_name
  builder_name = failure_info.builder_name
  build_number = failure_info.build_number
  step_name = 'compile'

  if step_name not in (failure_info.failed_steps or {}):
    logging.debug(
        'No compile failure found when extracting signals for failed '
        'build %s/%s/%d', master_name, builder_name, build_number)
    return signals

  if not failure_info.failed_steps[step_name].supported:
    # Bail out if the step is not supported.
    logging.info('Findit could not analyze compile failure for master %s.',
                 master_name)
    return signals

  failure_log = None

  # 1. Tries to get stored failure log from step.
  step = (
      WfStep.Get(master_name, builder_name, build_number, step_name) or
      WfStep.Create(master_name, builder_name, build_number, step_name))
  if step.log_data:
    failure_log = step.log_data
  else:
    # 2. Tries to get ninja_output as failure log.
    from_ninja_output = False
    use_ninja_output_log = (
        waterfall_config.GetDownloadBuildDataSettings().get(
            'use_ninja_output_log'))
    if use_ninja_output_log:
      failure_log = step_util.GetWaterfallBuildStepLog(
          master_name, builder_name, build_number, step_name, http_client,
          'json.output[ninja_info]')
      from_ninja_output = True

    if not failure_log:
      # 3. Tries to get stdout log for compile step.
      from_ninja_output = False
      failure_log = extract_signal.GetStdoutLog(
          master_name, builder_name, build_number, step_name, http_client)

    try:
      if not failure_log:
        raise extract_signal.FailedToGetFailureLogError(
            'Failed to pull failure log (stdio or ninja output) of step %s of'
            ' %s/%s/%d' % (step_name, master_name, builder_name, build_number))
    except extract_signal.FailedToGetFailureLogError:
      return {}

    # Save step log in datastore and avoid downloading again during retry.
    step.log_data = extract_signal.ExtractStorablePortionOfLog(
        failure_log, from_ninja_output)

    try:
      step.put()
    except Exception as e:  # pragma: no cover
      # Sometimes, the step log is too large to save in datastore.
      logging.exception(e)

  signals[step_name] = extractors.ExtractSignal(
      master_name,
      builder_name,
      step_name,
      test_name=None,
      failure_log=failure_log).ToDict()

  extract_signal.SaveSignalInAnalysis(master_name, builder_name, build_number,
                                      signals)

  return signals
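# A minimal invocation sketch, assuming failure_info is the deserialized
# output of the failure-detection pipeline; the FinditHttpClient import path
# below is an assumption, not confirmed by these snippets.
from common.findit_http_client import FinditHttpClient  # assumed path

signals = ExtractSignalsForCompileFailure(failure_info, FinditHttpClient())
if signals:
  # Per the tests above, each signal maps suspected files to line numbers.
  print signals['compile'].get('files')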
def run(self, failure_info):
  """Extracts failure signals from failed steps.

  Args:
    failure_info (dict): Output of pipeline DetectFirstFailurePipeline.run().

  Returns:
    A dict like below:
    {
      'step_name1': waterfall.failure_signal.FailureSignal.ToDict(),
      ...
    }
  """
  signals = {}
  if not failure_info['failed'] or not failure_info['chromium_revision']:
    # Bail out if no failed step or no chromium revision.
    return signals

  master_name = failure_info['master_name']
  builder_name = failure_info['builder_name']
  build_number = failure_info['build_number']

  for step_name in failure_info.get('failed_steps', []):
    step = WfStep.Get(master_name, builder_name, build_number, step_name)
    if step and step.log_data:
      failure_log = step.log_data
    else:
      # TODO: do test-level analysis instead of step-level.
      gtest_result = buildbot.GetGtestResultLog(master_name, builder_name,
                                                build_number, step_name)
      if gtest_result:
        failure_log = self._GetReliableTestFailureLog(gtest_result)

      if gtest_result is None or failure_log == 'invalid':
        if not lock_util.WaitUntilDownloadAllowed(
            master_name):  # pragma: no cover
          raise pipeline.Retry('Failed to pull log of step %s of master %s' %
                               (step_name, master_name))
        try:
          failure_log = buildbot.GetStepStdio(master_name, builder_name,
                                              build_number, step_name,
                                              self.HTTP_CLIENT)
        except ResponseTooLargeError:  # pragma: no cover.
          logging.exception('Log of step "%s" is too large for urlfetch.',
                            step_name)
          # If the stdio log of a step is too large, we don't want to pull
          # it again in next run, because that might lead to DDoS to the
          # master.
          # TODO: Use archived stdio logs in Google Storage instead.
          failure_log = 'Stdio log is too large for urlfetch.'

        if not failure_log:  # pragma: no cover
          raise pipeline.Retry('Failed to pull stdio of step %s of master %s' %
                               (step_name, master_name))

      # Save step log in datastore and avoid downloading again during retry.
      if not step:  # pragma: no cover
        step = WfStep.Create(master_name, builder_name, build_number,
                             step_name)

      step.log_data = self._ExtractStorablePortionOfLog(failure_log)

      try:
        step.put()
      except Exception as e:  # pragma: no cover
        # Sometimes, the step log is too large to save in datastore.
        logging.exception(e)

    # TODO: save result in datastore?
    signals[step_name] = extractors.ExtractSignal(
        master_name, builder_name, step_name, None, failure_log).ToDict()

  return signals
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""An example of using Remote API to query datastore on live App Engine."""

import os
import sys

# Append path of Findit root directory to import remote_api.
_FINDIT_DIR = os.path.join(
    os.path.dirname(__file__), os.path.pardir, os.path.pardir)
sys.path.insert(0, _FINDIT_DIR)

# During importing, sys.path will be set up appropriately.
from local_libs import remote_api  # pylint: disable=W

# Set up the Remote API to use services on the live App Engine.
remote_api.EnableFinditRemoteApi()

from model.wf_step import WfStep

step = WfStep.Get('chromium.memory', 'Linux ASan Tests (sandboxed)', 11413,
                  'browser_tests')
with open('/tmp/step.log', 'w') as f:
  f.write(step.log_data)
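# Usage note (assumed, not part of the script): run this locally with the
# App Engine SDK and the Findit checkout on PYTHONPATH so remote_api can
# authenticate against the live app, then inspect the dumped log, e.g.:
#   python dump_step_log.py && less /tmp/step.log
# The script name is illustrative; adjust the WfStep.Get arguments to the
# master/builder/build/step whose log you want to dump.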
def testUpdateSwarmingSteps(self, mock_data):
  master_name = 'm'
  builder_name = 'b'
  build_number = 223
  failed_steps = {
      'a_tests': {
          'current_failure': 2,
          'first_failure': 0,
          'supported': True
      },
      'unit_tests': {
          'current_failure': 2,
          'first_failure': 0,
          'supported': True
      },
      'compile': {
          'current_failure': 2,
          'first_failure': 0,
          'supported': True
      }
  }
  failed_steps = TestFailedSteps.FromSerializable(failed_steps)

  mock_data.return_value = {
      'a_tests': [{
          'isolatedserver': 'https://isolateserver.appspot.com',
          'namespace': 'default-gzip',
          'digest': 'isolatedhashatests'
      }],
      'unit_tests': [{
          'isolatedserver': 'https://isolateserver.appspot.com',
          'namespace': 'default-gzip',
          'digest': 'isolatedhashunittests1'
      }]
  }

  result = ci_test_failure.UpdateSwarmingSteps(master_name, builder_name,
                                               build_number, failed_steps,
                                               None)

  expected_failed_steps = {
      'a_tests': {
          'current_failure': 2,
          'first_failure': 0,
          'supported': True,
          'last_pass': None,
          'tests': None,
          'list_isolated_data': [{
              'digest': 'isolatedhashatests',
              'namespace': 'default-gzip',
              'isolatedserver': (
                  waterfall_config.GetSwarmingSettings().get(
                      'isolated_server'))
          }]
      },
      'unit_tests': {
          'current_failure': 2,
          'first_failure': 0,
          'supported': True,
          'last_pass': None,
          'tests': None,
          'list_isolated_data': [{
              'digest': 'isolatedhashunittests1',
              'namespace': 'default-gzip',
              'isolatedserver': (
                  waterfall_config.GetSwarmingSettings().get(
                      'isolated_server'))
          }]
      },
      'compile': {
          'current_failure': 2,
          'first_failure': 0,
          'last_pass': None,
          'supported': True,
          'tests': None,
          'list_isolated_data': None
      }
  }

  for step_name in failed_steps:
    step = WfStep.Get(master_name, builder_name, build_number, step_name)
    if step_name == 'compile':
      self.assertIsNone(step)
    else:
      self.assertIsNotNone(step)

  self.assertTrue(result)
  self.assertEqual(expected_failed_steps, failed_steps.ToSerializable())
def ExtractSignalsForTestFailure(failure_info, http_client):
  signals = {}

  master_name = failure_info.master_name
  builder_name = failure_info.builder_name
  build_number = failure_info.build_number
  failed_steps = failure_info.failed_steps or {}

  for step_name in failed_steps:
    failure_log = None
    if not failed_steps[step_name].supported:
      # Bail out if the step is not supported.
      continue

    # 1. Tries to get stored failure log from step.
    step = (
        WfStep.Get(master_name, builder_name, build_number, step_name) or
        WfStep.Create(master_name, builder_name, build_number, step_name))
    if step.log_data and step.log_data != constants.TOO_LARGE_LOG:
      failure_log = step.log_data
    else:
      json_formatted_log = True
      # 2. Gets test results.
      list_isolated_data = failed_steps[step_name].list_isolated_data
      list_isolated_data = (
          list_isolated_data.ToSerializable() if list_isolated_data else [])
      merged_test_results = (
          swarmed_test_util.RetrieveShardedTestResultsFromIsolatedServer(
              list_isolated_data, http_client))
      if merged_test_results:
        test_results = test_results_util.GetTestResultObject(
            merged_test_results)
        if test_results:
          failure_log, _ = (
              test_results_service.GetFailedTestsInformationFromTestResult(
                  test_results))
          failure_log = (
              json.dumps(failure_log)
              if failure_log else constants.FLAKY_FAILURE_LOG)
        else:
          failure_log = constants.WRONG_FORMAT_LOG

      if not merged_test_results or failure_log in [
          constants.INVALID_FAILURE_LOG, constants.WRONG_FORMAT_LOG
      ]:
        # 3. Gets stdout log.
        json_formatted_log = False
        failure_log = extract_signal.GetStdoutLog(
            master_name, builder_name, build_number, step_name, http_client)

      try:
        if not failure_log:
          raise extract_signal.FailedToGetFailureLogError(
              'Failed to pull failure log (stdio or ninja output) of step %s'
              ' of %s/%s/%d' %
              (step_name, master_name, builder_name, build_number))
      except extract_signal.FailedToGetFailureLogError:
        return {}

      # Save step log in datastore and avoid downloading again during retry.
      step.log_data = (
          extract_signal.ExtractStorablePortionOfLog(
              failure_log, json_formatted_log)
          if step.log_data != constants.TOO_LARGE_LOG else step.log_data)
      step.isolated = step.isolated or json_formatted_log

      try:
        step.put()
      except Exception as e:  # pragma: no cover
        # Sometimes, the step log is too large to save in datastore.
        logging.exception(e)

    if step.isolated:
      try:
        json_failure_log = (
            json.loads(failure_log)
            if failure_log != constants.FLAKY_FAILURE_LOG else {})
      except ValueError:
        json_failure_log = {}
        logging.warning('failure_log %s is not valid JSON.' % failure_log)

      signals[step_name] = {'tests': {}}
      step_signal = FailureSignal()

      for test_name, test_failure_log in json_failure_log.iteritems():
        signals[step_name]['tests'][test_name] = extractors.ExtractSignal(
            master_name, builder_name, step_name, test_name,
            base64.b64decode(test_failure_log)).ToDict()

        # Save signals in test failure log to step level.
        step_signal.MergeFrom(signals[step_name]['tests'][test_name])

      signals[step_name]['files'] = step_signal.files
    else:
      signals[step_name] = extractors.ExtractSignal(
          master_name, builder_name, step_name, None, failure_log).ToDict()

  return signals
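# For reference, a hedged sketch of the shape of the signals returned above
# for an isolated (test-level) step; names and values are illustrative:
# {
#   'browser_tests': {
#     'tests': {
#       'Suite.Test': {'files': {'a/b/c.cc': [123]}, ...},
#     },
#     'files': {'a/b/c.cc': [123]},  # merged from all failed tests
#   },
# }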
def testGetIsolatedDataForFailedBuild(self):
  master_name = 'm'
  builder_name = 'b'
  build_number = 223
  failed_steps = {
      'a_tests': {
          'current_failure': 2,
          'first_failure': 0
      },
      'unit_tests': {
          'current_failure': 2,
          'first_failure': 0
      },
      'compile': {
          'current_failure': 2,
          'first_failure': 0
      }
  }

  self.http_client._SetResponseForGetRequestSwarmingList(
      master_name, builder_name, build_number)
  result = swarming_util.GetIsolatedDataForFailedBuild(
      master_name, builder_name, build_number, failed_steps,
      self.http_client)

  expected_failed_steps = {
      'a_tests': {
          'current_failure': 2,
          'first_failure': 0,
          'list_isolated_data': [{
              'digest': 'isolatedhashatests',
              'namespace': 'default-gzip',
              'isolatedserver': (
                  waterfall_config.GetSwarmingSettings().get(
                      'isolated_server'))
          }]
      },
      'unit_tests': {
          'current_failure': 2,
          'first_failure': 0,
          'list_isolated_data': [{
              'digest': 'isolatedhashunittests1',
              'namespace': 'default-gzip',
              'isolatedserver': (
                  waterfall_config.GetSwarmingSettings().get(
                      'isolated_server'))
          }]
      },
      'compile': {
          'current_failure': 2,
          'first_failure': 0
      }
  }

  for step_name in failed_steps:
    step = WfStep.Get(master_name, builder_name, build_number, step_name)
    if step_name == 'compile':
      self.assertIsNone(step)
    else:
      self.assertIsNotNone(step)

  self.assertTrue(result)
  self.assertEqual(expected_failed_steps, failed_steps)
def testTestLevelFailedInfo(self, mock_fn):
  master_name = 'm'
  builder_name = 'b'
  build_number = 223

  self._CreateAndSaveWfAnanlysis(master_name, builder_name, build_number,
                                 analysis_status.RUNNING)

  # Mock data for retrieving data from swarming server for a build.
  self._MockUrlFetchWithSwarmingData(master_name, builder_name, 223)
  mock_fn.side_effect = [
      self._GetBuildData(master_name, builder_name, 223),
      self._GetBuildData(master_name, builder_name, 222),
      self._GetBuildData(master_name, builder_name, 221),
      self._GetBuildData(master_name, builder_name, 220)
  ]

  for n in xrange(223, 219, -1):  # pragma: no branch.
    # Setup build data for builds:
    if n == 220:
      break

    # Mock data for retrieving data from swarming server for a single step.
    self._MockUrlFetchWithSwarmingData(master_name, builder_name, n,
                                       'abc_test')

    # Mock data for retrieving hash to output.json from isolated server.
    isolated_data = {
        'isolatedserver': 'https://isolateserver.appspot.com',
        'namespace': {
            'namespace': 'default-gzip'
        },
        'digest': 'isolatedhashabctest-%d' % n
    }
    self._MockUrlfetchWithIsolatedData(isolated_data, build_number=n)
    # Mock data for retrieving url to output.json from isolated server.
    file_hash_data = {
        'isolatedserver': 'https://isolateserver.appspot.com',
        'namespace': {
            'namespace': 'default-gzip'
        },
        'digest': 'abctestoutputjsonhash-%d' % n
    }
    self._MockUrlfetchWithIsolatedData(file_hash_data, build_number=n)

    # Mock data for downloading output.json from isolated server.
    self._MockUrlfetchWithIsolatedData(
        None, ('https://isolateserver.storage.googleapis.com/default-gzip/'
               'm_b_%d_abc_test' % n),
        '%s_%s_%d_%s.json' % (master_name, builder_name, n, 'abc_test'))

  step_221 = WfStep.Create(master_name, builder_name, 221, 'abc_test')
  step_221.isolated = True
  step_221.log_data = (
      '{"Unittest3.Subtest3": "YS9iL3UzczIuY2M6MTEwOiBGYWlsdXJlCg=="}')
  step_221.put()

  pipeline = DetectFirstFailurePipeline()
  failure_info = pipeline.run(master_name, builder_name, build_number)

  expected_failed_steps = {
      'abc_test': {
          'current_failure': 223,
          'first_failure': 222,
          'last_pass': 221,
          'list_isolated_data': [{
              'isolatedserver': 'https://isolateserver.appspot.com',
              'namespace': 'default-gzip',
              'digest': 'isolatedhashabctest-223'
          }],
          'tests': {
              'Unittest2.Subtest1': {
                  'current_failure': 223,
                  'first_failure': 222,
                  'last_pass': 221,
                  'base_test_name': 'Unittest2.Subtest1'
              },
              'Unittest3.Subtest2': {
                  'current_failure': 223,
                  'first_failure': 222,
                  'last_pass': 221,
                  'base_test_name': 'Unittest3.Subtest2'
              }
          }
      }
  }

  expected_step_log_data = {
      223: ('{"Unittest2.Subtest1": "RVJST1I6eF90ZXN0LmNjOjEyMzRcbmEvYi91Mn'
            'MxLmNjOjU2NzogRmFpbHVyZVxuRVJST1I6WzJdOiAyNTk0NzM1MDAwIGJvZ28tb'
            'Wljcm9zZWNvbmRzXG5FUlJPUjp4X3Rlc3QuY2M6MTIzNAphL2IvdTJzMS5jYzo1'
            'Njc6IEZhaWx1cmUK", '
            '"Unittest3.Subtest2": "YS9iL3UzczIuY2M6MTEwOiBGYWlsdXJlCg=="}'),
      222: ('{"Unittest2.Subtest1": "RVJST1I6eF90ZXN0LmNjOjEyMzRcbmEvYi91Mn'
            'MxLmNjOjU2NzogRmFpbHVyZVxuRVJST1I6WzJdOiAyNTk0NzM1MDAwIGJvZ28tb'
            'Wljcm9zZWNvbmRzXG5FUlJPUjp4X3Rlc3QuY2M6MTIzNAphL2IvdTJzMS5jYzo1'
            'Njc6IEZhaWx1cmUK", '
            '"Unittest3.Subtest2": "YS9iL3UzczIuY2M6MTEwOiBGYWlsdXJlCg=="}'),
      221: '{"Unittest3.Subtest3": "YS9iL3UzczIuY2M6MTEwOiBGYWlsdXJlCg=="}'
  }

  for n in xrange(223, 220, -1):
    step = WfStep.Get(master_name, builder_name, n, 'abc_test')
    self.assertIsNotNone(step)
    self.assertTrue(step.isolated)
    self.assertEqual(expected_step_log_data[n], step.log_data)

  self.assertEqual(expected_failed_steps, failure_info['failed_steps'])
def run(self, failure_info):
  """Extracts failure signals from failed steps.

  Args:
    failure_info (dict): Output of pipeline DetectFirstFailurePipeline.run().

  Returns:
    A dict like below:
    {
      'step_name1': waterfall.failure_signal.FailureSignal.ToDict(),
      ...
    }
  """
  signals = {}
  if not failure_info['failed'] or not failure_info['chromium_revision']:
    # Bail out if no failed step or no chromium revision.
    return signals

  # Bail out on infra failure.
  if failure_info.get('failure_type') == failure_type.INFRA:
    return signals

  master_name = failure_info['master_name']
  builder_name = failure_info['builder_name']
  build_number = failure_info['build_number']

  for step_name in failure_info.get('failed_steps', []):
    if not waterfall_config.StepIsSupportedForMaster(step_name, master_name):
      # Bail out if the step is not supported.
      continue

    step = WfStep.Get(master_name, builder_name, build_number, step_name)
    if step and step.log_data:
      failure_log = step.log_data
    else:
      # TODO: do test-level analysis instead of step-level.
      # TODO: Use swarming test result instead of archived gtest results.
      gtest_result = buildbot.GetGtestResultLog(master_name, builder_name,
                                                build_number, step_name)
      if gtest_result:
        failure_log = _GetReliableTestFailureLog(gtest_result)

      if gtest_result is None or failure_log == 'invalid':
        if not lock_util.WaitUntilDownloadAllowed(
            master_name):  # pragma: no cover
          raise pipeline.Retry('Failed to pull log of step %s of master %s' %
                               (step_name, master_name))
        try:
          failure_log = buildbot.GetStepLog(master_name, builder_name,
                                            build_number, step_name,
                                            self.HTTP_CLIENT)
        except ResponseTooLargeError:  # pragma: no cover.
          logging.exception('Log of step "%s" is too large for urlfetch.',
                            step_name)
          # If the stdio log of a step is too large, we don't want to pull
          # it again in next run, because that might lead to DDoS to the
          # master.
          # TODO: Use archived stdio logs in Google Storage instead.
          failure_log = 'Stdio log is too large for urlfetch.'

        if not failure_log:  # pragma: no cover
          raise pipeline.Retry('Failed to pull stdio of step %s of master %s' %
                               (step_name, master_name))

      # Save step log in datastore and avoid downloading again during retry.
      if not step:  # pragma: no cover
        step = WfStep.Create(master_name, builder_name, build_number,
                             step_name)

      step.log_data = _ExtractStorablePortionOfLog(failure_log)

      try:
        step.put()
      except Exception as e:  # pragma: no cover
        # Sometimes, the step log is too large to save in datastore.
        logging.exception(e)

    # TODO: save result in datastore?
    if step.isolated:
      try:
        json_failure_log = (
            json.loads(failure_log) if failure_log != 'flaky' else {})
      except ValueError:  # pragma: no cover
        json_failure_log = {}
        logging.warning('failure_log %s is not valid JSON.' % failure_log)

      signals[step_name] = {'tests': {}}
      step_signal = FailureSignal()

      for test_name, test_failure_log in json_failure_log.iteritems():
        signals[step_name]['tests'][test_name] = extractors.ExtractSignal(
            master_name, builder_name, step_name, test_name,
            base64.b64decode(test_failure_log)).ToDict()

        # Save signals in test failure log to step level.
        step_signal.MergeFrom(signals[step_name]['tests'][test_name])

      signals[step_name]['files'] = step_signal.files
      signals[step_name]['keywords'] = step_signal.keywords
    else:
      signals[step_name] = extractors.ExtractSignal(
          master_name, builder_name, step_name, None, failure_log).ToDict()

  return signals