Example 1
def GetIsolatedDataForFailedBuild(
    master_name, builder_name, build_number, failed_steps, http_client):
  """Checks failed step_names in swarming log for the build.

  Searches each failed step_name to identify swarming/non-swarming tests
  and keeps track of isolated data for each failed swarming step.
  """
  data = ListSwarmingTasksDataByTags(
      master_name, builder_name, build_number, http_client)
  if not data:
    return False

  tag_name = 'stepname'
  build_isolated_data = defaultdict(list)
  for item in data:
    if item['failure'] and not item['internal_failure']:
      # Only retrieve test results from tasks that failed, excluding
      # internal infrastructure failures.
      swarming_step_name = GetTagValue(item['tags'], tag_name)
      if swarming_step_name in failed_steps and item.get('outputs_ref'):
        isolated_data = _GenerateIsolatedData(item['outputs_ref'])
        build_isolated_data[swarming_step_name].append(isolated_data)

  new_steps = []
  for step_name in build_isolated_data:
    failed_steps[step_name]['list_isolated_data'] = (
        build_isolated_data[step_name])

    # Create WfStep object for all the failed steps.
    step = WfStep.Create(master_name, builder_name, build_number, step_name)
    step.isolated = True
    new_steps.append(step)

  ndb.put_multi(new_steps)
  return True
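
Example 1 leans on GetTagValue to recover the step name from the swarming task's tags. A minimal sketch of such a helper, assuming swarming tags are plain 'key:value' strings (the actual Findit implementation may differ):

def GetTagValue(tags, tag_name):
  # Return the value of the first 'key:value' tag whose key matches tag_name,
  # or None if no such tag exists.
  prefix = '%s:' % tag_name
  for tag in tags:
    if tag.startswith(prefix):
      return tag[len(prefix):]
  return None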
Example 2
def UpdateSwarmingSteps(master_name, builder_name, build_number, failed_steps,
                        http_client):
    """Updates swarming steps based on swarming task data.

  Searches each failed step_name to identify swarming/non-swarming steps and
  updates failed swarming steps for isolated data.
  Also creates and saves swarming steps in datastore.
  """
    build_isolated_data = swarming.GetIsolatedDataForFailedStepsInABuild(
        master_name, builder_name, build_number, failed_steps, http_client)

    if not build_isolated_data:
        return False

    new_steps = []
    for step_name in build_isolated_data:
        failed_steps[step_name].list_isolated_data = (
            IsolatedDataList.FromSerializable(build_isolated_data[step_name]))

        # Create WfStep object for all the failed steps.
        step = WfStep.Create(master_name, builder_name, build_number,
                             step_name)
        step.isolated = True
        new_steps.append(step)

    ndb.put_multi(new_steps)
    return True
Example 3
    def _GetSameStepFromBuild(self, master_name, builder_name, build_number,
                              step_name, http_client):
        """Downloads swarming test results for a step from previous build."""
        step = WfStep.Get(master_name, builder_name, build_number, step_name)

        if step and step.isolated and step.log_data:
            # Test level log has been saved for this step.
            return step

        # Sends request to swarming server for isolated data.
        step_isolated_data = swarming_util.GetIsolatedDataForStep(
            master_name, builder_name, build_number, step_name, http_client)

        if not step_isolated_data:  # pragma: no cover
            return None

        result_log = swarming_util.RetrieveShardedTestResultsFromIsolatedServer(
            step_isolated_data, http_client)

        if (not result_log or not result_log.get('per_iteration_data')
                or result_log['per_iteration_data']
                == 'invalid'):  # pragma: no cover
            return None

        step = WfStep.Create(master_name, builder_name, build_number,
                             step_name)
        step.isolated = True
        self._InitiateTestLevelFirstFailureAndSaveLog(result_log, step)

        return step
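
For reference, the isolated data fetched by the helper above is handled throughout these examples as a list of dicts with the shape below (values copied from the test fixtures further down this page); this list is what gets passed to RetrieveShardedTestResultsFromIsolatedServer:

step_isolated_data = [{
    'isolatedserver': 'https://isolateserver.appspot.com',
    'namespace': 'default-gzip',
    'digest': 'isolatedhashabctest-223'
}]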
Example 4
    def testFailureLogFetched(self):
        master_name = 'm'
        builder_name = 'b 1'
        build_number = 123
        step_name = 'compile'
        step_url = CreateStdioLogUrl(master_name, builder_name, build_number,
                                     step_name)

        step_log = WfStep.Create(master_name, builder_name, build_number,
                                 step_name)
        step_log.log_data = 'Log has been successfully fetched!'
        step_log.put()

        self.mock_current_user(user_email='*****@*****.**', is_admin=True)

        response = self.test_app.get('/failure-log',
                                     params={
                                         'url': step_url,
                                         'format': 'json'
                                     })
        expected_response = {
            'master_name': 'm',
            'builder_name': 'b 1',
            'build_number': 123,
            'step_name': 'compile',
            'step_logs': 'Log has been successfully fetched!'
        }

        self.assertEqual(200, response.status_int)
        self.assertEqual(expected_response, response.json_body)
Example 5
    def testBackwardTraverseBuildsWhenGettingTestLevelFailureInfo(
            self, mock_fun, *_):
        master_name = 'm'
        builder_name = 'b'
        build_number = 221
        step_name = 'abc_test'
        failed_steps = {
            'abc_test': {
                'current_failure': 223,
                'first_failure': 223,
                'supported': True,
                'list_isolated_data': [{
                    'isolatedserver': 'https://isolateserver.appspot.com',
                    'namespace': 'default-gzip',
                    'digest': 'isolatedhashabctest-223'
                }]
            }
        }
        builds = {
            '221': {
                'blame_list': ['commit1'],
                'chromium_revision': 'commit1'
            },
            '222': {
                'blame_list': ['commit2'],
                'chromium_revision': 'commit2'
            },
            '223': {
                'blame_list': ['commit3', 'commit4'],
                'chromium_revision': 'commit4'
            }
        }

        failure_info = {
            'master_name': master_name,
            'builder_name': builder_name,
            'build_number': build_number,
            'failed_steps': failed_steps,
            'builds': builds
        }
        failure_info = TestFailureInfo.FromSerializable(failure_info)

        expected_failed_steps = failed_steps
        expected_failed_steps['abc_test']['tests'] = None
        expected_failed_steps['abc_test']['last_pass'] = None
        step = WfStep.Create(master_name, builder_name, build_number,
                             step_name)
        step.isolated = True
        step.put()

        ci_test_failure.CheckFirstKnownFailureForSwarmingTests(
            master_name, builder_name, build_number, failure_info)
        mock_fun.assert_called_once_with(
            master_name, builder_name, build_number, step_name,
            TestFailedStep.FromSerializable(failed_steps[step_name]),
            ['223', '222', '221'], None)
Example 6
  def testExtractSignalsForTestsFlaky(self):
    master_name = 'm'
    builder_name = 'b'
    build_number = 223

    failure_info = {
        'master_name': master_name,
        'builder_name': builder_name,
        'build_number': build_number,
        'failed': True,
        'chromium_revision': 'a_git_hash',
        'failed_steps': {
            'abc_test': {
                'last_pass': 221,
                'current_failure': 223,
                'first_failure': 222,
                'tests': {
                    'Unittest2.Subtest1': {
                        'current_failure': 223,
                        'first_failure': 222,
                        'last_pass': 221
                    },
                    'Unittest3.Subtest2': {
                        'current_failure': 223,
                        'first_failure': 222,
                        'last_pass': 221
                    }
                }
            }
        }
    }

    step = WfStep.Create(master_name, builder_name, build_number, 'abc_test')
    step.isolated = True
    step.log_data = 'flaky'
    step.put()

    expected_signals = {
        'abc_test': {
            'files': {},
            'keywords': {},
            'tests': {}
        }
    }

    self._CreateAndSaveWfAnanlysis(
        master_name, builder_name, build_number)

    pipeline = ExtractSignalPipeline()
    signals = pipeline.run(failure_info)
    self.assertEqual(expected_signals, signals)
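
Example 6 depends on the 'flaky' sentinel stored in step.log_data: when every failing test had a passing retry, the saved log is just that string and signal extraction yields empty file/keyword/test maps. A tiny illustration of that sentinel handling (the helper name here is hypothetical; the real pipelines inline this check, see Examples 23 and 24):

import json

def _LoadTestFailureLog(log_data):
  # Mirrors the pipeline branch: the 'flaky' sentinel means there are no
  # reliable failures, so there is nothing to parse.
  return {} if log_data == 'flaky' else json.loads(log_data)

assert _LoadTestFailureLog('flaky') == {}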
Example 7
    def testCheckFirstKnownFailureForSwarmingTestsFoundFlaky(
            self, mock_module):
        master_name = 'm'
        builder_name = 'b'
        build_number = 221
        step_name = 'abc_test'
        failed_steps = {
            'abc_test': {
                'current_failure': 221,
                'first_failure': 221,
                'list_isolated_data': [{
                    'isolatedserver': 'https://isolateserver.appspot.com',
                    'namespace': 'default-gzip',
                    'digest': 'isolatedhashabctest-223'
                }]
            }
        }
        builds = {
            '221': {
                'blame_list': ['commit1'],
                'chromium_revision': 'commit1'
            },
            '222': {
                'blame_list': ['commit2'],
                'chromium_revision': 'commit2'
            },
            '223': {
                'blame_list': ['commit3', 'commit4'],
                'chromium_revision': 'commit4'
            }
        }
        expected_failed_steps = failed_steps
        step = WfStep.Create(master_name, builder_name, build_number,
                             step_name)
        step.isolated = True
        step.put()

        mock_module.GetIsolatedDataForFailedBuild.return_value = True
        mock_module.RetrieveShardedTestResultsFromIsolatedServer.return_value = (
            json.loads(
                self._GetSwarmingData('isolated-plain',
                                      'm_b_223_abc_test_flaky.json')))

        pipeline = DetectFirstFailurePipeline()
        pipeline._CheckFirstKnownFailureForSwarmingTests(
            master_name, builder_name, build_number, failed_steps, builds)

        self.assertEqual(expected_failed_steps, failed_steps)
Example 8
  def testWfStepStdioLogNotDownloadedYet(self, _):
    master_name = 'm'
    builder_name = 'b'
    build_number = 123
    step_name = 'abc_test'

    self._CreateAndSaveWfAnanlysis(
        master_name, builder_name, build_number)

    pipeline = ExtractSignalPipeline(FAILURE_INFO)
    pipeline.start()
    self.execute_queued_tasks()

    step = WfStep.Create(master_name, builder_name, build_number, step_name)
    self.assertIsNotNone(step)
Example 9
  def testWfStepStdioLogAlreadyDownloaded(self, _):
    master_name = 'm'
    builder_name = 'b'
    build_number = 123
    step_name = 'abc_test'
    step = WfStep.Create(master_name, builder_name, build_number, step_name)
    step.log_data = ABC_TEST_FAILURE_LOG
    step.put()

    self._CreateAndSaveWfAnanlysis(
        master_name, builder_name, build_number)

    pipeline = ExtractSignalPipeline(FAILURE_INFO)
    signals = pipeline.run(FAILURE_INFO)

    self.assertEqual(FAILURE_SIGNALS, signals)
Example 10
    def testGetLogForTheSameStepFromBuildNotNotJsonLoadable(self):
        master_name = 'm'
        builder_name = 'b'
        build_number = 121
        step_name = 'atest'

        step = WfStep.Create(master_name, builder_name, build_number,
                             step_name)
        step.isolated = True
        step.log_data = 'log'
        step.put()

        self.assertIsNone(
            ci_test_failure._GetTestLevelLogForAStep(master_name, builder_name,
                                                     build_number, step_name,
                                                     None))
Example 11
    def testWfStepStdioLogNotDownloadedYet(self):
        master_name = 'm'
        builder_name = 'b'
        build_number = 123
        step_name = 'abc_test'

        self.MockGetStdiolog(master_name, builder_name, build_number,
                             step_name)

        pipeline = ExtractSignalPipeline(self.FAILURE_INFO)
        pipeline.start()
        self.execute_queued_tasks()

        step = WfStep.Create(master_name, builder_name, build_number,
                             step_name)
        self.assertIsNotNone(step)
Example 12
    def testUpdateFirstFailureOnTestLevelFlaky(self):
        master_name = 'm'
        builder_name = 'b'
        build_number = 223
        step_name = 'abc_test'
        failed_step = {
            'current_failure': 223,
            'first_failure': 221,
            'supported': True,
            'tests': {
                'Unittest2.Subtest1': {
                    'current_failure': 223,
                    'first_failure': 223,
                    'last_pass': 223,
                    'base_test_name': 'Unittest2.Subtest1'
                }
            }
        }
        failed_step = TestFailedStep.FromSerializable(failed_step)
        step = WfStep.Create(master_name, builder_name, 222, step_name)
        step.isolated = True
        step.log_data = 'flaky'
        step.put()

        ci_test_failure._UpdateFirstFailureOnTestLevel(master_name,
                                                       builder_name,
                                                       build_number, step_name,
                                                       failed_step,
                                                       [223, 222, 221],
                                                       FinditHttpClient())

        expected_failed_step = {
            'current_failure': 223,
            'first_failure': 223,
            'last_pass': 222,
            'supported': True,
            'list_isolated_data': None,
            'tests': {
                'Unittest2.Subtest1': {
                    'current_failure': 223,
                    'first_failure': 223,
                    'last_pass': 222,
                    'base_test_name': 'Unittest2.Subtest1'
                }
            }
        }
        self.assertEqual(expected_failed_step, failed_step.ToSerializable())
Example 13
    def testGetFormattedJsonLogIfSwarming(self):
        master_name = 'm'
        builder_name = 'b'
        build_number = 123
        step_name = 'browser_test'
        step_url = CreateStdioLogUrl(master_name, builder_name, build_number,
                                     step_name)

        step_log = WfStep.Create(master_name, builder_name, build_number,
                                 step_name)
        step_log.isolated = True
        step_log.log_data = (
            '{"Unittest2.Subtest1": "RVJST1I6eF90ZXN0LmNjOjEyMzQKYS9iL3Uy'
            'czEuY2M6NTY3OiBGYWlsdXJlCkVSUk9SOlsyXTogMjU5NDczNTAwMCBib2dvLW1pY3Jv'
            'c2Vjb25kcwpFUlJPUjp4X3Rlc3QuY2M6MTIzNAphL2IvdTJzMS5jYzo1Njc6IE'
            'ZhaWx1cmUK", '
            '"Unittest3.Subtest2": "YS9iL3UzczIuY2M6MTEwOiBGYWlsdXJlCg=="}')
        step_log.put()

        self.mock_current_user(user_email='*****@*****.**', is_admin=True)

        response = self.test_app.get('/failure-log',
                                     params={
                                         'url': step_url,
                                         'format': 'json'
                                     })
        expected_response = {
            'master_name': 'm',
            'builder_name': 'b',
            'build_number': 123,
            'step_name': 'browser_test',
            'step_logs': (
                '{\n    "Unittest2.Subtest1": "ERROR:x_test.cc:1234'
                '\n        a/b/u2s1.cc:567: Failure\n        '
                'ERROR:[2]: 2594735000 bogo-microseconds\n        '
                'ERROR:x_test.cc:1234\n        a/b/u2s1.cc:567: Failure'
                '\n        ", \n    "Unittest3.Subtest2": '
                '"a/b/u3s2.cc:110: Failure\n        "\n}')
        }
        self.assertEqual(200, response.status_int)
        self.assertEqual(expected_response, response.json_body)
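
The expected step_logs above are just the base64 snippets from log_data decoded and re-indented by the handler. One entry can be verified in isolation with the standard library (the handler's exact pretty-printing is not reproduced here):

import base64

encoded = 'YS9iL3UzczIuY2M6MTEwOiBGYWlsdXJlCg=='
print(base64.b64decode(encoded))  # -> 'a/b/u3s2.cc:110: Failure\n'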
Example 14
def _SaveIsolatedResultToStep(master_name, builder_name, build_number,
                              step_name, failed_test_log):
    """Parses the json data and saves all the reliable failures to the step."""
    step = (WfStep.Get(master_name, builder_name, build_number, step_name) or
            WfStep.Create(master_name, builder_name, build_number, step_name))

    step.isolated = True
    step.log_data = json.dumps(
        failed_test_log) if failed_test_log else constants.FLAKY_FAILURE_LOG
    try:
        step.put()
    except (BadRequestError, RequestTooLargeError) as e:
        step.isolated = True
        step.log_data = constants.TOO_LARGE_LOG
        logging.warning(
            'Failed to save data in %s/%s/%d/%s: %s' %
            (master_name, builder_name, build_number, step_name, e.message))
        step.put()
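
An illustrative call for the helper above (the master/builder/step names are hypothetical, and an active ndb context such as a unit-test testbed is assumed):

failed_test_log = {
    'Unittest2.Subtest1': 'a/b/u2s1.cc:567: Failure'
}
_SaveIsolatedResultToStep('m', 'b', 123, 'abc_test', failed_test_log)

step = WfStep.Get('m', 'b', 123, 'abc_test')
assert step.isolated
assert json.loads(step.log_data) == failed_test_log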
Example 15
    def testUpdateFirstFailureOnTestLevelFlaky(self):
        master_name = 'm'
        builder_name = 'b'
        build_number = 223
        step_name = 'abc_test'
        failed_step = {
            'current_failure': 223,
            'first_failure': 221,
            'tests': {
                'Unittest2.Subtest1': {
                    'current_failure': 223,
                    'first_failure': 223,
                    'last_pass': 223,
                    'base_test_name': 'Unittest2.Subtest1'
                }
            }
        }
        step = WfStep.Create(master_name, builder_name, 222, step_name)
        step.isolated = True
        step.log_data = 'flaky'
        step.put()

        pipeline = DetectFirstFailurePipeline()
        pipeline._UpdateFirstFailureOnTestLevel(master_name, builder_name,
                                                build_number, step_name,
                                                failed_step,
                                                HttpClientAppengine())

        expected_failed_step = {
            'current_failure': 223,
            'first_failure': 223,
            'last_pass': 222,
            'tests': {
                'Unittest2.Subtest1': {
                    'current_failure': 223,
                    'first_failure': 223,
                    'last_pass': 222,
                    'base_test_name': 'Unittest2.Subtest1'
                }
            }
        }
        self.assertEqual(expected_failed_step, failed_step)
Example 16
    def testWfStepStdioLogAlreadyDownloaded(self):
        master_name = 'm'
        builder_name = 'b'
        build_number = 123
        step_name = 'abc_test'
        step = WfStep.Create(master_name, builder_name, build_number,
                             step_name)
        step.log_data = self.ABC_TEST_FAILURE_LOG
        step.put()

        step_log_url = buildbot.CreateStdioLogUrl(master_name, builder_name,
                                                  build_number, step_name)
        with self.mock_urlfetch() as urlfetch:
            urlfetch.register_handler(step_log_url,
                                      'If used, test should fail!')

        pipeline = ExtractSignalPipeline(self.FAILURE_INFO)
        signals = pipeline.run(self.FAILURE_INFO)

        self.assertEqual(self.FAILURE_SIGNALS, signals)
Example 17
  def testCompileStepSignalFromCachedStepLog(self):
    master_name = 'm'
    builder_name = 'b'
    build_number = 123
    step_name = 'compile'

    step = WfStep.Create(master_name, builder_name, build_number, step_name)
    step.log_data = _NINJA_OUTPUT_JSON
    step.put()

    self._CreateAndSaveWfAnanlysis(master_name, builder_name, build_number)

    signals = extract_compile_signal.ExtractSignalsForCompileFailure(
        CompileFailureInfo.FromSerializable(_COMPILE_FAILURE_INFO), None)

    expected_failed_edges = [{
        'output_nodes': ['a/b.o'],
        'rule': 'CXX',
        'dependencies': ['b.h', 'b.c']
    }]

    self.assertEqual(expected_failed_edges, signals['compile']['failed_edges'])
Example 18
    def testAnalyzeSwarmingTestResultsInitiateLastPassForTests(self):
        json_data = json.loads(
            self._GetSwarmingData('isolated-plain', 'm_b_223_abc_test.json'))

        step = WfStep.Create('m', 'b', 223, 'abc_test')
        step.isolated = True
        step.put()

        failed_step = {
            'current_failure': 223,
            'first_failure': 221,
            'tests': {}
        }

        pipeline = DetectFirstFailurePipeline()
        pipeline._InitiateTestLevelFirstFailureAndSaveLog(
            json_data, step, failed_step)

        expected_failed_step = {
            'current_failure': 223,
            'first_failure': 221,
            'tests': {
                'Unittest2.Subtest1': {
                    'current_failure': 223,
                    'first_failure': 223,
                    'base_test_name': 'Unittest2.Subtest1'
                },
                'Unittest3.Subtest2': {
                    'current_failure': 223,
                    'first_failure': 223,
                    'base_test_name': 'Unittest3.Subtest2'
                }
            }
        }

        self.assertEqual(expected_failed_step, failed_step)
Example 19
def ExtractSignalsForCompileFailure(failure_info, http_client):
    signals = {}

    master_name = failure_info.master_name
    builder_name = failure_info.builder_name
    build_number = failure_info.build_number
    step_name = 'compile'

    if step_name not in (failure_info.failed_steps or {}):
        logging.debug(
            'No compile failure found when extracting signals for failed '
            'build %s/%s/%d', master_name, builder_name, build_number)
        return signals

    if not failure_info.failed_steps[step_name].supported:
        # Bail out if the step is not supported.
        logging.info('Findit could not analyze compile failure for master %s.',
                     master_name)
        return signals

    failure_log = None

    # 1. Tries to get stored failure log from step.
    step = (WfStep.Get(master_name, builder_name, build_number, step_name) or
            WfStep.Create(master_name, builder_name, build_number, step_name))
    if step.log_data:
        failure_log = step.log_data
    else:
        # 2. Tries to get ninja_output as failure log.
        from_ninja_output = False
        use_ninja_output_log = (waterfall_config.GetDownloadBuildDataSettings(
        ).get('use_ninja_output_log'))
        if use_ninja_output_log:
            failure_log = step_util.GetWaterfallBuildStepLog(
                master_name, builder_name, build_number, step_name,
                http_client, 'json.output[ninja_info]')
            from_ninja_output = True

        if not failure_log:
            # 3. Tries to get stdout log for compile step.
            from_ninja_output = False
            failure_log = extract_signal.GetStdoutLog(master_name,
                                                      builder_name,
                                                      build_number, step_name,
                                                      http_client)

        try:
            if not failure_log:
                raise extract_signal.FailedToGetFailureLogError(
                    'Failed to pull failure log (stdio or ninja output) of step %s of'
                    ' %s/%s/%d' %
                    (step_name, master_name, builder_name, build_number))
        except extract_signal.FailedToGetFailureLogError:
            return {}

        # Save step log in datastore and avoid downloading again during retry.
        step.log_data = extract_signal.ExtractStorablePortionOfLog(
            failure_log, from_ninja_output)
        try:
            step.put()
        except Exception as e:  # pragma: no cover
            # Sometimes, the step log is too large to save in datastore.
            logging.exception(e)

    signals[step_name] = extractors.ExtractSignal(
        master_name,
        builder_name,
        step_name,
        test_name=None,
        failure_log=failure_log).ToDict()

    extract_signal.SaveSignalInAnalysis(master_name, builder_name,
                                        build_number, signals)

    return signals
Example 20
  def testStepName(self):
    step = WfStep.Create('m', 'b', 34, 's')
    self.assertEqual('s', step.step_name)
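
Example 20 implies that the step name can be read back from the entity itself. A minimal sketch of a model that would satisfy this test, assuming the identity is packed into the ndb key id as 'master/builder/build_number/step_name' (the real WfStep has more properties and may encode its key differently):

from google.appengine.ext import ndb

class FakeWfStep(ndb.Model):
  # Properties exercised by the examples on this page.
  isolated = ndb.BooleanProperty(default=False, indexed=False)
  log_data = ndb.TextProperty()

  @classmethod
  def Create(cls, master_name, builder_name, build_number, step_name):
    return cls(id='%s/%s/%d/%s' % (master_name, builder_name, build_number,
                                   step_name))

  @property
  def step_name(self):
    # Recover the step name from the last key-id segment.
    return self.key.id().split('/')[-1]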
Example 21
    def run(self, failure_info):
        """
    Args:
      failure_info (dict): Output of pipeline DetectFirstFailurePipeline.run().

    Returns:
      A dict like below:
      {
        'step_name1': waterfall.failure_signal.FailureSignal.ToDict(),
        ...
      }
    """
        signals = {}
        if not failure_info['failed'] or not failure_info['chromium_revision']:
            # Bail out if no failed step or no chromium revision.
            return signals

        master_name = failure_info['master_name']
        builder_name = failure_info['builder_name']
        build_number = failure_info['build_number']
        for step_name in failure_info.get('failed_steps', []):
            step = WfStep.Get(master_name, builder_name, build_number,
                              step_name)
            if step and step.log_data:
                failure_log = step.log_data
            else:
                # TODO: do test-level analysis instead of step-level.
                gtest_result = buildbot.GetGtestResultLog(
                    master_name, builder_name, build_number, step_name)
                if gtest_result:
                    failure_log = self._GetReliableTestFailureLog(gtest_result)

                if gtest_result is None or failure_log == 'invalid':
                    if not lock_util.WaitUntilDownloadAllowed(
                            master_name):  # pragma: no cover
                        raise pipeline.Retry(
                            'Failed to pull log of step %s of master %s' %
                            (step_name, master_name))
                    try:
                        failure_log = buildbot.GetStepStdio(
                            master_name, builder_name, build_number, step_name,
                            self.HTTP_CLIENT)
                    except ResponseTooLargeError:  # pragma: no cover.
                        logging.exception(
                            'Log of step "%s" is too large for urlfetch.',
                            step_name)
                        # If the stdio log of a step is too large, we don't want to pull it
                        # again in next run, because that might lead to DDoS to the master.
                        # TODO: Use archived stdio logs in Google Storage instead.
                        failure_log = 'Stdio log is too large for urlfetch.'

                    if not failure_log:  # pragma: no cover
                        raise pipeline.Retry(
                            'Failed to pull stdio of step %s of master %s' %
                            (step_name, master_name))

                # Save step log in datastore and avoid downloading again during retry.
                if not step:  # pragma: no cover
                    step = WfStep.Create(master_name, builder_name,
                                         build_number, step_name)

                step.log_data = self._ExtractStorablePortionOfLog(failure_log)

                try:
                    step.put()
                except Exception as e:  # pragma: no cover
                    # Sometimes, the step log is too large to save in datastore.
                    logging.exception(e)

            # TODO: save result in datastore?
            signals[step_name] = extractors.ExtractSignal(
                master_name, builder_name, step_name, None,
                failure_log).ToDict()
        return signals
Example 22
  def testExtractSignalsForTests(self):
    master_name = 'm'
    builder_name = 'b'
    build_number = 223

    failure_info = {
        'master_name': master_name,
        'builder_name': builder_name,
        'build_number': build_number,
        'failed': True,
        'chromium_revision': 'a_git_hash',
        'failed_steps': {
            'abc_test': {
                'last_pass': 221,
                'current_failure': 223,
                'first_failure': 222,
                'tests': {
                    'Unittest2.Subtest1': {
                        'current_failure': 223,
                        'first_failure': 222,
                        'last_pass': 221
                    },
                    'Unittest3.Subtest2': {
                        'current_failure': 223,
                        'first_failure': 222,
                        'last_pass': 221
                    }
                }
            }
        }
    }

    step = WfStep.Create(master_name, builder_name, build_number, 'abc_test')
    step.isolated = True
    step.log_data = (
        '{"Unittest2.Subtest1": "RVJST1I6eF90ZXN0LmNjOjEyMzQKYS9iL3UyczEuY2M6N'
        'TY3OiBGYWlsdXJlCkVSUk9SOlsyXTogMjU5NDczNTAwMCBib2dvLW1pY3Jvc2Vjb25kcw'
        'pFUlJPUjp4X3Rlc3QuY2M6MTIzNAphL2IvdTNzMi5jYzoxMjM6IEZhaWx1cmUK"'
        ', "Unittest3.Subtest2": "YS9iL3UzczIuY2M6MTEwOiBGYWlsdXJlCmEvYi91M3My'
        'LmNjOjEyMzogRmFpbHVyZQo="}')
    step.put()

    expected_signals = {
        'abc_test': {
            'files': {
                'a/b/u2s1.cc': [567],
                'a/b/u3s2.cc': [123, 110]
            },
            'keywords': {},
            'tests': {
                'Unittest2.Subtest1': {
                    'files': {
                        'a/b/u2s1.cc': [567],
                        'a/b/u3s2.cc': [123]
                    },
                    'keywords': {}
                },
                'Unittest3.Subtest2': {
                    'files': {
                        'a/b/u3s2.cc': [110, 123]
                    },
                    'keywords': {}
                }
            }
        }
    }

    self._CreateAndSaveWfAnanlysis(
        master_name, builder_name, build_number)

    pipeline = ExtractSignalPipeline()
    signals = pipeline.run(failure_info)
    self.assertEqual(expected_signals, signals)
Example 23
    def run(self, failure_info):
        """Extracts failure signals from failed steps.

    Args:
      failure_info (dict): Output of pipeline DetectFirstFailurePipeline.run().

    Returns:
      A dict like below:
      {
        'step_name1': waterfall.failure_signal.FailureSignal.ToDict(),
        ...
      }
    """
        signals = {}
        if not failure_info['failed'] or not failure_info['chromium_revision']:
            # Bail out if no failed step or no chromium revision.
            return signals

        # Bail out on infra failure
        if failure_info.get('failure_type') == failure_type.INFRA:
            return signals

        master_name = failure_info['master_name']
        builder_name = failure_info['builder_name']
        build_number = failure_info['build_number']

        for step_name in failure_info.get('failed_steps', []):
            if not waterfall_config.StepIsSupportedForMaster(
                    step_name, master_name):
                # Bail out if the step is not supported.
                continue

            step = WfStep.Get(master_name, builder_name, build_number,
                              step_name)
            if step and step.log_data:
                failure_log = step.log_data
            else:
                # TODO: do test-level analysis instead of step-level.
                # TODO: Use swarming test result instead of archived gtest results
                gtest_result = buildbot.GetGtestResultLog(
                    master_name, builder_name, build_number, step_name)
                if gtest_result:
                    failure_log = _GetReliableTestFailureLog(gtest_result)

                if gtest_result is None or failure_log == 'invalid':
                    if not lock_util.WaitUntilDownloadAllowed(
                            master_name):  # pragma: no cover
                        raise pipeline.Retry(
                            'Failed to pull log of step %s of master %s' %
                            (step_name, master_name))
                    try:
                        failure_log = buildbot.GetStepLog(
                            master_name, builder_name, build_number, step_name,
                            self.HTTP_CLIENT)
                    except ResponseTooLargeError:  # pragma: no cover.
                        logging.exception(
                            'Log of step "%s" is too large for urlfetch.',
                            step_name)
                        # If the stdio log of a step is too large, we don't want to pull it
                        # again in next run, because that might lead to DDoS to the master.
                        # TODO: Use archived stdio logs in Google Storage instead.
                        failure_log = 'Stdio log is too large for urlfetch.'

                    if not failure_log:  # pragma: no cover
                        raise pipeline.Retry(
                            'Failed to pull stdio of step %s of master %s' %
                            (step_name, master_name))

                # Save step log in datastore and avoid downloading again during retry.
                if not step:  # pragma: no cover
                    step = WfStep.Create(master_name, builder_name,
                                         build_number, step_name)

                step.log_data = _ExtractStorablePortionOfLog(failure_log)

                try:
                    step.put()
                except Exception as e:  # pragma: no cover
                    # Sometimes, the step log is too large to save in datastore.
                    logging.exception(e)

            # TODO: save result in datastore?
            if step.isolated:
                try:
                    json_failure_log = (json.loads(failure_log)
                                        if failure_log != 'flaky' else {})
                except ValueError:  # pragma: no cover
                    json_failure_log = {}
                    logging.warning('failure_log %s is not valid JSON.' %
                                    failure_log)

                signals[step_name] = {'tests': {}}
                step_signal = FailureSignal()

                for test_name, test_failure_log in json_failure_log.iteritems(
                ):
                    signals[step_name]['tests'][
                        test_name] = extractors.ExtractSignal(
                            master_name, builder_name, step_name, test_name,
                            base64.b64decode(test_failure_log)).ToDict()

                    # Save signals in test failure log to step level.
                    step_signal.MergeFrom(
                        signals[step_name]['tests'][test_name])

                signals[step_name]['files'] = step_signal.files
                signals[step_name]['keywords'] = step_signal.keywords
            else:
                signals[step_name] = extractors.ExtractSignal(
                    master_name, builder_name, step_name, None,
                    failure_log).ToDict()

        return signals
Example 24
def ExtractSignalsForTestFailure(failure_info, http_client):
    signals = {}

    master_name = failure_info.master_name
    builder_name = failure_info.builder_name
    build_number = failure_info.build_number
    failed_steps = failure_info.failed_steps or {}

    for step_name in failed_steps:
        failure_log = None
        if not failed_steps[step_name].supported:
            # Bail out if the step is not supported.
            continue

        # 1. Tries to get stored failure log from step.
        step = (WfStep.Get(master_name, builder_name, build_number, step_name)
                or WfStep.Create(master_name, builder_name, build_number,
                                 step_name))
        if step.log_data and step.log_data != constants.TOO_LARGE_LOG:
            failure_log = step.log_data
        else:
            json_formatted_log = True
            # 2. Gets test results.
            list_isolated_data = failed_steps[step_name].list_isolated_data
            list_isolated_data = (list_isolated_data.ToSerializable()
                                  if list_isolated_data else [])
            merged_test_results = (
                swarmed_test_util.RetrieveShardedTestResultsFromIsolatedServer(
                    list_isolated_data, http_client))
            if merged_test_results:
                test_results = test_results_util.GetTestResultObject(
                    merged_test_results)
                if test_results:
                    failure_log, _ = (
                        test_results_service.
                        GetFailedTestsInformationFromTestResult(test_results))
                    failure_log = json.dumps(
                        failure_log
                    ) if failure_log else constants.FLAKY_FAILURE_LOG
                else:
                    failure_log = constants.WRONG_FORMAT_LOG

            if not merged_test_results or failure_log in [
                    constants.INVALID_FAILURE_LOG, constants.WRONG_FORMAT_LOG
            ]:
                # 3. Gets stdout log.
                json_formatted_log = False
                failure_log = extract_signal.GetStdoutLog(
                    master_name, builder_name, build_number, step_name,
                    http_client)

            try:
                if not failure_log:
                    raise extract_signal.FailedToGetFailureLogError(
                        'Failed to pull failure log (stdio or ninja output) of step %s of'
                        ' %s/%s/%d' %
                        (step_name, master_name, builder_name, build_number))
            except extract_signal.FailedToGetFailureLogError:
                return {}

            # Save step log in datastore and avoid downloading again during retry.
            step.log_data = extract_signal.ExtractStorablePortionOfLog(
                failure_log, json_formatted_log
            ) if step.log_data != constants.TOO_LARGE_LOG else step.log_data
            step.isolated = step.isolated or json_formatted_log

            try:
                step.put()
            except Exception as e:  # pragma: no cover
                # Sometimes, the step log is too large to save in datastore.
                logging.exception(e)

        if step.isolated:
            try:
                json_failure_log = (json.loads(failure_log) if
                                    failure_log != constants.FLAKY_FAILURE_LOG
                                    else {})
            except ValueError:
                json_failure_log = {}
                logging.warning('failure_log %s is not valid JSON.' %
                                failure_log)

            signals[step_name] = {'tests': {}}
            step_signal = FailureSignal()

            for test_name, test_failure_log in json_failure_log.iteritems():
                signals[step_name]['tests'][
                    test_name] = extractors.ExtractSignal(
                        master_name, builder_name, step_name, test_name,
                        base64.b64decode(test_failure_log)).ToDict()

                # Save signals in test failure log to step level.
                step_signal.MergeFrom(signals[step_name]['tests'][test_name])

            signals[step_name]['files'] = step_signal.files
        else:
            signals[step_name] = extractors.ExtractSignal(
                master_name, builder_name, step_name, None,
                failure_log).ToDict()

    return signals
Example 25
    def testTestLevelFailedInfo(self, mock_fn):
        master_name = 'm'
        builder_name = 'b'
        build_number = 223

        self._CreateAndSaveWfAnanlysis(master_name, builder_name, build_number,
                                       analysis_status.RUNNING)

        # Mock data for retrieving data from swarming server for a build.
        self._MockUrlFetchWithSwarmingData(master_name, builder_name, 223)

        mock_fn.side_effect = [
            self._GetBuildData(master_name, builder_name, 223),
            self._GetBuildData(master_name, builder_name, 222),
            self._GetBuildData(master_name, builder_name, 221),
            self._GetBuildData(master_name, builder_name, 220)
        ]
        for n in xrange(223, 219, -1):  # pragma: no branch.
            # Set up build data for builds 223, 222 and 221.
            if n == 220:
                break

            # Mock data for retrieving data from swarming server for a single step.
            self._MockUrlFetchWithSwarmingData(master_name, builder_name, n,
                                               'abc_test')

            # Mock data for retrieving hash to output.json from isolated server.
            isolated_data = {
                'isolatedserver': 'https://isolateserver.appspot.com',
                'namespace': {
                    'namespace': 'default-gzip'
                },
                'digest': 'isolatedhashabctest-%d' % n
            }
            self._MockUrlfetchWithIsolatedData(isolated_data, build_number=n)
            # Mock data for retrieving url to output.json from isolated server.
            file_hash_data = {
                'isolatedserver': 'https://isolateserver.appspot.com',
                'namespace': {
                    'namespace': 'default-gzip'
                },
                'digest': 'abctestoutputjsonhash-%d' % n
            }
            self._MockUrlfetchWithIsolatedData(file_hash_data, build_number=n)

            # Mock data for downloading output.json from isolated server.
            self._MockUrlfetchWithIsolatedData(
                None,
                ('https://isolateserver.storage.googleapis.com/default-gzip/'
                 'm_b_%d_abc_test' % n), '%s_%s_%d_%s.json' %
                (master_name, builder_name, n, 'abc_test'))

        step_221 = WfStep.Create(master_name, builder_name, 221, 'abc_test')
        step_221.isolated = True
        step_221.log_data = (
            '{"Unittest3.Subtest3": "YS9iL3UzczIuY2M6MTEwOiBGYWlsdXJlCg=="}')
        step_221.put()

        pipeline = DetectFirstFailurePipeline()
        failure_info = pipeline.run(master_name, builder_name, build_number)

        expected_failed_steps = {
            'abc_test': {
                'current_failure': 223,
                'first_failure': 222,
                'last_pass': 221,
                'list_isolated_data': [{
                    'isolatedserver': 'https://isolateserver.appspot.com',
                    'namespace': 'default-gzip',
                    'digest': 'isolatedhashabctest-223'
                }],
                'tests': {
                    'Unittest2.Subtest1': {
                        'current_failure': 223,
                        'first_failure': 222,
                        'last_pass': 221,
                        'base_test_name': 'Unittest2.Subtest1'
                    },
                    'Unittest3.Subtest2': {
                        'current_failure': 223,
                        'first_failure': 222,
                        'last_pass': 221,
                        'base_test_name': 'Unittest3.Subtest2'
                    }
                }
            }
        }

        expected_step_log_data = {
            223:
            ('{"Unittest2.Subtest1": "RVJST1I6eF90ZXN0LmNjOjEyMzRcbmEvYi91Mn'
             'MxLmNjOjU2NzogRmFpbHVyZVxuRVJST1I6WzJdOiAyNTk0NzM1MDAwIGJvZ28tb'
             'Wljcm9zZWNvbmRzXG5FUlJPUjp4X3Rlc3QuY2M6MTIzNAphL2IvdTJzMS5jYzo1'
             'Njc6IEZhaWx1cmUK", '
             '"Unittest3.Subtest2": "YS9iL3UzczIuY2M6MTEwOiBGYWlsdXJlCg=="}'),
            222:
            ('{"Unittest2.Subtest1": "RVJST1I6eF90ZXN0LmNjOjEyMzRcbmEvYi91Mn'
             'MxLmNjOjU2NzogRmFpbHVyZVxuRVJST1I6WzJdOiAyNTk0NzM1MDAwIGJvZ28tb'
             'Wljcm9zZWNvbmRzXG5FUlJPUjp4X3Rlc3QuY2M6MTIzNAphL2IvdTJzMS5jYzo1'
             'Njc6IEZhaWx1cmUK", '
             '"Unittest3.Subtest2": "YS9iL3UzczIuY2M6MTEwOiBGYWlsdXJlCg=="}'),
            221:
            '{"Unittest3.Subtest3": "YS9iL3UzczIuY2M6MTEwOiBGYWlsdXJlCg=="}'
        }

        for n in xrange(223, 220, -1):
            step = WfStep.Get(master_name, builder_name, n, 'abc_test')
            self.assertIsNotNone(step)
            self.assertTrue(step.isolated)
            self.assertEqual(expected_step_log_data[n], step.log_data)

        self.assertEqual(expected_failed_steps, failure_info['failed_steps'])
Example 26
    def testCheckFirstKnownFailureForSwarmingTestsFoundFlaky(
            self, mock_module, *_):
        master_name = 'm'
        builder_name = 'b'
        build_number = 221
        step_name = 'abc_test'
        failed_steps = {
            'abc_test': {
                'current_failure': 221,
                'first_failure': 221,
                'supported': True,
                'list_isolated_data': [{
                    'isolatedserver': 'https://isolateserver.appspot.com',
                    'namespace': 'default-gzip',
                    'digest': 'isolatedhashabctest-223'
                }]
            }
        }
        builds = {
            '221': {
                'blame_list': ['commit1'],
                'chromium_revision': 'commit1'
            },
            '222': {
                'blame_list': ['commit2'],
                'chromium_revision': 'commit2'
            },
            '223': {
                'blame_list': ['commit3', 'commit4'],
                'chromium_revision': 'commit4'
            }
        }

        failure_info = {
            'master_name': master_name,
            'builder_name': builder_name,
            'build_number': build_number,
            'failed_steps': failed_steps,
            'builds': builds
        }
        failure_info = TestFailureInfo.FromSerializable(failure_info)

        expected_failed_steps = failed_steps
        expected_failed_steps['abc_test']['tests'] = None
        expected_failed_steps['abc_test']['last_pass'] = None
        step = WfStep.Create(master_name, builder_name, build_number,
                             step_name)
        step.isolated = True
        step.put()

        mock_module.RetrieveShardedTestResultsFromIsolatedServer.return_value = {
            'disabled_tests': [],
            'all_tests': [
                'Unittest1.Subtest1',
                'Unittest1.Subtest2',
                'Unittest2.Subtest1',
            ],
            'per_iteration_data': [{
                'Unittest1.Subtest1': [{
                    'elapsed_time_ms': 1,
                    'losless_snippet': True,
                    'output_snippet': '[       OK ]\\n',
                    'status': 'SUCCESS',
                    'output_snippet_base64': 'WyAgICAgICBPSyBdCg=='
                }],
                'Unittest1.Subtest2': [{
                    'elapsed_time_ms': 66,
                    'losless_snippet': True,
                    'output_snippet': 'a/b/u1s2.cc:1234: Failure\\n',
                    'status': 'FAILURE',
                    'output_snippet_base64': 'YS9iL3UxczIuY2M6MTIzNDogRmF'
                }, {
                    'elapsed_time_ms': 50,
                    'losless_snippet': True,
                    'output_snippet': '[       OK ]\\n',
                    'status': 'SUCCESS',
                    'output_snippet_base64': 'WyAgICAgICBPSyBdCg=='
                }],
                'Unittest2.Subtest1': [{
                    'elapsed_time_ms': 56,
                    'losless_snippet': True,
                    'output_snippet': 'ERROR',
                    'status': 'FAILURE',
                    'output_snippet_base64': 'RVJST1I6eF90ZXN0LmN'
                }, {
                    'elapsed_time_ms': 1,
                    'losless_snippet': True,
                    'output_snippet': '[       OK ]\\n',
                    'status': 'SUCCESS',
                    'output_snippet_base64': 'WyAgICAgICBPSyBdCg=='
                }],
            }]
        }

        mock_module.GetFailedTestsInformation.return_value = ({}, {})

        ci_test_failure.CheckFirstKnownFailureForSwarmingTests(
            master_name, builder_name, build_number, failure_info)

        self.assertEqual(expected_failed_steps,
                         failure_info.failed_steps.ToSerializable())
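
In the mocked swarming result above, every failing test also has a passing retry, which is why the step ends up treated as flaky and failed_steps comes back unchanged. A standalone sketch of that classification rule, assuming the per_iteration_data layout shown in the mock (the real logic lives in Findit's test-results handling and may differ):

def _GetFlakyTests(per_iteration_data):
  # A test is flaky if the same iteration recorded both FAILURE and SUCCESS
  # runs for it; a test that only ever failed is a reliable failure.
  flaky_tests = []
  for iteration in per_iteration_data:
    for test_name, runs in iteration.iteritems():
      statuses = set(run['status'] for run in runs)
      if 'FAILURE' in statuses and 'SUCCESS' in statuses:
        flaky_tests.append(test_name)
  return flaky_tests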