Example #1
    def testParseBuildUrl(self):
        cases = {
            None: None,
            '': None,
            'https://unknown.host/p/chromium/builders/Linux/builds/55833':
                None,
            'http://build.chromium.org/p/chromium/builders/Linux': None,
            'http://build.chromium.org/p/chromium/builders/Linux/builds/55833':
                ('chromium', 'Linux', 55833),
            ('http://build.chromium.org/p/chromium.win/builders/'
             'Win7%20Tests%20%281%29/builds/33911'):
                ('chromium.win', 'Win7 Tests (1)', 33911),
            'https://abc.def.google.com/i/m1/builders/b1/builds/234':
                ('m1', 'b1', 234),
            'https://luci-milo.appspot.com/buildbot/m2/b2/123':
                ('m2', 'b2', 123),
            'https://ci.chromium.org/buildbot/m2/b2/123': ('m2', 'b2', 123),
        }

        for url, expected_result in cases.iteritems():
            result = buildbot.ParseBuildUrl(url)
            self.assertEqual(expected_result, result)
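The cases above pin down ParseBuildUrl's contract for the classic buildbot-style URLs: reject unknown hosts and URLs without a build number, URL-decode the builder name, and return a (master_name, builder_name, build_number) tuple with an integer build number. Below is a minimal sketch of a parser that satisfies just these cases; the host patterns and path shapes are inferred from the test data only, and the real buildbot.ParseBuildUrl also resolves the LUCI-style URLs shown in Example #10.

# Sketch only: host whitelist and path shapes are inferred from the test cases.
import re
try:
  from urllib import unquote  # Python 2, matching the rest of this codebase.
except ImportError:
  from urllib.parse import unquote  # Python 3 fallback.

_WATERFALL_URL_PATTERN = re.compile(
    r'^https?://(?:build\.chromium\.org|[^/]+\.google\.com)'
    r'/[pi]/(?P<master>[^/]+)/builders/(?P<builder>[^/]+)'
    r'/builds/(?P<build>\d+)$')
_MILO_BUILDBOT_URL_PATTERN = re.compile(
    r'^https?://(?:luci-milo\.appspot\.com|ci\.chromium\.org)'
    r'/buildbot/(?P<master>[^/]+)/(?P<builder>[^/]+)/(?P<build>\d+)$')


def ParseBuildUrlSketch(url):
  """Returns (master_name, builder_name, build_number), or None on failure."""
  if not url:
    return None
  for pattern in (_WATERFALL_URL_PATTERN, _MILO_BUILDBOT_URL_PATTERN):
    match = pattern.match(url)
    if match:
      return (match.group('master'), unquote(match.group('builder')),
              int(match.group('build')))
  return None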
Example #2
    def testParseBuildLongUrlNoBuilds(self, _):
        cases = [
            'https://ci.chromium.org/p/chromium/builders/ci'
            '/Linux%20Tests%20SANDBOX/3932',
            'https://luci-milo.appspot.com/p/chromium/builders/ci/b2/111',
        ]

        for url in cases:
            self.assertIsNone(buildbot.ParseBuildUrl(url))
Example #3
def _GetTryJobBuildNumber(try_job_result):
  build_keys = buildbot.ParseBuildUrl(try_job_result.get('url'))
  if build_keys and len(build_keys) > 2:
    return build_keys[2]
  if 'try_job_id' in try_job_result:
    return try_job_result['try_job_id']
  else:  # pragma: no cover
    # This will almost certainly not happen.
    logging.error('Cannot find id or url for: %s', try_job_result)
    return 'unknown'
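For illustration, a hedged usage sketch of the helper above; the try_job_result dicts and their values are hypothetical, shaped only after the keys the helper reads.

# Hypothetical inputs, shaped only after the keys the helper reads.
with_url = {
    'url': 'http://build.chromium.org/p/chromium/builders/Linux/builds/55833'
}
with_id_only = {'try_job_id': '8988812345'}

_GetTryJobBuildNumber(with_url)      # -> 55833, parsed from the build url.
_GetTryJobBuildNumber(with_id_only)  # -> '8988812345', the fallback id.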
Example #4
    def HandlePost(self):
        """Triggers an analysis on demand and redirects to the result page."""
        url = self.request.get('url').strip()

        is_admin = auth_util.IsCurrentUserAdmin()
        user_email = auth_util.GetUserEmail()
        if not acl.CanTriggerNewAnalysis(user_email, is_admin):
            # No permission to schedule a new analysis.
            return self.CreateRedirect('/waterfall/failure?redirect=1&url=%s' %
                                       url)

        build_info = buildbot.ParseBuildUrl(url)
        if not build_info:
            return BaseHandler.CreateError(
                'Url "%s" is not pointing to a build.' % url, 404)
        master_name, builder_name, build_number = build_info

        analysis = None
        if not (waterfall_config.MasterIsSupported(master_name)
                or auth_util.IsCurrentUserAdmin()):
            # If the build failure was already analyzed, just show it to the user.
            analysis = WfAnalysis.Get(master_name, builder_name, build_number)
            if not analysis:
                return BaseHandler.CreateError(
                    'Master "%s" is not supported yet.' % master_name, 501)

        if not analysis:
            # Only allow admin to force a re-run and set the build_completed.
            force = is_admin and self.request.get('force') == '1'

            build = build_util.GetBuildInfo(master_name, builder_name,
                                            build_number)
            if not build:
                return BaseHandler.CreateError(
                    'Can\'t get information about build "%s/%s/%s".' %
                    (master_name, builder_name, build_number), 501)

            if not build.completed and force:
                return BaseHandler.CreateError(
                    'Can\'t force a rerun for an incomplete build "%s/%s/%s".'
                    % (master_name, builder_name, build_number), 501)

            build_failure_analysis_pipelines.ScheduleAnalysisIfNeeded(
                master_name,
                builder_name,
                build_number,
                build_completed=build.completed,
                force=force,
                queue_name=constants.WATERFALL_ANALYSIS_QUEUE)

        return self.CreateRedirect('/waterfall/failure?redirect=1&url=%s' %
                                   url)
Example #5
    def HandleGet(self):
        """Triggers analysis of a build failure on demand and return current result.

    If the final analysis result is available, set cache-control to 1 day to
    avoid overload by unnecessary and frequent query from clients; otherwise
    set cache-control to 5 seconds to allow repeated query.

    Serve HTML page or JSON result as requested.
    """
        url = self.request.get('url').strip()
        build_info = buildbot.ParseBuildUrl(url)
        if not build_info:
            return BaseHandler.CreateError(
                'Url "%s" is not pointing to a build.' % url, 501)
        master_name, builder_name, build_number = build_info

        if not masters.MasterIsSupported(master_name):
            return BaseHandler.CreateError(
                'Master "%s" is not supported yet.' % master_name, 501)

        force = self.request.get('force') == '1'
        analysis = build_failure_analysis_pipelines.ScheduleAnalysisIfNeeded(
            master_name,
            builder_name,
            build_number,
            force=force,
            queue_name=BUILD_FAILURE_ANALYSIS_TASKQUEUE)

        data = {
            'master_name': analysis.master_name,
            'builder_name': analysis.builder_name,
            'build_number': analysis.build_number,
            'pipeline_status_path': analysis.pipeline_status_path,
            'show_debug_info': self._ShowDebugInfo(),
            'analysis_request_time': _FormatDatetime(analysis.request_time),
            'analysis_start_time': _FormatDatetime(analysis.start_time),
            'analysis_end_time': _FormatDatetime(analysis.end_time),
            'analysis_duration': analysis.duration,
            'analysis_update_time': _FormatDatetime(analysis.updated_time),
            'analysis_completed': analysis.completed,
            'analysis_failed': analysis.failed,
            'analysis_result': analysis.result,
            'analysis_correct': analysis.correct,
            'triage_history': _GetTriageHistory(analysis),
        }

        return {'template': 'build_failure.html', 'data': data}
Example #6
  def HandlePost(self):
    """Gets information to help triage the analysis results.

    1. Checks if any CL in the current build is reverted in later builds,
    up to and including the first green build.
    2. TODO: Checks if any changed file in the current build is changed again
    in later builds, up to and including the first green build.
    """
    url = self.request.get('url').strip()
    build_keys = buildbot.ParseBuildUrl(url)

    if not build_keys:  # pragma: no cover
      return {'data': {}}

    data = _CheckReverts(*build_keys)

    return {'data': data}

  def HandleGet(self):  # pragma: no cover
    """Sets the manual triage result for the cl."""
    url = self.request.get('url').strip()
    build_info = buildbot.ParseBuildUrl(url)
    if not build_info:
      return {'data': {'success': False}}
    master_name, builder_name, build_number = build_info

    cl_status = int(self.request.get('status'))
    cl_info = self.request.get('cl_info')
    # As the permission level is CORP_USER, we can assume the current user
    # has already logged in.
    user_name = users.get_current_user().email().split('@')[0]
    success = _UpdateSuspectedCLAndAnalysis(
      master_name, builder_name, build_number, cl_info, cl_status, user_name)

    return {'data': {'success': success}}
Example #8
  def HandlePost(self):
    """Sets the manual triage result for the analysis.

    Mark the analysis result as correct/wrong/etc.
    TODO: make it possible to set the real culprit CLs.
    """
    url = self.request.get('url').strip()
    build_info = buildbot.ParseBuildUrl(url)
    if not build_info:
      return {'data': {'success': False}}
    master_name, builder_name, build_number = build_info

    correct = self.request.get('correct').lower() == 'true'
    # As the permission level is CORP_USER, we can assume the current user
    # has already logged in.
    user_name = users.get_current_user().email().split('@')[0]
    success = _UpdateAnalysisResultStatus(
        master_name, builder_name, build_number, correct, user_name)
    return {'data': {'success': success}}
Example #9
    def testParseBuildUrl(self):
        cases = {
            None: None,
            '': None,
            'https://unknown.host/p/chromium/builders/Linux/builds/55833':
                None,
            'http://build.chromium.org/p/chromium/builders/Linux': None,
            'http://build.chromium.org/p/chromium/builders/Linux/builds/55833':
                ('chromium', 'Linux', 55833),
            ('http://build.chromium.org/p/chromium.win/builders/'
             'Win7%20Tests%20%281%29/builds/33911'):
                ('chromium.win', 'Win7 Tests (1)', 33911),
        }

        for url, expected_result in cases.iteritems():
            result = buildbot.ParseBuildUrl(url)
            self.assertEqual(expected_result, result)
Example #10
    def testParseBuildLongUrl(self, mock_get_master):

        master_name = 'chromium.sandbox'
        mock_build = Build()
        mock_build.input.properties['mastername'] = master_name
        mock_get_master.return_value = mock_build

        cases = {
            'https://ci.chromium.org/p/chromium/builders/luci.chromium.ci'
            '/Linux%20Tests%20SANDBOX/3932':
            (master_name, 'Linux Tests SANDBOX', 3932),
            'https://luci-milo.appspot.com/p/chromium/builders'
            '/luci.chromium.ci/b2/111': (master_name, 'b2', 111),
            'https://luci-milo.appspot.com/p/chromium/builders/ci/b2/111':
            (master_name, 'b2', 111),
        }

        for url, expected_result in cases.iteritems():
            result = buildbot.ParseBuildUrl(url)
            self.assertEqual(expected_result, result)
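Examples #2 and #10 cover the newer LUCI builder URLs, where the path carries a project and bucket instead of a buildbot master; the expected master name comes from the referenced build's 'mastername' input property, which is why both tests mock out the build lookup (and Example #2 returns None when no build is found). A rough sketch of that flow follows; get_build_fn is a hypothetical placeholder for whatever RPC the real code uses, since the tests only show that it is mocked.

import re
try:
  from urllib import unquote  # Python 2
except ImportError:
  from urllib.parse import unquote  # Python 3 fallback.

_LUCI_BUILDER_URL_PATTERN = re.compile(
    r'^https?://(?:ci\.chromium\.org|luci-milo\.appspot\.com)'
    r'/p/(?P<project>[^/]+)/builders/(?P<bucket>[^/]+)'
    r'/(?P<builder>[^/]+)/(?P<build_number>\d+)$')


def ParseLuciBuildUrlSketch(url, get_build_fn):
  """Maps a LUCI builder url to (master_name, builder_name, build_number).

  get_build_fn stands in for the mocked lookup in the tests above; it is
  expected to return a Build whose input.properties contain 'mastername', or
  None if the build cannot be found (the "NoBuilds" cases in Example #2).
  """
  match = _LUCI_BUILDER_URL_PATTERN.match(url or '')
  if not match:
    return None
  builder_name = unquote(match.group('builder'))
  build_number = int(match.group('build_number'))
  build = get_build_fn(match.group('project'), match.group('bucket'),
                       builder_name, build_number)
  if not build:
    return None
  return (build.input.properties['mastername'], builder_name, build_number)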
Example #11
def _ExtractFailureInfo(message):
    """Returns the master/builder/build id/step name of the failure."""
    master_name = None
    builder_name = None
    build_id = None
    step_name = None

    match = _AUTO_CLOSE_MESSAGE_PATTERN.match(message)
    if match:
        step_name = match.group('step')
        builder_name = match.group('builder')
        build = match.group('build')
        build_info = buildbot.ParseBuildUrl(build)
        if build_info:
            master_name = build_info[0]
            build_id = str(build_info[-1])
        else:
            build_id = build.split('/')[-1]

    return (master_name, builder_name, build_id, step_name)
Example #12
    def HandleGet(self):
        """Renders analysis result of the failure.

    If the final analysis result is available, set cache-control to 1 day to
    avoid overload by unnecessary and frequent query from clients; otherwise
    set cache-control to 5 seconds to allow repeated query.
    """
        url = self.request.get('url').strip()
        build_info = buildbot.ParseBuildUrl(url)
        if not build_info:
            return BaseHandler.CreateError(
                'Url "%s" is not pointing to a build.' % url, 404)
        master_name, builder_name, build_number = build_info

        analysis = WfAnalysis.Get(master_name, builder_name, build_number)

        if not analysis:
            if self.request.get('redirect') == '1':
                return BaseHandler.CreateError(
                    'No permission to schedule a new analysis.', 401)
            else:
                return BaseHandler.CreateError(
                    'Please schedule analyses on home page instead.', 400)

        data = self._PrepareCommonDataForFailure(analysis)
        data['suspected_cls'] = _GetAllSuspectedCLsAndCheckStatus(
            master_name, builder_name, build_number, analysis)

        # TODO(crbug.com/702444): Do not assume failure_type.INFRA implies a
        # compile failure. Either use a special template, or choose the appropriate
        # one based on the type of job (compile/test).
        if analysis.failure_type == failure_type.COMPILE or (
                analysis.failure_type == failure_type.INFRA):
            self._PrepareDataForCompileFailure(analysis, data)
            return {'template': 'waterfall/compile_failure.html', 'data': data}
        else:
            self._PrepareDataForTestFailures(analysis, build_info, data,
                                             self._ShowDebugInfo())
            return {'template': 'waterfall/test_failure.html', 'data': data}
Example #13
    def HandleGet(self):
        """Triggers analysis of a build failure on demand and return current result.

    If the final analysis result is available, set cache-control to 1 day to
    avoid overload by unnecessary and frequent query from clients; otherwise
    set cache-control to 5 seconds to allow repeated query.

    Serve HTML page or JSON result as requested.
    """
        url = self.request.get('url').strip()
        build_info = buildbot.ParseBuildUrl(url)
        if not build_info:
            return BaseHandler.CreateError(
                'Url "%s" is not pointing to a build.' % url, 501)
        master_name, builder_name, build_number = build_info

        analysis = None
        if not (waterfall_config.MasterIsSupported(master_name)
                or users.is_current_user_admin()):
            # If the build failure was already analyzed, just show it to the user.
            analysis = WfAnalysis.Get(master_name, builder_name, build_number)
            if not analysis:
                return BaseHandler.CreateError(
                    'Master "%s" is not supported yet.' % master_name, 501)

        if not analysis:
            # Only allow admin to force a re-run and set the build_completed.
            force = (users.is_current_user_admin()
                     and self.request.get('force') == '1')

            build = build_util.GetBuildInfo(master_name, builder_name,
                                            build_number)
            if not build:
                return BaseHandler.CreateError(
                    'Can\'t get information about build "%s/%s/%s".' %
                    (master_name, builder_name, build_number), 501)
            build_completed = build.completed

            if not build_completed and force:
                return BaseHandler.CreateError(
                    'Can\'t rerun an incomplete build "%s/%s/%s".' %
                    (master_name, builder_name, build_number), 501)

            analysis = build_failure_analysis_pipelines.ScheduleAnalysisIfNeeded(
                master_name,
                builder_name,
                build_number,
                build_completed=build_completed,
                force=force,
                queue_name=constants.WATERFALL_ANALYSIS_QUEUE)

        data = self._PrepareCommonDataForFailure(analysis)
        data['suspected_cls'] = _GetAllSuspectedCLsAndCheckStatus(
            master_name, builder_name, build_number, analysis)

        # TODO(crbug.com/702444): Do not assume failure_type.INFRA implies a
        # compile failure. Either use a special template, or choose the appropriate
        # one based on the type of job (compile/test).
        if analysis.failure_type == failure_type.COMPILE or (
                analysis.failure_type == failure_type.INFRA):
            self._PrepareDataForCompileFailure(analysis, data)
            return {'template': 'waterfall/compile_failure.html', 'data': data}
        else:
            self._PrepareDataForTestFailures(analysis, build_info, data,
                                             self._ShowDebugInfo())
            return {'template': 'waterfall/test_failure.html', 'data': data}
Example #14
  def HandlePost(self):
    # Information needed to execute this endpoint will be populated by the
    # branches below.
    rerun = self.request.get('rerun', '0').strip() == '1'
    cancel = self.request.get('cancel', '0').strip() == '1'
    analyze_recent_commit = (
        self.request.get('analyze_recent_commit', '0').strip() == '1')
    if rerun:  # Rerun an analysis.
      return self._HandleRerunAnalysis()
    elif cancel:  # Force an analysis to be cancelled.
      return self._HandleCancelAnalysis()
    elif analyze_recent_commit:
      return self._HandleAnalyzeRecentCommit()
    else:  # Regular POST requests to start an analysis.
      # If the key hasn't been specified, then we get the information from
      # other URL parameters.
      build_url = self.request.get('url', '').strip()
      build_info = buildbot.ParseBuildUrl(build_url)
      if not build_info:
        return self.CreateError('Unknown build info!', 400)
      master_name, builder_name, build_number = build_info

      step_name = self.request.get('step_name', '').strip()
      test_name = self.request.get('test_name', '').strip()
      bug_id = self.request.get('bug_id', '').strip()

      error = self._ValidateInput(step_name, test_name, bug_id)
      if error:
        return error

      build_number = int(build_number)
      bug_id = int(bug_id) if bug_id else None

      request = FlakeAnalysisRequest.Create(test_name, False, bug_id)
      request.AddBuildStep(master_name, builder_name, build_number, step_name,
                           time_util.GetUTCNow())
      analysis, scheduled = self._CreateAndScheduleFlakeAnalysis(
          request, master_name, builder_name, build_number, step_name,
          test_name, bug_id, False)

      if not analysis:
        if scheduled is None:
          # The user does not have permission to trigger an analysis, and no
          # previously triggered analysis exists to view.
          return {
              'template': 'error.html',
              'data': {
                  'error_message': (
                      'No permission to schedule an analysis for flaky test. '
                      'Please log in with your @google.com account first.'),
              },
              'return_code': 403,
          }

        # Check whether a previous request already covers this analysis; if
        # so, use the results from that analysis.
        request = FlakeAnalysisRequest.GetVersion(key=test_name)

        if not (request and request.analyses):
          return {
              'template': 'error.html',
              'data': {
                  'error_message': (
                      'Flake analysis is not supported for "%s/%s". Either '
                      'the test type is not supported or the test is not '
                      'swarmed yet.' % (step_name, test_name)),
              },
              'return_code': 400,
          }

        analysis = request.FindMatchingAnalysisForConfiguration(
            master_name, builder_name)

        if not analysis:
          logging.error('Flake analysis was deleted unexpectedly!')
          return {
              'template': 'error.html',
              'data': {
                  'error_message': 'Flake analysis was deleted unexpectedly!',
              },
              'return_code': 404,
          }

      logging.info('Analysis: %s has a scheduled status of: %r', analysis.key,
                   scheduled)
      return self.CreateRedirect(
          '/p/chromium/flake-portal/analysis/analyze?redirect=1&key=%s' %
          analysis.key.urlsafe())
Example #15
def _GetTryJobBuildNumber(url):
    build_keys = buildbot.ParseBuildUrl(url)
    return build_keys[2]

  def _callback(self, callback_params, pipeline_id=None):
    """Updates the TryJobData entities with status from buildbucket."""
    # callback_params may have been serialized if the callback was converted to
    # a URL.
    if isinstance(callback_params, basestring):
      callback_params = json.loads(callback_params)

    self.last_params = callback_params

    _ = pipeline_id  # We do nothing with this id.

    try_job_id = callback_params['try_job_id']
    assert try_job_id

    urlsafe_try_job_key = callback_params['urlsafe_try_job_key']
    try_job_type = callback_params['try_job_type']
    deadline = callback_params['deadline']
    already_set_started = callback_params['already_set_started']
    error_count = callback_params['error_count']
    max_error_times = callback_params['max_error_times']
    default_pipeline_wait_seconds = callback_params[
        'default_pipeline_wait_seconds']
    timeout_hours = callback_params['timeout_hours']
    backoff_time = callback_params['backoff_time']

    if try_job_type == failure_type.FLAKY_TEST:
      try_job_data = FlakeTryJobData.Get(try_job_id)
    else:
      try_job_data = WfTryJobData.Get(try_job_id)

    error, build = buildbucket_client.GetTryJobs([try_job_id])[0]

    if error:
      if error_count < max_error_times:
        error_count += 1
        self.delay_callback(
            backoff_time,
            callback_params={
                'try_job_id': try_job_id,
                'try_job_type': try_job_type,
                'urlsafe_try_job_key': urlsafe_try_job_key,
                'deadline': deadline,
                'already_set_started': already_set_started,
                'error_count': error_count,
                'max_error_times': max_error_times,
                'default_pipeline_wait_seconds': default_pipeline_wait_seconds,
                'timeout_hours': timeout_hours,
                'backoff_time': backoff_time * 2,
            }
        )
        return
      else:  # pragma: no cover
        # Buildbucket has returned errors more than max_error_times times;
        # retry the pipeline.
        _UpdateTryJobMetadata(
            try_job_data, try_job_type, build, error, False)
        raise pipeline.Retry(
            'Error "%s" occurred. Reason: "%s"' % (error.message,
                                                   error.reason))
    elif build.status == BuildbucketBuild.COMPLETED:
      swarming_task_id = buildbot.GetSwarmingTaskIdFromUrl(
          build.url)

      if swarming_task_id:
        try:
          report = json.loads(swarming_util.GetStepLog(
              try_job_id, 'report', HttpClientAppengine(), 'report'))
        except (ValueError, TypeError) as e:  # pragma: no cover
          report = {}
          logging.exception(
              'Failed to load result report for swarming/%s '
              'due to exception %s.' % (swarming_task_id, e.message))
      else:
        try_job_master_name, try_job_builder_name, try_job_build_number = (
            buildbot.ParseBuildUrl(build.url))

        try:
          report = json.loads(buildbot.GetStepLog(
              try_job_master_name, try_job_builder_name, try_job_build_number,
              'report', HttpClientAppengine(), 'report'))
        except (ValueError, TypeError) as e:  # pragma: no cover
          report = {}
          logging.exception(
              'Failed to load result report for %s/%s/%s due to exception %s.'
              % (try_job_master_name, try_job_builder_name,
                 try_job_build_number, e.message))

      _UpdateTryJobMetadata(
          try_job_data, try_job_type, build, error, False,
          report if report else {})
      result_to_update = self._UpdateTryJobResult(
          urlsafe_try_job_key, try_job_type, try_job_id,
          build.url, BuildbucketBuild.COMPLETED, report)
      self.complete(result_to_update[-1])
      return
    else:
      error_count = 0
      backoff_time = default_pipeline_wait_seconds
      if build.status == BuildbucketBuild.STARTED and not (
          already_set_started):
        # It is possible this branch is skipped if a fast build goes from
        # 'SCHEDULED' to 'COMPLETED' between queries, so start_time may be
        # unavailable.
        start_time = time_util.MicrosecondsToDatetime(build.updated_time)
        self._UpdateTryJobResult(
            urlsafe_try_job_key, try_job_type, try_job_id,
            build.url, BuildbucketBuild.STARTED)

        already_set_started = True

        # Update as much try job metadata as possible, as early as possible,
        # to avoid data loss in case of errors.
        try_job_data.start_time = start_time
        try_job_data.request_time = (
            time_util.MicrosecondsToDatetime(build.request_time))
        try_job_data.try_job_url = build.url
        try_job_data.callback_url = self.get_callback_url(
            callback_params=json.dumps({
                'try_job_id': try_job_id,
                'try_job_type': try_job_type,
                'urlsafe_try_job_key': urlsafe_try_job_key,
                'deadline': deadline,
                'already_set_started': already_set_started,
                'error_count': error_count,
                'max_error_times': max_error_times,
                'default_pipeline_wait_seconds': default_pipeline_wait_seconds,
                'timeout_hours': timeout_hours,
                'backoff_time': backoff_time,
            })
        )
        try_job_data.put()

    if time.time() > deadline:  # pragma: no cover
      _UpdateTryJobMetadata(
          try_job_data, try_job_type, build, error, True)
      # Explicitly abort the whole pipeline.
      raise pipeline.Abort(
          'Try job %s timed out after %d hours.' % (
              try_job_id, timeout_hours))

    # Ensure last_buildbucket_response is always the most recent
    # whenever available during intermediate queries.
    _UpdateLastBuildbucketResponse(try_job_data, build)
Example #17
    def HandleGet(self):
        key = self.request.get('key')
        if key:
            analysis = ndb.Key(urlsafe=key).get()
            if not analysis:  # pragma: no cover
                return self.CreateError('Analysis of flake is not found', 404)
        else:
            build_url = self.request.get('url', '').strip()
            build_info = buildbot.ParseBuildUrl(build_url)
            if not build_info:  # pragma: no cover
                return self.CreateError('Unknown build info!', 400)
            master_name, builder_name, build_number = build_info

            step_name = self.request.get('step_name', '').strip()
            test_name = self.request.get('test_name', '').strip()
            bug_id = self.request.get('bug_id', '').strip()
            # TODO(lijeffrey): Add support for force flag to trigger a rerun.

            error = self._ValidateInput(step_name, test_name, bug_id)

            if error:  # pragma: no cover
                return error

            build_number = int(build_number)
            bug_id = int(bug_id) if bug_id else None
            user_email = auth_util.GetUserEmail()
            is_admin = auth_util.IsCurrentUserAdmin()

            request = FlakeAnalysisRequest.Create(test_name, False, bug_id)
            request.AddBuildStep(master_name, builder_name, build_number,
                                 step_name, time_util.GetUTCNow())
            scheduled = flake_analysis_service.ScheduleAnalysisForFlake(
                request, user_email, is_admin, triggering_sources.FINDIT_UI)

            analysis = MasterFlakeAnalysis.GetVersion(master_name,
                                                      builder_name,
                                                      build_number, step_name,
                                                      test_name)

            if not analysis:
                if scheduled is None:
                    # The user does not have permission to trigger an analysis,
                    # and no previously triggered analysis exists to view.
                    return {
                        'template': 'error.html',
                        'data': {
                            'error_message':
                            ('You could schedule an analysis for flaky test only '
                             'after you login with google.com account.'),
                            'login_url':
                            self.GetLoginUrl(),
                        },
                        'return_code': 401,
                    }

                # Check whether a previous request already covers this
                # analysis; if so, use the results from that analysis.
                request = FlakeAnalysisRequest.GetVersion(key=test_name)

                if not (request and request.analyses):
                    return {
                        'template': 'error.html',
                        'data': {
                            'error_message':
                            ('Flake analysis is not supported for this request. Either'
                             ' the build step may not be supported or the test is not '
                             'swarmed.'),
                        },
                        'return_code': 400,
                    }

                analysis = request.FindMatchingAnalysisForConfiguration(
                    master_name, builder_name)

                if not analysis:  # pragma: no cover
                    logging.error('Flake analysis was deleted unexpectedly!')
                    return {
                        'template': 'error.html',
                        'data': {
                            'error_message':
                            'Flake analysis was deleted unexpectedly!',
                        },
                        'return_code': 400
                    }

        suspected_flake = _GetSuspectedFlakeInfo(analysis)
        culprit = _GetCulpritInfo(analysis)
        build_level_number, revision_level_number = _GetNumbersOfDataPointGroups(
            analysis.data_points)

        data = {
            'key': analysis.key.urlsafe(),
            'master_name': analysis.master_name,
            'builder_name': analysis.builder_name,
            'build_number': analysis.build_number,
            'step_name': analysis.step_name,
            'test_name': analysis.test_name,
            'pass_rates': [],
            'analysis_status': analysis.status_description,
            'try_job_status': analysis_status.STATUS_TO_DESCRIPTION.get(
                analysis.try_job_status),
            'last_attempted_swarming_task':
                _GetLastAttemptedSwarmingTaskDetails(analysis),
            'last_attempted_try_job':
                _GetLastAttemptedTryJobDetails(analysis),
            'version_number': analysis.version_number,
            'suspected_flake': suspected_flake,
            'culprit': culprit,
            'request_time': time_util.FormatDatetime(analysis.request_time),
            'build_level_number': build_level_number,
            'revision_level_number': revision_level_number,
            'error': analysis.error_message,
            'iterations_to_rerun': analysis.iterations_to_rerun,
            'show_input_ui': self._ShowInputUI(analysis)
        }

        if (users.is_current_user_admin() and analysis.completed
                and analysis.triage_history):
            data['triage_history'] = analysis.GetTriageHistory()

        data['pending_time'] = time_util.FormatDuration(
            analysis.request_time, analysis.start_time
            or time_util.GetUTCNow())
        if analysis.status != analysis_status.PENDING:
            data['duration'] = time_util.FormatDuration(
                analysis.start_time, analysis.end_time
                or time_util.GetUTCNow())

        data['pass_rates'] = _GetCoordinatesData(analysis)

        return {'template': 'flake/result.html', 'data': data}