  def testGetTryJobsRequestFailure(self):
    response = 'Not Found'
    self._MockUrlFetch('3', None, response, 404)
    results = buildbucket_client.GetTryJobs(['3'])
    self.assertEqual(1, len(results))
    error, build = results[0]
    self.assertIsNotNone(error)
    self.assertEqual(404, error.reason)
    self.assertEqual('Not Found', error.message)
    self.assertIsNone(build)
  def testGetTryJobsSuccess(self):
    response = {'build': {'id': '1', 'url': 'url', 'status': 'STARTED'}}
    self._MockUrlFetch('1', None, json.dumps(response))
    results = buildbucket_client.GetTryJobs(['1'])
    self.assertEqual(1, len(results))
    error, build = results[0]
    self.assertIsNone(error)
    self.assertIsNotNone(build)
    self.assertEqual('1', build.id)
    self.assertEqual('url', build.url)
    self.assertEqual('STARTED', build.status)
 def testGetTryJobsRequestFailure(self, mocked_fetch):
   response = 'Not Found'
   mocked_fetch.return_value = _Result(
       status_code=404, content=response, headers={})
   results = buildbucket_client.GetTryJobs(['3'])
   self.assertEqual(1, len(results))
   error, build = results[0]
   self.assertIsNotNone(error)
   self.assertEqual(404, error.reason)
   self.assertEqual('Not Found', error.message)
   self.assertIsNone(build)
 def testGetTryJobsSuccess(self, mocked_fetch):
   response = {'build': {'id': '1', 'url': 'url', 'status': 'STARTED'}}
   mocked_fetch.return_value = _Result(
       status_code=200, content=json.dumps(response), headers={})
   results = buildbucket_client.GetTryJobs(['1'])
   self.assertEqual(1, len(results))
   error, build = results[0]
   self.assertIsNone(error)
   self.assertIsNotNone(build)
   self.assertEqual('1', build.id)
   self.assertEqual('url', build.url)
   self.assertEqual('STARTED', build.status)
  def testGetTryJobsFailure(self):
    response = {
        'error': {
            'reason': 'BUILD_NOT_FOUND',
            'message': 'message',
        }
    }
    self._MockUrlFetch('2', None, json.dumps(response))
    results = buildbucket_client.GetTryJobs(['2'])
    self.assertEqual(1, len(results))
    error, build = results[0]
    self.assertIsNotNone(error)
    self.assertEqual('BUILD_NOT_FOUND', error.reason)
    self.assertEqual('message', error.message)
    self.assertIsNone(build)
 def testGetTryJobsFailure(self, mocked_fetch):
   response = {
       'error': {
           'reason': 'BUILD_NOT_FOUND',
           'message': 'message',
       }
   }
   mocked_fetch.return_value = _Result(
       status_code=200, content=json.dumps(response), headers={})
   results = buildbucket_client.GetTryJobs(['2'])
   self.assertEqual(1, len(results))
   error, build = results[0]
   self.assertIsNotNone(error)
   self.assertEqual('BUILD_NOT_FOUND', error.reason)
   self.assertEqual('message', error.message)
   self.assertIsNone(build)
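
Taken together, these tests pin down the contract the later examples rely on: GetTryJobs returns one (error, build) pair per requested id, with exactly one of the two set. Below is a minimal sketch of that shape, using plain namedtuples as stand-ins for the real buildbucket_client classes; only a few of the attributes the examples exercise are included.

import collections

# Stand-ins for the real buildbucket_client result objects; the attribute
# names mirror the ones these examples use, nothing more.
BuildbucketError = collections.namedtuple(
    'BuildbucketError', ['reason', 'message'])
BuildbucketBuild = collections.namedtuple(
    'BuildbucketBuild', ['id', 'url', 'status', 'response'])

# HTTP-level failure: reason carries the status code, message the body.
http_failure = (BuildbucketError(reason=404, message='Not Found'), None)

# Buildbucket-level failure: reason/message come from the response's
# 'error' dict.
api_failure = (
    BuildbucketError(reason='BUILD_NOT_FOUND', message='message'), None)

# Success: no error; the build exposes id/url/status plus the raw response.
success = (None, BuildbucketBuild(
    id='1', url='url', status='STARTED',
    response={'id': '1', 'url': 'url', 'status': 'STARTED'}))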
Example #7
def GetStepLog(try_job_id, full_step_name, http_client,
               log_type='stdout'):
  """Returns specific log of the specified step."""

  error, build = buildbucket_client.GetTryJobs([try_job_id])[0]
  if error:
    # No active exception here, so logging.error is the right call.
    logging.error('Error retrieving buildbucket build id: %s', try_job_id)
    return None

  # 1. Get log.
  data = logdog_util.GetStepLogForBuild(build.response, full_step_name,
                                        log_type, http_client)

  if log_type.lower() == 'step_metadata':  # pragma: no branch
    return json.loads(data) if data else None

  return data
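
For context, a hedged usage sketch of GetStepLog as defined above: 'step_metadata' logs come back parsed from JSON, everything else as raw text, and None signals that the buildbucket lookup failed. The try-job id and step name below are placeholders, not values from the original code.

# HttpClientAppengine (imported elsewhere) is the client used in these
# examples; the id and step name are placeholders.
http_client = HttpClientAppengine()

# Raw stdout of a step, or None if the buildbucket lookup failed.
stdout = GetStepLog('123400010002000', 'compile', http_client)

# 'step_metadata' is parsed from JSON before being returned, so this is a
# dict (or None if the log is missing).
step_metadata = GetStepLog(
    '123400010002000', 'browser_tests (with patch)', http_client,
    log_type='step_metadata')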
Example #8
def _UpdateRunningBuilds():
    """Syncs builds in datastore with buildbucket, return ones in progress."""
    result = []
    builds = BuildAheadTryJob.RunningJobs()
    if builds:
        build_ids = [b.BuildId for b in builds]
        updated_builds = buildbucket_client.GetTryJobs(build_ids)
        for error, build in updated_builds:
            if not error:
                if build.status == build.COMPLETED:
                    build_ahead = BuildAheadTryJob.Get(build.id)
                    build_ahead.MarkComplete(build.response)
                    if build.response['result'] == 'SUCCESS':
                        # If the build ahead was successful, record the fact that the cache
                        # is fully built at this commit position, on this particular bot.
                        _RecordFullBuild(build, build_ahead)
                else:
                    result.append(build)
    return result
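
_UpdateRunningBuilds reads build.response directly, e.g. build.response['result'] == 'SUCCESS' for a completed build-ahead job. Here is a hedged sketch of what that response dict might contain, inferred only from the keys these examples touch; real buildbucket responses carry many more fields.

# Keys inferred from the accesses in these examples; values are placeholders.
completed_build_response = {
    'id': '8000000000000000001',
    'url': 'url',
    'status': 'COMPLETED',
    'result': 'SUCCESS',  # checked by _UpdateRunningBuilds above
}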
  def _callback(self, callback_params, pipeline_id=None):
    """Updates the TryJobData entities with status from buildbucket."""
    # callback_params may have been serialized if the callback was converted to
    # a URL.
    if isinstance(callback_params, basestring):
      callback_params = json.loads(callback_params)

    self.last_params = callback_params

    _ = pipeline_id  # We do nothing with this id.

    try_job_id = callback_params['try_job_id']
    assert try_job_id

    urlsafe_try_job_key = callback_params['urlsafe_try_job_key']
    try_job_type = callback_params['try_job_type']
    deadline = callback_params['deadline']
    already_set_started = callback_params['already_set_started']
    error_count = callback_params['error_count']
    max_error_times = callback_params['max_error_times']
    default_pipeline_wait_seconds = callback_params[
        'default_pipeline_wait_seconds']
    timeout_hours = callback_params['timeout_hours']
    backoff_time = callback_params['backoff_time']

    if try_job_type == failure_type.FLAKY_TEST:
      try_job_data = FlakeTryJobData.Get(try_job_id)
    else:
      try_job_data = WfTryJobData.Get(try_job_id)

    error, build = buildbucket_client.GetTryJobs([try_job_id])[0]

    if error:
      if error_count < max_error_times:
        error_count += 1
        self.delay_callback(
            backoff_time,
            callback_params={
                'try_job_id': try_job_id,
                'try_job_type': try_job_type,
                'urlsafe_try_job_key': urlsafe_try_job_key,
                'deadline': deadline,
                'already_set_started': already_set_started,
                'error_count': error_count,
                'max_error_times': max_error_times,
                'default_pipeline_wait_seconds': default_pipeline_wait_seconds,
                'timeout_hours': timeout_hours,
                'backoff_time': backoff_time * 2,
            }
        )
        return
      else:  # pragma: no cover
        # Buildbucket has returned errors more than max_error_times; retry the
        # whole pipeline.
        _UpdateTryJobMetadata(
            try_job_data, try_job_type, build, error, False)
        raise pipeline.Retry(
            'Error "%s" occurred. Reason: "%s"' % (error.message,
                                                   error.reason))
    elif build.status == BuildbucketBuild.COMPLETED:
      swarming_task_id = buildbot.GetSwarmingTaskIdFromUrl(build.url)

      if swarming_task_id:
        try:
          report = json.loads(swarming_util.GetStepLog(
              try_job_id, 'report', HttpClientAppengine(), 'report'))
        except (ValueError, TypeError) as e:  # pragma: no cover
          report = {}
          logging.exception(
              'Failed to load result report for swarming/%s '
              'due to exception %s.' % (swarming_task_id, e.message))
      else:
        try_job_master_name, try_job_builder_name, try_job_build_number = (
            buildbot.ParseBuildUrl(build.url))

        try:
          report = json.loads(buildbot.GetStepLog(
              try_job_master_name, try_job_builder_name, try_job_build_number,
              'report', HttpClientAppengine(), 'report'))
        except (ValueError, TypeError) as e:  # pragma: no cover
          report = {}
          logging.exception(
              'Failed to load result report for %s/%s/%s due to exception %s.'
              % (try_job_master_name, try_job_builder_name,
                 try_job_build_number, e.message))

      _UpdateTryJobMetadata(
          try_job_data, try_job_type, build, error, False,
          report if report else {})
      result_to_update = self._UpdateTryJobResult(
          urlsafe_try_job_key, try_job_type, try_job_id,
          build.url, BuildbucketBuild.COMPLETED, report)
      self.complete(result_to_update[-1])
      return
    else:
      error_count = 0
      backoff_time = default_pipeline_wait_seconds
      if (build.status == BuildbucketBuild.STARTED and
          not already_set_started):
        # It is possible this branch is skipped if a fast build goes from
        # 'SCHEDULED' to 'COMPLETED' between queries, so start_time may be
        # unavailable.
        start_time = time_util.MicrosecondsToDatetime(build.updated_time)
        self._UpdateTryJobResult(
            urlsafe_try_job_key, try_job_type, try_job_id,
            build.url, BuildbucketBuild.STARTED)

        already_set_started = True

        # Update as much try job metadata as possible, as early as possible,
        # to avoid data loss in case of errors.
        try_job_data.start_time = start_time
        try_job_data.request_time = (
            time_util.MicrosecondsToDatetime(build.request_time))
        try_job_data.try_job_url = build.url
        try_job_data.callback_url = self.get_callback_url(
            callback_params=json.dumps({
                'try_job_id': try_job_id,
                'try_job_type': try_job_type,
                'urlsafe_try_job_key': urlsafe_try_job_key,
                'deadline': deadline,
                'already_set_started': already_set_started,
                'error_count': error_count,
                'max_error_times': max_error_times,
                'default_pipeline_wait_seconds': default_pipeline_wait_seconds,
                'timeout_hours': timeout_hours,
                'backoff_time': backoff_time,
            })
        )
        try_job_data.put()

    if time.time() > deadline:  # pragma: no cover
      _UpdateTryJobMetadata(
          try_job_data, try_job_type, build, error, True)
      # Explicitly abort the whole pipeline.
      raise pipeline.Abort(
          'Try job %s timed out after %d hours.' % (
              try_job_id, timeout_hours))

    # Ensure last_buildbucket_response is always the most recent
    # whenever available during intermediate queries.
    _UpdateLastBuildbucketResponse(try_job_data, build)
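
The error branch near the top of _callback implements a simple capped exponential backoff: each buildbucket error increments error_count, delays the callback by the current backoff_time, stores the doubled value for the next attempt, and gives up (retrying the whole pipeline) once max_error_times is reached. A standalone sketch of that policy follows; the function name is a placeholder, not part of the pipeline API.

# Sketch of the retry policy in the error branch above: delay by the current
# backoff_time, store the doubled value for the next attempt, and give up
# once max_error_times is reached.
def _NextRetryParams(error_count, max_error_times, backoff_time):
  """Returns (retry, delay, new_error_count, new_backoff_time)."""
  if error_count >= max_error_times:
    return False, 0, error_count, backoff_time
  return True, backoff_time, error_count + 1, backoff_time * 2

# With an initial backoff_time of, say, 60 seconds and max_error_times of 5,
# consecutive errors are retried after 60, 120, 240, 480 and 960 seconds,
# and the sixth error raises pipeline.Retry instead.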