Code example #1
    def _IsFailureFatal(self,
                        failing,
                        inflight,
                        no_stat,
                        self_destructed=False):
        """Returns a boolean indicating whether the build should fail.

    Args:
      failing: Set of build config names of builders that failed.
      inflight: Set of build config names of builders that are inflight
      no_stat: Set of build config names of builders that had status None.
      self_destructed: Boolean indicating whether this is a master which
        self-destructed and stopped waiting for the running slaves. Default to
        False.

    Returns:
      False if this is a CQ-master and the sync_stage.validation_pool hasn't
      picked up any chump CLs or new CLs, else see the return type of
      _IsFailureFatal of the parent class MasterSlaveSyncCompletionStage.
    """
        if (config_lib.IsMasterCQ(self._run.config)
                and not self.sync_stage.pool.HasPickedUpCLs()):
            # If it's a CQ-master build and the validation pool hasn't picked up any
            # CLs, no slave CQ builds have been scheduled.
            return False

        return super(CommitQueueCompletionStage,
                     self)._IsFailureFatal(failing,
                                           inflight,
                                           no_stat,
                                           self_destructed=self_destructed)
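
The pattern in example #1 (and in #2 below) is a short-circuit check: on a CQ master whose validation pool picked up no CLs, no slave builds were scheduled, so the usual failure evaluation is skipped. Below is a minimal standalone sketch of that gating using stand-in objects rather than the real chromite classes (_FakePool and is_failure_fatal are hypothetical names).

    class _FakePool(object):
        """Stand-in for sync_stage.pool with a HasPickedUpCLs() method."""

        def __init__(self, picked_up):
            self._picked_up = picked_up

        def HasPickedUpCLs(self):
            return self._picked_up


    def is_failure_fatal(is_master_cq, pool, parent_result):
        """Mirrors the early return: a CQ master with no picked-up CLs is never fatal."""
        if is_master_cq and not pool.HasPickedUpCLs():
            return False
        return parent_result


    # No CLs picked up on a CQ master: failures are not fatal.
    assert is_failure_fatal(True, _FakePool(False), True) is False
    # CLs picked up: defer to the parent-class result.
    assert is_failure_fatal(True, _FakePool(True), True) is True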
Code example #2
  def PerformStage(self):
    if (config_lib.IsMasterCQ(self._run.config) and
        not self.sync_stage.pool.HasPickedUpCLs()):
      logging.info('No new CLs or chumped CLs found to verify in this CQ run, '
                   'do not schedule CQ slaves.')
      return

    self.ScheduleSlaveBuildsViaBuildbucket(
        important_only=False, dryrun=self._run.options.debug)
Code example #3
  def _RetryBuilds(self, builds):
    """Retry builds with Buildbucket.

    Args:
      builds: config names of the builds to retry with Buildbucket.

    Returns:
      A set of retried builds.
    """
    assert builds is not None

    new_scheduled_important_slaves = []
    new_scheduled_build_reqs = []
    for build in builds:
      try:
        buildbucket_id = self.new_buildbucket_info_dict[build].buildbucket_id
        build_retry = self.new_buildbucket_info_dict[build].retry

        logging.info('Going to retry build %s buildbucket_id %s '
                     'with retry # %d',
                     build, buildbucket_id, build_retry + 1)

        if not self.dry_run:
          fields = {'build_type': self.config.build_type,
                    'build_name': self.config.name}
          metrics.Counter(constants.MON_BB_RETRY_BUILD_COUNT).increment(
              fields=fields)

        content = self.buildbucket_client.RetryBuildRequest(
            buildbucket_id, dryrun=self.dry_run)

        new_buildbucket_id = buildbucket_lib.GetBuildId(content)
        new_created_ts = buildbucket_lib.GetBuildCreated_ts(content)

        new_scheduled_important_slaves.append(
            (build, new_buildbucket_id, new_created_ts))
        new_scheduled_build_reqs.append(build_requests.BuildRequest(
            None, self.master_build_id, build, None, new_buildbucket_id,
            build_requests.REASON_IMPORTANT_CQ_SLAVE, None))

        logging.info('Retried build %s buildbucket_id %s created_ts %s',
                     build, new_buildbucket_id, new_created_ts)
      except buildbucket_lib.BuildbucketResponseException as e:
        logging.error('Failed to retry build %s buildbucket_id %s: %s',
                      build, buildbucket_id, e)

    if config_lib.IsMasterCQ(self.config) and new_scheduled_build_reqs:
      self.db.InsertBuildRequests(new_scheduled_build_reqs)

    if new_scheduled_important_slaves:
      self.metadata.ExtendKeyListWithList(
          constants.METADATA_SCHEDULED_IMPORTANT_SLAVES,
          new_scheduled_important_slaves)

    return set([build for build, _, _ in new_scheduled_important_slaves])
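
Two details of _RetryBuilds are worth noting: each build is retried inside its own try/except, so a single Buildbucket failure does not abort the remaining retries, and the return value collapses the bookkeeping tuples down to the set of config names that were actually retried. A small illustration of that last step with made-up data (the config names and ids are hypothetical):

  scheduled = [
      ('amd64-generic-paladin', '8921730000000001', 1550000000000000),
      ('arm-generic-paladin', '8921730000000002', 1550000000000001),
  ]
  retried = set(build for build, _, _ in scheduled)
  assert retried == {'amd64-generic-paladin', 'arm-generic-paladin'}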
Code example #4
  def ScheduleSlaveBuildsViaBuildbucket(self,
                                        important_only=False,
                                        dryrun=False):
    """Schedule slave builds by sending PUT requests to Buildbucket.

    Args:
      important_only: Whether to schedule only important slave builds. Defaults
        to False.
      dryrun: Whether this is a dryrun. Defaults to False.
    """
    if self.buildbucket_client is None:
      logging.info('No buildbucket_client. Skip scheduling slaves.')
      return

    build_identifier, db = self._run.GetCIDBHandle()
    build_id = build_identifier.cidb_id
    if build_id is None:
      logging.info('No build id. Skip scheduling slaves.')
      return

    # May be None. This is okay.
    master_buildbucket_id = self._run.options.buildbucket_id

    if self._run.options.cbb_snapshot_revision:
      logging.info('Parent has cbb_snapshot_rev=%s',
                   self._run.options.cbb_snapshot_revision)

    scheduled_important_slave_builds = []
    scheduled_experimental_slave_builds = []
    unscheduled_slave_builds = []
    scheduled_build_reqs = []

    # Get all active slave build configs.
    slave_config_map = self._GetSlaveConfigMap(important_only)
    for slave_config_name, slave_config in sorted(slave_config_map.items()):
      try:
        buildbucket_id, created_ts = self.PostSlaveBuildToBuildbucket(
            slave_config_name,
            slave_config,
            build_id,
            master_buildbucket_id,
            dryrun=dryrun)
        request_reason = None

        if slave_config.important:
          scheduled_important_slave_builds.append((slave_config_name,
                                                   buildbucket_id, created_ts))
          request_reason = build_requests.REASON_IMPORTANT_CQ_SLAVE
        else:
          scheduled_experimental_slave_builds.append(
              (slave_config_name, buildbucket_id, created_ts))
          request_reason = build_requests.REASON_EXPERIMENTAL_CQ_SLAVE

        scheduled_build_reqs.append(
            build_requests.BuildRequest(None, build_id, slave_config_name, None,
                                        buildbucket_id, request_reason, None))
      except buildbucket_lib.BuildbucketResponseException as e:
        # Use 16-digit ts to be consistent with the created_ts from Buildbucket
        current_ts = int(round(time.time() * 1000000))
        unscheduled_slave_builds.append((slave_config_name, None, current_ts))
        if important_only or slave_config.important:
          raise
        else:
          logging.warning('Failed to schedule %s current timestamp %s: %s',
                          slave_config_name, current_ts, e)

    if config_lib.IsMasterCQ(self._run.config) and db and scheduled_build_reqs:
      db.InsertBuildRequests(scheduled_build_reqs)

    self._run.attrs.metadata.ExtendKeyListWithList(
        constants.METADATA_SCHEDULED_IMPORTANT_SLAVES,
        scheduled_important_slave_builds)
    self._run.attrs.metadata.ExtendKeyListWithList(
        constants.METADATA_SCHEDULED_EXPERIMENTAL_SLAVES,
        scheduled_experimental_slave_builds)
    self._run.attrs.metadata.ExtendKeyListWithList(
        constants.METADATA_UNSCHEDULED_SLAVES, unscheduled_slave_builds)
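
The current_ts computed in the exception handler above is a 16-digit microsecond timestamp, chosen to match the created_ts format that Buildbucket returns for scheduled builds. A quick sketch of that conversion (the helper name is hypothetical):

  import time

  def microsecond_timestamp():
    """Current time as an integer count of microseconds since the epoch."""
    return int(round(time.time() * 1000000))

  print(microsecond_timestamp())  # e.g. 1550000000000000 (16 digits)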
Code example #5
File: simple_builders.py  Project: msisov/chromium68
    def _HandleChanges(self):
        """Handle changes picked up by the validation_pool in the sync stage."""
        if config_lib.IsMasterCQ(self._run.config):
            self._RunStage(handle_changes_stages.CommitQueueHandleChangesStage,
                           self.sync_stage, self._completion_stage)
Code example #6
    def PerformStage(self):
        if (config_lib.IsMasterCQ(self._run.config)
                and not self.sync_stage.pool.HasPickedUpCLs()):
            logging.info('No CLs have been picked up and no slaves have been '
                         'scheduled in this run. Will not publish uprevs.')
            return

        # Must be a master and have push overlays configured.
        assert self._run.config.master
        assert self._run.config.push_overlays

        staging_branch = None
        if self.stage_push:
            if not config_lib.IsMasterChromePFQ(self._run.config):
                raise ValueError(
                    'This build must be a master chrome PFQ build '
                    'when stage_push is True.')
            build_identifier, _ = self._run.GetCIDBHandle()
            buildbucket_id = build_identifier.buildbucket_id

            # If the master passed BinHostTest and all the important slaves passed
            # UploadPrebuiltsTest, push uprev commits to a staging_branch.
            if (self.CheckMasterBinhostTest(buildbucket_id)
                    and self.CheckSlaveUploadPrebuiltsTest()):
                staging_branch = ('refs/' + constants.PFQ_REF + '/' +
                                  constants.STAGING_PFQ_BRANCH_PREFIX +
                                  str(buildbucket_id))

        # If we're a commit queue, we should clean out our local changes, resync,
        # and reapply our uprevs. This is necessary so that 1) we are sure to point
        # at the remote SHA1s, not our local SHA1s; 2) we can avoid doing a
        # rebase; 3) in the case of failure and staging_branch is None, we don't
        # submit the changes that were committed locally.
        #
        # If we're not a commit queue and the build succeeded, we can skip the
        # cleanup here. This is a cheap trick so that the Chrome PFQ pushes its
        # earlier uprev from the SyncChrome stage (it would be a bit tricky to
        # replicate the uprev here, so we'll leave it alone).

        # If we're not a commit queue and staging_branch is not None, we can skip
        # the cleanup here. When staging_branch is not None, we're going to push
        # the local commits generated in AFDOUpdateEbuild stage to the
        # staging_branch, cleaning up repository here will wipe out the local
        # commits.
        if (config_lib.IsCQType(self._run.config.build_type)
                or not (self.success or staging_branch is not None)):
            repo = self.GetRepoRepository()

            # Clean up our root and sync down the latest changes that were
            # submitted.
            repo.BuildRootGitCleanup(self._build_root)

            # Sync down the latest changes we have submitted.
            if self._run.options.sync:
                next_manifest = self._run.config.manifest
                repo.Sync(next_manifest)

            # Commit uprev and portage cache regeneration locally.
            if self._run.options.uprev and self._run.config.uprev:
                commands.UprevPackages(self._build_root,
                                       self._boards,
                                       overlay_type=self._run.config.overlays)
                push_overlays = portage_util.FindOverlays(
                    self._run.config.push_overlays, buildroot=self._build_root)
                commands.RegenPortageCache(push_overlays)

        # When prebuilts is True, if it's a successful run or staging_branch is
        # not None for a master-chrome-pfq run, update binhost conf
        if (self._run.config.prebuilts
                and (self.success or staging_branch is not None)):
            confwriter = prebuilts.BinhostConfWriter(self._run)
            confwriter.Perform()

        # Push the uprev, portage cache, and binhost commits.
        commands.UprevPush(self._build_root,
                           overlay_type=self._run.config.push_overlays,
                           dryrun=self._run.options.debug,
                           staging_branch=staging_branch)
        if config_lib.IsMasterAndroidPFQ(self._run.config) and self.success:
            self._run.attrs.metadata.UpdateWithDict({'UprevvedAndroid': True})
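
For a master Chrome PFQ run with stage_push enabled, the uprev commits go to a per-build staging ref rather than straight to the tree. Below is a sketch of how that ref string is assembled, using placeholder values for PFQ_REF and STAGING_PFQ_BRANCH_PREFIX (the real values live in chromite's constants module).

    PFQ_REF = 'pfq'                         # placeholder for constants.PFQ_REF
    STAGING_PFQ_BRANCH_PREFIX = 'staging_'  # placeholder for the real prefix


    def staging_branch_for(buildbucket_id):
        """Builds the staging ref that the uprev commits are pushed to."""
        return ('refs/' + PFQ_REF + '/' + STAGING_PFQ_BRANCH_PREFIX +
                str(buildbucket_id))


    print(staging_branch_for(8921730000000001))
    # e.g. refs/pfq/staging_8921730000000001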
Code example #7
    def PerformStage(self):
        """Perform the actual work for this stage.

    This includes final metadata archival, and update CIDB with our final status
    as well as producting a logged build result summary.
    """
        build_identifier, _ = self._run.GetCIDBHandle()
        build_id = build_identifier.cidb_id
        buildbucket_id = build_identifier.buildbucket_id
        if results_lib.Results.BuildSucceededSoFar(self.buildstore,
                                                   buildbucket_id, self.name):
            final_status = constants.BUILDER_STATUS_PASSED
        else:
            final_status = constants.BUILDER_STATUS_FAILED

        if not hasattr(self._run.attrs, 'release_tag'):
            # If, for some reason, the sync stage was not completed and
            # release_tag was not set, set it to None here because
            # ArchiveResults() depends on the existence of this attr.
            self._run.attrs.release_tag = None

        # Set up our report metadata.
        self._run.attrs.metadata.UpdateWithDict(
            self.GetReportMetadata(
                final_status=final_status,
                completion_instance=self._completion_instance))

        src_root = self._build_root
        # Workspace builders use a different buildroot for overlays.
        if self._run.config.workspace_branch and self._run.options.workspace:
            src_root = self._run.options.workspace

        # Add tags for the arches and statuses of the build.
        # arches requires crossdev which isn't available at the early part of the
        # build.
        arches = []
        for board in self._run.config['boards']:
            toolchains = toolchain.GetToolchainsForBoard(board,
                                                         buildroot=src_root)
            default = list(
                toolchain.FilterToolchains(toolchains, 'default', True))
            if default:
                try:
                    arches.append(toolchain.GetArchForTarget(default[0]))
                except cros_build_lib.RunCommandError as e:
                    logging.warning(
                        'Unable to retrieve arch for board %s default toolchain %s: %s',
                        board, default, e)
        tags = {
            'arches': arches,
            'status': final_status,
        }
        results = self._run.attrs.metadata.GetValue('results')
        for stage in results:
            tags['stage_status:%s' % stage['name']] = stage['status']
            tags['stage_summary:%s' % stage['name']] = stage['summary']
        self._run.attrs.metadata.UpdateKeyDictWithDict(constants.METADATA_TAGS,
                                                       tags)

        # Some operations can only be performed if a valid version is available.
        try:
            self._run.GetVersionInfo()
            self.ArchiveResults(final_status)
            metadata_url = os.path.join(self.upload_url,
                                        constants.METADATA_JSON)
        except cbuildbot_run.VersionNotSetError:
            logging.error('A valid version was never set for this run. '
                          'Can not archive results.')
            metadata_url = ''

        results_lib.Results.Report(sys.stdout,
                                   current_version=(self._run.attrs.release_tag
                                                    or ''))

        # Upload goma log if used for BuildPackage and TestSimpleChrome.
        _UploadAndLinkGomaLogIfNecessary(
            'BuildPackages', self._run.config.name, self._run.options.goma_dir,
            self._run.options.goma_client_json,
            self._run.attrs.metadata.GetValueWithDefault('goma_tmp_dir'))
        _UploadAndLinkGomaLogIfNecessary(
            'TestSimpleChromeWorkflow', self._run.config.name,
            self._run.options.goma_dir, self._run.options.goma_client_json,
            self._run.attrs.metadata.GetValueWithDefault(
                'goma_tmp_dir_for_simple_chrome'))

        if self.buildstore.AreClientsReady():
            status_for_db = final_status

            # TODO(pprabhu): After BuildData and CBuildbotMetadata are merged, remove
            # this extra temporary object creation.
            # XXX:HACK We're creating a BuildData with an empty URL. Don't try to
            # MarkGathered this object.
            build_data = metadata_lib.BuildData(
                '', self._run.attrs.metadata.GetDict())
            # TODO(akeshet): Find a clearer way to get the "primary upload url" for
            # the metadata.json file. One alternative is _GetUploadUrls(...)[0].
            # Today it seems that element 0 of its return list is the primary upload
            # url, but there is no guarantee or unit test coverage of that.
            self.buildstore.FinishBuild(build_id,
                                        status=status_for_db,
                                        summary=build_data.failure_message,
                                        metadata_url=metadata_url)

            duration = self._GetBuildDuration()

            mon_fields = {
                'status': status_for_db,
                'build_config': self._run.config.name,
                'important': self._run.config.important
            }
            metrics.Counter(
                constants.MON_BUILD_COMP_COUNT).increment(fields=mon_fields)
            metrics.CumulativeSecondsDistribution(
                constants.MON_BUILD_DURATION).add(duration, fields=mon_fields)

            if self._run.options.sanity_check_build:
                metrics.Counter(
                    constants.MON_BUILD_SANITY_COMP_COUNT).increment(
                        fields=mon_fields)
                metrics.Gauge(
                    constants.MON_BUILD_SANITY_ID,
                    description=
                    'The build number of the latest sanity build. Used '
                    'for recovering the link to the latest failing build '
                    'in the alert when a sanity build fails.',
                    field_spec=[
                        ts_mon.StringField('status'),
                        ts_mon.StringField('build_config'),
                        ts_mon.StringField('builder_name'),
                        ts_mon.BooleanField('important')
                    ]).set(self._run.buildnumber,
                           fields=dict(
                               mon_fields,
                               builder_name=self._run.GetBuilderName()))

            if config_lib.IsMasterCQ(self._run.config):
                self_destructed = self._run.attrs.metadata.GetValueWithDefault(
                    constants.SELF_DESTRUCTED_BUILD, False)
                mon_fields = {
                    'status': status_for_db,
                    'self_destructed': self_destructed
                }
                metrics.CumulativeSecondsDistribution(
                    constants.MON_CQ_BUILD_DURATION).add(duration,
                                                         fields=mon_fields)
                annotator_link = uri_lib.ConstructAnnotatorUri(build_id)
                logging.PrintBuildbotLink('Build annotator', annotator_link)

            # From this point forward, treat all exceptions as warnings.
            self._post_completion = True

            # Dump report about things we retry.
            retry_stats.ReportStats(sys.stdout)
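
The monitoring tags assembled in example #7 flatten each stage's result into 'stage_status:<name>' and 'stage_summary:<name>' keys before being written under METADATA_TAGS. A small sketch of that flattening with stand-in results data (the stage names and statuses are illustrative only):

    results = [
        {'name': 'BuildPackages', 'status': 'passed', 'summary': ''},
        {'name': 'UnitTest', 'status': 'failed', 'summary': 'unit tests failed'},
    ]
    tags = {'arches': ['amd64'], 'status': 'failed'}
    for stage in results:
        tags['stage_status:%s' % stage['name']] = stage['status']
        tags['stage_summary:%s' % stage['name']] = stage['summary']

    print(tags['stage_status:UnitTest'])   # failed
    print(tags['stage_summary:UnitTest'])  # unit tests failed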