Example 1
def FetchCurrentSlaveBuilders(config, metadata, builders_array,
                              exclude_experimental=True):
  """Fetch the current important slave builds.

  Args:
    config: Instance of config_lib.BuildConfig. Config dict of this build.
    metadata: Instance of metadata_lib.CBuildbotMetadata. Metadata of this
              build.
    builders_array: A list of slave build configs to check.
    exclude_experimental: Whether to exclude the builds which are important in
      the config but are marked as experimental in the tree status. Default to
      True.

  Returns:
    An updated list of slave build configs for a master build which uses
    Buildbucket to schedule slaves; or the original builders_array for other
    masters.
  """
  if (config is not None and
      metadata is not None and
      config_lib.UseBuildbucketScheduler(config)):
    scheduled_buildbucket_info_dict = GetBuildInfoDict(
        metadata, exclude_experimental=exclude_experimental)
    # list() keeps the documented return type under Python 3, where
    # dict.keys() is a view rather than a list.
    return list(scheduled_buildbucket_info_dict.keys())
  else:
    return builders_array
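
A minimal usage sketch, assuming hypothetical master_config and master_metadata objects in place of real config_lib.BuildConfig and metadata_lib.CBuildbotMetadata instances (the builder names are also made up):

builders = ['amd64-generic-paladin', 'arm-generic-paladin']  # assumed names
current = FetchCurrentSlaveBuilders(master_config, master_metadata, builders)
# Masters that don't use the Buildbucket scheduler get the input list back:
assert FetchCurrentSlaveBuilders(None, None, builders) == builders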
Example 2
    def GetBuildbucketClient(self):
        """Build a buildbucket_client instance for Buildbucket related operations.

        Returns:
          An instance of buildbucket_lib.BuildbucketClient if the build is using
          Buildbucket as the scheduler; else, None.
        """
        buildbucket_client = None

        if config_lib.UseBuildbucketScheduler(self._run.config):
            if buildbucket_lib.GetServiceAccount(
                    constants.CHROMEOS_SERVICE_ACCOUNT):
                buildbucket_client = buildbucket_lib.BuildbucketClient(
                    auth.GetAccessToken,
                    None,
                    service_account_json=constants.CHROMEOS_SERVICE_ACCOUNT)

            if buildbucket_client is None and self._run.InProduction():
                # A production build on a buildbot that uses Buildbucket must
                # have a usable buildbucket_client.
                raise buildbucket_lib.NoBuildbucketClientException(
                    'Buildbucket_client is None. '
                    'Please check if the buildbot has a valid service account file. '
                    'Please find the service account json file at %s.' %
                    constants.CHROMEOS_SERVICE_ACCOUNT)

        return buildbucket_client
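
A hedged call-site sketch; the stage object and the logging fallback are assumptions, only GetBuildbucketClient itself comes from the snippet above:

import logging  # stands in for the logging module used in these snippets

client = stage.GetBuildbucketClient()  # stage: hypothetical stage instance
if client is None:
    # Either this build doesn't use the Buildbucket scheduler, or it runs
    # outside production without a valid service account file.
    logging.info('No Buildbucket client; skipping Buildbucket operations.')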
Example 3
    def _AnnotateNoStatBuilders(self, no_stat):
        """Annotate the no stat builds.

        Args:
          no_stat: Set of build config names of slaves that had status None.
        """
        if config_lib.UseBuildbucketScheduler(self._run.config):
            self._AnnotateBuildStatusFromBuildbucket(no_stat)
        else:
            for build in no_stat:
                self._PrintBuildMessage('%s: did not start' % build)
Example 4
    def UpdateSlaveStatus(self):
        """Update slave statuses by querying CIDB and Buildbucket(if supported)."""
        logging.info('Updating slave status...')

        # Fetch experimental builders from the tree status and update the
        # experimental builders in metadata before querying and updating any
        # slave status.
        if self.metadata is not None:
            # Default to an empty list so the filter below still runs if the
            # tree status query times out.
            experimental_builders = []
            try:
                experimental_builders = tree_status.GetExperimentalBuilders()
                self.metadata.UpdateWithDict({
                    constants.METADATA_EXPERIMENTAL_BUILDERS:
                    experimental_builders
                })
            except timeout_util.TimeoutError:
                logging.error(
                    'Timeout getting experimental builders from the tree '
                    'status. Not updating metadata.')

            # If a slave build was important in previous loop and got added to the
            # completed_builds because it completed, but in the current loop it's
            # marked as experimental, take it out from the completed_builds list.
            self.completed_builds = set([
                build for build in self.completed_builds
                if build not in experimental_builders
            ])

        if (self.config is not None and self.metadata is not None
                and config_lib.UseBuildbucketScheduler(self.config)):
            scheduled_buildbucket_info_dict = buildbucket_lib.GetBuildInfoDict(
                self.metadata)
            # It's possible that the CQ master has a list of important slaves
            # configured but scheduled none of them because no CLs were picked
            # up in SyncStage; the dict below therefore contains only the
            # important builds that were actually scheduled.
            self.all_builders = list(scheduled_buildbucket_info_dict.keys())
            self.all_buildbucket_info_dict = (
                builder_status_lib.SlaveBuilderStatus.
                GetAllSlaveBuildbucketInfo(self.buildbucket_client,
                                           scheduled_buildbucket_info_dict,
                                           dry_run=self.dry_run))
            self.new_buildbucket_info_dict = self._GetNewSlaveBuildbucketInfo(
                self.all_buildbucket_info_dict, self.completed_builds)
            self._SetStatusBuildsDict()

        self.all_cidb_status_dict = (
            builder_status_lib.SlaveBuilderStatus.GetAllSlaveCIDBStatusInfo(
                self.db, self.master_build_id, self.all_buildbucket_info_dict))
        self.new_cidb_status_dict = self._GetNewSlaveCIDBStatusInfo(
            self.all_cidb_status_dict, self.completed_builds)

        self.missing_builds = self._GetMissingBuilds()
        self.scheduled_builds = self._GetScheduledBuilds()
        self.builds_to_retry = self._GetBuildsToRetry()
        self.completed_builds = self._GetCompletedBuilds()
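
A hedged sketch of the polling loop this method is built for; the loop shape, completion test and sleep interval are assumptions, while the attribute names come from the method above:

import time

while True:
    slave_status.UpdateSlaveStatus()  # slave_status: hypothetical instance
    # Illustrative completion test: nothing missing, scheduled or retryable.
    if not (slave_status.missing_builds or slave_status.scheduled_builds
            or slave_status.builds_to_retry):
        break
    time.sleep(60)  # assumed poll interval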
Example 5
    def RunEarlySyncAndSetupStages(self):
        """Runs through the early sync and board setup stages."""
        # If this build is a master and uses the Buildbucket scheduler, run
        # scheduler_stages.ScheduleSlavesStage to schedule slaves.
        if (config_lib.UseBuildbucketScheduler(self._run.config)
                and config_lib.IsMasterBuild(self._run.config)):
            self._RunStage(scheduler_stages.ScheduleSlavesStage,
                           self.sync_stage)
        self._RunStage(build_stages.UprevStage)
        self._RunStage(build_stages.InitSDKStage)
        self._RunStage(build_stages.RegenPortageCacheStage)
        self.RunSetupBoard()
        self._RunStage(chrome_stages.SyncChromeStage)
        self._RunStage(android_stages.UprevAndroidStage)
        self._RunStage(android_stages.AndroidMetadataStage)
Example 6
    def GetScheduledSlaveBuildbucketIds(self):
        """Get buildbucket_ids list of the scheduled slave builds.

        Returns:
          If slaves were scheduled by Buildbucket, return a list of
          buildbucket_ids (strings) of the slave builds. The list doesn't
          contain the old builds which were retried in Buildbucket.
          If slaves were scheduled by git commits, return None.
        """
        buildbucket_ids = None
        if (config_lib.UseBuildbucketScheduler(self._run.config)
                and config_lib.IsMasterBuild(self._run.config)):
            buildbucket_ids = buildbucket_lib.GetBuildbucketIds(
                self._run.attrs.metadata)

        return buildbucket_ids
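
A small caller sketch; only the None-versus-list contract comes from the docstring above, the rest is illustrative:

import logging  # stands in for the logging module used in these snippets

ids = stage.GetScheduledSlaveBuildbucketIds()  # stage: hypothetical instance
if ids is None:
    logging.info('Slaves were scheduled by git commits.')
else:
    logging.info('%d slave builds were scheduled via Buildbucket.', len(ids))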
Example 7
    def _RunMasterPaladinOrPFQBuild(self):
        """Runs through the stages of the paladin or chrome PFQ master build."""
        # If this master build uses the Buildbucket scheduler, run
        # scheduler_stages.ScheduleSlavesStage to schedule slaves.
        if config_lib.UseBuildbucketScheduler(self._run.config):
            self._RunStage(scheduler_stages.ScheduleSlavesStage,
                           self.sync_stage)
        self._RunStage(build_stages.UprevStage)
        self._RunStage(build_stages.InitSDKStage)
        # The CQ/Chrome PFQ master will not actually run the SyncChrome stage,
        # but we want the logic that gets triggered when SyncChrome stage is
        # skipped.
        self._RunStage(chrome_stages.SyncChromeStage)
        self._RunStage(android_stages.UprevAndroidStage)
        self._RunStage(android_stages.AndroidMetadataStage)
        if self._run.config.build_type == constants.PALADIN_TYPE:
            self._RunStage(build_stages.RegenPortageCacheStage)
        self._RunStage(test_stages.BinhostTestStage)
        self._RunStage(test_stages.BranchUtilTestStage)
Example 8
    def _InitSlaveInfo(self):
        """Init slave info including buildbucket info, cidb info and failures."""
        if config_lib.UseBuildbucketScheduler(self.config):
            scheduled_buildbucket_info_dict = buildbucket_lib.GetBuildInfoDict(
                self.metadata, exclude_experimental=self.exclude_experimental)
            self.buildbucket_info_dict = self.GetAllSlaveBuildbucketInfo(
                self.buildbucket_client,
                scheduled_buildbucket_info_dict,
                dry_run=self.dry_run)
            # list() keeps a stable list under Python 3 (keys() is a view).
            self.builders_array = list(self.buildbucket_info_dict.keys())

        self.cidb_info_dict = self.GetAllSlaveCIDBStatusInfo(
            self.db, self.master_build_id, self.buildbucket_info_dict)

        self.slave_failures_dict = self._GetSlaveFailures(
            self.buildbucket_info_dict)

        self.aborted_slaves = self._GetSlavesAbortedBySelfDestruction(
            self.cidb_info_dict)
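
An illustrative consumer of the attributes set above; the loop is an assumption about how callers read the data, while the attribute names come from the snippet (buildbucket_info_dict is presumably initialized to None elsewhere for masters that don't use Buildbucket):

status._InitSlaveInfo()  # status: hypothetical SlaveBuilderStatus-like object
for builder in status.builders_array:
    cidb_info = status.cidb_info_dict.get(builder)      # may be None
    failures = status.slave_failures_dict.get(builder)  # shape is assumed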
Example 9
    def PerformStage(self):
        """Clean up the build root, chroot and stale artifacts before building."""
        if (not (self._run.options.buildbot or self._run.options.remote_trybot)
                and self._run.options.clobber):
            if not commands.ValidateClobber(self._build_root):
                cros_build_lib.Die("--clobber in local mode must be approved.")

        # If we can't get a manifest out of it, then it's not usable and must be
        # clobbered.
        manifest = None
        delete_chroot = False
        if not self._run.options.clobber:
            try:
                manifest = git.ManifestCheckout.Cached(self._build_root,
                                                       search=False)
            except (KeyboardInterrupt, MemoryError, SystemExit):
                raise
            except Exception as e:
                # Either there is no repo there, or the manifest isn't usable.  If the
                # directory exists, log the exception for debugging reasons.  Either
                # way, the checkout needs to be wiped since it's in an unknown
                # state.
                if os.path.exists(self._build_root):
                    logging.warning("ManifestCheckout at %s is unusable: %s",
                                    self._build_root, e)
                delete_chroot = True

        # Clean mount points first to be safe about deleting.
        chroot_path = os.path.join(self._build_root,
                                   constants.DEFAULT_CHROOT_DIR)
        cros_sdk_lib.CleanupChrootMount(chroot=chroot_path)
        osutils.UmountTree(self._build_root)

        if not delete_chroot:
            delete_chroot = not self.CanReuseChroot(chroot_path)

        # If we're going to delete the chroot and we can use a snapshot instead,
        # try to revert.  If the revert succeeds, we don't need to delete after all.
        if delete_chroot and self.CanUseChrootSnapshotToDelete(chroot_path):
            delete_chroot = not self._RevertChrootToCleanSnapshot()

        # Re-mount chroot image if it exists so that subsequent steps can clean up
        # inside.
        if not delete_chroot and self._run.config.chroot_use_image:
            try:
                cros_sdk_lib.MountChroot(chroot=chroot_path, create=False)
            except cros_build_lib.RunCommandError as e:
                logging.error(
                    'Unable to mount chroot under %s.  Deleting chroot.  '
                    'Error: %s', self._build_root, e)
                delete_chroot = True

        if manifest is None:
            self._DeleteChroot()
            repository.ClearBuildRoot(self._build_root,
                                      self._run.options.preserve_paths)
        else:
            tasks = [
                self._BuildRootGitCleanup, self._WipeOldOutput,
                self._DeleteArchivedTrybotImages,
                self._DeleteArchivedPerfResults,
                self._DeleteAutotestSitePackages
            ]
            if self._run.options.chrome_root:
                tasks.append(self._DeleteChromeBuildOutput)
            if delete_chroot:
                tasks.append(self._DeleteChroot)
            else:
                tasks.append(self._CleanChroot)

            # Only enable CancelObsoleteSlaveBuilds on master builds that use
            # the Buildbucket scheduler; it checks for builds on the ChromiumOS
            # and ChromeOS waterfalls.
            if (config_lib.UseBuildbucketScheduler(self._run.config)
                    and config_lib.IsMasterBuild(self._run.config)):
                tasks.append(self.CancelObsoleteSlaveBuilds)

            parallel.RunParallelSteps(tasks)

        # If chroot.img still exists after everything is cleaned up, it means we're
        # planning to reuse it. This chroot was created by the previous run, so its
        # creation isn't affected by any potential changes in the current run.
        # Therefore, if this run fails, having the subsequent run revert to this
        # snapshot will still produce a clean chroot.  If this run succeeds, the
        # next run will reuse the chroot without needing to revert it.  Thus, taking
        # a snapshot now should be correct regardless of whether this run will
        # ultimately succeed or not.
        if os.path.exists(chroot_path + '.img'):
            self._CreateCleanSnapshot()