Example #1
    def testValidUnifiedMasterConfig(self):
        """Make sure any unified master configurations are valid."""
        for build_name, config in self.site_config.iteritems():
            error = 'Unified config for %s has invalid values' % build_name
            # Unified masters must be internal and must rev both overlays.
            if config['master']:
                self.assertTrue(
                    config['internal'] and config['manifest_version'], error)
            elif not config['master'] and config['manifest_version']:
                # Unified slaves can rev either public or both depending on
                # whether they are internal or not.
                if not config['internal']:
                    self.assertEqual(config['overlays'],
                                     constants.PUBLIC_OVERLAYS, error)
                elif config_lib.IsCQType(config['build_type']):
                    self.assertEqual(config['overlays'],
                                     constants.BOTH_OVERLAYS, error)
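
The assertions above lean on config_lib.IsCQType() together with the overlay constants. As a point of reference, here is a minimal sketch of what such a predicate typically reduces to; the paladin comparison is an assumption, not verified against the current chromite source:

    # Illustrative sketch only; the real helper lives in chromite.lib.config_lib.
    # Assumption: a commit-queue build is identified by the paladin build type.
    from chromite.lib import constants

    def IsCQType(build_type):
        """Return True if |build_type| looks like a commit-queue (paladin) build."""
        return build_type == constants.PALADIN_TYPE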
Example #2
    def GetSyncInstance(self):
        """Syncs the tree using one of the distributed sync logic paths.

    Returns:
      The instance of the sync stage to run.
    """
        # Determine sync class to use.  CQ overrides PFQ bits so should check it
        # first.
        if self._run.config.pre_cq:
            sync_stage = self._GetStageInstance(sync_stages.PreCQSyncStage,
                                                self.patch_pool.gerrit_patches)
            self.completion_stage_class = completion_stages.PreCQCompletionStage
            self.patch_pool.gerrit_patches = []
        elif config_lib.IsCQType(self._run.config.build_type):
            if self._run.config.do_not_apply_cq_patches:
                sync_stage = self._GetStageInstance(
                    sync_stages.MasterSlaveLKGMSyncStage)
            else:
                sync_stage = self._GetStageInstance(
                    sync_stages.CommitQueueSyncStage)
            self.completion_stage_class = completion_stages.CommitQueueCompletionStage
        elif config_lib.IsPFQType(self._run.config.build_type):
            sync_stage = self._GetStageInstance(
                sync_stages.MasterSlaveLKGMSyncStage)
            self.completion_stage_class = (
                completion_stages.MasterSlaveSyncCompletionStage)
        elif config_lib.IsCanaryType(self._run.config.build_type):
            sync_stage = self._GetStageInstance(
                sync_stages.ManifestVersionedSyncStage)
            self.completion_stage_class = (
                completion_stages.CanaryCompletionStage)
        elif self._run.config.build_type == constants.TOOLCHAIN_TYPE:
            sync_stage = self._GetStageInstance(
                sync_stages.MasterSlaveLKGMSyncStage)
            self.completion_stage_class = (
                completion_stages.MasterSlaveSyncCompletionStage)
        else:
            sync_stage = self._GetStageInstance(
                sync_stages.ManifestVersionedSyncStage)
            self.completion_stage_class = (
                completion_stages.ManifestVersionedSyncCompletionStage)

        self.sync_stage = sync_stage
        return self.sync_stage
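
A hypothetical call site, only to show how the returned stage is meant to be consumed; 'builder' below is a placeholder object, and Run() is assumed to be the usual stage entry point used by cbuildbot stages:

    # Hypothetical usage sketch (not taken from the source above).
    sync_stage = builder.GetSyncInstance()   # pick the sync stage for this config
    sync_stage.Run()                         # run it before the remaining build stages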
Example #3
    def GetSyncInstance(self):
        """Syncs the tree using one of the distributed sync logic paths.

    Returns:
      The instance of the sync stage to run.
    """
        # Determine sync class to use.  CQ overrides PFQ bits so should check it
        # first.
        if self._run.config.pre_cq:
            assert False, 'Pre-CQ no longer supported'
        elif config_lib.IsCQType(self._run.config.build_type):
            assert False, 'Legacy CQ no longer supported'
            if self._run.config.do_not_apply_cq_patches:
                sync_stage = self._GetStageInstance(
                    sync_stages.MasterSlaveLKGMSyncStage)
        elif config_lib.IsCanaryType(self._run.config.build_type):
            sync_stage = self._GetStageInstance(
                sync_stages.ManifestVersionedSyncStage)
            self.completion_stage_class = (
                completion_stages.CanaryCompletionStage)
        elif self._run.config.build_type == constants.CHROME_PFQ_TYPE:
            assert False, 'Chrome PFQ no longer supported'
        elif (config_lib.IsPFQType(self._run.config.build_type)
              or self._run.config.build_type
              in (constants.TOOLCHAIN_TYPE, constants.FULL_TYPE,
                  constants.INCREMENTAL_TYPE, constants.POSTSUBMIT_TYPE)):
            sync_stage = self._GetStageInstance(
                sync_stages.MasterSlaveLKGMSyncStage)
            self.completion_stage_class = (
                completion_stages.MasterSlaveSyncCompletionStage)
        else:
            sync_stage = self._GetStageInstance(
                sync_stages.ManifestVersionedSyncStage)
            self.completion_stage_class = (
                completion_stages.ManifestVersionedSyncCompletionStage)

        self.sync_stage = sync_stage
        return self.sync_stage
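
Compared with Example #2, the Pre-CQ, legacy CQ, and Chrome PFQ branches have been reduced to asserts. Distilled as a lookup, the remaining dispatch looks roughly like the sketch below; this is purely illustrative, since the real method keeps the if/elif chain (several branches also set extra state):

    # Illustrative summary only; not how the code above is organized.
    _SYNC_AND_COMPLETION = {
        constants.CANARY_TYPE: (sync_stages.ManifestVersionedSyncStage,
                                completion_stages.CanaryCompletionStage),
        constants.TOOLCHAIN_TYPE: (sync_stages.MasterSlaveLKGMSyncStage,
                                   completion_stages.MasterSlaveSyncCompletionStage),
        # PFQ, full, incremental, and postsubmit types also map to the
        # MasterSlaveLKGMSyncStage pair; everything else falls back to
        # ManifestVersionedSyncStage + ManifestVersionedSyncCompletionStage.
    }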
Example #4
  def PerformStage(self):
    if self.suite_config.suite == constants.HWTEST_AFDO_SUITE:
      arch = self._GetPortageEnvVar('ARCH', self._current_board)
      cpv = portage_util.BestVisible(constants.CHROME_CP,
                                     buildroot=self._build_root)
      if afdo.CheckAFDOPerfData(cpv, arch, gs.GSContext()):
        logging.info('AFDO profile already generated for arch %s '
                     'and Chrome %s. Not generating it again',
                     arch, cpv.version_no_rev.split('_')[0])
        return

    if self.suite_config.suite in [constants.HWTEST_CTS_FOLLOWER_SUITE,
                                   constants.HWTEST_CTS_QUAL_SUITE,
                                   constants.HWTEST_GTS_QUAL_SUITE]:
      # Increase the priority for the CTS/GTS qualification suites: we want the
      # stable build to have a higher priority than the beta build, which in
      # turn should be higher than dev.
      try:
        cros_vers = self._run.GetVersionInfo().VersionString().split('.')
        if not isinstance(self.suite_config.priority, (int, long)):
          # Convert CTS/GTS priority to corresponding integer value.
          self.suite_config.priority = constants.HWTEST_PRIORITIES_MAP[
              self.suite_config.priority]
        # We add 1/10 of the branch version to the priority. This results in a
        # modest priority bump the older the branch is. Typically beta priority
        # would be dev + [1..4] and stable priority dev + [5..9].
        self.suite_config.priority += int(math.ceil(float(cros_vers[1]) / 10.0))
      except cbuildbot_run.VersionNotSetError:
        logging.debug('Could not obtain version info. %s will use initial '
                      'priority value: %s', self.suite_config.suite,
                      self.suite_config.priority)

    build = '/'.join([self._bot_id, self.version])

    # Get the subsystems set for the board to test
    if self.suite_config.suite == constants.HWTEST_PROVISION_SUITE:
      subsystems = set()
    else:
      subsystems = self._GetSubsystems()

    skip_duts_check = False
    if config_lib.IsCanaryType(self._run.config.build_type):
      skip_duts_check = True

    build_id, db = self._run.GetCIDBHandle()

    test_args = None
    if config_lib.IsCQType(self._run.config.build_type):
      test_args = {'fast': 'True'}

    cmd_result = commands.RunHWTestSuite(
        build, self.suite_config.suite, self._board_name,
        model=self._model,
        pool=self.suite_config.pool,
        file_bugs=self.suite_config.file_bugs,
        wait_for_results=self.wait_for_results,
        priority=self.suite_config.priority,
        timeout_mins=self.suite_config.timeout_mins,
        retry=self.suite_config.retry,
        max_retries=self.suite_config.max_retries,
        minimum_duts=self.suite_config.minimum_duts,
        suite_min_duts=self.suite_config.suite_min_duts,
        suite_args=self.suite_config.suite_args,
        offload_failures_only=self.suite_config.offload_failures_only,
        debug=not self.TestsEnabled(self._run),
        subsystems=subsystems,
        skip_duts_check=skip_duts_check,
        job_keyvals=self.GetJobKeyvals(),
        test_args=test_args)

    if config_lib.IsCQType(self._run.config.build_type):
      self.ReportHWTestResults(cmd_result.json_dump_result, build_id, db)

    subsys_tuple = self.GenerateSubsysResult(cmd_result.json_dump_result,
                                             subsystems)
    if db:
      if not subsys_tuple:
        db.InsertBuildMessage(build_id, message_type=constants.SUBSYSTEMS,
                              message_subtype=constants.SUBSYSTEM_UNUSED,
                              board=self._current_board)
      else:
        logging.info('pass_subsystems: %s, fail_subsystems: %s',
                     subsys_tuple[0], subsys_tuple[1])
        for s in subsys_tuple[0]:
          db.InsertBuildMessage(build_id, message_type=constants.SUBSYSTEMS,
                                message_subtype=constants.SUBSYSTEM_PASS,
                                message_value=str(s), board=self._current_board)
        for s in subsys_tuple[1]:
          db.InsertBuildMessage(build_id, message_type=constants.SUBSYSTEMS,
                                message_subtype=constants.SUBSYSTEM_FAIL,
                                message_value=str(s), board=self._current_board)
    if cmd_result.to_raise:
      raise cmd_result.to_raise
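
The CTS/GTS priority bump above is easier to follow with concrete numbers; a worked example (the version string and base priority are made up):

    import math

    # Worked example of the priority bump computed in PerformStage above.
    # '12871.46.0' is a made-up ChromeOS version; cros_vers[1] is the branch number.
    cros_vers = '12871.46.0'.split('.')
    bump = int(math.ceil(float(cros_vers[1]) / 10.0))   # ceil(46 / 10) == 5
    priority = 50 + bump                                # 50 is a placeholder base priority
    assert (bump, priority) == (5, 55)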
Example #5
    def PerformStage(self):
        if (config_lib.IsMasterCQ(self._run.config)
                and not self.sync_stage.pool.HasPickedUpCLs()):
            logging.info('No CLs have been picked up and no slaves have been '
                         'scheduled in this run. Will not publish uprevs.')
            return

        # This stage must run on a master builder with push overlays configured.
        assert self._run.config.master
        assert self._run.config.push_overlays

        staging_branch = None
        if self.stage_push:
            if not config_lib.IsMasterChromePFQ(self._run.config):
                raise ValueError(
                    'This build must be a master chrome PFQ build '
                    'when stage_push is True.')
            build_identifier, _ = self._run.GetCIDBHandle()
            buildbucket_id = build_identifier.buildbucket_id

            # If the master passed BinHostTest and all the important slaves passed
            # UploadPrebuiltsTest, push uprev commits to a staging_branch.
            if (self.CheckMasterBinhostTest(buildbucket_id)
                    and self.CheckSlaveUploadPrebuiltsTest()):
                staging_branch = ('refs/' + constants.PFQ_REF + '/' +
                                  constants.STAGING_PFQ_BRANCH_PREFIX +
                                  str(buildbucket_id))

        # If we're a commit queue, we should clean out our local changes, resync,
        # and reapply our uprevs. This is necessary so that 1) we are sure to point
        # at the remote SHA1s, not our local SHA1s; 2) we can avoid doing a
        # rebase; 3) if the build failed and staging_branch is None, we don't
        # submit the changes that were committed locally.
        #
        # If we're not a commit queue and the build succeeded, we can skip the
        # cleanup here. This is a cheap trick so that the Chrome PFQ pushes its
        # earlier uprev from the SyncChrome stage (it would be a bit tricky to
        # replicate the uprev here, so we'll leave it alone).

        # If we're not a commit queue and staging_branch is not None, we can skip
        # the cleanup here. When staging_branch is not None, we're going to push
        # the local commits generated in the AFDOUpdateEbuild stage to the
        # staging_branch; cleaning up the repository here would wipe out those
        # local commits.
        if (config_lib.IsCQType(self._run.config.build_type)
                or not (self.success or staging_branch is not None)):
            repo = self.GetRepoRepository()

            # Clean up our root and sync down the latest changes that were
            # submitted.
            repo.BuildRootGitCleanup(self._build_root)

            # Sync down the latest changes we have submitted.
            if self._run.options.sync:
                next_manifest = self._run.config.manifest
                repo.Sync(next_manifest)

            # Commit uprev and portage cache regeneration locally.
            if self._run.options.uprev and self._run.config.uprev:
                commands.UprevPackages(self._build_root,
                                       self._boards,
                                       overlay_type=self._run.config.overlays)
                push_overlays = portage_util.FindOverlays(
                    self._run.config.push_overlays, buildroot=self._build_root)
                commands.RegenPortageCache(push_overlays)

        # When prebuilts is True, update the binhost conf if the run succeeded
        # or, for a master-chrome-pfq run, if staging_branch is not None.
        if (self._run.config.prebuilts
                and (self.success or staging_branch is not None)):
            confwriter = prebuilts.BinhostConfWriter(self._run)
            confwriter.Perform()

        # Push the uprev, portage cache, and binhost commits.
        commands.UprevPush(self._build_root,
                           overlay_type=self._run.config.push_overlays,
                           dryrun=self._run.options.debug,
                           staging_branch=staging_branch)
        if config_lib.IsMasterAndroidPFQ(self._run.config) and self.success:
            self._run.attrs.metadata.UpdateWithDict({'UprevvedAndroid': True})
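
The cleanup condition in the middle of the stage is fairly dense; the hypothetical helper below restates the same boolean logic, nothing more:

    def _NeedsCleanup(is_cq, build_succeeded, staging_branch):
        """Hypothetical helper restating the cleanup condition above."""
        # Clean out local changes and resync when we are a commit queue, or when
        # the build neither succeeded nor produced a staging branch.
        keep_local_state = build_succeeded or staging_branch is not None
        return is_cq or not keep_local_state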
Example #6
    def __init__(self,
                 source_repo,
                 manifest_repo,
                 build_names,
                 build_type,
                 incr_type,
                 force,
                 branch,
                 manifest=constants.DEFAULT_MANIFEST,
                 dry_run=True,
                 lkgm_path_rel=constants.LKGM_MANIFEST,
                 config=None,
                 metadata=None,
                 buildbucket_client=None):
        """Initialize an LKGM Manager.

    Args:
      source_repo: Repository object for the source code.
      manifest_repo: Manifest repository for manifest versions/buildspecs.
      build_names: Identifiers for the build. Must match config_lib
          entries. If multiple identifiers are provided, the first item in the
          list must be an identifier for the group.
      build_type: Type of build.  Must be a pfq type.
      incr_type: How we should increment this version - build|branch|patch
      force: Create a new manifest even if there are no changes.
      branch: Branch this builder is running on.
      manifest: Manifest to use for checkout. E.g. 'full' or 'buildtools'.
      dry_run: Whether we actually commit changes we make or not.
      master: Whether we are the master builder.
      lkgm_path_rel: Path to the LKGM symlink, relative to manifest dir.
      config: Instance of config_lib.BuildConfig. Config dict of this builder.
      metadata: Instance of metadata_lib.CBuildbotMetadata. Metadata of this
                builder.
      buildbucket_client: Instance of buildbucket_lib.buildbucket_client.
    """
        super(LKGMManager,
              self).__init__(source_repo=source_repo,
                             manifest_repo=manifest_repo,
                             manifest=manifest,
                             build_names=build_names,
                             incr_type=incr_type,
                             force=force,
                             branch=branch,
                             dry_run=dry_run,
                             config=config,
                             metadata=metadata,
                             buildbucket_client=buildbucket_client)

        self.lkgm_path = os.path.join(self.manifest_dir, lkgm_path_rel)
        self.compare_versions_fn = _LKGMCandidateInfo.VersionCompare
        self.build_type = build_type
        # The Chrome PFQ and the PFQ exist at the same time and are versioned
        # separately, so they must have separate subdirs in the
        # manifest-versions repository.
        if self.build_type == constants.CHROME_PFQ_TYPE:
            self.rel_working_dir = self.CHROME_PFQ_SUBDIR
        elif self.build_type == constants.ANDROID_PFQ_TYPE:
            self.rel_working_dir = self.ANDROID_PFQ_SUBDIR
        elif self.build_type == constants.TOOLCHAIN_TYPE:
            self.rel_working_dir = self.TOOLCHAIN_SUBDIR
        elif config_lib.IsCQType(self.build_type):
            self.rel_working_dir = self.COMMIT_QUEUE_SUBDIR
        else:
            assert config_lib.IsPFQType(self.build_type)
            self.rel_working_dir = self.LKGM_SUBDIR
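
For what the lkgm_path assignment produces, a small worked example; the relative path value is a stand-in, not necessarily the real constants.LKGM_MANIFEST:

    import os

    # Worked example of the lkgm_path computation in __init__ above.
    manifest_dir = '/b/manifest-versions'        # placeholder checkout path
    lkgm_path_rel = 'LKGM/lkgm.xml'              # stand-in for constants.LKGM_MANIFEST
    lkgm_path = os.path.join(manifest_dir, lkgm_path_rel)
    assert lkgm_path == '/b/manifest-versions/LKGM/lkgm.xml'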
Example #7
    def _Publish(self, was_build_successful, build_finished,
                 completion_successful):
        """Updates and publishes uprevs.

    Args:
      was_build_successful: Whether the build succeeded.
      build_finished: Whether the build completed. A build can be successful
        without completing if it raises ExitEarlyException.
      completion_successful: Whether the compeletion_stage succeeded.
    """
        is_master_chrome_pfq = config_lib.IsMasterChromePFQ(self._run.config)

        updateEbuild_successful = False
        try:
            # When (afdo_update_ebuild and not afdo_generate_min) is True:
            # if the completion_stage passed, we need to run
            # AFDOUpdateChromeEbuildStage to prepare for pushing commits to
            # masters; if it's a master_chrome_pfq build and the completion_stage
            # failed, we need to run AFDOUpdateChromeEbuildStage to prepare for
            # pushing commits to a staging branch.
            if ((completion_successful or is_master_chrome_pfq)
                    and self._run.config.afdo_update_ebuild
                    and not self._run.config.afdo_generate_min):
                self._RunStage(afdo_stages.AFDOUpdateChromeEbuildStage)
                self._RunStage(afdo_stages.AFDOUpdateKernelEbuildStage)
                updateEbuild_successful = True
        finally:
            if self._run.config.master:
                self._RunStage(report_stages.SlaveFailureSummaryStage)

            is_master_release = config_lib.IsCanaryMaster(self._run.config)
            if is_master_release:
                if build_finished:
                    self._RunStage(completion_stages.UpdateChromeosLKGMStage)
                else:
                    logging.info(
                        'Skipping UpdateChromeosLKGMStage, '
                        'build_successful=%d completion_successful=%d '
                        'build_finished=%d', was_build_successful,
                        completion_successful, build_finished)

            if self._run.config.push_overlays:
                publish = (was_build_successful and completion_successful
                           and build_finished)
                # If this build is master chrome pfq, completion_stage failed,
                # AFDOUpdateChromeEbuildStage passed, and the necessary build stages
                # passed, it means publish is False and we need to stage the
                # push to another branch instead of master.
                stage_push = (is_master_chrome_pfq
                              and not completion_successful
                              and updateEbuild_successful
                              and was_build_successful and build_finished)

                # CQ and Master Chrome PFQ no longer publish uprevs. For Master Chrome
                # PFQ this is because this duty is being transitioned to the Chrome
                # PUpr in the PCQ world. See http://go/pupr.
                # There is no easy way to disable this in ChromeOS config,
                # so hack the check here.
                if (not config_lib.IsCQType(self._run.config.build_type)
                        and not is_master_chrome_pfq):
                    self._RunStage(completion_stages.PublishUprevChangesStage,
                                   self.sync_stage, publish, stage_push)
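
Pulling the two flags out of _Publish makes their relationship explicit; the helper name below is hypothetical, but the boolean logic is copied from the code above:

    def _PublishDecision(was_build_successful, build_finished,
                         completion_successful, is_master_chrome_pfq,
                         update_ebuild_successful):
        """Hypothetical helper restating the publish/stage_push logic above."""
        publish = (was_build_successful and completion_successful
                   and build_finished)
        stage_push = (is_master_chrome_pfq and not completion_successful
                      and update_ebuild_successful
                      and was_build_successful and build_finished)
        # The flags are mutually exclusive: stage_push requires the completion
        # stage to have failed, which forces publish to be False.
        return publish, stage_push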