Example #1
  def PerformStage(self):
    chroot_dir = os.path.join(self._build_root, constants.DEFAULT_CHROOT_DIR)
    sdk_dir = os.path.join(chroot_dir, 'build/amd64-host')
    tmp_dir = os.path.join(chroot_dir, 'tmp')
    osutils.SafeMakedirs(tmp_dir, mode=0o777, sudo=True)
    overlay_output_dir = os.path.join(chroot_dir,
                                      constants.SDK_OVERLAYS_OUTPUT)
    osutils.RmDir(overlay_output_dir, ignore_missing=True, sudo=True)
    osutils.SafeMakedirs(overlay_output_dir, mode=0o777, sudo=True)
    overlay_tarball_template = os.path.join(
        overlay_output_dir, TOOLCHAINS_OVERLAY_TARBALL_TEMPLATE)

    # Generate an overlay tarball for each unique toolchain combination. We
    # restrict ourselves to (a) board configs that are available to the builder
    # (naturally), and (b) toolchains that are part of the 'sdk' set.
    sdk_toolchains = set(toolchain.GetToolchainsForBoard('sdk'))
    generated = set()
    for board in self._run.site_config.GetBoards():
      try:
        toolchains = set(toolchain.GetToolchainsForBoard(board).keys())
      except portage_util.MissingOverlayException:
        # The board overlay may not exist, e.g. on external builders.
        continue

      toolchains_str = '-'.join(sorted(toolchains))
      if not toolchains.issubset(sdk_toolchains) or toolchains_str in generated:
        continue

      with osutils.TempDir(prefix='toolchains-overlay-%s.' % toolchains_str,
                           base_dir=tmp_dir, sudo_rm=True) as overlay_dir:
        # NOTE: We let MountOverlayContext remove the mount point created by
        # the TempDir context below, because it has built-in retries for rmdir
        # EBUSY errors that are due to unmount lag.
        with osutils.TempDir(prefix='amd64-host-%s.' % toolchains_str,
                             base_dir=tmp_dir, delete=False) as merged_dir:
          with osutils.MountOverlayContext(sdk_dir, overlay_dir, merged_dir,
                                           cleanup=True):
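            # merged_dir lives under the chroot, so stripping the chroot prefix
            # yields the path as seen from inside the chroot.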
            sysroot = merged_dir[len(chroot_dir):]
            cmd = ['cros_setup_toolchains', '--targets=boards',
                   '--include-boards=%s' % board,
                   '--sysroot=%s' % sysroot]
            commands.RunBuildScript(self._build_root, cmd, chromite_cmd=True,
                                    enter_chroot=True, sudo=True,
                                    extra_env=self._portage_extra_env)

        # NOTE: Make sure that the overlay directory is owned root:root and has
        # 0o755 perms; apparently, these things are preserved through
        # tarring/untarring and might cause havoc if overlooked.
        os.chmod(overlay_dir, 0o755)
        cros_build_lib.SudoRunCommand(['chown', 'root:root', overlay_dir])
        CreateTarball(overlay_dir,
                      overlay_tarball_template % {'toolchains': toolchains_str})

      generated.add(toolchains_str)
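
The loop above produces at most one overlay tarball per unique toolchain
combination. A minimal, self-contained sketch of that deduplication filter,
using hypothetical board-to-toolchain data in place of the real
toolchain.GetToolchainsForBoard() results:

  sdk_toolchains = {'x86_64-cros-linux-gnu', 'armv7a-cros-linux-gnueabi'}
  board_toolchains = {
      'board-a': {'x86_64-cros-linux-gnu'},
      'board-b': {'x86_64-cros-linux-gnu'},  # same combination as board-a
      'board-c': {'mips-unknown-elf'},       # not a subset of the sdk set
  }

  generated = set()
  for board, toolchains in sorted(board_toolchains.items()):
    combo = '-'.join(sorted(toolchains))
    if not toolchains.issubset(sdk_toolchains) or combo in generated:
      continue
    generated.add(combo)  # a tarball would be generated here

  print(generated)  # {'x86_64-cros-linux-gnu'}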
Example #2
  def GetCrossGdb(self):
    """Find the appropriate cross-version of gdb for the board."""
    toolchains = toolchain.GetToolchainsForBoard(self.board)
    tc = list(toolchain.FilterToolchains(toolchains, 'default', True))
    cross_gdb = tc[0] + '-gdb'
    if not osutils.Which(cross_gdb):
      raise GdbMissingDebuggerError('Cannot find %s; do you need to run '
                                    'setup_board?' % cross_gdb)
    return cross_gdb
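
The FilterToolchains() call above is what selects the default toolchain tuple.
A sketch of the equivalent filtering, assuming GetToolchainsForBoard() returns
a dict of {target tuple: settings dict} with one entry flagged as default
(hypothetical data):

  toolchains = {
      'x86_64-cros-linux-gnu': {'default': True},
      'i686-pc-linux-gnu': {'default': False},
  }
  default = [tc for tc, cfg in toolchains.items() if cfg.get('default')]
  print(default[0] + '-gdb')  # x86_64-cros-linux-gnu-gdb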
def UpdateToolchains(usepkg, deleteold, hostonly, reconfig,
                     targets_wanted, boards_wanted, root='/'):
  """Performs all steps to create a synchronized toolchain enviroment.

  Args:
    usepkg: Use prebuilt packages
    deleteold: Unmerge deprecated packages
    hostonly: Only setup the host toolchain
    reconfig: Reload crossdev config and reselect toolchains
    targets_wanted: All the targets to update
    boards_wanted: Load targets from these boards
    root: The root in which to install the toolchains.
  """
  targets, crossdev_targets, reconfig_targets = {}, {}, {}
  if not hostonly:
    # For hostonly, we can skip most of the below logic, much of which won't
    # work on bare systems where this is useful.
    targets = ExpandTargets(targets_wanted)

    # Now re-add any targets that might be from this board. This is to
    # allow unofficial boards to declare their own toolchains.
    for board in boards_wanted:
      targets.update(toolchain.GetToolchainsForBoard(board))

    # First check and initialize all cross targets that need to be.
    for target in targets:
      if TargetIsInitialized(target):
        reconfig_targets[target] = targets[target]
      else:
        crossdev_targets[target] = targets[target]
    if crossdev_targets:
      logging.info('The following targets need to be re-initialized:')
      logging.info('%s', crossdev_targets)
      Crossdev.UpdateTargets(crossdev_targets, usepkg)
    # Those that were already initialized may need a config update.
    Crossdev.UpdateTargets(reconfig_targets, usepkg, config_only=True)

    # If we're building a subset of toolchains for a board, we might not have
    # all the tuples that the packages expect.  We don't define the "full" set
    # of tuples currently other than "whatever the full sdk has normally".
    if usepkg or set(('all', 'sdk')) & targets_wanted:
      # Since we have cross-compilers now, we can update these packages.
      targets['host-post-cross'] = {}

  # We want host updated.
  targets['host'] = {}

  # Now update all packages.
  if UpdateTargets(targets, usepkg, root=root) or crossdev_targets or reconfig:
    SelectActiveToolchains(targets, CONFIG_TARGET_SUFFIXES, root=root)

  if deleteold:
    CleanTargets(targets, root=root)

  # Now that we've cleared out old versions, see if we need to rebuild
  # anything.  Can't do this earlier as it might not be broken.
  RebuildLibtool(root=root)
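
A hypothetical invocation of the function above, updating prebuilt toolchains
for one explicit target plus whatever 'board-a' declares (the target and board
names are made up for illustration):

  UpdateToolchains(usepkg=True, deleteold=False, hostonly=False, reconfig=False,
                   targets_wanted={'x86_64-cros-linux-gnu'},
                   boards_wanted=['board-a'])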
Example #4
def ShowConfig(name):
  """Show the toolchain tuples used by |name|

  Args:
    name: The board name to query.
  """
  toolchains = toolchain.GetToolchainsForBoard(name)
  # Make sure we display the default toolchain first.
  print(','.join(
      list(toolchain.FilterToolchains(toolchains, 'default', True)) +
      list(toolchain.FilterToolchains(toolchains, 'default', False))))
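
The comma-joined output is intended for consumption by other tools, with the
default tuple printed first. A hypothetical consumer could split it back apart
on that assumption:

  tuples = 'x86_64-cros-linux-gnu,i686-pc-linux-gnu'.split(',')
  default_tuple, extra_tuples = tuples[0], tuples[1:]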
Example #5
def ShowBoardConfig(board):
  """Show the toolchain tuples used by |board|

  Args:
    board: The board to query.
  """
  toolchains = toolchain.GetToolchainsForBoard(board)
  # Make sure we display the default toolchain first.
  print(','.join(
      list(toolchain.FilterToolchains(toolchains, 'default', True)) +
      list(toolchain.FilterToolchains(toolchains, 'default', False))))
  def testReadsBoardToolchains(self, find_overlays_mock):
    """Tests that we correctly parse toolchain configs for an overlay stack."""
    # Create some fake overlays and put toolchain confs in a subset of them.
    overlays = [os.path.join(self.tempdir, 'overlay%d' % i) for i in range(3)]
    for overlay in overlays:
      osutils.SafeMakedirs(overlay)
    for overlay, contents in [(overlays[0], BASE_TOOLCHAIN_CONF),
                              (overlays[2], ADDITIONAL_TOOLCHAIN_CONF)]:
      osutils.WriteFile(os.path.join(overlay, 'toolchain.conf'), contents)
    find_overlays_mock.return_value = overlays
    actual_targets = toolchain.GetToolchainsForBoard('board_value')
    self.assertEqual(EXPECTED_TOOLCHAINS, actual_targets)
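
BASE_TOOLCHAIN_CONF and ADDITIONAL_TOOLCHAIN_CONF are defined elsewhere in the
test module. Hypothetical stand-ins, assuming toolchain.conf's format of one
target tuple per line:

  BASE_TOOLCHAIN_CONF = 'x86_64-cros-linux-gnu\n'
  ADDITIONAL_TOOLCHAIN_CONF = 'armv7a-cros-linux-gnueabi\n'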
def ShowConfig(name):
  """Show the toolchain tuples used by |name|

  Args:
    name: The board name to query.
  """
  toolchains = toolchain.GetToolchainsForBoard(name)
  # Make sure we display the default toolchain first.
  # Note: Do not use logging here as this is meant to be used by other tools.
  print(','.join(
      list(toolchain.FilterToolchains(toolchains, 'default', True)) +
      list(toolchain.FilterToolchains(toolchains, 'default', False))))
Example #8
    def testTarballCreation(self):
        """Tests that tarballs are created for all board toolchains."""
        self._Prepare('chromiumos-sdk')
        self.RunStage()

        # Check that a tarball was created correctly for all toolchain sets.
        sdk_toolchains = set(toolchain.GetToolchainsForBoard('sdk'))
        all_toolchain_combos = set()
        for board in self._run.site_config.GetBoards():
            try:
                toolchains = set(toolchain.GetToolchainsForBoard(board).keys())
                if toolchains.issubset(sdk_toolchains):
                    all_toolchain_combos.add('-'.join(sorted(toolchains)))
            except portage_util.MissingOverlayError:
                pass

        for toolchains in all_toolchain_combos:
            overlay_tarball = os.path.join(
                self.build_root, constants.DEFAULT_CHROOT_DIR,
                constants.SDK_OVERLAYS_OUTPUT,
                'built-sdk-overlay-toolchains-%s.tar.xz' % toolchains)
            output = cros_build_lib.run(
                ['tar', '-I', 'xz', '-tf', overlay_tarball],
                encoding='utf-8',
                capture_output=True).stdout.splitlines()
            # Check that the overlay tarball contains a marker file and that the
            # board recorded by this marker file indeed uses the toolchains for which
            # the tarball was built.
            tmp_files = [
                os.path.basename(x) for x in output if x.endswith('.tmp')
            ]
            self.assertEqual(1, len(tmp_files))
            board = tmp_files[0][:-len('.tmp')]
            board_toolchains = '-'.join(
                sorted(toolchain.GetToolchainsForBoard(board).keys()))
            self.assertEqual(toolchains, board_toolchains)
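
The overlay tarball name checked above is a pure function of the sorted
toolchain set, e.g.:

  toolchains = {'x86_64-cros-linux-gnu', 'armv7a-cros-linux-gnueabi'}
  name = 'built-sdk-overlay-toolchains-%s.tar.xz' % '-'.join(sorted(toolchains))
  # built-sdk-overlay-toolchains-armv7a-cros-linux-gnueabi-x86_64-cros-linux-gnu.tar.xz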
  def GenerateBoardConfig(self, board):
    """Generates the configuration for a given board.

    Args:
      board: board name to use to generate the configuration.
    """
    toolchains = toolchain.GetToolchainsForBoard(board)

    # Compute the overlay list.
    portdir_overlays = portage_util.FindOverlays(constants.BOTH_OVERLAYS, board)
    prefix = os.path.join(constants.SOURCE_ROOT, 'src', 'third_party')
    board_overlays = [o for o in portdir_overlays if not o.startswith(prefix)]

    header = "# Created by cros_sysroot_utils from --board=%s." % board
    return self._GenerateConfig(toolchains, board_overlays, portdir_overlays,
                                header, BOARD_USE=board)
def UpdateToolchains(usepkg, getbinpkg, deleteold, hostonly, reconfig,
                     targets_wanted, boards_wanted):
    """Performs all steps to create a synchronized toolchain enviroment.

  args:
    arguments correspond to the given commandline flags
  """
    targets, crossdev_targets, reconfig_targets = {}, {}, {}
    if not hostonly:
        # For hostonly, we can skip most of the below logic, much of which won't
        # work on bare systems where this is useful.
        targets = ExpandTargets(targets_wanted)

        # Now re-add any targets that might be from this board.  This is
        # to allow unofficial boards to declare their own toolchains.
        for board in boards_wanted:
            targets.update(toolchain.GetToolchainsForBoard(board))

        # First check and initialize all cross targets that need to be.
        for target in targets:
            if TargetIsInitialized(target):
                reconfig_targets[target] = targets[target]
            else:
                crossdev_targets[target] = targets[target]
        if crossdev_targets:
            print('The following targets need to be re-initialized:')
            print(crossdev_targets)
            Crossdev.UpdateTargets(crossdev_targets,
                                   usepkg,
                                   getbinpkg=getbinpkg)
        # Those that were already initialized may need a config update.
        Crossdev.UpdateTargets(reconfig_targets,
                               usepkg,
                               getbinpkg=getbinpkg,
                               config_only=True)

    # We want host updated.
    targets['host'] = {}

    # Now update all packages.
    if (UpdateTargets(targets, usepkg, getbinpkg=getbinpkg) or crossdev_targets
            or reconfig):
        SelectActiveToolchains(targets, CONFIG_TARGET_SUFFIXES)

    if deleteold:
        CleanTargets(targets)
Example #11
    def PerformStage(self):
        config = self._run.config
        build_root = self._build_root

        logging.info('Build re-executions have finished. Chromite source '
                     'will not be modified for remainder of run.')
        logging.info("config['important']=%s", config['important'])
        logging.PrintBuildbotStepText("config['important']=%s" %
                                      config['important'])

        # Flat list of all child config boards. Since child configs
        # are not allowed to have children, it is not necessary to search
        # deeper than one generation.
        child_configs = GetChildConfigListMetadata(
            child_configs=config['child_configs'], config_status_map=None)

        sdk_verinfo = cros_build_lib.LoadKeyValueFile(
            os.path.join(build_root, constants.SDK_VERSION_FILE),
            ignore_missing=True)

        verinfo = self._run.GetVersionInfo()
        platform_tag = getattr(self._run.attrs, 'release_tag')
        if not platform_tag:
            platform_tag = verinfo.VersionString()

        version = {
            'full': self._run.GetVersion(),
            'milestone': verinfo.chrome_branch,
            'platform': platform_tag,
        }

        metadata = {
            # Version of the metadata format.
            'metadata-version': '2',
            'boards': config['boards'],
            'child-configs': child_configs,
            'build_type': config['build_type'],
            'important': config['important'],

            # Data for the toolchain used.
            'sdk-version': sdk_verinfo.get('SDK_LATEST_VERSION', '<unknown>'),
            'toolchain-url': sdk_verinfo.get('TC_PATH', '<unknown>'),
        }

        if len(config['boards']) == 1:
            toolchains = toolchain.GetToolchainsForBoard(config['boards'][0],
                                                         buildroot=build_root)
            metadata['toolchain-tuple'] = (
                toolchain.FilterToolchains(toolchains, 'default', True).keys()
                + toolchain.FilterToolchains(toolchains, 'default',
                                             False).keys())

        logging.info('Metadata being written: %s', metadata)
        self._run.attrs.metadata.UpdateWithDict(metadata)
        # Update 'version' separately to avoid overwriting the existing
        # entries in it (e.g. PFQ builders may have written the Chrome
        # version to uprev).
        logging.info("Metadata 'version' being written: %s", version)
        self._run.attrs.metadata.UpdateKeyDictWithDict('version', version)

        # Ensure that all boards and child config boards have a per-board
        # metadata subdict.
        for b in config['boards']:
            self._run.attrs.metadata.UpdateBoardDictWithDict(b, {})

        for cc in child_configs:
            for b in cc['boards']:
                self._run.attrs.metadata.UpdateBoardDictWithDict(b, {})

        # Upload build metadata (and write it to database if necessary)
        self.UploadMetadata(filename=constants.PARTIAL_METADATA_JSON)

        # Write child-per-build and board-per-build rows to database
        build_id, db = self._run.GetCIDBHandle()
        if db:
            # TODO(akeshet): replace this with a GetValue call once crbug.com/406522
            # is resolved
            metadata_dict = self._run.attrs.metadata.GetDict()
            per_board_dict = metadata_dict['board-metadata']
            for board, board_metadata in per_board_dict.items():
                db.InsertBoardPerBuild(build_id, board)
                if board_metadata:
                    db.UpdateBoardPerBuildMetadata(build_id, board,
                                                   board_metadata)
            for child_config in self._run.attrs.metadata.GetValue(
                    'child-configs'):
                db.InsertChildConfigPerBuild(build_id, child_config['name'])

            # If this build has a master build, ensure that the master full_version
            # is the same as this build's full_version. This is a sanity check to
            # avoid bugs in master-slave logic.
            master_id = self._run.attrs.metadata.GetDict().get(
                'master_build_id')
            if master_id is not None:
                master_full_version = db.GetBuildStatus(
                    master_id)['full_version']
                my_full_version = self._run.attrs.metadata.GetValue(
                    'version').get('full')
                if master_full_version != my_full_version:
                    raise failures_lib.MasterSlaveVersionMismatchFailure(
                        'Master build id %s has full_version %s, while slave version is '
                        '%s.' %
                        (master_id, master_full_version, my_full_version))

        # Abort previous hw test suites. This happens after reexecution as it
        # requires chromite/third_party/swarming.client, which is not available
        # until after reexecution.
        self._AbortPreviousHWTestSuites(version['milestone'])
Example #12
def main(argv):
  conf = cros_build_lib.LoadKeyValueFile(
      os.path.join(constants.SOURCE_ROOT, constants.SDK_VERSION_FILE),
      ignore_missing=True)
  sdk_latest_version = conf.get('SDK_LATEST_VERSION', '<unknown>')
  bootstrap_latest_version = conf.get('BOOTSTRAP_LATEST_VERSION', '<unknown>')
  parser, commands = _CreateParser(sdk_latest_version, bootstrap_latest_version)
  options = parser.parse_args(argv)
  chroot_command = options.commands

  # Some sanity checks first, before we ask for sudo credentials.
  cros_build_lib.AssertOutsideChroot()

  host = os.uname()[4]
  if host != 'x86_64':
    parser.error(
        "cros_sdk is currently only supported on x86_64; you're running"
        " %s.  Please find a x86_64 machine." % (host,))

  _ReportMissing(osutils.FindMissingBinaries(NEEDED_TOOLS))
  if options.proxy_sim:
    _ReportMissing(osutils.FindMissingBinaries(PROXY_NEEDED_TOOLS))

  _ReExecuteIfNeeded([sys.argv[0]] + argv)
  if options.ns_pid:
    first_pid = namespaces.CreatePidNs()
  else:
    first_pid = None

  # Expand out the aliases...
  if options.replace:
    options.delete = options.create = True

  if options.bootstrap:
    options.create = True

  # If a command is not given, default to enter.
  # pylint: disable=protected-access
  # This _group_actions access sucks, but upstream decided to not include an
  # alternative to optparse's option_list, and this is what they recommend.
  options.enter |= not any(getattr(options, x.dest)
                           for x in commands._group_actions)
  # pylint: enable=protected-access
  options.enter |= bool(chroot_command)

  if options.enter and options.delete and not options.create:
    parser.error("Trying to enter the chroot when --delete "
                 "was specified makes no sense.")

  # Finally, discern if we need to create the chroot.
  chroot_exists = os.path.exists(options.chroot)
  if options.create or options.enter:
    # Only create if it's being wiped, or if it doesn't exist.
    if not options.delete and chroot_exists:
      options.create = False
    else:
      options.download = True

  # Finally, flip create if necessary.
  if options.enter:
    options.create |= not chroot_exists

  if not options.sdk_version:
    sdk_version = (bootstrap_latest_version if options.bootstrap
                   else sdk_latest_version)
  else:
    sdk_version = options.sdk_version
  if options.buildbot_log_version:
    logging.PrintBuildbotStepText(sdk_version)

  # Based on selections, determine the tarball to fetch.
  if options.sdk_url:
    urls = [options.sdk_url]
  elif options.bootstrap:
    urls = GetStage3Urls(sdk_version)
  else:
    urls = GetArchStageTarballs(sdk_version)

  # Get URLs for the toolchains overlay, if one is to be used.
  toolchains_overlay_urls = None
  if not options.bootstrap:
    toolchains = None
    if options.toolchains:
      toolchains = options.toolchains.split(',')
    elif options.board:
      toolchains = toolchain.GetToolchainsForBoard(options.board).keys()

    if toolchains:
      toolchains_overlay_urls = GetToolchainsOverlayUrls(sdk_version,
                                                         toolchains)

  lock_path = os.path.dirname(options.chroot)
  lock_path = os.path.join(
      lock_path, '.%s_lock' % os.path.basename(options.chroot).lstrip('.'))
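  # For example, options.chroot == '/path/to/chroot' yields the lock file
  # '/path/to/.chroot_lock'.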
  with cgroups.SimpleContainChildren('cros_sdk', pid=first_pid):
    with locking.FileLock(lock_path, 'chroot lock') as lock:
      toolchains_overlay_tarball = None

      if options.proxy_sim:
        _ProxySimSetup(options)

      if options.delete and os.path.exists(options.chroot):
        lock.write_lock()
        DeleteChroot(options.chroot)

      sdk_cache = os.path.join(options.cache_dir, 'sdks')
      distfiles_cache = os.path.join(options.cache_dir, 'distfiles')
      osutils.SafeMakedirsNonRoot(options.cache_dir)

      for target in (sdk_cache, distfiles_cache):
        src = os.path.join(constants.SOURCE_ROOT, os.path.basename(target))
        if not os.path.exists(src):
          osutils.SafeMakedirsNonRoot(target)
          continue
        lock.write_lock(
            "Upgrade to %r needed but chroot is locked; please exit "
            "all instances so this upgrade can finish." % src)
        if not os.path.exists(src):
          # Note that while waiting for the write lock, src may've vanished;
          # it's a rare race during the upgrade process that's a byproduct
          # of us avoiding taking a write lock to do the src check.  If we
          # took a write lock for that check, it would effectively limit
          # all cros_sdk for a chroot to a single instance.
          osutils.SafeMakedirsNonRoot(target)
        elif not os.path.exists(target):
          # Upgrade occurred, but a reversion, or something whacky
          # occurred writing to the old location.  Wipe and continue.
          os.rename(src, target)
        else:
          # Upgrade occurred once already, but either a reversion or
          # some before/after separate cros_sdk usage is at play.
          # Wipe and continue.
          osutils.RmDir(src)

      if options.download:
        lock.write_lock()
        sdk_tarball = FetchRemoteTarballs(
            sdk_cache, urls, 'stage3' if options.bootstrap else 'SDK')
        if toolchains_overlay_urls:
          toolchains_overlay_tarball = FetchRemoteTarballs(
              sdk_cache, toolchains_overlay_urls, 'SDK toolchains overlay',
              allow_none=True)

      if options.create:
        lock.write_lock()
        CreateChroot(options.chroot, sdk_tarball, toolchains_overlay_tarball,
                     options.cache_dir,
                     nousepkg=(options.bootstrap or options.nousepkg))

      if options.enter:
        lock.read_lock()
        EnterChroot(options.chroot, options.cache_dir, options.chrome_root,
                    options.chrome_root_mount, options.workspace,
                    chroot_command)
Example #13
    def PerformStage(self):
        """Perform the actual work for this stage.

        This includes final metadata archival and updating CIDB with our final
        status, as well as producing a logged build result summary.
        """
        build_identifier, _ = self._run.GetCIDBHandle()
        build_id = build_identifier.cidb_id
        buildbucket_id = build_identifier.buildbucket_id
        if results_lib.Results.BuildSucceededSoFar(self.buildstore,
                                                   buildbucket_id, self.name):
            final_status = constants.BUILDER_STATUS_PASSED
        else:
            final_status = constants.BUILDER_STATUS_FAILED

        if not hasattr(self._run.attrs, 'release_tag'):
            # If, for some reason, the sync stage did not complete and
            # release_tag was not set, set it to None here because
            # ArchiveResults() depends on the existence of this attr.
            self._run.attrs.release_tag = None

        # Set up our report metadata.
        self._run.attrs.metadata.UpdateWithDict(
            self.GetReportMetadata(
                final_status=final_status,
                completion_instance=self._completion_instance))

        src_root = self._build_root
        # Workspace builders use a different buildroot for overlays.
        if self._run.config.workspace_branch and self._run.options.workspace:
            src_root = self._run.options.workspace

        # Add tags for the arches and statuses of the build.
        # arches requires crossdev which isn't available at the early part of the
        # build.
        arches = []
        for board in self._run.config['boards']:
            toolchains = toolchain.GetToolchainsForBoard(board,
                                                         buildroot=src_root)
            default = list(
                toolchain.FilterToolchains(toolchains, 'default', True))
            if default:
                try:
                    arches.append(toolchain.GetArchForTarget(default[0]))
                except cros_build_lib.RunCommandError as e:
                    logging.warning(
                        'Unable to retrieve arch for board %s default toolchain %s: %s',
                        board, default, e)
        tags = {
            'arches': arches,
            'status': final_status,
        }
        results = self._run.attrs.metadata.GetValue('results')
        for stage in results:
            tags['stage_status:%s' % stage['name']] = stage['status']
            tags['stage_summary:%s' % stage['name']] = stage['summary']
        self._run.attrs.metadata.UpdateKeyDictWithDict(constants.METADATA_TAGS,
                                                       tags)

        # Some operations can only be performed if a valid version is available.
        try:
            self._run.GetVersionInfo()
            self.ArchiveResults(final_status)
            metadata_url = os.path.join(self.upload_url,
                                        constants.METADATA_JSON)
        except cbuildbot_run.VersionNotSetError:
            logging.error('A valid version was never set for this run. '
                          'Can not archive results.')
            metadata_url = ''

        results_lib.Results.Report(sys.stdout,
                                   current_version=(self._run.attrs.release_tag
                                                    or ''))

        # Upload goma log if used for BuildPackage and TestSimpleChrome.
        _UploadAndLinkGomaLogIfNecessary(
            'BuildPackages', self._run.config.name, self._run.options.goma_dir,
            self._run.options.goma_client_json,
            self._run.attrs.metadata.GetValueWithDefault('goma_tmp_dir'))
        _UploadAndLinkGomaLogIfNecessary(
            'TestSimpleChromeWorkflow', self._run.config.name,
            self._run.options.goma_dir, self._run.options.goma_client_json,
            self._run.attrs.metadata.GetValueWithDefault(
                'goma_tmp_dir_for_simple_chrome'))

        if self.buildstore.AreClientsReady():
            status_for_db = final_status

            # TODO(pprabhu): After BuildData and CBuildbotMetadata are merged, remove
            # this extra temporary object creation.
            # XXX:HACK We're creating a BuildData with an empty URL. Don't try to
            # MarkGathered this object.
            build_data = metadata_lib.BuildData(
                '', self._run.attrs.metadata.GetDict())
            # TODO(akeshet): Find a clearer way to get the "primary upload url" for
            # the metadata.json file. One alternative is _GetUploadUrls(...)[0].
            # Today it seems that element 0 of its return list is the primary upload
            # url, but there is no guarantee or unit test coverage of that.
            self.buildstore.FinishBuild(build_id,
                                        status=status_for_db,
                                        summary=build_data.failure_message,
                                        metadata_url=metadata_url)

            duration = self._GetBuildDuration()

            mon_fields = {
                'status': status_for_db,
                'build_config': self._run.config.name,
                'important': self._run.config.important
            }
            metrics.Counter(
                constants.MON_BUILD_COMP_COUNT).increment(fields=mon_fields)
            metrics.CumulativeSecondsDistribution(
                constants.MON_BUILD_DURATION).add(duration, fields=mon_fields)

            if self._run.options.sanity_check_build:
                metrics.Counter(
                    constants.MON_BUILD_SANITY_COMP_COUNT).increment(
                        fields=mon_fields)
                metrics.Gauge(
                    constants.MON_BUILD_SANITY_ID,
                    description=
                    'The build number of the latest sanity build. Used '
                    'for recovering the link to the latest failing build '
                    'in the alert when a sanity build fails.',
                    field_spec=[
                        ts_mon.StringField('status'),
                        ts_mon.StringField('build_config'),
                        ts_mon.StringField('builder_name'),
                        ts_mon.BooleanField('important')
                    ]).set(self._run.buildnumber,
                           fields=dict(
                               mon_fields,
                               builder_name=self._run.GetBuilderName()))

            if config_lib.IsMasterCQ(self._run.config):
                self_destructed = self._run.attrs.metadata.GetValueWithDefault(
                    constants.SELF_DESTRUCTED_BUILD, False)
                mon_fields = {
                    'status': status_for_db,
                    'self_destructed': self_destructed
                }
                metrics.CumulativeSecondsDistribution(
                    constants.MON_CQ_BUILD_DURATION).add(duration,
                                                         fields=mon_fields)
                annotator_link = uri_lib.ConstructAnnotatorUri(build_id)
                logging.PrintBuildbotLink('Build annotator', annotator_link)

            # From this point forward, treat all exceptions as warnings.
            self._post_completion = True

            # Dump report about things we retry.
            retry_stats.ReportStats(sys.stdout)
Example #14
def main(argv):
  conf = cros_build_lib.LoadKeyValueFile(
      os.path.join(constants.SOURCE_ROOT, constants.SDK_VERSION_FILE),
      ignore_missing=True)
  sdk_latest_version = conf.get('SDK_LATEST_VERSION', '<unknown>')
  bootstrap_latest_version = conf.get('BOOTSTRAP_LATEST_VERSION', '<unknown>')
  parser, commands = _CreateParser(sdk_latest_version, bootstrap_latest_version)
  options = parser.parse_args(argv)
  chroot_command = options.commands

  # Some sanity checks first, before we ask for sudo credentials.
  cros_build_lib.AssertOutsideChroot()

  host = os.uname()[4]
  if host != 'x86_64':
    cros_build_lib.Die(
        "cros_sdk is currently only supported on x86_64; you're running"
        " %s.  Please find a x86_64 machine." % (host,))

  _ReportMissing(osutils.FindMissingBinaries(NEEDED_TOOLS))
  if options.proxy_sim:
    _ReportMissing(osutils.FindMissingBinaries(PROXY_NEEDED_TOOLS))
  missing_image_tools = osutils.FindMissingBinaries(IMAGE_NEEDED_TOOLS)

  if (sdk_latest_version == '<unknown>' or
      bootstrap_latest_version == '<unknown>'):
    cros_build_lib.Die(
        'No SDK version was found. '
        'Are you in a Chromium source tree instead of Chromium OS?\n\n'
        'Please change to a directory inside your Chromium OS source tree\n'
        'and retry.  If you need to setup a Chromium OS source tree, see\n'
        '  http://www.chromium.org/chromium-os/developer-guide')

  any_snapshot_operation = (options.snapshot_create or options.snapshot_restore
                            or options.snapshot_delete or options.snapshot_list)
  if any_snapshot_operation and not options.use_image:
    cros_build_lib.Die('Snapshot operations are not compatible with '
                       '--nouse-image.')

  if (options.snapshot_delete and options.snapshot_delete ==
      options.snapshot_restore):
    parser.error('Cannot --snapshot_delete the same snapshot you are '
                 'restoring with --snapshot_restore.')

  _ReExecuteIfNeeded([sys.argv[0]] + argv)

  lock_path = os.path.dirname(options.chroot)
  lock_path = os.path.join(
      lock_path, '.%s_lock' % os.path.basename(options.chroot).lstrip('.'))

  # Expand out the aliases...
  if options.replace:
    options.delete = options.create = True

  if options.bootstrap:
    options.create = True

  # If a command is not given, default to enter.
  # pylint: disable=protected-access
  # This _group_actions access sucks, but upstream decided to not include an
  # alternative to optparse's option_list, and this is what they recommend.
  options.enter |= not any(getattr(options, x.dest)
                           for x in commands._group_actions)
  # pylint: enable=protected-access
  options.enter |= bool(chroot_command)

  if (options.delete and not options.create and
      (options.enter or any_snapshot_operation)):
    parser.error("Trying to enter or snapshot the chroot when --delete "
                 "was specified makes no sense.")

  if options.working_dir is not None and not os.path.isabs(options.working_dir):
    options.working_dir = path_util.ToChrootPath(options.working_dir)

  # Clean up potential leftovers from previous interrupted builds.
  # TODO(bmgordon): Remove this at the end of 2017.  That should be long enough
  # to get rid of them all.
  chroot_build_path = options.chroot + '.build'
  if options.use_image and os.path.exists(chroot_build_path):
    try:
      with cgroups.SimpleContainChildren('cros_sdk'):
        with locking.FileLock(lock_path, 'chroot lock') as lock:
          logging.notice('Cleaning up leftover build directory %s',
                         chroot_build_path)
          lock.write_lock()
          osutils.UmountTree(chroot_build_path)
          osutils.RmDir(chroot_build_path)
    except cros_build_lib.RunCommandError as e:
      logging.warning('Unable to remove %s: %s', chroot_build_path, e)

  # Discern if we need to create the chroot.
  chroot_ver_file = os.path.join(options.chroot, 'etc', 'cros_chroot_version')
  chroot_exists = os.path.exists(chroot_ver_file)
  if (options.use_image and not chroot_exists and not options.delete and
      not missing_image_tools and
      os.path.exists(_ImageFileForChroot(options.chroot))):
    # Try to re-mount an existing image in case the user has rebooted.
    with cgroups.SimpleContainChildren('cros_sdk'):
      with locking.FileLock(lock_path, 'chroot lock') as lock:
        logging.debug('Checking if existing chroot image can be mounted.')
        lock.write_lock()
        cros_sdk_lib.MountChroot(options.chroot, create=False)
        chroot_exists = os.path.exists(chroot_ver_file)
        if chroot_exists:
          logging.notice('Mounted existing image %s on chroot',
                         _ImageFileForChroot(options.chroot))
  if (options.create or options.enter or options.snapshot_create or
      options.snapshot_restore):
    # Only create if it's being wiped, or if it doesn't exist.
    if not options.delete and chroot_exists:
      options.create = False
    else:
      options.download = True

  # Finally, flip create if necessary.
  if options.enter or options.snapshot_create:
    options.create |= not chroot_exists

  # Anything that needs to manipulate the main chroot mount or communicate with
  # LVM needs to be done here before we enter the new namespaces.

  # If deleting, do it regardless of the use_image flag so that a
  # previously-created loopback chroot can also be cleaned up.
  # TODO(bmgordon): See if the DeleteChroot call below can be removed in
  # favor of this block.
  chroot_deleted = False
  if options.delete:
    with cgroups.SimpleContainChildren('cros_sdk'):
      with locking.FileLock(lock_path, 'chroot lock') as lock:
        lock.write_lock()
        if missing_image_tools:
          logging.notice('Unmounting chroot.')
          osutils.UmountTree(options.chroot)
        else:
          logging.notice('Deleting chroot.')
          cros_sdk_lib.CleanupChrootMount(options.chroot, delete_image=True)
          osutils.RmDir(options.chroot, ignore_missing=True)
          chroot_deleted = True

  # Make sure the main chroot mount is visible.  Contents will be filled in
  # below if needed.
  if options.create and options.use_image:
    if missing_image_tools:
      raise SystemExit(
          '''The tool(s) %s were not found.
Please make sure the lvm2 and thin-provisioning-tools packages
are installed on your host.
Example(ubuntu):
  sudo apt-get install lvm2 thin-provisioning-tools

If you want to run without lvm2, pass --nouse-image (chroot
snapshots will be unavailable).''' % ', '.join(missing_image_tools))

    logging.debug('Making sure chroot image is mounted.')
    with cgroups.SimpleContainChildren('cros_sdk'):
      with locking.FileLock(lock_path, 'chroot lock') as lock:
        lock.write_lock()
        if not cros_sdk_lib.MountChroot(options.chroot, create=True):
          cros_build_lib.Die('Unable to mount %s on chroot',
                             _ImageFileForChroot(options.chroot))
        logging.notice('Mounted %s on chroot',
                       _ImageFileForChroot(options.chroot))

  # Snapshot operations will always need the VG/LV, but other actions won't.
  if any_snapshot_operation:
    with cgroups.SimpleContainChildren('cros_sdk'):
      with locking.FileLock(lock_path, 'chroot lock') as lock:
        chroot_vg, chroot_lv = cros_sdk_lib.FindChrootMountSource(
            options.chroot)
        if not chroot_vg or not chroot_lv:
          cros_build_lib.Die('Unable to find VG/LV for chroot %s',
                             options.chroot)

        # Delete snapshot before creating a new one.  This allows the user to
        # throw out old state, create a new snapshot, and enter the chroot in a
        # single call to cros_sdk.  Since restore involves deleting, also do it
        # before creating.
        if options.snapshot_restore:
          lock.write_lock()
          valid_snapshots = ListChrootSnapshots(chroot_vg, chroot_lv)
          if options.snapshot_restore not in valid_snapshots:
            cros_build_lib.Die('%s is not a valid snapshot to restore to. '
                               'Valid snapshots: %s', options.snapshot_restore,
                               ', '.join(valid_snapshots))
          osutils.UmountTree(options.chroot)
          if not RestoreChrootSnapshot(options.snapshot_restore, chroot_vg,
                                       chroot_lv):
            cros_build_lib.Die('Unable to restore chroot to snapshot.')
          if not cros_sdk_lib.MountChroot(options.chroot, create=False):
            cros_build_lib.Die('Unable to mount restored snapshot onto chroot.')

        # Use a read lock for snapshot delete and create even though they modify
        # the filesystem, because they don't modify the mounted chroot itself.
        # The underlying LVM commands take their own locks, so conflicting
        # concurrent operations here may crash cros_sdk, but won't corrupt the
        # chroot image.  This tradeoff seems worth it to allow snapshot
        # operations on chroots that have a process inside.
        if options.snapshot_delete:
          lock.read_lock()
          DeleteChrootSnapshot(options.snapshot_delete, chroot_vg, chroot_lv)

        if options.snapshot_create:
          lock.read_lock()
          if not CreateChrootSnapshot(options.snapshot_create, chroot_vg,
                                      chroot_lv):
            cros_build_lib.Die('Unable to create snapshot.')

  img_path = _ImageFileForChroot(options.chroot)
  if (options.use_image and os.path.exists(options.chroot) and
      os.path.exists(img_path)):
    img_stat = os.stat(img_path)
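    # Note: st_blocks counts 512-byte units (per POSIX), regardless of the
    # filesystem's actual block size.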
    img_used_bytes = img_stat.st_blocks * 512

    mount_stat = os.statvfs(options.chroot)
    mount_used_bytes = mount_stat.f_frsize * (mount_stat.f_blocks -
                                              mount_stat.f_bfree)

    extra_gbs = (img_used_bytes - mount_used_bytes) / 2**30
    if extra_gbs > MAX_UNUSED_IMAGE_GBS:
      logging.notice('%s is using %s GiB more than needed.  Running '
                     'fstrim.', img_path, extra_gbs)
      cmd = ['fstrim', options.chroot]
      try:
        cros_build_lib.RunCommand(cmd, print_cmd=False)
      except cros_build_lib.RunCommandError as e:
        logging.warning('Running fstrim failed. Consider running fstrim on '
                        'your chroot manually.\nError: %s', e)

  # Enter a new set of namespaces.  Everything after here cannot directly affect
  # the host's mounts or alter LVM volumes.
  namespaces.SimpleUnshare()
  if options.ns_pid:
    first_pid = namespaces.CreatePidNs()
  else:
    first_pid = None

  if options.snapshot_list:
    for snap in ListChrootSnapshots(chroot_vg, chroot_lv):
      print(snap)
    sys.exit(0)

  if not options.sdk_version:
    sdk_version = (bootstrap_latest_version if options.bootstrap
                   else sdk_latest_version)
  else:
    sdk_version = options.sdk_version
  if options.buildbot_log_version:
    logging.PrintBuildbotStepText(sdk_version)

  # Based on selections, determine the tarball to fetch.
  if options.download:
    if options.sdk_url:
      urls = [options.sdk_url]
    elif options.bootstrap:
      urls = GetStage3Urls(sdk_version)
    else:
      urls = GetArchStageTarballs(sdk_version)

  # Get URLs for the toolchains overlay, if one is to be used.
  toolchains_overlay_urls = None
  if not options.bootstrap:
    toolchains = None
    if options.toolchains:
      toolchains = options.toolchains.split(',')
    elif options.board:
      toolchains = toolchain.GetToolchainsForBoard(options.board).keys()

    if toolchains:
      toolchains_overlay_urls = GetToolchainsOverlayUrls(sdk_version,
                                                         toolchains)

  with cgroups.SimpleContainChildren('cros_sdk', pid=first_pid):
    with locking.FileLock(lock_path, 'chroot lock') as lock:
      toolchains_overlay_tarball = None

      if options.proxy_sim:
        _ProxySimSetup(options)

      if (options.delete and not chroot_deleted and
          (os.path.exists(options.chroot) or
           os.path.exists(_ImageFileForChroot(options.chroot)))):
        lock.write_lock()
        DeleteChroot(options.chroot)

      sdk_cache = os.path.join(options.cache_dir, 'sdks')
      distfiles_cache = os.path.join(options.cache_dir, 'distfiles')
      osutils.SafeMakedirsNonRoot(options.cache_dir)

      for target in (sdk_cache, distfiles_cache):
        src = os.path.join(constants.SOURCE_ROOT, os.path.basename(target))
        if not os.path.exists(src):
          osutils.SafeMakedirsNonRoot(target)
          continue
        lock.write_lock(
            "Upgrade to %r needed but chroot is locked; please exit "
            "all instances so this upgrade can finish." % src)
        if not os.path.exists(src):
          # Note that while waiting for the write lock, src may've vanished;
          # it's a rare race during the upgrade process that's a byproduct
          # of us avoiding taking a write lock to do the src check.  If we
          # took a write lock for that check, it would effectively limit
          # all cros_sdk for a chroot to a single instance.
          osutils.SafeMakedirsNonRoot(target)
        elif not os.path.exists(target):
          # Upgrade occurred, but a reversion, or something whacky
          # occurred writing to the old location.  Wipe and continue.
          os.rename(src, target)
        else:
          # Upgrade occurred once already, but either a reversion or
          # some before/after separate cros_sdk usage is at play.
          # Wipe and continue.
          osutils.RmDir(src)

      if options.download:
        lock.write_lock()
        sdk_tarball = FetchRemoteTarballs(
            sdk_cache, urls, 'stage3' if options.bootstrap else 'SDK')
        if toolchains_overlay_urls:
          toolchains_overlay_tarball = FetchRemoteTarballs(
              sdk_cache, toolchains_overlay_urls, 'SDK toolchains overlay',
              allow_none=True)

      if options.create:
        lock.write_lock()
        CreateChroot(options.chroot, sdk_tarball, toolchains_overlay_tarball,
                     options.cache_dir,
                     nousepkg=(options.bootstrap or options.nousepkg))

      if options.enter:
        lock.read_lock()
        EnterChroot(options.chroot, options.cache_dir, options.chrome_root,
                    options.chrome_root_mount, options.workspace,
                    options.goma_dir, options.goma_client_json,
                    options.working_dir, chroot_command)
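
Across all of these examples, GetToolchainsForBoard() is treated as a mapping
from toolchain target tuples to per-target settings dicts, with one entry
marked as the default. A hypothetical return value inferred from that usage
(the real settings keys may differ):

  {
      'x86_64-cros-linux-gnu': {'default': True},
      'i686-pc-linux-gnu': {'default': False},
  }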