Example #1
def _deploy_ssh(image, module, flashrom, fast, verbose, ip, port, dryrun):
  """Deploy to a servo connection.

  Args:
    image (str): Path to the image to flash.
    module: The config module.
    flashrom (bool): Whether to use flashrom or futility.
    fast (bool): Whether to do a fast (no verification) flash.
    verbose (bool): Whether to use verbose output for flash commands.
    ip (str): The DUT ip address.
    port (int): The port to ssh to.
    dryrun (bool): Whether to execute the deployment or just print the
      commands that would have been executed.
  """
  logging.notice('Attempting to flash via ssh.')
  # TODO(b/143241417): Can't use flashrom over ssh on wilco.
  if (hasattr(module, 'DEPLOY_SSH_FORCE_FUTILITY') and
      module.DEPLOY_SSH_FORCE_FUTILITY and flashrom):
    logging.warning('Flashing with flashrom over ssh on this device fails '
                    'consistently, flashing with futility instead.')
    flashrom = False
  if _ssh_flash(not flashrom, image, verbose, ip, port, fast, dryrun):
    logging.notice('ssh flash successful. Exiting flash_ap')
  else:
    raise DeployFailed('ssh failed, try using a servo connection instead.')
Example #2
    def FlashCrosImage(self, xbuddy_path):
        """Flashes CrOS image to DUT.

    Returns True when it successfully flashes the image to the DUT. Raises an
    exception if it still fails after retrying.

    Args:
      xbuddy_path: xbuddy path to CrOS image to flash.

    Returns:
      True

    Raises:
      FlashError: An unrecoverable error occurred.
    """
        logging.notice('cros flash %s', xbuddy_path)

        @retry_util.WithRetry(self.cros_flash_retry,
                              log_all_retries=True,
                              sleep=self.cros_flash_sleep,
                              backoff_factor=self.cros_flash_backoff)
        def flash_with_retry():
            flash.Flash(self.remote,
                        xbuddy_path,
                        board=self.board,
                        clobber_stateful=True,
                        disable_rootfs_verification=True)

        flash_with_retry()
        return True
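The snippet above applies the retry policy by decorating a small inner closure and calling it once, so only the flash call is retried. Below is a minimal, stdlib-only sketch of the same pattern; the with_retry decorator, its parameters, and flash_image() are hypothetical stand-ins for retry_util.WithRetry and flash.Flash, not the real chromite APIs.

import functools
import time


def with_retry(max_retries, sleep=0.0, backoff_factor=1.0):
  """Retry the wrapped callable up to |max_retries| extra times."""
  def decorator(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
      delay = sleep
      for attempt in range(max_retries + 1):
        try:
          return func(*args, **kwargs)
        except Exception:
          if attempt == max_retries:
            raise
          time.sleep(delay)
          delay *= backoff_factor
    return wrapper
  return decorator


def flash_image(xbuddy_path):
  # Hypothetical stand-in for flash.Flash(); assume it may raise transiently.
  print('flashing %s' % xbuddy_path)


def flash_with_policy(xbuddy_path):
  @with_retry(max_retries=2, sleep=1.0, backoff_factor=2.0)
  def flash_once():
    flash_image(xbuddy_path)

  flash_once()
  return True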
Example #3
    def _TransferRootfsUpdate(self):
        """Transfer files for rootfs update.

    Copy the update payload to the remote device for rootfs update from the
    staging server via curl.
    """
        self._EnsureDeviceDirectory(self._device_payload_dir)

        logging.notice('Copying rootfs payload to device...')

        # TODO(crbug.com/1024639): Another way to make the payloads available is
        # to make update_engine download it directly from the staging_server. This
        # will avoid a disk copy but has the potential to be harder to debug if
        # update engine does not report the error clearly.

        self._device.run(
            self._GetCurlCmdForPayloadDownload(
                payload_dir=self._device_payload_dir,
                build_id=self._payload_dir,
                payload_filename=self._payload_name))

        self._device.CopyToWorkDir(src=self._local_payload_props_path,
                                   dest=self.PAYLOAD_DIR_NAME,
                                   mode=self._payload_mode,
                                   log_output=True,
                                   **self._cmd_kwargs)
Example #4
def CreateChroot(chroot_path, sdk_tarball, toolchains_overlay_tarball,
                 cache_dir, nousepkg=False):
  """Creates a new chroot from a given SDK.

  Args:
    chroot_path: Path where the new chroot will be created.
    sdk_tarball: Path to a downloaded Gentoo Stage3 or Chromium OS SDK tarball.
    toolchains_overlay_tarball: Optional path to a second tarball that will be
        unpacked into the chroot on top of the SDK tarball.
    cache_dir: Path to a directory that will be used for caching portage files,
        etc.
    nousepkg: If True, pass --nousepkg to cros_setup_toolchains inside the
        chroot.
  """

  cmd = MAKE_CHROOT + ['--stage3_path', sdk_tarball,
                       '--chroot', chroot_path,
                       '--cache_dir', cache_dir]

  if toolchains_overlay_tarball:
    cmd.extend(['--toolchains_overlay_path', toolchains_overlay_tarball])

  if nousepkg:
    cmd.append('--nousepkg')

  logging.notice('Creating chroot. This may take a few minutes...')
  try:
    cros_build_lib.RunCommand(cmd, print_cmd=False)
  except cros_build_lib.RunCommandError:
    raise SystemExit('Running %r failed!' % cmd)
Example #5
  def MaySetupBoard(self):
    """Checks if /build/${board} exists. Sets it up if not.

    Returns:
      False if setup_board or build_package failed. True otherwise.
    """
    if not os.path.isdir(self.cros_dir):
      logging.notice('ChromeOS source: %s does not exist, set it up',
                     self.cros_dir)
      self.SetupCrosRepo()

    board_path = self.ResolvePathFromChroot(os.path.join('/build', self.board))
    if os.path.isdir(board_path):
      return True

    try:
      self.RunCommandInsideCrosSdk(['./setup_board', '--board', self.board])
    except cros_build_lib.RunCommandError as e:
      logging.error('Failed to setup_board for %s: %s', self.board, e)
      return False

    try:
      self.RunCommandInsideCrosSdk(['./build_packages', '--board', self.board])
    except cros_build_lib.RunCommandError as e:
      logging.error('Failed to build_package for %s: %s', self.board, e)
      return False
    return True
Example #6
def CreateChroot(chroot_path, sdk_tarball, cache_dir, nousepkg=False):
    """Creates a new chroot from a given SDK.

  Args:
    chroot_path: Path where the new chroot will be created.
    sdk_tarball: Path to a downloaded Gentoo Stage3 or Chromium OS SDK tarball.
    cache_dir: Path to a directory that will be used for caching portage files,
        etc.
    nousepkg: If True, pass --nousepkg to cros_setup_toolchains inside the
        chroot.
  """

    cmd = MAKE_CHROOT + [
        '--stage3_path', sdk_tarball, '--chroot', chroot_path, '--cache_dir',
        cache_dir
    ]

    if nousepkg:
        cmd.append('--nousepkg')

    logging.notice('Creating chroot. This may take a few minutes...')
    try:
        cros_build_lib.dbg_run(cmd)
    except cros_build_lib.RunCommandError as e:
        cros_build_lib.Die('Creating chroot failed!\n%s', e)
Example #7
  def ObtainBisectBoundaryScoreImpl(self, good_side):
    """The worker of obtaining score of either last-known-good or bad commit.

    Instead of deploying Chrome for good/bad commit, it deploys good/bad
    CrOS image if self.bisect_between_cros_version is set.

    Args:
      good_side: True if it evaluates score for last-known-good. False for
          last-known-bad commit.

    Returns:
      Evaluated score.
    """
    commit = self.good_commit if good_side else self.bad_commit
    commit_label = 'good' if good_side else 'bad'
    # Though bisect_between_cros_version uses archived image directly without
    # building Chrome, it is necessary because BuildDeployEval() will update
    # self.current_commit.
    self.Git(['checkout', commit])
    eval_label = None
    customize_build_deploy = None
    if self.bisect_between_cros_version:
      cros_version = (self.good_cros_version if good_side else
                      self.bad_cros_version)
      logging.notice('Obtaining score of %s CrOS version: %s', commit_label,
                     cros_version)
      eval_label = 'cros_%s' % cros_version
      customize_build_deploy = lambda: self.FlashCrosImage(
          self.GetCrosXbuddyPath(cros_version))
    else:
      logging.notice('Obtaining score of %s commit: %s', commit_label, commit)

    return self.BuildDeployEval(eval_label=eval_label,
                                customize_build_deploy=customize_build_deploy)
Example #8
def _JsonLintFile(path, _output_format, _debug):
    """Returns result of running json lint checks on |path|."""
    result = cros_build_lib.CommandResult('python -mjson.tool "%s"' % path,
                                          returncode=0)

    data = osutils.ReadFile(path)

    # Strip off leading UTF-8 BOM if it exists.
    if data.startswith(u'\ufeff'):
        data = data[1:]

    # Strip out comments for JSON parsing.
    stripped_data = re.sub(r'^\s*#.*', '', data, flags=re.M)

    # See if it validates.
    try:
        json.loads(stripped_data)
    except ValueError as e:
        result.returncode = 1
        logging.notice('%s: %s', path, e)

    # Check whitespace.
    if not _WhiteSpaceLintData(path, data):
        result.returncode = 1

    return result
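For reference, here is the same BOM-strip, comment-strip, and parse sequence run on a small in-memory sample instead of a file; the sample text is made up.

import json
import re

data = u'\ufeff# top-of-file comment\n{\n  "key": "value"\n}\n'

# Strip off leading UTF-8 BOM if it exists.
if data.startswith(u'\ufeff'):
  data = data[1:]

# Strip out comments for JSON parsing.
stripped_data = re.sub(r'^\s*#.*', '', data, flags=re.M)

print(json.loads(stripped_data))  # {'key': 'value'}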
Example #9
    def Run(self):
        """Perfrom the cros flash command."""
        self.options.Freeze()

        try:
            flash.Flash(
                self.options.device,
                self.options.image,
                board=self.options.board,
                install=self.options.install,
                src_image_to_delta=self.options.src_image_to_delta,
                rootfs_update=self.options.rootfs_update,
                stateful_update=self.options.stateful_update,
                clobber_stateful=self.options.clobber_stateful,
                reboot=self.options.reboot,
                wipe=self.options.wipe,
                ping=self.options.ping,
                disable_rootfs_verification=self.options.disable_rootfs_verification,
                clear_cache=self.options.clear_cache,
                yes=self.options.yes,
                force=self.options.force,
                debug=self.options.debug,
            )
            logging.notice("cros flash completed successfully.")
        except dev_server_wrapper.ImagePathError:
            logging.error(
                "To get the latest remote image, please run:\n" "cros flash --board=%s %s remote/latest",
                self.options.board,
                self.options.device.raw,
            )
            raise
Example #10
def DeleteChrootSnapshot(snapshot_name, chroot_vg, chroot_lv):
    """Delete the named snapshot from the specified chroot VG.

  If the requested snapshot is not found, nothing happens.  The main chroot LV
  and internal thinpool LV cannot be deleted with this function.

  Args:
    snapshot_name: The name of the snapshot to delete.
    chroot_vg: The name of the VG containing the origin LV.
    chroot_lv: The name of the origin LV.

  Raises:
    SystemExit: The lvremove command failed.
  """
    if snapshot_name in (cros_sdk_lib.CHROOT_LV_NAME,
                         cros_sdk_lib.CHROOT_THINPOOL_NAME):
        logging.error(
            'Cannot remove LV %s as a snapshot.  Use cros_sdk --delete '
            'if you want to remove the whole chroot.', snapshot_name)
        return

    if snapshot_name not in ListChrootSnapshots(chroot_vg, chroot_lv):
        return

    cmd = ['lvremove', '-f', '%s/%s' % (chroot_vg, snapshot_name)]
    try:
        logging.notice('Deleting snapshot %s in VG %s.', snapshot_name,
                       chroot_vg)
        cros_build_lib.dbg_run(cmd, capture_output=True)
    except cros_build_lib.RunCommandError as e:
        cros_build_lib.Die('Deleting snapshot failed!\n%s', e)
Example #11
  def UploadSymbols(self, buildroot, board):
    """Upload generated debug symbols."""
    failed_name = 'failed_upload_symbols.list'
    failed_list = os.path.join(self.archive_path, failed_name)

    if self._run.options.remote_trybot or self._run.options.debug_forced:
      # For debug builds, limit ourselves to just uploading 1 symbol.
      # This way trybots and such still exercise this code.
      cnt = 1
      official = False
    else:
      cnt = None
      official = self._run.config.chromeos_official

    upload_passed = True
    try:
      commands.UploadSymbols(buildroot, board, official, cnt, failed_list)
    except failures_lib.BuildScriptFailure:
      upload_passed = False

    if os.path.exists(failed_list):
      self.UploadArtifact(failed_name, archive=False)

      logging.notice('To upload the missing symbols from this build, run:')
      for url in self._GetUploadUrls(filename=failed_name):
        logging.notice('upload_symbols --failed-list %s %s',
                       os.path.join(url, failed_name),
                       os.path.join(url, 'debug_breakpad.tar.xz'))

    # Delay throwing the exception until after we uploaded the list.
    if not upload_passed:
      raise DebugSymbolsUploadException('Failed to upload all symbols.')
Example #12
    def Run(self):
        """Perform the cros flash command."""
        self.options.Freeze()

        try:
            flash.Flash(
                self.options.device,
                self.options.image,
                board=self.options.board,
                version=self._GetDefaultVersion(),
                install=self.options.install,
                src_image_to_delta=self.options.src_image_to_delta,
                rootfs_update=self.options.rootfs_update,
                stateful_update=self.options.stateful_update,
                clobber_stateful=self.options.clobber_stateful,
                reboot=self.options.reboot,
                wipe=self.options.wipe,
                ssh_private_key=self.options.private_key,
                ping=self.options.ping,
                disable_rootfs_verification=self.options.
                disable_rootfs_verification,
                clear_cache=self.options.clear_cache,
                yes=self.options.yes,
                force=self.options.force,
                debug=self.options.debug,
                send_payload_in_parallel=self.options.send_payload_in_parallel)
            logging.notice('cros flash completed successfully.')
        except dev_server_wrapper.ImagePathError:
            logging.error(
                'To get the latest remote image, please run:\n'
                'cros flash --board=%s %s remote/latest', self.options.board,
                self.options.device.raw)
            raise
Example #13
    def Initialize(cls,
                   root,
                   manifest_url,
                   repo_url=None,
                   repo_branch=None,
                   groups=None):
        """Initialize the checkout if necessary. Otherwise a no-op.

    Args:
      root: The repo root.
      manifest_url: Manifest repository URL.
      repo_url: Repo repository URL. Uses default googlesource repo if None.
      repo_branch: Repo repository branch.
      groups: Repo groups to sync.
    """
        osutils.SafeMakedirs(root)
        if git.FindRepoCheckoutRoot(root) is None:
            logging.notice('Will initialize checkout %s for this run.', root)
            repo_util.Repository.Initialize(root,
                                            manifest_url,
                                            repo_url=repo_url,
                                            repo_branch=repo_branch,
                                            groups=groups)
        else:
            logging.notice('Will use existing checkout %s for this run.', root)
        return cls(root,
                   manifest_url=manifest_url,
                   repo_url=repo_url,
                   groups=groups)
Example #14
    def _MarkUpdateType(self, update_type):
        """Marks the type of the update.

    Args:
      update_type: The type of the update to be marked. See Update()
    """
        if update_type not in (self.UPDATE_TYPE_CLOBBER,
                               self.UPDATE_TYPE_STANDARD):
            raise Error('Invalid update type %s' % update_type)

        with tempfile.NamedTemporaryFile() as f:
            if update_type == self.UPDATE_TYPE_STANDARD:
                logging.notice('Performing standard stateful update...')
            elif update_type == self.UPDATE_TYPE_CLOBBER:
                logging.notice('Restoring stateful to factory_install '
                               'with dev_image...')
                osutils.WriteFile(f.name, 'clobber')

            try:
                self._device.CopyToDevice(f.name, self._update_type_file,
                                          'scp')
            except cros_build_lib.RunCommandError as e:
                raise Error(
                    'Failed to copy update type file to device with error %s' %
                    e)
Example #15
 def Run(self):
     if self.options.root:
         self._RunInCheckout(self.options.root)
     else:
         with CrosCheckout.TempRoot() as root:
             self._RunInCheckout(root)
             logging.notice('Cleaning up...')
Example #16
def _Unmerge(device, pkg, root):
  """Unmerges |pkg| on |device|.

  Args:
    device: A RemoteDevice object.
    pkg: A package name.
    root: Package installation root path.
  """
  pkg_name = os.path.basename(pkg)
  # This message is read by BrilloDeployOperation.
  logging.notice('Unmerging %s.', pkg_name)
  cmd = ['qmerge', '--yes']
  # Check if qmerge is available on the device. If not, use emerge.
  if device.RunCommand(
      ['qmerge', '--version'], error_code_ok=True).returncode != 0:
    cmd = ['emerge']

  cmd.extend(['--unmerge', pkg, '--root=%s' % root])
  try:
    # Always showing the emerge output for clarity.
    device.RunCommand(cmd, capture_output=False, remote_sudo=True,
                      debug_level=logging.INFO)
  except Exception:
    logging.error('Failed to unmerge package %s', pkg_name)
    raise
  else:
    logging.notice('%s has been uninstalled.', pkg_name)
Example #17
def GetChromiteTrackingBranch():
  """Returns the remote branch associated with chromite."""
  cwd = os.path.dirname(os.path.realpath(__file__))
  result_ref = GetTrackingBranch(cwd, for_checkout=False, fallback=False)
  if result_ref:
    branch = result_ref.ref
    if branch.startswith('refs/heads/'):
      # Normal scenario.
      return StripRefsHeads(branch)
    # Reaching here means it was refs/remotes/m/blah, or just plain invalid,
    # or that we're on a detached head in a repo not managed by chromite.

  # Manually try the manifest next.
  try:
    manifest = ManifestCheckout.Cached(cwd)
    # Ensure the manifest knows of this checkout.
    if manifest.FindCheckoutFromPath(cwd, strict=False):
      return manifest.manifest_branch
  except EnvironmentError as e:
    if e.errno != errno.ENOENT:
      raise

  # Not a manifest checkout.
  logging.notice(
      "Chromite checkout at %s isn't controlled by repo, nor is it on a "
      'branch (or if it is, the tracking configuration is missing or broken).  '
      'Falling back to assuming the chromite checkout is derived from '
      "'master'; this *may* result in breakage." % cwd)
  return 'master'
Example #18
 def Run(self):
   """Perfrom the cros flash command."""
   self.options.Freeze()
   try:
     flash.Flash(
         self.options.device,
         self.options.image,
         project_sdk_image=self.options.project_sdk is not None,
         sdk_version=self.options.project_sdk or None,
         board=self.options.board,
         brick_name=self.options.brick or self.curr_brick_locator,
         blueprint_name=self.options.blueprint,
         install=self.options.install,
         src_image_to_delta=self.options.src_image_to_delta,
         rootfs_update=self.options.rootfs_update,
         stateful_update=self.options.stateful_update,
         clobber_stateful=self.options.clobber_stateful,
         reboot=self.options.reboot,
         wipe=self.options.wipe,
         ping=self.options.ping,
         disable_rootfs_verification=self.options.disable_rootfs_verification,
         clear_cache=self.options.clear_cache,
         yes=self.options.yes,
         force=self.options.force,
         debug=self.options.debug)
   except dev_server_wrapper.ImagePathError as e:
     logging.error('To get the latest remote image, please run:\n'
                   'cros flash --board=%s %s remote/latest',
                   self.options.board, self.options.device.raw)
     self._HandleException(e)
   except Exception as e:
     self._HandleException(e)
   else:
     logging.notice('cros flash completed successfully.')
Example #19
    def BumpVersion(self, which, branch, message, dry_run=True, fetch=False):
        """Increment version in chromeos_version.sh and commit it.

    Args:
      which: Which version should be incremented. One of
          'chrome_branch', 'build', 'branch', 'patch'.
      branch: The branch to push to.
      message: The commit message for the version bump.
      dry_run: Whether to use git --dry-run.
      fetch: Whether to fetch and checkout to the given branch.
    """
        logging.notice(message)

        chromiumos_overlay = self.manifest.GetUniqueProject(
            'chromiumos/overlays/chromiumos-overlay')
        remote = chromiumos_overlay.Remote().GitName()
        ref = git.NormalizeRef(branch)

        if fetch:
            self.RunGit(chromiumos_overlay, ['fetch', remote, ref])
            self.RunGit(chromiumos_overlay,
                        ['checkout', '-B', branch, 'FETCH_HEAD'])

        new_version = self.ReadVersion(incr_type=which)
        new_version.IncrementVersion()
        remote_ref = git.RemoteRef(remote, ref)
        new_version.UpdateVersionFile(message,
                                      dry_run=dry_run,
                                      push_to=remote_ref)
Example #20
def _Unmerge(device, pkg, root):
  """Unmerges |pkg| on |device|.

  Args:
    device: A RemoteDevice object.
    pkg: A package name.
    root: Package installation root path.
  """
  pkg_name = os.path.basename(pkg)
  # This message is read by BrilloDeployOperation.
  logging.notice('Unmerging %s.', pkg_name)
  cmd = ['qmerge', '--yes']
  # Check if qmerge is available on the device. If not, use emerge.
  if device.RunCommand(
      ['qmerge', '--version'], error_code_ok=True).returncode != 0:
    cmd = ['emerge']

  cmd.extend(['--unmerge', pkg, '--root=%s' % root])
  try:
    # Always showing the emerge output for clarity.
    device.RunCommand(cmd, capture_output=False, remote_sudo=True,
                      debug_level=logging.INFO)
  except Exception:
    logging.error('Failed to unmerge package %s', pkg_name)
    raise
  else:
    logging.notice('%s has been uninstalled.', pkg_name)
Example #21
def CreateChrootSnapshot(snapshot_name, chroot_vg, chroot_lv):
    """Create a snapshot for the specified chroot VG/LV.

  Args:
    snapshot_name: The name of the new snapshot.
    chroot_vg: The name of the VG containing the origin LV.
    chroot_lv: The name of the origin LV.

  Returns:
    True if the snapshot was created, or False if a snapshot with the same
    name already exists.

  Raises:
    SystemExit: The lvcreate command failed.
  """
    if snapshot_name in ListChrootSnapshots(chroot_vg, chroot_lv):
        logging.error(
            'Cannot create snapshot %s: A volume with that name already '
            'exists.', snapshot_name)
        return False

    cmd = [
        'lvcreate', '-s', '--name', snapshot_name,
        '%s/%s' % (chroot_vg, chroot_lv)
    ]
    try:
        logging.notice('Creating snapshot %s from %s in VG %s.', snapshot_name,
                       chroot_lv, chroot_vg)
        cros_build_lib.dbg_run(cmd, capture_output=True)
        return True
    except cros_build_lib.RunCommandError as e:
        cros_build_lib.Die('Creating snapshot failed!\n%s', e)
Example #22
 def testNotice(self):
   """Test logging.notice works and is between INFO and WARNING."""
   msg = 'notice message'
   self.logger.setLevel(logging.INFO)
   self.AssertLogContainsMsg(msg, lambda: logging.notice(msg))
   self.logger.setLevel(logging.WARNING)
   self.AssertLogContainsMsg(msg, lambda: logging.notice(msg), invert=True)
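Note that logging.notice() is not part of the standard library; the logging wrapper used throughout these examples adds a NOTICE level between INFO and WARNING, which is exactly what this test asserts. A minimal sketch of registering a comparable level with plain stdlib logging follows; the numeric value 25 and the helper name are assumptions.

import logging

NOTICE = 25  # Assumed value; any level strictly between INFO (20) and WARNING (30) works.
logging.addLevelName(NOTICE, 'NOTICE')


def notice(message, *args, **kwargs):
  """Module-level helper mirroring logging.info()/logging.warning()."""
  logging.log(NOTICE, message, *args, **kwargs)


logging.notice = notice  # Expose it the way the examples above use it.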
Example #23
def UpdateChroot(brick=None, board=None, update_host_packages=True):
    """Update the chroot."""
    # Run chroot update hooks.
    logging.notice('Updating the chroot. This may take several minutes.')
    cmd = [os.path.join(constants.CROSUTILS_DIR, 'run_chroot_version_hooks')]
    cros_build_lib.RunCommand(cmd, debug_level=logging.DEBUG)

    # Update toolchains.
    cmd = [os.path.join(constants.CHROMITE_BIN_DIR, 'cros_setup_toolchains')]
    if brick:
        cmd += [
            '--targets=bricks',
            '--include-bricks=%s' % brick.brick_locator
        ]
    elif board:
        cmd += ['--targets=boards', '--include-boards=%s' % board]
    cros_build_lib.SudoRunCommand(cmd, debug_level=logging.DEBUG)

    # Update the host before updating the board.
    if update_host_packages:
        Emerge(list(_HOST_PKGS), '/', rebuild_deps=False)

    # Automatically discard all CONFIG_PROTECT'ed files. Those that are
    # protected should not be overwritten until the variable is changed.
    # Autodiscard is option "-9" followed by the "YES" confirmation.
    cros_build_lib.SudoRunCommand(['etc-update'],
                                  input='-9\nYES\n',
                                  debug_level=logging.DEBUG)
Example #24
def _DeployDLCImage(device, pkg_path):
    """Deploy (install and mount) a DLC image."""
    dlc_id, dlc_package = _GetDLCInfo(device, pkg_path, from_dut=False)
    if dlc_id and dlc_package:
        logging.notice('Deploy a DLC image for %s', dlc_id)

        dlc_path_src = os.path.join('/build/rootfs/dlc', dlc_id, dlc_package,
                                    'dlc.img')
        dlc_path = os.path.join(_DLC_INSTALL_ROOT, dlc_id, dlc_package)
        dlc_path_a = os.path.join(dlc_path, 'dlc_a')
        dlc_path_b = os.path.join(dlc_path, 'dlc_b')
        # Create folders for DLC images.
        device.RunCommand(['mkdir', '-p', dlc_path_a, dlc_path_b])
        # Copy images to the destination folders.
        device.RunCommand(
            ['cp', dlc_path_src,
             os.path.join(dlc_path_a, 'dlc.img')])
        device.RunCommand(
            ['cp', dlc_path_src,
             os.path.join(dlc_path_b, 'dlc.img')])

        # Set the proper perms and ownership so dlcservice can access the image.
        device.RunCommand(['chmod', '-R', '0755', _DLC_INSTALL_ROOT])
        device.RunCommand(
            ['chown', '-R', 'dlcservice:dlcservice', _DLC_INSTALL_ROOT])
        return True
    else:
        logging.debug('DLC_ID not found in package')
        return False
Example #25
def _StopKvmIgnoringErrors(kvm_pid):
    """Stops a running KVM instance. Ignores errors."""
    logging.notice('Stopping KVM. This may take a minute.')
    _RunIgnoringErrors([
        os.path.join(constants.CROSUTILS_DIR, 'bin', 'cros_stop_vm'),
        '--kvm_pid',
        kvm_pid,
    ])
Example #26
    def SyncBranch(self, branch):
        """Sync to the given branch.

    Args:
      branch: Name of branch to sync to.
    """
        logging.notice('Syncing checkout %s to branch %s.', self.root, branch)
        self._Sync(['--branch', branch])
Example #27
def DeleteChroot(chroot_path):
    """Deletes an existing chroot"""
    cmd = MAKE_CHROOT + ['--chroot', chroot_path, '--delete']
    try:
        logging.notice('Deleting chroot.')
        cros_build_lib.dbg_run(cmd)
    except cros_build_lib.RunCommandError as e:
        cros_build_lib.Die('Deleting chroot failed!\n%s', e)
Example #28
def DeleteChroot(chroot_path):
    """Deletes an existing chroot"""
    cmd = MAKE_CHROOT + ['--chroot', chroot_path, '--delete']
    try:
        logging.notice('Deleting chroot.')
        cros_build_lib.RunCommand(cmd, print_cmd=False)
    except cros_build_lib.RunCommandError:
        raise SystemExit('Running %r failed!' % cmd)
Example #29
def SetupTsMonGlobalState(service_name,
                          short_lived=False,
                          indirect=False,
                          auto_flush=True):
  """Uses a dummy argument parser to get the default behavior from ts-mon.

  Args:
    service_name: The name of the task we are sending metrics from.
    short_lived: Whether this process is short-lived and should use the autogen
                 hostname prefix.
    indirect: Whether to create a metrics.METRICS_QUEUE object and a separate
              process for indirect metrics flushing. Useful for forking,
              because forking would normally create a duplicate ts_mon thread.
    auto_flush: Whether to create a thread to automatically flush metrics every
                minute.
  """
  if not config:
    return TrivialContextManager()

  if indirect:
    return _CreateTsMonFlushingProcess([service_name],
                                       {'short_lived': short_lived})

  # google-api-client has too much noisy logging.
  googleapiclient.discovery.logger.setLevel(logging.WARNING)
  parser = argparse.ArgumentParser()
  config.add_argparse_options(parser)
  args = [
      '--ts-mon-target-type', 'task',
      '--ts-mon-task-service-name', service_name,
      '--ts-mon-task-job-name', service_name,
  ]

  # Short lived processes will have autogen: prepended to their hostname and
  # use task-number=PID to trigger shorter retention policies under
  # chrome-infra@, and used by a Monarch precomputation to group across the
  # task number.
  # Furthermore, we assume they manually call ts_mon.Flush(), because the
  # ts_mon thread will drop messages if the process exits before it flushes.
  if short_lived:
    auto_flush = False
    fqdn = socket.getfqdn().lower()
    host = fqdn.split('.')[0]
    args.extend(['--ts-mon-task-hostname', 'autogen:' + host,
                 '--ts-mon-task-number', os.getpid()])

  args.extend(['--ts-mon-flush', 'auto' if auto_flush else 'manual'])

  try:
    config.process_argparse_options(parser.parse_args(args=args))
    logging.notice('ts_mon was set up.')
    _WasSetup = True
  except Exception as e:
    logging.warning('Failed to configure ts_mon, monitoring is disabled: %s', e,
                    exc_info=True)


  return TrivialContextManager()
Example #30
def _GolintFile(path, _, debug):
    """Returns result of running golint on |path|."""
    # Try using golint if it exists.
    try:
        cmd = ['golint', '-set_exit_status', path]
        return _LinterRunCommand(cmd, debug)
    except cros_build_lib.RunCommandError:
        logging.notice('Install golint for additional go linting.')
        return cros_build_lib.CommandResult('gofmt "%s"' % path, returncode=0)
Example #31
 def Evaluate(cros_version, chromium_commit):
     self.Git(['checkout', chromium_commit])
     score = self.BuildDeployEval(
         eval_label='cros_%s_cr_%s' % (cros_version, chromium_commit),
         customize_build_deploy=lambda: FlashBuildDeploy(cros_version))
     label = self.LabelBuild(score)
     logging.notice('Score(mean: %.3f std: %.3f). Marked as %s',
                    score.mean, score.std, label)
     return label
Example #32
def DeleteChroot(chroot_path):
  """Deletes an existing chroot"""
  cmd = MAKE_CHROOT + ['--chroot', chroot_path,
                       '--delete']
  try:
    logging.notice('Deleting chroot.')
    cros_build_lib.RunCommand(cmd, print_cmd=False)
  except cros_build_lib.RunCommandError:
    raise SystemExit('Running %r failed!' % cmd)
Example #33
  def Run(self):
    """Run cros build."""
    self.options.Freeze()

    if not self.host:
      if not (self.board or self.brick or self.blueprint):
        cros_build_lib.Die('You did not specify a board/brick to build for. '
                           'You need to be in a brick directory or set '
                           '--board/--brick/--host')

      if self.brick and self.brick.legacy:
        cros_build_lib.Die('--brick should not be used with board names. Use '
                           '--board=%s instead.' % self.brick.config['name'])

    if self.blueprint:
      chroot_args = ['--toolchains',
                     ','.join(toolchain.GetToolchainsForBrick(
                         self.blueprint.GetBSP()).iterkeys())]
    elif self.board:
      chroot_args = ['--board', self.board]
    else:
      chroot_args = None

    commandline.RunInsideChroot(self, chroot_args=chroot_args)

    if not (self.build_pkgs or self.options.init_only):
      cros_build_lib.Die('No packages found, nothing to build.')

    # Set up the sysroots if not building for host.
    if self.blueprint:
      if self.chroot_update:
        chroot_util.UpdateChroot(
            update_host_packages=self.options.host_packages_update,
            brick=brick_lib.Brick(self.blueprint.GetBSP()))
      chroot_util.InitializeSysroots(self.blueprint)
    elif self.brick or self.board:
      chroot_util.SetupBoard(
          brick=self.brick, board=self.board,
          update_chroot=self.chroot_update,
          update_host_packages=self.options.host_packages_update,
          use_binary=self.options.binary)

    if not self.options.init_only:
      # Preliminary: enable all packages that only have a live ebuild.
      if self.options.enable_only_latest:
        workon = workon_helper.WorkonHelper(self.sysroot)
        workon.StartWorkingOnPackages([], use_workon_only=True)

      if command.UseProgressBar():
        op = BrilloBuildOperation()
        op.Run(
            parallel.RunParallelSteps, [self._CheckDependencies, self._Build],
            log_level=logging.DEBUG)
      else:
        parallel.RunParallelSteps([self._CheckDependencies, self._Build])
      logging.notice('Build completed successfully.')
Example #34
    def _HandleCreate(self, checkout):
        """Sync to the version or file and create a branch.

    Args:
      checkout: The CrosCheckout to run commands in.
    """
        # Start with quick, immediate validations.
        if self.options.name and self.options.descriptor:
            raise BranchError('--descriptor cannot be used with --custom.')

        if self.options.version and not self.options.version.endswith('0'):
            raise BranchError(
                'Cannot branch version from nonzero patch number.')

        # Handle sync. Unfortunately, we cannot fully validate the version until
        # we have a copy of chromeos_version.sh.
        if self.options.file:
            checkout.SyncFile(self.options.file)
        else:
            checkout.SyncVersion(self.options.version)

        # Now to validate the version. First, double check that the checkout
        # has a zero patch number in case we synced from file.
        vinfo = checkout.ReadVersion()
        if int(vinfo.patch_number):
            raise BranchError(
                'Cannot branch version with nonzero patch number.')

        # Second, check that we did not already branch from this version.
        # manifest-internal serves as the sentinel project.
        manifest_internal = checkout.manifest.GetUniqueProject(
            'chromeos/manifest-internal')
        pattern = '.*-%s\\.B$' % '\\.'.join(
            str(comp) for comp in vinfo.VersionComponents() if comp)
        if (checkout.BranchExists(manifest_internal, pattern)
                and not self.options.force):
            raise BranchError(
                'Already branched %s. Please rerun with --force if you wish to '
                'proceed.' % vinfo.VersionString())

        # Determine if we are creating a custom branch or a standard branch.
        if self.options.cls:
            branch = self.options.cls(checkout, self.options.descriptor)
        else:
            branch = Branch(checkout, self.options.name)

        # Finally, double check the name with the user.
        proceed = self.options.yes or cros_build_lib.BooleanPrompt(
            prompt='New branch will be named %s. Continue?' % branch.name,
            default=False)

        if proceed:
            branch.Create(push=self.options.push, force=self.options.force)
            logging.notice('Successfully created branch %s.', branch.name)
        else:
            logging.notice('Aborted branch creation.')
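For illustration, the branch-name pattern assembled above looks like this for a made-up version whose components are (12345, 1, 0); zero-valued components are dropped by the "if comp" filter.

components = (12345, 1, 0)  # Hypothetical vinfo.VersionComponents() result.
pattern = '.*-%s\\.B$' % '\\.'.join(str(comp) for comp in components if comp)
print(pattern)  # .*-12345\.1\.B$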
Example #35
 def testGetThresholdFromUser(self):
     """Tests GetThresholdFromUser()."""
     logging.notice('testGetThresholdFromUser')
     self.setDefaultCommitInfo()
     input_mock = self.PatchObject(cros_build_lib,
                                   'GetInput',
                                   return_value=self.THRESHOLD_SPLITTER)
     self.assertTrue(self.bisector.GetThresholdFromUser())
     self.assertEqual(self.THRESHOLD, self.bisector.threshold)
     input_mock.assert_called()
Example #36
  def ParseOutput(self, output=None):
    """Display progress bars for brillo image."""

    stdout = self._stdout.read()
    stderr = self._stderr.read()
    output = stdout + stderr
    stage_name, stage_exit = self._StageStatus(output)

    # If we are in a non-summarize stage, then we update the progress bar
    # accordingly.
    if (self._stage_name is not None and
        self._stage_name != self.SUMMARIZE_STAGE and not self._done):
      progress = super(BrilloImageOperation, self).ParseOutput(output)
      # If we are done displaying a progress bar for a stage, then we display
      # progress bar operation (parallel emerge).
      if progress == 1:
        self._done = True
        self.Cleanup()
        # Do not display a 100% progress in exit because it has already been
        # done.
        self._progress_bar_displayed = False
        self._PrintEndStageMessages()

    # Perform cleanup when exiting a stage.
    if stage_exit:
      self._stage_name = None
      self._total = None
      self._done = False
      self._completed = 0
      self._printed_no_packages = False
      self.Cleanup()
      self._progress_bar_displayed = False

    # When entering a stage, print stage appropriate entry messages.
    if stage_name is not None:
      self._stage_name = stage_name
      msg = self._PrintEnterStageMessages()
      self.SetProgressBarMessage(msg)
      if self._stage_name == self.SUMMARIZE_STAGE:
        sys.stdout.write('\n')

    # If we are in a summarize stage, properly format and display the output.
    if self._stage_name == self.SUMMARIZE_STAGE and not self._done:
      summarize_stage_prefix = 'INFO    : '
      for line in output.split('\n'):
        if self.SUMMARIZE_STAGE_START in line:
          self._enable_print = True
          continue
        if self.SUMMARIZE_STAGE_STOP in line:
          self._enable_print = False
          break
        if summarize_stage_prefix in line and self._enable_print:
          line = line.replace(summarize_stage_prefix, '')
          logging.notice(line)
Example #37
def _DownloadSdk(gs_ctx, sdk_dir, version):
  """Downloads the specified SDK to |sdk_dir|.

  Args:
    gs_ctx: GS Context to use.
    sdk_dir: Directory in which to create a repo.
    version: Project SDK version to sync. Can be a version number or 'tot';
      'latest' should be resolved before calling this.
  """
  try:
    # Create the SDK dir, if it doesn't already exist.
    osutils.SafeMakedirs(sdk_dir)

    repo_cmd = os.path.join(constants.BOOTSTRAP_DIR, 'repo')

    logging.notice('Fetching files. This could take a few minutes...')
    # TOT is a special case, handle it first.
    if version.lower() == 'tot':
      # Init new repo.
      repo = repository.RepoRepository(
          constants.MANIFEST_URL, sdk_dir, groups='project_sdk',
          repo_cmd=repo_cmd)
      # Sync it.
      repo.Sync()
      return

    with tempfile.NamedTemporaryFile() as manifest:
      # Fetch manifest into temp file.
      gs_ctx.Copy(_GetSdkManifestUrl(version), manifest.name,
                  debug_level=logging.DEBUG)

      manifest_git_dir = os.path.join(sdk_dir, _BRILLO_SDK_LOCAL_MANIFEST_REPO)

      # Convert manifest into a git repository for the repo command.
      repository.PrepManifestForRepo(manifest_git_dir, manifest.name)

      # Fetch the SDK.
      repo = repository.RepoRepository(manifest_git_dir, sdk_dir, depth=1,
                                       repo_cmd=repo_cmd)
      repo.Sync()

    # TODO(dgarrett): Embed this step into the manifest itself.
    # Write out the SDK Version.
    sdk_version_file = project_sdk.VersionFile(sdk_dir)
    osutils.WriteFile(sdk_version_file, version)
  except:
    # If we fail for any reason, remove the partial/corrupt SDK.
    osutils.RmDir(sdk_dir, ignore_missing=True)
    raise
Example #38
  def testSetupFileLoggerLogLevels(self):
    """Test that the logger operates at the right level."""
    command.SetupFileLogger('foo.log', log_level=logging.INFO)
    logging.getLogger().setLevel(logging.DEBUG)
    logging.debug('debug')
    logging.info('info')
    logging.notice('notice')

    # Test that the logs are correct.
    logs = open(
        os.path.join(self.workspace_path, workspace_lib.WORKSPACE_LOGS_DIR,
                     'foo.log'), 'r').read()
    self.assertNotIn('debug', logs)
    self.assertIn('info', logs)
    self.assertIn('notice', logs)
Example #39
  def testNotice(self):
    """Test logging.notice works and is between INFO and WARNING."""
    logger = logging.getLogger()
    sh = logging.StreamHandler(sys.stdout)
    logger.addHandler(sh)

    msg = 'notice message'

    logger.setLevel(logging.INFO)
    with self.OutputCapturer():
      logging.notice(msg)
    self.AssertOutputContainsLine(msg)

    logger.setLevel(logging.WARNING)
    with self.OutputCapturer():
      logging.notice(msg)
    self.AssertOutputContainsLine(msg, invert=True)
Example #40
def CreateChroot(chroot_path, sdk_tarball, cache_dir, nousepkg=False,
                 workspace=None):
  """Creates a new chroot from a given SDK"""

  cmd = MAKE_CHROOT + ['--stage3_path', sdk_tarball,
                       '--chroot', chroot_path,
                       '--cache_dir', cache_dir]
  if nousepkg:
    cmd.append('--nousepkg')

  if workspace:
    cmd.extend(['--workspace_root', workspace])

  logging.notice('Creating chroot. This may take a few minutes...')
  try:
    cros_build_lib.RunCommand(cmd, print_cmd=False)
  except cros_build_lib.RunCommandError:
    raise SystemExit('Running %r failed!' % cmd)
Example #41
  def Run(self):
    """Perform remote device update.

    The update process includes:
    1. initialize a device instance for the given remote device.
    2. obtain a payload_dir which contains the required payloads for updating.
    3. initialize an auto-updater instance to do RunUpdate().
    4. After the auto-update, all temp files and dirs are cleaned up.
    """
    try:
      device_connected = False

      with remote_access.ChromiumOSDeviceHandler(
          self.ssh_hostname, port=self.ssh_port,
          base_dir=self.DEVICE_BASE_DIR, ping=self.ping) as device:
        device_connected = True

        # Get payload directory
        payload_dir = self.GetPayloadDir(device)

        # Do auto-update
        chromeos_AU = auto_updater.ChromiumOSUpdater(
            device, payload_dir, self.tempdir,
            do_rootfs_update=self.do_rootfs_update,
            do_stateful_update=self.do_stateful_update,
            reboot=self.reboot,
            disable_verification=self.disable_verification,
            clobber_stateful=self.clobber_stateful,
            yes=self.yes)
        chromeos_AU.CheckPayloads()
        chromeos_AU.RunUpdate()

    except Exception:
      logging.error('Device update failed.')
      if device_connected and device.lsb_release:
        lsb_entries = sorted(device.lsb_release.items())
        logging.info('Following are the LSB version details of the device:\n%s',
                     '\n'.join('%s=%s' % (k, v) for k, v in lsb_entries))
      raise
    else:
      logging.notice('Update performed successfully.')
    finally:
      self.Cleanup()
Example #42
  def _PrintEnterStageMessages(self):
    """Messages to indicate the start of a new stage.

    Since the base image is always created, a message is always displayed for
    it. For the other stages, messages are only displayed if those stages will
    have a progress bar.

    Returns:
      A message that is to be displayed before the progress bar is shown (if
      needed). If the progress bar is not shown, then the message should not be
      displayed.
    """
    if self._stage_name == self.BASE_STAGE:
      logging.notice('Creating disk layout')
      return 'Building base image.'
    elif self._stage_name == self.DEV_STAGE:
      return 'Building developer image.'
    else:
      return 'Building test image.'
Example #43
def _PrintWorkspaceSdkVersion(workspace_path, to_stdout=False):
  """Prints a workspace SDK version.

  If an SDK version can't be found, calls cros_build_lib.Die().

  Args:
    workspace_path: workspace directory path.
    to_stdout: True to print to stdout, False to use the logger.
  """
  # Print the SDK version.
  sdk_version = workspace_lib.GetActiveSdkVersion(workspace_path)
  if sdk_version is None:
    cros_build_lib.Die(
        'This workspace does not have an SDK.\n'
        'Use `brillo sdk --update latest` to attach to the latest SDK.')
  if to_stdout:
    print(sdk_version)
  else:
    logging.notice('Workspace SDK version is %s.', sdk_version)
Example #44
def _SetupWorkDirectoryForPatch(work_dir, patch, branch, manifest, email):
  """Set up local dir for uploading changes to the given patch's project."""
  logging.notice('Setting up dir %s for uploading changes to %s', work_dir,
                 patch.project_url)

  # Clone the git repo from reference if we have a pointer to a
  # ManifestCheckout object.
  reference = None
  if manifest:
    # Get the path to the first checkout associated with this change. Since
    # all of the checkouts share git objects, it doesn't matter which checkout
    # we pick.
    path = manifest.FindCheckouts(patch.project, only_patchable=True)[0]['path']

    reference = os.path.join(constants.SOURCE_ROOT, path)
    if not os.path.isdir(reference):
      logging.error('Unable to locate git checkout: %s', reference)
      logging.error('Did you mean to use --nomirror?')
      # This will do an "raise OSError" with the right values.
      os.open(reference, os.O_DIRECTORY)
    # Use the email if email wasn't specified.
    if not email:
      email = git.GetProjectUserEmail(reference)

  repository.CloneGitRepo(work_dir, patch.project_url, reference=reference)

  # Set the git committer.
  git.RunGit(work_dir, ['config', '--replace-all', 'user.email', email])

  mbranch = git.MatchSingleBranchName(
      work_dir, branch, namespace='refs/remotes/origin/')
  if branch != mbranch:
    logging.notice('Auto resolved branch name "%s" to "%s"', branch, mbranch)
  branch = mbranch

  # Finally, create a local branch for uploading changes to the given remote
  # branch.
  git.CreatePushBranch(
      constants.PATCH_BRANCH, work_dir, sync=False,
      remote_push_branch=git.RemoteRef('ignore', 'origin/%s' % branch))

  return branch
Example #45
  def ParseOutput(self, output=None):
    """Parse the output of emerge to determine how to update progress bar.

    1) Figure out how many packages exist. If the total number of packages to be
    built is zero, then we do not display the progress bar.
    2) Whenever a package is downloaded or built, 'Fetched' and 'Completed' are
    printed respectively. By counting 'Fetched's and 'Completed's, we
    can determine how much to update the progress bar by.

    Args:
      output: Pass in output to parse instead of reading from self._stdout and
        self._stderr.

    Returns:
      A fraction between 0 and 1 indicating the level of the progress bar. If
      the progress bar isn't displayed, then the return value is -1.
    """
    if output is None:
      stdout = self._stdout.read()
      stderr = self._stderr.read()
      output = stdout + stderr

    if self._total is None:
      temp = self._GetTotal(output)
      if temp is not None:
        self._total = temp * len(self._events)
        if self._msg is not None:
          logging.notice(self._msg)

    for event in self._events:
      self._completed += output.count(event)

    if not self._printed_no_packages and self._total == 0:
      logging.notice('No packages to build.')
      self._printed_no_packages = True

    if self._total:
      progress = float(self._completed) / self._total
      self.ProgressBar(progress)
      return progress
    else:
      return -1
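A tiny numeric illustration of the computation above: with a hypothetical 4 packages and two tracked events, the total is 8, and the bar only reaches 1.0 once every package has been both fetched and built. The counts here are made up.

events = ('Fetched', 'Completed')  # Assumed contents of self._events.
packages = 4                       # Hypothetical package count from _GetTotal().
total = packages * len(events)     # 8
completed = 5                      # e.g. 4 'Fetched' + 1 'Completed' seen so far
progress = float(completed) / total
print(progress)                    # 0.625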
Example #46
def _Emerge(device, pkg_path, root, extra_args=None):
  """Copies |pkg| to |device| and emerges it.

  Args:
    device: A ChromiumOSDevice object.
    pkg_path: A path to a binary package.
    root: Package installation root path.
    extra_args: Extra arguments to pass to emerge.

  Raises:
    DeployError: Unrecoverable error during emerge.
  """
  pkgroot = os.path.join(device.work_dir, 'packages')
  pkg_name = os.path.basename(pkg_path)
  pkg_dirname = os.path.basename(os.path.dirname(pkg_path))
  pkg_dir = os.path.join(pkgroot, pkg_dirname)
  portage_tmpdir = os.path.join(device.work_dir, 'portage-tmp')
  # Clean out the dirs first if we had a previous emerge on the device so as to
  # free up space for this emerge.  The last emerge gets implicitly cleaned up
  # when the device connection deletes its work_dir.
  device.RunCommand(
      ['rm', '-rf', pkg_dir, portage_tmpdir, '&&',
       'mkdir', '-p', pkg_dir, portage_tmpdir], remote_sudo=True)

  # This message is read by BrilloDeployOperation.
  logging.notice('Copying %s to device.', pkg_name)
  device.CopyToDevice(pkg_path, pkg_dir, remote_sudo=True)

  logging.info('Use portage temp dir %s', portage_tmpdir)

  # This message is read by BrilloDeployOperation.
  logging.notice('Installing %s.', pkg_name)
  pkg_path = os.path.join(pkg_dir, pkg_name)

  # We set PORTAGE_CONFIGROOT to '/usr/local' because by default all
  # chromeos-base packages will be skipped due to the configuration
  # in /etc/portage/make.profile/package.provided. However, there is
  # a known bug that /usr/local/etc/portage is not setup properly
  # (crbug.com/312041). This does not affect `cros deploy` because
  # we do not use the preset PKGDIR.
  extra_env = {
      'FEATURES': '-sandbox',
      'PKGDIR': pkgroot,
      'PORTAGE_CONFIGROOT': '/usr/local',
      'PORTAGE_TMPDIR': portage_tmpdir,
      'PORTDIR': device.work_dir,
      'CONFIG_PROTECT': '-*',
  }
  cmd = ['emerge', '--usepkg', pkg_path, '--root=%s' % root]
  if extra_args:
    cmd.append(extra_args)

  try:
    device.RunCommand(cmd, extra_env=extra_env, remote_sudo=True,
                      capture_output=False, debug_level=logging.INFO)
  except Exception:
    logging.error('Failed to emerge package %s', pkg_name)
    raise
  else:
    logging.notice('%s has been installed.', pkg_name)
Example #47
def Expire(ctx, dryrun, url):
  """Given a url, move it to the backup buckets.

  Args:
    ctx: GS context.
    dryrun: Do we actually move the file?
    url: Address of file to move.
  """
  logging.info('Expiring: %s', url)
  # Move gs://foo/some/file -> gs://foo-backup/some/file
  parts = urlparse.urlparse(url)
  expired_parts = list(parts)
  expired_parts[1] = parts.netloc + '-backup'
  target_url = urlparse.urlunparse(expired_parts)
  if dryrun:
    logging.notice('gsutil mv %s %s', url, target_url)
  else:
    try:
      ctx.Move(url, target_url)
    except Exception as e:
      # We can fail for lots of repeated random reasons.
      logging.warn('Move of "%s" failed, ignoring: "%s"', url, e)
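A small worked example of the netloc rewrite above, using urllib.parse (the Python 3 counterpart of the urlparse module used by the original); the bucket name is made up.

from urllib import parse as urlparse

url = 'gs://foo/some/file'
parts = urlparse.urlparse(url)
expired_parts = list(parts)
expired_parts[1] = parts.netloc + '-backup'
print(urlparse.urlunparse(expired_parts))  # gs://foo-backup/some/file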
Example #48
  def Run(self):
    """Perfrom the cros flash command."""
    self.options.Freeze()

    # For brillo flash, enter the chroot to ensure a consistent environment. We
    # only do this for brillo because some cros workflows do not want to use the
    # chroot.
    if command.GetToolset() == 'brillo':
      commandline.RunInsideChroot(self)

    try:
      flash.Flash(
          self.options.device,
          self.options.image,
          project_sdk_image=self.options.project_sdk is not None,
          sdk_version=self.options.project_sdk or None,
          board=self.options.board,
          brick_name=self.options.brick or self.curr_brick_locator,
          blueprint_name=self.options.blueprint,
          install=self.options.install,
          src_image_to_delta=self.options.src_image_to_delta,
          rootfs_update=self.options.rootfs_update,
          stateful_update=self.options.stateful_update,
          clobber_stateful=self.options.clobber_stateful,
          reboot=self.options.reboot,
          wipe=self.options.wipe,
          ping=self.options.ping,
          disable_rootfs_verification=self.options.disable_rootfs_verification,
          clear_cache=self.options.clear_cache,
          yes=self.options.yes,
          force=self.options.force,
          debug=self.options.debug)
      logging.notice('cros flash completed successfully.')
    except dev_server_wrapper.ImagePathError:
      logging.error('To get the latest remote image, please run:\n'
                    'cros flash --board=%s %s remote/latest',
                    self.options.board, self.options.device.raw)
      raise
Example #49
  def RebootAndVerify(self):
    """Reboot and verify the remote device.

    1. Reboot the remote device. If _clobber_stateful (--clobber-stateful)
    is executed, the stateful partition is wiped, and the working directory
    on the remote device no longer exists. So, recreate the working directory
    for this remote device.
    2. Verify the remote device by checking whether the root device changed
    after reboot.
    """
    logging.notice('rebooting device...')
    # Record the current root device. This must be done after SetupRootfsUpdate
    # and before reboot, since SetupRootfsUpdate may reboot the device if there
    # is a pending update, which changes the root device, and reboot will
    # definitely change the root device if update successfully finishes.
    old_root_dev = self.GetRootDev(self.device)
    self.device.Reboot()
    if self._clobber_stateful:
      self.device.BaseRunCommand(['mkdir', '-p', self.device.work_dir])

    if self._do_rootfs_update:
      logging.notice('Verifying that the device has been updated...')
      new_root_dev = self.GetRootDev(self.device)
      if old_root_dev is None:
        raise AutoUpdateVerifyError(
            'Failed to locate root device before update.')

      if new_root_dev is None:
        raise AutoUpdateVerifyError(
            'Failed to locate root device after update.')

      if new_root_dev == old_root_dev:
        raise AutoUpdateVerifyError(
            'Failed to boot into the new version. Possibly there was a '
            'signing problem, or an automated rollback occurred because '
            'your new image failed to boot.')
Example #50
  def Run(self):
    """Run brillo sdk."""
    self.options.Freeze()

    # Must run outside the chroot.
    cros_build_lib.AssertOutsideChroot()

    workspace_path = workspace_lib.WorkspacePath()
    if not workspace_path:
      cros_build_lib.Die('You must be in a workspace.')

    # Perform the update.
    if self.options.update:
      # Shared GSContext object to use.
      gs_ctx = gs.GSContext()

      bootstrap_path = bootstrap_lib.FindBootstrapPath()

      # Check this first so we don't do a bootstrap update then error out.
      if not _SdkVersionExists(gs_ctx, bootstrap_path, self.options.update):
        cros_build_lib.Die('Invalid SDK version "%s".' % self.options.update)

      logging.info('Update bootstrap...')
      _UpdateBootstrap(bootstrap_path)

      # Verify environment after _UpdateBootstrap() so we could potentially
      # fix some problems automatically.
      if not project_sdk.VerifyEnvironment():
        cros_build_lib.Die('Environment verification failed.')

      logging.notice('Updating SDK...')
      _UpdateWorkspaceSdk(gs_ctx, bootstrap_path, workspace_path,
                          self.options.update)

    # The --version argument should print to stdout for consumption by scripts.
    _PrintWorkspaceSdkVersion(workspace_path, to_stdout=self.options.version)
Example #51
def UpdateChroot(brick=None, board=None, update_host_packages=True):
  """Update the chroot."""
  # Run chroot update hooks.
  logging.notice('Updating the chroot. This may take several minutes.')
  cmd = [os.path.join(constants.CROSUTILS_DIR, 'run_chroot_version_hooks')]
  cros_build_lib.RunCommand(cmd, debug_level=logging.DEBUG)

  # Update toolchains.
  cmd = [os.path.join(constants.CHROMITE_BIN_DIR, 'cros_setup_toolchains')]
  if brick:
    cmd += ['--targets=bricks', '--include-bricks=%s' % brick.brick_locator]
  elif board:
    cmd += ['--targets=boards', '--include-boards=%s' % board]
  cros_build_lib.SudoRunCommand(cmd, debug_level=logging.DEBUG)

  # Update the host before updating the board.
  if update_host_packages:
    Emerge(list(_HOST_PKGS), '/', rebuild_deps=False)

  # Automatically discard all CONFIG_PROTECT'ed files. Those that are
  # protected should not be overwritten until the variable is changed.
  # Autodiscard is option "-9" followed by the "YES" confirmation.
  cros_build_lib.SudoRunCommand(['etc-update'], input='-9\nYES\n',
                                debug_level=logging.DEBUG)
Example #52
def main(argv):
  parser = _GetParser()
  options, args = parser.parse_args(argv)

  if len(args) < 2:
    parser.error('Not enough arguments specified')

  changes = args[0:-1]
  try:
    patches = gerrit.GetGerritPatchInfo(changes)
  except ValueError as e:
    logging.error('Invalid patch: %s', e)
    cros_build_lib.Die('Did you swap the branch/gerrit number?')
  branch = args[-1]

  # Suppress all logging info output unless we're running debug.
  if not options.debug:
    logging.getLogger().setLevel(logging.NOTICE)

  # Get a pointer to your repo checkout to look up the local project paths for
  # both email addresses and for using your checkout as a git mirror.
  manifest = None
  if options.mirror:
    try:
      manifest = git.ManifestCheckout.Cached(constants.SOURCE_ROOT)
    except OSError as e:
      if e.errno == errno.ENOENT:
        logging.error('Unable to locate ChromiumOS checkout: %s',
                      constants.SOURCE_ROOT)
        logging.error('Did you mean to use --nomirror?')
        return 1
      raise
    if not _ManifestContainsAllPatches(manifest, patches):
      return 1
  else:
    if not options.email:
      chromium_email = '%s@chromium.org' % os.environ['USER']
      logging.notice('--nomirror set without email, using %s', chromium_email)
      options.email = chromium_email

  index = 0
  work_dir = None
  root_work_dir = tempfile.mkdtemp(prefix='cros_merge_to_branch')
  try:
    for index, (change, patch) in enumerate(zip(changes, patches)):
      # We only clone the project and set the committer the first time.
      work_dir = os.path.join(root_work_dir, patch.project)
      if not os.path.isdir(work_dir):
        branch = _SetupWorkDirectoryForPatch(work_dir, patch, branch, manifest,
                                             options.email)

      # Now that we have the project checked out, let's apply our change and
      # create a new change on Gerrit.
      logging.notice('Uploading change %s to branch %s', change, branch)
      urls = _UploadChangeToBranch(work_dir, patch, branch, options.draft,
                                   options.dryrun)
      logging.notice('Successfully uploaded %s to %s', change, branch)
      for url in urls:
        if url.endswith('\x1b[K'):
          # Git will often emit these escape sequences.
          url = url[0:-3]
        logging.notice('  URL: %s', url)

  except (cros_build_lib.RunCommandError, cros_patch.ApplyPatchException,
          git.AmbiguousBranchName, OSError) as e:
    # Tell the user how far we got.
    good_changes = changes[:index]
    bad_changes = changes[index:]

    logging.warning('############## SOME CHANGES FAILED TO UPLOAD ############')

    if good_changes:
      logging.notice(
          'Successfully uploaded change(s) %s', ' '.join(good_changes))

    # Printing out the error here so that we can see exactly what failed. This
    # is especially useful to debug without using --debug.
    logging.error('Upload failed with %s', str(e).strip())
    if not options.wipe:
      logging.error('Not wiping the directory. You can inspect the failed '
                    'change at %s; After fixing the change (if trivial) you can'
                    ' try to upload the change by running:\n'
                    'git commit -a -c CHERRY_PICK_HEAD\n'
                    'git push %s HEAD:refs/for/%s', work_dir, patch.project_url,
                    branch)
    else:
      logging.error('--nowipe not set, thus deleting the work directory. If you '
                    'wish to debug this, re-run the script with change(s) '
                    '%s and --nowipe by running:\n  %s %s %s --nowipe',
                    ' '.join(bad_changes), sys.argv[0], ' '.join(bad_changes),
                    branch)

    # Suppress the stack trace if we're not debugging.
    if options.debug:
      raise
    else:
      return 1

  finally:
    if options.wipe:
      shutil.rmtree(root_work_dir)

  if options.dryrun:
    logging.notice('Success! To actually upload changes, re-run without '
                   '--dry-run.')
  else:
    logging.notice('Successfully uploaded all changes requested.')

  return 0
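# Illustrative helper (not part of the example above): the URL cleanup strips
# one specific trailing escape ('\x1b[K') that git sometimes leaves on its
# progress output. A slightly more general, still hypothetical, version would
# strip any trailing CSI sequence:
import re

_TRAILING_ANSI_RE = re.compile(r'(?:\x1b\[[0-9;]*[A-Za-z])+$')


def strip_trailing_ansi(url):
  """Removes trailing ANSI control sequences from a pushed-change URL."""
  return _TRAILING_ANSI_RE.sub('', url)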
Example #53
0
  def UpdateRootfs(self, device, payload, tempdir):
    """Update the rootfs partition of the device.

    Args:
      device: The ChromiumOSDevice object to update.
      payload: The path to the update payload.
      tempdir: The directory to store temporary files.
    """
    # Setup devserver and payload on the target device.
    static_dir = os.path.join(device.work_dir, 'static')
    payload_dir = os.path.join(static_dir, 'pregenerated')
    src_dir = self._CopyDevServerPackage(device, tempdir)
    device.RunCommand(['mkdir', '-p', payload_dir])
    logging.info('Copying rootfs payload to device...')
    device.CopyToDevice(payload, payload_dir)
    devserver_bin = os.path.join(src_dir, self.DEVSERVER_FILENAME)
    ds = ds_wrapper.RemoteDevServerWrapper(
        device, devserver_bin, workspace_path=self.workspace_path,
        static_dir=static_dir, log_dir=device.work_dir)

    logging.info('Updating rootfs partition')
    try:
      ds.Start()
      # Use the localhost IP address to ensure that update engine
      # client can connect to the devserver.
      omaha_url = ds.GetDevServerURL(
          ip='127.0.0.1', port=ds.port, sub_dir='update/pregenerated')
      cmd = [self.UPDATE_ENGINE_BIN, '-check_for_update',
             '-omaha_url=%s' % omaha_url]
      device.RunCommand(cmd)

      # If we are using a progress bar, update it every 0.5s instead of 10s.
      if command.UseProgressBar():
        update_check_interval = self.UPDATE_CHECK_INTERVAL_PROGRESSBAR
        oper = operation.ProgressBarOperation()
      else:
        update_check_interval = self.UPDATE_CHECK_INTERVAL_NORMAL
        oper = None
      end_message_not_printed = True

      # Loop until update is complete.
      while True:
        op, progress = self.GetUpdateStatus(device, ['CURRENT_OP', 'PROGRESS'])
        logging.info('Waiting for update...status: %s at progress %s',
                     op, progress)

        if op == 'UPDATE_STATUS_UPDATED_NEED_REBOOT':
          logging.notice('Update completed.')
          break

        if op == 'UPDATE_STATUS_IDLE':
          raise FlashError(
              'Update failed with unexpected update status: %s' % op)

        if oper is not None:
          if op == 'UPDATE_STATUS_DOWNLOADING':
            oper.ProgressBar(float(progress))
          elif end_message_not_printed and op == 'UPDATE_STATUS_FINALIZING':
            oper.Cleanup()
            logging.notice('Finalizing image.')
            end_message_not_printed = False

        time.sleep(update_check_interval)

      ds.Stop()
    except Exception:
      logging.error('Rootfs update failed.')
      logging.warning(ds.TailLog() or 'No devserver log is available.')
      raise
    finally:
      ds.Stop()
      device.CopyFromDevice(ds.log_file,
                            os.path.join(tempdir, 'target_devserver.log'),
                            error_code_ok=True)
      device.CopyFromDevice('/var/log/update_engine.log', tempdir,
                            follow_symlinks=True,
                            error_code_ok=True)
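# Sketch only (not part of the example above): GetUpdateStatus(), used in the
# polling loop, is not shown here. On the device it essentially reads
# KEY=VALUE lines from `update_engine_client --status`; a standalone sketch of
# that parsing step:
def parse_update_status(status_output, keys=('CURRENT_OP', 'PROGRESS')):
  """Returns the values for |keys| from KEY=VALUE status output, in order."""
  status = {}
  for line in status_output.splitlines():
    if '=' in line:
      key, _, value = line.partition('=')
      status[key.strip()] = value.strip()
  return [status.get(k) for k in keys]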
Example #54
0
  def _PrintEndStageMessages(self):
    """Messages to be shown at the end of a stage."""
    logging.notice('Unmounting image. This may take a while.')
Example #55
0
  def Run(self):
    """Performs remote device update."""
    old_root_dev, new_root_dev = None, None
    try:
      device_connected = False
      with remote_access.ChromiumOSDeviceHandler(
          self.ssh_hostname, port=self.ssh_port,
          base_dir=self.DEVICE_BASE_DIR, ping=self.ping) as device:
        device_connected = True

        payload_dir = self.tempdir
        if os.path.isdir(self.image):
          # If the given path is a directory, we use the provided
          # update payload(s) in the directory.
          payload_dir = self.image
          logging.info('Using provided payloads in %s', payload_dir)
        else:
          if os.path.isfile(self.image):
            # If the given path is an image, make sure devserver can
            # access it and generate payloads.
            logging.info('Using image %s', self.image)
            ds_wrapper.GetUpdatePayloadsFromLocalPath(
                self.image, payload_dir,
                src_image_to_delta=self.src_image_to_delta,
                static_dir=_DEVSERVER_STATIC_DIR,
                workspace_path=self.workspace_path)
          else:
            # We should ignore the given/inferred board value and stick to the
            # device's basic designation. We do emit a warning for good
            # measure.
            # TODO(garnold) In fact we should find the board/overlay that the
            # device inherits from and which defines the SDK "baseline" image
            # (brillo:339).
            if self.sdk_version and self.board and not self.force:
              logging.warning(
                  'Ignoring board value (%s) and deferring to device; use '
                  '--force to override',
                  self.board)
              self.board = None

            self.board = cros_build_lib.GetBoard(device_board=device.board,
                                                 override_board=self.board,
                                                 force=self.yes)
            if not self.board:
              raise FlashError('No board identified')

            if not self.force and self.board != device.board:
              # If a board was specified, it must be compatible with the device.
              raise FlashError('Device (%s) is incompatible with board %s' %
                               (device.board, self.board))

            logging.info('Board is %s', self.board)

            # Translate the xbuddy path to get the exact image to use.
            translated_path, resolved_path = ds_wrapper.GetImagePathWithXbuddy(
                self.image, self.board, version=self.sdk_version,
                static_dir=_DEVSERVER_STATIC_DIR, lookup_only=True)
            logging.info('Using image %s', translated_path)
            # Convert the translated path to be used in the update request.
            image_path = ds_wrapper.ConvertTranslatedPath(resolved_path,
                                                          translated_path)

            # Launch a local devserver to generate/serve update payloads.
            ds_wrapper.GetUpdatePayloads(
                image_path, payload_dir, board=self.board,
                src_image_to_delta=self.src_image_to_delta,
                workspace_path=self.workspace_path,
                static_dir=_DEVSERVER_STATIC_DIR)

        # Verify that all required payloads are in the payload directory.
        self._CheckPayloads(payload_dir)

        restore_stateful = False
        if (not self._CanRunDevserver(device, self.tempdir) and
            self.do_rootfs_update):
          msg = ('Cannot start devserver! The stateful partition may be '
                 'corrupted.')
          prompt = 'Attempt to restore the stateful partition?'
          restore_stateful = self.yes or cros_build_lib.BooleanPrompt(
              prompt=prompt, default=False, prolog=msg)
          if not restore_stateful:
            raise FlashError('Cannot continue to perform rootfs update!')

        if restore_stateful:
          logging.warning('Restoring the stateful partition...')
          payload = os.path.join(payload_dir, ds_wrapper.STATEFUL_FILENAME)
          self.UpdateStateful(device, payload, clobber=self.clobber_stateful)
          device.Reboot()
          if self._CanRunDevserver(device, self.tempdir):
            logging.info('Stateful partition restored.')
          else:
            raise FlashError('Unable to restore stateful partition.')

        # Perform device updates.
        if self.do_rootfs_update:
          self.SetupRootfsUpdate(device)
          # Record the current root device. This must be done after
          # SetupRootfsUpdate because SetupRootfsUpdate may reboot the
          # device if there is a pending update, which changes the
          # root device.
          old_root_dev = self.GetRootDev(device)
          payload = os.path.join(payload_dir, ds_wrapper.ROOTFS_FILENAME)
          self.UpdateRootfs(device, payload, self.tempdir)
          logging.info('Rootfs update completed.')

        if self.do_stateful_update and not restore_stateful:
          payload = os.path.join(payload_dir, ds_wrapper.STATEFUL_FILENAME)
          self.UpdateStateful(device, payload, clobber=self.clobber_stateful)
          logging.info('Stateful update completed.')

        if self.reboot:
          logging.notice('Rebooting device...')
          device.Reboot()
          if self.clobber_stateful:
            # --clobber-stateful wipes the stateful partition and the
            # working directory on the device no longer exists. To
            # remedy this, we recreate the working directory here.
            device.BaseRunCommand(['mkdir', '-p', device.work_dir])

        if self.do_rootfs_update and self.reboot:
          logging.notice('Verifying that the device has been updated...')
          new_root_dev = self.GetRootDev(device)
          self.Verify(old_root_dev, new_root_dev)

        if self.disable_verification:
          logging.info('Disabling rootfs verification on the device...')
          device.DisableRootfsVerification()

    except Exception:
      logging.error('Device update failed.')
      if device_connected and device.lsb_release:
        lsb_entries = sorted(device.lsb_release.items())
        logging.info('Following are the LSB version details of the device:\n%s',
                     '\n'.join('%s=%s' % (k, v) for k, v in lsb_entries))
      raise
    else:
      logging.notice('Update performed successfully.')
    finally:
      self.Cleanup()
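# Illustrative sketch (not part of the example above): the error path dumps
# the device's /etc/lsb-release entries, which is just KEY=VALUE splitting. A
# minimal local version, assuming a plain lsb-release file without quoting
# edge cases:
def parse_lsb_release(contents):
  """Parses /etc/lsb-release style contents into sorted (key, value) pairs."""
  entries = {}
  for line in contents.splitlines():
    line = line.strip()
    if line and not line.startswith('#') and '=' in line:
      key, _, value = line.partition('=')
      entries[key] = value
  return sorted(entries.items())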
Example #56
0
def Flash(device, image, project_sdk_image=False, sdk_version=None, board=None,
          brick_name=None, blueprint_name=None, install=False,
          src_image_to_delta=None, rootfs_update=True, stateful_update=True,
          clobber_stateful=False, reboot=True, wipe=True, ping=True,
          disable_rootfs_verification=False, clear_cache=False, yes=False,
          force=False, debug=False):
  """Flashes a device, USB drive, or file with an image.

  This provides functionality common to `cros flash` and `brillo flash`
  so that they can parse the commandline separately but still use the
  same underlying functionality.

  Args:
    device: commandline.Device object; None to use the default device.
    image: Path (string) to the update image. Can be a local or xbuddy path;
        non-existent local paths are converted to xbuddy.
    project_sdk_image: Use a clean project SDK image. Overrides |image| if True.
    sdk_version: Which version of SDK image to flash; autodetected if None.
    board: Board to use; None to automatically detect.
    brick_name: Brick locator to use. Overrides |board| if not None.
    blueprint_name: Blueprint locator to use. Overrides |board| and
        |brick_name|.
    install: Install to USB using base disk layout; USB |device| scheme only.
    src_image_to_delta: Local path to an image to be used as the base to
        generate delta payloads; SSH |device| scheme only.
    rootfs_update: Update rootfs partition; SSH |device| scheme only.
    stateful_update: Update stateful partition; SSH |device| scheme only.
    clobber_stateful: Clobber stateful partition; SSH |device| scheme only.
    reboot: Reboot device after update; SSH |device| scheme only.
    wipe: Wipe temporary working directory; SSH |device| scheme only.
    ping: Ping the device before attempting update; SSH |device| scheme only.
    disable_rootfs_verification: Remove rootfs verification after update; SSH
        |device| scheme only.
    clear_cache: Clear the devserver static directory.
    yes: Assume "yes" for any prompt.
    force: Ignore sanity checks and prompts. Overrides |yes| if True.
    debug: Print additional debugging messages.

  Raises:
    FlashError: An unrecoverable error occurred.
    ValueError: Invalid parameter combination.
  """
  if force:
    yes = True

  if clear_cache:
    logging.info('Clearing the cache...')
    ds_wrapper.DevServerWrapper.WipeStaticDirectory(_DEVSERVER_STATIC_DIR)

  try:
    osutils.SafeMakedirsNonRoot(_DEVSERVER_STATIC_DIR)
  except OSError:
    logging.error('Failed to create %s', _DEVSERVER_STATIC_DIR)

  if install:
    if not device or device.scheme != commandline.DEVICE_SCHEME_USB:
      raise ValueError(
          '--install can only be used when writing to a USB device')
    if not cros_build_lib.IsInsideChroot():
      raise ValueError('--install can only be used inside the chroot')

  # If installing an SDK image, find the version and override image path.
  if project_sdk_image:
    image = 'project_sdk'
    if sdk_version is None:
      sdk_version = project_sdk.FindVersion()
      if not sdk_version:
        raise FlashError('Could not find SDK version')

  # We don't have enough information on the device to make a good guess on
  # whether this device is compatible with the blueprint.
  # TODO(bsimonnet): Add proper compatibility checks. (brbug.com/969)
  if blueprint_name:
    board = None
    if image == 'latest':
      blueprint = blueprint_lib.Blueprint(blueprint_name)
      image_dir = os.path.join(
          workspace_lib.WorkspacePath(), workspace_lib.WORKSPACE_IMAGES_DIR,
          blueprint.FriendlyName(), 'latest')
      image = _ChooseImageFromDirectory(image_dir)
    elif not os.path.exists(image):
      raise ValueError('Cannot find blueprint image "%s". Only "latest" and '
                       'full image path are supported.' % image)
  elif brick_name:
    board = brick_lib.Brick(brick_name).FriendlyName()

  workspace_path = workspace_lib.WorkspacePath()

  if not device or device.scheme == commandline.DEVICE_SCHEME_SSH:
    if device:
      hostname, port = device.hostname, device.port
    else:
      hostname, port = None, None
    logging.notice('Preparing to update the remote device %s', hostname)
    updater = RemoteDeviceUpdater(
        hostname,
        port,
        image,
        board=board,
        workspace_path=workspace_path,
        src_image_to_delta=src_image_to_delta,
        rootfs_update=rootfs_update,
        stateful_update=stateful_update,
        clobber_stateful=clobber_stateful,
        reboot=reboot,
        wipe=wipe,
        debug=debug,
        yes=yes,
        force=force,
        ping=ping,
        disable_verification=disable_rootfs_verification,
        sdk_version=sdk_version)
    updater.Run()
  elif device.scheme == commandline.DEVICE_SCHEME_USB:
    path = osutils.ExpandPath(device.path) if device.path else ''
    logging.info('Preparing to image the removable device %s', path)
    imager = USBImager(path,
                       board,
                       image,
                       workspace_path=workspace_path,
                       sdk_version=sdk_version,
                       debug=debug,
                       install=install,
                       yes=yes)
    imager.Run()
  elif device.scheme == commandline.DEVICE_SCHEME_FILE:
    logging.info('Preparing to copy image to %s', device.path)
    imager = FileImager(device.path,
                        board,
                        image,
                        sdk_version=sdk_version,
                        debug=debug,
                        yes=yes)
    imager.Run()
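# Purely illustrative (not part of the example above): Flash() routes to a
# different worker depending on the device scheme described in the docstring
# (SSH, USB, or plain file). A condensed view of that dispatch with the real
# updater/imager classes left out; scheme names are hard-coded here instead of
# using the commandline module's constants:
def describe_flash_target(device, image):
  """Returns a human-readable description of what Flash() would do."""
  if not device or device.scheme == 'ssh':
    host = device.hostname if device else 'the default device'
    return 'remote update of %s with %s' % (host, image)
  if device.scheme == 'usb':
    return 'imaging removable device %s with %s' % (device.path, image)
  if device.scheme == 'file':
    return 'copying %s to %s' % (image, device.path)
  raise ValueError('Unsupported device scheme: %s' % device.scheme)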
Example #57
0
  def _SetupEnvironment(self, board, sdk_ctx, options, goma_dir=None,
                        goma_port=None):
    """Sets environment variables to export to the SDK shell."""
    if options.chroot:
      sysroot = os.path.join(options.chroot, 'build', board)
      if not os.path.isdir(sysroot) and not options.cmd:
        logging.warning("Because --chroot is set, expected a sysroot to be at "
                        "%s, but couldn't find one.", sysroot)
    else:
      sysroot = sdk_ctx.key_map[constants.CHROME_SYSROOT_TAR].path

    environment = os.path.join(sdk_ctx.key_map[constants.CHROME_ENV_TAR].path,
                               'environment')
    if options.chroot:
      # Override with the environment from the chroot if available (i.e.
      # build_packages or emerge chromeos-chrome has been run for |board|).
      env_path = os.path.join(sysroot, 'var', 'db', 'pkg', 'chromeos-base',
                              'chromeos-chrome-*')
      env_glob = glob.glob(env_path)
      if len(env_glob) != 1:
        logging.warning('Multiple Chrome versions in %s. This can be resolved'
                        ' by running "eclean-$BOARD -d packages". Using'
                        ' environment from: %s', env_path, environment)
      elif not os.path.isdir(env_glob[0]):
        logging.warning('Environment path not found: %s. Using environment'
                        ' from: %s.', env_path, environment)
      else:
        chroot_env_file = os.path.join(env_glob[0], 'environment.bz2')
        if os.path.isfile(chroot_env_file):
          # Log a notice here since this is new behavior that is not obvious.
          logging.notice('Environment fetched from: %s', chroot_env_file)
          # Uncompress environment.bz2 to pass to osutils.SourceEnvironment.
          chroot_cache = os.path.join(
              self.options.cache_dir, COMMAND_NAME, 'chroot')
          osutils.SafeMakedirs(chroot_cache)
          environment = os.path.join(chroot_cache, 'environment_%s' % board)
          cros_build_lib.UncompressFile(chroot_env_file, environment)

    env = osutils.SourceEnvironment(environment, self.EBUILD_ENV)
    self._SetupTCEnvironment(sdk_ctx, options, env)

    # Add managed components to the PATH.
    env['PATH'] = '%s:%s' % (constants.CHROMITE_BIN_DIR, env['PATH'])
    env['PATH'] = '%s:%s' % (os.path.dirname(self.sdk.gs_ctx.gsutil_bin),
                             env['PATH'])

    # Export internally referenced variables.
    os.environ[self.sdk.SDK_BOARD_ENV] = board
    if self.options.sdk_path:
      os.environ[self.sdk.SDK_PATH_ENV] = self.options.sdk_path
    os.environ[self.sdk.SDK_VERSION_ENV] = sdk_ctx.version

    # Export the board/version info in a more accessible way, so developers can
    # reference them in their chrome_sdk.bashrc files, as well as within the
    # chrome-sdk shell.
    for var in [self.sdk.SDK_VERSION_ENV, self.sdk.SDK_BOARD_ENV]:
      env[var.lstrip('%')] = os.environ[var]

    # Export Goma information.
    if goma_dir:
      env[self.SDK_GOMA_DIR_ENV] = goma_dir
      env[self.SDK_GOMA_PORT_ENV] = goma_port

    # SYSROOT is necessary for Goma and the sysroot wrapper.
    env['SYSROOT'] = sysroot
    gyp_dict = chrome_util.ProcessGypDefines(env['GYP_DEFINES'])
    gn_args = gn_helpers.FromGNArgs(env['GN_ARGS'])
    gyp_dict['sysroot'] = sysroot
    gn_args['target_sysroot'] = sysroot
    gyp_dict.pop('pkg-config', None)
    gn_args.pop('pkg_config', None)
    if options.clang:
      gyp_dict['clang'] = 1
      gn_args['is_clang'] = True
    if options.internal:
      gyp_dict['branding'] = 'Chrome'
      gn_args['is_chrome_branded'] = True
      gyp_dict['buildtype'] = 'Official'
      gn_args['is_official_build'] = True
    else:
      gyp_dict.pop('branding', None)
      gn_args.pop('is_chrome_branded', None)
      gyp_dict.pop('buildtype', None)
      gn_args.pop('is_official_build', None)
      gyp_dict.pop('internal_gles2_conform_tests', None)
      gn_args.pop('internal_gles2_conform_tests', None)
    if options.component:
      gyp_dict['component'] = 'shared_library'
      gn_args['is_component_build'] = True
    if options.fastbuild:
      gyp_dict['fastbuild'] = 1
      gyp_dict.pop('release_extra_cflags', None)
      # symbol_level corresponds to GYP's fastbuild (https://goo.gl/ZC4fUO).
      gn_args['symbol_level'] = 1
    else:
      # Enable debug fission for GN.
      gn_args['use_debug_fission'] = True

    # For SimpleChrome, we use the binutils that comes bundled within Chrome.
    # We should not use the binutils from the host system.
    gn_args['linux_use_bundled_binutils'] = True

    gyp_dict['host_clang'] = 1
    # Need to reset these after the env vars have been fixed by
    # _SetupTCEnvironment.
    gn_args['cros_host_is_clang'] = True
    gn_args['cros_target_cc'] = env['CC']
    gn_args['cros_target_cxx'] = env['CXX']
    gn_args['cros_target_ld'] = env['LD']
    # We need to reset extra C/CXX flags to remove references to
    # EBUILD_CFLAGS, EBUILD_CXXFLAGS
    gn_args['cros_target_extra_cflags'] = env['CFLAGS']
    gn_args['cros_target_extra_cxxflags'] = env['CXXFLAGS']
    gn_args['cros_host_cc'] = env['CC_host']
    gn_args['cros_host_cxx'] = env['CXX_host']
    gn_args['cros_host_ld'] = env['LD_host']
    gn_args['cros_host_ar'] = env['AR_host']
    gn_args['cros_v8_snapshot_cc'] = env['CC_host']
    gn_args['cros_v8_snapshot_cxx'] = env['CXX_host']
    gn_args['cros_v8_snapshot_ld'] = env['LD_host']
    gn_args['cros_v8_snapshot_ar'] = env['AR_host']
    # No need to adjust CFLAGS and CXXFLAGS for GN since the only
    # adjustment made in _SetupTCEnvironment is for split debug which
    # is done with 'use_debug_fission'.

    # Enable goma if requested.
    if goma_dir:
      gyp_dict['use_goma'] = 1
      gn_args['use_goma'] = True
      gyp_dict['gomadir'] = goma_dir
      gn_args['goma_dir'] = goma_dir

    gn_args.pop('internal_khronos_glcts_tests', None)  # crbug.com/588080

    env['GYP_DEFINES'] = chrome_util.DictToGypDefines(gyp_dict)
    env['GN_ARGS'] = gn_helpers.ToGNString(gn_args)

    # PS1 sets the command line prompt and xterm window caption.
    full_version = sdk_ctx.version
    if full_version != CUSTOM_VERSION:
      full_version = self.sdk.GetFullVersion(sdk_ctx.version)
    env['PS1'] = self._CreatePS1(self.board, full_version,
                                 chroot=options.chroot)

    out_dir = 'out_%s' % self.board
    env['builddir_name'] = out_dir
    env['GYP_GENERATOR_FLAGS'] = 'output_dir=%s' % out_dir
    env['GYP_CROSSCOMPILE'] = '1'

    # deploy_chrome relies on the 'gn' USE flag to locate .so (and potentially
    # other) files. Set this by default if GYP_CHROMIUM_NO_ACTION=1.
    # TODO(stevenjb): Maybe figure out a better way to set this by default.
    if os.environ.get('GYP_CHROMIUM_NO_ACTION', '') == '1':
      env['USE'] = 'gn'
      logging.notice(
          'GYP_CHROMIUM_NO_ACTION=1, setting USE="gn" for deploy_chrome.')

    return env
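# Reference sketch (not part of the example above): GN_ARGS is exported via
# gn_helpers.ToGNString(). A minimal standalone encoder for the simple
# bool/int/str values this function sets; the real gn_helpers also handles
# lists, escaping, and nested scopes:
def to_gn_args_string(gn_args):
  """Encodes e.g. {'is_clang': True, 'symbol_level': 1} as a GN args string."""
  parts = []
  for key, value in sorted(gn_args.items()):
    if isinstance(value, bool):
      # bool must be checked before int, since isinstance(True, int) is True.
      encoded = 'true' if value else 'false'
    elif isinstance(value, int):
      encoded = str(value)
    else:
      encoded = '"%s"' % str(value).replace('"', '\\"')
    parts.append('%s = %s' % (key, encoded))
  return ' '.join(parts)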
Example #58
0
def FetchRemoteTarballs(storage_dir, urls, desc, allow_none=False):
  """Fetches a tarball given by url, and place it in |storage_dir|.

  Args:
    storage_dir: Path where to save the tarball.
    urls: List of URLs to try to download. Download will stop on first success.
    desc: A string describing what tarball we're downloading (for logging).
    allow_none: Don't fail if none of the URLs worked.

  Returns:
    Full path to the downloaded file, or None if |allow_none| and no URL worked.

  Raises:
    ValueError: If |allow_none| is False and none of the URLs worked.
  """

  # Note we track content length ourselves since certain versions of curl
  # fail if asked to resume a complete file.
  # pylint: disable=C0301,W0631
  # https://sourceforge.net/tracker/?func=detail&atid=100976&aid=3482927&group_id=976
  logging.notice('Downloading %s tarball...', desc)
  for url in urls:
    # http://www.logilab.org/ticket/8766
    # pylint: disable=E1101
    parsed = urlparse.urlparse(url)
    tarball_name = os.path.basename(parsed.path)
    if parsed.scheme in ('', 'file'):
      if os.path.exists(parsed.path):
        return parsed.path
      continue
    content_length = 0
    logging.debug('Attempting download from %s', url)
    result = retry_util.RunCurl(
        ['-I', url], fail=False, capture_output=False, redirect_stdout=True,
        redirect_stderr=True, print_cmd=False, debug_level=logging.NOTICE)
    successful = False
    for header in result.output.splitlines():
      # We must walk the output to find the string '200 OK' for use cases where
      # a proxy is involved and may have pushed down the actual header.
      if header.find('200 OK') != -1:
        successful = True
      elif header.lower().startswith('content-length:'):
        content_length = int(header.split(':', 1)[-1].strip())
        if successful:
          break
    if successful:
      break
  else:
    if allow_none:
      return None
    raise ValueError('No valid URLs found!')

  tarball_dest = os.path.join(storage_dir, tarball_name)
  current_size = 0
  if os.path.exists(tarball_dest):
    current_size = os.path.getsize(tarball_dest)
    if current_size > content_length:
      osutils.SafeUnlink(tarball_dest)
      current_size = 0

  if current_size < content_length:
    retry_util.RunCurl(
        ['-L', '-y', '30', '-C', '-', '--output', tarball_dest, url],
        print_cmd=False, capture_output=False, debug_level=logging.NOTICE)

  # Clean up old tarballs now since we've successfully fetched; only clean up
  # the tarballs for our prefix, or unknown ones. This gets a bit tricky
  # because we might have partial overlap between known prefixes.
  my_prefix = tarball_name.rsplit('-', 1)[0] + '-'
  all_prefixes = ('stage3-amd64-', 'cros-sdk-', 'cros-sdk-overlay-')
  ignored_prefixes = [prefix for prefix in all_prefixes if prefix != my_prefix]
  for filename in os.listdir(storage_dir):
    if (filename == tarball_name or
        any([(filename.startswith(p) and
              not (len(my_prefix) > len(p) and filename.startswith(my_prefix)))
             for p in ignored_prefixes])):
      continue
    logging.info('Cleaning up old tarball: %s', filename)
    osutils.SafeUnlink(os.path.join(storage_dir, filename))

  return tarball_dest
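# Illustrative refactor (not part of the example above): the cleanup loop is
# subtle because the known prefixes overlap (e.g. 'cros-sdk-' is a prefix of
# 'cros-sdk-overlay-'). The per-file decision can be isolated into a pure
# helper like this sketch:
def should_remove_old_tarball(filename, tarball_name, my_prefix,
                              all_prefixes=('stage3-amd64-', 'cros-sdk-',
                                            'cros-sdk-overlay-')):
  """Returns True if |filename| is a stale tarball that is safe to delete."""
  if filename == tarball_name:
    return False
  for prefix in all_prefixes:
    if prefix == my_prefix:
      continue
    # Keep files that belong to a foreign prefix, unless our own (longer)
    # prefix also matches, in which case the file is really one of ours.
    if filename.startswith(prefix) and not (
        len(my_prefix) > len(prefix) and filename.startswith(my_prefix)):
      return False
  return True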