Example No. 1
  def _ExecuteWithEngine(self, query, engine, *args, **kwargs):
    """Execute a query using |engine|, with retires.

    This method wraps execution of a query against an engine in retries.
    The engine will automatically create new connections if a prior connection
    was dropped.

    Args:
      query: Query to execute, of type string, or sqlalchemy.Executable,
             or other sqlalchemy-executable statement (see sqlalchemy
             docs).
      engine: sqlalchemy.engine to use.
      *args: Additional args passed along to .execute(...)
      **kwargs: Additional args passed along to .execute(...)

    Returns:
      The result of .execute(...)
    """
    f = lambda: engine.execute(query, *args, **kwargs)
    logging.info('Running cidb query on pid %s, repr(query) starts with %s',
                 os.getpid(), repr(query)[:100])
    return retry_stats.RetryWithStats(
        retry_stats.CIDB,
        handler=_IsRetryableException,
        max_retry=self.query_retry_args.max_retry,
        sleep=self.query_retry_args.sleep,
        backoff_factor=self.query_retry_args.backoff_factor,
        functor=f)
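
A minimal standalone sketch of the retry-with-backoff idea the call above relies on (hypothetical helper; chromite's retry_stats.RetryWithStats additionally records per-category retry statistics):

import time

def retry_with_backoff_sketch(functor, handler, max_retry, sleep,
                              backoff_factor):
  """Call |functor|, retrying exceptions that |handler| marks as retryable."""
  delay = sleep
  for attempt in range(max_retry + 1):
    try:
      return functor()
    except Exception as e:
      if attempt == max_retry or not handler(e):
        raise
      time.sleep(delay)
      delay *= backoff_factor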
Example No. 2
def CheckAFDOPerfData(arch, cpv, buildroot, gs_context):
  """Check whether AFDO perf data exists for the given architecture.

  Check if 'perf' data file for this architecture and release is available
  in GS. If so, copy it into a temp directory in the buildroot.

  Args:
    arch: architecture we're going to build Chrome for.
    cpv: The portage_util.CPV object for chromeos-chrome.
    buildroot: buildroot where AFDO data should be stored.
    gs_context: GS context to retrieve data.

  Returns:
    True if AFDO perf data is available. False otherwise.
  """
  # The file name of the perf data is based only on the chrome version.
  # The test case that produces it does not know anything about the
  # revision number.
  # TODO(llozano): perf data filename should include the revision number.
  version_number = cpv.version_no_rev.split('_')[0]
  chrome_spec = {'package': cpv.package,
                 'arch': arch,
                 'version': version_number}
  url = CHROME_PERF_AFDO_URL % chrome_spec
  if not gs_context.Exists(url):
    logging.info('Could not find AFDO perf data')
    return False
  dest_dir = AFDO_BUILDROOT_LOCAL % {'build_root': buildroot}
  dest_path = os.path.join(dest_dir, url.rsplit('/', 1)[1])
  gs_context.Copy(url, dest_path)

  UncompressAFDOFile(dest_path, buildroot)
  logging.info('Found and retrieved AFDO perf data')
  return True
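
For reference, this is how the %-style dictionary substitution above expands, using a hypothetical template string (the real CHROME_PERF_AFDO_URL is defined elsewhere in chromite and may differ):

CHROME_PERF_AFDO_URL = ('gs://example-bucket/'
                        '%(package)s-%(arch)s-%(version)s.perf.data')
chrome_spec = {'package': 'chromeos-chrome', 'arch': 'amd64',
               'version': '57.0.2987.0'}
print(CHROME_PERF_AFDO_URL % chrome_spec)
# gs://example-bucket/chromeos-chrome-amd64-57.0.2987.0.perf.data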
Example No. 3
  def _UploadStatus(self, version, status, message=None, fail_if_exists=False,
                    dashboard_url=None):
    """Upload build status to Google Storage.

    Args:
      version: Version number to use. Must be a string.
      status: Status string.
      message: A failures_lib.BuildFailureMessage object with details
               of builder failure, or None (default).
      fail_if_exists: If set, fail if the status already exists.
      dashboard_url: Optional url linking to builder dashboard for this build.
    """
    data = BuilderStatus(status, message, dashboard_url).AsPickledDict()

    gs_version = None
    # This HTTP header tells Google Storage to return the PreconditionFailed
    # error message if the file already exists. Unfortunately, with new versions
    # of gsutil, PreconditionFailed is sometimes returned erroneously, so we've
    # replaced this check with an Exists check below instead.
    # TODO(davidjames): Revert CL:223267 when Google Storage is fixed.
    #if fail_if_exists:
    #  gs_version = 0

    logging.info('Recording status %s for %s', status, self.build_names)
    for build_name in self.build_names:
      url = BuildSpecsManager._GetStatusUrl(build_name, version)

      ctx = gs.GSContext(dry_run=self.dry_run)
      # Check if the file already exists.
      if fail_if_exists and not self.dry_run and ctx.Exists(url):
        raise GenerateBuildSpecException('Builder already inflight')
      # Do the actual upload.
      ctx.Copy('-', url, input=data, version=gs_version)
Example No. 4
  def PerformStage(self):
    # Wait for UploadHWTestArtifacts to generate the payloads.
    if not self.GetParallel('payloads_generated', pretty_name='payloads'):
      cros_build_lib.PrintBuildbotStepWarnings('missing payloads')
      logging.warning('Cannot run HWTest because UploadTestArtifacts failed. '
                      'See UploadTestArtifacts for details.')
      return

    if (self.suite_config.suite == constants.HWTEST_AFDO_SUITE and
        not self._run.attrs.metadata.GetValue('chrome_was_uprevved')):
      logging.info('Chrome was not uprevved. Nothing to do in this stage')
      return

    build = '/'.join([self._bot_id, self.version])
    if self._run.options.remote_trybot and self._run.options.hwtest:
      debug = self._run.options.debug_forced
    else:
      debug = self._run.options.debug

    self._CheckLabStatus()
    commands.RunHWTestSuite(
        build, self.suite_config.suite, self._current_board,
        pool=self.suite_config.pool, num=self.suite_config.num,
        file_bugs=self.suite_config.file_bugs,
        wait_for_results=self.wait_for_results,
        priority=self.suite_config.priority,
        timeout_mins=self.suite_config.timeout_mins,
        retry=self.suite_config.retry,
        max_retries=self.suite_config.max_retries,
        minimum_duts=self.suite_config.minimum_duts,
        suite_min_duts=self.suite_config.suite_min_duts,
        offload_failures_only=self.suite_config.offload_failures_only,
        debug=debug)
Example No. 5
  def _CheckConnection(self):
    try:
      logging.info('Testing connection to the device...')
      self.device.RunCommand('true')
    except cros_build_lib.RunCommandError as ex:
      logging.error('Error connecting to the test device.')
      raise DeployFailure(ex)
Example No. 6
  def ShouldWait(self):
    """Decides if we should continue to wait for the builders to finish.

    This will be the retry function for timeout_util.WaitForSuccess, basically
    this function will return False if all builders finished or we see a
    problem with the builders.  Otherwise we'll return True to continue polling
    for the builders statuses.

    Returns:
      A bool of True if we should continue to wait and False if we should not.
    """
    # Check if all builders completed.
    if self.Completed():
      return False

    current_time = datetime.datetime.now()

    # Guess there are some builders building, check if there is a problem.
    if self.ShouldFailForBuilderStartTimeout(current_time):
      logging.error('Ending build since at least one builder has not started '
                    'within 5 mins.')
      return False

    # We got here which means no problems, we should still wait.
    logging.info('Still waiting for the following builds to complete: %r',
                 sorted(set(self.builders_array).difference(
                     self.GetCompleted())))
    return True
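
A rough sketch of the polling loop that a predicate like ShouldWait() plugs into (hypothetical; chromite's timeout_util.WaitForSuccess handles the real timeout and retry bookkeeping):

import time

def wait_until_done(should_wait, poll_interval=60, timeout=3600):
  """Poll |should_wait| until it returns False or |timeout| seconds elapse."""
  deadline = time.time() + timeout
  while time.time() < deadline:
    if not should_wait():
      return True
    time.sleep(poll_interval)
  return False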
Example No. 7
  def PublishManifest(self, manifest, version, build_id=None):
    """Publishes the manifest as the manifest for the version to others.

    Args:
      manifest: Path to manifest file to publish.
      version: Manifest version string, e.g. 6102.0.0-rc4
      build_id: Optional integer giving build_id of the build that is
                publishing this manifest. If specified and non-negative,
                build_id will be included in the commit message.
    """
    # Note: This commit message is used by master.cfg for figuring out when to
    #       trigger slave builders.
    commit_message = 'Automatic: Start %s %s %s' % (self.build_names[0],
                                                    self.branch, version)
    if build_id is not None and build_id >= 0:
      commit_message += '\nCrOS-Build-Id: %s' % build_id

    logging.info('Publishing build spec for: %s', version)
    logging.info('Publishing with commit message: %s', commit_message)
    logging.debug('Manifest contents below.\n%s', osutils.ReadFile(manifest))

    # Copy the manifest into the manifest repository.
    spec_file = '%s.xml' % os.path.join(self.all_specs_dir, version)
    osutils.SafeMakedirs(os.path.dirname(spec_file))

    shutil.copyfile(manifest, spec_file)

    # Actually push the manifest.
    self.PushSpecChanges(commit_message)
Example No. 8
def _FetchChromePackage(cache_dir, tempdir, gs_path):
  """Get the chrome prebuilt tarball from GS.

  Returns:
    Path to the fetched chrome tarball.
  """
  gs_ctx = gs.GSContext(cache_dir=cache_dir, init_boto=True)
  files = gs_ctx.LS(gs_path)
  files = [found for found in files if
           _UrlBaseName(found).startswith('%s-' % constants.CHROME_PN)]
  if not files:
    raise Exception('No chrome package found at %s' % gs_path)
  elif len(files) > 1:
    # - Users should provide us with a direct link to either a stripped or
    #   unstripped chrome package.
    # - In the case of being provided with an archive directory, where both
    #   stripped and unstripped chrome available, use the stripped chrome
    #   package.
    # - Stripped chrome pkg is chromeos-chrome-<version>.tar.gz
    # - Unstripped chrome pkg is chromeos-chrome-<version>-unstripped.tar.gz.
    files = [f for f in files if 'unstripped' not in f]
    assert len(files) == 1
    logging.warning('Multiple chrome packages found.  Using %s', files[0])

  filename = _UrlBaseName(files[0])
  logging.info('Fetching %s...', filename)
  gs_ctx.Copy(files[0], tempdir, print_cmd=False)
  chrome_path = os.path.join(tempdir, filename)
  assert os.path.exists(chrome_path)
  return chrome_path
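
The stripped-versus-unstripped selection above, shown in isolation with illustrative file names (the real listing comes from gs_ctx.LS):

files = ['gs://bucket/chromeos-chrome-57.0.2987.0.tar.gz',
         'gs://bucket/chromeos-chrome-57.0.2987.0-unstripped.tar.gz']
files = [f for f in files if 'unstripped' not in f]
# files is now ['gs://bucket/chromeos-chrome-57.0.2987.0.tar.gz']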
Example No. 9
  def MarkBuildsGathered(builds, sheets_version, gs_ctx=None):
    """Mark specified |builds| as processed for the given stats versions.

    Args:
      builds: List of BuildData objects.
      sheets_version: The Google Sheets version these builds are now processed
        for.
      gs_ctx: A GSContext object to use, if set.
    """
    gs_ctx = gs_ctx or gs.GSContext()

    # Filter for builds that were not already on these versions.
    builds = [b for b in builds if b.sheets_version != sheets_version]
    if builds:
      log_ver_str = 'Sheets v%d' % sheets_version
      logging.info('Marking %d builds gathered (for %s) using %d processes'
                   ' now.', len(builds), log_ver_str, MAX_PARALLEL)

      def _MarkGathered(build):
        build.MarkGathered(sheets_version)
        json_text = json.dumps(build.gathered_dict.copy())
        gs_ctx.Copy('-', build.gathered_url, input=json_text, print_cmd=False)
        logging.debug('Marked build_number %d processed for %s.',
                      build.build_number, log_ver_str)

      inputs = [[build] for build in builds]
      parallel.RunTasksInProcessPool(_MarkGathered, inputs,
                                     processes=MAX_PARALLEL)
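
A rough standard-library analogue of the process-pool fan-out above, as a sketch only (chromite's parallel.RunTasksInProcessPool does its own process management and logging). Note that multiprocessing.Pool needs a picklable, top-level callable and picklable arguments, unlike the nested _MarkGathered closure used above:

import multiprocessing

def _mark_one(args):
  build, sheets_version = args
  # The real worker also uploads the gathered JSON to GS; here we just call
  # the (assumed) BuildData API to illustrate the fan-out.
  build.MarkGathered(sheets_version)

def mark_builds_parallel(builds, sheets_version, processes=4):
  work = [(build, sheets_version) for build in builds]
  with multiprocessing.Pool(processes=processes) as pool:
    pool.map(_mark_one, work)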
Example No. 10
  def _GetImagePath(self):
    """Returns the image path to use."""
    image_path = translated_path = None
    if os.path.isfile(self.image):
      if not self.yes and not _IsFilePathGPTDiskImage(self.image):
        # TODO(wnwen): Open the tarball and if there is just one file in it,
        #     use that instead. Existing code in upload_symbols.py.
        if cros_build_lib.BooleanPrompt(
            prolog='The given image file is not a valid disk image. Perhaps '
                   'you forgot to untar it.',
            prompt='Terminate the current flash process?'):
          raise FlashError('Update terminated by user.')
      image_path = self.image
    elif os.path.isdir(self.image):
      # Ask user which image (*.bin) in the folder to use.
      image_path = _ChooseImageFromDirectory(self.image)
    else:
      # Translate the xbuddy path to get the exact image to use.
      translated_path, _ = ds_wrapper.GetImagePathWithXbuddy(
          self.image, self.board, version=self.sdk_version,
          static_dir=_DEVSERVER_STATIC_DIR)
      image_path = ds_wrapper.TranslatedPathToLocalPath(
          translated_path, _DEVSERVER_STATIC_DIR,
          workspace_path=self.workspace_path)

    logging.info('Using image %s', translated_path or image_path)
    return image_path
Example No. 11
  def UpdateStateful(self, device, payload, clobber=False):
    """Update the stateful partition of the device.

    Args:
      device: The ChromiumOSDevice object to update.
      payload: The path to the update payload.
      clobber: Clobber stateful partition (defaults to False).
    """
    # Copy latest stateful_update to device.
    stateful_update_bin = path_util.FromChrootPath(
        self.STATEFUL_UPDATE_BIN, workspace_path=self.workspace_path)
    device.CopyToWorkDir(stateful_update_bin)
    msg = 'Updating stateful partition'
    logging.info('Copying stateful payload to device...')
    device.CopyToWorkDir(payload)
    cmd = ['sh',
           os.path.join(device.work_dir,
                        os.path.basename(self.STATEFUL_UPDATE_BIN)),
           os.path.join(device.work_dir, os.path.basename(payload))]

    if clobber:
      cmd.append('--stateful_change=clean')
      msg += ' with clobber enabled'

    logging.info('%s...', msg)
    try:
      device.RunCommand(cmd)
    except cros_build_lib.RunCommandError:
      logging.error('Failed to perform stateful partition update.')
Example No. 12
  def _CheckPayloadIntegrity(self, payload, is_delta, metadata_sig_file_name):
    """Checks the integrity of a generated payload.

    Args:
      payload: a pre-initialized update_payload.Payload object.
      is_delta: whether or not this is a delta payload (Boolean).
      metadata_sig_file_name: metadata signature file.

    Raises:
      PayloadVerificationError: when an error is encountered.
    """
    logging.info('Checking payload integrity')
    with utils.CheckedOpen(metadata_sig_file_name) as metadata_sig_file:
      try:
        # TODO(garnold)(chromium:243559) partition sizes should be embedded in
        # the payload; ditch the default values once it's done.
        # TODO(garnold)(chromium:261417) this disables the check for unmoved
        # blocks in MOVE sequences, which is an inefficiency but not
        # necessarily a problem.  It should be re-enabled once the delta
        # generator can optimize away such cases.
        payload.Check(metadata_sig_file=metadata_sig_file,
                      assert_type=('delta' if is_delta else 'full'),
                      disabled_tests=['move-same-src-dst-block'])
      except self._update_payload.PayloadError as e:
        raise PayloadVerificationError(
            'Payload integrity check failed: %s' % e)
Example No. 13
  def _CheckIfRebooted(self):
    """Checks whether a remote device has rebooted successfully.

    This uses a rapidly-retried SSH connection, which will wait for at most
    about ten seconds. If the network returns an error (e.g. host unreachable)
    the actual delay may be shorter.

    Returns:
      Whether the device has successfully rebooted.
    """
    # In tests SSH seems to be waiting rather longer than would be expected
    # from these parameters. These values produce a ~5 second wait.
    connect_settings = CompileSSHConnectSettings(
        ConnectTimeout=REBOOT_SSH_CONNECT_TIMEOUT,
        ConnectionAttempts=REBOOT_SSH_CONNECT_ATTEMPTS)
    cmd = "[ ! -e '%s' ]" % REBOOT_MARKER
    result = self.RemoteSh(cmd, connect_settings=connect_settings,
                           error_code_ok=True, ssh_error_ok=True,
                           capture_output=True)

    errors = {0: 'Reboot complete.',
              1: 'Device has not yet shutdown.',
              255: 'Cannot connect to device; reboot in progress.'}
    if result.returncode not in errors:
      raise Exception('Unknown error code %s returned by %s.'
                      % (result.returncode, cmd))

    logging.info(errors[result.returncode])
    return result.returncode == 0
Example No. 14
  def PerformStage(self):
    """Do the work of generating our release payloads."""
    # Convert to release tools naming for boards.
    board = self._current_board.replace('_', '-')
    version = self._run.attrs.release_tag

    assert version, "We can't generate payloads without a release_tag."
    logging.info("Generating payloads for: %s, %s", board, version)

    # Test to see if the current board has a Paygen configuration. We do
    # this here, not in the sub-process so we don't have to pass back a
    # failure reason.
    try:
      paygen_build_lib.ValidateBoardConfig(board)
    except paygen_build_lib.BoardNotConfigured:
      raise PaygenNoPaygenConfigForBoard(
          'Golden Eye (%s) has no entry for board %s. Get a TPM to fix.' %
          (paygen_build_lib.BOARDS_URI, board))

    with parallel.BackgroundTaskRunner(self._RunPaygenInProcess) as per_channel:
      logging.info("Using channels: %s", self.channels)

      # Default to False, set to True if it's a canary type build
      skip_duts_check = False
      if config_lib.IsCanaryType(self._run.config.build_type):
        skip_duts_check = True

      # If we have an explicit list of channels, use it.
      for channel in self.channels:
        per_channel.put((channel, board, version, self._run.debug,
                         self._run.config.paygen_skip_testing,
                         self._run.config.paygen_skip_delta_payloads,
                         skip_duts_check))
Example No. 15
def ReadUnknownEncodedFile(file_path, logging_text=None):
  """Read a file of unknown encoding (UTF-8 or latin) by trying in sequence.

  Args:
    file_path: what to read.
    logging_text: what to display for logging depending on file read.

  Returns:
    File content, possibly converted from latin1 to UTF-8.

  Raises:
    Assertion error: if non-whitelisted illegal XML characters
      are found in the file.
    ValueError: returned if we get invalid XML.
  """
  try:
    with codecs.open(file_path, encoding='utf-8') as c:
      file_txt = c.read()
      if logging_text:
        logging.info('%s %s (UTF-8)', logging_text, file_path)
  except UnicodeDecodeError:
    with codecs.open(file_path, encoding='latin1') as c:
      file_txt = c.read()
      if logging_text:
        logging.info('%s %s (latin1)', logging_text, file_path)

  file_txt, char_list = _HandleIllegalXMLChars(file_txt)

  if char_list:
    raise ValueError('Illegal XML characters %s found in %s.' %
                     (char_list, file_path))

  return file_txt
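
The try-UTF-8-then-latin1 fallback on its own (a minimal sketch; the real helper also strips illegal XML characters, as shown above):

import codecs

def read_text_any_encoding(file_path):
  """Read |file_path| as UTF-8, falling back to latin1 on decode errors."""
  try:
    with codecs.open(file_path, encoding='utf-8') as f:
      return f.read()
  except UnicodeDecodeError:
    with codecs.open(file_path, encoding='latin1') as f:
      return f.read()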
Example No. 16
  def _GenerateUnsignedPayload(self):
    """Generate the unsigned delta into self.payload_file."""
    # Note that the command run here requires sudo access.

    logging.info('Generating unsigned payload as %s', self.payload_file)

    tgt_image = self.payload.tgt_image
    cmd = ['cros_generate_update_payload',
           '--outside_chroot',
           '--output', self.payload_file,
           '--image', self.tgt_image_file,
           '--channel', tgt_image.channel,
           '--board', tgt_image.board,
           '--version', tgt_image.version]
    cmd += self._BuildArg('--key', tgt_image, 'key', default='test')
    cmd += self._BuildArg('--build_channel', tgt_image, 'image_channel',
                          default=tgt_image.channel)
    cmd += self._BuildArg('--build_version', tgt_image, 'image_version',
                          default=tgt_image.version)

    if self.payload.src_image:
      src_image = self.payload.src_image
      cmd += ['--src_image', self.src_image_file,
              '--src_channel', src_image.channel,
              '--src_board', src_image.board,
              '--src_version', src_image.version]
      cmd += self._BuildArg('--src_key', src_image, 'key', default='test')
      cmd += self._BuildArg('--src_build_channel', src_image, 'image_channel',
                            default=src_image.channel)
      cmd += self._BuildArg('--src_build_version', src_image, 'image_version',
                            default=src_image.version)

    delta_log = self._RunGeneratorCmd(cmd)
    self._StoreDeltaLog(delta_log)
Example No. 17
    def _enforce_lock(self, flags, message):
        # Try nonblocking first, if it fails, display the context/message,
        # and then wait on the lock.
        try:
            self.locking_mechanism(self.fd, flags | fcntl.LOCK_NB)
            return
        except EnvironmentError as e:
            if e.errno == errno.EDEADLOCK:
                self.unlock()
            elif e.errno != errno.EAGAIN:
                raise
        if self.description:
            message = "%s: blocking while %s" % (self.description, message)
        if not self.blocking:
            self.close()
            raise LockNotAcquiredError(message)
        if self._verbose:
            logging.info(message)

        try:
            self.locking_mechanism(self.fd, flags)
        except EnvironmentError as e:
            if e.errno != errno.EDEADLOCK:
                raise
            self.unlock()
            self.locking_mechanism(self.fd, flags)
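
The nonblocking-then-blocking flock pattern above, reduced to a minimal sketch using plain fcntl (the class above also layers deadlock recovery, descriptions, and a non-blocking failure mode on top of this):

import fcntl

def enforce_lock_sketch(fd, message=None):
  """Try an exclusive lock without blocking; otherwise report and block."""
  try:
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    return
  except BlockingIOError:
    if message:
      print(message)
  fcntl.flock(fd, fcntl.LOCK_EX)  # block until the lock becomes available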
Example No. 18
def CreatePackages(targets_wanted, output_dir, root='/'):
  """Create redistributable cross-compiler packages for the specified targets

  This creates toolchain packages that should be usable in conjunction with
  a downloaded sysroot (created elsewhere).

  Tarballs (one per target) will be created in $PWD.

  Args:
    targets_wanted: The targets to package up.
    output_dir: The directory to put the packages in.
    root: The root path to pull all packages/files from.
  """
  logging.info('Writing tarballs to %s', output_dir)
  osutils.SafeMakedirs(output_dir)
  ldpaths = lddtree.LoadLdpaths(root)
  targets = ExpandTargets(targets_wanted)

  with osutils.TempDir() as tempdir:
    # We have to split the root generation from the compression stages.  This is
    # because we hardlink in all the files (to avoid overhead of reading/writing
    # the copies multiple times).  But tar gets angry if a file's hardlink count
    # changes from when it starts reading a file to when it finishes.
    with parallel.BackgroundTaskRunner(CreatePackagableRoot) as queue:
      for target in targets:
        output_target_dir = os.path.join(tempdir, target)
        queue.put([target, output_target_dir, ldpaths, root])

    # Build the tarball.
    with parallel.BackgroundTaskRunner(cros_build_lib.CreateTarball) as queue:
      for target in targets:
        tar_file = os.path.join(output_dir, target + '.tar.xz')
        queue.put([tar_file, os.path.join(tempdir, target)])
Example No. 19
def GitPush(git_repo, refspec, push_to, force=False, retry=True,
            capture_output=True, skip=False, **kwargs):
  """Wrapper for pushing to a branch.

  Args:
    git_repo: Git repository to act on.
    refspec: The local ref to push to the remote.
    push_to: A RemoteRef object representing the remote ref to push to.
    force: Whether to bypass non-fastforward checks.
    retry: Retry a push in case of transient errors.
    capture_output: Whether to capture output for this command.
    skip: Do not actually push anything.
  """
  cmd = ['push', push_to.remote, '%s:%s' % (refspec, push_to.ref)]
  if force:
    cmd.append('--force')

  if skip:
    # git-push has a --dry-run option but we can't use it because that still
    # runs push-access checks, and we want the skip mode to be available to
    # users who can't really push to remote.
    logging.info('Would have run "%s"', cmd)
    return

  RunGit(git_repo, cmd, retry=retry, capture_output=capture_output, **kwargs)
Example No. 20
    def ClassifyWorkOnChanges(cls, changes, config, build_root, manifest, packages_under_test):
        """Classifies WorkOn package changes in |changes|.

    Args:
      changes: The list or set of GerritPatch instances.
      config: The cbuildbot config.
      build_root: Path to the build root.
      manifest: A ManifestCheckout instance representing our build directory.
      packages_under_test: A list of packages names included in the build.
        (e.g. ['chromeos-base/chromite-0.0.1-r1258']).

    Returns:
      A (workon_changes, irrelevant_workon_changes) tuple; workon_changes
      is a subset of |changes| that have modified workon packages, and
      irrelevant_workon_changes is a subset of workon_changes which are
      irrelevant to |config|.
    """
        workon_changes = set()
        irrelevant_workon_changes = set()

        workon_dict = portage_util.BuildFullWorkonPackageDictionary(build_root, config.overlays, manifest)

        pp = pprint.PrettyPrinter(indent=2)
        logging.info("(project, branch) to workon package mapping:\n %s", pp.pformat(workon_dict))
        logging.info("packages under test\n: %s", pp.pformat(packages_under_test))

        for change in changes:
            packages = workon_dict.get((change.project, change.tracking_branch))
            if packages:
                # The CL modifies a workon package.
                workon_changes.add(change)
                if all(x not in packages_under_test for x in packages):
                    irrelevant_workon_changes.add(change)

        return workon_changes, irrelevant_workon_changes
Example No. 21
  def SetAlias(self, alias_name):
    """Assign to the device a user-friendly alias name.

    Args:
      alias_name: The alias name to set. It must be no more than 128 characters
        long and contain only alphanumeric characters and/or underscores.

    Raises:
      InvalidDevicePropertyError if |alias_name| is invalid.
    """
    if len(alias_name) > BRILLO_DEVICE_PROPERTY_MAX_LEN:
      raise InvalidDevicePropertyError(
          'The alias name cannot be more than %d characters.' %
          BRILLO_DEVICE_PROPERTY_MAX_LEN)
    valid_alias_chars = string.ascii_letters + string.digits + '_'
    if not all(c in valid_alias_chars for c in alias_name):
      raise InvalidDevicePropertyError(
          'The alias name can only contain alphanumeric characters and/or '
          'underscores.')

    self.RunCommand(['mkdir', '-p', BRILLO_DEVICE_PROPERTY_DIR],
                    remote_sudo=True)
    alias_file_path = os.path.join(BRILLO_DEVICE_PROPERTY_DIR,
                                   BRILLO_DEVICE_PROPERTY_ALIAS)
    self.RunCommand(['echo', alias_name, '>', alias_file_path],
                    remote_sudo=True)
    self._alias = alias_name

    logging.info('Successfully set alias to "%s".', alias_name)
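
The alias validation above in isolation (a sketch; the 128 default stands in for BRILLO_DEVICE_PROPERTY_MAX_LEN, whose real value is defined in chromite):

import string

def is_valid_alias(alias_name, max_len=128):
  valid_chars = set(string.ascii_letters + string.digits + '_')
  return (len(alias_name) <= max_len and
          all(c in valid_chars for c in alias_name))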
Example No. 22
 def Run(self):
   """Run cros deploy."""
   commandline.RunInsideChroot(self, auto_detect_brick=True)
   self.options.Freeze()
   try:
     deploy.Deploy(
         self.options.device,
         self.options.packages,
         board=self.options.board,
         brick_name=self.options.brick or self.curr_brick_locator,
         blueprint=self.options.blueprint,
         emerge=self.options.emerge,
         update=self.options.update,
         deep=self.options.deep,
         deep_rev=self.options.deep_rev,
         clean_binpkg=self.options.clean_binpkg,
         root=self.options.root,
         strip=self.options.strip,
         emerge_args=self.options.emerge_args,
         ssh_private_key=self.options.private_key,
         ping=self.options.ping,
         reflash=self.options.reflash,
         force=self.options.force,
         dry_run=self.options.dry_run)
   except Exception as e:
     logging.error(e)
     logging.error('cros deploy terminated before completing.')
     if self.options.debug:
       raise
     else:
       raise SystemExit(1)
   else:
     logging.info('cros deploy completed successfully.')
Example No. 23
def main(argv):
  options = ParseArgs(argv)

  if not cros_build_lib.IsInsideChroot():
    raise commandline.ChrootRequiredError()

  if os.geteuid() != 0:
    cros_build_lib.SudoRunCommand(sys.argv)
    return

  # sysroot must have a trailing / as the tree dictionary produced by
  # create_trees is indexed with a trailing /.
  sysroot = cros_build_lib.GetSysroot(options.board) + '/'
  trees = create_trees(target_root=sysroot, config_root=sysroot)

  vartree = trees[sysroot]['vartree']

  cache_dir = os.path.join(path_util.FindCacheDir(),
                           'cros_install_debug_syms-v' + CACHE_VERSION)

  if options.clearcache:
    osutils.RmDir(cache_dir, ignore_missing=True)

  binhost_cache = None
  if options.cachebinhost:
    binhost_cache = cache.DiskCache(cache_dir)

  boto_file = vartree.settings['BOTO_CONFIG']
  if boto_file:
    os.environ['BOTO_CONFIG'] = boto_file

  gs_context = gs.GSContext()
  symbols_mapping = RemoteSymbols(vartree, binhost_cache)

  if options.all:
    to_install = vartree.dbapi.cpv_all()
  else:
    to_install = [GetMatchingCPV(p, vartree.dbapi) for p in options.packages]

  to_install = [p for p in to_install
                if ShouldGetSymbols(p, vartree.dbapi, symbols_mapping)]

  if not to_install:
    logging.info('nothing to do, exit')
    return

  with DebugSymbolsInstaller(vartree, gs_context, sysroot,
                             not options.debug) as installer:
    args = [(p, symbols_mapping[p]) for p in to_install]
    parallel.RunTasksInProcessPool(installer.Install, args,
                                   processes=options.jobs)

  logging.debug('installation done, updating packages index file')
  packages_dir = os.path.join(sysroot, 'packages')
  packages_file = os.path.join(packages_dir, 'Packages')
  # binpkg will set DEBUG_SYMBOLS automatically if it detects the debug symbols
  # in the packages dir.
  pkgindex = binpkg.GrabLocalPackageIndex(packages_dir)
  with open(packages_file, 'w') as p:
    pkgindex.Write(p)
Example No. 24
  def TestDebug(self):
    """Tests the debug command."""
    logging.info('Test to start and debug a new process on the VM device.')
    exe_path = '/bin/bash'
    start_cmd = self.BuildCommand('debug', device=self.vm.device_addr,
                                  opt_args=['--exe', exe_path])
    result = cros_build_lib.RunCommand(start_cmd, capture_output=True,
                                       error_code_ok=True, input='\n')
    if result.returncode:
      logging.error('Failed to start and debug a new process on the VM device.')
      raise CommandError(result.error)

    logging.info('Test to attach a running process on the VM device.')
    with remote_access.ChromiumOSDeviceHandler(
        remote_access.LOCALHOST, port=self.vm.port) as device:
      exe = 'update_engine'
      pids = device.GetRunningPids(exe, full_path=False)
      if not pids:
        logging.error('Failed to find any running process to debug.')
        raise CommandError()
      pid = pids[0]
      attach_cmd = self.BuildCommand('debug', device=self.vm.device_addr,
                                     opt_args=['--pid', str(pid)])
      result = cros_build_lib.RunCommand(attach_cmd, capture_output=True,
                                         error_code_ok=True, input='\n')
      if result.returncode:
        logging.error('Failed to attach a running process on the VM device.')
        raise CommandError(result.error)
Example No. 25
  def PromoteCandidate(self, retries=manifest_version.NUM_RETRIES):
    """Promotes the current LKGM candidate to be a real versioned LKGM."""
    assert self.current_version, 'No current manifest exists.'

    last_error = None
    path_to_candidate = self.GetLocalManifest(self.current_version)
    assert os.path.exists(path_to_candidate), 'Candidate not found locally.'

    # This may potentially fail for not being at TOT while pushing.
    for attempt in range(0, retries + 1):
      try:
        if attempt > 0:
          self.RefreshManifestCheckout()
        git.CreatePushBranch(manifest_version.PUSH_BRANCH,
                             self.manifest_dir, sync=False)
        manifest_version.CreateSymlink(path_to_candidate, self.lkgm_path)
        git.RunGit(self.manifest_dir, ['add', self.lkgm_path])
        self.PushSpecChanges(
            'Automatic: %s promoting %s to LKGM' % (self.build_names[0],
                                                    self.current_version))
        return
      except cros_build_lib.RunCommandError as e:
        last_error = 'Failed to promote manifest. error: %s' % e
        logging.info(last_error)
        logging.info('Retrying to promote manifest:  Retry %d/%d', attempt + 1,
                     retries)

    raise PromoteCandidateException(last_error)
Example No. 26
def _Emerge(device, pkg_path, root, extra_args=None):
  """Copies |pkg| to |device| and emerges it.

  Args:
    device: A ChromiumOSDevice object.
    pkg_path: A path to a binary package.
    root: Package installation root path.
    extra_args: Extra arguments to pass to emerge.

  Raises:
    DeployError: Unrecoverable error during emerge.
  """
  pkgroot = os.path.join(device.work_dir, 'packages')
  pkg_name = os.path.basename(pkg_path)
  pkg_dirname = os.path.basename(os.path.dirname(pkg_path))
  pkg_dir = os.path.join(pkgroot, pkg_dirname)
  portage_tmpdir = os.path.join(device.work_dir, 'portage-tmp')
  # Clean out the dirs first if we had a previous emerge on the device so as to
  # free up space for this emerge.  The last emerge gets implicitly cleaned up
  # when the device connection deletes its work_dir.
  device.RunCommand(
      ['rm', '-rf', pkg_dir, portage_tmpdir, '&&',
       'mkdir', '-p', pkg_dir, portage_tmpdir], remote_sudo=True)

  # This message is read by BrilloDeployOperation.
  logging.notice('Copying %s to device.', pkg_name)
  device.CopyToDevice(pkg_path, pkg_dir, remote_sudo=True)

  logging.info('Use portage temp dir %s', portage_tmpdir)

  # This message is read by BrilloDeployOperation.
  logging.notice('Installing %s.', pkg_name)
  pkg_path = os.path.join(pkg_dir, pkg_name)

  # We set PORTAGE_CONFIGROOT to '/usr/local' because by default all
  # chromeos-base packages will be skipped due to the configuration
  # in /etc/portage/make.profile/package.provided. However, there is
  # a known bug that /usr/local/etc/portage is not setup properly
  # (crbug.com/312041). This does not affect `cros deploy` because
  # we do not use the preset PKGDIR.
  extra_env = {
      'FEATURES': '-sandbox',
      'PKGDIR': pkgroot,
      'PORTAGE_CONFIGROOT': '/usr/local',
      'PORTAGE_TMPDIR': portage_tmpdir,
      'PORTDIR': device.work_dir,
      'CONFIG_PROTECT': '-*',
  }
  cmd = ['emerge', '--usepkg', pkg_path, '--root=%s' % root]
  if extra_args:
    cmd.append(extra_args)

  try:
    device.RunCommand(cmd, extra_env=extra_env, remote_sudo=True,
                      capture_output=False, debug_level=logging.INFO)
  except Exception:
    logging.error('Failed to emerge package %s', pkg_name)
    raise
  else:
    logging.notice('%s has been installed.', pkg_name)
Example No. 27
  def ApplySchemaMigrations(self, maxVersion=None):
    """Apply pending migration scripts to database, in order.

    Args:
      maxVersion: The highest version migration script to apply. If
                  unspecified, all migrations found will be applied.
    """
    migrations = self._GetMigrationScripts()

    # Execute the migration scripts in order, asserting that each one
    # updates the schema version to the expected number. If maxVersion
    # is specified stop early.
    for (number, script) in migrations:
      if maxVersion is not None and number > maxVersion:
        break

      if number > self.schema_version:
        # Invalidate self._meta, then run script and ensure that schema
        # version was increased.
        self._meta = None
        logging.info('Running migration script %s', script)
        self.RunQueryScript(script)
        self.schema_version = self.QuerySchemaVersion()
        if self.schema_version != number:
          raise DBException('Migration script %s did not update '
                            'schema version to %s as expected. ' % (script,
                                                                    number))
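
The ordering and version-guard logic above, reduced to a sketch in which a hypothetical apply_script callable stands in for RunQueryScript/QuerySchemaVersion:

def apply_migrations(migrations, schema_version, apply_script,
                     max_version=None):
  """Apply (number, script) pairs in order, skipping already-applied ones."""
  for number, script in sorted(migrations):
    if max_version is not None and number > max_version:
      break
    if number > schema_version:
      apply_script(script)
      schema_version = number
  return schema_version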
Example No. 28
  def PerformStage(self):
    # Initially get version from metadata in case the initial sync
    # stage set it.
    self.android_version = self._GetAndroidVersionFromMetadata()

    # Need to always iterate through and generate the board-specific
    # Android version metadata.  Each board must be handled separately
    # since there might be differing builds in the same release group.
    versions = set([])
    for builder_run in self._run.GetUngroupedBuilderRuns():
      for board in builder_run.config.boards:
        try:
          # Determine the version for each board and record metadata.
          version = self._run.DetermineAndroidVersion(boards=[board])
          builder_run.attrs.metadata.UpdateBoardDictWithDict(
              board, {'android-container-version': version})
          versions.add(version)
          logging.info('Board %s has Android version %s', board, version)
        except cbuildbot_run.NoAndroidVersionError as ex:
          logging.info('Board %s does not contain Android (%s)', board, ex)

    # If there wasn't a version specified in the manifest but there is
    # a unique one across all the boards, treat it as the version for the
    # entire step.
    if self.android_version is None and len(versions) == 1:
      self.android_version = versions.pop()

    if self.android_version:
      logging.PrintBuildbotStepText('tag %s' % self.android_version)
Example No. 29
def main(argv):
  cros_build_lib.AssertInsideChroot()
  opts = _ParseArguments(argv)

  logging.info('Generating board configs. This takes about 2m...')
  for key in sorted(binhost.GetChromePrebuiltConfigs()):
    binhost.GenConfigsForBoard(key.board, regen=opts.regen, error_code_ok=True)

  # Fetch all compat IDs.
  fetcher = binhost.CompatIdFetcher()
  keys = binhost.GetChromePrebuiltConfigs().keys()
  compat_ids = fetcher.FetchCompatIds(keys)

  # Save the PFQ configs.
  pfq_configs = binhost.PrebuiltMapping.Get(keys, compat_ids)
  filename_internal = binhost.PrebuiltMapping.GetFilename(opts.buildroot,
                                                          'chrome')
  pfq_configs.Dump(filename_internal)
  git.AddPath(filename_internal)
  git.Commit(os.path.dirname(filename_internal), 'Update PFQ config dump',
             allow_empty=True)

  filename_external = binhost.PrebuiltMapping.GetFilename(opts.buildroot,
                                                          'chromium',
                                                          internal=False)
  pfq_configs.Dump(filename_external, internal=False)
  git.AddPath(filename_external)
  git.Commit(os.path.dirname(filename_external), 'Update PFQ config dump',
             allow_empty=True)
Example No. 30
  def _Create(self):
    """Create a given payload, if it doesn't already exist."""

    logging.info('Generating %s payload %s',
                 'delta' if self.payload.src_image else 'full', self.payload)

    # Fetch and extract the delta generator.
    self._PrepareGenerator()

    # Fetch and prepare the tgt image.
    self._PrepareImage(self.payload.tgt_image, self.tgt_image_file)

    # Fetch and prepare the src image.
    if self.payload.src_image:
      self._PrepareImage(self.payload.src_image, self.src_image_file)

    # Generate the unsigned payload.
    self._GenerateUnsignedPayload()

    # Sign the payload, if needed.
    metadata_signatures = None
    if self.signer:
      _, metadata_signatures = self._SignPayload()

    # Store hash and signatures json.
    self._StorePayloadJson(metadata_signatures)
Example No. 31
    def StopWorkingOnPackages(self,
                              packages,
                              use_all=False,
                              use_workon_only=False):
        """Stop working on a list of packages currently marked as locally worked on.

    Args:
      packages: list of package name fragments.  These will be mapped to
          canonical portage atoms via the same process as
          StartWorkingOnPackages().
      use_all: True iff instead of the provided package list, we should just
          stop working on all currently worked on atoms for the system in
          question.
      use_workon_only: True iff instead of the provided package list, we should
          stop working on all currently worked on atoms that define only a
          -9999 ebuild.
    """
        if use_all or use_workon_only:
            atoms = self._GetLiveAtoms(filter_workon=use_workon_only)
        else:
            atoms = self._GetCanonicalAtoms(packages, find_stale=True)

        current_atoms = self._GetWorkedOnAtoms()
        stopped_atoms = []
        for atom in atoms:
            if atom not in current_atoms:
                logging.warning('Not working on %s', atom)
                continue

            current_atoms.discard(atom)
            stopped_atoms.append(atom)

        self._SetWorkedOnAtoms(current_atoms)

        if stopped_atoms:
            # Legacy scripts used single quotes in their output, and we carry on this
            # honorable tradition.
            logging.info("Stopped working on '%s' for '%s'",
                         ' '.join(stopped_atoms), self._system)
Example No. 32
    def TransferStatefulUpdate(self):
        """Transfer files for stateful update.

    The stateful update bin and the corresponding payloads are copied to the
    target remote device for stateful update.
    """
        logging.debug('Checking whether file stateful_update_bin needs to be '
                      'transferred to device...')
        need_transfer, stateful_update_bin = self._GetStatefulUpdateScript()
        if need_transfer:
            self.device.CopyToWorkDir(stateful_update_bin,
                                      log_output=True,
                                      **self._cmd_kwargs)
            self.stateful_update_bin = os.path.join(
                self.device.work_dir,
                os.path.basename(self.STATEFUL_UPDATE_BIN))
        else:
            self.stateful_update_bin = stateful_update_bin

        logging.info('Copying stateful payload to device...')
        payload = os.path.join(self.payload_dir, ds_wrapper.STATEFUL_FILENAME)
        self.device.CopyToWorkDir(payload, log_output=True, **self._cmd_kwargs)
Example No. 33
def generate_lints(board: str, ebuild_path: str) -> Path:
  """Collects the lints for a given package on a given board.

  Args:
    board: the board to collect lints for.
    ebuild_path: the path to the ebuild to collect lints for.

  Returns:
    The path to a tmpdir that all of the lint YAML files (if any) will be in.
    This will also be populated by JSON files containing InvocationMetadata.
    The generation of this is handled by our compiler wrapper.
  """
  logging.info('Running lints for %r on board %r', ebuild_path, board)

  osutils.RmDir(LINT_BASE, ignore_missing=True, sudo=True)
  osutils.SafeMakedirs(LINT_BASE, 0o777, sudo=True)

  # FIXME(gbiv): |test| might be better here?
  result = cros_build_lib.run(
      [f'ebuild-{board}', ebuild_path, 'clean', 'compile'],
      check=False,
      print_cmd=True,
      extra_env={'WITH_TIDY': 'tricium'},
      capture_output=True,
      encoding='utf-8',
      errors='replace',
  )

  if result.returncode:
    status = f'failed with code {result.returncode}; output:\n{result.stdout}'
    log_fn = logging.warning
  else:
    status = 'succeeded'
    log_fn = logging.info

  log_fn('Running |ebuild| on %s %s', ebuild_path, status)
  lint_tmpdir = tempfile.mkdtemp(prefix='tricium_tidy')
  osutils.CopyDirContents(LINT_BASE, lint_tmpdir)
  return Path(lint_tmpdir)
Example No. 34
    def GetFailedHWTestsFromCIDB(cls, db, build_ids):
        """Get test names of failed HWTests from CIDB.

    Args:
      db: An instance of cidb.CIDBConnection
      build_ids: A list of build_ids (strings) to get failed HWTests.

    Returns:
      A list of normalized HWTest names (strings).
    """
        # TODO: probably only count 'fail' and exclude 'abort' and 'other' results?
        hwtest_results = cls.GetHWTestResultsFromCIDB(
            db, build_ids, test_statues=constants.HWTEST_STATUES_NOT_PASSED)

        failed_tests = set([
            HWTestResult.NormalizeTestName(result.test_name)
            for result in hwtest_results
        ])
        failed_tests.discard(None)

        logging.info('Found failed tests: %s ', failed_tests)
        return failed_tests
Example No. 35
    def CanReuseChroot(self, chroot_path):
        """Determine if the chroot can be reused.

    A chroot can be reused if all of the following are true:
        1.  The existence of chroot.img matches what is requested in the config,
            i.e. exists when chroot_use_image is True or vice versa.
        2.  The build config doesn't request chroot_replace.
        3.  The previous local build succeeded.
        4.  If there was a previous master build, that build also succeeded.

    Args:
      chroot_path: Path to the chroot we want to reuse.

    Returns:
      True if the chroot at |chroot_path| can be reused, False if not.
    """

        chroot_img = chroot_path + '.img'
        chroot_img_exists = os.path.exists(chroot_img)
        if self._run.config.chroot_use_image != chroot_img_exists:
            logging.info(
                'chroot image at %s %s but chroot_use_image=%s.  '
                'Cannot reuse chroot.', chroot_img,
                'exists' if chroot_img_exists else "doesn't exist",
                self._run.config.chroot_use_image)
            return False

        if self._run.config.chroot_replace and self._run.options.build:
            logging.info(
                'Build config has chroot_replace=True. Cannot reuse chroot.')
            return False

        previous_state = self._GetPreviousBuildStatus()
        if previous_state.status != constants.BUILDER_STATUS_PASSED:
            logging.info(
                'Previous local build %s did not pass. Cannot reuse chroot.',
                previous_state.build_number)
            return False

        if previous_state.master_build_id:
            build_number, status = self._GetPreviousMasterStatus(
                previous_state)
            if status != constants.BUILDER_STATUS_PASSED:
                logging.info(
                    'Previous master build %s did not pass (%s).  '
                    'Cannot reuse chroot.', build_number, status)
                return False

        return True
Example No. 36
def GetUpdatePayloads(path,
                      payload_dir,
                      board=None,
                      src_image_to_delta=None,
                      timeout=60 * 15,
                      static_dir=DEFAULT_STATIC_DIR):
    """Launch devserver to get the update payloads.

  Args:
    path: The xbuddy path.
    payload_dir: The directory to store the payloads. On failure, the devserver
                 log will be copied to |payload_dir|.
    board: The default board to use when |path| is None.
    src_image_to_delta: Image used as the base to generate the delta payloads.
    timeout: Timeout for launching devserver (seconds).
    static_dir: Devserver static dir to use.
  """
    ds = DevServerWrapper(static_dir=static_dir,
                          src_image=src_image_to_delta,
                          board=board)
    req = GenerateXbuddyRequest(path, 'update')
    logging.info('Starting local devserver to generate/serve payloads...')
    try:
        ds.Start()
        url = ds.OpenURL(ds.GetURL(sub_dir=req), timeout=timeout)
        ds.DownloadFile(os.path.join(url, ROOTFS_FILENAME), payload_dir)
        ds.DownloadFile(os.path.join(url, STATEFUL_FILENAME), payload_dir)
    except DevServerException:
        logging.warning(ds.TailLog() or 'No devserver log is available.')
        raise
    else:
        logging.debug(ds.TailLog() or 'No devserver log is available.')
    finally:
        ds.Stop()
        if os.path.exists(ds.log_file):
            shutil.copyfile(ds.log_file,
                            os.path.join(payload_dir, 'local_devserver.log'))
        else:
            logging.warning('Could not find %s', ds.log_file)
Example No. 37
    def PerformStage(self):
        """Do the work of generating our release payloads."""
        # Convert to release tools naming for boards.
        board = self._current_board.replace('_', '-')
        version = self._run.attrs.release_tag

        assert version, "We can't generate payloads without a release_tag."
        logging.info('Generating payloads for: %s, %s', board, version)

        # Test to see if the current board has a Paygen configuration. We do
        # this here, not in the sub-process so we don't have to pass back a
        # failure reason.
        try:
            paygen_build_lib.ValidateBoardConfig(board)
        except paygen_build_lib.BoardNotConfigured:
            raise PaygenNoPaygenConfigForBoard(
                'Golden Eye (%s) has no entry for board %s. Get a TPM to fix.'
                % (paygen_build_lib.PAYGEN_URI, board))

        # Default to False, set to True if it's a canary type build
        skip_duts_check = False
        if config_lib.IsCanaryType(self._run.config.build_type):
            skip_duts_check = True

        with parallel.BackgroundTaskRunner(
                self._RunPaygenInProcess) as per_channel:
            logging.info('Using channels: %s', self.channels)

            # Set metadata with the channels we've configured.
            self._run.attrs.metadata.UpdateWithDict(
                {'channels': ','.join(self.channels)})

            # If we have an explicit list of channels, use it.
            for channel in self.channels:
                per_channel.put(
                    (channel, board, version, self._run.options.debug,
                     self._run.config.paygen_skip_testing,
                     self._run.config.paygen_skip_delta_payloads,
                     skip_duts_check))
Example No. 38
    def PerformStage(self):
        if not self._run.config.master:
            logging.info('This stage is only meaningful for master builds. '
                         'Doing nothing.')
            return

        if not self.buildstore.AreClientsReady():
            logging.info('No buildstore connection for this build. '
                         'Doing nothing.')
            return

        child_failures = self.buildstore.GetBuildsFailures(
            self.GetScheduledSlaveBuildbucketIds())
        for failure in child_failures:
            if (failure.stage_status != constants.BUILDER_STATUS_FAILED or
                    failure.build_status == constants.BUILDER_STATUS_INFLIGHT):
                continue
            slave_stage_url = uri_lib.ConstructMiloBuildUri(
                failure.buildbucket_id)
            logging.PrintBuildbotLink(
                '%s %s' % (failure.build_config, failure.stage_name),
                slave_stage_url)
Example No. 39
def RestoreSnapshot(snapshot_token, chroot=None):
    """Restore a logical volume snapshot of a chroot.

  Args:
    snapshot_token (str): The name of the snapshot to restore. Typically an
      opaque generated name returned from `CreateSnapshot`.
    chroot (chroot_lib.Chroot): The chroot to perform the operation on.
  """
    # Unmount to clean up stale processes that may still be in the chroot, in
    # order to prevent 'device busy' errors from umount.
    Unmount(chroot)
    logging.info('Restoring SDK snapshot with ID: %s', snapshot_token)
    cmd = [
        os.path.join(constants.CHROMITE_BIN_DIR, 'cros_sdk'),
        '--snapshot-restore',
        snapshot_token,
    ]
    if chroot:
        cmd.extend(['--chroot', chroot.path])

    # '--snapshot-restore' will automatically remount the image after restoring.
    cros_build_lib.run(cmd)
Example No. 40
    def _GetVMPid(self):
        """Get the pid of the VM.

    Returns:
      pid of the VM.
    """
        if not os.path.exists(self.vm_dir):
            logging.debug('%s not present.', self.vm_dir)
            return 0

        if not os.path.exists(self.pidfile):
            logging.info('%s does not exist.', self.pidfile)
            return 0

        pid = osutils.ReadFile(self.pidfile).rstrip()
        if not pid.isdigit():
            # Ignore blank/empty files.
            if pid:
                logging.error('%s in %s is not a pid.', pid, self.pidfile)
            return 0

        return int(pid)
Example No. 41
def DiscoverKeysets(keysets_dir):
  """Discover keysets.

  Args:
    keysets_dir: directory where the keysets live.  Typically /cros/keys.

  Returns:
    A sorted list of (setname, directory) tuples.
  """
  _, dirs, _ = next(os.walk(keysets_dir))
  ret = {}
  for src in sorted(dirs):
    path = os.path.join(keysets_dir, src)
    keyset = keys.Keyset(path)
    if keyset.name != 'unknown':
      if keyset.name in ret:
        logging.warning('Ignoring %s because name is duplicate of %s',
                        src, ret[keyset.name])
      else:
        logging.info('Discovered %s in %s', keyset.name, src)
        ret[keyset.name] = src
  return sorted(ret.items())
Example No. 42
  def FindPackageBuildFailureSuspects(self, changes, failure):
    """Find suspects for a PackageBuild failure.

    If a change touched a package and that package broke, this change is one of
    the suspects; if multiple changes touched one failed package, all these
    changes will be returned as suspects.

    Args:
      changes: A list of cros_patch.GerritPatch instances.
      failure: An instance of StageFailureMessage(or its sub-class).

    Returns:
      A pair of suspects and no_assignee_packages. suspects is a set of
      cros_patch.GerritPatch instances as suspects. no_assignee_packages is True
      when there are failed packages without assigned suspects; otherwise,
      no_assignee_packages is False.
    """
    suspects = set()
    no_assignee_packages = False
    packages_with_assignee = set()
    failed_packages = failure.GetFailedPackages()
    for package in failed_packages:
      failed_projects = portage_util.FindWorkonProjects([package])
      for change in changes:
        if change.project in failed_projects:
          suspects.add(change)
          packages_with_assignee.add(package)

    if suspects:
      logging.info('Find suspects for BuildPackages failures: %s',
                   cros_patch.GetChangesAsString(suspects))

    packages_without_assignee = set(failed_packages) - packages_with_assignee
    if packages_without_assignee:
      logging.info('Didn\'t find changes to blame for failed packages: %s',
                   list(packages_without_assignee))
      no_assignee_packages = True

    return suspects, no_assignee_packages
Example No. 43
def _UpdateTreeStatus(status_url, message):
  """Updates the tree status to |message|.

  Args:
    status_url: The tree status URL.
    message: The tree status text to post.
  """
  password = _GetPassword()
  params = urllib.urlencode({
      'message': message,
      'username': _USER_NAME,
      'password': password,
  })
  headers = {'Content-Type': 'application/x-www-form-urlencoded'}
  req = urllib2.Request(status_url, data=params, headers=headers)
  try:
    urllib2.urlopen(req)
  except (urllib2.URLError, httplib.HTTPException, socket.error) as e:
    logging.error('Unable to update tree status: %s', e)
    raise e
  else:
    logging.info('Updated tree status with message: %s', message)
Example No. 44
  def PerformStage(self):
    if not self._run.config.master:
      logging.info('This stage is only meaningful for master builds. '
                   'Doing nothing.')
      return

    build_id, db = self._run.GetCIDBHandle()

    if not db:
      logging.info('No cidb connection for this build. '
                   'Doing nothing.')
      return

    slave_failures = db.GetSlaveFailures(build_id)
    failures_by_build = cros_build_lib.GroupByKey(slave_failures, 'build_id')
    for build_id, build_failures in sorted(failures_by_build.items()):
      failures_by_stage = cros_build_lib.GroupByKey(build_failures,
                                                    'build_stage_id')
      # Surface a link to each slave stage that failed, in stage_id sorted
      # order.
      for stage_id in sorted(failures_by_stage):
        failure = failures_by_stage[stage_id][0]
        # Ignore failures that did not cause their enclosing stage to fail.
        # Ignore slave builds that are still inflight, because some stage logs
        # might not have been printed to buildbot yet.
        # TODO(akeshet) revisit this approach, if we seem to be suppressing
        # useful information as a result of it.
        if (failure['stage_status'] != constants.BUILDER_STATUS_FAILED or
            failure['build_status'] == constants.BUILDER_STATUS_INFLIGHT):
          continue
        waterfall_url = constants.WATERFALL_TO_DASHBOARD[failure['waterfall']]
        slave_stage_url = tree_status.ConstructDashboardURL(
            waterfall_url,
            failure['builder_name'],
            failure['build_number'],
            failure['stage_name'])
        logging.PrintBuildbotLink('%s %s' % (failure['build_config'],
                                             failure['stage_name']),
                                  slave_stage_url)
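
cros_build_lib.GroupByKey is chromite's own helper; a minimal stand-in with the same shape (a list of dict rows grouped by one field) might look like the sketch below. group_by_key is a hypothetical name.

import collections

def group_by_key(rows, key):
  """Group a list of dict rows into {key_value: [row, ...]}."""
  grouped = collections.defaultdict(list)
  for row in rows:
    grouped[row[key]].append(row)
  return dict(grouped)

# e.g. group_by_key(slave_failures, 'build_id') gives one bucket per slave
# build, which the stage then walks in sorted order to print one link per
# failed (non-inflight) stage.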
Ejemplo n.º 45
0
    def ArchiveHWQual():
      """Build and archive the HWQual images."""
      # TODO(petermayo): This logic needs to be exported from the BuildTargets
      # stage rather than copied/re-evaluated here.
      # TODO(mtennant): Make this autotest_built concept into a run param.
      autotest_built = (self._run.options.tests and
                        config['upload_hw_test_artifacts'])

      if config['hwqual'] and autotest_built:
        # Build the full autotest tarball for hwqual image. We don't upload it,
        # as it's fairly large and only needed by the hwqual tarball.
        logging.info('Archiving full autotest tarball locally ...')
        tarball = commands.BuildFullAutotestTarball(self._build_root,
                                                    self._current_board,
                                                    image_dir)
        commands.ArchiveFile(tarball, archive_path)

        # Build hwqual image and upload to Google Storage.
        hwqual_name = 'chromeos-hwqual-%s-%s' % (board, self.version)
        filename = commands.ArchiveHWQual(buildroot, hwqual_name, archive_path,
                                          image_dir)
        self._release_upload_queue.put([filename])
Ejemplo n.º 46
0
def Expire(ctx, dryrun, url):
    """Given a url, move it to the backup buckets.

    Args:
      ctx: GS context.
      dryrun: Do we actually move the file?
      url: Address of file to move.
    """
    logging.info('Expiring: %s', url)
    # Move gs://foo/some/file -> gs://foo-backup/some/file
    parts = urllib.parse.urlparse(url)
    expired_parts = list(parts)
    expired_parts[1] = parts.netloc + '-backup'
    target_url = urllib.parse.urlunparse(expired_parts)
    if dryrun:
        logging.notice('gsutil mv %s %s', url, target_url)
    else:
        try:
            ctx.Move(url, target_url)
        except Exception as e:
            # We can fail for lots of repeated random reasons.
            logging.warning('Move of "%s" failed, ignoring: "%s"', url, e)
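
The -backup rename above leans on urlparse treating the GS bucket as the URL's netloc; in isolation the rewrite is just the small sketch below (backup_url is a hypothetical helper).

import urllib.parse

def backup_url(url):
    """Sketch: gs://foo/some/file -> gs://foo-backup/some/file."""
    parts = urllib.parse.urlparse(url)
    return urllib.parse.urlunparse(
        parts._replace(netloc=parts.netloc + '-backup'))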
Ejemplo n.º 47
0
  def SendBuildbucketRequest(self, url, method, body, dryrun):
    """Generic buildbucket request.

    Args:
      url: Buildbucket url to send requests.
      method: HTTP method to perform, such as GET, POST, DELETE.
      body: The entity body to be sent with the request (a string object).
            See httplib2.Http.request for details.
      dryrun: Whether this is a dry run; if True, no request is sent.

    Returns:
      A dict of response entity body if the request succeeds; else, None.
      See httplib2.Http.request for details.

    Raises:
      BuildbucketResponseException when response['status'] is invalid.
    """
    if dryrun:
      logging.info('Dryrun mode is on; Would have made a request '
                   'with url %s method %s body:\n%s', url, method, body)
      return

    def try_method():
      response, content = self.http.request(
          url,
          method,
          body=body,
          headers={'Content-Type': 'application/json'},
      )

      if int(response['status']) // 100 != 2:
        raise BuildbucketResponseException(
            'Got a %s response from buildbucket with url: %s\n'
            'content: %s' % (response['status'], url, content))

      # Deserialize the content into a python dict.
      return json.loads(content)

    return retry_util.GenericRetry(lambda _: True, 3, try_method)
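
As used here, retry_util.GenericRetry(lambda _: True, 3, try_method) retries try_method on any exception, up to three retries. A pared-down stand-in for that contract (generic_retry is a hypothetical name, without the extra knobs the real helper offers) might be:

import time

def generic_retry(handler, max_retry, functor, sleep=0):
  """Call functor(); retry up to max_retry times while handler(exc) is True."""
  for attempt in range(max_retry + 1):
    try:
      return functor()
    except Exception as e:  # pylint: disable=broad-except
      if attempt == max_retry or not handler(e):
        raise
      time.sleep(sleep)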
Ejemplo n.º 48
0
def WaitForAFDOPerfData(cpv,
                        arch,
                        buildroot,
                        gs_context,
                        timeout=constants.AFDO_GENERATE_TIMEOUT):
  """Wait for AFDO perf data to show up (with an appropriate timeout).

  Wait for AFDO 'perf' data to show up in GS and copy it into a temp
  directory in the buildroot.

  Args:
    cpv: CPV object for Chrome.
    arch: architecture we're going to build Chrome for.
    buildroot: buildroot where AFDO data should be stored.
    gs_context: GS context to retrieve data.
    timeout: How long to wait total, in seconds.

  Returns:
    True if found the AFDO perf data before the timeout expired.
    False otherwise.
  """
  try:
    timeout_util.WaitForReturnTrue(
        CheckAFDOPerfData,
        func_args=(cpv, arch, gs_context),
        timeout=timeout,
        period=constants.SLEEP_TIMEOUT)
  except timeout_util.TimeoutError:
    logging.info('Could not find AFDO perf data before timeout')
    return False

  url = GetAFDOPerfDataURL(cpv, arch)
  dest_dir = AFDO_BUILDROOT_LOCAL % {'build_root': buildroot}
  dest_path = os.path.join(dest_dir, url.rsplit('/', 1)[1])
  gs_context.Copy(url, dest_path)

  UncompressAFDOFile(dest_path, buildroot)
  logging.info('Retrieved AFDO perf data to %s', dest_path)
  return True
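
timeout_util.WaitForReturnTrue polls the predicate until it returns True or the deadline passes. A rough standalone equivalent of that polling loop is sketched below; the names are hypothetical.

import time

class WaitTimeoutError(Exception):
  """Raised when the predicate never returned True before the deadline."""

def wait_for_return_true(func, func_args=(), timeout=60, period=5):
  deadline = time.monotonic() + timeout
  while time.monotonic() < deadline:
    if func(*func_args):
      return
    time.sleep(period)
  raise WaitTimeoutError('Timed out waiting for %r' % (func,))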
Ejemplo n.º 49
0
def _PostParseCheck(options):
    """Perform some usage validation (after we've parsed the arguments).

    Args:
      options: The options object returned by the cli parser.
    """
    if options.local_pkg_path and not os.path.isfile(options.local_pkg_path):
        cros_build_lib.Die('%s is not a file.', options.local_pkg_path)

    if not options.gn_args:
        gn_env = os.getenv('GN_ARGS')
        if gn_env is not None:
            options.gn_args = gn_helpers.FromGNArgs(gn_env)
            logging.info('GN_ARGS taken from environment: %s', options.gn_args)

    if not options.staging_flags:
        use_env = os.getenv('USE')
        if use_env is not None:
            options.staging_flags = ' '.join(
                set(use_env.split()).intersection(chrome_util.STAGING_FLAGS))
            logging.info('Staging flags taken from USE in environment: %s',
                         options.staging_flags)
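
The staging-flag fallback is just a set intersection between the USE environment variable and a known whitelist. Stripped of the options plumbing, it looks like the sketch below; the STAGING_FLAGS values are made up for illustration.

import os

STAGING_FLAGS = {'chrome_internal', 'chrome_debug'}  # hypothetical whitelist

def staging_flags_from_env():
    """Return the whitelisted USE flags present in the environment."""
    use_env = os.getenv('USE', '')
    return ' '.join(sorted(set(use_env.split()) & STAGING_FLAGS))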
Ejemplo n.º 50
0
    def _WaitForOperation(self,
                          operation,
                          get_operation_request,
                          timeout_sec=None,
                          timeout_handler=None):
        """Waits until timeout or the request gets a response with a 'DONE' status.

        Args:
          operation: The GCE operation to wait for.
          get_operation_request: The HTTP request to get the operation's
            status. This request will be executed periodically until it
            returns a 'DONE' status.
          timeout_sec: The maximum number of seconds to wait for.
          timeout_handler: A callable to be executed when the wait times out.

        Raises:
          Error when the wait times out or the operation fails.
        """
        def _IsDone():
            result = get_operation_request.execute()
            if result['status'] == 'DONE':
                if 'error' in result:
                    raise Error(result['error'])
                return True
            return False

        try:
            timeout = timeout_sec or self.DEFAULT_TIMEOUT_SEC
            logging.info(
                'Waiting up to %d seconds for operation [%s] to complete...',
                timeout, operation)
            timeout_util.WaitForReturnTrue(_IsDone, timeout, period=1)
        except timeout_util.TimeoutError:
            if timeout_handler:
                timeout_handler()
            raise Error('Timeout waiting for operation [%s] to complete' %
                        operation)
Ejemplo n.º 51
0
def BuildBucketRequest(http, url, method, body, dryrun):
  """Generic buildbucket request.

  Args:
    http: Http instance.
    url: Buildbucket url to send requests.
    method: Request method.
    body: Body of http request (string object).
    dryrun: Whether this is a dry run; if True, no request is sent.

  Returns:
    Content if request succeeds.

  Raises:
    BuildbucketResponseException when response['status'] is invalid.
  """
  if dryrun:
    logging.info('Dryrun mode is on; Would have made a request '
                 'with url %s method %s body:\n%s', url, method, body)
    return

  def try_method():
    response, content = http.request(
        url,
        method,
        body=body,
        headers={'Content-Type': 'application/json'},
    )

    if int(response['status']) // 100 != 2:
      raise BuildbucketResponseException(
          'Got a %s response from buildbucket with url: %s\n'
          'content: %s' % (response['status'], url, content))

    # Deserialize the content into a python dict.
    return json.loads(content)

  return retry_util.GenericRetry(lambda _: True, 3, try_method)
Ejemplo n.º 52
0
def _WorkOnEbuild(overlay, ebuild, manifest, options, ebuild_paths_to_add,
                  ebuild_paths_to_remove, messages, revved_packages,
                  new_package_atoms):
    """Work on a single ebuild.

    Args:
      overlay: The overlay the ebuild belongs to.
      ebuild: The ebuild to work on.
      manifest: The manifest of the given source root.
      options: The options object returned by the argument parser.
      ebuild_paths_to_add: New stable ebuild paths to add to git.
      ebuild_paths_to_remove: Old ebuild paths to remove from git.
      messages: A shared list of commit messages.
      revved_packages: A shared list of revved packages.
      new_package_atoms: A shared list of new package atoms.
    """
    if options.verbose:
        logging.info('Working on %s, info %s', ebuild.package,
                     ebuild.cros_workon_vars)
    try:
        result = ebuild.RevWorkOnEBuild(options.srcroot, manifest)
        if result:
            new_package, ebuild_path_to_add, ebuild_path_to_remove = result

            if ebuild_path_to_add:
                ebuild_paths_to_add.append(ebuild_path_to_add)
            if ebuild_path_to_remove:
                ebuild_paths_to_remove.append(ebuild_path_to_remove)

            messages.append(_GIT_COMMIT_MESSAGE % ebuild.package)
            revved_packages.append(ebuild.package)
            new_package_atoms.append('=%s' % new_package)
    except (OSError, IOError):
        logging.warning(
            'Cannot rev %s\n'
            'Note you will have to go into %s '
            'and reset the git repo yourself.', ebuild.package, overlay)
        raise
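
The shared-list arguments suggest the ebuilds are revved by multiple worker processes. One standard-library way to get such shared lists is multiprocessing.Manager, sketched below with a toy worker; every name here is hypothetical and stands in for the real per-ebuild work.

import multiprocessing

def _toy_worker(item, results):
    # Placeholder for the per-ebuild work; only the shared-list append matters.
    results.append(item.upper())

def rev_in_parallel(items):
    with multiprocessing.Manager() as manager:
        results = manager.list()
        with multiprocessing.Pool() as pool:
            pool.starmap(_toy_worker, [(item, results) for item in items])
        return list(results)

if __name__ == '__main__':
    print(rev_in_parallel(['foo/bar', 'foo/baz']))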
Ejemplo n.º 53
0
  def CheckMasterBinhostTest(self, db, build_id):
    """Check whether the master builder has passed BinhostTest stage.

    Args:
      db: cidb.CIDBConnection object.
      build_id: build_id of the master build to check for.

    Returns:
      True if the status of the master build BinhostTest stage is 'pass';
      else, False.
    """
    stage_name = 'BinhostTest'

    if self._build_stage_id is not None and db is not None:
      stages = db.GetBuildStages(build_id)

      # No stages found. The BinhostTest stage didn't start or was skipped;
      # in either case we don't need to push commits to the temp pfq branch.
      if not stages:
        logging.warning('no %s stage found in build %s', stage_name, build_id)
        return False

      stage_status = [s for s in stages if (
          s['name'] == stage_name and
          s['status'] == constants.BUILDER_STATUS_PASSED)]
      if stage_status:
        logging.info('build %s passed stage %s with %s',
                     build_id, stage_name, stage_status)
        return True
      else:
        logging.warning('build %s stage %s result %s',
                        build_id, stage_name, stage_status)
        return False

    logging.warning('Invalid build_stage_id %s or db %s, or no %s found',
                    self._build_stage_id, db, stage_name)
    return False
Ejemplo n.º 54
0
  def _ProcessAndArchiveResults(self, abs_results_dir, suite_names,
                                already_have_error):
    """Processes and archives test results.

    Args:
      abs_results_dir: Absolute path to directory containing test results.
      suite_names: List of string test suite names.
      already_have_error: Boolean for whether testing has already failed.

    Raises:
      failures_lib.TestFailure if one or more tests failed or results were
        unavailable. Suppressed if already_have_error is True.
    """
    if not os.path.isdir(abs_results_dir) or not os.listdir(abs_results_dir):
      raise failures_lib.TestFailure(FAILURE_NO_RESULTS % abs_results_dir)

    archive_base = constants.TAST_VM_TEST_RESULTS % {'attempt': self._attempt}
    _CopyResultsDir(abs_results_dir,
                    os.path.join(self.archive_path, archive_base))

    # TODO(crbug.com/770562): Collect stack traces once the tast executable is
    # symbolizing and collecting them (see VMTestStage._ArchiveTestResults).

    # Now archive the results to Cloud Storage.
    logging.info('Uploading artifacts to Cloud Storage...')
    self.UploadArtifact(archive_base, archive=False, strict=False)
    self.PrintDownloadLink(archive_base, RESULTS_LINK_PREFIX)

    try:
      self._ProcessResultsFile(abs_results_dir, archive_base, suite_names)
    except Exception as e:
      # Don't raise a new exception if testing already failed.
      if already_have_error:
        logging.exception('Got error while archiving or processing results')
      else:
        raise e
    finally:
      osutils.RmDir(abs_results_dir, ignore_missing=True, sudo=True)
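
The error-handling shape here (raise on a fresh failure, only log when the run has already failed, always clean up) can be isolated as a small sketch; process_results and its arguments are hypothetical.

import logging
import shutil

def process_results(results_dir, process, already_have_error):
  try:
    process(results_dir)
  except Exception:  # pylint: disable=broad-except
    if already_have_error:
      # Testing already failed; don't let archiving errors mask that.
      logging.exception('Got error while processing results')
    else:
      raise
  finally:
    shutil.rmtree(results_dir, ignore_errors=True)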
Ejemplo n.º 55
0
    def _enforce_lock(self, flags, message):
        # Try nonblocking first, if it fails, display the context/message,
        # and then wait on the lock.
        try:
            self.locking_mechanism(self.fd, flags | fcntl.LOCK_NB)
            return
        except EnvironmentError as e:
            if e.errno == errno.EDEADLK:
                self.unlock()
            elif e.errno != errno.EAGAIN:
                raise
        if self.description:
            message = '%s: blocking (LOCK_NB) (%s) while %s' % (
                self.description, self.locktype, message)
        if not self.blocking:
            self.close()
            raise LockNotAcquiredError(message)
        if self._verbose:
            logging.info(message)

        try:
            with _optional_timer_context(self.blocking_timeout):
                self.locking_mechanism(self.fd, flags)
        except timeout_util.TimeoutError:
            description = self.description or 'locking._enforce_lock'
            logging.error(
                'Timed out after waiting %d seconds for blocking lock (%s): %s',
                self.blocking_timeout, self.locktype, description)
            raise
        except EnvironmentError as e:
            if e.errno != errno.EDEADLK:
                message = ('%s: blocking wait failed errno %s' %
                           (self.description, e))
                raise
            self.unlock()
            self.locking_mechanism(self.fd, flags)
        logging.debug('%s: lock has been acquired (%s), continuing.',
                      self.description, self.locktype)
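
The try-nonblocking-first pattern above reduces to two flock calls. A minimal stdlib sketch is below; acquire_lock is a hypothetical helper and omits the deadlock recovery and timeout handling of the real class.

import errno
import fcntl
import logging

def acquire_lock(fd, flags=fcntl.LOCK_EX):
    """Try a nonblocking lock first; fall back to a blocking wait."""
    try:
        fcntl.flock(fd, flags | fcntl.LOCK_NB)
        return
    except OSError as e:
        if e.errno not in (errno.EAGAIN, errno.EWOULDBLOCK):
            raise
    logging.info('lock is held elsewhere; blocking until it is released')
    fcntl.flock(fd, flags)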
Ejemplo n.º 56
0
    def CancelObsoleteSlaveBuilds(self):
        """Cancel the obsolete slave builds scheduled by the previous master."""
        logging.info('Cancelling obsolete slave builds.')

        buildbucket_client = self.GetBuildbucketClient()
        if not buildbucket_client:
            logging.info('No buildbucket_client, not cancelling slaves.')
            return

        # Find the 3 most recent master buildbucket ids.
        master_builds = buildbucket_client.SearchAllBuilds(
            self._run.options.debug,
            buckets=constants.ACTIVE_BUCKETS,
            limit=3,
            tags=[
                'cbb_config:%s' % self._run.config.name,
                'cbb_branch:%s' % self._run.manifest_branch
            ],
            status=constants.BUILDBUCKET_BUILDER_STATUS_COMPLETED)

        slave_ids = []

        # Find the scheduled or started slaves for those master builds.
        master_ids = buildbucket_lib.ExtractBuildIds(master_builds)
        logging.info('Found Previous Master builds: %s', ', '.join(master_ids))
        for master_id in master_ids:
            for status in [
                    constants.BUILDBUCKET_BUILDER_STATUS_SCHEDULED,
                    constants.BUILDBUCKET_BUILDER_STATUS_STARTED
            ]:
                builds = buildbucket_client.SearchAllBuilds(
                    self._run.options.debug,
                    tags=[
                        'buildset:%s' % request_build.SlaveBuildSet(master_id)
                    ],
                    status=status)

                ids = buildbucket_lib.ExtractBuildIds(builds)
                if ids:
                    logging.info(
                        'Found builds %s in status %s from master %s.', ids,
                        status, master_id)
                    slave_ids.extend(ids)

        if slave_ids:
            builder_status_lib.CancelBuilds(slave_ids, buildbucket_client,
                                            self._run.options.debug,
                                            self._run.config)
Ejemplo n.º 57
0
def main(args):
    opts = ParseArgs(args)

    # Build up test suites.
    loader = unittest.TestLoader()
    loader.suiteClass = image_test_lib.ImageTestSuite
    # We use a different prefix here so that unittest does NOT pick up the
    # image tests automatically, because they depend on a proper environment.
    loader.testMethodPrefix = 'Test'
    all_tests = loader.loadTestsFromName('chromite.cros.test.image_test')
    forgiving = image_test_lib.ImageTestSuite()
    non_forgiving = image_test_lib.ImageTestSuite()
    for suite in all_tests:
        for test in suite.GetTests():
            if test.IsForgiving():
                forgiving.addTest(test)
            else:
                non_forgiving.addTest(test)

    # Run them in the image directory.
    runner = image_test_lib.ImageTestRunner()
    runner.SetBoard(opts.board)
    runner.SetResultDir(opts.test_results_root)
    image_file = FindImage(opts.image_dir)
    tmp_in_chroot = path_util.FromChrootPath('/tmp')
    with osutils.TempDir(base_dir=tmp_in_chroot) as temp_dir:
        with osutils.MountImageContext(image_file, temp_dir):
            with osutils.ChdirContext(temp_dir):
                # Run non-forgiving tests first so that exceptions in forgiving tests
                # do not skip any required tests.
                logging.info('Running NON-forgiving tests.')
                result = runner.run(non_forgiving)
                logging.info('Running forgiving tests.')
                runner.run(forgiving)

    if result and not result.wasSuccessful():
        return 1
    return 0
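
Splitting the loaded tests into forgiving and non-forgiving suites only needs the standard TestLoader. A stripped-down sketch follows; 'my_image_tests' and the forgiving attribute are hypothetical stand-ins for the chromite test classes.

import unittest

def partition_tests(module_name='my_image_tests'):
    loader = unittest.TestLoader()
    # Image tests use a 'Test' prefix so normal 'test*' discovery skips them.
    loader.testMethodPrefix = 'Test'
    forgiving, strict = unittest.TestSuite(), unittest.TestSuite()
    for suite in loader.loadTestsFromName(module_name):
        for test in suite:
            if getattr(test, 'forgiving', False):
                forgiving.addTest(test)
            else:
                strict.addTest(test)
    return forgiving, strict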
Ejemplo n.º 58
0
    def BuildAndArchiveAllImages():
      # Generate the recovery image. To conserve loop devices, we try to only
      # run one instance of build_image at a time. TODO(davidjames): Move the
      # image generation out of the archive stage.
      self.LoadArtifactsList(self._current_board, image_dir)

      # If there's no plan to run ArchiveHWQual, VMTest should start asap.
      if not config['images']:
        self.board_runattrs.SetParallel('autotest_tarball_generated', True)

      # For recovery image to be generated correctly, BuildRecoveryImage must
      # run before BuildAndArchiveFactoryImages.
      if 'recovery' in config.images:
        assert os.path.isfile(os.path.join(image_dir, constants.BASE_IMAGE_BIN))
        logging.info('Running commands.BuildRecoveryImage')
        commands.BuildRecoveryImage(buildroot, board, image_dir, extra_env)
        self._recovery_image_status_queue.put(True)
        recovery_image = constants.RECOVERY_IMAGE_BIN
        if not self.IsArchivedFile(recovery_image):
          info = {
              'paths': [recovery_image],
              'input': [recovery_image],
              'archive': 'tar',
              'compress': 'xz'
          }
          self.artifacts.append(info)
      else:
        self._recovery_image_status_queue.put(False)

      if config['images']:
        steps = [
            BuildAndArchiveFactoryImages,
            ArchiveLicenseFile,
            ArchiveHWQual,
            ArchiveStandaloneArtifacts,
            ArchiveZipFiles,
        ]
        parallel.RunParallelSteps(steps)
Ejemplo n.º 59
0
def _PostprocessFiles(directory):
    """Do postprocessing on the generated files.

    Args:
      directory (str): The root directory containing the generated files that
        are to be processed.
    """
    logging.info('Postprocessing: Fix imports.')
    # We are using a negative address here (the /address/! portion of the sed
    # command) to make sure we don't change any imports from protobuf itself.
    address = '^from google.protobuf'
    # Find: 'from x import y_pb2 as x_dot_y_pb2'.
    # "\(^google.protobuf[^ ]*\)" matches the module we're importing from.
    #   - \( and \) are for groups in sed.
    #   - ^google.protobuf prevents changing the import for protobuf's files.
    #   - [^ ] = Not a space. The [:space:] character set is too broad, but would
    #       technically work too.
    find = r'^from \([^ ]*\) import \([^ ]*\)_pb2 as \([^ ]*\)$'
    # Substitute: 'from chromite.api.gen.x import y_pb2 as x_dot_y_pb2'.
    sub = 'from chromite.api.gen.\\1 import \\2_pb2 as \\3'
    from_sed = [
        'sed', '-i',
        '/%(address)s/!s/%(find)s/%(sub)s/g' % {
            'address': address,
            'find': find,
            'sub': sub
        }
    ]

    for dirpath, _dirnames, filenames in os.walk(directory):
        # Update the imports in all the generated _pb2.py files.
        pb2 = [
            os.path.join(dirpath, f) for f in filenames
            if f.endswith('_pb2.py')
        ]
        if pb2:
            cmd = from_sed + pb2
            cros_build_lib.run(cmd, print_cmd=False)
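
The same import fix-up can be done without shelling out to sed, using re.sub over each generated file, with a negative lookahead standing in for the sed /address/! guard. This is a hedged pure-Python sketch; rewrite_imports is a hypothetical name.

import os
import re

# 'from x import y_pb2 as x_dot_y_pb2', excluding protobuf's own modules.
_FIND = re.compile(
    r'^from (?!google\.protobuf)(\S+) import (\S+)_pb2 as (\S+)$',
    re.MULTILINE)
_SUB = r'from chromite.api.gen.\1 import \2_pb2 as \3'

def rewrite_imports(directory):
    for dirpath, _dirnames, filenames in os.walk(directory):
        for name in filenames:
            if not name.endswith('_pb2.py'):
                continue
            path = os.path.join(dirpath, name)
            with open(path, encoding='utf-8') as f:
                text = f.read()
            with open(path, 'w', encoding='utf-8') as f:
                f.write(_FIND.sub(_SUB, text))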
Ejemplo n.º 60
0
    def ShouldSelfDestruct(self):
        """Process builds and relevant changes, decide whether to self-destruct.

        Returns:
          A tuple of (boolean indicating if the master should self-destruct,
                      boolean indicating if the master should self-destruct
                      with success).
        """
        self._ProcessCompletedBuilds()
        self._ProcessMightSubmitChanges()

        logging.info(
            'will_submit set contains %d changes: [%s]\n'
            'might_submit set contains %d changes: [%s]\n'
            'will_not_submit set contains %d changes: [%s]\n',
            len(self.will_submit),
            cros_patch.GetChangesAsString(self.will_submit),
            len(self.might_submit),
            cros_patch.GetChangesAsString(self.might_submit),
            len(self.will_not_submit),
            cros_patch.GetChangesAsString(self.will_not_submit))

        # The master should wait for all the necessary slaves to pass the
        # UploadPrebuiltsStage so the master can publish prebuilts after
        # self-destruction with success. More context: crbug.com/703819
        all_completed_slaves_passed = (
            self._AllCompletedSlavesPassedUploadPrebuiltsStage())
        all_uncompleted_slaves_passed = (
            self._AllUncompletedSlavesPassedUploadPrebuiltsStage())
        should_self_destruct = (bool(not self.might_submit)
                                and (not all_completed_slaves_passed
                                     or all_uncompleted_slaves_passed))
        should_self_destruct_with_success = (bool(not self.might_submit)
                                             and bool(not self.will_not_submit)
                                             and all_completed_slaves_passed
                                             and all_uncompleted_slaves_passed)

        return should_self_destruct, should_self_destruct_with_success