Example #1
        def _GetStatusesFromDB():
            """Helper function that iterates through current statuses."""
            status_dict = self.GetSlaveStatusesFromCIDB(master_build_id)
            for builder in set(builders_array) - set(status_dict.keys()):
                logging.warning("No status found for build config %s.", builder)

            latest_completed = set(
                [
                    b
                    for b, s in status_dict.iteritems()
                    if s in constants.BUILDER_COMPLETED_STATUSES and b in builders_array
                ]
            )
            for builder in sorted(latest_completed - builders_completed):
                logging.info('Build config %s completed with status "%s".', builder, status_dict[builder])
            builders_completed.update(latest_completed)

            if len(builders_completed) < len(builders_array):
                logging.info(
                    "Still waiting for the following builds to complete: %r",
                    sorted(set(builders_array).difference(builders_completed)),
                )
                return None
            else:
                return "Builds completed."
Example #2
  def CollectionExecutionCallback(self):
    """Callback for cherrypy Monitor. Collect checkfiles from the checkdir."""
    # Find all service check file packages.
    _, service_dirs, _ = next(os.walk(self.checkdir))
    for service_name in service_dirs:
      service_package = os.path.join(self.checkdir, service_name)

      # Import the package.
      try:
        file_, path, desc = imp.find_module(service_name, [self.checkdir])
        imp.load_module(service_name, file_, path, desc)
      except Exception as e:
        logging.warning('Failed to import package %s: %s', service_name, e)
        continue

      # Collect all of the service's health checks.
      for file_ in os.listdir(service_package):
        filepath = os.path.join(service_package, file_)
        if os.path.isfile(filepath) and file_.endswith(CHECKFILE_ENDING):
          try:
            healthchecks, mtime = ImportFile(service_name, filepath)
            self.Update(service_name, healthchecks, mtime)
          except Exception as e:
            logging.warning('Failed to import module %s.%s: %s',
                            service_name, file_[:-3], e)

    self.Execute()
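
A note on the import mechanism: the imp module used above has been deprecated since Python 3.4. A hedged sketch of the equivalent package import via importlib; the helper name mirrors the code above but is not part of the original project:

import importlib.util
import os

def LoadServicePackage(checkdir, service_name):
  """Import the |service_name| package found under |checkdir| without imp."""
  init_path = os.path.join(checkdir, service_name, '__init__.py')
  spec = importlib.util.spec_from_file_location(service_name, init_path)
  module = importlib.util.module_from_spec(spec)
  spec.loader.exec_module(module)
  return module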
Example #3
def FindChromeCandidates(package_dir):
  """Return a tuple of chrome's unstable ebuild and stable ebuilds.

  Args:
    package_dir: The path to where the package ebuild is stored.

  Returns:
    Tuple [unstable_ebuild, stable_ebuilds].

  Raises:
    Exception: if no unstable ebuild exists for Chrome.
  """
  stable_ebuilds = []
  unstable_ebuilds = []
  for path in [
      os.path.join(package_dir, entry) for entry in os.listdir(package_dir)]:
    if path.endswith('.ebuild'):
      ebuild = ChromeEBuild(path)
      if not ebuild.chrome_version:
        logging.warning('Poorly formatted ebuild found at %s' % path)
      else:
        if '9999' in ebuild.version:
          unstable_ebuilds.append(ebuild)
        else:
          stable_ebuilds.append(ebuild)

  # Apply some sanity checks.
  if not unstable_ebuilds:
    raise Exception('Missing 9999 ebuild for %s' % package_dir)
  if not stable_ebuilds:
    logging.warning('Missing stable ebuild for %s' % package_dir)

  return portage_util.BestEBuild(unstable_ebuilds), stable_ebuilds
Example #4
def CheckAndGetCIDBCreds(force_update=False, folder=None):
  """Check if CIDB creds exist, download creds if necessary."""
  cache_dir = path_util.GetCacheDir()
  dir_name = folder if folder is not None else 'cidb_creds'
  cidb_dir = os.path.join(cache_dir, dir_name)
  cidb_dir_lock = cidb_dir + '.lock'

  with locking.FileLock(cidb_dir_lock).write_lock():
    if os.path.exists(cidb_dir):
      if force_update:
        shutil.rmtree(cidb_dir, ignore_errors=True)
        logging.debug('Force updating CIDB creds. Deleted %s.', cidb_dir)
      else:
        logging.debug('Using cached credentials %s', cidb_dir)
        return cidb_dir

    os.mkdir(cidb_dir)

    try:
      GetCIDBCreds(cidb_dir)
      return cidb_dir
    except Exception as e:
      if isinstance(e, gs.GSCommandError):
        logging.warning('Please check if the GS credentials are configured '
                        'correctly. Note that the permissions to fetch '
                        'these credentials are for Googlers only.')

      logging.error('Failed to get CIDB credentials. Deleting %s', cidb_dir)
      shutil.rmtree(cidb_dir, ignore_errors=True)
      raise
Example #5
File: vm.py  Project: qlb7707/webrtc_src
  def Start(self):
    """Start VM and wait until we can ssh into it.

    This command is more robust than just naively starting the VM as it will
    try to start the VM multiple times if the VM fails to start up. This is
    inspired by retry_until_ssh in crosutils/lib/cros_vm_lib.sh.
    """
    for _ in range(self.MAX_LAUNCH_ATTEMPTS):
      try:
        self._Start()
      except VMStartupError:
        logging.warning('VM failed to start.')
        continue

      if self.Connect():
        # VM is started up successfully if we can connect to it.
        break

      logging.warning('Cannot connect to VM...')
      self.Stop(ignore_error=True)
      time.sleep(self.TIME_BETWEEN_LAUNCH_ATTEMPTS)
    else:
      raise VMStartupError('Max attempts (%d) to start VM exceeded.'
                           % self.MAX_LAUNCH_ATTEMPTS)

    logging.info('VM started at port %d', self.port)
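
The for/else above does real work: Python runs the else clause only when the loop finishes without hitting a break, i.e. when every launch attempt failed. A stripped-down illustration of the idiom; attempt() and MAX_ATTEMPTS are placeholders, not project code:

MAX_ATTEMPTS = 3
attempts_made = []

def attempt():
  """Stand-in for a VM launch that succeeds on the third try."""
  attempts_made.append(1)
  return len(attempts_made) == 3

for _ in range(MAX_ATTEMPTS):
  if attempt():
    break  # Success: the else clause below is skipped.
else:
  # Runs only if the loop completed without hitting the break.
  raise RuntimeError('all %d attempts failed' % MAX_ATTEMPTS)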
Example #6
    def _HandleFail(self, log_directory, fail_directory):
        """Handles test failures.

        In case of a test failure, copy necessary files, i.e., the GCE tarball and
        ssh private key, to |fail_directory|, which will be later archived and
        uploaded to a GCS bucket by chromite.

        Args:
          log_directory: The root directory where test logs are stored.
          fail_directory: The directory to copy files to.
        """
        parent_dir = os.path.dirname(fail_directory)
        if not os.path.isdir(parent_dir):
            os.makedirs(parent_dir)

        # Copy logs. Must be done before moving image, as this creates
        # |fail_directory|.
        try:
            shutil.copytree(log_directory, fail_directory)
        except shutil.Error as e:
            logging.warning("Ignoring errors while copying logs: %s", e)

        # Copy GCE tarball and ssh private key for debugging.
        try:
            shutil.copy(self.tarball_local, fail_directory)
            if self.ssh_private_key is not None:
                shutil.copy(self.ssh_private_key, fail_directory)
        except shutil.Error as e:
            logging.warning("Ignoring errors while copying GCE tarball: %s", e)

        self._DeleteExistingResources()
Example #7
def _GsUpload(gs_context, acl, local_file, remote_file):
  """Upload to GS bucket.

  Args:
    gs_context: A lib.gs.GSContext instance.
    acl: The ACL to use for uploading the file.
    local_file: The local file to be uploaded.
    remote_file: The remote location to upload to.
  """
  CANNED_ACLS = ['public-read', 'private', 'bucket-owner-read',
                 'authenticated-read', 'bucket-owner-full-control',
                 'public-read-write']
  if acl in CANNED_ACLS:
    gs_context.Copy(local_file, remote_file, acl=acl)
  else:
    # For private uploads we assume that the overlay board is set up properly
    # and a googlestore_acl.xml is present. Otherwise, this script errors.
    # We set version=0 here to ensure that the ACL is set only once (see
    # http://b/15883752#comment54).
    try:
      gs_context.Copy(local_file, remote_file, version=0)
    except gs.GSContextPreconditionFailed as ex:
      # If we received a GSContextPreconditionFailed error, we know that the
      # file exists now, but we don't know whether our specific update
      # succeeded. See http://b/15883752#comment62
      logging.warning(
          'Assuming upload succeeded despite PreconditionFailed errors: %s', ex)

    if acl.endswith('.xml'):
      # Apply the passed in ACL xml file to the uploaded object.
      gs_context.SetACL(remote_file, acl=acl)
    else:
      gs_context.ChangeACL(remote_file, acl_args_file=acl)
Example #8
def SymbolDeduplicator(storage, sym_paths):
  """Filter out symbol files that we've already uploaded

  Using the swarming service, ask it to tell us which symbol files we've already
  uploaded in previous runs and/or by other bots.  If the query fails for any
  reason, we'll just upload all symbols.  This is fine as the symbol server will
  do the right thing and this phase is purely an optimization.

  This code runs in the main thread which is why we can re-use the existing
  storage object.  Saves us from having to recreate one all the time.

  Args:
    storage: An isolateserver.StorageApi object
    sym_paths: List of symbol files to check against the dedupe server

  Returns:
    List of SymbolElement objects that have not been uploaded before
  """
  if not sym_paths:
    return sym_paths

  items = [SymbolItem(x) for x in sym_paths]
  for item in items:
    item.prepare(SymbolItem.ALGO)
  if storage:
    try:
      with timeout_util.Timeout(DEDUPE_TIMEOUT):
        items = storage.contains(items)
      return [SymbolElement(symbol_item=item, opaque_push_state=push_state)
              for (item, push_state) in items.iteritems()]
    except Exception:
      logging.warning('talking to dedupe server failed', exc_info=True)

  return [SymbolElement(symbol_item=item, opaque_push_state=None)
          for item in items]
Example #9
File: git.py  Project: qlb7707/webrtc_src
def GetChromiteTrackingBranch():
  """Returns the remote branch associated with chromite."""
  cwd = os.path.dirname(os.path.realpath(__file__))
  result_ref = GetTrackingBranch(cwd, for_checkout=False, fallback=False)
  if result_ref:
    branch = result_ref.ref
    if branch.startswith('refs/heads/'):
      # Normal scenario.
      return StripRefsHeads(branch)
    # Reaching here means it was refs/remotes/m/blah, or just plain invalid,
    # or that we're on a detached head in a repo not managed by chromite.

  # Manually try the manifest next.
  try:
    manifest = ManifestCheckout.Cached(cwd)
    # Ensure the manifest knows of this checkout.
    if manifest.FindCheckoutFromPath(cwd, strict=False):
      return manifest.manifest_branch
  except EnvironmentError as e:
    if e.errno != errno.ENOENT:
      raise

  # Not a manifest checkout.
  logging.warning(
      "Chromite checkout at %s isn't controlled by repo, nor is it on a "
      'branch (or if it is, the tracking configuration is missing or broken).  '
      'Falling back to assuming the chromite checkout is derived from '
      "'master'; this *may* result in breakage." % cwd)
  return 'master'
Example #10
  def RecordPackagesUnderTest(self, packages_to_build):
    """Records all packages that may affect the board to BuilderRun."""
    deps = dict()
    # Include packages that are built in chroot because they can
    # affect any board.
    packages = ['virtual/target-sdk']
    # Include chromite because we are running cbuildbot.
    packages += ['chromeos-base/chromite']
    try:
      deps.update(commands.ExtractDependencies(self._build_root, packages))

      # Include packages that will be built as part of the board.
      deps.update(commands.ExtractDependencies(self._build_root,
                                               packages_to_build,
                                               board=self._current_board))
    except Exception as e:
      # Dependency extraction may fail due to bad ebuild changes. Let
      # the build continue because we have logic to triage build
      # package failures separately. Note that we only categorize CLs
      # at the package level if dependencies are extracted
      # successfully, so it is safe to ignore the exception.
      logging.warning('Unable to gather packages under test: %s', e)
    else:
      logging.info('Recording packages under test')
      self.board_runattrs.SetParallel('packages_under_test', set(deps.keys()))
Example #11
def _FetchChromePackage(cache_dir, tempdir, gs_path):
  """Get the chrome prebuilt tarball from GS.

  Returns:
    Path to the fetched chrome tarball.
  """
  gs_ctx = gs.GSContext(cache_dir=cache_dir, init_boto=True)
  files = gs_ctx.LS(gs_path)
  files = [found for found in files if
           _UrlBaseName(found).startswith('%s-' % constants.CHROME_PN)]
  if not files:
    raise Exception('No chrome package found at %s' % gs_path)
  elif len(files) > 1:
    # - Users should provide us with a direct link to either a stripped or
    #   unstripped chrome package.
    # - In the case of being provided with an archive directory, where both
    #   stripped and unstripped chrome available, use the stripped chrome
    #   package.
    # - Stripped chrome pkg is chromeos-chrome-<version>.tar.gz
    # - Unstripped chrome pkg is chromeos-chrome-<version>-unstripped.tar.gz.
    files = [f for f in files if 'unstripped' not in f]
    assert len(files) == 1
    logging.warning('Multiple chrome packages found.  Using %s', files[0])

  filename = _UrlBaseName(files[0])
  logging.info('Fetching %s...', filename)
  gs_ctx.Copy(files[0], tempdir, print_cmd=False)
  chrome_path = os.path.join(tempdir, filename)
  assert os.path.exists(chrome_path)
  return chrome_path
Example #12
def RunCommandFuncWrapper(func, msg, *args, **kwargs):
  """Wraps a function that invokes cros_build_lib.RunCommand.

  If the command fails, logs warning |msg| when error_code_ok is set;
  otherwise raises cros_build_lib.RunCommandError with |msg|.

  Args:
    func: The function to call.
    msg: The message to display if the command failed.
    *args: Arguments to pass to |func|.
    **kwargs: Keyword arguments to pass to |func|.

  Returns:
    The result of |func|.

  Raises:
    cros_build_lib.RunCommandError if the command failed and error_code_ok
    is not set.
  """
  error_code_ok = kwargs.pop('error_code_ok', False)
  result = func(*args, error_code_ok=True, **kwargs)
  if result.returncode != 0 and not error_code_ok:
    raise cros_build_lib.RunCommandError(msg, result)

  if result.returncode != 0:
    logging.warning(msg)

  return result
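
A hedged usage sketch; the command and message below are illustrative and not taken from the original call sites:

result = RunCommandFuncWrapper(
    cros_build_lib.RunCommand,
    'board setup command failed',
    ['./setup_board', '--board=amd64-generic'],
    error_code_ok=True)  # On failure, log the warning instead of raising.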
Example #13
  def Run(self):
    """Run the tests."""
    self.options.Freeze()
    commandline.RunInsideChroot(self)

    packages, non_matching = self._GetMatchingPackages(self.options.packages)
    if non_matching:
      cros_build_lib.Die('No packages matching: %s', ' '.join(non_matching))

    packages_with_tests = portage_util.PackagesWithTest(self.sysroot, packages)
    packages_without_tests = packages - packages_with_tests
    if packages_without_tests:
      logging.warning('Ignored the following packages because they were '
                      'missing tests:')
      for p in packages_without_tests:
        logging.warning(p)

    if not packages_with_tests:
      logging.error('Nothing to test.')
      return

    try:
      chroot_util.RunUnittests(self.sysroot, packages_with_tests,
                               verbose=self.options.verbose, retries=0)
    except cros_build_lib.RunCommandError as e:
      cros_build_lib.Die('Unit tests failed: %s' % e)
Example #14
def _GetLinesFromFile(path, line_prefix, line_suffix):
  """Get a unique set of lines from a file, stripping off a prefix and suffix.

  Rejects lines that do not start with |line_prefix| or end with |line_suffix|.
  Returns an empty set if the file at |path| does not exist.
  Discards duplicate lines.

  Args:
    path: path to file.
    line_prefix: prefix of line to look for and strip if found.
    line_suffix: suffix of line to look for and strip if found.

  Returns:
    A set of filtered lines from the file at |path|.
  """
  if not os.path.exists(path):
    return set()

  # Note that there is an opportunity to race with the file system here.
  lines = set()
  for line in osutils.ReadFile(path).splitlines():
    if not line.startswith(line_prefix) or not line.endswith(line_suffix):
      logging.warning('Filtering out malformed line: %s', line)
      continue
    lines.add(line[len(line_prefix):-len(line_suffix)])

  return lines
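
An illustrative round trip, assuming chromite's osutils.WriteFile helper is available; the path and markers are made up:

osutils.WriteFile('/tmp/markers.txt',
                  'PRE one POST\nPRE two POST\nbad line\n')
lines = _GetLinesFromFile('/tmp/markers.txt', 'PRE ', ' POST')
# |lines| is now the set of payloads 'one' and 'two'; the malformed
# third line is logged via logging.warning and skipped.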
Example #15
  def _RunTest(self, test_type, test_results_dir):
    """Run a VM test.

    Args:
      test_type: Any test in constants.VALID_VM_TEST_TYPES
      test_results_dir: The base directory to store the results.
    """
    if test_type == constants.CROS_VM_TEST_TYPE:
      commands.RunCrosVMTest(self._current_board, self.GetImageDirSymlink())
    elif test_type == constants.DEV_MODE_TEST_TYPE:
      commands.RunDevModeTest(
          self._build_root, self._current_board, self.GetImageDirSymlink())
    else:
      if test_type == constants.GCE_VM_TEST_TYPE:
        image_path = os.path.join(self.GetImageDirSymlink(),
                                  constants.TEST_IMAGE_GCE_TAR)
      else:
        image_path = os.path.join(self.GetImageDirSymlink(),
                                  constants.TEST_IMAGE_BIN)
      ssh_private_key = os.path.join(self.GetImageDirSymlink(),
                                     constants.TEST_KEY_PRIVATE)
      if not os.path.exists(ssh_private_key):
        # TODO: Disallow usage of default test key completely.
        logging.warning('Test key was not found in the image directory. '
                        'Default key will be used.')
        ssh_private_key = None

      commands.RunTestSuite(self._build_root,
                            self._current_board,
                            image_path,
                            os.path.join(test_results_dir, 'test_harness'),
                            test_type=test_type,
                            whitelist_chrome_crashes=self._chrome_rev is None,
                            archive_dir=self.bot_archive_root,
                            ssh_private_key=ssh_private_key)
Example #16
def RefreshManifestCheckout(manifest_dir, manifest_repo):
  """Checks out manifest-versions into the manifest directory.

  If a repository is already present, it will be cleansed of any local
  changes and restored to its pristine state, checking out the origin.
  """
  reinitialize = True
  if os.path.exists(manifest_dir):
    result = git.RunGit(manifest_dir, ['config', 'remote.origin.url'],
                        error_code_ok=True)
    if (result.returncode == 0 and
        result.output.rstrip() == manifest_repo):
      logging.info('Updating manifest-versions checkout.')
      try:
        git.RunGit(manifest_dir, ['gc', '--auto'])
        git.CleanAndCheckoutUpstream(manifest_dir)
      except cros_build_lib.RunCommandError:
        logging.warning('Could not update manifest-versions checkout.')
      else:
        reinitialize = False
  else:
    logging.info('No manifest-versions checkout exists at %s', manifest_dir)

  if reinitialize:
    logging.info('Cloning fresh manifest-versions checkout.')
    osutils.RmDir(manifest_dir, ignore_missing=True)
    repository.CloneGitRepo(manifest_dir, manifest_repo)
Example #17
    def _ReadMetadataURL(url):
      # Read the metadata.json URL and parse json into a dict.
      metadata_dict = json.loads(gs_ctx.Cat(url, print_cmd=False))

      # Read the file next to url which indicates whether the metadata has
      # been gathered before, and with what stats version.
      if get_sheets_version:
        gathered_dict = {}
        gathered_url = url + '.gathered'
        if gs_ctx.Exists(gathered_url, print_cmd=False):
          gathered_dict = json.loads(gs_ctx.Cat(gathered_url,
                                                print_cmd=False))

        sheets_version = gathered_dict.get(BuildData.SHEETS_VER_KEY)
      else:
        sheets_version = None

      bd = BuildData(url, metadata_dict, sheets_version=sheets_version)

      if bd.build_number is None:
        logging.warning('Metadata at %s was missing build number.', url)
        m = re.match(r'.*-b([0-9]*)/.*', url)
        if m:
          inferred_number = int(m.groups()[0])
          logging.warning('Inferred build number %d from metadata url.',
                          inferred_number)
          bd.metadata_dict['build-number'] = inferred_number
      if sheets_version is not None:
        logging.debug('Read %s:\n  build_number=%d, sheets v%d', url,
                      bd.build_number, sheets_version)
      else:
        logging.debug('Read %s:\n  build_number=%d, ungathered', url,
                      bd.build_number)

      build_data_per_url[url] = bd
Example #18
def SwarmingRetriableErrorCheck(exception):
  """Check if a swarming error is retriable.

  Args:
    exception: A cros_build_lib.RunCommandError exception.

  Returns:
    True if retriable, otherwise False.
  """
  if not isinstance(exception, cros_build_lib.RunCommandError):
    return False
  result = exception.result
  if not isinstance(result, SwarmingCommandResult):
    return False
  if result.task_summary_json:
    try:
      internal_failure = result.GetValue('internal_failure')
      state = result.GetValue('state')
      if internal_failure and state in RETRIABLE_INTERNAL_FAILURE_STATES:
        logging.warning(
            'Encountered retriable swarming internal failure: %s',
            json.dumps(result.task_summary_json, indent=2))
        return True
    except (IndexError, KeyError) as e:
      logging.warning(
          "Could not determine if exception is retriable. Exception: %s. "
          "Error: %s. Swarming summary json: %s",
          str(exception), str(e),
          json.dumps(result.task_summary_json, indent=2))
  return False
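
A predicate like this is meant to be handed to a generic retry driver. A minimal, self-contained sketch of such a driver; this GenericRetry is illustrative, standing in for the retry helper chromite provides in retry_util:

def GenericRetry(is_retriable, max_retry, functor, *args, **kwargs):
  """Call |functor|, retrying while |is_retriable| accepts the exception."""
  for attempt in range(max_retry + 1):
    try:
      return functor(*args, **kwargs)
    except Exception as e:
      if attempt == max_retry or not is_retriable(e):
        raise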
Example #19
  def Run(self):
    """Runs `cros shell`."""
    self.options.Freeze()
    self._ReadOptions()
    # Nested try blocks so the inner can raise to the outer, which handles
    # overall failures.
    try:
      try:
        return self._StartSsh()
      except remote_access.SSHConnectionError as e:
        # Handle a mismatched host key; mismatched keys are a bit of a pain to
        # fix manually since `ssh-keygen -R` doesn't work within the chroot.
        if e.IsKnownHostsMismatch():
          # The full SSH error message has extra info for the user.
          logging.warning('\n%s', e)
          if self._UserConfirmKeyChange():
            remote_access.RemoveKnownHost(self.ssh_hostname)
            # The user already OK'd so we can skip the additional SSH check.
            self.host_key_checking = 'no'
            return self._StartSsh()
          else:
            raise
        else:
          raise
    except (Exception, KeyboardInterrupt) as e:
      logging.error('\n%s', e)
      logging.error('`cros shell` failed.')
      if self.options.debug:
        raise
Example #20
    def StopWorkingOnPackages(self, packages, use_all=False, use_workon_only=False):
        """Stop working on a list of packages currently marked as locally worked on.

        Args:
          packages: list of package name fragments.  These will be mapped to
              canonical portage atoms via the same process as
              StartWorkingOnPackages().
          use_all: True iff instead of the provided package list, we should just
              stop working on all currently worked on atoms for the system in
              question.
          use_workon_only: True iff instead of the provided package list, we should
              stop working on all currently worked on atoms that define only a
              -9999 ebuild.
        """
        if use_all or use_workon_only:
            atoms = self._GetLiveAtoms(filter_workon=use_workon_only)
        else:
            atoms = self._GetCanonicalAtoms(packages)

        current_atoms = self._GetWorkedOnAtoms()
        stopped_atoms = []
        for atom in atoms:
            if not atom in current_atoms:
                logging.warning("Not working on %s", atom)
                continue

            current_atoms.discard(atom)
            stopped_atoms.append(atom)

        self._SetWorkedOnAtoms(current_atoms)

        if stopped_atoms:
            # Legacy scripts used single quotes in their output, and we carry on this
            # honorable tradition.
            logging.info("Stopped working on '%s' for '%s'", " ".join(stopped_atoms), self._system)
Example #21
  def SendCanaryFailureAlert(self, failing, inflight, no_stat):
    """Send an alert email to summarize canary failures.

    Args:
      failing: The names of the failing builders.
      inflight: The names of the builders that are still running.
      no_stat: The names of the builders that had status None.
    """
    builder_name = 'Canary Master'
    title = '%s has detected build failures:' % builder_name
    msgs = [str(x) for x in self._GetFailedMessages(failing)]
    slaves = self._GetBuildersWithNoneMessages(failing)
    msgs += ['%s failed with unknown reason.' % x for x in slaves]
    msgs += ['%s timed out' % x for x in inflight]
    msgs += ['%s did not start' % x for x in no_stat]
    msgs.insert(0, title)
    msgs.append('You can also view the summary of the slave failures from '
                'the %s stage of %s. Click on the failure message to go '
                'to an individual slave\'s build status page: %s' % (
                    self.name, builder_name, self.ConstructDashboardURL()))
    msg = '\n\n'.join(msgs)
    logging.warning(msg)
    extra_fields = {'X-cbuildbot-alert': 'canary-fail-alert'}
    tree_status.SendHealthAlert(self._run, 'Canary builder failures', msg,
                                extra_fields=extra_fields)
Example #22
  def PerformStage(self):
    # Wait for UploadHWTestArtifacts to generate the payloads.
    if not self.GetParallel('payloads_generated', pretty_name='payloads'):
      cros_build_lib.PrintBuildbotStepWarnings('missing payloads')
      logging.warning('Cannot run HWTest because UploadTestArtifacts failed. '
                      'See UploadTestArtifacts for details.')
      return

    if (self.suite_config.suite == constants.HWTEST_AFDO_SUITE and
        not self._run.attrs.metadata.GetValue('chrome_was_uprevved')):
      logging.info('Chrome was not uprevved. Nothing to do in this stage')
      return

    build = '/'.join([self._bot_id, self.version])
    if self._run.options.remote_trybot and self._run.options.hwtest:
      debug = self._run.options.debug_forced
    else:
      debug = self._run.options.debug

    self._CheckLabStatus()
    commands.RunHWTestSuite(
        build, self.suite_config.suite, self._current_board,
        pool=self.suite_config.pool, num=self.suite_config.num,
        file_bugs=self.suite_config.file_bugs,
        wait_for_results=self.wait_for_results,
        priority=self.suite_config.priority,
        timeout_mins=self.suite_config.timeout_mins,
        retry=self.suite_config.retry,
        max_retries=self.suite_config.max_retries,
        minimum_duts=self.suite_config.minimum_duts,
        suite_min_duts=self.suite_config.suite_min_duts,
        offload_failures_only=self.suite_config.offload_failures_only,
        debug=debug)
Example #23
  def Send(self, message):
    """Send an email via SMTP

    If we get a socket error (e.g. the SMTP server is not listening or
    times out), we will retry a few times.  All socket errors will be
    caught here.

    Args:
      message: A MIMEMultipart() object containing the body of the message.

    Returns:
      True if the email was sent, else False.
    """
    def _Send():
      smtp_client = smtplib.SMTP(self._smtp_server)
      recipients = [s.strip() for s in message['To'].split(',')]
      smtp_client.sendmail(message['From'], recipients, message.as_string())
      smtp_client.quit()

    try:
      retry_util.RetryException(socket.error, self.SMTP_RETRY_COUNT, _Send,
                                sleep=self.SMTP_RETRY_DELAY)
      return True
    except socket.error as e:
      logging.warning('Could not send e-mail from %s to %s via %r: %s',
                      message['From'], message['To'], self._smtp_server, e)
      return False
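
For readers without chromite's retry_util, the retry shape used above reduces to a small stdlib loop; the names and delays here are illustrative only:

import socket
import time

def RetryOnSocketError(func, attempts=3, delay=5):
  """Call |func|, retrying up to |attempts| times on socket.error."""
  for i in range(attempts):
    try:
      return func()
    except socket.error:
      if i == attempts - 1:
        raise
      time.sleep(delay)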
Example #24
  def Run(self):
    files = self.options.files
    if not files:
      # Running with no arguments is allowed to make the repo upload hook
      # simple, but print a warning so that if someone runs this manually
      # they are aware that nothing was linted.
      logging.warning('No files provided to lint.  Doing nothing.')

    errors = multiprocessing.Value('i')
    linter_map = _BreakoutFilesByLinter(files)
    dispatcher = functools.partial(_Dispatcher, errors,
                                   self.options.output, self.options.debug)

    # Special case one file as it's common -- faster to avoid parallel startup.
    if sum([len(x) for _, x in linter_map.iteritems()]) == 1:
      linter, files = linter_map.items()[0]
      dispatcher(linter, files[0])
    else:
      # Run the linter in parallel on the files.
      with parallel.BackgroundTaskRunner(dispatcher) as q:
        for linter, files in linter_map.iteritems():
          for path in files:
            q.put([linter, path])

    if errors.value:
      logging.error('linter found errors in %i files', errors.value)
      sys.exit(1)
Example #25
  def OutputPerfValue(self, description, value, units,
                      higher_is_better=True, graph=None):
    """Record a perf value.

    If graph name is not provided, the test method name will be used as the
    graph name.

    Args:
      description: A string description of the value such as "partition-0". A
        special description "ref" is taken as the reference.
      value: A float value.
      units: A string describing the unit of measurement such as "KB", "meter".
      higher_is_better: A boolean indicating if higher value means better
        performance.
      graph: A string name of the graph this value will be plotted on. If not
        provided, the graph name will take the test method name.
    """
    if not self._result_dir:
      logging.warning('Result directory is not set. Ignoring OutputPerfValue.')
      return
    if graph is None:
      graph = self._testMethodName
    file_name = self._GeneratePerfFileName()
    perf_uploader.OutputPerfValue(file_name, description, value, units,
                                  higher_is_better, graph)
Example #26
def _ConfirmDeploy(num_updates):
  """Returns whether we can continue deployment."""
  if num_updates > _MAX_UPDATES_NUM:
    logging.warning(_MAX_UPDATES_WARNING)
    return cros_build_lib.BooleanPrompt(default=False)

  return True
Example #27
  def _CanRunDevserver(self):
    """We can run devserver on |device|.

    If the stateful partition is corrupted, Python or other packages
    (e.g. cherrypy) needed for rootfs update may be missing on |device|.

    This will also use `ldconfig` to update library paths on the target
    device if it looks like that's causing problems, which is necessary
    for base images.

    Returns:
      True if we can start devserver; False otherwise.
    """
    logging.info('Checking if we can run devserver on the device.')
    devserver_bin = os.path.join(self.device_dev_dir, self.DEVSERVER_FILENAME)
    devserver_check_command = ['python', devserver_bin, '--help']
    try:
      self.device.RunCommand(devserver_check_command)
    except cros_build_lib.RunCommandError as e:
      logging.warning('Cannot start devserver: %s', e)
      if ERROR_MSG_IN_LOADING_LIB in str(e):
        logging.info('Attempting to correct device library paths...')
        try:
          self.device.RunCommand(['ldconfig', '-r', '/'])
          self.device.RunCommand(devserver_check_command)
          logging.info('Library path correction successful.')
          return True
        except cros_build_lib.RunCommandError as e2:
          logging.warning('Library path correction failed: %s', e2)

      return False

    return True
Example #28
def GetUpdatePayloads(path, payload_dir, board=None,
                      src_image_to_delta=None, timeout=60 * 15,
                      static_dir=DEFAULT_STATIC_DIR):
  """Launch devserver to get the update payloads.

  Args:
    path: The xbuddy path.
    payload_dir: The directory to store the payloads. On failure, the devserver
                 log will be copied to |payload_dir|.
    board: The default board to use when |path| is None.
    src_image_to_delta: Image used as the base to generate the delta payloads.
    timeout: Timeout for launching devserver (seconds).
    static_dir: Devserver static dir to use.
  """
  ds = DevServerWrapper(static_dir=static_dir, src_image=src_image_to_delta,
                        board=board)
  req = GenerateXbuddyRequest(path, 'update')
  logging.info('Starting local devserver to generate/serve payloads...')
  try:
    ds.Start()
    url = ds.OpenURL(ds.GetURL(sub_dir=req), timeout=timeout)
    ds.DownloadFile(os.path.join(url, ROOTFS_FILENAME), payload_dir)
    ds.DownloadFile(os.path.join(url, STATEFUL_FILENAME), payload_dir)
  except DevServerException:
    logging.warning(ds.TailLog() or 'No devserver log is available.')
    raise
  else:
    logging.debug(ds.TailLog() or 'No devserver log is available.')
  finally:
    ds.Stop()
    if os.path.exists(ds.log_file):
      shutil.copyfile(ds.log_file,
                      os.path.join(payload_dir, 'local_devserver.log'))
    else:
      logging.warning('Could not find %s', ds.log_file)
Example #29
def ThrottleOrCloseTheTree(announcer, message, internal=None, buildnumber=None,
                           dryrun=False):
  """Throttle or close the tree with |message|.

  By default, this function throttles the tree with an updated
  message. If the tree is not open, it will keep the original
  status (closed, maintenance) and only update the message. This
  ensures that we do not lower the severity of tree closure.

  In the case where the tree is not open, the previous tree status
  message is kept by prepending it to |message|, if possible. This
  ensures that the cause of the previous tree closure remains visible.

  Args:
    announcer: The announcer of the message.
    message: A string to display as part of the tree status.
    internal: Whether the build is internal or not. Append the build type
      if this is set. Defaults to None.
    buildnumber: The build number to append.
    dryrun: If set, generate the message but don't update the tree status.
  """
  # Get current tree status.
  status_dict = _GetStatusDict(CROS_TREE_STATUS_JSON_URL)
  current_status = status_dict.get(TREE_STATUS_STATE)
  current_msg = status_dict.get(TREE_STATUS_MESSAGE)

  status = constants.TREE_THROTTLED
  if (constants.VALID_TREE_STATUSES.index(current_status) >
      constants.VALID_TREE_STATUSES.index(status)):
    # Maintain the current status if it is more severe than throttled.
    status = current_status

  epilogue = ''
  # Don't prepend the current status message if the tree is open.
  if current_status != constants.TREE_OPEN and current_msg:
    # Scan the current message and discard the text by the same
    # announcer.
    chunks = [x.strip() for x in current_msg.split(MESSAGE_DELIMITER)
              if '%s' % announcer not in x.strip()]
    current_msg = MESSAGE_DELIMITER.join(chunks)

    if any(x for x in MESSAGE_KEYWORDS if x.lower() in
           current_msg.lower().split()):
      # The waterfall scans the message for keywords to change the
      # tree status. Don't prepend the current status message if it
      # contains such keywords.
      logging.warning('Cannot prepend the previous tree status message because '
                      'there are keywords that may affect the tree state.')
    else:
      epilogue = current_msg

  if internal is not None:
    # 'i' stands for 'internal', 'p' for 'public'.
    announcer += '-i' if internal else '-p'

  if buildnumber:
    announcer = '%s-%d' % (announcer, buildnumber)

  UpdateTreeStatus(status, message, announcer=announcer, epilogue=epilogue,
                   dryrun=dryrun)
Example #30
def GenerateBlameList(source_repo, lkgm_path, only_print_chumps=False):
  """Generate the blamelist since the specified manifest.

  Args:
    source_repo: Repository object for the source code.
    lkgm_path: Path to LKGM manifest.
    only_print_chumps: If True, only print changes that were chumped.
  """
  handler = git.Manifest(lkgm_path)
  reviewed_on_re = re.compile(r'\s*Reviewed-on:\s*(\S+)')
  author_re = re.compile(r'\s*Author:.*<(\S+)@\S+>\s*')
  committer_re = re.compile(r'\s*Commit:.*<(\S+)@\S+>\s*')
  for rel_src_path, checkout in handler.checkouts_by_path.iteritems():
    project = checkout['name']

    # Handle the case where the repo has been removed from the manifest.
    src_path = source_repo.GetRelativePath(rel_src_path)
    if not os.path.exists(src_path):
      logging.info('Detected repo removed from manifest %s' % project)
      continue

    revision = checkout['revision']
    cmd = ['log', '--pretty=full', '%s..HEAD' % revision]
    try:
      result = git.RunGit(src_path, cmd)
    except cros_build_lib.RunCommandError as ex:
      # Git returns 128 when the revision does not exist.
      if ex.result.returncode != 128:
        raise
      logging.warning('Detected branch removed from local checkout.')
      cros_build_lib.PrintBuildbotStepWarnings()
      return
    current_author = None
    current_committer = None
    for line in unicode(result.output, 'ascii', 'ignore').splitlines():
      author_match = author_re.match(line)
      if author_match:
        current_author = author_match.group(1)

      committer_match = committer_re.match(line)
      if committer_match:
        current_committer = committer_match.group(1)

      review_match = reviewed_on_re.match(line)
      if review_match:
        review = review_match.group(1)
        _, _, change_number = review.rpartition('/')
        items = [
            os.path.basename(project),
            current_author,
            change_number,
        ]
        if current_committer not in ('chrome-bot', 'chrome-internal-fetch',
                                     'chromeos-commit-bot'):
          items.insert(0, 'CHUMP')
        elif only_print_chumps:
          continue
        cros_build_lib.PrintBuildbotLink(' | '.join(items), review)
Example #31
def SetupBuild(options):
  """Set up parameters needed for the build.

  This checks the current environment and options and sets up various things
  needed for the build, including 'base' which holds the base flags for
  passing to the U-Boot Makefile.

  Args:
    options: Command line options

  Returns:
    Base flags to use for U-Boot, as a list.
  """
  # pylint: disable=W0603
  global arch, board, compiler, family, outdir, smdk, uboard, vendor, verbose
  global base_board

  if not verbose:
    verbose = options.verbose != 0

  logging.getLogger().setLevel(options.verbose)

  Log('Building for %s' % options.board)

  # Separate out board_variant string: "peach_pit" becomes "peach", "pit".
  # But don't mess up upstream boards which use _ in their name.
  parts = options.board.split('_')
  if parts[0] in ['daisy', 'peach']:
    board = parts[0]
  else:
    board = options.board

  if board == 'cm_fx6':
    options.dt = None
    options.board = 'imx'
  elif board == 'odroid':
    options.dt = None

  # To allow this to be run from 'cros_sdk'
  if in_chroot:
    os.chdir(os.path.join(src_root, 'third_party', 'u-boot', 'files'))

  base_board = board

  if options.verified:
    base_board = 'chromeos_%s' % base_board

  uboard = UBOARDS.get(options.board)
  if not uboard:
    uboard = UBOARDS.get(base_board, base_board)
  if options.verified:
    uboard = 'chromeos_%s' % uboard
    base_board = uboard
    board = uboard
  Log('U-Boot board is %s, base_board %s' % (uboard, base_board))

  # Pull out some information from the U-Boot boards config file
  family = None
  (PRE_KBUILD, PRE_KCONFIG, KCONFIG) = range(3)
  if os.path.exists('MAINTAINERS'):
    board_format = PRE_KBUILD
  else:
    board_format = PRE_KCONFIG
  with open('boards.cfg') as f:
    for line in f:
      if 'genboardscfg' in line:
        board_format = KCONFIG
      if uboard in line:
        if line[0] == '#':
          continue
        fields = line.split()
        if not fields:
          continue
        arch = fields[1]
        fields += [None, None, None]
        if board_format == PRE_KBUILD:
          smdk = fields[3]
          vendor = fields[4]
          family = fields[5]
        elif board_format in (PRE_KCONFIG, KCONFIG):
          smdk = fields[5]
          vendor = fields[4]
          family = fields[3]
        break
  if not arch:
    cros_build_lib.Die("Selected board '%s' not found in boards.cfg." % board)

  vboot = os.path.join('build', board, 'usr')
  if arch == 'x86':
    family = 'em100'
    if in_chroot:
      compiler = 'i686-pc-linux-gnu-'
    else:
      #compiler = '/opt/i686/bin/i686-unknown-elf-'
      #compiler = '/opt/i386-linux/bin/i386-linux-'
      #compiler = '/opt/gcc-4.6.3-nolibc/x86_64-linux/bin/x86_64-linux-'
      #compiler = 'x86_64-linux-gnu-'
      compiler = '/home/sglass/.buildman-toolchains/gcc-4.9.0-nolibc/x86_64-linux/bin/x86_64-linux-'
  elif arch == 'arm':
    compiler = FindCompiler(arch, 'armv7a-cros-linux-gnueabi-')
  elif arch == 'aarch64':
    compiler = FindCompiler(arch, 'aarch64-cros-linux-gnu-')
    # U-Boot builds both arm and aarch64 with the 'arm' architecture.
    arch = 'arm'
  elif arch == 'sandbox':
    compiler = ''
  else:
    cros_build_lib.Die("Selected arch '%s' not supported." % arch)

  if not options.build:
    options.incremental = True

  cpus = multiprocessing.cpu_count()

  suffix = ''
  #cpus = 1
  base = [
      'make',
      #'-d',
      '-j%d' % cpus,
      'ARCH=%s' % arch,
      'CROSS_COMPILE=%s' % compiler,
      '--no-print-directory',
      'HOSTSTRIP=true',
      'DEV_TREE_SRC=%s-%s' % (family, options.dt),
      'QEMU_ARCH=']

  if options.dtb:
    base.append('DEV_TREE_BIN=%s' % options.dtb)

  if options.verbose < 2:
    base.append('-s')
  elif options.verbose > 2:
    base.append('V=1')

  if options.ro and options.rw:
    cros_build_lib.Die('Cannot specify both --ro and --rw options')
  if options.ro:
    base.append('CROS_RO=1')
    options.small = True
    suffix = '-ro'

  if options.rw:
    base.append('CROS_RW=1')
    options.small = True

  if options.small:
    base.append('CROS_SMALL=1')
    if not suffix:
      suffix = '-sm'
  else:
    base.append('CROS_FULL=1')

  outdir = os.path.join(OUT_DIR, uboard + suffix)
  if not options.in_tree:
    base.append('O=%s' % outdir)

  if options.verified:
    base += [
        'VBOOT=%s' % vboot,
        'MAKEFLAGS_VBOOT=DEBUG=1',
        'QUIET=1',
        'CFLAGS_EXTRA_VBOOT=-DUNROLL_LOOPS',
        'VBOOT_SOURCE=%s/platform/vboot_reference' % src_root]
    base.append('VBOOT_DEBUG=1')

  # Handle the Chrome OS USE_STDINT workaround. Vboot needs <stdint.h> due
  # to a recent change, the need for which I didn't fully understand. But
  # U-Boot doesn't normally use this. We have added an option to U-Boot to
  # enable use of <stdint.h> and without it vboot will fail to build. So we
  # need to enable it where we can. We can't just enable it always since
  # that would prevent this script from building other non-Chrome OS boards
  # with a different (older) toolchain, or Chrome OS boards without vboot.
  # So use USE_STDINT if the toolchain supports it, and not if not. This
  # file was originally part of glibc but has recently migrated to the
  # compiler so it is reasonable to use it with a stand-alone program like
  # U-Boot. At this point the comment has got long enough that we may as
  # well include some poetry, which seems to be sorely lacking in the code base,
  # so this is from Ogden Nash:
  #    To keep your marriage brimming
  #    With love in the loving cup,
  #    Whenever you're wrong, admit it;
  #    Whenever you're right, shut up.
  cmd = [CompilerTool('gcc'), '-ffreestanding', '-x', 'c', '-c', '-']
  result = cros_build_lib.RunCommand(cmd,
                                     input='#include <stdint.h>',
                                     capture_output=True,
                                     **kwargs)
  #if result.returncode == 0
    #base.append('USE_STDINT=1')

  base.append('BUILD_ROM=1')
  if options.trace:
    base.append('FTRACE=1')
  if options.separate:
    base.append('DEV_TREE_SEPARATE=1')

  if options.incremental:
    # Get the correct board for cros_write_firmware
    config_mk = '%s/include/autoconf.mk' % outdir
    if not os.path.exists(config_mk):
      logging.warning('No build found for %s - dropping -i' % board)
      options.incremental = False

  config_mk = 'include/autoconf.mk'
  if os.path.exists(config_mk):
    logging.warning("Warning: '%s' exists, try 'make distclean'" % config_mk)

  # For when U-Boot supports ccache
  # See http://patchwork.ozlabs.org/patch/245079/
  if use_ccache:
    os.environ['CCACHE'] = 'ccache'

  return base
Example #32
def request(url,
            method='GET',
            payload=None,
            params=None,
            headers=None,
            include_auth=False,
            deadline=10,
            max_attempts=4):
    """Sends a REST API request, returns raw unparsed response.

    Retries the request on transient errors for up to |max_attempts| times.

    Args:
      url: url to send the request to.
      method: HTTP method to use, e.g. GET, POST, PUT.
      payload: raw data to put in the request body.
      params: dict with query GET parameters (i.e. ?key=value&key=value).
      headers: additional request headers.
      include_auth: Whether to include an OAuth2 access token.
      deadline: deadline for a single attempt (10 sec by default).
      max_attempts: how many times to retry on errors (4 times by default).

    Returns:
      Buffer with raw response.

    Raises:
      NotFoundError on 404 response.
      AuthError on 401 or 403 response.
      Error on any other non-transient error.
    """
    protocols = ('http://', 'https://')
    assert url.startswith(protocols) and '?' not in url, url
    if params:
        url += '?' + urllib.parse.urlencode(params)

    headers = (headers or {}).copy()

    if include_auth:
        tok = auth.GetAccessToken(
            service_account_json=constants.CHROMEOS_SERVICE_ACCOUNT)
        headers['Authorization'] = 'Bearer %s' % tok

    if payload is not None:
        assert isinstance(payload, (six.string_types, six.binary_type)), \
            type(payload)
        assert method in ('CREATE', 'POST', 'PUT'), method

    attempt = 0
    response = None
    last_status_code = None
    http = httplib2.Http(cache=None, timeout=deadline)
    http.follow_redirects = False
    while attempt < max_attempts:
        if attempt:
            logging.info('Retrying: %s %s', method, url)
        attempt += 1
        try:
            response, content = httprequest(http,
                                            uri=url,
                                            method=method,
                                            headers=headers,
                                            body=payload)
        except httplib2.HttpLib2Error as e:
            # Transient network error or URL fetch service RPC deadline.
            logging.warning('%s %s failed: %s', method, url, e)
            continue

        last_status_code = response.status

        # Transient error on the other side.
        if is_transient_error(response, url):
            logging.warning('%s %s failed with HTTP %d\nHeaders: %r\nBody: %r',
                            method, url, response.status, response, content)
            continue

        # Non-transient error.
        if 300 <= response.status < 500:
            logging.warning('%s %s failed with HTTP %d\nHeaders: %r\nBody: %r',
                            method, url, response.status, response, content)
            raise _error_class_for_status(response.status)(
                'Failed to call %s: HTTP %d' % (url, response.status),
                response.status,
                content,
                headers=response)

        # Success. Beware of large responses.
        if len(content) > 1024 * 1024:
            logging.warning('Response size: %.1f KiB', len(content) / 1024.0)
        return content

    raise _error_class_for_status(last_status_code)(
        'Failed to call %s after %d attempts' % (url, max_attempts),
        response.status if response else None,
        content if response else None,
        headers=response if response else None)
Example #33
def WriteFirmware(options):
  """Write firmware to the board.

  This uses cros_bundle_firmware to create a firmware image and write it to
  the board.

  Args:
    options: Command line options
  """
  global base_board

  flash = []
  kernel = []
  run = []
  secure = []
  servo = []
  silent = []
  verbose_arg = []
  ro_uboot = []
  imx = []
  tz = []

  bl2 = ['--bl2', '%s/spl/%s-spl.bin' % (outdir, smdk)]
  bl2 += ['--add-blob', 'bl2', '%s/spl/%s-spl.bin' % (outdir, smdk)]
  bl2 += ['--add-blob', 'rom', 'board/google/chromebook_link/pci8086,0166.bin']

  if options.use_defaults:
    if base_board == 'odroid':
      bl1 = ['--bl1', 'sd_fuse/hardkernel/bl1.bin.hardkernel']
      #bl1 = ['--bl1', 'pit.bl1.bin']
      bl2 = ['--add-blob', 'bl2', 'sd_fuse/hardkernel/bl2.bin.hardkernel']
      tz = ['--add-blob', 'tz', 'sd_fuse/hardkernel/tzsw.bin.hardkernel']
    else:
      bl1 = []
    bmpblk = []
    ecro = []
    ecrw = []
    defaults = []
  else:
    bl1 = ['--bl1', '##/build/%s/firmware/u-boot.bl1.bin' % options.board]
    bmpblk = ['--bmpblk', '##/build/%s/firmware/bmpblk.bin' % options.board]
    ecro = ['--ecro', '##/build/%s/firmware/ec.RO.bin' % options.board]
    ecrw = ['--ec', '##/build/%s/firmware/ec.RW.bin' % options.board]
    defaults = ['-D']

  if arch == 'x86':
    seabios = ['--seabios',
               '##/build/%s/firmware/seabios.cbfs' % options.board]
  else:
    seabios = []

  if options.sdcard:
    dest = 'sd:.'
  elif arch == 'x86':
    dest = 'em100'
  elif arch == 'sandbox':
    dest = ''
  else:
    dest = 'usb'

  port = SERVO_PORT.get(options.board, '')
  if port:
    servo = ['--servo', '%d' % port]

  flash_method = family
  if flash_method.startswith('tegra'):
    flash_method = 'tegra'

  if options.flash:
    flash = ['-F', 'spi']

    # The small builds don't have the command line interpreter so cannot
    # run the magic flasher script. So use the standard U-Boot in this
    # case.
    if options.small:
      logging.warning('Using standard U-Boot as flasher')
      flash += ['-U', '##/build/%s/firmware/u-boot.bin' % options.board]

  if options.mmc:
    flash = ['-F', 'sdmmc']

  if options.verbose:
    verbose_arg = ['-v', '%s' % options.verbose]

  if options.secure:
    secure += ['--bootsecure', '--bootcmd', 'vboot_go_auto']

  if not options.verified:
    # Make a small image, without GBB, etc.
    secure.append('-s')

  if options.kernel:
    kernel = ['--kernel', '##/build/%s/boot/vmlinux.uimg' % options.board]

  if not options.console:
    silent = ['--add-config-int', 'silent-console', '1']

  if not options.run:
    run = ['--bootcmd', 'none']

  coreboot = []
  #coreboot = ['-C', '##/build/link/firmware/coreboot.rom.serial']
  #coreboot += ['-K', '##/build/link/firmware/coreboot.rom.serial']
  if arch != 'sandbox' and not in_chroot and servo:
    if dest == 'usb':
      logging.warning('Image cannot be written to board')
      dest = ''
      servo = []
    elif dest == 'em100':
      logging.warning('Please reset the board manually to boot firmware')
      servo = []

    if not servo:
      logging.warning('(sadly dut-control does not work outside chroot)')

  if dest:
    dest = ['-w', dest]
  else:
    dest = []
  if not in_chroot or options.board == 'seaboard':
    servo = ['--servo', 'none']

  soc = SOCS.get(options.board)
  print(soc, options.board)
  if not soc:
    soc = SOCS.get(board)
  if not soc:
    soc = SOCS.get(uboard, '')
  if options.board == 'panther':
    soc = 'chromebox_'
  if options.dt is None:
    dts_file = 'none'
  elif options.dt == 'default':
    dt_name = DEFAULT_DTS.get(options.board, options.board)
    dts_file = 'board/%s/dts/%s%s.dts' % (vendor, soc, dt_name)
    if not os.path.exists(dts_file):
      dts_file = 'arch/%s/dts/%s%s.dts' % (arch, soc, dt_name)
  else:
    dts_file = '%s/arch/%s/dts/%s%s.dtb' % (outdir, arch, soc, options.dt)
    if not os.path.exists(dts_file):
      dts_file = '%s/arch/%s/dts/%s.dtb' % (outdir, arch, options.dt)
  Log('Device tree: %s' % dts_file)

  if base_board == 'exynos5-dt':
    args = ['fdtget', dts_file, '/flash/pre-boot', 'filename']
    result = cros_build_lib.RunCommand(args, redirect_stdout=True, **kwargs)
    bl1_name = result.output.strip()
    bl1 = ['--bl1', 'board/%s/%s/%s' % (vendor, smdk, bl1_name)]

  if arch == 'sandbox':
    uboot_fname = '%s/u-boot' % outdir
  else:
    uboot_fname = '%s/u-boot.bin' % outdir

  if options.ro:
    # RO U-Boot is passed through as blob 'ro-boot'. We use the standard
    # ebuild one as RW.
    # TODO([email protected]): Option to build U-Boot a second time to get
    # a fresh RW U-Boot.
    logging.warning('Using standard U-Boot for RW')
    ro_uboot = ['--add-blob', 'ro-boot', uboot_fname]
    #uboot_fname = '##/build/%s/firmware/u-boot.bin' % options.board
    uboot_fname = os.path.join(OUT_DIR, uboard, 'u-boot.bin')

#  sm_uboot_fname = '##/build/%s/firmware/u-boot-small.bin' % options.board
  sm_uboot_fname = os.path.join(OUT_DIR, uboard + '-sm', 'u-boot.bin')
  sm_uboot = ['--add-blob', 'sm-boot', sm_uboot_fname]
  cbf = ['%s/platform/dev/host/cros_bundle_firmware' % src_root,
         '-b', options.board,
         '-I', 'arch/%s/dts' % arch, '-I', 'cros/dts',
         '-u', uboot_fname,
         '-O', '%s/out' % outdir,
         '-M', flash_method]
  if dts_file:
    cbf += ['-d', dts_file]
  dirname = os.path.dirname(uboot_fname)
  if options.board == 'imx':
    root, _ = os.path.splitext(uboot_fname)
    imx = ['--add-blob', 'img', root + '.img',
           '--add-blob', 'imx-cfg', os.path.join('board', vendor,
                                                 base_board, 'imximage.cfg')]
  spl = ['--add-blob', 'spl', os.path.join(dirname, 'spl', 'u-boot-spl.bin')]

  for other in [bl1, bl2, bmpblk, defaults, dest, ecro, ecrw, flash, kernel,
                run, seabios, secure, servo, silent, verbose_arg, ro_uboot,
                sm_uboot, imx, tz, spl, coreboot]:
    if other:
      cbf += other
  if options.cbfargs:
    for item in options.cbfargs:
      cbf += item.split(' ')
  os.environ['PYTHONPATH'] = ('%s/platform/dev/host/lib:%s/..' %
                              (src_root, src_root))
  Log(' '.join(cbf))
  result = cros_build_lib.RunCommand(cbf, **kwargs)
  if result.returncode:
    cros_build_lib.Die('cros_bundle_firmware failed')

  if not dest or not result.returncode:
    logging.info('Image is available at %s/out/image.bin' % outdir)
  else:
    if result.returncode:
      cros_build_lib.Die('Failed to write image to board')
    else:
      logging.info('Image written to board with %s' % ' '.join(dest + servo))
Example #34
def main(argv):
    parser = GetParser()
    options = parser.parse_args(argv)

    repo_dir = git.FindRepoDir(os.getcwd())
    if not repo_dir:
        parser.error("This script must be invoked from within a repository "
                     "checkout.")

    options.git_config = os.path.join(repo_dir, 'manifests.git', 'config')
    options.repo_dir = repo_dir
    options.local_manifest_path = os.path.join(repo_dir, 'local_manifest.xml')
    # This constant is used only when we're doing an upgrade away from
    # minilayout.xml to default.xml.
    options.default_manifest_path = os.path.join(repo_dir, 'manifests',
                                                 'default.xml')
    options.manifest_sym_path = os.path.join(repo_dir, 'manifest.xml')

    active_manifest = os.path.basename(os.readlink(options.manifest_sym_path))
    upgrade_required = active_manifest == 'minilayout.xml'

    if options.command == 'upgrade-minilayout':
        if not upgrade_required:
            print("This repository checkout isn't using minilayout.xml; "
                  "nothing to do")
        else:
            _UpgradeMinilayout(options)
        return 0
    elif upgrade_required:
        logging.warning(
            "Your repository checkout is using the old minilayout.xml workflow; "
            "auto-upgrading it.")
        main(['upgrade-minilayout'])

    # For now, we only support the add command.
    assert options.command == 'add'
    if options.workon:
        if options.path is not None:
            parser.error('Adding a workon project does not take a path.')
    else:
        if options.remote is None:
            parser.error('Adding non-workon projects requires a remote.')
        if options.path is None:
            parser.error('Adding non-workon projects requires a path.')
    name = options.project
    path = options.path

    revision = options.revision
    if revision is not None:
        if (not git.IsRefsTags(revision) and not git.IsSHA1(revision)):
            revision = git.StripRefsHeads(revision, False)

    main_manifest = Manifest.FromPath(options.manifest_sym_path,
                                      empty_if_missing=False)
    local_manifest = Manifest.FromPath(options.local_manifest_path)

    main_element = main_manifest.GetProject(name, path=path)

    if options.workon:
        if main_element is None:
            parser.error('No project named %r in the default manifest.' % name)
        _AddProjectsToManifestGroups(options, main_element.attrib['name'])

    elif main_element is not None:
        if options.remote is not None:
            # The project already exists in the manifest, so it likely wasn't
            # meant to be added as a remote; treat it as a workon request.
            print(
                "Project already exists in manifest. Using that as the workon "
                "project."
            )
            _AddProjectsToManifestGroups(options, main_element.attrib['name'])
        else:
            # Conflict will occur; complain.
            parser.error(
                "Requested project name=%r path=%r will conflict with "
                "your current manifest %s" % (name, path, active_manifest))

    elif local_manifest.GetProject(name, path=path) is not None:
        parser.error("Requested project name=%r path=%r conflicts with "
                     "your local_manifest.xml" % (name, path))

    else:
        element = local_manifest.AddNonWorkonProject(name=name,
                                                     path=path,
                                                     remote=options.remote,
                                                     revision=revision)
        _AddProjectsToManifestGroups(options, element.attrib['name'])

        with open(options.local_manifest_path, 'w') as f:
            f.write(local_manifest.ToString())
    return 0
Example #35
    def _GetCanonicalAtom(self, package, find_stale=False):
        """Transform a package name or name fragment to the canonical atom.

    If there are multiple atoms that a package name fragment could map to,
    picks an arbitrary one and prints a warning.

    Args:
      package: string package name or fragment of a name.
      find_stale: if True, allow a stale (missing) worked-on package.

    Returns:
      string canonical atom name (e.g. 'sys-apps/dbus')
    """
        # Attempt to not hit portage if at all possible for speed.
        if package in self._GetWorkedOnAtoms():
            return package

        # Ask portage directly what it thinks about that package.
        ebuild_path = self._FindEbuildForPackage(package)

        # If portage didn't know about that package, try and autocomplete it.
        if ebuild_path is None:
            possible_ebuilds = set()
            for ebuild in (portage_util.EbuildToCP(ebuild)
                           for ebuild in self._GetWorkonEbuilds(
                               filter_on_arch=False)):
                if package in ebuild:
                    possible_ebuilds.add(ebuild)

            # Also autocomplete from the worked-on list, in case the ebuild was
            # deleted.
            if find_stale:
                for ebuild in self._GetWorkedOnAtoms():
                    if package in ebuild:
                        possible_ebuilds.add(ebuild)

            if not possible_ebuilds:
                logging.warning('Could not find canonical package for "%s"',
                                package)
                return None

            # We want some consistent order for making our selection below.
            possible_ebuilds = sorted(possible_ebuilds)

            if len(possible_ebuilds) > 1:
                logging.warning('Multiple autocompletes found:')
                for possible_ebuild in possible_ebuilds:
                    logging.warning('  %s', possible_ebuild)
            autocompleted_package = portage_util.EbuildToCP(
                possible_ebuilds[0])
            # Sanity check to avoid infinite loop.
            if package == autocompleted_package:
                logging.error('Resolved %s to itself', package)
                return None
            logging.info('Autocompleted "%s" to: "%s"', package,
                         autocompleted_package)

            return self._GetCanonicalAtom(autocompleted_package)

        if not _IsWorkonEbuild(True, ebuild_path):
            msg = (
                'In order to cros_workon a package, it must have a -9999 ebuild '
                'that inherits from cros-workon.\n')
            if '-9999' in ebuild_path:
                msg += ('"%s" is a -9999 ebuild, make sure it inherits from '
                        'cros-workon.\n' % ebuild_path)
            else:
                msg += '"%s" is not a -9999 ebuild.\n' % ebuild_path

            logging.warning(msg)
            return None

        return portage_util.EbuildToCP(ebuild_path)
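The autocomplete step in this example is plain substring matching over the candidate atoms, sorted so the arbitrary pick is at least stable. The selection logic in isolation (sample data made up for illustration):

import logging

def autocomplete_atom(fragment, candidates):
    """Pick a canonical atom for a name fragment, warning on ambiguity."""
    matches = sorted(c for c in candidates if fragment in c)
    if not matches:
        logging.warning('Could not find canonical package for "%s"', fragment)
        return None
    if len(matches) > 1:
        logging.warning('Multiple autocompletes found: %s', matches)
    return matches[0]

print(autocomplete_atom('dbus', ['sys-apps/dbus', 'dev-libs/dbus-glib']))
# sys-apps/dbus -- sorting makes the otherwise arbitrary choice deterministic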
Example #36
  def ScheduleSlaveBuildsViaBuildbucket(self, important_only=False,
                                        dryrun=False):
    """Schedule slave builds by sending PUT requests to Buildbucket.

    Args:
      important_only: Whether to schedule only important slave builds; defaults
        to False.
      dryrun: Whether this is a dry run; defaults to False.
    """
    if self.buildbucket_client is None:
      logging.info('No buildbucket_client. Skip scheduling slaves.')
      return

    build_id, db = self._run.GetCIDBHandle()
    if build_id is None:
      logging.info('No build id. Skip scheduling slaves.')
      return

    # May be None. This is okay.
    master_buildbucket_id = self._run.options.buildbucket_id

    buildset_tag = 'cbuildbot/%s/%s/%s' % (
        self._run.manifest_branch, self._run.config.name, build_id)

    scheduled_important_slave_builds = []
    scheduled_experimental_slave_builds = []
    unscheduled_slave_builds = []
    scheduled_build_reqs = []

    # Get all active slave build configs.
    slave_config_map = self._GetSlaveConfigMap(important_only)
    for slave_config_name, slave_config in slave_config_map.iteritems():
      try:
        buildbucket_id, created_ts = self.PostSlaveBuildToBuildbucket(
            slave_config_name, slave_config, build_id, master_buildbucket_id,
            buildset_tag, dryrun=dryrun)
        request_reason = None

        if slave_config.important:
          scheduled_important_slave_builds.append(
              (slave_config_name, buildbucket_id, created_ts))
          request_reason = build_requests.REASON_IMPORTANT_CQ_SLAVE
        else:
          scheduled_experimental_slave_builds.append(
              (slave_config_name, buildbucket_id, created_ts))
          request_reason = build_requests.REASON_EXPERIMENTAL_CQ_SLAVE

        scheduled_build_reqs.append(build_requests.BuildRequest(
            None, build_id, slave_config_name, None, buildbucket_id,
            request_reason, None))
      except buildbucket_lib.BuildbucketResponseException as e:
        # Use 16-digit ts to be consistent with the created_ts from Buildbucket
        current_ts = int(round(time.time() * 1000000))
        unscheduled_slave_builds.append((slave_config_name, None, current_ts))
        if important_only or slave_config.important:
          raise
        else:
          logging.warning('Failed to schedule %s current timestamp %s: %s'
                          % (slave_config_name, current_ts, e))

    if config_lib.IsMasterCQ(self._run.config) and db and scheduled_build_reqs:
      db.InsertBuildRequests(scheduled_build_reqs)

    self._run.attrs.metadata.ExtendKeyListWithList(
        constants.METADATA_SCHEDULED_IMPORTANT_SLAVES,
        scheduled_important_slave_builds)
    self._run.attrs.metadata.ExtendKeyListWithList(
        constants.METADATA_SCHEDULED_EXPERIMENTAL_SLAVES,
        scheduled_experimental_slave_builds)
    self._run.attrs.metadata.ExtendKeyListWithList(
        constants.METADATA_UNSCHEDULED_SLAVES, unscheduled_slave_builds)
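The `current_ts` computed in the exception handler is microseconds since the epoch, which gives a 16-digit integer for present-day dates and so lines up with the `created_ts` values Buildbucket returns:

import time

current_ts = int(round(time.time() * 1000000))
print(current_ts)            # e.g. 1514764800000000
print(len(str(current_ts)))  # 16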
Example #37
    def Wait(self):
        """Wait for the task to complete.

    Output from the task is printed as it runs.

    If exceptions occurred, returns a list of exception info entries (each
    including its traceback); otherwise returns an empty list.
    """
        try:
            # Flush stdout and stderr to be sure no output is interleaved.
            sys.stdout.flush()
            sys.stderr.flush()

            # File position pointers are shared across processes, so we must open
            # our own file descriptor to ensure output is not lost.
            self._WaitForStartup()
            silent_death_time = time.time() + self.SILENT_TIMEOUT
            results = []
            with open(self._output.name, 'r') as output:
                pos = 0
                running, exited_cleanly, task_errors, run_errors = (True,
                                                                    False, [],
                                                                    [])
                while running:
                    # Check whether the process is still alive.
                    running = self.is_alive()

                    try:
                        errors, results = \
                            self._queue.get(True, self.PRINT_INTERVAL)
                        if errors:
                            task_errors.extend(errors)

                        running = False
                        exited_cleanly = True
                    except Queue.Empty:
                        pass

                    if not running:
                        # Wait for the process to actually exit. If the child doesn't exit
                        # in a timely fashion, kill it.
                        self.join(self.EXIT_TIMEOUT)
                        if self.exitcode is None:
                            msg = '%r hung for %r seconds' % (
                                self, self.EXIT_TIMEOUT)
                            run_errors.extend(
                                failures_lib.CreateExceptInfo(
                                    ProcessExitTimeout(msg), ''))
                            self._KillChildren([self])
                        elif not exited_cleanly:
                            msg = ('%r exited unexpectedly with code %s' %
                                   (self, self.exitcode))
                            run_errors.extend(
                                failures_lib.CreateExceptInfo(
                                    ProcessUnexpectedExit(msg), ''))

                    # Read output from process.
                    output.seek(pos)
                    buf = output.read(_BUFSIZE)

                    if buf:
                        silent_death_time = time.time() + self.SILENT_TIMEOUT
                    elif running and time.time() > silent_death_time:
                        msg = ('No output from %r for %r seconds' %
                               (self, self.SILENT_TIMEOUT))
                        run_errors.extend(
                            failures_lib.CreateExceptInfo(
                                ProcessSilentTimeout(msg), ''))
                        self._KillChildren([self])

                        # Read remaining output from the process.
                        output.seek(pos)
                        buf = output.read(_BUFSIZE)
                        running = False

                    # Print output so far.
                    while buf:
                        sys.stdout.write(buf)
                        pos += len(buf)
                        if len(buf) < _BUFSIZE:
                            break
                        buf = output.read(_BUFSIZE)

                    # Print error messages if anything exceptional occurred.
                    if run_errors:
                        logging.PrintBuildbotStepFailure()
                        traceback.print_stack()
                        logging.warning('\n'.join(x.str for x in run_errors
                                                  if x))
                        logging.info('\n'.join(x.str for x in task_errors
                                               if x))

                    sys.stdout.flush()
                    sys.stderr.flush()

            # Propagate any results.
            for result in results:
                results_lib.Results.Record(*result)

        finally:
            self.Cleanup(silent=True)

        # If an error occurred, return it.
        return run_errors + task_errors
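The polling loop above tails the child's output file by remembering the offset it has already printed (`pos`) and re-seeking on each pass, so nothing is printed twice even though the file keeps growing. That technique reduced to its core (a sketch, independent of the parallel machinery):

import os
import sys
import tempfile

def drain(path, pos, bufsize=1024):
    """Print anything written to |path| beyond offset |pos|; return new offset."""
    with open(path, 'r') as output:
        output.seek(pos)
        buf = output.read(bufsize)
        while buf:
            sys.stdout.write(buf)
            pos += len(buf)
            if len(buf) < bufsize:
                break
            buf = output.read(bufsize)
    return pos

fd, path = tempfile.mkstemp()
os.close(fd)
with open(path, 'w') as f:
    f.write('hello ')
pos = drain(path, 0)    # prints 'hello '
with open(path, 'a') as f:
    f.write('world\n')
pos = drain(path, pos)  # prints only 'world'
os.unlink(path)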
Example #38
  def CQMasterHandleFailure(self, failing, inflight, no_stat):
    """Handle changes in the validation pool upon build failure or timeout.

    This function determines whether to reject CLs and what CLs to
    reject based on the category of the failures and whether the
    sanity check builder(s) passed.

    Args:
      failing: Names of the builders that failed.
      inflight: Names of the builders that timed out.
      no_stat: Set of builder names of slave builders that had status None.
    """
    messages = self._GetFailedMessages(failing)
    self.SendInfraAlertIfNeeded(failing, inflight, no_stat)

    changes = self.sync_stage.pool.applied

    do_partial_submission = self._ShouldSubmitPartialPool()

    if do_partial_submission:
      changes_by_config = self.GetRelevantChangesForSlaves(changes, no_stat)
      subsys_by_config = self.GetSubsysResultForSlaves()

      # Even if there was a failure, we can submit the changes that indicate
      # that they don't care about this failure.
      changes = self.sync_stage.pool.SubmitPartialPool(
          changes, messages, changes_by_config, subsys_by_config,
          failing, inflight, no_stat)
    else:
      logging.warning('Not doing any partial submission, due to critical stage '
                      'failure(s).')
      title = 'CQ encountered a critical failure.'
      msg = ('CQ encountered a critical failure, and hence skipped '
             'board-aware submission. See %s' % self.ConstructDashboardURL())
      tree_status.SendHealthAlert(self._run, title, msg)

    sanity_check_slaves = set(self._run.config.sanity_check_slaves)
    tot_sanity = self._ToTSanity(sanity_check_slaves, self._slave_statuses)

    if not tot_sanity:
      # Sanity check slave failure may have been caused by bug(s)
      # in ToT or broken infrastructure. In any of those cases, we
      # should not reject any changes.
      logging.warning('Detected that a sanity-check builder failed. '
                      'Will not reject any changes.')

    # If the tree was not open when we acquired a pool, do not assume that
    # tot was sane.
    if not self.sync_stage.pool.tree_was_open:
      logging.info('The tree was not open when changes were acquired so we are '
                   'attributing failures to the broken tree rather than the '
                   'changes.')
      tot_sanity = False

    if inflight:
      # Some slave(s) timed out due to unknown causes, so only reject infra
      # changes (probably just chromite changes).
      self.sync_stage.pool.HandleValidationTimeout(sanity=tot_sanity,
                                                   changes=changes)
      return

    # Some builder failed, or some builder did not report stats, or
    # the intersection of both. Let HandleValidationFailure decide
    # what changes to reject.
    self.sync_stage.pool.HandleValidationFailure(
        messages, sanity=tot_sanity, changes=changes, no_stat=no_stat)
Example #39
    def Copy(self, src_base, dest_base, path, sloppy=False):
        """Copy artifact(s) from source directory to destination.

    Args:
      src_base: The directory to apply the src glob pattern match in.
      dest_base: The directory to copy matched files to; |Path.dest| is
        interpreted relative to this directory.
      path: A Path instance that specifies what is to be copied.
      sloppy: If set, ignore when mandatory artifacts are missing.

    Returns:
      A list of the artifacts copied.
    """
        copied_paths = []
        src = os.path.join(src_base, path.src)
        if not src.endswith('/') and os.path.isdir(src):
            raise MustNotBeDirError('%s must not be a directory\n'
                                    'Aborting copy...' % (src, ))
        paths = glob.glob(src)
        if not paths:
            if path.optional:
                logging.debug('%s does not exist and is optional.  Skipping.',
                              src)
            elif sloppy:
                logging.warning(
                    '%s does not exist and is required.  Skipping anyway.',
                    src)
            else:
                msg = ('%s does not exist and is required.\n'
                       'You can bypass this error with --sloppy.\n'
                       'Aborting copy...' % src)
                raise MissingPathError(msg)
        elif len(paths) > 1 and path.dest and not path.dest.endswith('/'):
            raise MultipleMatchError(
                'Glob pattern %r has multiple matches, but dest %s '
                'is not a directory.\n'
                'Aborting copy...' % (path.src, path.dest))
        else:
            for p in paths:
                rel_src = os.path.relpath(p, src_base)
                if path.IsBlacklisted(rel_src):
                    continue
                if path.dest is None:
                    rel_dest = rel_src
                elif path.dest.endswith('/'):
                    rel_dest = os.path.join(path.dest, os.path.basename(p))
                else:
                    rel_dest = path.dest
                assert not rel_dest.endswith('/')
                dest = os.path.join(dest_base, rel_dest)

                copied_paths.append(p)
                self.Log(p, dest, os.path.isdir(p))
                if os.path.isdir(p):
                    for sub_path in osutils.DirectoryIterator(p):
                        rel_path = os.path.relpath(sub_path, p)
                        sub_dest = os.path.join(dest, rel_path)
                        if path.IsBlacklisted(rel_path):
                            continue
                        if sub_path.endswith('/'):
                            osutils.SafeMakedirs(sub_dest, mode=self.dir_mode)
                        else:
                            self._CopyFile(sub_path, sub_dest, path)
                else:
                    self._CopyFile(p, dest, path)

        return copied_paths
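The destination rules in `Copy` hinge on whether `path.dest` ends with '/': a trailing slash means "place matches inside this directory", anything else is a literal file name (and therefore only valid for a single glob match). Just that naming rule, extracted as a sketch:

import os

def dest_name(rel_src, dest):
    """Map a matched source path to its destination name."""
    if dest is None:
        return rel_src                                   # keep relative name
    if dest.endswith('/'):
        return os.path.join(dest, os.path.basename(rel_src))  # into directory
    return dest                                          # exact file name

print(dest_name('bar/bar.sym', None))       # bar/bar.sym
print(dest_name('bar/bar.sym', 'syms/'))    # syms/bar.sym
print(dest_name('bar/bar.sym', 'out.sym'))  # out.sym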
Example #40
def GenerateBuildAlert(build,
                       stages,
                       exceptions,
                       messages,
                       annotations,
                       siblings,
                       severity,
                       now,
                       db,
                       logdog_client,
                       milo_client,
                       allow_experimental=False):
    """Generate an alert for a single build.

  Args:
    build: Dictionary of build details from CIDB.
    stages: A list of dictionaries of stage details from CIDB.
    exceptions: A list of instances of failure_message_lib.StageFailure.
    messages: A list of build message dictionaries from CIDB.
    annotations: A list of dictionaries of build annotations from CIDB.
    siblings: A list of dictionaries of build details from CIDB.
    severity: Sheriff-o-Matic severity to use for the alert.
    now: Current datetime.
    db: cidb.CIDBConnection object.
    logdog_client: logdog.LogdogClient object.
    milo_client: milo.MiloClient object.
    allow_experimental: Boolean if non-important builds should be included.

  Returns:
    som.Alert object if build requires alert.  None otherwise.
  """
    BUILD_IGNORE_STATUSES = frozenset([constants.BUILDER_STATUS_PASSED])
    CIDB_INDETERMINATE_STATUSES = frozenset(
        [constants.BUILDER_STATUS_INFLIGHT, constants.BUILDER_STATUS_ABORTED])
    if ((not allow_experimental and not build['important'])
            or build['status'] in BUILD_IGNORE_STATUSES):
        logging.debug('  %s:%d (id %d) skipped important %s status %s',
                      build['builder_name'], build['build_number'],
                      build['id'], build['important'], build['status'])
        return None

    # Record any relevant build messages, keeping track if it was aborted.
    message = (None, None)
    aborted = build['status'] == constants.BUILDER_STATUS_ABORTED
    for m in messages:
        # MESSAGE_TYPE_IGNORED_REASON implies that the target of the message
        # is stored as message_value (as a string).
        if (m['message_type'] == constants.MESSAGE_TYPE_IGNORED_REASON
                and str(build['id']) == m['message_value']):
            if m['message_subtype'] == constants.MESSAGE_SUBTYPE_SELF_DESTRUCTION:
                aborted = True
            message = (m['message_type'], m['message_subtype'])

    logging.info('  %s:%d (id %d) %s %s', build['builder_name'],
                 build['build_number'], build['id'], build['status'],
                 '%s/%s' % message if message[0] else '')

    # Create links for details on the build.
    dashboard_url = tree_status.ConstructLegolandBuildURL(
        build['buildbucket_id'])
    annotator_url = tree_status.ConstructAnnotatorURL(
        build.get('master_build_id', build['id']))
    links = [
        som.Link('build_details', dashboard_url),
        som.Link('goldeneye',
                 tree_status.ConstructGoldenEyeBuildDetailsURL(build['id'])),
        som.Link('viceroy',
                 tree_status.ConstructViceroyBuildDetailsURL(build['id'])),
        som.Link(
            'buildbot',
            tree_status.ConstructBuildStageURL(
                waterfall.WATERFALL_TO_DASHBOARD[build['waterfall']],
                build['builder_name'], build['build_number'])),
        som.Link('annotator', annotator_url),
    ]

    notes = SummarizeHistory(build, db)
    if len(siblings) > 1:
        notes.append('Siblings: %s \\[[compare](%s)\\]' %
                     (SummarizeStatuses(siblings)[0],
                      GenerateCompareBuildsLink([build['id']], True)))
    # Link to any existing annotations, along with a link back to the annotator.
    notes.extend([('[Annotation](%(link)s): %(failure_category)s'
                   '(%(failure_message)s) %(blame_url)s %(notes)s') %
                  dict(a, **{'link': annotator_url}) for a in annotations])

    # If the CIDB status was indeterminate (inflight/aborted), provide link
    # for sheriffs.
    if build['status'] in CIDB_INDETERMINATE_STATUSES:
        notes.append('Indeterminate CIDB status: '
                     'https://yaqs.googleplex.com/eng/q/5238815784697856')

    # Annotate sanity builders as such.
    if SANITY_BUILD_CONFIG_RE.match(build['build_config']):
        notes.append('%s is a sanity builder: '
                     'https://yaqs.googleplex.com/eng/q/5913965810155520' %
                     build['build_config'])

    # TODO: Gather similar failures.
    builders = [
        som.AlertedBuilder(build['builder_name'], dashboard_url,
                           ToEpoch(build['finish_time'] or now),
                           build['build_number'], build['build_number'])
    ]

    # Access the BuildInfo for per-stage links of failed stages.
    try:
        buildinfo = milo_client.BuildInfoGetBuildbot(build['waterfall'],
                                                     build['builder_name'],
                                                     build['build_number'])
    except prpc.PRPCResponseException as e:
        logging.warning('Unable to retrieve BuildInfo: %s', e)
        buildinfo = None

    # Highlight the problematic stages.
    alert_stages = []
    for stage in stages:
        alert_stage = GenerateAlertStage(build, stage, exceptions, aborted,
                                         buildinfo, logdog_client)
        if alert_stage:
            alert_stages.append(alert_stage)

    if (aborted
            or build['master_build_id'] is None) and len(alert_stages) == 0:
        logging.debug('  %s:%d (id %d) skipped aborted and no stages',
                      build['builder_name'], build['build_number'],
                      build['id'])
        return None

    # Add the alert to the summary.
    key = '%s:%s:%d' % (build['waterfall'], build['build_config'],
                        build['build_number'])
    alert_name = '%s:%d %s' % (build['build_config'], build['build_number'],
                               MapCIDBToSOMStatus(build['status'], message[0],
                                                  message[1]))
    return som.Alert(key, alert_name, alert_name, int(severity), ToEpoch(now),
                     ToEpoch(build['finish_time'] or now), links, [],
                     'cros-failure',
                     som.CrosBuildFailure(notes, alert_stages, builders))
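`ToEpoch` is referenced but not defined in this excerpt; a plausible minimal implementation (an assumption, not the project's actual helper) converts CIDB's naive UTC datetimes into the epoch seconds Sheriff-o-Matic expects:

import calendar
import datetime

def ToEpoch(value):
    """Convert a naive UTC datetime to seconds since the epoch (assumed helper)."""
    return calendar.timegm(value.timetuple())

print(ToEpoch(datetime.datetime(2018, 1, 1)))  # 1514764800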
Example #41
def main(argv):
    parser = GetParser()
    options = parser.parse_args(argv)

    # TODO: Remove this code in favor of a simple default on buildroot when
    #       srcroot is removed.
    if options.srcroot and not options.buildroot:
        # Convert /<repo>/src -> <repo>
        options.buildroot = os.path.dirname(options.srcroot)
    if not options.buildroot:
        options.buildroot = constants.SOURCE_ROOT
    options.srcroot = None

    options.Freeze()

    if options.command == 'commit':
        if not options.packages and not options.all:
            parser.error('Please specify at least one package (--packages)')
        if options.force and options.all:
            parser.error(
                'Cannot use --force with --all. You must specify a list of '
                'packages you want to force uprev.')

    if not os.path.isdir(options.buildroot):
        parser.error('buildroot is not a valid path: %s' % options.buildroot)

    if options.overlay_type and options.overlays:
        parser.error('Cannot use --overlay-type with --overlays.')

    portage_util.EBuild.VERBOSE = options.verbose

    package_list = None
    if options.packages:
        package_list = options.packages.split(':')

    overlays = []
    if options.overlays:
        for path in options.overlays.split(':'):
            if not os.path.isdir(path):
                cros_build_lib.Die('Cannot find overlay: %s' % path)
            overlays.append(os.path.realpath(path))
    elif options.overlay_type:
        overlays = portage_util.FindOverlays(options.overlay_type,
                                             buildroot=options.buildroot)
    else:
        logging.warning('Missing --overlays argument')
        overlays.extend([
            '%s/src/private-overlays/chromeos-overlay' % options.buildroot,
            '%s/src/third_party/chromiumos-overlay' % options.buildroot
        ])

    manifest = git.ManifestCheckout.Cached(options.buildroot)

    # Dict mapping from each overlay to its tracking branch.
    overlay_tracking_branch = {}
    # Dict mapping from each git repository (project) to a list of its overlays.
    git_project_overlays = {}

    for overlay in overlays:
        remote_ref = git.GetTrackingBranchViaManifest(overlay,
                                                      manifest=manifest)
        overlay_tracking_branch[overlay] = remote_ref.ref
        git_project_overlays.setdefault(remote_ref.project_name,
                                        []).append(overlay)

    if options.command == 'push':
        _WorkOnPush(options, overlay_tracking_branch, git_project_overlays)
    elif options.command == 'commit':
        _WorkOnCommit(options, overlays, overlay_tracking_branch,
                      git_project_overlays, manifest, package_list)
Example #42
def _CommitOverlays(options, manifest, overlays, overlay_tracking_branch,
                    overlay_ebuilds, revved_packages, new_package_atoms):
    """Commit uprevs for overlays in sequence.

  Args:
    options: The options object returned by the argument parser.
    manifest: The manifest of the given source root.
    overlays: A list of overlays to commit.
    overlay_tracking_branch: A dict mapping from each overlay to its tracking
      branch.
    overlay_ebuilds: A dict mapping overlays to their ebuilds.
    revved_packages: A shared list of revved packages.
    new_package_atoms: A shared list of new package atoms.
  """
    for overlay in overlays:
        if not os.path.isdir(overlay):
            logging.warning('Skipping %s, which is not a directory.', overlay)
            continue

        # Note we intentionally work from the non-push tracking branch;
        # everything built thus far has been against it (i.e., the http
        # mirrors), so we should honor that.  During the actual push, the code
        # switches to the correct URLs and rebases appropriately.
        tracking_branch = overlay_tracking_branch[overlay]

        existing_commit = git.GetGitRepoRevision(overlay)

        # Make sure we run in the top-level git directory in case we are
        # adding/removing an overlay in existing_commit.
        git_root = git.FindGitTopLevel(overlay)
        if git_root is None:
            cros_build_lib.Die('No git repo at overlay directory %s.', overlay)

        work_branch = GitBranch(constants.STABLE_EBUILD_BRANCH,
                                tracking_branch,
                                cwd=git_root)
        work_branch.CreateBranch()
        if not work_branch.Exists():
            cros_build_lib.Die('Unable to create stabilizing branch in %s' %
                               overlay)

        # In the case of uprevving overlays that have patches applied to them,
        # include the patched changes in the stabilizing branch.
        git.RunGit(git_root, ['rebase', existing_commit])

        ebuilds = overlay_ebuilds.get(overlay, [])
        if ebuilds:
            with parallel.Manager() as manager:
                # Contains the array of packages we actually revved.
                messages = manager.list()
                ebuild_paths_to_add = manager.list()
                ebuild_paths_to_remove = manager.list()

                inputs = [[
                    overlay, ebuild, manifest, options, ebuild_paths_to_add,
                    ebuild_paths_to_remove, messages, revved_packages,
                    new_package_atoms
                ] for ebuild in ebuilds]
                parallel.RunTasksInProcessPool(_WorkOnEbuild, inputs)

                if ebuild_paths_to_add:
                    logging.info(
                        'Adding new stable ebuild paths %s in overlay %s.',
                        ebuild_paths_to_add, overlay)
                    git.RunGit(overlay, ['add'] + list(ebuild_paths_to_add))

                if ebuild_paths_to_remove:
                    logging.info('Removing old ebuild paths %s in overlay %s.',
                                 ebuild_paths_to_remove, overlay)
                    git.RunGit(overlay,
                               ['rm', '-f'] + list(ebuild_paths_to_remove))

                if messages:
                    portage_util.EBuild.CommitChange('\n\n'.join(messages),
                                                     overlay)
Example #43
def FindSymbolFiles(tempdir, paths):
    """Locate symbol files in |paths|

  This returns SymbolFile objects that contain file references which are valid
  after this exits. Those files may exist externally, or be created in the
  tempdir (say, when expanding tarballs). The caller must not consider
  SymbolFile's valid after tempdir is cleaned up.

  Args:
    tempdir: Path to use for temporary files.
    paths: A list of input paths to walk. Files are returned without any
      checks. Dirs are searched for files that end in ".sym". URLs are fetched
      and then processed. Tarballs are unpacked and walked.

  Yields:
    A SymbolFile for every symbol file found in paths.
  """
    cache_dir = path_util.GetCacheDir()
    common_path = os.path.join(cache_dir, constants.COMMON_CACHE)
    tar_cache = cache.TarballCache(common_path)

    for p in paths:
        # Pylint is confused about members of ParseResult.

        o = urlparse.urlparse(p)
        if o.scheme:  # pylint: disable=E1101
            # Support globs of filenames.
            ctx = gs.GSContext()
            for p in ctx.LS(p):
                logging.info('processing files inside %s', p)
                o = urlparse.urlparse(p)
                key = ('%s%s' % (o.netloc, o.path)).split('/')  # pylint: disable=E1101
                # The common cache will not be LRU, removing the need to hold a read
                # lock on the cached gsutil.
                ref = tar_cache.Lookup(key)
                try:
                    ref.SetDefault(p)
                except cros_build_lib.RunCommandError as e:
                    logging.warning('ignoring %s\n%s', p, e)
                    continue
                for p in FindSymbolFiles(tempdir, [ref.path]):
                    yield p

        elif os.path.isdir(p):
            for root, _, files in os.walk(p):
                for f in files:
                    if f.endswith('.sym'):
                        # If p is '/tmp/foo' and filename is '/tmp/foo/bar/bar.sym',
                        # display_path = 'bar/bar.sym'
                        filename = os.path.join(root, f)
                        yield SymbolFile(
                            display_path=filename[len(p):].lstrip('/'),
                            file_name=filename)

        elif IsTarball(p):
            logging.info('processing files inside %s', p)
            tardir = tempfile.mkdtemp(dir=tempdir)
            cache.Untar(os.path.realpath(p), tardir)
            for p in FindSymbolFiles(tardir, [tardir]):
                yield p

        else:
            yield SymbolFile(display_path=p, file_name=p)
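Note how `FindSymbolFiles` recurses and re-yields: unpacking a tarball or expanding a gs:// glob produces fresh local paths, which are fed straight back through the same generator. The recursion skeleton on its own:

import os

def find_syms(paths):
    """Yield .sym files, descending into directories recursively."""
    for p in paths:
        if os.path.isdir(p):
            entries = [os.path.join(p, e) for e in os.listdir(p)]
            for sym in find_syms(entries):  # recurse, then re-yield
                yield sym
        elif p.endswith('.sym'):
            yield p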
Example #44
def _WorkOnEbuild(overlay, ebuild, manifest, options, ebuild_paths_to_add,
                  ebuild_paths_to_remove, messages, revved_packages,
                  new_package_atoms):
    """Work on a single ebuild.

  Args:
    overlay: The overlay where the ebuild belongs to.
    ebuild: The ebuild to work on.
    manifest: The manifest of the given source root.
    options: The options object returned by the argument parser.
    ebuild_paths_to_add: New stable ebuild paths to add to git.
    ebuild_paths_to_remove: Old ebuild paths to remove from git.
    messages: A shared list of commit messages.
    revved_packages: A shared list of revved packages.
    new_package_atoms: A shared list of new package atoms.
  """
    if options.verbose:
        logging.info('Working on %s, info %s', ebuild.package,
                     ebuild.cros_workon_vars)
    try:
        result = ebuild.RevWorkOnEBuild(os.path.join(options.buildroot, 'src'),
                                        manifest)
        if result:
            new_package, ebuild_path_to_add, ebuild_path_to_remove = result

            if ebuild_path_to_add:
                ebuild_paths_to_add.append(ebuild_path_to_add)
            if ebuild_path_to_remove:
                ebuild_paths_to_remove.append(ebuild_path_to_remove)

            messages.append(_GIT_COMMIT_MESSAGE % ebuild.package)

            if options.list_revisions:
                info = ebuild.GetSourceInfo(
                    os.path.join(options.buildroot, 'src'), manifest)
                srcdirs = [
                    os.path.join(options.buildroot, 'src', srcdir)
                    for srcdir in ebuild.cros_workon_vars.localname
                ]
                old_commit_ids = dict(
                    zip(srcdirs, ebuild.cros_workon_vars.commit.split(',')))
                git_log = []
                for srcdir in info.srcdirs:
                    old_commit_id = old_commit_ids.get(srcdir)
                    new_commit_id = ebuild.GetCommitId(srcdir)
                    if not old_commit_id or old_commit_id == new_commit_id:
                        continue

                    logs = git.RunGit(srcdir, [
                        'log',
                        '%s..%s' % (old_commit_id[:8], new_commit_id[:8]),
                        '--pretty=format:%h %<(63,trunc)%s'
                    ])
                    git_log.append('$ ' + logs.cmdstr)
                    git_log.extend(line.strip()
                                   for line in logs.output.splitlines())
                if git_log:
                    messages.append('\n'.join(git_log))

            revved_packages.append(ebuild.package)
            new_package_atoms.append('=%s' % new_package)
    except portage_util.EbuildVersionError as e:
        logging.warning('Unable to rev %s: %s', ebuild.package, e)
        raise
    except (OSError, IOError):
        logging.warning(
            'Cannot rev %s\n'
            'Note you will have to go into %s '
            'and reset the git repo yourself.', ebuild.package, overlay)
        raise
Example #45
def main(argv):
    parser = commandline.ArgumentParser(description=__doc__)

    # TODO: Make sym_paths, breakpad_root, and root exclusive.

    parser.add_argument('sym_paths',
                        type='path_or_uri',
                        nargs='*',
                        default=None,
                        help='symbol file or directory or URL or tarball')
    parser.add_argument('--board',
                        default=None,
                        help='Used to find default breakpad_root.')
    parser.add_argument('--breakpad_root',
                        type='path',
                        default=None,
                        help='full path to the breakpad symbol directory')
    parser.add_argument('--root',
                        type='path',
                        default=None,
                        help='full path to the chroot dir')
    parser.add_argument('--official_build',
                        action='store_true',
                        default=False,
                        help='point to official symbol server')
    parser.add_argument('--server',
                        type=str,
                        default=None,
                        help='URI for custom symbol server')
    parser.add_argument('--regenerate',
                        action='store_true',
                        default=False,
                        help='regenerate all symbols')
    parser.add_argument('--upload-limit',
                        type=int,
                        help='only upload # number of symbols')
    parser.add_argument('--strip_cfi',
                        type=int,
                        default=DEFAULT_FILE_LIMIT,
                        help='strip CFI data for files above this size')
    parser.add_argument('--failed-list',
                        type='path',
                        help='where to save a list of failed symbols')
    parser.add_argument('--dedupe',
                        action='store_true',
                        default=False,
                        help='use the swarming service to avoid re-uploading')
    parser.add_argument('--yes',
                        action='store_true',
                        default=False,
                        help='answer yes to all prompts')
    parser.add_argument('--product_name',
                        type=str,
                        default='ChromeOS',
                        help='Product name for breakpad stats.')

    opts = parser.parse_args(argv)
    opts.Freeze()

    # Figure out the symbol files/directories to upload.
    if opts.sym_paths:
        sym_paths = opts.sym_paths
    elif opts.breakpad_root:
        sym_paths = [opts.breakpad_root]
    elif opts.root:
        if not opts.board:
            raise ValueError('--board must be set if --root is used.')
        breakpad_dir = cros_generate_breakpad_symbols.FindBreakpadDir(
            opts.board)
        sym_paths = [os.path.join(opts.root, breakpad_dir.lstrip('/'))]
    else:
        raise ValueError(
            '--sym_paths, --breakpad_root, or --root must be set.')

    if opts.sym_paths or opts.breakpad_root:
        if opts.regenerate:
            cros_build_lib.Die(
                '--regenerate may not be used with specific files, '
                'or breakpad_root')
    else:
        if opts.board is None:
            cros_build_lib.Die('--board is required')

    # Figure out the dedupe namespace.
    dedupe_namespace = None
    if opts.dedupe:
        if opts.official_build:
            dedupe_namespace = OFFICIAL_DEDUPE_NAMESPACE_TMPL % opts.product_name
        else:
            dedupe_namespace = STAGING_DEDUPE_NAMESPACE_TMPL % opts.product_name

    # Figure out which crash server to upload to.
    upload_url = opts.server
    if not upload_url:
        if opts.official_build:
            upload_url = OFFICIAL_UPLOAD_URL
        else:
            logging.warning('unofficial builds upload to the staging server')
            upload_url = STAGING_UPLOAD_URL

    # Confirm we really want the long upload.
    if not opts.yes:
        prolog = '\n'.join(
            textwrap.wrap(
                textwrap.dedent("""
        Uploading symbols for an entire Chromium OS build is really only
        necessary for release builds and in a few cases for developers
        to debug problems.  It will take considerable time to run.  For
        developer debugging purposes, consider instead passing specific
        files to upload.
    """), 80)).strip()
        if not cros_build_lib.BooleanPrompt(
                prompt='Are you sure you want to upload all build symbols',
                default=False,
                prolog=prolog):
            cros_build_lib.Die('better safe than sorry')

    ret = 0

    # Regenerate symbols from binaries.
    if opts.regenerate:
        ret += cros_generate_breakpad_symbols.GenerateBreakpadSymbols(
            opts.board, breakpad_dir=opts.breakpad_root)

    # Do the upload.
    ret += UploadSymbols(sym_paths=sym_paths,
                         upload_url=upload_url,
                         product_name=opts.product_name,
                         dedupe_namespace=dedupe_namespace,
                         failed_list=opts.failed_list,
                         upload_limit=opts.upload_limit,
                         strip_cfi=opts.strip_cfi)

    if ret:
        logging.error('encountered %i problem(s)', ret)
        # Since exit(status) gets masked, clamp it to 1 so we don't inadvertently
        # return 0 in case we are a multiple of the mask.
        return 1
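The clamp to 1 before returning matters because, on POSIX, a process exit status is taken modulo 256; a failure count that happens to be a multiple of 256 would otherwise read as success:

import subprocess
import sys

# 256 % 256 == 0, so this "failure" reports success to its caller.
rc = subprocess.call([sys.executable, '-c', 'raise SystemExit(256)'])
print(rc)  # 0 on POSIX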
Example #46
    def PerformStage(self):
        """Collect a 'perf' profile and convert it into the AFDO format."""
        super(AFDODataGenerateStage, self).PerformStage()

        board = self._current_board
        if not afdo.CanGenerateAFDOData(board):
            logging.warning('Board %s cannot generate its own AFDO profile.',
                            board)
            return

        arch = self._GetCurrentArch()
        buildroot = self._build_root
        gs_context = gs.GSContext()
        cpv = portage_util.PortageqBestVisible(constants.CHROME_CP,
                                               cwd=buildroot)
        afdo_file = None

        # We have a mismatch between how we version the perf.data we collect and
        # how we version our AFDO profiles.
        #
        # This mismatch can cause us to generate garbage profiles, so we skip
        # profile updates for non-r1 revisions of Chrome.
        #
        # Going into more detail, a perf.data file looks like:
        # chromeos-chrome-amd64-68.0.3440.9.perf.data.bz2
        #
        # An AFDO profile looks like:
        # chromeos-chrome-amd64-68.0.3440.9_rc-r1.afdo.bz2
        #
        # And an unstripped Chrome looks like:
        # chromeos-chrome-amd64-68.0.3440.9_rc-r1.debug.bz2
        #
        # Notably, the perf.data is lacking the revision number of the Chrome it
        # was gathered on. This is problematic, since if there's a rev bump, we'll
        # end up using the perf.data collected on Chrome version $N-r1 with a
        # Chrome binary built from Chrome version $N-r2, which may have an entirely
        # different layout than Chrome version $N-r1.
        if cpv.rev != 'r1':
            logging.warning(
                'Non-r1 version of Chrome detected; skipping AFDO generation')
            return

        # Generation of AFDO could fail for different reasons.
        # We will ignore the failures and let the master PFQ builder try
        # to find an older AFDO profile.
        try:
            if afdo.WaitForAFDOPerfData(cpv, arch, buildroot, gs_context):
                afdo_file, uploaded_afdo = afdo.GenerateAFDOData(
                    cpv, arch, board, buildroot, gs_context)
                assert afdo_file
                logging.info('Generated %s AFDO profile %s', arch, afdo_file)

                # If there's no new profile, merging would only be redoing the last
                # merge and uploading nothing.
                if not uploaded_afdo:
                    logging.info('AFDO profile already existed in GS. Quit')
                    return

                merged_file, uploaded_merged = \
                    afdo.CreateAndUploadMergedAFDOProfile(gs_context, buildroot,
                                                          afdo_file)

                if merged_file is not None:
                    logging.info('Generated %s merged AFDO profile %s', arch,
                                 merged_file)

                # TODO(gbiv): once there's clarity that merged profiles are working
                # (e.g. a week goes by with Android/Linux mostly-happily using them),
                # we may want to turn them on for CrOS. Until then, `latest` is always
                # the raw AFDO file.
                if uploaded_merged and False:
                    newest_afdo_file = merged_file
                else:
                    newest_afdo_file = afdo_file

                afdo.UpdateLatestAFDOProfileInGS(cpv, arch, buildroot,
                                                 newest_afdo_file, gs_context)
                logging.info('Pointed newest profile at %s', newest_afdo_file)
            else:
                raise afdo.MissingAFDOData(
                    'Could not find current "perf" profile. '
                    'Master PFQ builder will try to use stale '
                    'AFDO profile.')
        # Will let system-exiting exceptions through.
        except Exception:
            logging.PrintBuildbotStepWarnings()
            logging.warning('AFDO profile generation failed with exception',
                            exc_info=True)

            alert_msg = ('Please triage. This will become a fatal error.\n\n'
                         'arch=%s buildroot=%s\n\nURL=%s' %
                         (arch, buildroot, self._run.ConstructDashboardURL()))
            subject_msg = (
                'Failure in generation of AFDO Data for builder %s' %
                self._run.config.name)
            alerts.SendEmailLog(subject_msg,
                                afdo.AFDO_ALERT_RECIPIENTS,
                                server=alerts.SmtpServer(
                                    constants.GOLO_SMTP_SERVER),
                                message=alert_msg)
            # Re-raise whatever exception we got here. This stage will only
            # generate a warning but we want to make sure the warning is
            # generated.
            raise
Example #47
File: pushimage.py Project: sjg20/chromite
def PushImage(src_path, board, versionrev=None, profile=None, priority=50,
              sign_types=None, dry_run=False, mock=False, force_keysets=()):
  """Push the image from the archive bucket to the release bucket.

  Args:
    src_path: Where to copy the files from; can be a local path or gs:// URL.
      Should be a full path to the artifacts in either case.
    board: The board we're uploading artifacts for (e.g. $BOARD).
    versionrev: The full Chromium OS version string (e.g. R34-5126.0.0).
    profile: The board profile in use (e.g. "asan").
    priority: Set the signing priority (lower == higher prio).
    sign_types: If set, a set of types which we'll restrict ourselves to
      signing.  See the --sign-types option for more details.
    dry_run: Show what would be done, but do not upload anything.
    mock: Upload to a testing bucket rather than the real one.
    force_keysets: Set of keysets to use rather than what the inputs say.

  Returns:
    A dictionary that maps 'channel' -> ['gs://signer_instruction_uri1',
                                         'gs://signer_instruction_uri2',
                                         ...]
  """
  # Whether we hit an unknown error.  If so, we'll throw an error, but only
  # at the end (so that we still upload as many files as possible).
  # It's implemented using a list to deal with variable scopes in nested
  # functions below.
  unknown_error = [False]

  if versionrev is None:
    # Extract milestone/version from the directory name.
    versionrev = os.path.basename(src_path)

  # We only support the latest format here.  Older releases can use pushimage
  # from the respective branch which deals with legacy cruft.
  m = re.match(VERSION_REGEX, versionrev)
  if not m:
    raise ValueError('version %s does not match %s' %
                     (versionrev, VERSION_REGEX))
  milestone = m.group(1)
  version = m.group(2)

  # Normalize board to always use dashes not underscores.  This is mostly a
  # historical artifact at this point, but we can't really break it since the
  # value is used in URLs.
  boardpath = board.replace('_', '-')
  if profile is not None:
    boardpath += '-%s' % profile.replace('_', '-')

  ctx = gs.GSContext(dry_run=dry_run)

  try:
    input_insns = InputInsns(board)
  except MissingBoardInstructions as e:
    logging.warning('Missing base instruction file: %s', e)
    logging.warning('not uploading anything for signing')
    return
  channels = input_insns.GetChannels()

  # We want force_keysets as a set.
  force_keysets = set(force_keysets)

  if mock:
    logging.info('Upload mode: mock; signers will not process anything')
    tbs_base = gs_base = os.path.join(constants.TRASH_BUCKET, 'pushimage-tests',
                                      getpass.getuser())
  elif set(['%s-%s' % (TEST_KEYSET_PREFIX, x)
            for x in TEST_KEYSETS]) & force_keysets:
    logging.info('Upload mode: test; signers will process test keys')
    # We need the tbs_base to be in the place the signer will actually scan.
    tbs_base = TEST_SIGN_BUCKET_BASE
    gs_base = os.path.join(tbs_base, getpass.getuser())
  else:
    logging.info('Upload mode: normal; signers will process the images')
    tbs_base = gs_base = constants.RELEASE_BUCKET

  sect_general = {
      'config_board': board,
      'board': boardpath,
      'version': version,
      'versionrev': versionrev,
      'milestone': milestone,
  }
  sect_insns = {}

  if dry_run:
    logging.info('DRY RUN MODE ACTIVE: NOTHING WILL BE UPLOADED')
  logging.info('Signing for channels: %s', ' '.join(channels))

  instruction_urls = {}

  def _ImageNameBase(image_type=None):
    lmid = ('%s-' % image_type) if image_type else ''
    return 'ChromeOS-%s%s-%s' % (lmid, versionrev, boardpath)

  # These variables are defined outside the loop so that the nested functions
  # below can access them without 'cell-var-from-loop' linter warning.
  dst_path = ""
  files_to_sign = []
  for channel in channels:
    logging.debug('\n\n#### CHANNEL: %s ####\n', channel)
    sect_insns['channel'] = channel
    sub_path = '%s-channel/%s/%s' % (channel, boardpath, version)
    dst_path = '%s/%s' % (gs_base, sub_path)
    logging.info('Copying images to %s', dst_path)

    recovery_basename = _ImageNameBase(constants.IMAGE_TYPE_RECOVERY)
    factory_basename = _ImageNameBase(constants.IMAGE_TYPE_FACTORY)
    firmware_basename = _ImageNameBase(constants.IMAGE_TYPE_FIRMWARE)
    nv_lp0_firmware_basename = _ImageNameBase(
        constants.IMAGE_TYPE_NV_LP0_FIRMWARE)
    acc_usbpd_basename = _ImageNameBase(constants.IMAGE_TYPE_ACCESSORY_USBPD)
    acc_rwsig_basename = _ImageNameBase(constants.IMAGE_TYPE_ACCESSORY_RWSIG)
    test_basename = _ImageNameBase(constants.IMAGE_TYPE_TEST)
    base_basename = _ImageNameBase(constants.IMAGE_TYPE_BASE)
    hwqual_tarball = 'chromeos-hwqual-%s-%s.tar.bz2' % (board, versionrev)

    # The following build artifacts, if present, are always copied regardless of
    # requested signing types.
    files_to_copy_only = (
        # (<src>, <dst>, <suffix>),
        ('image.zip', _ImageNameBase(), 'zip'),
        (constants.TEST_IMAGE_TAR, test_basename, 'tar.xz'),
        ('debug.tgz', 'debug-%s' % boardpath, 'tgz'),
        (hwqual_tarball, '', ''),
        ('au-generator.zip', '', ''),
        ('stateful.tgz', '', ''),
    )

    # The following build artifacts, if present, are always copied.
    # If |sign_types| is None, all of them are marked for signing, otherwise
    # only the image types specified in |sign_types| are marked for signing.
    files_to_copy_and_maybe_sign = (
        # (<src>, <dst>, <suffix>, <signing type>),
        (constants.RECOVERY_IMAGE_TAR, recovery_basename, 'tar.xz',
         constants.IMAGE_TYPE_RECOVERY),

        ('factory_image.zip', factory_basename, 'zip',
         constants.IMAGE_TYPE_FACTORY),

        ('firmware_from_source.tar.bz2', firmware_basename, 'tar.bz2',
         constants.IMAGE_TYPE_FIRMWARE),

        ('firmware_from_source.tar.bz2', nv_lp0_firmware_basename, 'tar.bz2',
         constants.IMAGE_TYPE_NV_LP0_FIRMWARE),

        ('firmware_from_source.tar.bz2', acc_usbpd_basename, 'tar.bz2',
         constants.IMAGE_TYPE_ACCESSORY_USBPD),

        ('firmware_from_source.tar.bz2', acc_rwsig_basename, 'tar.bz2',
         constants.IMAGE_TYPE_ACCESSORY_RWSIG),
    )

    # The following build artifacts are copied and marked for signing, if
    # they are present *and* if the image type is specified via |sign_types|.
    files_to_maybe_copy_and_sign = (
        # (<src>, <dst>, <suffix>, <signing type>),
        (constants.BASE_IMAGE_TAR, base_basename, 'tar.xz',
         constants.IMAGE_TYPE_BASE),
    )

    def _CopyFileToGS(src, dst, suffix):
      """Returns |dst| file name if the copying was successful."""
      if not dst:
        dst = src
      elif suffix:
        dst = '%s.%s' % (dst, suffix)
      success = False
      try:
        ctx.Copy(os.path.join(src_path, src), os.path.join(dst_path, dst))
        success = True
      except gs.GSNoSuchKey:
        logging.warning('Skipping %s as it does not exist', src)
      except gs.GSContextException:
        unknown_error[0] = True
        logging.error('Skipping %s due to unknown GS error', src, exc_info=True)
      return dst if success else None

    for src, dst, suffix in files_to_copy_only:
      _CopyFileToGS(src, dst, suffix)

    # Clear the list of files to sign before adding new artifacts.
    files_to_sign = []

    def _AddToFilesToSign(image_type, dst, suffix):
      assert dst.endswith('.' + suffix), (
          'dst: %s, suffix: %s' % (dst, suffix))
      dst_base = dst[:-(len(suffix) + 1)]
      files_to_sign.append([image_type, dst_base, suffix])

    for src, dst, suffix, image_type in files_to_copy_and_maybe_sign:
      dst = _CopyFileToGS(src, dst, suffix)
      if dst and (not sign_types or image_type in sign_types):
        _AddToFilesToSign(image_type, dst, suffix)

    for src, dst, suffix, image_type in files_to_maybe_copy_and_sign:
      if sign_types and image_type in sign_types:
        dst = _CopyFileToGS(src, dst, suffix)
        if dst:
          _AddToFilesToSign(image_type, dst, suffix)

    logging.debug('Files to sign: %s', files_to_sign)
    # Now go through the subset for signing.
    for image_type, dst_name, suffix in files_to_sign:
      try:
        input_insns = InputInsns(board, image_type=image_type)
      except MissingBoardInstructions as e:
        logging.info('Nothing to sign: %s', e)
        continue

      dst_archive = '%s.%s' % (dst_name, suffix)
      sect_general['archive'] = dst_archive
      sect_general['type'] = image_type

      # In the default/automatic mode, only flag files for signing if the
      # archives were actually uploaded in a previous stage. This additional
      # check can be removed in future once |sign_types| becomes a required
      # argument.
      # TODO: Make |sign_types| a required argument.
      gs_artifact_path = os.path.join(dst_path, dst_archive)
      exists = False
      try:
        exists = ctx.Exists(gs_artifact_path)
      except gs.GSContextException:
        unknown_error[0] = True
        logging.error('Unknown error while checking %s', gs_artifact_path,
                      exc_info=True)
      if not exists:
        logging.info('%s does not exist.  Nothing to sign.',
                     gs_artifact_path)
        continue

      first_image = True
      for alt_insn_set in input_insns.GetAltInsnSets():
        # Figure out which keysets have been requested for this type.
        # We sort the forced set so tests/runtime behavior is stable.
        keysets = sorted(force_keysets)
        if not keysets:
          keysets = input_insns.GetKeysets(insns_merge=alt_insn_set)
          if not keysets:
            logging.warning('Skipping %s image signing due to no keysets',
                            image_type)

        for keyset in keysets:
          sect_insns['keyset'] = keyset

          # Generate the insn file for this artifact that the signer will use,
          # and flag it for signing.
          with tempfile.NamedTemporaryFile(
              bufsize=0, prefix='pushimage.insns.') as insns_path:
            input_insns.OutputInsns(insns_path.name, sect_insns, sect_general,
                                    insns_merge=alt_insn_set)

            gs_insns_path = '%s/%s' % (dst_path, dst_name)
            if not first_image:
              gs_insns_path += '-%s' % keyset
            first_image = False
            gs_insns_path += '.instructions'

            try:
              ctx.Copy(insns_path.name, gs_insns_path)
            except gs.GSContextException:
              unknown_error[0] = True
              logging.error('Unknown error while uploading insns %s',
                            gs_insns_path, exc_info=True)
              continue

            try:
              MarkImageToBeSigned(ctx, tbs_base, gs_insns_path, priority)
            except gs.GSContextException:
              unknown_error[0] = True
              logging.error('Unknown error while marking for signing %s',
                            gs_insns_path, exc_info=True)
              continue
            logging.info('Signing %s image with keyset %s at %s', image_type,
                         keyset, gs_insns_path)
            instruction_urls.setdefault(channel, []).append(gs_insns_path)

  if unknown_error[0]:
    raise PushError('hit some unknown error(s)', instruction_urls)

  return instruction_urls
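A note on the instructions-file naming above: only the first alternate instruction set for an artifact gets the bare name; later ones are disambiguated with the keyset. A minimal sketch of that rule (the helper name here is illustrative, not a chromite API):

def _InsnsGsName(dst_name, keyset, first_image):
    """Mirror the gs_insns_path naming from the loop above."""
    name = dst_name if first_image else '%s-%s' % (dst_name, keyset)
    return name + '.instructions'

assert _InsnsGsName('chromeos_1.2.3_board', 'mp-v2', True) == \
    'chromeos_1.2.3_board.instructions'
assert _InsnsGsName('chromeos_1.2.3_board', 'mp-v2', False) == \
    'chromeos_1.2.3_board-mp-v2.instructions'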
Example #48
def main(argv):
    parser = GetParser()
    options = parser.parse_args(argv)
    options.Freeze()

    if options.command == 'commit':
        if not options.packages and not options.all:
            parser.error('Please specify at least one package (--packages)')
        if options.force and options.all:
            parser.error(
                'Cannot use --force with --all. You must specify a list of '
                'packages you want to force uprev.')

    if not os.path.isdir(options.srcroot):
        parser.error('srcroot is not a valid path: %s' % options.srcroot)

    portage_util.EBuild.VERBOSE = options.verbose

    package_list = None
    if options.packages:
        package_list = options.packages.split(':')

    if options.overlays:
        overlays = {}
        for path in options.overlays.split(':'):
            if not os.path.isdir(path):
                cros_build_lib.Die('Cannot find overlay: %s' % path)
            overlays[path] = []
    else:
        logging.warning('Missing --overlays argument')
        overlays = {
            '%s/private-overlays/chromeos-overlay' % options.srcroot: [],
            '%s/third_party/chromiumos-overlay' % options.srcroot: [],
        }

    manifest = git.ManifestCheckout.Cached(options.srcroot)

    if options.command == 'commit':
        portage_util.BuildEBuildDictionary(overlays,
                                           options.all,
                                           package_list,
                                           allow_blacklisted=options.force)

    # Contains the array of packages we actually revved.
    revved_packages = []
    new_package_atoms = []

    for overlay in overlays:
        ebuilds = overlays[overlay]
        if not os.path.isdir(overlay):
            logging.warning('Skipping %s' % overlay)
            continue

        # Note we intentionally work from the non-push tracking branch;
        # everything built thus far has been against it (meaning, http
        # mirrors), thus we should honor that.  During the actual push, the
        # code switches to the correct urls and does an appropriate rebase.
        tracking_branch = git.GetTrackingBranchViaManifest(
            overlay, manifest=manifest).ref

        if options.command == 'push':
            PushChange(constants.STABLE_EBUILD_BRANCH,
                       tracking_branch,
                       options.dryrun,
                       cwd=overlay,
                       staging_branch=options.staging_branch)
        elif options.command == 'commit':
            existing_commit = git.GetGitRepoRevision(overlay)
            work_branch = GitBranch(constants.STABLE_EBUILD_BRANCH,
                                    tracking_branch,
                                    cwd=overlay)
            work_branch.CreateBranch()
            if not work_branch.Exists():
                cros_build_lib.Die(
                    'Unable to create stabilizing branch in %s' % overlay)

            # In the case of uprevving overlays that have patches applied to them,
            # include the patched changes in the stabilizing branch.
            git.RunGit(overlay, ['rebase', existing_commit])

            messages = []
            for ebuild in ebuilds:
                if options.verbose:
                    logging.info('Working on %s', ebuild.package)
                try:
                    new_package = ebuild.RevWorkOnEBuild(
                        options.srcroot, manifest)
                    if new_package:
                        revved_packages.append(ebuild.package)
                        new_package_atoms.append('=%s' % new_package)
                        messages.append(_GIT_COMMIT_MESSAGE % ebuild.package)
                except (OSError, IOError):
                    logging.warning('Cannot rev %s\n'
                                    'Note you will have to go into %s '
                                    'and reset the git repo yourself.' %
                                    (ebuild.package, overlay))
                    raise

            if messages:
                portage_util.EBuild.CommitChange('\n\n'.join(messages),
                                                 overlay)

    if options.command == 'commit':
        chroot_path = os.path.join(options.srcroot,
                                   constants.DEFAULT_CHROOT_DIR)
        if os.path.exists(chroot_path):
            CleanStalePackages(options.srcroot, options.boards.split(':'),
                               new_package_atoms)
        if options.drop_file:
            osutils.WriteFile(options.drop_file, ' '.join(revved_packages))
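The drop file written above is a single line of space-separated package atoms, so a consumer can recover the list with a plain split. A hypothetical reader (the writer above is the source of truth):

from chromite.lib import osutils

def ReadRevvedPackages(drop_file):
    """Read back the space-separated package list written by main()."""
    return osutils.ReadFile(drop_file).split()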
Example #49
def WriteFirmware(options):
    """Write firmware to the board.

  This uses cros_bundle_firmware to create a firmware image and write it to
  the board.

  Args:
    options: Command line options
  """
    flash = []
    kernel = []
    run = []
    secure = []
    servo = []
    silent = []
    verbose_arg = []
    ro_uboot = []

    bl2 = ['--bl2', '%s/spl/%s-spl.bin' % (outdir, smdk)]

    if options.use_defaults:
        bl1 = []
        bmpblk = []
        ecro = []
        ecrw = []
        defaults = []
    else:
        bl1 = ['--bl1', '##/build/%s/firmware/u-boot.bl1.bin' % options.board]
        bmpblk = [
            '--bmpblk',
            '##/build/%s/firmware/bmpblk.bin' % options.board
        ]
        ecro = ['--ecro', '##/build/%s/firmware/ec.RO.bin' % options.board]
        ecrw = ['--ec', '##/build/%s/firmware/ec.RW.bin' % options.board]
        defaults = ['-D']

    if arch == 'x86':
        seabios = [
            '--seabios',
            '##/build/%s/firmware/seabios.cbfs' % options.board
        ]
    else:
        seabios = []

    if options.sdcard:
        dest = 'sd:.'
    elif arch == 'x86':
        dest = 'em100'
    elif arch == 'sandbox':
        dest = ''
    else:
        dest = 'usb'

    port = SERVO_PORT.get(options.board, '')
    if port:
        servo = ['--servo', '%d' % port]

    if options.flash:
        flash = ['-F', 'spi']

        # The small builds don't have the command line interpreter so cannot
        # run the magic flasher script. So use the standard U-Boot in this
        # case.
        if options.small:
            logging.warning('Using standard U-Boot as flasher')
            flash += ['-U', '##/build/%s/firmware/u-boot.bin' % options.board]

    if options.mmc:
        flash = ['-F', 'sdmmc']

    if options.verbose:
        verbose_arg = ['-v', '%s' % options.verbose]

    if options.secure:
        secure += ['--bootsecure', '--bootcmd', 'vboot_twostop']

    if not options.verified:
        # Make a small image, without GBB, etc.
        secure.append('-s')

    if options.kernel:
        kernel = ['--kernel', '##/build/%s/boot/vmlinux.uimg' % options.board]

    if not options.console:
        silent = ['--add-config-int', 'silent-console', '1']

    if not options.run:
        run = ['--bootcmd', 'none']

    if arch != 'sandbox' and not in_chroot and servo:
        if dest == 'usb':
            logging.warning('Image cannot be written to board')
            dest = ''
            servo = []
        elif dest == 'em100':
            logging.warning('Please reset the board manually to boot firmware')
            servo = []

        if not servo:
            logging.warning('(sadly dut-control does not work outside chroot)')

    if dest:
        dest = ['-w', dest]
    else:
        dest = []

    soc = SOCS.get(board)
    if not soc:
        soc = SOCS.get(uboard, '')
    dt_name = DEFAULT_DTS.get(options.board, options.board)
    dts_file = 'board/%s/dts/%s%s.dts' % (vendor, soc, dt_name)
    Log('Device tree: %s' % dts_file)

    if arch == 'sandbox':
        uboot_fname = '%s/u-boot' % outdir
    else:
        uboot_fname = '%s/u-boot.bin' % outdir

    if options.ro:
        # RO U-Boot is passed through as blob 'ro-boot'. We use the standard
        # ebuild one as RW.
        # TODO([email protected]): Option to build U-Boot a second time to get
        # a fresh RW U-Boot.
        logging.warning('Using standard U-Boot for RW')
        ro_uboot = ['--add-blob', 'ro-boot', uboot_fname]
        uboot_fname = '##/build/%s/firmware/u-boot.bin' % options.board
    cbf = [
        '%s/platform/dev/host/cros_bundle_firmware' % src_root, '-b',
        options.board, '-d', dts_file, '-I',
        'arch/%s/dts' % arch, '-I', 'cros/dts', '-u', uboot_fname, '-O',
        '%s/out' % outdir, '-M', family
    ]

    for other in [
            bl1, bl2, bmpblk, defaults, dest, ecro, ecrw, flash, kernel, run,
            seabios, secure, servo, silent, verbose_arg, ro_uboot
    ]:
        if other:
            cbf += other
    if options.cbfargs:
        for item in options.cbfargs:
            cbf += item.split(' ')
    os.environ['PYTHONPATH'] = ('%s/platform/dev/host/lib:%s/..' %
                                (src_root, src_root))
    Log(' '.join(cbf))
    result = cros_build_lib.run(cbf, **kwargs)
    if result.returncode:
        cros_build_lib.Die('cros_bundle_firmware failed')

    if not dest:
        logging.info('Image is available at %s/out/image.bin', outdir)
    else:
        logging.info('Image written to board with %s', ' '.join(dest + servo))
Example #50
    def _CreateCleanSnapshot(self):
        for snapshot in commands.ListChrootSnapshots(self._build_root):
            if not commands.DeleteChrootSnapshot(self._build_root, snapshot):
                logging.warning("Couldn't delete old snapshot %s", snapshot)
        commands.CreateChrootSnapshot(self._build_root,
                                      constants.CHROOT_SNAPSHOT_CLEAN)
Example #51
import contextlib
import os
import socket
import signal
import time
import Queue

from chromite.lib import cros_logging as logging
from chromite.lib import metrics
from chromite.lib import parallel

try:
    from infra_libs.ts_mon import config
    import googleapiclient.discovery
except (ImportError, RuntimeError) as e:
    config = None
    logging.warning('Failed to import ts_mon, monitoring is disabled: %s', e)

_WasSetup = False

FLUSH_INTERVAL = 60


@contextlib.contextmanager
def TrivialContextManager():
    """Context manager with no side effects."""
    yield


def SetupTsMonGlobalState(service_name,
                          indirect=False,
                          suppress_exception=True,
Example #52
def _BuildInitialPackageRoot(output_dir, paths, elfs, ldpaths,
                             path_rewrite_func=lambda x: x, root='/'):
  """Link in all packable files and their runtime dependencies

  This also wraps up executable ELFs with helper scripts.

  Args:
    output_dir: The output directory to store files
    paths: All the files to include
    elfs: All the files which are ELFs (a subset of |paths|)
    ldpaths: A dict of static ldpath information
    path_rewrite_func: User callback to rewrite paths in output_dir
    root: The root path to pull all packages/files from
  """
  # Link in all the files.
  sym_paths = []
  for path in paths:
    new_path = path_rewrite_func(path)
    dst = output_dir + new_path
    osutils.SafeMakedirs(os.path.dirname(dst))

    # Is this a symlink which we have to rewrite or wrap?
    # Delay wrap check until after we have created all paths.
    src = root + path
    if os.path.islink(src):
      tgt = os.readlink(src)
      if os.path.sep in tgt:
        sym_paths.append((new_path, lddtree.normpath(ReadlinkRoot(src, root))))

        # Rewrite absolute links to relative and then generate the symlink
        # ourselves.  All other symlinks can be hardlinked below.
        if tgt[0] == '/':
          tgt = os.path.relpath(tgt, os.path.dirname(new_path))
          os.symlink(tgt, dst)
          continue

    os.link(src, dst)

  # Now see if any of the symlinks need to be wrapped.
  for sym, tgt in sym_paths:
    if tgt in elfs:
      GeneratePathWrapper(output_dir, sym, tgt)

  # Locate all the dependencies for all the ELFs.  Stick them all in the
  # top level "lib" dir to make the wrapper simpler.  This exact path does
  # not matter since we execute ldso directly, and we tell the ldso the
  # exact path to search for its libraries.
  libdir = os.path.join(output_dir, 'lib')
  osutils.SafeMakedirs(libdir)
  donelibs = set()
  for elf in elfs:
    e = lddtree.ParseELF(elf, root=root, ldpaths=ldpaths)
    interp = e['interp']
    if interp:
      # Generate a wrapper if it is executable.
      interp = os.path.join('/lib', os.path.basename(interp))
      lddtree.GenerateLdsoWrapper(output_dir, path_rewrite_func(elf), interp,
                                  libpaths=e['rpath'] + e['runpath'])
      FixClangXXWrapper(output_dir, path_rewrite_func(elf))

    for lib, lib_data in e['libs'].iteritems():
      if lib in donelibs:
        continue

      src = path = lib_data['path']
      if path is None:
        logging.warning('%s: could not locate %s', elf, lib)
        continue
      donelibs.add(lib)

      # Needed libs are the SONAME, but that is usually a symlink, not a
      # real file.  So link in the target rather than the symlink itself.
      # We have to walk all the possible symlinks (SONAME could point to a
      # symlink which points to a symlink), and we have to handle absolute
      # targets ourselves (since we resolve against a |root| argument).
      dst = os.path.join(libdir, os.path.basename(path))
      src = ReadlinkRoot(src, root)

      os.link(root + src, dst)
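The SONAME handling above leans on ReadlinkRoot(), which is not shown in this excerpt. A plausible sketch, consistent with how the callers use it (root-relative path in, fully resolved root-relative path out):

import os

def ReadlinkRoot(path, root):
    """Resolve the symlink chain for root-relative |path| inside |root|.

    os.path.join() discards its first argument when the link target is
    absolute, so absolute targets restart from '/' (still interpreted
    relative to |root|), while relative targets resolve against the
    link's own directory.
    """
    while os.path.islink(root + path):
        path = os.path.join(os.path.dirname(path), os.readlink(root + path))
    return path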
Example #53
    def _AutotestPayloads(self, payload_tests):
        """Create necessary test artifacts and initiate Autotest runs.

    Args:
      payload_tests: An iterable of PayloadTest objects defining payload tests.
    """
        # Create inner hierarchy for dumping Autotest control files.
        control_dir = os.path.join(self._work_dir, 'autotests')
        control_dump_dir = os.path.join(control_dir, self.CONTROL_FILE_SUBDIR)
        os.makedirs(control_dump_dir)

        # Customize the test suite's name based on this build's channel.
        test_channel = self._build.channel.rpartition('-')[0]
        suite_name = (self.PAYGEN_AU_SUITE_TEMPLATE % test_channel)

        # Emit a control file for each payload.
        logging.info('Emitting control files into %s', control_dump_dir)
        for payload_test in payload_tests:
            self._EmitControlFile(payload_test, suite_name, control_dump_dir)

        tarball_name = self.CONTROL_TARBALL_TEMPLATE % test_channel

        # Must use an absolute tarball path since tar is run in a different cwd.
        tarball_path = os.path.join(control_dir, tarball_name)

        # Create the tarball.
        logging.info('Packing %s in %s into %s', self.CONTROL_FILE_SUBDIR,
                     control_dir, tarball_path)
        cmd_result = cros_build_lib.CreateTarball(
            tarball_path,
            control_dir,
            compression=cros_build_lib.COMP_BZIP2,
            inputs=[self.CONTROL_FILE_SUBDIR])
        if cmd_result.returncode != 0:
            logging.error('Error (%d) when tarring control files',
                          cmd_result.returncode)
            raise PayloadTestError(
                'failed to create autotest tarball (return code %d)' %
                cmd_result.returncode)

        # Upload the tarball, be sure to make it world-readable.
        upload_target = os.path.join(self._archive_build_uri, tarball_name)
        logging.info('Uploading autotest control tarball to %s', upload_target)
        gslib.Copy(tarball_path, upload_target, acl='public-read')

        # Do not run the suite for older builds whose suite staging logic is
        # broken.  We use the build's milestone number as a rough estimate of
        # whether or not it's recent enough.  We derive the milestone number
        # from the archive build name, which takes the form
        # boardname-release/R12-3456.78.9 (in this case it is 12).
        try:
            build_mstone = int(
                self._archive_build.partition('/')[2].partition('-')[0][1:])
            if build_mstone < RUN_SUITE_MIN_MSTONE:
                logging.warning(
                    'Build milestone < %s, test suite scheduling skipped',
                    RUN_SUITE_MIN_MSTONE)
                return
        except ValueError:
            raise PayloadTestError(
                'Failed to infer archive build milestone number (%s)' %
                self._archive_build)

        # Send the information needed to actually schedule and run the tests.
        return suite_name
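The milestone derivation in the try block is easiest to follow with a concrete value; a worked example of the same expression:

archive_build = 'boardname-release/R12-3456.78.9'
version = archive_build.partition('/')[2]          # 'R12-3456.78.9'
build_mstone = int(version.partition('-')[0][1:])  # 'R12' -> '12' -> 12
assert build_mstone == 12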
Example #54
    def PerformStage(self):
        """Perform the actual work for this stage.

    This includes final metadata archival, and update CIDB with our final status
    as well as producting a logged build result summary.
    """
        build_identifier, _ = self._run.GetCIDBHandle()
        build_id = build_identifier.cidb_id
        buildbucket_id = build_identifier.buildbucket_id
        if results_lib.Results.BuildSucceededSoFar(self.buildstore,
                                                   buildbucket_id, self.name):
            final_status = constants.BUILDER_STATUS_PASSED
        else:
            final_status = constants.BUILDER_STATUS_FAILED

        if not hasattr(self._run.attrs, 'release_tag'):
            # If, for some reason, the sync stage did not complete,
            # release_tag will not be set.  Set it to None here because
            # ArchiveResults() depends on the existence of this attr.
            self._run.attrs.release_tag = None

        # Set up our report metadata.
        self._run.attrs.metadata.UpdateWithDict(
            self.GetReportMetadata(
                final_status=final_status,
                completion_instance=self._completion_instance))

        src_root = self._build_root
        # Workspace builders use a different buildroot for overlays.
        if self._run.config.workspace_branch and self._run.options.workspace:
            src_root = self._run.options.workspace

        # Add tags for the arches and statuses of the build.
        # arches requires crossdev which isn't available at the early part of the
        # build.
        arches = []
        for board in self._run.config['boards']:
            toolchains = toolchain.GetToolchainsForBoard(board,
                                                         buildroot=src_root)
            default = list(
                toolchain.FilterToolchains(toolchains, 'default', True))
            if default:
                try:
                    arches.append(toolchain.GetArchForTarget(default[0]))
                except cros_build_lib.RunCommandError as e:
                    logging.warning(
                        'Unable to retrieve arch for board %s default toolchain %s: %s',
                        board, default, e)
        tags = {
            'arches': arches,
            'status': final_status,
        }
        results = self._run.attrs.metadata.GetValue('results')
        for stage in results:
            tags['stage_status:%s' % stage['name']] = stage['status']
            tags['stage_summary:%s' % stage['name']] = stage['summary']
        self._run.attrs.metadata.UpdateKeyDictWithDict(constants.METADATA_TAGS,
                                                       tags)

        # Some operations can only be performed if a valid version is available.
        try:
            self._run.GetVersionInfo()
            self.ArchiveResults(final_status)
            metadata_url = os.path.join(self.upload_url,
                                        constants.METADATA_JSON)
        except cbuildbot_run.VersionNotSetError:
            logging.error('A valid version was never set for this run. '
                          'Can not archive results.')
            metadata_url = ''

        results_lib.Results.Report(sys.stdout,
                                   current_version=(self._run.attrs.release_tag
                                                    or ''))

        # Upload goma log if used for BuildPackage and TestSimpleChrome.
        _UploadAndLinkGomaLogIfNecessary(
            'BuildPackages', self._run.config.name, self._run.options.goma_dir,
            self._run.options.goma_client_json,
            self._run.attrs.metadata.GetValueWithDefault('goma_tmp_dir'))
        _UploadAndLinkGomaLogIfNecessary(
            'TestSimpleChromeWorkflow', self._run.config.name,
            self._run.options.goma_dir, self._run.options.goma_client_json,
            self._run.attrs.metadata.GetValueWithDefault(
                'goma_tmp_dir_for_simple_chrome'))

        if self.buildstore.AreClientsReady():
            status_for_db = final_status

            # TODO(pprabhu): After BuildData and CBuildbotMetadata are merged,
            # remove this extra temporary object creation.
            # XXX:HACK We're creating a BuildData with an empty URL. Don't try to
            # MarkGathered this object.
            build_data = metadata_lib.BuildData(
                '', self._run.attrs.metadata.GetDict())
            # TODO(akeshet): Find a clearer way to get the "primary upload url" for
            # the metadata.json file. One alternative is _GetUploadUrls(...)[0].
            # Today it seems that element 0 of its return list is the primary upload
            # url, but there is no guarantee or unit test coverage of that.
            self.buildstore.FinishBuild(build_id,
                                        status=status_for_db,
                                        summary=build_data.failure_message,
                                        metadata_url=metadata_url)

            duration = self._GetBuildDuration()

            mon_fields = {
                'status': status_for_db,
                'build_config': self._run.config.name,
                'important': self._run.config.important
            }
            metrics.Counter(
                constants.MON_BUILD_COMP_COUNT).increment(fields=mon_fields)
            metrics.CumulativeSecondsDistribution(
                constants.MON_BUILD_DURATION).add(duration, fields=mon_fields)

            if self._run.options.sanity_check_build:
                metrics.Counter(
                    constants.MON_BUILD_SANITY_COMP_COUNT).increment(
                        fields=mon_fields)
                metrics.Gauge(
                    constants.MON_BUILD_SANITY_ID,
                    description=(
                        'The build number of the latest sanity build.  Used '
                        'for recovering the link to the latest failing build '
                        'in the alert when a sanity build fails.'),
                    field_spec=[
                        ts_mon.StringField('status'),
                        ts_mon.StringField('build_config'),
                        ts_mon.StringField('builder_name'),
                        ts_mon.BooleanField('important')
                    ]).set(self._run.buildnumber,
                           fields=dict(mon_fields,
                                       builder_name=self._run.GetBuilderName()))

            if config_lib.IsMasterCQ(self._run.config):
                self_destructed = self._run.attrs.metadata.GetValueWithDefault(
                    constants.SELF_DESTRUCTED_BUILD, False)
                mon_fields = {
                    'status': status_for_db,
                    'self_destructed': self_destructed
                }
                metrics.CumulativeSecondsDistribution(
                    constants.MON_CQ_BUILD_DURATION).add(duration,
                                                         fields=mon_fields)
                annotator_link = uri_lib.ConstructAnnotatorUri(build_id)
                logging.PrintBuildbotLink('Build annotator', annotator_link)

            # From this point forward, treat all exceptions as warnings.
            self._post_completion = True

            # Dump report about things we retry.
            retry_stats.ReportStats(sys.stdout)
Example #55
    def _DiscoverRequiredPayloads(self):
        """Find the payload definitions for the current build.

    This method finds the images for the current build, and for all builds we
    need deltas from, and decides exactly what payloads are needed.

    Returns:
      [<gspaths.Payload>...], [<PayloadTest>...]

      The list of payloads does NOT have URLs populated, and has not
      been tested for existence. delta payloads are NOT present if we are
      skipping them.

    Raises:
      BuildNotReady: If the current build doesn't seem to have all of it's
          images available yet. This commonly happens because the signer hasn't
          finished signing the current build.
      BuildCorrupt: If current or previous builds have unexpected images.
      ImageMissing: Raised if expected images are missing for previous builds.
    """
        payloads = []
        payload_tests = []

        try:
            # When discovering the images for our current build, they might not be
            # discoverable right away (GS eventual consistency). So, we retry.
            images = self._DiscoverSignedImages(self._build)
            test_image = self._DiscoverTestImage(self._build)

        except ImageMissing as e:
            # If the main build doesn't have the final build images, then it's
            # not ready.
            logging.info(e)
            raise BuildNotReady()

        _LogList('Images found', images + [test_image])

        # Add full payloads for PreMP and MP (as needed).
        for i in images:
            payloads.append(gspaths.Payload(tgt_image=i))

        # Add full test payload, and N2N test for it.
        full_test_payload = gspaths.Payload(tgt_image=test_image)
        payloads.append(full_test_payload)
        payload_tests.append(
            PayloadTest(full_test_payload, self._build.channel,
                        self._build.version))

        # Add n2n test delta.
        if not self._skip_delta_payloads:
            n2n_payload = gspaths.Payload(tgt_image=test_image,
                                          src_image=test_image)
            payloads.append(n2n_payload)
            payload_tests.append(PayloadTest(n2n_payload))

        # Add in the payloads GE wants us to generate.
        for source in self.GetPaygenJson(self._build.board,
                                         self._build.channel):
            source_build = gspaths.Build(version=source['chrome_os_version'],
                                         board=self._build.board,
                                         channel=self._build.channel,
                                         bucket=self._build.bucket)

            # Extract the source values we care about.
            logging.info('Considering: %s %s', source['delta_type'],
                         source_build)

            if not source['generate_delta'] and not source[
                    'full_payload_tests']:
                logging.warning('Skipping. No payloads or tests requested.')
                continue

            if not gspaths.VersionGreater(self._build.version,
                                          source_build.version):
                logging.warning('Skipping. Newer than current build.')
                continue

            source_images = self._DiscoverSignedImages(source_build)
            source_test_image = self._DiscoverTestImage(source_build)

            if not self._skip_delta_payloads and source['generate_delta']:
                # Generate the signed deltas.
                payloads.extend(
                    self._DiscoverRequiredDeltasBuildToBuild(
                        source_images, images + [test_image]))

                # Generate the test delta.
                test_payload = gspaths.Payload(tgt_image=test_image,
                                               src_image=source_test_image)
                payloads.append(test_payload)

                if source['delta_payload_tests']:
                    payload_tests.append(PayloadTest(test_payload))

            if source['full_payload_tests']:
                # Test the full payload against this source version.
                payload_tests.append(
                    PayloadTest(full_test_payload, source_build.channel,
                                source_build.version))

        for p in payloads:
            paygen_payload_lib.FillInPayloadUri(p)

        for t in payload_tests:
            paygen_payload_lib.FillInPayloadUri(t.payload)

        return payloads, payload_tests
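The three payload shapes assembled above differ only in which image fields are set on gspaths.Payload; side by side (the image variables stand in for the ones used above):

full = gspaths.Payload(tgt_image=test_image)                         # full install
n2n = gspaths.Payload(tgt_image=test_image, src_image=test_image)    # same-version delta
delta = gspaths.Payload(tgt_image=test_image,
                        src_image=source_test_image)                 # build-to-build delta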
Example #56
    def PerformStage(self):
        if (not (self._run.options.buildbot or self._run.options.remote_trybot)
                and self._run.options.clobber):
            if not commands.ValidateClobber(self._build_root):
                cros_build_lib.Die('--clobber in local mode must be approved.')

        # If we can't get a manifest out of it, then it's not usable and must be
        # clobbered.
        manifest = None
        delete_chroot = False
        if not self._run.options.clobber:
            try:
                manifest = git.ManifestCheckout.Cached(self._build_root,
                                                       search=False)
            except (KeyboardInterrupt, MemoryError, SystemExit):
                raise
            except Exception as e:
                # Either there is no repo there, or the manifest isn't usable.  If the
                # directory exists, log the exception for debugging reasons.  Either
                # way, the checkout needs to be wiped since it's in an unknown
                # state.
                if os.path.exists(self._build_root):
                    logging.warning('ManifestCheckout at %s is unusable: %s',
                                    self._build_root, e)
                delete_chroot = True

        # Clean mount points first to be safe about deleting.
        chroot_path = os.path.join(self._build_root,
                                   constants.DEFAULT_CHROOT_DIR)
        cros_sdk_lib.CleanupChrootMount(chroot=chroot_path)
        osutils.UmountTree(self._build_root)

        if not delete_chroot:
            delete_chroot = not self.CanReuseChroot(chroot_path)

        # If we're going to delete the chroot and we can use a snapshot instead,
        # try to revert.  If the revert succeeds, we don't need to delete after all.
        if delete_chroot and self.CanUseChrootSnapshotToDelete(chroot_path):
            delete_chroot = not self._RevertChrootToCleanSnapshot()

        # Re-mount chroot image if it exists so that subsequent steps can clean up
        # inside.
        if not delete_chroot and self._run.config.chroot_use_image:
            try:
                cros_sdk_lib.MountChroot(chroot=chroot_path, create=False)
            except cros_build_lib.RunCommandError as e:
                logging.error(
                    'Unable to mount chroot under %s.  Deleting chroot.  '
                    'Error: %s', self._build_root, e)
                delete_chroot = True

        if manifest is None:
            self._DeleteChroot()
            repository.ClearBuildRoot(self._build_root,
                                      self._run.options.preserve_paths)
        else:
            tasks = [
                self._BuildRootGitCleanup, self._WipeOldOutput,
                self._DeleteArchivedTrybotImages,
                self._DeleteArchivedPerfResults,
                self._DeleteAutotestSitePackages
            ]
            if self._run.options.chrome_root:
                tasks.append(self._DeleteChromeBuildOutput)
            if delete_chroot:
                tasks.append(self._DeleteChroot)
            else:
                tasks.append(self._CleanChroot)
            if self._run.options.workspace:
                tasks.append(self._CleanWorkspace)

            # CancelObsoleteSlaveBuilds, if there are slave builds to cancel.
            if self._run.config.slave_configs:
                tasks.append(self.CancelObsoleteSlaveBuilds)

            parallel.RunParallelSteps(tasks)

        # If chroot.img still exists after everything is cleaned up, it means we're
        # planning to reuse it. This chroot was created by the previous run, so its
        # creation isn't affected by any potential changes in the current run.
        # Therefore, if this run fails, having the subsequent run revert to this
        # snapshot will still produce a clean chroot.  If this run succeeds, the
        # next run will reuse the chroot without needing to revert it.  Thus, taking
        # a snapshot now should be correct regardless of whether this run will
        # ultimately succeed or not.
        if os.path.exists(chroot_path + '.img'):
            self._CreateCleanSnapshot()
Example #57
def main(argv):
    opts = ParseArgs(argv)

    cros_build_lib.AssertInsideChroot()

    sysroot = opts.sysroot or cros_build_lib.GetSysroot(opts.board)
    package_blacklist = set()
    if opts.package_blacklist:
        package_blacklist |= set(opts.package_blacklist.split())

    packages = set()
    # The list of packages to test can be passed as a file containing a
    # space-separated list of package names.
    # This is used by the builder to test only the packages that were uprevved.
    if opts.package_file and os.path.exists(opts.package_file):
        packages = set(osutils.ReadFile(opts.package_file).split())

    if opts.packages:
        packages |= set(opts.packages.split())

    # If no packages were specified, use all testable packages.
    if not (opts.packages or opts.package_file) and not opts.empty_sysroot:
        workon = workon_helper.WorkonHelper(sysroot)
        packages = (workon.InstalledWorkonAtoms() if opts.installed
                    else set(workon.ListAtoms(use_all=True)))

    if opts.empty_sysroot:
        packages |= determine_board_packages(sysroot, BOARD_VIRTUAL_PACKAGES)
        workon = workon_helper.WorkonHelper(sysroot)
        workon_packages = set(workon.ListAtoms(use_all=True))
        packages &= workon_packages

    for cp in packages & package_blacklist:
        logging.info('Skipping blacklisted package %s.', cp)

    packages = packages - package_blacklist
    pkg_with_test = portage_util.PackagesWithTest(sysroot, packages)

    if packages - pkg_with_test:
        logging.warning('The following packages do not have tests:\n  %s',
                        '\n  '.join(sorted(packages - pkg_with_test)))

    if not pkg_with_test:
        if opts.testable_packages_optional:
            logging.warning('No testable packages found!')
            return 0
        logging.error('No testable packages found!')
        return 1

    if opts.pretend:
        print('\n'.join(sorted(pkg_with_test)))
        return 0

    env = None
    if opts.nowithdebug:
        use_flags = os.environ.get('USE', '')
        use_flags += ' -cros-debug'
        env = {'USE': use_flags}

    if opts.empty_sysroot:
        try:
            chroot_util.Emerge(list(IMPLICIT_TEST_DEPS),
                               sysroot,
                               rebuild_deps=False,
                               use_binary=False)
            chroot_util.Emerge(list(pkg_with_test),
                               sysroot,
                               rebuild_deps=False,
                               use_binary=False)
        except cros_build_lib.RunCommandError:
            logging.error('Failed building dependencies for unittests.')
            return 1

    try:
        chroot_util.RunUnittests(sysroot,
                                 pkg_with_test,
                                 extra_env=env,
                                 jobs=opts.jobs)
    except cros_build_lib.RunCommandError:
        logging.error('Unittests failed.')
        return 1
Example #58
    def GetReportMetadataDict(builder_run,
                              get_statuses_from_slaves,
                              config=None,
                              stage=None,
                              final_status=None,
                              completion_instance=None,
                              child_configs_list=None):
        """Return a metadata dictionary summarizing a build.

    This method replaces code that used to exist in the ArchivingStageMixin
    class from cbuildbot_stage. It contains all the Report-stage-time
    metadata construction logic. The logic here is intended to be gradually
    refactored out so that the metadata is constructed gradually by the
    stages that are responsible for pieces of data, as they run.

    Args:
      builder_run: BuilderRun instance for this run.
      get_statuses_from_slaves: If True, status information of slave
                                builders will be recorded.
      config: The build config for this run.  Defaults to self._run.config.
      stage: The stage name that this metadata file is being uploaded for.
      final_status: Whether the build passed or failed. If None, the build
                    will be treated as still running.
      completion_instance: The stage instance that was used to wait for slave
                           completion. Used to add slave build information to
                           master builder's metadata. If None, no such status
                           information will be included. It not None, this
                           should be a derivative of
                           MasterSlaveSyncCompletionStage.
      child_configs_list: The list of child config metadata.  If specified it
                          should be added to the metadata.

    Returns:
       A metadata dictionary suitable to be json-serialized.
    """
        config = config or builder_run.config
        start_time = results_lib.Results.start_time
        current_time = datetime.datetime.now()
        start_time_stamp = cros_build_lib.UserDateTimeFormat(
            timeval=start_time)
        current_time_stamp = cros_build_lib.UserDateTimeFormat(
            timeval=current_time)
        duration = '%s' % (current_time - start_time, )

        metadata = {
            'status': {
                'current-time': current_time_stamp,
                'status': final_status if final_status else 'running',
                'summary': stage or '',
            },
            'time': {
                'start': start_time_stamp,
                'finish': current_time_stamp if final_status else '',
                'duration': duration,
            }
        }

        metadata['results'] = []
        for entry in results_lib.Results.Get():
            timestr = datetime.timedelta(seconds=math.ceil(entry.time))
            if entry.result in results_lib.Results.NON_FAILURE_TYPES:
                status = constants.FINAL_STATUS_PASSED
            else:
                status = constants.FINAL_STATUS_FAILED
            metadata['results'].append({
                'name': entry.name,
                'status': status,
                # The result might be a custom exception.
                'summary': str(entry.result),
                'duration': '%s' % timestr,
                'board': entry.board,
                'description': entry.description,
                'log': builder_run.ConstructDashboardURL(stage=entry.name),
            })

        if child_configs_list:
            metadata['child-configs'] = child_configs_list

        # If we were a CQ master, then include a summary of the status of
        # slave CQ builders in the metadata.
        if get_statuses_from_slaves:
            statuses = completion_instance.GetSlaveStatuses()
            if not statuses:
                logging.warning(
                    'completion_instance did not have any statuses '
                    'to report. Will not add slave status to metadata.')

            metadata['slave_targets'] = {}
            for builder, status in statuses.iteritems():
                metadata['slave_targets'][builder] = status.AsFlatDict()

        return metadata
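For reference, the dictionary built above has roughly this shape; all values below are invented for illustration:

{
    'status': {'current-time': '...', 'status': 'passed', 'summary': 'Report'},
    'time': {'start': '...', 'finish': '...', 'duration': '1:23:45.678901'},
    'results': [
        {'name': 'BuildPackages', 'status': 'passed',
         'summary': 'Stage was successful', 'duration': '0:41:00',
         'board': 'eve', 'description': None,
         'log': 'https://.../BuildPackages'},
    ],
    # Present only when get_statuses_from_slaves is True:
    'slave_targets': {'eve-paladin': {'status': 'pass'}},
    # Present only when child_configs_list is passed:
    'child-configs': [],
}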
Example #59
def Create(target, run_configs, accept_licenses):
  """Create a sysroot.

  This entry point is the subset of the full setup process that does the
  creation and configuration of a sysroot, including installing portage.

  Args:
    target (build_target.BuildTarget): The build target being installed in the
      sysroot being created.
    run_configs (SetupBoardRunConfig): The run configs.
    accept_licenses (str|None): The additional licenses to accept.
  """
  cros_build_lib.AssertInsideChroot()

  sysroot = sysroot_lib.Sysroot(target.root)

  if sysroot.Exists() and not run_configs.force and not run_configs.quiet:
    logging.warning('Board output directory already exists: %s\n'
                    'Use --force to clobber the board root and start again.',
                    sysroot.path)

  # Override regen_configs setting to force full setup run if the sysroot does
  # not exist.
  run_configs.regen_configs = run_configs.regen_configs and sysroot.Exists()

  # Make sure the chroot is fully up to date before we start unless the
  # chroot update is explicitly disabled.
  if run_configs.update_chroot:
    logging.info('Updating chroot.')
    update_chroot = [os.path.join(constants.CROSUTILS_DIR, 'update_chroot'),
                     '--toolchain_boards', target.name]
    update_chroot += run_configs.GetUpdateChrootArgs()
    try:
      cros_build_lib.run(update_chroot)
    except cros_build_lib.RunCommandError:
      raise UpdateChrootError('Error occurred while updating the chroot. '
                              'See the logs for more information.')

  # Delete old sysroot to force a fresh start if requested.
  if sysroot.Exists() and run_configs.force:
    sysroot.Delete(background=True)

  # Step 1: Create folders.
  # Dependencies: None.
  # Create the skeleton.
  logging.info('Creating sysroot directories.')
  _CreateSysrootSkeleton(sysroot)

  # Step 2: Standalone configurations.
  # Dependencies: Folders exist.
  # Install main, board setup, and user make.conf files.
  logging.info('Installing configurations into sysroot.')
  _InstallConfigs(sysroot, target)

  # Step 3: Portage configurations.
  # Dependencies: make.conf.board_setup.
  # Create the command wrappers, choose profile, and make.conf.board.
  # Refresh the workon symlinks to compensate for crbug.com/679831.
  logging.info('Setting up portage in the sysroot.')
  _InstallPortageConfigs(sysroot, target, accept_licenses,
                         run_configs.local_build)

  # Developer Experience Step: Set default board (if requested) to allow
  # running later commands without needing to pass the --board argument.
  if run_configs.set_default:
    cros_build_lib.SetDefaultBoard(target.name)

  return sysroot
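A hypothetical invocation of Create(), using the service-layer types named in the docstring above (the constructor arguments are assumptions for this sketch):

target = build_target.BuildTarget('eve')          # board name assumed
run_configs = SetupBoardRunConfig(force=False, update_chroot=True)
sysroot = Create(target, run_configs, accept_licenses=None)
logging.info('Sysroot created at %s', sysroot.path)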
Example #60
    def ArchiveResults(self, final_status):
        """Archive our build results.

    Args:
      final_status: constants.BUILDER_STATUS_PASSED or
                    constants.BUILDER_STATUS_FAILED
    """
        # Make sure local archive directory is prepared, if it was not already.
        if not os.path.exists(self.archive_path):
            self.archive.SetupArchivePath()

        # Upload metadata, and update the pass/fail streak counter for the main
        # run only. These aren't needed for the child builder runs.
        self.UploadMetadata(export=True)
        self._UpdateRunStreak(self._run, final_status)

        # Alert if the Pre-CQ has infra failures.
        if final_status == constants.BUILDER_STATUS_FAILED:
            self._SendPreCQInfraAlertMessageIfNeeded()

        build_identifier, db = self._run.GetCIDBHandle()
        build_id = build_identifier.cidb_id
        buildbucket_id = build_identifier.buildbucket_id
        # Iterate through each builder run, whether there is just the main one
        # or multiple child builder runs.
        for builder_run in self._run.GetUngroupedBuilderRuns():
            if db is not None:
                timeline = self._UploadBuildStagesTimeline(
                    builder_run, buildbucket_id)
                logging.PrintBuildbotLink('Build stages timeline', timeline)

                timeline = self._UploadSlavesTimeline(builder_run,
                                                      build_identifier)
                if timeline is not None:
                    logging.PrintBuildbotLink('Slaves timeline', timeline)

            if build_id is not None:
                details_link = uri_lib.ConstructViceroyBuildDetailsUri(
                    build_id)
                logging.PrintBuildbotLink('Build details', details_link)

            # Generate links to archived artifacts if there are any.  All the
            # archived artifacts for one run/config are in one location, so the link
            # is only specific to each run/config.  In theory multiple boards could
            # share that archive, but in practice it is usually one board.  A
            # run/config without a board will also usually not have artifacts to
            # archive, but that restriction is not assumed here.
            self._LinkArtifacts(builder_run)

            # Check if the builder_run is tied to any boards and if so get all
            # upload urls.
            if final_status == constants.BUILDER_STATUS_PASSED:
                # Update the LATEST files if the build passed.
                try:
                    upload_urls = self._GetUploadUrls('LATEST-*',
                                                      builder_run=builder_run)
                except portage_util.MissingOverlayError as e:
                    # If the build failed prematurely, some overlays might be
                    # missing. Ignore them in this stage.
                    logging.warning(e)
                else:
                    if upload_urls:
                        archive = builder_run.GetArchive()
                        archive.UpdateLatestMarkers(
                            builder_run.manifest_branch,
                            builder_run.options.debug_forced,
                            upload_urls=upload_urls)