Beispiel #1
0
def ReportResults(symbols, failed_list):
    """Log a summary of the symbol uploading.

    This has the side effect of fully consuming the symbols iterator.

    Args:
      symbols: An iterator of SymbolFiles to be uploaded.
      failed_list: A filename at which to write out a list of our failed
        uploads.

    Returns:
      The number of symbols not uploaded.
    """
    counts = {
        SymbolFile.INITIAL: 0,
        SymbolFile.UPLOADED: 0,
        SymbolFile.DUPLICATE: 0,
        SymbolFile.ERROR: 0,
    }
    failures = []

    # Single pass over the iterator: tally every status and remember the
    # symbols that never made it to the server.
    for sym in symbols:
        counts[sym.status] += 1
        if sym.status in (SymbolFile.INITIAL, SymbolFile.ERROR):
            failures.append(sym)

    # Report retry numbers.
    _, _, retries = retry_stats.CategoryStats(UPLOAD_STATS)
    if retries:
        logging.warning('%d upload retries performed.', retries)

    logging.info('Uploaded %(uploaded)d, Skipped %(duplicate)d duplicates.',
                 counts)

    error_count = counts[SymbolFile.ERROR]
    if error_count:
        logging.PrintBuildbotStepWarnings()
        logging.warning('%d non-recoverable upload errors', error_count)

    skipped_count = counts[SymbolFile.INITIAL]
    if skipped_count:
        logging.PrintBuildbotStepWarnings()
        logging.warning(
            '%d upload(s) were skipped because of excessive errors',
            skipped_count)

    # Record the paths of everything we failed to upload, if requested.
    if failed_list is not None:
        with open(failed_list, 'w') as fl:
            fl.writelines('%s\n' % sym.display_path for sym in failures)

    return skipped_count + error_count
Beispiel #2
0
    def PerformStage(self):
        """Ensure a usable chroot exists, replacing or creating it as needed."""
        chroot_path = os.path.join(self._build_root,
                                   constants.DEFAULT_CHROOT_DIR)
        # NOTE(review): this tests the build root, not chroot_path itself --
        # confirm that is intentional (the creation branch below keys off it).
        chroot_exists = os.path.isdir(self._build_root)
        replace = self._run.config.chroot_replace or self.force_chroot_replace
        pre_ver = None

        if chroot_exists and not replace:
            # Make sure the chroot has a valid version before we update it.
            pre_ver = cros_sdk_lib.GetChrootVersion(chroot_path)
            if pre_ver is None:
                # No readable version means the chroot is broken; rebuild it.
                logging.PrintBuildbotStepText('Replacing broken chroot')
                logging.PrintBuildbotStepWarnings()
                replace = True

        if not chroot_exists or replace:
            use_sdk = (self._run.config.use_sdk
                       and not self._run.options.nosdk)
            # A freshly created chroot has no meaningful "before" version.
            pre_ver = None
            commands.MakeChroot(buildroot=self._build_root,
                                replace=replace,
                                use_sdk=use_sdk,
                                chrome_root=self._run.options.chrome_root,
                                extra_env=self._portage_extra_env,
                                use_image=self._run.config.chroot_use_image,
                                cache_dir=self._run.options.cache_dir)

        # Surface the version change (or final version) on the buildbot step.
        post_ver = cros_sdk_lib.GetChrootVersion(chroot_path)
        if pre_ver is not None and pre_ver != post_ver:
            logging.PrintBuildbotStepText('%s->%s' % (pre_ver, post_ver))
        else:
            logging.PrintBuildbotStepText(post_ver)
Beispiel #3
0
    def HandleFailure(self, failing, inflight, no_stat):
        """Handle a build failure.

        This function is called whenever the cbuildbot run fails.
        For the master, this will be called when any slave fails or times
        out. This function may be overridden by subclasses.

        Args:
          failing: The names of the failing builders.
          inflight: The names of the builders that are still running.
          no_stat: Set of builder names of slave builders that had status None.
        """
        if failing or inflight or no_stat:
            logging.PrintBuildbotStepWarnings()

        # Emit one multi-line warning per non-empty category, in a fixed
        # order: failed, still-running, then never-started.
        reports = (
            (failing,
             'The following builders failed with this manifest:',
             'Please check the logs of the failing builders for details.'),
            (inflight,
             'The following builders took too long to finish:',
             'Please check the logs of these builders for details.'),
            (no_stat,
             'The following builders did not start or failed prematurely:',
             'Please check the logs of these builders for details.'),
        )
        for builders, header, footer in reports:
            if builders:
                logging.warning('\n'.join(
                    [header, ', '.join(sorted(builders)), footer]))
Beispiel #4
0
    def PerformStage(self):
        """Build and archive Chrome SDK artifacts, then smoke-test the SDK."""
        if platform.dist()[-1] == 'lucid':
            # Chrome no longer builds on Lucid. See crbug.com/276311
            print('Ubuntu lucid is no longer supported.')
            print('Please upgrade to Ubuntu Precise.')
            logging.PrintBuildbotStepWarnings()
            return

        # These steps are independent of each other, so run them in parallel.
        steps = [
            self._BuildAndArchiveChromeSysroot, self._ArchiveChromeEbuildEnv,
            self._GenerateAndUploadMetadata
        ]
        with self.ArtifactUploader(self._upload_queue, archive=False):
            parallel.RunParallelSteps(steps)

            if self._run.config.chrome_sdk_build_chrome:
                # Exercise the freshly archived SDK by building and deploying
                # Chrome with it, using a throwaway cache directory.
                with osutils.TempDir(prefix='chrome-sdk-cache') as tempdir:
                    cache_dir = os.path.join(tempdir, 'cache')
                    extra_args = [
                        '--cwd', self.chrome_src, '--sdk-path',
                        self.archive_path
                    ]
                    sdk_cmd = commands.ChromeSDK(
                        self._build_root,
                        self._current_board,
                        chrome_src=self.chrome_src,
                        goma=self._run.config.chrome_sdk_goma,
                        extra_args=extra_args,
                        cache_dir=cache_dir)
                    self._BuildChrome(sdk_cmd)
                    self._TestDeploy(sdk_cmd)
Beispiel #5
0
  def WaitUntilReady(self):
    """Decide if we should run the unittest stage."""
    # Branches from before CROS_RUN_UNITTESTS lack the tool entirely; see
    # crbug.com/937328.
    if self.AfterLimit(CROS_RUN_UNITTESTS):
      return True

    logging.PrintBuildbotStepWarnings()
    logging.warning('cros_run_unit_tests does not exist on this branch.')
    return False
    def _HandleExceptionAsWarning(cls, exc_info, retrying=False):
        """Treat an exception as a warning instead of a stage failure.

        Use instead of HandleStageException; the ForgivingBuilderStage's use
        this to turn any exception into a warning rather than a failure.

        Args:
          exc_info: Exception info for the error being handled (typically a
            sys.exc_info() triple).
          retrying: Whether the stage is going to be retried.

        Returns:
          A (results_lib.Results.FORGIVEN, description, retrying) tuple.
        """
        desc = cls._StringifyException(exc_info)
        logging.PrintBuildbotStepWarnings()
        logging.warning(desc)
        return results_lib.Results.FORGIVEN, desc, retrying
Beispiel #7
0
        def RunCleanupCommands(project, cwd):
            """Clean one project checkout, wiping it entirely if git failed.

            Runs under a shared read lock and upgrades to a write lock only if
            the checkout must be deleted.  Uses closure state: lock_path,
            prune_all, deleted_objdirs and self.directory.

            Args:
              project: repo project name (its path under project-objects).
              cwd: Absolute path of the project's working tree.
            """
            with locking.FileLock(lock_path,
                                  verbose=False).read_lock() as lock:
                # Calculate where the git repository is stored.
                relpath = os.path.relpath(cwd, self.directory)
                projects_dir = os.path.join(self.directory, '.repo',
                                            'projects')
                project_objects_dir = os.path.join(self.directory, '.repo',
                                                   'project-objects')
                repo_git_store = '%s.git' % os.path.join(projects_dir, relpath)
                repo_obj_store = '%s.git' % os.path.join(
                    project_objects_dir, project)

                try:
                    if os.path.isdir(cwd):
                        git.CleanAndDetachHead(cwd)

                    if os.path.isdir(repo_git_store):
                        git.GarbageCollection(repo_git_store,
                                              prune_all=prune_all)
                except cros_build_lib.RunCommandError as e:
                    result = e.result
                    logging.PrintBuildbotStepWarnings()
                    logging.warning('\n%s', result.error)

                    # If there's no repository corruption, just delete the index.
                    corrupted = git.IsGitRepositoryCorrupted(repo_git_store)
                    # Deleting checkouts requires exclusive access.
                    lock.write_lock()
                    logging.warning('Deleting %s because %s failed', cwd,
                                    result.cmd)
                    osutils.RmDir(cwd, ignore_missing=True, sudo=True)
                    if corrupted:
                        # Looks like the object dir is corrupted. Delete it.
                        # Flag the deletion via the shared deleted_objdirs event.
                        deleted_objdirs.set()
                        for store in (repo_git_store, repo_obj_store):
                            logging.warning('Deleting %s as well', store)
                            osutils.RmDir(store, ignore_missing=True)

                # TODO: Make the deletions below smarter. Look to see what exists,
                # instead of just deleting things we think might be there.

                # Delete all branches created by cbuildbot.
                if os.path.isdir(repo_git_store):
                    cmd = ['branch', '-D'] + list(constants.CREATED_BRANCHES)
                    # Ignore errors, since we delete branches without checking existence.
                    git.RunGit(repo_git_store, cmd, error_code_ok=True)

                if os.path.isdir(cwd):
                    # Above we deleted refs/heads/<branch> for each created branch, now we
                    # need to delete the bare ref <branch> if it was created somehow.
                    for ref in constants.CREATED_BRANCHES:
                        # Ignore errors, since we delete branches without checking
                        # existence.
                        git.RunGit(cwd, ['update-ref', '-d', ref],
                                   error_code_ok=True)
Beispiel #8
0
  def WaitUntilReady(self):
    """Wait until payloads and test artifacts are ready or not.

    Returns:
      True if the artifacts were uploaded and the stage can run, else False.
    """
    # Wait for UploadHWTestArtifacts to generate and upload the artifacts.
    if not self.GetParallel('test_artifacts_uploaded',
                            pretty_name='payloads and test artifacts'):
      # NOTE: every other call site invokes PrintBuildbotStepWarnings() with
      # no argument (it does not take a message); the detail belongs in the
      # warning log below.
      logging.PrintBuildbotStepWarnings()
      # Use lazy %-args so formatting only happens if the record is emitted.
      logging.warning('Cannot run %s because UploadTestArtifacts failed. '
                      'See UploadTestArtifacts for details.', self.stage_name)
      return False

    return True
def GrabRemotePackageIndex(binhost_url, **kwargs):
    """Grab the latest binary package database from the specified URL.

    Supports http(s) and gs:// URLs; any other scheme yields None.

    Args:
      binhost_url: Base URL of remote packages (PORTAGE_BINHOST).
      kwargs: Additional RunCommand parameters.

    Returns:
      A PackageIndex object, if the Packages file can be retrieved. If the
      packages file cannot be retrieved, then None is returned.
    """
    url = '%s/Packages' % binhost_url.rstrip('/')
    pkgindex = PackageIndex()
    if binhost_url.startswith('http'):
        try:
            f = _RetryUrlOpen(url)
        except urllib.error.HTTPError as e:
            if e.code in HTTP_FORBIDDEN_CODES:
                # Access problems are worth surfacing on the buildbot step.
                logging.PrintBuildbotStepWarnings()
                logging.error('Cannot GET %s: %s', url, e)
                return None
            # Not found errors are normal if old prebuilts were cleaned out.
            if e.code in HTTP_NOT_FOUND_CODES:
                return None
            raise
    elif binhost_url.startswith('gs://'):
        try:
            gs_context = gs.GSContext()
            output = gs_context.Cat(url, encoding='utf-8', **kwargs)
        except (cros_build_lib.RunCommandError, gs.GSNoSuchKey) as e:
            logging.PrintBuildbotStepWarnings()
            logging.error('Cannot GET %s: %s', url, e)
            return None
        # Present the fetched text through a file-like object for Read().
        f = io.StringIO(output)
    else:
        # Unrecognized URL scheme.
        return None
    pkgindex.Read(f)
    pkgindex.header.setdefault('URI', binhost_url)
    f.close()
    return pkgindex
    def PerformStage(self):
        """Collect a 'perf' profile and convert it into the AFDO format."""
        super(AFDODataGenerateStage, self).PerformStage()

        board = self._current_board
        if not afdo.CanGenerateAFDOData(board):
            # Only some boards can produce their own profile; bail quietly.
            logging.warning('Board %s cannot generate its own AFDO profile.',
                            board)
            return

        arch = self._GetCurrentArch()
        buildroot = self._build_root
        gs_context = gs.GSContext()
        # Resolve the Chrome package version the profile corresponds to.
        cpv = portage_util.BestVisible(constants.CHROME_CP,
                                       buildroot=buildroot)
        afdo_file = None

        # Generation of AFDO could fail for different reasons.
        # We will ignore the failures and let the master PFQ builder try
        # to find an older AFDO profile.
        try:
            if afdo.WaitForAFDOPerfData(cpv, arch, buildroot, gs_context):
                afdo_file = afdo.GenerateAFDOData(cpv, arch, board, buildroot,
                                                  gs_context)
                assert afdo_file
                logging.info('Generated %s AFDO profile %s', arch, afdo_file)
            else:
                # perf data never appeared; report it as missing.
                raise afdo.MissingAFDOData(
                    'Could not find current "perf" profile. '
                    'Master PFQ builder will try to use stale '
                    'AFDO profile.')
        # Will let system-exiting exceptions through.
        except Exception:
            logging.PrintBuildbotStepWarnings()
            logging.warning('AFDO profile generation failed with exception ',
                            exc_info=True)

            # Email the AFDO alert recipients with a link back to this build.
            alert_msg = ('Please triage. This will become a fatal error.\n\n'
                         'arch=%s buildroot=%s\n\nURL=%s' %
                         (arch, buildroot, self._run.ConstructDashboardURL()))
            subject_msg = (
                'Failure in generation of AFDO Data for builder %s' %
                self._run.config.name)
            alerts.SendEmailLog(subject_msg,
                                afdo.AFDO_ALERT_RECIPIENTS,
                                server=alerts.SmtpServer(
                                    constants.GOLO_SMTP_SERVER),
                                message=alert_msg)
            # Re-raise whatever exception we got here. This stage will only
            # generate a warning but we want to make sure the warning is
            # generated.
            raise
Beispiel #11
0
def AdjustSymbolFileSize(symbol, tempdir, file_limit):
    """Examine symbols files for size problems, and reduce if needed.

    If the symbols size is too big, strip out the call frame info.  The CFI
    is unnecessary for 32bit x86 targets where the frame pointer is used (as
    all of ours have) and it accounts for over half the size of the symbols
    uploaded.

    Stripped files will be created inside tempdir, and will be the callers
    responsibility to clean up.

    We also warn, if a symbols file is still too large after stripping.

    Args:
      symbol: SymbolFile instance to be examined and modified as needed.
      tempdir: A temporary directory we can create files in that the caller
               will clean up.
      file_limit: We only strip files which are larger than this limit.

    Returns:
      SymbolFile instance (original or modified as needed)
    """
    file_size = symbol.FileSize()

    if file_limit and symbol.FileSize() > file_limit:
        with tempfile.NamedTemporaryFile(prefix='upload_symbols',
                                         bufsize=0,
                                         dir=tempdir,
                                         delete=False) as temp_sym_file:
            # Copy the symbol file over, dropping all 'STACK CFI' records.
            # Stream line by line and close the source promptly instead of
            # readlines() on a leaked file handle, as the original code did.
            # NOTE(review): 'rb' lines vs. a str prefix assumes Python 2
            # (as does 'bufsize' above); use b'STACK CFI' if ported to 3.
            with open(symbol.file_name, 'rb') as src:
                temp_sym_file.writelines(
                    line for line in src if not line.startswith('STACK CFI'))

            original_file_size = file_size
            symbol.file_name = temp_sym_file.name
            file_size = symbol.FileSize()

            logging.warning('stripped CFI for %s reducing size %s > %s',
                            symbol.display_name, original_file_size, file_size)

    # Hopefully the crash server will let it through.  But it probably won't.
    # Not sure what the best answer is in this case.
    if file_size >= CRASH_SERVER_FILE_LIMIT:
        logging.PrintBuildbotStepWarnings()
        logging.warning(
            'upload file %s is awfully large, risking rejection by '
            'the symbol server (%s > %s)', symbol.display_path, file_size,
            CRASH_SERVER_FILE_LIMIT)

    return symbol
Beispiel #12
0
 def HandleApplyFailures(self, failures):
   """Handle the case where patches fail to apply.

   Args:
     failures: The patch failures encountered while applying changes.
   """
   if self._run.config.pre_cq:
     # Let the PreCQSync stage handle this failure. The PreCQSync stage will
     # comment on CLs with the appropriate message when they fail to apply.
     #
     # WARNING: For manifest patches, the Pre-CQ attempts to apply external
     # patches to the internal manifest, and this means we may flag a conflict
     # here even if the patch applies cleanly. TODO(davidjames): Fix this.
     logging.PrintBuildbotStepWarnings()
     # Bug fix: the original called .join() on the *format string*, which
     # used the message as the separator and never substituted %s.  Log the
     # message once with the newline-joined failures as the lazy argument.
     logging.error('Failed applying patches: %s',
                   '\n'.join(str(x) for x in failures))
   else:
     PatchChangesStage.HandleApplyFailures(self, failures)
    def PerformStage(self):
        """Generate debug symbols and upload debug.tgz.

        Generates Chrome OS breakpad symbols, then Android symbols when the
        build ships them, uploads the debug tarballs, and optionally pushes
        symbols to the crash server.
        """
        buildroot = self._build_root
        board = self._current_board

        # Generate breakpad symbols of Chrome OS binaries.
        commands.GenerateBreakpadSymbols(buildroot,
                                         board,
                                         self._run.options.debug_forced,
                                         chroot_args=ChrootArgs(
                                             self._run.options),
                                         extra_env=self._portage_extra_env)

        # Download android symbols (if this build has them), and Generate
        # breakpad symbols of Android binaries. This must be done after
        # GenerateBreakpadSymbols because it clobbers the output
        # directory.
        symbols_file = self.DownloadAndroidSymbols()

        if symbols_file:
            try:
                commands.GenerateAndroidBreakpadSymbols(
                    buildroot,
                    board,
                    symbols_file,
                    chroot_args=ChrootArgs(self._run.options),
                    extra_env=self._portage_extra_env)
            except failures_lib.BuildScriptFailure:
                # Android breakpad symbol preparation is expected to work in
                # modern branches.
                if self.AfterLimit(ANDROID_BREAKPAD):
                    raise

                # For older branches, we only process them on a best effort basis.
                logging.PrintBuildbotStepWarnings()
                logging.warning('Preparing Android symbols failed, ignoring..')

        # Upload them.
        self.UploadDebugTarball()

        # Upload debug/breakpad tarball.
        self.UploadDebugBreakpadTarball()

        # Upload them to crash server.
        if self._run.config.upload_symbols:
            self.UploadSymbols(buildroot, board)
Beispiel #14
0
 def PerformStage(self):
   """Run unit tests, downgrading any failure to a buildbot warning."""
   extra_env = {}
   if self._run.config.useflags:
     # Propagate the config's USE flags into the test environment.
     extra_env['USE'] = ' '.join(self._run.config.useflags)
   r = ' Reached UnitTestStage timeout.'
   with timeout_util.Timeout(self.UNIT_TEST_TIMEOUT, reason_message=r):
     try:
       commands.RunUnitTests(
           self._build_root,
           self._current_board,
           blacklist=self._run.config.unittest_blacklist,
           build_stage=self._run.config.build_packages,
           chroot_args=ChrootArgs(self._run.options),
           extra_env=extra_env)
     except failures_lib.BuildScriptFailure:
       # Deliberately swallow test failures; see crbug.com/936123.
       logging.PrintBuildbotStepWarnings()
       logging.warning('Unittests failed. Ignored crbug.com/936123.')
Beispiel #15
0
    def PerformStage(self):
        """Wait for payloads to be staged and upload the au control files."""
        # Wait for UploadHWTestArtifacts to generate the payloads.
        if not self.GetParallel('delta_payloads_generated',
                                pretty_name='delta payloads'):
            # The payloads never showed up; warn and skip this stage's work.
            logging.PrintBuildbotStepText('Missing delta payloads.')
            logging.PrintBuildbotStepWarnings()
            logging.warning(
                'Cannot run HWTest because UploadTestArtifacts failed. '
                'See UploadTestArtifacts for details.')
            return

        # Build the autoupdate test control-file tarball and queue its upload.
        with osutils.TempDir() as tempdir:
            tarball = commands.BuildAUTestTarball(self._build_root,
                                                  self._current_board, tempdir,
                                                  self.version,
                                                  self.upload_url)
            self.UploadArtifact(tarball)

        super(AUTestStage, self).PerformStage()
Beispiel #16
0
  def _ArchiveChromeEbuildEnv(self):
    """Generate and upload Chrome ebuild environment.

    Raises:
      artifact_stages.NothingToArchiveException: No Chrome package was found.
    """
    pattern = os.path.join(self._pkg_dir, constants.CHROME_CP) + '-*'
    pkg_dirs = glob.glob(pattern)
    if not pkg_dirs:
      raise artifact_stages.NothingToArchiveException(
          'Failed to find package %s' % constants.CHROME_CP)
    if len(pkg_dirs) > 1:
      logging.PrintBuildbotStepWarnings()
      logging.warning('Expected one package for %s, found %d',
                      constants.CHROME_CP, len(pkg_dirs))

    # With multiple candidates, pick the lexicographically newest directory.
    chrome_dir = max(pkg_dirs)
    env_bzip = os.path.join(chrome_dir, 'environment.bz2')
    with osutils.TempDir(prefix='chrome-sdk-stage') as tempdir:
      # Convert from bzip2 to tar format.
      bzip2 = cros_build_lib.FindCompressor(cros_build_lib.COMP_BZIP2)
      env_file = os.path.join(tempdir, constants.CHROME_ENV_FILE)
      cros_build_lib.RunCommand(
          [bzip2, '-d', env_bzip, '-c'],
          log_stdout_to_file=env_file)
      env_tar = os.path.join(self.archive_path, constants.CHROME_ENV_TAR)
      cros_build_lib.CreateTarball(env_tar, tempdir)
      self._upload_queue.put([os.path.basename(env_tar)])
Beispiel #17
0
    def PerformStage(self):
        """Update (or create) the chroot and report its version transition."""
        # This prepares depot_tools in the source tree, in advance.
        self.DepotToolsEnsureBootstrap()

        chroot_path = os.path.join(self._build_root,
                                   constants.DEFAULT_CHROOT_DIR)
        replace = self._run.config.chroot_replace or self.force_chroot_replace
        pre_ver = post_ver = None
        if os.path.isdir(self._build_root) and not replace:
            try:
                pre_ver = cros_sdk_lib.GetChrootVersion(chroot=chroot_path)
                if pre_ver is not None:
                    # Only run the upgrade hooks against a versioned chroot.
                    commands.RunChrootUpgradeHooks(
                        self._build_root,
                        chrome_root=self._run.options.chrome_root,
                        extra_env=self._portage_extra_env)
            except failures_lib.BuildScriptFailure:
                # NOTE(review): 'replace' is not set to True here, so a broken
                # chroot is only rebuilt below if chroot_path is missing --
                # confirm this is intentional.
                logging.PrintBuildbotStepText('Replacing broken chroot')
                logging.PrintBuildbotStepWarnings()

        if not os.path.isdir(chroot_path) or replace:
            use_sdk = (self._run.config.use_sdk
                       and not self._run.options.nosdk)
            # A freshly created chroot has no "before" version to compare.
            pre_ver = None
            commands.MakeChroot(buildroot=self._build_root,
                                replace=replace,
                                use_sdk=use_sdk,
                                chrome_root=self._run.options.chrome_root,
                                extra_env=self._portage_extra_env,
                                use_image=self._run.config.chroot_use_image)

        # Surface the version change (or final version) on the buildbot step.
        post_ver = cros_sdk_lib.GetChrootVersion(chroot=chroot_path)
        if pre_ver is not None and pre_ver != post_ver:
            logging.PrintBuildbotStepText('%s->%s' % (pre_ver, post_ver))
        else:
            logging.PrintBuildbotStepText(post_ver)

        commands.SetSharedUserPassword(
            self._build_root, password=self._run.config.shared_user_password)
    def PerformStage(self):
        """Update (or create) the chroot and report its version transition."""
        chroot_path = os.path.join(self._build_root,
                                   constants.DEFAULT_CHROOT_DIR)
        replace = self._run.config.chroot_replace or self.force_chroot_replace
        pre_ver = post_ver = None
        if os.path.isdir(self._build_root) and not replace:
            try:
                pre_ver = cros_build_lib.GetChrootVersion(chroot=chroot_path)
                commands.RunChrootUpgradeHooks(
                    self._build_root,
                    chrome_root=self._run.options.chrome_root,
                    extra_env=self._portage_extra_env)
            except failures_lib.BuildScriptFailure:
                # NOTE(review): 'replace' is not set to True here, so a broken
                # chroot is only rebuilt below if chroot_path is missing --
                # confirm this is intentional.
                logging.PrintBuildbotStepText('Replacing broken chroot')
                logging.PrintBuildbotStepWarnings()
            else:
                # Clear the chroot manifest version as we are in the middle of building.
                chroot_manager = chroot_lib.ChrootManager(self._build_root)
                chroot_manager.ClearChrootVersion()

        if not os.path.isdir(chroot_path) or replace:
            use_sdk = (self._run.config.use_sdk
                       and not self._run.options.nosdk)
            # A freshly created chroot has no "before" version to compare.
            pre_ver = None
            commands.MakeChroot(buildroot=self._build_root,
                                replace=replace,
                                use_sdk=use_sdk,
                                chrome_root=self._run.options.chrome_root,
                                extra_env=self._portage_extra_env)

        # Surface the version change (or final version) on the buildbot step.
        post_ver = cros_build_lib.GetChrootVersion(chroot=chroot_path)
        if pre_ver is not None and pre_ver != post_ver:
            logging.PrintBuildbotStepText('%s->%s' % (pre_ver, post_ver))
        else:
            logging.PrintBuildbotStepText(post_ver)

        commands.SetSharedUserPassword(
            self._build_root, password=self._run.config.shared_user_password)
def GenerateBreakpadSymbol(elf_file,
                           debug_file=None,
                           breakpad_dir=None,
                           strip_cfi=False,
                           num_errors=None,
                           dump_syms_cmd='dump_syms'):
    """Generate the symbols for |elf_file| using |debug_file|

    Args:
      elf_file: The file to dump symbols for
      debug_file: Split debug file to use for symbol information
      breakpad_dir: The dir to store the output symbol file in
      strip_cfi: Do not generate CFI data
      num_errors: An object to update with the error count (needs a .value
        member)
      dump_syms_cmd: Command to use for dumping symbols.

    Returns:
      The name of symbol file written out on success.
      NOTE(review): on failure, the current error count (num_errors.value) is
      returned instead -- confirm callers treat the return value accordingly.
    """
    assert breakpad_dir
    if num_errors is None:
        # No shared counter supplied; track errors locally.
        num_errors = ctypes.c_int()

    cmd_base = [dump_syms_cmd, '-v']
    if strip_cfi:
        cmd_base += ['-c']
    # Some files will not be readable by non-root (e.g. set*id /bin/su).
    needs_sudo = not os.access(elf_file, os.R_OK)

    def _DumpIt(cmd_args):
        # Run dump_syms (under sudo when the input is unreadable), sending
        # stdout to the temp file; the caller inspects the return code.
        if needs_sudo:
            run_command = cros_build_lib.SudoRunCommand
        else:
            run_command = cros_build_lib.RunCommand
        return run_command(cmd_base + cmd_args,
                           redirect_stderr=True,
                           log_stdout_to_file=temp.name,
                           error_code_ok=True,
                           debug_level=logging.DEBUG)

    def _CrashCheck(ret, msg):
        # A negative return code means dump_syms died from a signal.
        if ret < 0:
            logging.PrintBuildbotStepWarnings()
            logging.warning('dump_syms crashed with %s; %s',
                            signals.StrSignal(-ret), msg)

    osutils.SafeMakedirs(breakpad_dir)
    with tempfile.NamedTemporaryFile(dir=breakpad_dir, bufsize=0) as temp:
        if debug_file:
            # Try to dump the symbols using the debug file like normal.
            cmd_args = [elf_file, os.path.dirname(debug_file)]
            result = _DumpIt(cmd_args)

            if result.returncode:
                # Sometimes dump_syms can crash because there's too much info.
                # Try dumping and stripping the extended stuff out.  At least
                # this way we'll get the extended symbols.  http://crbug.com/266064
                _CrashCheck(result.returncode, 'retrying w/out CFI')
                cmd_args = ['-c', '-r'] + cmd_args
                result = _DumpIt(cmd_args)
                _CrashCheck(result.returncode, 'retrying w/out debug')

            # Non-zero means the debug-assisted dump failed; fall through.
            basic_dump = result.returncode
        else:
            basic_dump = True

        if basic_dump:
            # If that didn't work (no debug, or dump_syms still failed), try
            # dumping just the file itself directly.
            result = _DumpIt([elf_file])
            if result.returncode:
                # A lot of files (like kernel files) contain no debug information,
                # do not consider such occurrences as errors.
                logging.PrintBuildbotStepWarnings()
                _CrashCheck(result.returncode, 'giving up entirely')
                if 'file contains no debugging information' in result.error:
                    logging.warning('no symbols found for %s', elf_file)
                else:
                    num_errors.value += 1
                    logging.error('dumping symbols for %s failed:\n%s',
                                  elf_file, result.error)
                return num_errors.value

        # Move the dumped symbol file to the right place:
        # /build/$BOARD/usr/lib/debug/breakpad/<module-name>/<id>/<module-name>.sym
        header = ReadSymsHeader(temp)
        logging.info('Dumped %s as %s : %s', elf_file, header.name, header.id)
        sym_file = os.path.join(breakpad_dir, header.name, header.id,
                                header.name + '.sym')
        osutils.SafeMakedirs(os.path.dirname(sym_file))
        os.rename(temp.name, sym_file)
        os.chmod(sym_file, 0o644)
        # The temp file was renamed into place; stop NamedTemporaryFile from
        # unlinking it on close.
        temp.delete = False

    return sym_file
def UploadSymbols(board=None,
                  official=False,
                  server=None,
                  breakpad_dir=None,
                  file_limit=DEFAULT_FILE_LIMIT,
                  sleep=DEFAULT_SLEEP_DELAY,
                  upload_limit=None,
                  sym_paths=None,
                  failed_list=None,
                  root=None,
                  retry=True,
                  dedupe_namespace=None,
                  product_name='ChromeOS'):
    """Upload all the generated symbols for |board| to the crash server

  You can use in a few ways:
    * pass |board| to locate all of its symbols
    * pass |breakpad_dir| to upload all the symbols in there
    * pass |sym_paths| to upload specific symbols (or dirs of symbols)

  Args:
    board: The board whose symbols we wish to upload
    official: Use the official symbol server rather than the staging one
    server: Explicit server to post symbols to
    breakpad_dir: The full path to the breakpad directory where symbols live
    file_limit: The max file size of a symbol file before we try to strip it
    sleep: How long to sleep in between uploads
    upload_limit: If set, only upload this many symbols (meant for testing)
    sym_paths: Specific symbol files (or dirs of sym files) to upload,
      otherwise search |breakpad_dir|
    failed_list: Write the names of all sym files we did not upload; can be a
      filename or file-like object.
    root: The tree to prefix to |breakpad_dir| (if |breakpad_dir| is not set)
    retry: Whether we should retry failures.
    dedupe_namespace: The isolateserver namespace to dedupe uploaded symbols.
    product_name: A string for stats purposes. Usually 'ChromeOS' or 'Android'.

  Returns:
    The number of errors that were encountered.
  """
    # An explicit |server| wins; otherwise pick between the official and
    # staging upload URLs based on |official|.
    if server is None:
        if official:
            upload_url = OFFICIAL_UPLOAD_URL
        else:
            logging.warning('unofficial builds upload to the staging server')
            upload_url = STAGING_UPLOAD_URL
    else:
        upload_url = server

    if sym_paths:
        logging.info('uploading specified symbols to %s', upload_url)
    else:
        # No explicit symbols given: derive |breakpad_dir| (from |root| and
        # |board| if necessary) and upload everything underneath it.
        if breakpad_dir is None:
            if root is None:
                raise ValueError('breakpad_dir requires root to be set')
            breakpad_dir = os.path.join(
                root,
                cros_generate_breakpad_symbols.FindBreakpadDir(board).lstrip(
                    '/'))
        logging.info('uploading all symbols to %s from %s', upload_url,
                     breakpad_dir)
        sym_paths = [breakpad_dir]

    # We use storage_query to ask the server about existing symbols.  The
    # storage_notify_proc process is used to post updates to the server.  We
    # cannot safely share the storage object between threads/processes, but
    # we also want to minimize creating new ones as each object has to init
    # new state (like server connections).
    storage_query = None
    if dedupe_namespace:
        dedupe_limit = DEDUPE_LIMIT
        dedupe_queue = multiprocessing.Queue()
        try:
            with timeout_util.Timeout(DEDUPE_TIMEOUT):
                storage_query = isolateserver.get_storage_api(
                    constants.ISOLATESERVER, dedupe_namespace)
        except Exception:
            # Dedupe is best-effort: if the server can't be reached we just
            # upload everything (storage_query stays None).
            logging.warning('initializing dedupe server connection failed',
                            exc_info=True)
    else:
        dedupe_limit = 1
        dedupe_queue = None
    # Can't use parallel.BackgroundTaskRunner because that'll create multiple
    # processes and we want only one the whole time (see comment above).
    storage_notify_proc = multiprocessing.Process(
        target=SymbolDeduplicatorNotify, args=(dedupe_namespace, dedupe_queue))

    # Shared state for the background upload worker: error counters and the
    # queue of symbols that failed to upload.
    bg_errors = multiprocessing.Value('i')
    watermark_errors = multiprocessing.Value('f')
    failed_queue = multiprocessing.Queue()
    uploader = functools.partial(UploadSymbol,
                                 upload_url,
                                 product_name=product_name,
                                 file_limit=file_limit,
                                 sleep=sleep,
                                 num_errors=bg_errors,
                                 watermark_errors=watermark_errors,
                                 failed_queue=failed_queue,
                                 passed_queue=dedupe_queue)

    start_time = datetime.datetime.now()
    Counters = cros_build_lib.Collection('Counters',
                                         upload_limit=upload_limit,
                                         uploaded_count=0,
                                         deduped_count=0)
    counters = Counters()

    def _Upload(queue, counters, files):
        """Dedupe |files| against the server, then queue the rest for upload.

        Updates |counters| with how many symbols were handed to the upload
        queue and how many the dedupe server already had.
        """
        if not files:
            return

        missing_count = 0
        for item in SymbolDeduplicator(storage_query, files):
            missing_count += 1

            # A limit of exactly 0 means the upload budget is exhausted; keep
            # draining the dedupe results but queue no more uploads.
            if counters.upload_limit == 0:
                continue

            queue.put((item, ))
            counters.uploaded_count += 1
            if counters.upload_limit is not None:
                counters.upload_limit -= 1

        counters.deduped_count += (len(files) - missing_count)

    try:
        storage_notify_proc.start()

        with osutils.TempDir(prefix='upload_symbols.') as tempdir:
            # For the first run, we collect the symbols that failed.  If the
            # overall failure rate was low, we'll retry them on the second run.
            # NOTE: this loop rebinds the |retry| parameter; the second
            # iteration always sees retry=False, so we make at most two passes.
            for retry in (retry, False):
                # We need to limit ourselves to one upload at a time to avoid the server
                # kicking in DoS protection.  See these bugs for more details:
                # http://crbug.com/209442
                # http://crbug.com/212496
                with parallel.BackgroundTaskRunner(uploader,
                                                   processes=1) as queue:
                    # Batch symbols up to |dedupe_limit| before asking the
                    # dedupe server about them (one query per batch).
                    dedupe_list = []
                    for sym_file in SymbolFinder(tempdir, sym_paths):
                        dedupe_list.append(sym_file)
                        dedupe_len = len(dedupe_list)
                        if dedupe_len < dedupe_limit:
                            if (counters.upload_limit is None
                                    or dedupe_len < counters.upload_limit):
                                continue

                        # We check the counter before _Upload so that we don't keep talking
                        # to the dedupe server.  Otherwise, we end up sending one symbol at
                        # a time to it and that slows things down a lot.
                        if counters.upload_limit == 0:
                            break

                        _Upload(queue, counters, dedupe_list)
                        dedupe_list = []
                    # Flush whatever partial batch remains.
                    _Upload(queue, counters, dedupe_list)

                # See if we need to retry, and if we haven't failed too many times yet.
                if not retry or ErrorLimitHit(bg_errors, watermark_errors):
                    break

                # Drain the failure queue (None is the end-of-data sentinel)
                # to build the worklist for the retry pass.
                sym_paths = []
                failed_queue.put(None)
                while True:
                    sym_path = failed_queue.get()
                    if sym_path is None:
                        break
                    sym_paths.append(sym_path)

                if sym_paths:
                    logging.warning('retrying %i symbols', len(sym_paths))
                    if counters.upload_limit is not None:
                        counters.upload_limit += len(sym_paths)
                    # Decrement the error count in case we recover in the second pass.
                    assert bg_errors.value >= len(sym_paths), \
                           'more failed files than errors?'
                    bg_errors.value -= len(sym_paths)
                else:
                    # No failed symbols, so just return now.
                    break

        # If the user has requested it, save all the symbol files that we failed to
        # upload to a listing file.  This should help with recovery efforts later.
        failed_queue.put(None)
        WriteQueueToFile(failed_list, failed_queue, breakpad_dir)

    finally:
        logging.info('finished uploading; joining background process')
        if dedupe_queue:
            # The None sentinel tells the notify process no more work is coming.
            dedupe_queue.put(None)

        # The notification might be slow going, so give it some time to finish.
        # We have to poll here as the process monitor is watching for output and
        # will kill us if we go silent for too long.
        wait_minutes = DEDUPE_NOTIFY_TIMEOUT
        while storage_notify_proc.is_alive() and wait_minutes > 0:
            if dedupe_queue:
                qsize = str(dedupe_queue.qsize())
            else:
                qsize = '[None]'
            logging.info('waiting up to %i minutes for ~%s notifications',
                         wait_minutes, qsize)
            storage_notify_proc.join(60)
            wait_minutes -= 1

        # The process is taking too long, so kill it and complain.
        if storage_notify_proc.is_alive():
            logging.warning('notification process took too long')
            logging.PrintBuildbotStepWarnings()

            # Kill it gracefully first (traceback) before tacking it down harder.
            pid = storage_notify_proc.pid
            for sig in (signal.SIGINT, signal.SIGTERM, signal.SIGKILL):
                logging.warning('sending %s to %i', signals.StrSignal(sig),
                                pid)
                # The process might have exited between the last check and the
                # actual kill below, so ignore ESRCH errors.
                try:
                    os.kill(pid, sig)
                except OSError as e:
                    if e.errno == errno.ESRCH:
                        break
                    else:
                        raise
                time.sleep(5)
                if not storage_notify_proc.is_alive():
                    break

            # Drain the queue so we don't hang when we finish.
            try:
                logging.warning('draining the notify queue manually')
                with timeout_util.Timeout(60):
                    try:
                        while dedupe_queue.get_nowait():
                            pass
                    except Queue.Empty:
                        pass
            except timeout_util.TimeoutError:
                logging.warning(
                    'draining the notify queue failed; trashing it')
                dedupe_queue.cancel_join_thread()

    logging.info('uploaded %i symbols (%i were deduped) which took: %s',
                 counters.uploaded_count, counters.deduped_count,
                 datetime.datetime.now() - start_time)

    return bg_errors.value
    def Run(self):
        """Main runner for this builder class.  Runs build and prints summary.

    Returns:
      Whether the build succeeded.
    """
        self._InitializeTrybotPatchPool()

        if self._run.options.bootstrap:
            bootstrap_stage = self._GetBootstrapStage()
            if bootstrap_stage:
                # BootstrapStage blocks on re-execution of cbuildbot.
                bootstrap_stage.Run()
                return bootstrap_stage.returncode == 0

        # print_report: whether the finally block should write the summary.
        # exception_thrown: set when any stage raised (used for sanity check).
        print_report = True
        exception_thrown = False
        success = True
        sync_instance = None
        try:
            self.Initialize()
            sync_instance = self.GetSyncInstance()
            self._RunSyncStage(sync_instance)

            if self._run.ShouldPatchAfterSync():
                # Filter out patches to manifest, since PatchChangesStage can't handle
                # them.  Manifest patches are patched in the BootstrapStage.
                non_manifest_patches = self.patch_pool.FilterManifest(
                    negate=True)
                if non_manifest_patches:
                    self._RunStage(sync_stages.PatchChangesStage,
                                   non_manifest_patches)

            # Now that we have a fully synced & patched tree, we can let the builder
            # extract version information from the sources for this particular build.
            self.SetVersionInfo()
            if self._run.ShouldReexecAfterSync():
                # The re-executed cbuildbot will produce its own report.
                print_report = False
                success = self._ReExecuteInBuildroot(sync_instance)
            else:
                self._RunStage(report_stages.BuildReexecutionFinishedStage)
                self._RunStage(report_stages.ConfigDumpStage)
                self.RunStages()

        except Exception as ex:
            if isinstance(ex, failures_lib.ExitEarlyException):
                # One stage finished and exited early, not a failure.
                raise

            exception_thrown = True
            build_identifier, _ = self._run.GetCIDBHandle()
            buildbucket_id = build_identifier.buildbucket_id
            if results_lib.Results.BuildSucceededSoFar(self.buildstore,
                                                       buildbucket_id):
                # If the build is marked as successful, but threw exceptions, that's a
                # problem. Print the traceback for debugging.
                if isinstance(ex, failures_lib.CompoundFailure):
                    print(str(ex))

                traceback.print_exc(file=sys.stdout)
                raise

            if not (print_report and isinstance(ex, failures_lib.StepFailure)):
                # If the failed build threw a non-StepFailure exception, we
                # should raise it.
                raise

        finally:
            # Always emit the report (unless re-exec does it for us), even
            # when an exception is propagating out of the try block.
            if print_report:
                results_lib.WriteCheckpoint(self._run.options.buildroot)
                completion_instance = self.GetCompletionInstance()
                self._RunStage(report_stages.ReportStage, completion_instance)
                build_identifier, _ = self._run.GetCIDBHandle()
                buildbucket_id = build_identifier.buildbucket_id
                success = results_lib.Results.BuildSucceededSoFar(
                    self.buildstore, buildbucket_id)
                if exception_thrown and success:
                    # A stage raised but no failure was recorded; flag the
                    # inconsistency rather than reporting a clean pass.
                    success = False
                    logging.PrintBuildbotStepWarnings()
                    print("""\
Exception thrown, but all stages marked successful. This is an internal error,
because the stage that threw the exception should be marked as failing.""")

        return success
    def PerformStage(self):
        """Collect a 'perf' profile and convert it into the AFDO format."""
        super(AFDODataGenerateStage, self).PerformStage()

        board = self._current_board
        if not afdo.CanGenerateAFDOData(board):
            logging.warning('Board %s cannot generate its own AFDO profile.',
                            board)
            return

        arch = self._GetCurrentArch()
        buildroot = self._build_root
        gs_context = gs.GSContext()
        # Currently visible Chrome version; its revision gates whether we
        # generate a profile at all (see the mismatch discussion below).
        cpv = portage_util.PortageqBestVisible(constants.CHROME_CP,
                                               cwd=buildroot)
        afdo_file = None

        # We have a mismatch between how we version the perf.data we collect and
        # how we version our AFDO profiles.
        #
        # This mismatch can cause us to generate garbage profiles, so we skip
        # profile updates for non-r1 revisions of Chrome.
        #
        # Going into more detail, a perf.data file looks like:
        # chromeos-chrome-amd64-68.0.3440.9.perf.data.bz2
        #
        # An AFDO profile looks like:
        # chromeos-chrome-amd64-68.0.3440.9_rc-r1.afdo.bz2
        #
        # And an unstripped Chrome looks like:
        # chromeos-chrome-amd64-68.0.3440.9_rc-r1.debug.bz2
        #
        # Notably, the perf.data is lacking the revision number of the Chrome it
        # was gathered on. This is problematic, since if there's a rev bump, we'll
        # end up using the perf.data collected on Chrome version $N-r1 with a
        # Chrome binary built from Chrome version $N-r2, which may have an entirely
        # different layout than Chrome version $N-r1.
        if cpv.rev != 'r1':
            logging.warning(
                'Non-r1 version of Chrome detected; skipping AFDO generation')
            return

        # Generation of AFDO could fail for different reasons.
        # We will ignore the failures and let the master PFQ builder try
        # to find an older AFDO profile.
        try:
            if afdo.WaitForAFDOPerfData(cpv, arch, buildroot, gs_context):
                afdo_file, uploaded_afdo = afdo.GenerateAFDOData(
                    cpv, arch, board, buildroot, gs_context)
                assert afdo_file
                logging.info('Generated %s AFDO profile %s', arch, afdo_file)

                # If there's no new profile, merging would only be redoing the last
                # merge and uploading nothing.
                if not uploaded_afdo:
                    logging.info('AFDO profile already existed in GS. Quit')
                    return

                merged_file, uploaded_merged = \
                    afdo.CreateAndUploadMergedAFDOProfile(gs_context, buildroot,
                                                          afdo_file)

                if merged_file is not None:
                    logging.info('Generated %s merged AFDO profile %s', arch,
                                 merged_file)

                # TODO(gbiv): once there's clarity that merged profiles are working
                # (e.g. a week goes by with Android/Linux mostly-happily using them),
                # we may want to turn them on for CrOS. Until then, `latest` is always
                # the raw AFDO file.
                if uploaded_merged and False:
                    newest_afdo_file = merged_file
                else:
                    newest_afdo_file = afdo_file

                afdo.UpdateLatestAFDOProfileInGS(cpv, arch, buildroot,
                                                 newest_afdo_file, gs_context)
                logging.info('Pointed newest profile at %s', newest_afdo_file)
            else:
                raise afdo.MissingAFDOData(
                    'Could not find current "perf" profile. '
                    'Master PFQ builder will try to use stale '
                    'AFDO profile.')
        # Will let system-exiting exceptions through.
        except Exception:
            logging.PrintBuildbotStepWarnings()
            logging.warning('AFDO profile generation failed with exception ',
                            exc_info=True)

            # Email the AFDO sheriffs so the failure is triaged before it
            # becomes fatal, then re-raise to surface the warning.
            alert_msg = ('Please triage. This will become a fatal error.\n\n'
                         'arch=%s buildroot=%s\n\nURL=%s' %
                         (arch, buildroot, self._run.ConstructDashboardURL()))
            subject_msg = (
                'Failure in generation of AFDO Data for builder %s' %
                self._run.config.name)
            alerts.SendEmailLog(subject_msg,
                                afdo.AFDO_ALERT_RECIPIENTS,
                                server=alerts.SmtpServer(
                                    constants.GOLO_SMTP_SERVER),
                                message=alert_msg)
            # Re-raise whatever exception we got here. This stage will only
            # generate a warning but we want to make sure the warning is
            # generated.
            raise
Example #23
0
    def PerformStage(self):
        """Run the HWTest suite for this build and record subsystem results."""
        # Wait for UploadHWTestArtifacts to generate the payloads.
        if not self.GetParallel('payloads_generated', pretty_name='payloads'):
            logging.PrintBuildbotStepWarnings('missing payloads')
            logging.warning(
                'Cannot run HWTest because UploadTestArtifacts failed. '
                'See UploadTestArtifacts for details.')
            return

        # The AFDO suite collects 'perf' data; skip it when a profile for this
        # arch and Chrome version has already been generated.
        if self.suite_config.suite == constants.HWTEST_AFDO_SUITE:
            arch = self._GetPortageEnvVar('ARCH', self._current_board)
            cpv = portage_util.BestVisible(constants.CHROME_CP,
                                           buildroot=self._build_root)
            if afdo.CheckAFDOPerfData(cpv, arch, gs.GSContext()):
                logging.info(
                    'AFDO profile already generated for arch %s '
                    'and Chrome %s. Not generating it again', arch,
                    cpv.version_no_rev.split('_')[0])
                return

        build = '/'.join([self._bot_id, self.version])
        # Remote trybots running hwtest/pre-cq take their debug setting from
        # the explicit debug_forced option; everyone else uses --debug.
        if (self._run.options.remote_trybot
                and (self._run.options.hwtest or self._run.config.pre_cq)):
            debug = self._run.options.debug_forced
        else:
            debug = self._run.options.debug

        # Get the subsystems set for the board to test
        per_board_dict = self._run.attrs.metadata.GetDict()['board-metadata']
        current_board_dict = per_board_dict.get(self._current_board)
        if current_board_dict:
            subsystems = set(current_board_dict.get('subsystems_to_test', []))
            # 'subsystem:all' indicates to skip the subsystem logic
            if 'all' in subsystems:
                subsystems = None
        else:
            subsystems = None

        # Canary-type builds skip the check that enough DUTs are available.
        skip_duts_check = False
        if config_lib.IsCanaryType(self._run.config.build_type):
            skip_duts_check = True

        build_id, db = self._run.GetCIDBHandle()
        cmd_result = commands.RunHWTestSuite(
            build,
            self.suite_config.suite,
            self._current_board,
            pool=self.suite_config.pool,
            num=self.suite_config.num,
            file_bugs=self.suite_config.file_bugs,
            wait_for_results=self.wait_for_results,
            priority=self.suite_config.priority,
            timeout_mins=self.suite_config.timeout_mins,
            retry=self.suite_config.retry,
            max_retries=self.suite_config.max_retries,
            minimum_duts=self.suite_config.minimum_duts,
            suite_min_duts=self.suite_config.suite_min_duts,
            offload_failures_only=self.suite_config.offload_failures_only,
            debug=debug,
            subsystems=subsystems,
            skip_duts_check=skip_duts_check)
        # Record per-subsystem pass/fail (or "unused") messages in the DB.
        subsys_tuple = self.GenerateSubsysResult(cmd_result.json_dump_result,
                                                 subsystems)
        if db:
            if not subsys_tuple:
                db.InsertBuildMessage(
                    build_id,
                    message_type=constants.SUBSYSTEMS,
                    message_subtype=constants.SUBSYSTEM_UNUSED,
                    board=self._current_board)
            else:
                logging.info('pass_subsystems: %s, fail_subsystems: %s',
                             subsys_tuple[0], subsys_tuple[1])
                for s in subsys_tuple[0]:
                    db.InsertBuildMessage(
                        build_id,
                        message_type=constants.SUBSYSTEMS,
                        message_subtype=constants.SUBSYSTEM_PASS,
                        message_value=str(s),
                        board=self._current_board)
                for s in subsys_tuple[1]:
                    db.InsertBuildMessage(
                        build_id,
                        message_type=constants.SUBSYSTEMS,
                        message_subtype=constants.SUBSYSTEM_FAIL,
                        message_value=str(s),
                        board=self._current_board)
        # Propagate the suite failure (if any) after results are recorded.
        if cmd_result.to_raise:
            raise cmd_result.to_raise
Example #24
0
    def Report(self, out, archive_urls=None, current_version=None):
        """Generate a user friendly text display of the results data.

    Args:
      out: Output stream to write to (e.g. sys.stdout).
      archive_urls: Dict where values are archive URLs and keys are names
        to associate with those URLs (typically board name).  If None then
        omit the name when logging the URL.
      current_version: Chrome OS version associated with this report.
    """
        separator = '*' * 60 + '\n'
        border = '*' * 2

        if current_version:
            out.write(separator)
            out.write(border + ' RELEASE VERSION: ' + current_version + '\n')

        out.write(separator)
        out.write(border + ' Stage Results\n')

        saw_warnings = False
        for entry in self._results_log:
            # Skipped stages produce no output at all.
            if entry.result == self.SKIPPED:
                continue

            elapsed = datetime.timedelta(seconds=math.ceil(entry.time))
            out.write(separator)

            detail = ''
            outcome = entry.result
            if outcome == self.SUCCESS:
                verdict = 'PASS'
            elif outcome == self.FORGIVEN:
                verdict = 'FAILED BUT FORGIVEN'
                saw_warnings = True
            else:
                verdict = 'FAIL'
                if isinstance(outcome, cros_build_lib.RunCommandError):
                    # Just the executable name; the full argument list is
                    # usually too long to be useful here.
                    detail = ' in %s' % outcome.result.cmd[0]
                elif isinstance(outcome, failures_lib.BuildScriptFailure):
                    # BuildScriptFailure carries a short command name.
                    detail = ' in %s' % outcome.shortname
                else:
                    # Ordinary exception: report its type.
                    detail = ' with %s' % type(outcome).__name__

            out.write('%s %s %s (%s)%s\n' %
                      (border, verdict, entry.name, elapsed, detail))

        out.write(separator)

        if archive_urls:
            out.write('%s BUILD ARTIFACTS FOR THIS BUILD CAN BE FOUND AT:\n' %
                      border)
            for name, url in sorted(archive_urls.iteritems()):
                named_url = url
                link_name = 'Artifacts'
                if name:
                    named_url = '%s: %s' % (name, url)
                    link_name = 'Artifacts[%s]' % name

                # Append the bot-id/version portion of the archive url.
                link_name = '%s: %s' % (link_name, '/'.join(
                    url.split('/')[-3:-1]))
                out.write('%s  %s' % (border, named_url))
                logging.PrintBuildbotLink(link_name, url, handle=out)
            out.write(separator)

        for record in self.GetTracebacks():
            if record.failed_stage and record.traceback:
                out.write('\nFailed in stage %s:\n\n' % record.failed_stage)
                out.write(record.traceback)
                out.write('\n')

        if saw_warnings:
            logging.PrintBuildbotStepWarnings(out)
 def _CrashCheck(ret, msg):
     """Warn when |ret| indicates dump_syms was killed by a signal.

     A negative return code means death-by-signal; anything else is left
     for the caller to handle.
     """
     if ret >= 0:
         return
     logging.PrintBuildbotStepWarnings()
     logging.warning('dump_syms crashed with %s; %s',
                     signals.StrSignal(-ret), msg)
def UploadSymbol(upload_url,
                 symbol_element,
                 product_name,
                 file_limit=DEFAULT_FILE_LIMIT,
                 sleep=0,
                 num_errors=None,
                 watermark_errors=None,
                 failed_queue=None,
                 passed_queue=None):
    """Upload |sym_element.symbol_item| to |upload_url|

  Args:
    upload_url: The crash server to upload things to
    symbol_element: A SymbolElement tuple. symbol_element.symbol_item is a
                    SymbolItem object containing the path to the breakpad symbol
                    to upload. symbol_element.opaque_push_state is an object of
                    _IsolateServerPushState or None if the item doesn't have
                    a push state.
    product_name: A string for stats purposes. Usually 'ChromeOS' or 'Android'.
    file_limit: The max file size of a symbol file before we try to strip it
    sleep: Number of seconds to sleep before running
    num_errors: An object to update with the error count (needs a .value member)
    watermark_errors: An object to track current error behavior (needs a .value)
    failed_queue: When a symbol fails, add it to this queue
    passed_queue: When a symbol passes, add it to this queue

  Returns:
    The number of errors that were encountered.
  """
    sym_file = symbol_element.symbol_item.sym_file
    upload_item = symbol_element.symbol_item

    if num_errors is None:
        num_errors = ctypes.c_int()
    if ErrorLimitHit(num_errors, watermark_errors):
        # Abandon ship!  It's on fire!  NOoooooooooooOOOoooooo.
        if failed_queue:
            failed_queue.put(sym_file)
        return 0

    if sleep:
        # Keeps us from DoS-ing the symbol server.
        time.sleep(sleep)

    # Use lazy %-args so the message is only formatted when debug logging
    # is actually enabled.
    logging.debug('uploading %s', sym_file)

    # Ideally there'd be a tempfile.SpooledNamedTemporaryFile that we could use.
    with tempfile.NamedTemporaryFile(prefix='upload_symbols',
                                     bufsize=0) as temp_sym_file:
        if file_limit:
            # If the symbols size is too big, strip out the call frame info.  The CFI
            # is unnecessary for 32bit x86 targets where the frame pointer is used (as
            # all of ours have) and it accounts for over half the size of the symbols
            # uploaded.
            file_size = os.path.getsize(sym_file)
            if file_size > file_limit:
                logging.warning('stripping CFI from %s due to size %s > %s',
                                sym_file, file_size, file_limit)
                # Use a context manager so the source file is closed promptly
                # instead of leaking until garbage collection.
                with open(sym_file, 'rb') as sym_fh:
                    temp_sym_file.writelines([
                        x for x in sym_fh.readlines()
                        if not x.startswith('STACK CFI')
                    ])

                upload_item = FakeItem(
                    sym_file=temp_sym_file.name,
                    sym_header=symbol_element.symbol_item.sym_header)

        # Hopefully the crash server will let it through.  But it probably won't.
        # Not sure what the best answer is in this case.
        file_size = os.path.getsize(upload_item.sym_file)
        if file_size > CRASH_SERVER_FILE_LIMIT:
            logging.PrintBuildbotStepWarnings()
            logging.warning(
                'upload file %s is awfully large, risking rejection by '
                'the symbol server (%s > %s)', sym_file, file_size,
                CRASH_SERVER_FILE_LIMIT)

        # Upload the symbol file, retrying transient HTTP/URL failures.
        success = False
        try:
            cros_build_lib.TimedCommand(
                retry_util.RetryException,
                (urllib2.HTTPError, urllib2.URLError),
                MAX_RETRIES,
                SymUpload,
                upload_url,
                upload_item,
                product_name,
                sleep=INITIAL_RETRY_DELAY,
                timed_log_msg=('upload of %10i bytes took %%(delta)s: %s' %
                               (file_size, os.path.basename(sym_file))))
            success = True

            if passed_queue:
                passed_queue.put(symbol_element)
        except urllib2.HTTPError as e:
            logging.warning('could not upload: %s: HTTP %s: %s',
                            os.path.basename(sym_file), e.code, e.reason)
        except (urllib2.URLError, httplib.HTTPException, socket.error) as e:
            logging.warning('could not upload: %s: %s',
                            os.path.basename(sym_file), e)
        finally:
            # Adjust the shared error counters so callers can watch the
            # overall failure rate.
            if success:
                _UpdateCounter(watermark_errors, ERROR_ADJUST_PASS)
            else:
                _UpdateCounter(num_errors, 1)
                _UpdateCounter(watermark_errors, ERROR_ADJUST_FAIL)
                if failed_queue:
                    failed_queue.put(sym_file)

    return num_errors.value
Example #27
0
    def Report(self, out, current_version=None):
        """Generate a user friendly text display of the results data.

    Args:
      out: Output stream to write to (e.g. sys.stdout).
      current_version: Chrome OS version associated with this report.
    """
        rule = '*' * 60 + '\n'
        marker = '*' * 2

        def _Classify(result):
            """Map a stage result to its (status, details, is_warning) triple."""
            if result == self.SUCCESS:
                return 'PASS', '', False
            if result == self.FORGIVEN:
                return 'FAILED BUT FORGIVEN', '', True
            if isinstance(result, cros_build_lib.RunCommandError):
                # Only the command name; full argument lists are too long.
                return 'FAIL', ' in %s' % result.result.cmd[0], False
            if isinstance(result, failures_lib.BuildScriptFailure):
                # BuildScriptFailure carries a short command name.
                return 'FAIL', ' in %s' % result.shortname, False
            # Ordinary exception: report its type.
            return 'FAIL', ' with %s' % type(result).__name__, False

        if current_version:
            out.write(rule)
            out.write(marker + ' RELEASE VERSION: ' + current_version + '\n')

        out.write(rule)
        out.write(marker + ' Stage Results\n')

        any_warnings = False
        for entry in self._results_log:
            # Skipped stages are omitted entirely.
            if entry.result == self.SKIPPED:
                continue

            status, details, warned = _Classify(entry.result)
            any_warnings = any_warnings or warned
            duration = datetime.timedelta(seconds=math.ceil(entry.time))
            out.write(rule)
            out.write('%s %s %s (%s)%s\n' %
                      (marker, status, entry.name, duration, details))

        out.write(rule)

        for record in self.GetTracebacks():
            if record.failed_stage and record.traceback:
                out.write('\nFailed in stage %s:\n\n' % record.failed_stage)
                out.write(record.traceback)
                out.write('\n')

        if any_warnings:
            logging.PrintBuildbotStepWarnings(out)
Beispiel #28
0
def GenerateBlameList(source_repo, lkgm_path, only_print_chumps=False):
    """Generate the blamelist since the specified manifest.

    For each checkout pinned in the manifest, walks the git log from the
    pinned revision to HEAD and emits one buildbot link per change that has
    a 'Reviewed-on:' line. Changes whose committer is not one of the known
    commit bots are prefixed with 'CHUMP'.

    Args:
        source_repo: Repository object for the source code.
        lkgm_path: Path to LKGM manifest.
        only_print_chumps: If True, only print changes that were chumped.
    """
    handler = git.Manifest(lkgm_path)
    # Compile once up front; these are matched against every log line of
    # every checkout below.
    reviewed_on_re = re.compile(r'\s*Reviewed-on:\s*(\S+)')
    author_re = re.compile(r'\s*Author:.*<(\S+)@\S+>\s*')
    committer_re = re.compile(r'\s*Commit:.*<(\S+)@\S+>\s*')
    for rel_src_path, checkout in handler.checkouts_by_path.iteritems():
        project = checkout['name']

        # Additional case in case the repo has been removed from the manifest.
        src_path = source_repo.GetRelativePath(rel_src_path)
        if not os.path.exists(src_path):
            # Lazy %-args (not eager '%' formatting), consistent with the
            # rest of this file's logging calls.
            logging.info('Detected repo removed from manifest %s', project)
            continue

        revision = checkout['revision']
        cmd = ['log', '--pretty=full', '%s..HEAD' % revision]
        try:
            result = git.RunGit(src_path, cmd)
        except cros_build_lib.RunCommandError as ex:
            # Git returns 128 when the revision does not exist.
            if ex.result.returncode != 128:
                raise
            logging.warning('Detected branch removed from local checkout.')
            logging.PrintBuildbotStepWarnings()
            # NOTE(review): this aborts blamelist generation for all
            # remaining checkouts, not just this one — presumably
            # intentional (the local tree is stale); confirm.
            return
        current_author = None
        current_committer = None
        # 'git log --pretty=full' prints Author:/Commit: headers before the
        # commit message, so by the time we see 'Reviewed-on:' the current
        # commit's author/committer have already been captured.
        for line in unicode(result.output, 'ascii', 'ignore').splitlines():
            author_match = author_re.match(line)
            if author_match:
                current_author = author_match.group(1)

            committer_match = committer_re.match(line)
            if committer_match:
                current_committer = committer_match.group(1)

            review_match = reviewed_on_re.match(line)
            if review_match:
                review = review_match.group(1)
                _, _, change_number = review.rpartition('/')
                if not current_author:
                    logging.notice(
                        'Failed to locate author before the line of review: '
                        '%s. Author name is set to <Unknown>', line)
                    current_author = '<Unknown>'
                items = [
                    os.path.basename(project),
                    current_author,
                    change_number,
                ]
                # TODO(phobbs) verify the domain of the email address as well.
                if current_committer not in ('chrome-bot',
                                             'chrome-internal-fetch',
                                             'chromeos-commit-bot',
                                             '3su6n15k.default'):
                    items.insert(0, 'CHUMP')
                elif only_print_chumps:
                    continue
                logging.PrintBuildbotLink(' | '.join(items), review)