Example #1
    def _ParallelHelloWorld(self):
        """Write 'hello world' to stdout using multiple processes."""
        with parallel.Manager() as manager:
            queue = manager.Queue()
            with parallel.BackgroundTaskRunner(self._HelloWorld, queue=queue):
                queue.put([])
                self.printed_hello.wait()
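The helpers referenced above (self._HelloWorld and self.printed_hello) are defined elsewhere in the test. As a hedged, self-contained sketch of the pattern shared by the examples on this page, and assuming chromite.lib.parallel is importable, the hypothetical _SayHello task below is invoked once per queued argument list, with the extra positional argument given to BackgroundTaskRunner prepended to every call:

from chromite.lib import parallel


def _SayHello(greeting, name):
    """Hypothetical task: runs in a worker process, once per queued arg list."""
    print('%s, %s!' % (greeting, name))


def Demo():
    # 'hello' is passed to every invocation; each queue.put() supplies the rest.
    with parallel.BackgroundTaskRunner(_SayHello, 'hello') as queue:
        for name in ('world', 'chromite'):
            queue.put([name])  # becomes _SayHello('hello', name)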
Example #2
def FetchTarballs(binhost_urls, pkgdir):
    """Prefetch the specified |binhost_urls| to the specified |pkgdir|.

    This function fetches the tarballs from the specified list of binhost
    URLs to disk. It does not populate the Packages file -- we leave that
    to Portage.

    Args:
      binhost_urls: List of binhost URLs to fetch.
      pkgdir: Location to store the fetched packages.
    """
    categories = {}
    for binhost_url in binhost_urls:
        pkgindex = GrabRemotePackageIndex(binhost_url)
        base_uri = pkgindex.header['URI']
        for pkg in pkgindex.packages:
            cpv = pkg['CPV']
            path = pkg.get('PATH', '%s.tbz2' % cpv)
            uri = '/'.join([base_uri, path])
            category = cpv.partition('/')[0]
            fetches = categories.setdefault(category, {})
            fetches[cpv] = uri

    with parallel.BackgroundTaskRunner(_DownloadURLs) as queue:
        for category, urls in categories.items():
            category_dir = os.path.join(pkgdir, category)
            if not os.path.exists(category_dir):
                os.makedirs(category_dir)
            queue.put((urls.values(), category_dir))
Example #3
  def testPerBoardDict(self):
    starting_per_board_dict = {
        'board-1': {'kubrick': 2001,
                    'bergman': 'persona',
                    'hitchcock': 'vertigo'},
        'board-2': {'kubrick': ['barry lyndon', 'dr. strangelove'],
                    'bergman': 'the seventh seal'}
    }

    starting_dict = {'board-metadata': starting_per_board_dict}

    m = multiprocessing.Manager()
    metadata = metadata_lib.CBuildbotMetadata(metadata_dict=starting_dict,
                                              multiprocess_manager=m)

    extra_per_board_dict = {
        'board-1': {'kurosawa': 'rashomon',
                    'coen brothers': 'fargo'},
        'board-3': {'hitchcock': 'north by northwest',
                    'coen brothers': 'the big lebowski'}
    }

    expected_dict = starting_per_board_dict

    # Write each per board key-value pair to metadata in a separate process.
    with parallel.BackgroundTaskRunner(metadata.UpdateBoardDictWithDict) as q:
      for board, board_dict in extra_per_board_dict.iteritems():
        expected_dict.setdefault(board, {}).update(board_dict)
        for k, v in board_dict.iteritems():
          q.put([board, {k: v}])

    self.assertEqual(expected_dict, metadata.GetDict()['board-metadata'])
Example #4
    def Run(self):
        files = self.options.files
        if not files:
            # Running with no arguments is allowed to make the repo upload hook
            # simple, but print a warning so that if someone runs this manually
            # they are aware that nothing was linted.
            logging.warning('No files provided to lint.  Doing nothing.')

        errors = multiprocessing.Value('i')
        linter_map = _BreakoutFilesByLinter(files)
        dispatcher = functools.partial(_Dispatcher, errors,
                                       self.options.output, self.options.debug)

        # Special case one file as it's common -- faster to avoid parallel startup.
        if sum([len(x) for _, x in linter_map.iteritems()]) == 1:
            linter, files = linter_map.items()[0]
            dispatcher(linter, files[0])
        else:
            # Run the linter in parallel on the files.
            with parallel.BackgroundTaskRunner(dispatcher) as q:
                for linter, files in linter_map.iteritems():
                    for path in files:
                        q.put([linter, path])

        if errors.value:
            logging.error('linter found errors in %i files', errors.value)
            sys.exit(1)
Example #5
def GenerateBreakpadSymbols(breakpad_dir, symbols_dir):
    """Generate symbols for all binaries in symbols_dir.

    Args:
      breakpad_dir: The full path in which to write out breakpad symbols.
      symbols_dir: The full path to the binaries to process from.

    Returns:
      The number of errors that were encountered.
    """
    osutils.SafeMakedirs(breakpad_dir)
    logging.info('generating breakpad symbols from %s', symbols_dir)

    num_errors = multiprocessing.Value('i')

    # Now start generating symbols for the discovered elfs.
    with parallel.BackgroundTaskRunner(_UnpackGenerateBreakpad,
                                       breakpad_dir=breakpad_dir,
                                       num_errors=num_errors) as queue:

        for root, _, files in os.walk(symbols_dir):
            for f in files:
                queue.put([os.path.join(root, f)])

    return num_errors.value
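The worker _UnpackGenerateBreakpad passed to the runner above is not shown. The following is only a plausible sketch of how such a task might report failures through the shared num_errors counter; the helper name, the get_lock() call, and the body are assumptions rather than the original implementation:

import logging


def _UnpackGenerateBreakpadSketch(file_path, breakpad_dir=None, num_errors=None):
    """Hypothetical worker: generate symbols for one file, counting failures."""
    try:
        pass  # ... unpack file_path and generate its symbols into breakpad_dir ...
    except Exception:
        logging.exception('Failed to generate symbols for %s', file_path)
        if num_errors is not None:
            with num_errors.get_lock():  # multiprocessing.Value supports get_lock()
                num_errors.value += 1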
Example #6
  def RunAllConfigs(self, task, skip_missing=False, site_config=None):
    """Run |task| against all major configurations"""
    if site_config is None:
      site_config = chromeos_config.GetConfig()

    with parallel.BackgroundTaskRunner(task) as queue:
      # Loop through all major configuration types and pick one from each.
      for bot_type in config_lib.CONFIG_TYPE_DUMP_ORDER:
        for bot_id in site_config:
          if bot_id.endswith(bot_type):
            # Skip any config without a board, since those configs do not
            # build packages.
            cfg = site_config[bot_id]
            if cfg.boards:
              # Skip boards w/out a local overlay.  Like when running a
              # public manifest and testing private-only boards.
              if skip_missing:
                try:
                  for b in cfg.boards:
                    portage_util.FindPrimaryOverlay(constants.BOTH_OVERLAYS, b)
                except portage_util.MissingOverlayException:
                  continue

              queue.put([bot_id])
              break
Example #7
    def PerformStage(self):
        """Do the work of generating our release payloads."""
        # Convert to release tools naming for boards.
        board = self._current_board.replace('_', '-')
        version = self._run.attrs.release_tag

        assert version, "We can't generate payloads without a release_tag."
        logging.info("Generating payloads for: %s, %s", board, version)

        # Test to see if the current board has a Paygen configuration. We do
        # this here, not in the sub-process so we don't have to pass back a
        # failure reason.
        try:
            paygen_build_lib.ValidateBoardConfig(board)
        except paygen_build_lib.BoardNotConfigured:
            raise PaygenNoPaygenConfigForBoard(
                'Golden Eye (%s) has no entry for board %s. Get a TPM to fix.'
                % (paygen_build_lib.BOARDS_URI, board))

        with parallel.BackgroundTaskRunner(
                self._RunPaygenInProcess) as per_channel:
            logging.info("Using channels: %s", self.channels)

            # Default to False, set to True if it's a canary type build
            skip_duts_check = False
            if config_lib.IsCanaryType(self._run.config.build_type):
                skip_duts_check = True

            # If we have an explicit list of channels, use it.
            for channel in self.channels:
                per_channel.put((channel, board, version, self._run.debug,
                                 self._run.config.paygen_skip_testing,
                                 self._run.config.paygen_skip_delta_payloads,
                                 skip_duts_check))
Example #8
    def _ProcessCrashListInBackground(self):
        """Create a worker process for processing crash lists."""
        with parallel.BackgroundTaskRunner(self._ProcessCrashListForBot,
                                           processes=self.jobs) as queue:
            for bot_id, build_config in cbuildbot_config.config.iteritems():
                if build_config['vm_tests']:
                    queue.put((bot_id, build_config))
            yield
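The trailing yield suggests this method is a generator that, in the original source, is presumably wrapped with @contextlib.contextmanager (the decorator does not appear in the snippet). A hedged sketch of the same shape, with a hypothetical caller shown in the comment:

import contextlib

from chromite.lib import parallel


@contextlib.contextmanager
def _RunTasksInBackgroundSketch(task, work_items):
    """Hypothetical: process |work_items| in the background while the caller runs."""
    with parallel.BackgroundTaskRunner(task) as queue:
        for item in work_items:
            queue.put([item])
        yield  # caller's body runs here; workers are joined when the block exits

# Hypothetical caller:
#   with self._ProcessCrashListInBackground():
#       ...  # other work proceeds while crash lists are processed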
Example #9
    def testExceptionPriority(self):
        """Tests that foreground exceptions take priority over background."""
        self.StartPatcher(BackgroundTaskVerifier())
        with self.assertRaises(self._TestException):
            with parallel.BackgroundTaskRunner(self._KeyboardInterrupt,
                                               processes=1) as queue:
                queue.put([])
                raise self._TestException()
Example #10
    def testForegroundExceptionRaising(self):
        """Test that BackgroundTaskRunner halts tasks on a foreground exception."""
        with self.assertRaises(_TestForegroundException):
            with parallel.BackgroundTaskRunner(self._PassEventually,
                                               processes=1,
                                               halt_on_error=True) as queue:
                queue.put([])
                raise _TestForegroundException()
        self.assertFalse(self.passed.is_set())
Example #11
  def RunBuildStages(self):
    """Runs through the stages to perform the build and resulting tests."""
    # Prepare stages to run in background.  If child_configs exist then
    # run each of those here, otherwise use default config.
    builder_runs = self._run.GetUngroupedBuilderRuns()

    tasks = []
    for builder_run in builder_runs:
      # Prepare a local archive directory for each "run".
      builder_run.GetArchive().SetupArchivePath()

      for board in builder_run.config.boards:
        archive_stage = self._GetStageInstance(
            artifact_stages.ArchiveStage, board, builder_run=builder_run,
            chrome_version=self._run.attrs.chrome_version)
        board_config = BoardConfig(board, builder_run.config.name)
        self.archive_stages[board_config] = archive_stage
        tasks.append((builder_run, board))

    # Set up a process pool to run test/archive stages in the background.
    # This process runs task(board) for each board added to the queue.
    task_runner = self._RunBackgroundStagesForBoardAndMarkAsSuccessful
    with parallel.BackgroundTaskRunner(task_runner) as queue:
      for builder_run, board in tasks:
        if not builder_run.config.build_packages_in_background:
          # Run BuildPackages in the foreground, generating or using AFDO data
          # if requested.
          kwargs = {'builder_run': builder_run}
          if builder_run.config.afdo_generate_min:
            kwargs['afdo_generate_min'] = True
          elif builder_run.config.afdo_use:
            kwargs['afdo_use'] = True

          self._RunStage(build_stages.BuildPackagesStage, board,
                         update_metadata=True, **kwargs)

          if (builder_run.config.afdo_generate_min and
              afdo.CanGenerateAFDOData(board)):
            # Generate the AFDO data before allowing any other tasks to run.
            self._RunStage(build_stages.BuildImageStage, board, **kwargs)
            self._RunStage(artifact_stages.UploadTestArtifactsStage, board,
                           builder_run=builder_run,
                           suffix='[afdo_generate_min]')
            for suite in builder_run.config.hw_tests:
              self._RunStage(test_stages.HWTestStage, board, suite,
                             builder_run=builder_run)
            self._RunStage(afdo_stages.AFDODataGenerateStage, board,
                           builder_run=builder_run)

          if (builder_run.config.afdo_generate_min and
              builder_run.config.afdo_update_ebuild):
            self._RunStage(afdo_stages.AFDOUpdateEbuildStage,
                           builder_run=builder_run)

        # Kick off our background stages.
        queue.put([builder_run, board])
Example #12
  def testUpdateKeyDictWithDictMultiprocess(self):
    expected_dict = {str(x): x for x in range(20)}
    m = multiprocessing.Manager()
    metadata = metadata_lib.CBuildbotMetadata(multiprocess_manager=m)

    with parallel.BackgroundTaskRunner(metadata.UpdateKeyDictWithDict) as q:
      for k, v in expected_dict.iteritems():
        q.put(['my_dict', {k: v}])

    self.assertEqual(expected_dict, metadata.GetDict()['my_dict'])
Example #13
    def RunStages(self):
        """Runs through build process."""
        def _RunStageWrapper(board):
            self._RunStage(release_stages.PaygenStage,
                           board=board,
                           channels=self._run.options.channels)

        with parallel.BackgroundTaskRunner(_RunStageWrapper) as queue:
            for board in self._run.config.boards:
                queue.put([board])
Example #14
def main(argv):
    parser = GetParser()
    options = parser.parse_args(argv)
    options.Freeze()

    snapshot_ref = options.snapshot_ref
    if snapshot_ref and not snapshot_ref.startswith('refs/'):
        snapshot_ref = BRANCH_REF_PREFIX + snapshot_ref

    repo = repo_util.Repository.Find(options.repo_path)
    if repo is None:
        cros_build_lib.Die('No repo found in --repo_path %r.',
                           options.repo_path)

    manifest = repo.Manifest(revision_locked=True)
    projects = list(manifest.Projects())

    # Check if projects need snapshots (in parallel).
    needs_snapshot_results = parallel.RunTasksInProcessPool(
        _NeedsSnapshot, [(repo.root, x) for x in projects])

    # Group snapshot-needing projects by project name.
    snapshot_projects = {}
    for project, needs_snapshot in zip(projects, needs_snapshot_results):
        if needs_snapshot:
            snapshot_projects.setdefault(project.name, []).append(project)

    if snapshot_projects and not snapshot_ref:
        cros_build_lib.Die('Some project(s) need snapshot refs but no '
                           '--snapshot-ref specified.')

    # Push snapshot refs (in parallel).
    with parallel.BackgroundTaskRunner(_GitPushProjectUpstream,
                                       repo.root,
                                       dry_run=options.dry_run,
                                       processes=options.jobs) as queue:
        for projects in snapshot_projects.values():
            # Since some projects (e.g. chromiumos/third_party/kernel) are checked
            # out in multiple places, we may need to push each checkout to a unique ref.
            need_unique_refs = len(projects) > 1
            used_refs = set()
            for project in projects:
                if need_unique_refs:
                    ref = _MakeUniqueRef(project, snapshot_ref, used_refs)
                else:
                    ref = snapshot_ref
                # Update the upstream ref both for the push and the output XML.
                project.upstream = ref
                queue.put([project])

    dest = options.output_file
    if dest is None or dest == '-':
        dest = sys.stdout

    manifest.Write(dest)
Example #15
    def simulate_builds(self, db, metadatas):
        """Simulate a series of Commit Queue master and slave builds.

        This method uses the metadata objects in |metadatas| to simulate those
        builds' insertions and updates to the cidb. All metadatas encountered
        after a particular master build will be assumed to be slaves of that
        build, until a new master build is encountered. Slave builds for a
        particular master will be simulated in parallel.

        The first element in |metadatas| must be a CQ master build.

        Args:
          db: A CIDBConnection instance.
          metadatas: A list of CBuildbotMetadata instances, sorted by start time.
        """
        m_iter = iter(metadatas)

        def is_master(m):
            return m.GetDict()['bot-config'] == 'master-paladin'

        next_master = m_iter.next()

        while next_master:
            master = next_master
            next_master = None
            assert is_master(master)
            master_build_id = _SimulateBuildStart(db, master)

            def simulate_slave(slave_metadata):
                build_id = _SimulateBuildStart(db,
                                               slave_metadata,
                                               master_build_id,
                                               important=True)
                _SimulateCQBuildFinish(db, slave_metadata, build_id)
                logging.debug('Simulated slave build %s on pid %s', build_id,
                              os.getpid())
                return build_id

            slave_metadatas = []
            for slave in m_iter:
                if is_master(slave):
                    next_master = slave
                    break
                slave_metadatas.append(slave)

            with parallel.BackgroundTaskRunner(simulate_slave,
                                               processes=15) as queue:
                for slave in slave_metadatas:
                    queue.put([slave])

            # Yes, this introduces delay in the test. But this lets us do some basic
            # sanity tests on the |last_update| column later.
            time.sleep(1)
            _SimulateCQBuildFinish(db, master, master_build_id)
            logging.debug('Simulated master build %s', master_build_id)
Example #16
    def Run(self, func, *args, **kwargs):
        """Run func, parse its output, and update the progress bar.

        Args:
          func: Function to execute in the background and whose output is to be
            captured.
          update_period: Optional argument to specify the period that output should
            be read.
          log_level: Logging level to run the func at. By default, it runs at log
            level info.
        """
        update_period = kwargs.pop('update_period',
                                   self._PROGRESS_BAR_UPDATE_INTERVAL)

        # If we are not running in a terminal device, do not display the progress
        # bar.
        if not self._isatty:
            log_level = kwargs.pop('log_level', logging.INFO)
            restore_log_level = logging.getLogger().getEffectiveLevel()
            logging.getLogger().setLevel(log_level)
            try:
                func(*args, **kwargs)
            finally:
                logging.getLogger().setLevel(restore_log_level)
            return

        with osutils.TempDir() as tempdir:
            self._stdout_path = os.path.join(tempdir, STDOUT_FILE)
            self._stderr_path = os.path.join(tempdir, STDERR_FILE)
            osutils.Touch(self._stdout_path)
            osutils.Touch(self._stderr_path)
            try:
                with parallel.BackgroundTaskRunner(
                        self.CaptureOutputInBackground, func, *args,
                        **kwargs) as queue:
                    queue.put([])
                    self.OpenStdoutStderr()
                    while True:
                        self.ParseOutput()
                        if self.WaitUntilComplete(update_period):
                            break
                # Before we exit, parse the output again to update progress bar.
                self.ParseOutput()
                # Final sanity check to update the progress bar to 100% if it was used
                # by ParseOutput
                self.Cleanup()
            except:
                # Add a blank line before the logging message so the message isn't
                # touching the progress bar.
                sys.stdout.write('\n')
                logging.error('Oops. Something went wrong.')
                # Raise the exception so it can be caught again.
                raise
Example #17
  def RunAllConfigs(self, task, site_config=None):
    """Run |task| against all major configurations"""
    if site_config is None:
      site_config = config_lib.GetConfig()

    boards = ('samus', 'arm-generic')

    for board in boards:
      self.CreateMockOverlay(board)

    with parallel.BackgroundTaskRunner(task) as queue:
      # Test every build config on a waterfall that builds something.
      for bot_id, cfg in site_config.items():
        if not cfg.boards or cfg.boards[0] not in boards:
          continue

        queue.put([bot_id])
Example #18
def main(argv):
    """Main function for script to build/write firmware.

    Args:
      argv: Program arguments.
    """
    options = ParseCmdline(argv)
    base = SetupBuild(options)

    with parallel.BackgroundTaskRunner(Dumper) as queue:
        RunBuild(options, base, options.target, queue)

        if options.write:
            WriteFirmware(options)

        if options.objdump:
            Log('Writing disassembly files')
Example #19
def Examine(ctx, dryrun, expired_cutoff, candidates):
    """Given a list of candidates to move, move them to the backup buckets.

    Args:
      ctx: GS context.
      dryrun: Flag to turn on/off bucket updates.
      expired_cutoff: datetime.datetime of cutoff for expiring candidates.
      candidates: Iterable of gs.GSListResult objects.
    """
    # Scale our processes with CPUs, but overload since we mostly block on
    # external calls.
    processes = multiprocessing.cpu_count() * 5
    with parallel.BackgroundTaskRunner(purge_lib.ExpandAndExpire,
                                       ctx,
                                       dryrun,
                                       expired_cutoff,
                                       processes=processes) as expire_queue:
        for candidate in candidates:
            expire_queue.put((candidate, ))
Example #20
    def offload_once(self):
        """Perform one offload cycle.

        Find all job directories for new jobs that we haven't seen
        before.  Then, attempt to offload the directories for any
        jobs that have finished running.  Offload of multiple jobs
        is done in parallel, up to `self._processes` at a time.

        After we've tried uploading all directories, go through the list
        checking the status of all uploaded directories.  If necessary,
        report failures via e-mail.

        """
        self._add_new_jobs()
        with parallel.BackgroundTaskRunner(self._offload_func,
                                           processes=self._processes) as queue:
            for job in self._open_jobs.values():
                job.enqueue_offload(queue, self._age_limit)
        self._update_offload_results()
Example #21
    def ArtifactUploader(self, queue=None, archive=True, strict=True):
        """Upload each queued input in the background.

        This context manager starts a set of workers in the background, who each
        wait for input on the specified queue. These workers run
        self.UploadArtifact(*args, archive=archive) for each input in the queue.

        Args:
          queue: Queue to use. Add artifacts to this queue, and they will be
            uploaded in the background.  If None, one will be created on the fly.
          archive: Whether to automatically copy files to the archive dir.
          strict: Whether to treat upload errors as fatal.

        Returns:
          The queue to use. This is only useful if you did not supply a queue.
        """
        upload = lambda path: self.UploadArtifact(path, archive, strict)
        with parallel.BackgroundTaskRunner(
                upload, queue=queue, processes=self.PROCESSES) as bg_queue:
            yield bg_queue
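A hypothetical caller of ArtifactUploader(), not taken from the original source; stage stands in for an object exposing the method, and the artifact file names are made up:

def UploadImageArtifactsSketch(stage):
    """Hypothetical: queue a couple of artifacts for background upload."""
    with stage.ArtifactUploader(archive=False, strict=False) as queue:
        for artifact in ('image.zip', 'debug.tgz'):  # made-up file names
            queue.put([artifact])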
Example #22
  def testArgs(self):
    """Test that we can pass args down to the task."""
    with parallel.Manager() as manager:
      results = manager.Queue()
      arg2s = set((1, 2, 3))
      with parallel.BackgroundTaskRunner(_BackgroundTaskRunnerArgs, results,
                                         'arg1', kwarg1='kwarg1') as queue:
        for arg2 in arg2s:
          queue.put((arg2,))

      # Since the queue is unordered, need to handle arg2 specially.
      result_arg2s = set()
      for _ in range(3):
        result = results.get()
        self.assertEqual(result[0], 'arg1')
        result_arg2s.add(result[1])
        self.assertEqual(result[2], 'kwarg1')
        self.assertEqual(result[3], None)
      self.assertEqual(arg2s, result_arg2s)
      self.assertEqual(results.empty(), True)
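The task _BackgroundTaskRunnerArgs is not shown here; the sketch below is inferred from the assertions above (positional args given to BackgroundTaskRunner come first, the queued arg follows, then the keyword arguments, with kwarg2 assumed to default to None):

def _BackgroundTaskRunnerArgs(results, arg1, arg2, kwarg1=None, kwarg2=None):
    """Hypothetical task: record the arguments each invocation received."""
    results.put((arg1, arg2, kwarg1, kwarg2))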
Example #23
def UploadContext():
    """Provides a context where stats are uploaded in the background.

    Yields:
      A queue that accepts an arg-list of the format [stats, url, timeout].
    """
    try:
        # We need to use parallel.BackgroundTaskRunner, and not
        # parallel.RunParallelTasks, because with RunParallelTasks, both the
        # uploader and the subcommand are treated as background tasks, and the
        # subcommand will lose responsiveness, since its output will be buffered.
        with parallel.BackgroundTaskRunner(StatsUploader.Upload,
                                           processes=1) as queue:
            yield queue
    except parallel.BackgroundFailure as e:
        # Display unexpected errors, but don't propagate the error.
        # KeyboardInterrupts are OK to skip since the user initiated it.
        if (e.exc_infos and all(exc_info.type == KeyboardInterrupt
                                for exc_info in e.exc_infos)):
            return
        logging.error('Uncaught command stats exception', exc_info=True)
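A hypothetical use of UploadContext(), following the [stats, url, timeout] arg-list format described in its docstring; the argument values themselves are placeholders, not from the original source:

def _UploadCommandStatsSketch(stats, url, timeout):
    """Hypothetical caller: schedule one upload in the single background process."""
    with UploadContext() as queue:
        queue.put([stats, url, timeout])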
Example #24
    def RunAllConfigs(self, task, skip_missing=False, site_config=None):
        """Run |task| against all major configurations"""
        if site_config is None:
            site_config = chromeos_config.GetConfig()

        boards = ('samus', 'arm-generic')

        with parallel.BackgroundTaskRunner(task) as queue:
            # Test every build config on a waterfall that builds something.
            for bot_id, cfg in site_config.iteritems():
                if not cfg.boards or cfg.boards[0] not in boards:
                    continue

                if skip_missing:
                    try:
                        for b in cfg.boards:
                            portage_util.FindPrimaryOverlay(
                                constants.BOTH_OVERLAYS, b)
                    except portage_util.MissingOverlayException:
                        continue

                queue.put([bot_id])
Example #25
    def RunRemote(self):
        """Handle remote debugging, via gdbserver & cross debugger."""
        device = None
        try:
            device = remote_access.ChromiumOSDeviceHandler(
                self.remote,
                port=self.remote_port,
                connect_settings=self.ssh_settings,
                ping=self.ping).device
        except remote_access.DeviceNotPingableError:
            raise GdbBadRemoteDeviceError(
                'Remote device %s is not responding to '
                'ping.' % self.remote)

        self.VerifyAndFinishInitialization(device)
        gdb_cmd = self.cross_gdb

        gdb_commands = self.GetGdbInitCommands(self.inf_cmd)
        gdb_args = [gdb_cmd, '--quiet'
                    ] + ['--eval-command=%s' % x for x in gdb_commands]
        if self.cgdb:
            gdb_args = ['cgdb'] + gdb_args

        with parallel.BackgroundTaskRunner(self.StartGdbserver, self.inf_cmd,
                                           device) as task:
            task.put([])
            # Verify that gdbserver finished launching.
            try:
                timeout_util.WaitForSuccess(lambda x: len(x) == 0,
                                            self.device.GetRunningPids,
                                            4,
                                            func_args=('gdbserver', ))
            except timeout_util.TimeoutError:
                raise GdbUnableToStartGdbserverError(
                    'gdbserver did not start on'
                    ' remote device.')
            cros_build_lib.RunCommand(gdb_args)
Example #26
    def Run(self):
        files = self.options.files
        if not files:
            # Running with no arguments is allowed to make the repo upload hook
            # simple, but print a warning so that if someone runs this manually
            # they are aware that nothing was linted.
            logging.warning('No files provided to lint.  Doing nothing.')

        if self.options.pyver == 'py2':
            _EXT_TO_LINTER_MAP[frozenset({'.py'})] = _Pylint2File
        elif self.options.pyver == 'py3':
            _EXT_TO_LINTER_MAP[frozenset({'.py'})] = _Pylint3File
        elif self.options.pyver == 'py23':
            _EXT_TO_LINTER_MAP[frozenset({'.py'})] = _Pylint23File

        errors = multiprocessing.Value('i')
        linter_map = _BreakoutFilesByLinter(files)
        dispatcher = functools.partial(_Dispatcher, errors,
                                       self.options.output, self.options.debug)

        # Special case one file as it's common -- faster to avoid parallel startup.
        if not linter_map:
            return 0
        elif sum(len(x) for x in linter_map.values()) == 1:
            linter, files = next(iter(linter_map.items()))
            dispatcher(linter, files[0])
        else:
            # Run the linter in parallel on the files.
            with parallel.BackgroundTaskRunner(dispatcher) as q:
                for linter, files in linter_map.items():
                    for path in files:
                        q.put([linter, path])

        if errors.value:
            logging.error('Found lint errors in %i files.', errors.value)
            sys.exit(1)
Example #27
    def _DownloadCrashesInBackground(self):
        """Create a worker process for downloading stack traces."""
        with parallel.BackgroundTaskRunner(self._DownloadStackTrace,
                                           queue=self.crash_triage_queue,
                                           processes=self.jobs):
            yield
Example #28
    def VerifyDefaultQueue(self):
        """Verify that BackgroundTaskRunner will create a queue on its own."""
        with parallel.BackgroundTaskRunner(self._HelloWorld) as queue:
            queue.put([])
            self.printed_hello.wait()
Example #29
def GenerateBreakpadSymbols(board,
                            breakpad_dir=None,
                            strip_cfi=False,
                            generate_count=None,
                            sysroot=None,
                            num_processes=None,
                            clean_breakpad=False,
                            exclude_dirs=(),
                            file_list=None):
    """Generate symbols for this board.

    If |file_list| is None, symbols are generated for all executables, otherwise
    only for the files included in |file_list|.

    TODO(build):
    This should be merged with buildbot_commands.GenerateBreakpadSymbols()
    once we rewrite cros_generate_breakpad_symbols in python.

    Args:
      board: The board whose symbols we wish to generate
      breakpad_dir: The full path to the breakpad directory where symbols live
      strip_cfi: Do not generate CFI data
      generate_count: If set, only generate this many symbols (meant for testing)
      sysroot: The root where to find the corresponding ELFs
      num_processes: Number of jobs to run in parallel
      clean_breakpad: Should we `rm -rf` the breakpad output dir first; note: we
        do not do any locking, so do not run more than one in parallel when True
      exclude_dirs: List of dirs (relative to |sysroot|) to not search
      file_list: Only generate symbols for files in this list. Each file must be a
        full path (including |sysroot| prefix).
        TODO(build): Support paths w/o |sysroot|.

    Returns:
      The number of errors that were encountered.
    """
    if breakpad_dir is None:
        breakpad_dir = FindBreakpadDir(board)
    if sysroot is None:
        sysroot = cros_build_lib.GetSysroot(board=board)
    if clean_breakpad:
        logging.info('cleaning out %s first', breakpad_dir)
        osutils.RmDir(breakpad_dir, ignore_missing=True, sudo=True)
    # Make sure non-root can write out symbols as needed.
    osutils.SafeMakedirs(breakpad_dir, sudo=True)
    if not os.access(breakpad_dir, os.W_OK):
        cros_build_lib.SudoRunCommand(
            ['chown', '-R', str(os.getuid()), breakpad_dir])
    debug_dir = FindDebugDir(board)
    exclude_paths = [os.path.join(debug_dir, x) for x in exclude_dirs]
    if file_list is None:
        file_list = []
    file_filter = dict.fromkeys([os.path.normpath(x) for x in file_list],
                                False)

    logging.info('generating breakpad symbols using %s', debug_dir)

    # Let's locate all the debug_files and elfs first along with the debug file
    # sizes.  This way we can start processing the largest files first in parallel
    # with the small ones.
    # If |file_list| was given, ignore all other files.
    targets = []
    for root, dirs, files in os.walk(debug_dir):
        if root in exclude_paths:
            logging.info('Skipping excluded dir %s', root)
            del dirs[:]
            continue

        for debug_file in files:
            debug_file = os.path.join(root, debug_file)
            # Turn /build/$BOARD/usr/lib/debug/sbin/foo.debug into
            # /build/$BOARD/sbin/foo.
            elf_file = os.path.join(sysroot, debug_file[len(debug_dir) + 1:-6])

            if file_filter:
                if elf_file in file_filter:
                    file_filter[elf_file] = True
                elif debug_file in file_filter:
                    file_filter[debug_file] = True
                else:
                    continue

            # Filter out files based on common issues with the debug file.
            if not debug_file.endswith('.debug'):
                continue

            elif debug_file.endswith('.ko.debug'):
                logging.debug('Skipping kernel module %s', debug_file)
                continue

            elif os.path.islink(debug_file):
                # The build-id stuff is common enough to filter out by default.
                if '/.build-id/' in debug_file:
                    msg = logging.debug
                else:
                    msg = logging.warning
                msg('Skipping symbolic link %s', debug_file)
                continue

            # Filter out files based on common issues with the elf file.
            if not os.path.exists(elf_file):
                # Sometimes we filter out programs from /usr/bin but leave behind
                # the .debug file.
                logging.warning('Skipping missing %s', elf_file)
                continue

            targets.append((os.path.getsize(debug_file), elf_file, debug_file))

    bg_errors = multiprocessing.Value('i')
    if file_filter:
        files_not_found = [
            x for x, found in file_filter.iteritems() if not found
        ]
        bg_errors.value += len(files_not_found)
        if files_not_found:
            logging.error('Failed to find requested files: %s',
                          files_not_found)

    # Now start generating symbols for the discovered elfs.
    with parallel.BackgroundTaskRunner(GenerateBreakpadSymbol,
                                       breakpad_dir=breakpad_dir,
                                       strip_cfi=strip_cfi,
                                       num_errors=bg_errors,
                                       processes=num_processes) as queue:
        for _, elf_file, debug_file in sorted(targets, reverse=True):
            if generate_count == 0:
                break

            queue.put([elf_file, debug_file])
            if generate_count is not None:
                generate_count -= 1
                if generate_count == 0:
                    break

    return bg_errors.value
Example #30
    def _PrintStackTracesInBackground(self):
        with parallel.BackgroundTaskRunner(self._ProcessStackTrace,
                                           queue=self.stack_trace_queue,
                                           processes=1,
                                           onexit=self._PrintStackTraces):
            yield