def testReportStats(self):
    retry_stats.SetupStats()

    # Insert some stats to report.
    retry_stats.RetryWithStats(
        self.CAT, self.handlerNoRetry, 3, self.callSuccess)
    retry_stats.RetryWithStats(
        self.CAT_B, self.handlerNoRetry, 3, self.callSuccess)
    self.assertRaises(TestRetryException,
                      retry_stats.RetryWithStats,
                      self.CAT, self.handlerRetry, 3, self.callFailure)

    out = StringIO()
    retry_stats.ReportStats(out)

    # Expecting reports for both CAT and CAT_B used above.
    expected = """************************************************************
** Performance Statistics for Test Service A
**
** Success: 1
** Failure: 1
** Retries: 3
** Total: 2
************************************************************
************************************************************
** Performance Statistics for Test Service B
**
** Success: 1
** Failure: 0
** Retries: 0
** Total: 1
************************************************************
"""

    self.assertEqual(out.getvalue(), expected)
Example #2
    def setUp(self):
        for cmd in ((osutils, 'WriteFile'), (commands, 'UploadArchivedFile'),
                    (alerts, 'SendEmail')):
            self.StartPatcher(mock.patch.object(*cmd, autospec=True))
        retry_stats.SetupStats()

        self.PatchObject(report_stages.ReportStage,
                         '_GetBuildDuration',
                         return_value=1000)
        self.PatchObject(toolchain, 'GetToolchainsForBoard')
        self.PatchObject(toolchain, 'GetArchForTarget', return_value='x86')

        # We need to mock out the function in risk_report that calls the real
        # CL-Scanner API to avoid relying on external dependencies in the test.
        self.PatchObject(risk_report,
                         '_GetCLRisks',
                         return_value={'1234': 1.0})

        # Set up a general purpose cidb mock. Tests with more specific
        # mock requirements can replace this with a separate call to
        # SetupMockCidb
        self.mock_cidb = mock.MagicMock()
        cidb.CIDBConnectionFactory.SetupMockCidb(self.mock_cidb)

        # Set up topology for unittests
        keyvals = {topology.DATASTORE_WRITER_CREDS_KEY: './foo/bar.cert'}
        topology_unittest.FakeFetchTopologyFromCIDB(keyvals=keyvals)

        self._Prepare()
  def testReportStatsEmpty(self):
    retry_stats.SetupStats()

    out = StringIO()
    retry_stats.ReportStats(out)

    # No data collected means no categories are known, nothing to report.
    self.assertEqual(out.getvalue(), '')
Example #4
def UploadSymbols(sym_paths, upload_url, product_name, dedupe_namespace=None,
                  failed_list=None, upload_limit=None, strip_cfi=None):
  """Upload all the generated symbols for |board| to the crash server

  Args:
    sym_paths: Specific symbol files (or dirs of sym files) to upload,
      otherwise search |breakpad_dir|
    upload_url: URL of the crash server to upload to.
    product_name: A string for crash server stats purposes.
                  Usually 'ChromeOS' or 'Android'.
    dedupe_namespace: None for no deduping, or string namespace in isolate.
    failed_list: A filename at which to write out a list of our failed uploads.
    upload_limit: Integer listing how many files to upload. None for no limit.
    strip_cfi: File size at which we strip out CFI data. None for no limit.

  Returns:
    The number of errors that were encountered.
  """
  retry_stats.SetupStats()

  # Note: This method looks like each step of processing is performed
  # sequentially for all SymbolFiles, but instead each step is a generator that
  # produces the next iteration only when it's read. This means that (except for
  # some batching) each SymbolFile goes through all of these steps before the
  # next one is processed at all.

  # This tempdir is used to hold stripped copies of the symbol files.
  with osutils.TempDir(prefix='upload_symbols.') as tempdir:
    symbols = FindSymbolFiles(tempdir, sym_paths)

    # Sort all of our symbols so the largest ones (probably the most important)
    # are processed first.
    symbols = list(symbols)
    symbols.sort(key=lambda s: s.FileSize(), reverse=True)

    if upload_limit is not None:
      # Restrict symbols processed to the limit.
      symbols = itertools.islice(symbols, None, upload_limit)

    # Strip CFI, if needed.
    symbols = (AdjustSymbolFileSize(s, tempdir, strip_cfi) for s in symbols)

    # Skip duplicates.
    if dedupe_namespace:
      symbols = FindDuplicates(symbols, dedupe_namespace)

    # Perform uploads
    symbols = PerformSymbolsFileUpload(symbols, upload_url, product_name)

    # Record for future deduping.
    if dedupe_namespace:
      symbols = PostForDeduplication(symbols, dedupe_namespace)

    # Log the final results, and consume the symbols generator fully.
    failures = ReportResults(symbols, failed_list)

  return failures
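
A minimal usage sketch for the function above, assuming the surrounding chromite module that defines UploadSymbols is importable; the paths, URL, and limits are illustrative placeholders, not values from the original example.

# Hypothetical invocation; all literal values below are placeholders.
sym_paths = ['/tmp/breakpad-syms']        # dirs of .sym files (or single files)
upload_url = 'https://crash.example.com'  # placeholder crash-server endpoint
errors = UploadSymbols(sym_paths, upload_url, 'ChromeOS',
                       failed_list='/tmp/failed_uploads.txt',
                       upload_limit=100,
                       strip_cfi=10 * 1024 * 1024)
if errors:
  print('%d symbol uploads failed' % errors)
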
    def setUp(self):
        for cmd in ((osutils, 'WriteFile'), (commands, 'UploadArchivedFile'),
                    (alerts, 'SendEmail')):
            self.StartPatcher(mock.patch.object(*cmd, autospec=True))
        retry_stats.SetupStats()

        # Set up a general purpose cidb mock. Tests with more specific
        # mock requirements can replace this with a separate call to
        # SetupMockCidb
        cidb.CIDBConnectionFactory.SetupMockCidb(mock.MagicMock())

        self._Prepare()
    def setUp(self):
        self.db = fake_cidb.FakeCIDBConnection()
        cidb.CIDBConnectionFactory.SetupMockCidb(self.db)
        retry_stats.SetupStats()
        os.environ['BUILDBOT_MASTERNAME'] = constants.WATERFALL_EXTERNAL

        master_build_id = self.db.InsertBuild('master_build',
                                              constants.WATERFALL_EXTERNAL, 1,
                                              'master_build_config',
                                              'bot_hostname')

        self._Prepare(build_id=None, master_build_id=master_build_id)
Example #7
    def testFailureNoRetry(self):
        """Verify that we can handle a failure if the handler doesn't retry."""
        retry_stats.SetupStats()
        self._verifyStats(self.CAT)

        # Fail once without retries.
        self.assertRaises(TestRetryException, retry_stats.RetryWithStats,
                          self.CAT, self.handlerNoRetry, 3, self.callFailure)
        self._verifyStats(self.CAT, failure=1)

        # Fail twice without retries.
        self.assertRaises(TestRetryException, retry_stats.RetryWithStats,
                          self.CAT, self.handlerNoRetry, 3, self.callFailure)
        self._verifyStats(self.CAT, failure=2)
Example #8
    def setUp(self):
        self.db = fake_cidb.FakeCIDBConnection()
        self.buildstore = FakeBuildStore(self.db)
        cidb.CIDBConnectionFactory.SetupMockCidb(self.db)
        retry_stats.SetupStats()

        master_build_id = self.db.InsertBuild('master_build', 1,
                                              'master_build_config',
                                              'bot_hostname')

        self.PatchObject(toolchain, 'GetToolchainsForBoard')
        self.PatchObject(toolchain, 'GetArchForTarget', return_value='x86')

        self._Prepare(build_id=None, master_build_id=master_build_id)
Example #9
    def setUp(self):
        self.db = fake_cidb.FakeCIDBConnection()
        cidb.CIDBConnectionFactory.SetupMockCidb(self.db)
        retry_stats.SetupStats()
        os.environ['BUILDBOT_MASTERNAME'] = waterfall.WATERFALL_EXTERNAL

        master_build_id = self.db.InsertBuild('master_build',
                                              waterfall.WATERFALL_EXTERNAL, 1,
                                              'master_build_config',
                                              'bot_hostname')

        self.PatchObject(toolchain, 'GetToolchainsForBoard')
        self.PatchObject(toolchain, 'GetArchForTarget', return_value='x86')

        self._Prepare(build_id=None, master_build_id=master_build_id)
Example #10
    def testFailureRetry(self):
        """Verify that we can handle a failure if we use all retries."""
        retry_stats.SetupStats()
        self._verifyStats(self.CAT)

        # Fail once with exhausted retries.
        self.assertRaises(TestRetryException, retry_stats.RetryWithStats,
                          self.CAT, self.handlerRetry, 3, self.callFailure)
        self._verifyStats(self.CAT, failure=1,
                          retry=3)  # 3 retries = 4 attempts.

        # Fail twice with exhausted retries.
        self.assertRaises(TestRetryException, retry_stats.RetryWithStats,
                          self.CAT, self.handlerRetry, 3, self.callFailure)
        self._verifyStats(self.CAT, failure=2, retry=6)
  def testSuccess(self):
    """Verify that we can handle a successful call."""
    retry_stats.SetupStats()
    self._verifyStats(self.CAT)

    # Succeed once.
    result = retry_stats.RetryWithStats(
        self.CAT, self.handlerNoRetry, 3, self.callSuccess)
    self.assertEqual(result, self.SUCCESS_RESULT)
    self._verifyStats(self.CAT, success=1)

    # Succeed twice.
    result = retry_stats.RetryWithStats(
        self.CAT, self.handlerNoRetry, 3, self.callSuccess)
    self.assertEqual(result, self.SUCCESS_RESULT)
    self._verifyStats(self.CAT, success=2)
    def setUp(self):
        for cmd in ((osutils, 'WriteFile'), (commands, 'UploadArchivedFile'),
                    (alerts, 'SendEmail')):
            self.StartPatcher(mock.patch.object(*cmd, autospec=True))
        retry_stats.SetupStats()

        self.PatchObject(report_stages.ReportStage,
                         '_GetBuildDuration',
                         return_value=1000)

        # Set up a general purpose cidb mock. Tests with more specific
        # mock requirements can replace this with a separate call to
        # SetupMockCidb
        self.mock_cidb = mock.MagicMock()
        cidb.CIDBConnectionFactory.SetupMockCidb(self.mock_cidb)

        self._Prepare()
  def testReportCategoryStatsEmpty(self):
    retry_stats.SetupStats()

    out = StringIO()

    retry_stats.ReportCategoryStats(out, self.CAT)

    expected = """************************************************************
** Performance Statistics for Test Service A
**
** Success: 0
** Failure: 0
** Retries: 0
** Total: 0
************************************************************
"""

    self.assertEqual(out.getvalue(), expected)
  def testSuccessRetry(self):
    """Verify that we can handle a successful call after tries."""
    retry_stats.SetupStats()
    self._verifyStats(self.CAT)

    # Use this scoped list as a persistent counter.
    call_counter = ['fail 1', 'fail 2']

    def callRetrySuccess():
      if call_counter:
        raise TestRetryException(call_counter.pop())
      else:
        return self.SUCCESS_RESULT

    # Retry twice, then succeed.
    result = retry_stats.RetryWithStats(
        self.CAT, self.handlerRetry, 3, callRetrySuccess)
    self.assertEqual(result, self.SUCCESS_RESULT)
    self._verifyStats(self.CAT, success=1, retry=2)
Example #15
    def setUp(self):
        for cmd in ((osutils, 'WriteFile'), (commands, 'UploadArchivedFile'),
                    (alerts, 'SendEmail')):
            self.StartPatcher(mock.patch.object(*cmd, autospec=True))
        retry_stats.SetupStats()

        self.PatchObject(report_stages.ReportStage,
                         '_GetBuildDuration',
                         return_value=1000)
        self.PatchObject(toolchain, 'GetToolchainsForBoard')
        self.PatchObject(toolchain, 'GetArchForTarget', return_value='x86')

        # Set up a general purpose cidb mock. Tests with more specific
        # mock requirements can replace this with a separate call to
        # SetupMockCidb
        self.mock_cidb = mock.MagicMock()
        self.buildstore = FakeBuildStore(self.mock_cidb)
        cidb.CIDBConnectionFactory.SetupMockCidb(self.mock_cidb)

        # Set up topology for unittests
        keyvals = {topology.DATASTORE_WRITER_CREDS_KEY: './foo/bar.cert'}
        topology_unittest.FakeFetchTopology(keyvals=keyvals)

        self._Prepare()
  def testSetupStats(self):
    """Verify that we do something when we set up a new stats category."""
    # Show that setup does something.
    self.assertEqual(retry_stats._STATS_COLLECTION, None)
    retry_stats.SetupStats()
    self.assertNotEqual(retry_stats._STATS_COLLECTION, None)
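
The tests above all exercise the same basic flow: SetupStats, then RetryWithStats, then (optionally) ReportStats. A minimal sketch of that flow outside the test harness, assuming the chromite retry_stats module is importable; the category name, handler, and flaky_call are illustrative stand-ins, not taken from the tests.

import sys

from chromite.lib import retry_stats

CATEGORY = 'Example Service'  # hypothetical stats category


def handler(exc):
  """Return True to retry on this exception, False to fail immediately."""
  return isinstance(exc, ValueError)


def flaky_call():
  """Stand-in for the operation being retried."""
  return 'ok'


retry_stats.SetupStats()  # initialize the stats collection first
result = retry_stats.RetryWithStats(CATEGORY, handler, 3, flaky_call)
retry_stats.ReportStats(sys.stdout)  # one summary block per category, as in testReportStats
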
Example #17
def main(argv):
    # We get false positives with the options object.
    # pylint: disable=attribute-defined-outside-init

    # Turn on strict sudo checks.
    cros_build_lib.STRICT_SUDO = True

    # Set umask to 022 so files created by buildbot are readable.
    os.umask(0o22)

    parser = _CreateParser()
    options = ParseCommandLine(parser, argv)

    # Fetch our site_config now, because we need it to do anything else.
    site_config = config_lib.GetConfig()

    _PostParseCheck(parser, options, site_config)

    cros_build_lib.AssertOutsideChroot()

    if options.enable_buildbot_tags:
        logging.EnableBuildbotMarkers()

    if (options.buildbot and not options.debug
            and not options.build_config_name == constants.BRANCH_UTIL_CONFIG
            and not cros_build_lib.HostIsCIBuilder()):
        # --buildbot can only be used on a real builder, unless it's debug, or
        # 'branch-util'.
        cros_build_lib.Die('This host is not a supported build machine.')

    # Only one config arg is allowed in this mode, which was confirmed earlier.
    build_config = site_config[options.build_config_name]

    # TODO: Re-enable this block when reference_repo support handles this
    #       properly. (see chromium:330775)
    # if options.reference_repo is None:
    #   repo_path = os.path.join(options.sourceroot, '.repo')
    #   # If we're being run from a repo checkout, reuse the repo's git pool to
    #   # cut down on sync time.
    #   if os.path.exists(repo_path):
    #     options.reference_repo = options.sourceroot

    if options.reference_repo:
        if not os.path.exists(options.reference_repo):
            parser.error('Reference path %s does not exist' %
                         (options.reference_repo, ))
        elif not os.path.exists(os.path.join(options.reference_repo, '.repo')):
            parser.error('Reference path %s does not look to be the base of a '
                         'repo checkout; no .repo exists in the root.' %
                         (options.reference_repo, ))

    if (options.buildbot or options.remote_trybot) and not options.resume:
        if not options.cgroups:
            parser.error(
                'Options --buildbot/--remote-trybot and --nocgroups cannot '
                'be used together.  Cgroup support is required for '
                'buildbot/remote-trybot mode.')
        if not cgroups.Cgroup.IsSupported():
            parser.error(
                'Option --buildbot/--remote-trybot was given, but this '
                'system does not support cgroups.  Failing.')

        missing = osutils.FindMissingBinaries(_BUILDBOT_REQUIRED_BINARIES)
        if missing:
            parser.error(
                'Option --buildbot/--remote-trybot requires the following '
                "binaries which couldn't be found in $PATH: %s" %
                (', '.join(missing)))

    if options.reference_repo:
        options.reference_repo = os.path.abspath(options.reference_repo)

    # Sanity check of buildroot: specifically that it's not pointing into the
    # midst of an existing repo, since git-repo doesn't support nesting.
    if (not repository.IsARepoRoot(options.buildroot)
            and git.FindRepoDir(options.buildroot)):
        cros_build_lib.Die(
            'Configured buildroot %s is a subdir of an existing repo checkout.'
            % options.buildroot)

    if not options.log_dir:
        options.log_dir = os.path.join(options.buildroot, _DEFAULT_LOG_DIR)

    log_file = None
    if options.tee:
        log_file = os.path.join(options.log_dir, _BUILDBOT_LOG_FILE)
        osutils.SafeMakedirs(options.log_dir)
        _BackupPreviousLog(log_file)

    with cros_build_lib.ContextManagerStack() as stack:
        options.preserve_paths = set()
        if log_file is not None:
            # We don't want the critical section to try to clean up the tee process,
            # so we run Tee (forked off) outside of it. This prevents a deadlock
            # because the Tee process only exits when its pipe is closed, and the
            # critical section accidentally holds on to that file handle.
            stack.Add(tee.Tee, log_file)
            options.preserve_paths.add(_DEFAULT_LOG_DIR)

        critical_section = stack.Add(cleanup.EnforcedCleanupSection)
        stack.Add(sudo.SudoKeepAlive)

        if not options.resume:
            # If we're in resume mode, use our parent's tempdir rather than
            # nesting another layer.
            stack.Add(osutils.TempDir, prefix='cbuildbot-tmp', set_global=True)
            logging.debug('Cbuildbot tempdir is %r.', os.environ.get('TMP'))

        if options.cgroups:
            stack.Add(cgroups.SimpleContainChildren, 'cbuildbot')

        # Mark everything between EnforcedCleanupSection and here as having to
        # be rolled back via the contextmanager cleanup handlers.  This
        # ensures that sudo bits cannot outlive cbuildbot, that anything
        # cgroups would kill gets killed, etc.
        stack.Add(critical_section.ForkWatchdog)

        if options.mock_tree_status is not None:
            stack.Add(_ObjectMethodPatcher,
                      tree_status,
                      '_GetStatus',
                      return_value=options.mock_tree_status)

        if options.mock_slave_status is not None:
            with open(options.mock_slave_status, 'r') as f:
                mock_statuses = pickle.load(f)
                for key, value in mock_statuses.iteritems():
                    mock_statuses[key] = builder_status_lib.BuilderStatus(
                        **value)
            stack.Add(_ObjectMethodPatcher,
                      completion_stages.MasterSlaveSyncCompletionStage,
                      '_FetchSlaveStatuses',
                      return_value=mock_statuses)

        stack.Add(_SetupConnections, options, build_config)
        retry_stats.SetupStats()

        timeout_display_message = None
        # For master-slave builds: Update slave's timeout using master's published
        # deadline.
        if options.buildbot and options.master_build_id is not None:
            slave_timeout = None
            if cidb.CIDBConnectionFactory.IsCIDBSetup():
                cidb_handle = (
                    cidb.CIDBConnectionFactory.GetCIDBConnectionForBuilder())
                if cidb_handle:
                    slave_timeout = cidb_handle.GetTimeToDeadline(
                        options.master_build_id)

            if slave_timeout is not None:
                # We artificially set a minimum slave_timeout because '0' is handled
                # specially, and because we don't want to timeout while trying to set
                # things up.
                slave_timeout = max(slave_timeout, 20)
                if options.timeout == 0 or slave_timeout < options.timeout:
                    logging.info(
                        'Updating slave build timeout to %d seconds enforced '
                        'by the master', slave_timeout)
                    options.timeout = slave_timeout
                    timeout_display_message = (
                        'This build has reached the timeout deadline set by the master. '
                        'Either this stage or a previous one took too long (see stage '
                        'timing historical summary in ReportStage) or the build failed '
                        'to start on time.')
            else:
                logging.warning(
                    'Could not get master deadline for master-slave build. '
                    'Can not set slave timeout.')

        if options.timeout > 0:
            stack.Add(timeout_util.FatalTimeout, options.timeout,
                      timeout_display_message)
        try:
            _RunBuildStagesWrapper(options, site_config, build_config)
        except failures_lib.ExitEarlyException as ex:
            # This build finished successfully. Do not re-raise ExitEarlyException.
            logging.info('One stage exited early: %s', ex)