Example #1
    def FormatAndPrintResults(self, bisect_results):
        """Prints the results from a bisection run in a readable format.

    Also prints annotations creating buildbot step "Results".

    Args:
      bisect_results: BisectResult object containing results to be printed.
    """
        if bisect_results.abort_reason:
            self._PrintAbortResults(bisect_results.abort_reason)
            return

        if self.opts.output_buildbot_annotations:
            bisect_utils.OutputAnnotationStepStart('Build Status Per Revision')

        print
        print 'Full results of bisection:'
        for revision_state in bisect_results.state.GetRevisionStates():
            build_status = revision_state.passed

            if isinstance(build_status, bool):
                build_status = 'Good' if build_status else 'Bad'

            print '  %20s  %40s  %s' % (revision_state.depot,
                                        revision_state.revision, build_status)
        print

        if self.opts.output_buildbot_annotations:
            bisect_utils.OutputAnnotationStepClosed()
            # The perf dashboard scrapes the "results" step in order to comment on
            # bugs. If you change this, please update the perf dashboard as well.
            bisect_utils.OutputAnnotationStepStart('Results')

        self._PrintBanner(bisect_results)
        self._PrintWarnings(bisect_results.warnings)

        if bisect_results.culprit_revisions and bisect_results.confidence:
            for culprit in bisect_results.culprit_revisions:
                cl, info, depot = culprit
                self._PrintRevisionInfo(cl, info, depot)
            if bisect_results.other_regressions:
                self._PrintOtherRegressions(bisect_results.other_regressions)
        self._PrintRetestResults(bisect_results)
        self._PrintTestedCommitsTable(bisect_results.state.GetRevisionStates(),
                                      bisect_results.first_working_revision,
                                      bisect_results.last_broken_revision,
                                      bisect_results.confidence,
                                      final_step=True)
        self._PrintStepTime(bisect_results.state.GetRevisionStates())
        self._PrintReproSteps()
        self._PrintThankYou()
        if self.opts.output_buildbot_annotations:
            bisect_utils.OutputAnnotationStepClosed()
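The bisect_utils annotation helpers used throughout these examples are defined elsewhere. As a point of reference, here is a minimal sketch of what they plausibly look like, assuming the standard buildbot annotator protocol of @@@...@@@ control lines (the real implementation may differ):

def OutputAnnotationStepStart(name):
    # Seed a new buildbot step and move the annotator cursor to it, so that
    # subsequent output is attributed to that step.
    print
    print '@@@SEED_STEP %s@@@' % name
    print '@@@STEP_CURSOR %s@@@' % name
    print '@@@STEP_STARTED@@@'


def OutputAnnotationStepClosed():
    # Close the step the annotator cursor currently points at.
    print '@@@STEP_CLOSED@@@'
    print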
Example #2
    def _PrintAbortResults(self, abort_reason):
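        """Prints results for an aborted bisection run."""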

        if self.opts.output_buildbot_annotations:
            bisect_utils.OutputAnnotationStepStart('Results')
        print ABORT_REASON_TEMPLATE % {
            'abort_reason': abort_reason,
            'bug_id': self.opts.bug_id or 'NOT SPECIFIED',
            'command': self.opts.command,
            'metric': '/'.join(self.opts.metric),
            'good_revision': self.opts.good_revision,
            'bad_revision': self.opts.bad_revision,
        }
        self._PrintThankYou()
        if self.opts.output_buildbot_annotations:
            bisect_utils.OutputAnnotationStepClosed()
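ABORT_REASON_TEMPLATE is a module-level constant defined elsewhere in the script. Judging from the keys supplied above, its shape is roughly as follows; the wording here is illustrative, not the actual template:

ABORT_REASON_TEMPLATE = """
===== BISECTION ABORTED =====
The bisect was aborted because %(abort_reason)s
Please contact the team if you believe this is in error.

Bug ID: %(bug_id)s

Test Command: %(command)s
Test Metric: %(metric)s
Good revision: %(good_revision)s
Bad revision: %(bad_revision)s
"""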
Example #3
def _SetupAndRunPerformanceTest(config, path_to_file, path_to_goma):
    """Attempts to build and run the current revision with and without the
  current patch, with the parameters passed in.

  Args:
    config: The config read from run-perf-test.cfg.
    path_to_file: Path to the bisect-perf-regression.py script.
    path_to_goma: Path to goma directory.

  Returns:
    0 on success, otherwise 1.
  """
    try:
        with Goma(path_to_goma) as goma:
            config['use_goma'] = bool(path_to_goma)
            _RunPerformanceTest(config, path_to_file)
        return 0
    except RuntimeError as e:
        bisect_utils.OutputAnnotationStepClosed()
        _OutputFailedResults('Error: %s' % e)
        return 1
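The Goma context manager used above is defined elsewhere in the script. A minimal sketch of the contract this code relies on, assuming goma is driven through its goma_ctl control script (a no-op when no goma path is given; platform details such as the .sh/.bat suffix are glossed over):

import os
import subprocess


class Goma(object):
    def __init__(self, path_to_goma):
        self._ctl = (os.path.join(path_to_goma, 'goma_ctl.sh')
                     if path_to_goma else None)

    def __enter__(self):
        # Start (or confirm) the compiler proxy before any builds run.
        if self._ctl:
            subprocess.call([self._ctl, 'ensure_start'])
        return self

    def __exit__(self, *_):
        # Always stop the proxy, even if the wrapped block raised.
        if self._ctl:
            subprocess.call([self._ctl, 'stop'])
        return False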
Example #4
    def _PrintAbortResults(self, abort_reason):
        """Prints results for an aborted bisection run."""
        if self.opts.output_buildbot_annotations:
            bisect_utils.OutputAnnotationStepStart('Results')

        # Metric string in config is not split in case of return code mode.
        if (self.opts.metric and
                self.opts.bisect_mode != bisect_utils.BISECT_MODE_RETURN_CODE):
            metric = '/'.join(self.opts.metric)
        else:
            metric = self.opts.metric

        print ABORT_REASON_TEMPLATE % {
            'abort_reason': abort_reason,
            'bug_id': self.opts.bug_id or 'NOT SPECIFIED',
            'command': self.opts.command,
            'metric': metric,
            'good_revision': self.opts.good_revision,
            'bad_revision': self.opts.bad_revision,
        }
        self._PrintThankYou()
        if self.opts.output_buildbot_annotations:
            bisect_utils.OutputAnnotationStepClosed()
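To illustrate the branch above: in the regular bisect modes, opts.metric holds the already-split [chart, trace] pair, so it is re-joined for display; in return-code mode it is whatever the config supplied (possibly None), and joining it would crash. Hypothetical values:

metric = ['times', 'page_load_time']  # regular bisect mode
print '/'.join(metric)                # -> times/page_load_time
metric = None                         # return-code mode: left unsplit
print metric                          # -> None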
Example #5
def _RunBisectionScript(config, working_directory, path_to_file, path_to_goma,
                        dry_run):
    """Attempts to execute src/tools/bisect-perf-regression.py with the parameters
  passed in.

  Args:
    config: A dict containing the parameters to pass to the script.
    working_directory: A working directory to provide to the
      bisect-perf-regression.py script, where it will store it's own copy of
      the depot.
    path_to_file: Path to the bisect-perf-regression.py script.
    path_to_goma: Path to goma directory.
    dry_run: Do a dry run, skipping sync, build, and performance testing steps.

  Returns:
    0 on success, otherwise 1.
  """
    bisect_utils.OutputAnnotationStepStart('Config')
    print
    for k, v in config.iteritems():
        print '  %s : %s' % (k, v)
    print
    bisect_utils.OutputAnnotationStepClosed()

    cmd = [
        'python',
        os.path.join(path_to_file, 'bisect-perf-regression.py'), '-c',
        config['command'], '-g', config['good_revision'], '-b',
        config['bad_revision'], '-m', config['metric'], '--working_directory',
        working_directory, '--output_buildbot_annotations'
    ]

    if config['repeat_count']:
        cmd.extend(['-r', config['repeat_count']])

    if config['truncate_percent']:
        cmd.extend(['-t', config['truncate_percent']])

    if config['max_time_minutes']:
        cmd.extend(['--max_time_minutes', config['max_time_minutes']])

    cmd.extend(['--build_preference', 'ninja'])

    if '--browser=cros' in config['command']:
        cmd.extend(['--target_platform', 'cros'])

        if os.environ.get(CROS_BOARD_ENV) and os.environ.get(CROS_IP_ENV):
            cmd.extend(['--cros_board', os.environ[CROS_BOARD_ENV]])
            cmd.extend(['--cros_remote_ip', os.environ[CROS_IP_ENV]])
        else:
            print 'Error: Cros build selected, but BISECT_CROS_IP or '\
                  'BISECT_CROS_BOARD undefined.'
            print
            return 1

    if 'android' in config['command']:
        cmd.extend(['--target_platform', 'android'])

    if path_to_goma:
        cmd.append('--use_goma')

    if dry_run:
        cmd.extend([
            '--debug_ignore_build', '--debug_ignore_sync',
            '--debug_ignore_perf_test'
        ])
    cmd = [str(c) for c in cmd]

    with Goma(path_to_goma) as goma:
        return_code = subprocess.call(cmd)

    if return_code:
        print 'Error: bisect-perf-regression.py returned with error %d' %\
            return_code
        print

    return return_code
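For reference, a hypothetical config of the shape this function consumes; the key names come from the lookups above, while the values are purely illustrative:

config = {
    'command': './tools/perf/run_benchmark -v --browser=release sunspider',
    'good_revision': '233015',
    'bad_revision': '233115',
    'metric': 'times/page_load_time',
    'repeat_count': '20',
    'max_time_minutes': '20',
    'truncate_percent': '25',
}

The values can stay strings: the function converts every element of cmd with str() before invoking subprocess.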
Example #6
def _RunPerformanceTest(config, path_to_file):
    # Bisect script expects to be run from src
    os.chdir(os.path.join(path_to_file, '..'))

    bisect_utils.OutputAnnotationStepStart('Building With Patch')

    opts = _CreateBisectOptionsFromConfig(config)
    b = bisect.BisectPerformanceMetrics(None, opts)

    if bisect_utils.RunGClient(['runhooks']):
        raise RuntimeError('Failed to run gclient runhooks')

    if not b.BuildCurrentRevision('chromium'):
        raise RuntimeError('Patched version failed to build.')

    bisect_utils.OutputAnnotationStepClosed()
    bisect_utils.OutputAnnotationStepStart('Running With Patch')

    results_with_patch = b.RunPerformanceTestAndParseResults(
        opts.command,
        opts.metric,
        reset_on_first_run=True,
        results_label='Patch')

    if results_with_patch[1]:
        raise RuntimeError('Patched version failed to run performance test.')

    bisect_utils.OutputAnnotationStepClosed()

    bisect_utils.OutputAnnotationStepStart('Reverting Patch')
    if bisect_utils.RunGClient(['revert']):
        raise RuntimeError('Failed to run gclient revert')
    bisect_utils.OutputAnnotationStepClosed()

    bisect_utils.OutputAnnotationStepStart('Building Without Patch')

    if bisect_utils.RunGClient(['runhooks']):
        raise RuntimeError('Failed to run gclient runhooks')

    if not b.BuildCurrentRevision('chromium'):
        raise RuntimeError('Unpatched version failed to build.')

    bisect_utils.OutputAnnotationStepClosed()
    bisect_utils.OutputAnnotationStepStart('Running Without Patch')

    results_without_patch = b.RunPerformanceTestAndParseResults(
        opts.command,
        opts.metric,
        upload_on_last_run=True,
        results_label='ToT')

    if results_without_patch[1]:
        raise RuntimeError('Unpatched version failed to run performance test.')

    # Find the link to the cloud stored results file.
    output = results_without_patch[2]
    cloud_file_link = [
        t for t in output.splitlines()
        if 'storage.googleapis.com/chromium-telemetry/html-results/' in t
    ]
    if cloud_file_link:
        # What we're getting here is basically "View online at http://..." so parse
        # out just the url portion.
        cloud_file_link = cloud_file_link[0]
        cloud_file_link = [
            t for t in cloud_file_link.split(' ')
            if 'storage.googleapis.com/chromium-telemetry/html-results/' in t
        ]
        assert cloud_file_link, "Couldn't parse url from output."
        cloud_file_link = cloud_file_link[0]
    else:
        cloud_file_link = ''

    # Calculate the % difference in the means of the 2 runs.
    percent_diff_in_means = (results_with_patch[0]['mean'] / max(
        0.0001, results_without_patch[0]['mean'])) * 100.0 - 100.0
    std_err = bisect.CalculatePooledStandardError(
        [results_with_patch[0]['values'], results_without_patch[0]['values']])

    bisect_utils.OutputAnnotationStepClosed()
    bisect_utils.OutputAnnotationStepStart('Results - %.02f +- %.02f delta' %
                                           (percent_diff_in_means, std_err))
    print ' %s %s %s' % (''.center(10, ' '), 'Mean'.center(
        20, ' '), 'Std. Error'.center(20, ' '))
    print ' %s %s %s' % (
        'Patch'.center(10, ' '),
        ('%.02f' % results_with_patch[0]['mean']).center(20, ' '),
        ('%.02f' % results_with_patch[0]['std_err']).center(20, ' '))
    print ' %s %s %s' % (
        'No Patch'.center(10, ' '),
        ('%.02f' % results_without_patch[0]['mean']).center(20, ' '),
        ('%.02f' % results_without_patch[0]['std_err']).center(20, ' '))
    if cloud_file_link:
        bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)
    bisect_utils.OutputAnnotationStepClosed()
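bisect.CalculatePooledStandardError is used above without being shown. A sketch consistent with the usual pooled-variance formula, s_p * sqrt(sum(1/n_i)) where s_p^2 = sum((n_i - 1) * s_i^2) / sum(n_i - 1); the real implementation may differ in detail:

import math


def CalculateStandardDeviation(values):
    # Sample standard deviation (n - 1 in the denominator).
    if len(values) == 1:
        return 0.0
    mean = sum(values) / float(len(values))
    variance = sum((v - mean) ** 2 for v in values) / (len(values) - 1)
    return math.sqrt(variance)


def CalculatePooledStandardError(work_sets):
    numerator = 0.0
    denominator1 = 0.0
    denominator2 = 0.0
    for current_set in work_sets:
        std_dev = CalculateStandardDeviation(current_set)
        numerator += (len(current_set) - 1) * std_dev ** 2
        denominator1 += len(current_set) - 1
        denominator2 += 1.0 / len(current_set)
    if denominator1:
        return math.sqrt(numerator / denominator1) * math.sqrt(denominator2)
    return 0.0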
Example #7
def _OutputFailedResults(text_to_print):
    bisect_utils.OutputAnnotationStepStart('Results - Failed')
    print
    print text_to_print
    print
    bisect_utils.OutputAnnotationStepClosed()
def _RunBisectionScript(config, working_directory, path_to_file, path_to_goma,
                        path_to_extra_src, dry_run):
    """Attempts to execute src/tools/bisect-perf-regression.py with the parameters
  passed in.

  Args:
    config: A dict containing the parameters to pass to the script.
    working_directory: A working directory to provide to the
      bisect-perf-regression.py script, where it will store it's own copy of
      the depot.
    path_to_file: Path to the bisect-perf-regression.py script.
    path_to_goma: Path to goma directory.
    path_to_extra_src: Path to extra source file.
    dry_run: Do a dry run, skipping sync, build, and performance testing steps.

  Returns:
    0 on success, otherwise 1.
  """
    bisect_utils.OutputAnnotationStepStart('Config')
    print
    for k, v in config.iteritems():
        print '  %s : %s' % (k, v)
    print
    bisect_utils.OutputAnnotationStepClosed()

    cmd = [
        'python',
        os.path.join(path_to_file, 'bisect-perf-regression.py'), '-c',
        config['command'], '-g', config['good_revision'], '-b',
        config['bad_revision'], '-m', config['metric'], '--working_directory',
        working_directory, '--output_buildbot_annotations'
    ]

    if config['repeat_count']:
        cmd.extend(['-r', config['repeat_count']])

    if config['truncate_percent']:
        cmd.extend(['-t', config['truncate_percent']])

    if config['max_time_minutes']:
        cmd.extend(['--max_time_minutes', config['max_time_minutes']])

    if 'bisect_mode' in config:
        cmd.extend(['--bisect_mode', config['bisect_mode']])

    cmd.extend(['--build_preference', 'ninja'])

    if '--browser=cros' in config['command']:
        cmd.extend(['--target_platform', 'cros'])

        if os.environ.get(CROS_BOARD_ENV) and os.environ.get(CROS_IP_ENV):
            cmd.extend(['--cros_board', os.environ[CROS_BOARD_ENV]])
            cmd.extend(['--cros_remote_ip', os.environ[CROS_IP_ENV]])
        else:
            print 'Error: Cros build selected, but BISECT_CROS_IP or '\
                  'BISECT_CROS_BOARD undefined.'
            print
            return 1

    if 'android' in config['command']:
        if 'android-chrome' in config['command']:
            cmd.extend(['--target_platform', 'android-chrome'])
        else:
            cmd.extend(['--target_platform', 'android'])

    if path_to_goma:
        # crbug.com/330900: goma has no win32 binary, so it is not supported
        # on Windows XP. Moreover, when the gs_bucket flag is set, Chrome is
        # not compiled locally (build archives are used instead), so goma is
        # disabled on XP in that case.
        if config.get('gs_bucket') and platform.release() == 'XP':
            print ('Goma doesn\'t have a win32 binary, so it is not supported '
                   'on the Windows XP platform. Please refer to crbug.com/330900.')
            path_to_goma = None
        # Only pass --use_goma if goma was not disabled above.
        if path_to_goma:
            cmd.append('--use_goma')

    if path_to_extra_src:
        cmd.extend(['--extra_src', path_to_extra_src])

    # These flags are used to download build archives from cloud storage when
    # available; otherwise a try job request is posted to build the revision
    # on a tryserver.
    if config.get('gs_bucket'):
        if config.get('builder_host') and config.get('builder_port'):
            cmd.extend([
                '--gs_bucket', config['gs_bucket'], '--builder_host',
                config['builder_host'], '--builder_port',
                config['builder_port']
            ])
        else:
            print(
                'Error: Specified gs_bucket, but missing builder_host or '
                'builder_port information in config.')
            return 1

    if dry_run:
        cmd.extend([
            '--debug_ignore_build', '--debug_ignore_sync',
            '--debug_ignore_perf_test'
        ])
    cmd = [str(c) for c in cmd]

    with Goma(path_to_goma) as goma:
        return_code = subprocess.call(cmd)

    if return_code:
        print 'Error: bisect-perf-regression.py returned with error %d' %\
            return_code
        print

    return return_code
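A hypothetical end-to-end invocation of this variant, reusing the illustrative config from the sketch after Example #5 and adding the extra keys this version reads (all values are made up, including the builder host and port):

import os
import sys

config['bisect_mode'] = 'mean'
config['gs_bucket'] = 'chrome-perf'             # illustrative bucket name
config['builder_host'] = 'builder.example.com'  # hypothetical
config['builder_port'] = '8341'                 # hypothetical

sys.exit(_RunBisectionScript(
    config, '/tmp/bisect-work', os.path.join('src', 'tools'),
    os.environ.get('GOMA_DIR', ''), path_to_extra_src='', dry_run=True))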