Example #1
def _GetTests():
    base_dir = util.GetBaseDir()
    tests = discover.DiscoverClasses(base_dir,
                                     base_dir,
                                     test.Test,
                                     index_by_class_name=True)
    return dict((test.GetName(), test) for test in tests.itervalues())
Example #2
def AddResultsOptions(parser):
  group = optparse.OptionGroup(parser, 'Results options')
  group.add_option(
      '--output-format',
      action='append',
      dest='output_formats',
      choices=_OUTPUT_FORMAT_CHOICES,
      default=[],
      help='Output format. Defaults to "%%default". '
      'Can be %s.' % ', '.join(_OUTPUT_FORMAT_CHOICES))
  group.add_option(
      '--output-dir',
      default=util.GetBaseDir(),
      help='Where to save output data after the run.')
  group.add_option(
      '--reset-results', action='store_true', help='Delete all stored results.')
  group.add_option(
      '--upload-results',
      action='store_true',
      help='Upload the results to cloud storage.')
  group.add_option(
      '--upload-bucket',
      default='output',
      help='Storage bucket to use for the uploaded results. ' +
      'Defaults to output bucket. Supported values are: ' +
      ', '.join(cloud_storage.BUCKET_ALIAS_NAMES) +
      '; or a valid cloud storage bucket name.')
  group.add_option(
      '--results-label',
      default=None,
      help='Optional label to use for the results of a run.')
  parser.add_option_group(group)
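For orientation, a minimal usage sketch of a parser populated this way; the module name results_options is an assumption, and the snippet only runs where the Telemetry dependencies (util, cloud_storage, _OUTPUT_FORMAT_CHOICES) are importable.

# Usage sketch, not part of the example above. results_options is hypothetical.
import optparse
import results_options

parser = optparse.OptionParser()
results_options.AddResultsOptions(parser)
options, _ = parser.parse_args(['--output-format', 'html', '--reset-results'])
print(options.output_formats)  # ['html'] (collected by action='append')
print(options.reset_results)   # True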
Example #3
def AddResultsOptions(parser):
  group = optparse.OptionGroup(parser, 'Results options')
  group.add_option('--output-format', action='append', dest='output_formats',
                    choices=_OUTPUT_FORMAT_CHOICES, default=[],
                    help='Output format. Defaults to "%%default". '
                    'Can be %s.' % ', '.join(_OUTPUT_FORMAT_CHOICES))
  group.add_option('-o', '--output',
                    dest='output_file',
                    default=None,
                    help='Redirects output to a file. Defaults to stdout.')
  group.add_option('--output-dir', default=util.GetBaseDir(),
                    help='Where to save output data after the run.')
  group.add_option('--output-trace-tag',
                    default='',
                    help='Append a tag to the key of each result trace. Use '
                    'with html, buildbot, csv-pivot-table output formats.')
  group.add_option('--reset-results', action='store_true',
                    help='Delete all stored results.')
  group.add_option('--upload-results', action='store_true',
                    help='Upload the results to cloud storage.')
  group.add_option('--upload-bucket', default='output',
                    choices=cloud_storage.BUCKET_ALIAS_NAMES,
                    help='Storage bucket to use for the uploaded results. ' +
                    'Defaults to output bucket. Supported values are: ' +
                    ', '.join(cloud_storage.BUCKET_ALIAS_NAMES) + '.')
  group.add_option('--results-label',
                    default=None,
                    help='Optional label to use for the results of a run.')
  group.add_option('--suppress_gtest_report',
                   action='store_true',
                   default=False,
                   help='Whether to suppress GTest progress report.')
  parser.add_option_group(group)
Example #4
  def __init__(self):
    super(SmallProfileCreator, self).__init__()
    typical_25 = os.path.join(util.GetBaseDir(), 'page_sets', 'typical_25.py')
    self._page_set = page_set.PageSet.FromFile(typical_25)

    # Open all links in the same tab save for the last _NUM_TABS links which
    # are each opened in a new tab.
    self._NUM_TABS = 5
Example #5
def _CreatePageSetForUrl(url):
    ps_name = urlparse.urlparse(url).hostname + '.json'
    ps_path = os.path.join(util.GetBaseDir(), 'page_sets', ps_name)
    ps = {'archive_data_file': '../data/%s' % ps_name, 'pages': [{'url': url}]}
    with open(ps_path, 'w') as f:
        f.write(json.dumps(ps))
    print 'Created new page set %s' % ps_path
    return page_set.PageSet.FromFile(ps_path)
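As a standalone illustration of the page-set JSON this helper writes (made-up URL, no Telemetry imports; urlparse is the Python 2 module, urllib.parse in Python 3):

# Illustration only: builds the same page-set dict for an example URL.
import json
import urlparse  # Python 2; use urllib.parse in Python 3

url = 'http://www.example.com/index.html'
ps_name = urlparse.urlparse(url).hostname + '.json'  # 'www.example.com.json'
ps = {'archive_data_file': '../data/%s' % ps_name, 'pages': [{'url': url}]}
print(json.dumps(ps, indent=2))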
Example #6
  def CreatePageSet(self, options):  # pylint: disable=W0613
    """Get the page set this test will run on.

    By default, it will create a page set from the file at this test's
    page_set attribute. Override to generate a custom page set.
    """
    assert hasattr(self, 'page_set'), 'This test has no "page_set" attribute.'
    return page_set.PageSet.FromFile(
        os.path.join(util.GetBaseDir(), self.page_set))
Example #7
def _GetTests():
    # Lazy load and cache results.
    if not hasattr(_GetTests, 'tests'):
        base_dir = util.GetBaseDir()
        _GetTests.tests = discover.DiscoverClasses(base_dir,
                                                   base_dir,
                                                   test.Test,
                                                   index_by_class_name=True)
    return _GetTests.tests
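The same function-attribute caching idiom, reduced to a self-contained sketch with made-up names:

def _GetAnswers():
    # The cache lives on the function object itself, so the body below runs
    # at most once per process.
    if not hasattr(_GetAnswers, 'answers'):
        print('discovering...')
        _GetAnswers.answers = {'life': 42}
    return _GetAnswers.answers

print(_GetAnswers())  # prints 'discovering...' then {'life': 42}
print(_GetAnswers())  # served from the cache, no rediscovery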
Example #8
def ArgumentParser():
    """Create an ArgumentParser defining options required by the processor."""
    all_output_formats = sorted(
        set(SUPPORTED_FORMATS).union(command_line.LEGACY_OUTPUT_FORMATS))
    parser = argparse.ArgumentParser(add_help=False)
    group = parser.add_argument_group(title='Result processor options')
    group.add_argument(
        '--output-format',
        action='append',
        dest='output_formats',
        metavar='FORMAT',
        choices=all_output_formats,
        help=' '.join([
            'Output format to produce.',
            'May be used multiple times to produce multiple outputs.',
            'Available formats: %s.' % ', '.join(all_output_formats),
            'Defaults to: html.'
        ]))
    group.add_argument('--output-dir',
                       default=util.GetBaseDir(),
                       metavar='DIR_PATH',
                       help=' '.join([
                           'Path to a directory where to write final results.',
                           'Default: %(default)s.'
                       ]))
    group.add_argument(
        '--intermediate-dir',
        metavar='DIR_PATH',
        help=' '.join([
            'Path to a directory where to store intermediate results.',
            'If not provided, the default is to create a new directory',
            'within "{output_dir}/artifacts/".'
        ]))
    group.add_argument(
        '--reset-results',
        action='store_true',
        help='Remove any previous files in the output directory. The default '
        'is to append to existing results.')
    group.add_argument(
        '--results-label',
        metavar='LABEL',
        help='Label to identify the results generated by this run.')
    group.add_argument('--upload-results',
                       action='store_true',
                       help='Upload generated artifacts to cloud storage.')
    group.add_argument(
        '--upload-bucket',
        default='output',
        metavar='BUCKET',
        help=' '.join([
            'Storage bucket to use for uploading artifacts.',
            'Supported values are: %s; or a valid cloud storage bucket name.' %
            ', '.join(cloud_storage.BUCKET_ALIAS_NAMES),
            'Defaults to: %(default)s.'
        ]))
    group.set_defaults(legacy_output_formats=[])
    return parser
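Because the parser is built with add_help=False, it lends itself to being composed into another parser via argparse's parents mechanism; a self-contained sketch with a stand-in parent parser:

import argparse

# Stand-in for the parser returned above (only one option, for brevity).
processor_options = argparse.ArgumentParser(add_help=False)
processor_options.add_argument('--output-dir', metavar='DIR_PATH', default='.')

# A tool composes it into its own parser and still gets a single -h/--help.
cli = argparse.ArgumentParser(parents=[processor_options])
args = cli.parse_args(['--output-dir', '/tmp/results'])
print(args.output_dir)  # /tmp/results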
Example #9
def _GetTests():
  # Lazy load and cache results.
  if not hasattr(_GetTests, 'tests'):
    base_dir = util.GetBaseDir()
    tests = discover.DiscoverClasses(base_dir, base_dir, test.Test,
                                     index_by_class_name=True)
    tests = dict((test.GetName(), test) for test in tests.itervalues())
    _GetTests.tests = tests
  return _GetTests.tests
Example #10
    def CreatePageSet(cls, options):  # pylint: disable=W0613
        """Get the page set this test will run on.

    By default, it will create a page set from the file at this test's
    page_set attribute. Override to generate a custom page set.
    """
        if not hasattr(cls, 'page_set'):
            raise NotImplementedError('This test has no "page_set" attribute.')
        return page_set.PageSet.FromFile(
            os.path.join(util.GetBaseDir(), cls.page_set))
Example #11
def _GetOutputStream(output_format, output_file):
    assert output_format in _OUTPUT_FORMAT_CHOICES, 'Must specify a valid format.'
    assert output_format not in ('gtest', 'none'), (
        'Cannot set stream for \'gtest\' or \'none\' output formats.')

    if output_file is None:
        if output_format != 'html' and output_format != 'json':
            return sys.stdout
        output_file = os.path.join(util.GetBaseDir(),
                                   'results.' + output_format)

    output_file = os.path.expanduser(output_file)
    open(output_file, 'a').close()  # Create file if it doesn't exist.
    return open(output_file, 'r+')
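The create-then-reopen idiom in isolation, using a throwaway temp path:

import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), 'results.html')  # illustrative path
open(path, 'a').close()    # create the file if it does not exist yet
stream = open(path, 'r+')  # reopen for reading and writing, without truncating
stream.close()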
Example #12
def _Tests():
    base_dir = util.GetBaseDir()
    tests = discover.DiscoverClasses(base_dir,
                                     base_dir,
                                     test.Test,
                                     index_by_class_name=True).values()
    page_tests = discover.DiscoverClasses(base_dir,
                                          base_dir,
                                          page_test.PageTest,
                                          index_by_class_name=True).values()
    page_tests = [
        test_class for test_class in page_tests
        if not issubclass(test_class, profile_creator.ProfileCreator)
    ]
    return tests + page_tests
Example #13
def _DiscoverProfileExtenderClasses():
    profile_extenders_dir = (os.path.abspath(
        os.path.join(util.GetBaseDir(), '..', 'perf', 'profile_creators')))
    base_dir = os.path.abspath(os.path.join(profile_extenders_dir, '..'))

    profile_extenders_unfiltered = discover.DiscoverClasses(
        profile_extenders_dir, base_dir, profile_extender.ProfileExtender)

    # Remove 'extender' suffix from keys.
    profile_extenders = {}
    for test_name, test_class in profile_extenders_unfiltered.iteritems():
        assert test_name.endswith('_extender')
        test_name = test_name[:-len('_extender')]
        profile_extenders[test_name] = test_class
    return profile_extenders
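The key-renaming step on its own, with made-up class names standing in for the discovered extenders:

profile_extenders_unfiltered = {'small_profile_extender': object,
                                'large_profile_extender': object}
profile_extenders = {}
for test_name, test_class in profile_extenders_unfiltered.items():
    assert test_name.endswith('_extender')
    profile_extenders[test_name[:-len('_extender')]] = test_class
print(sorted(profile_extenders))  # ['large_profile', 'small_profile']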
Example #14
def PrepareResults(test, options):
    if not isinstance(test, page_measurement.PageMeasurement):
        # Sort of hacky. The default for non-Measurements should be "gtest."
        if options.output_format != 'none':
            options.output_format = 'gtest'

    if options.output_format == 'html' and not options.output_file:
        options.output_file = os.path.join(util.GetBaseDir(), 'results.html')

    if hasattr(options, 'output_file') and options.output_file:
        output_file = os.path.expanduser(options.output_file)
        open(output_file, 'a').close()  # Create file if it doesn't exist.
        output_stream = open(output_file, 'r+')
    else:
        output_stream = sys.stdout
    if not hasattr(options, 'output_format'):
        options.output_format = _OUTPUT_FORMAT_CHOICES[0]
    if not hasattr(options, 'output_trace_tag'):
        options.output_trace_tag = ''

    if options.output_format == 'none':
        return page_measurement_results.PageMeasurementResults(
            output_stream, trace_tag=options.output_trace_tag)
    elif options.output_format == 'csv':
        return csv_page_measurement_results.CsvPageMeasurementResults(
            output_stream, test.results_are_the_same_on_every_page)
    elif options.output_format == 'block':
        return block_page_measurement_results.BlockPageMeasurementResults(
            output_stream)
    elif options.output_format == 'buildbot':
        return buildbot_page_measurement_results.BuildbotPageMeasurementResults(
            output_stream, trace_tag=options.output_trace_tag)
    elif options.output_format == 'gtest':
        return gtest_test_results.GTestTestResults(output_stream)
    elif options.output_format == 'html':
        return html_page_measurement_results.HtmlPageMeasurementResults(
            output_stream,
            test.__class__.__name__,
            options.reset_results,
            options.upload_results,
            options.browser_type,
            options.results_label,
            trace_tag=options.output_trace_tag)
    else:
        # Should never be reached. The parser enforces the choices.
        raise Exception(
            'Invalid --output-format "%s". Valid choices are: %s' %
            (options.output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))
Example #15
  def __init__(self):
    super(ExtensionsProfileCreator, self).__init__()
    typical_25 = os.path.join(util.GetBaseDir(), 'page_sets', 'typical_25.json')
    self._page_set = page_set.PageSet.FromFile(typical_25)

    # Directory into which the output profile is written.
    self._output_profile_path = None

    # List of extensions to install.
    self._extensions_to_install = []

    # Theme to install (if any).
    self._theme_to_install = None

    # Directory to download extension files into.
    self._extension_download_dir = None

    # Have the extensions been installed yet?
    self._extensions_installed = False

    # List of files to delete after run.
    self._files_to_cleanup = []
Example #16
                                                  test_class.Description())
        print >> sys.stderr

    filtered_tests = [
        test_class for test_class in tests
        if issubclass(test_class, page_test.PageTest)
    ]
    if filtered_tests:
        print >> sys.stderr, 'Available page tests are:'
        for test_class in sorted(filtered_tests, key=lambda t: t.Name()):
            print >> sys.stderr, format_string % (test_class.Name(),
                                                  test_class.Description())
        print >> sys.stderr


config = environment.Environment([util.GetBaseDir()])


def main():
    # Get the command name from the command line.
    if len(sys.argv) > 1 and sys.argv[1] == '--help':
        sys.argv[1] = 'help'

    command_name = 'run'
    for arg in sys.argv[1:]:
        if not arg.startswith('-'):
            command_name = arg
            break

    # Validate and interpret the command name.
    commands = _MatchingCommands(command_name)
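The command-name scan by itself: the first argument that does not look like a flag becomes the subcommand, defaulting to 'run'. A sketch with a made-up argv:

argv = ['telemetry', '--browser=release', 'list', 'page_cycler']  # made-up argv
command_name = 'run'
for arg in argv[1:]:
    if not arg.startswith('-'):
        command_name = arg
        break
print(command_name)  # list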
Example #17
def CreateResults(metadata, options):
  """
  Args:
    options: Contains the options specified in AddResultsOptions.
  """
  # TODO(chrishenry): This logic prevents us from having multiple
  # OutputFormatters. We should have an output_file per OutputFormatter.
  # Maybe we should have --output-dir instead of --output-file?
  if options.output_format == 'html' and not options.output_file:
    options.output_file = os.path.join(util.GetBaseDir(), 'results.html')
  elif options.output_format == 'json' and not options.output_file:
    options.output_file = os.path.join(util.GetBaseDir(), 'results.json')

  if hasattr(options, 'output_file') and options.output_file:
    output_file = os.path.expanduser(options.output_file)
    open(output_file, 'a').close()  # Create file if it doesn't exist.
    output_stream = open(output_file, 'r+')
  else:
    output_stream = sys.stdout
  if not hasattr(options, 'output_format'):
    options.output_format = _OUTPUT_FORMAT_CHOICES[0]
  if not hasattr(options, 'output_trace_tag'):
    options.output_trace_tag = ''

  output_formatters = []
  output_skipped_tests_summary = True
  reporter = None
  if options.output_format == 'none':
    pass
  elif options.output_format == 'csv':
    output_formatters.append(csv_output_formatter.CsvOutputFormatter(
        output_stream))
  elif options.output_format == 'buildbot':
    output_formatters.append(buildbot_output_formatter.BuildbotOutputFormatter(
        output_stream, trace_tag=options.output_trace_tag))
  elif options.output_format == 'gtest':
    # TODO(chrishenry): This is here to not change the output of
    # gtest. Let's try enabling skipped tests summary for gtest test
    # results too (in a separate patch), and see if we break anything.
    output_skipped_tests_summary = False
  elif options.output_format == 'html':
    # TODO(chrishenry): We show buildbot output so that users can grep
    # through the results easily without needing to open the html
    # file.  Another option for this is to output the results directly
    # in gtest-style results (via some sort of progress reporter),
    # as we plan to enable gtest-style output for all output formatters.
    output_formatters.append(buildbot_output_formatter.BuildbotOutputFormatter(
        sys.stdout, trace_tag=options.output_trace_tag))
    output_formatters.append(html_output_formatter.HtmlOutputFormatter(
        output_stream, metadata, options.reset_results,
        options.upload_results, options.browser_type,
        options.results_label, trace_tag=options.output_trace_tag))
  elif options.output_format == 'json':
    output_formatters.append(
        json_output_formatter.JsonOutputFormatter(output_stream, metadata))
  else:
    # Should never be reached. The parser enforces the choices.
    raise Exception('Invalid --output-format "%s". Valid choices are: %s'
                    % (options.output_format,
                       ', '.join(_OUTPUT_FORMAT_CHOICES)))

  if options.suppress_gtest_report:
    reporter = progress_reporter.ProgressReporter()
  else:
    reporter = gtest_progress_reporter.GTestProgressReporter(
        sys.stdout, output_skipped_tests_summary=output_skipped_tests_summary)
  return page_test_results.PageTestResults(
      output_formatters=output_formatters, progress_reporter=reporter)
Example #18
def InstallHooks():
    RemoveAllStalePycFiles(util.GetTelemetryDir())
    RemoveAllStalePycFiles(util.GetBaseDir())
    InstallUnhandledExceptionFormatter()
    InstallStackDumpOnSigusr1()
    InstallTerminationHook()
  def CreateParser(self, *args, **kwargs):
    parser = optparse.OptionParser(*args, **kwargs)

    # Selection group
    group = optparse.OptionGroup(parser, 'Which browser to use')
    group.add_option('--browser',
        dest='browser_type',
        default=None,
        help='Browser type to run, '
             'in order of priority. Supported values: list, %s' %
             browser_finder.ALL_BROWSER_TYPES)
    group.add_option('--browser-executable',
        dest='browser_executable',
        help='The exact browser to run.')
    group.add_option('--chrome-root',
        dest='chrome_root',
        help='Where to look for chrome builds. '
             'Defaults to searching parent dirs.')
    group.add_option('--device',
        dest='android_device',
        help='The Android device ID to use. '
             'If not specified, only 0 or 1 connected devices are supported.')
    group.add_option('--keep_test_server_ports', action='store_true',
        help='Indicates the test server ports must be '
             'kept. When this is run via a sharder '
             'the test server ports should be kept and '
             'should not be reset.')
    group.add_option(
        '--remote',
        dest='cros_remote',
        help='The IP address of a remote ChromeOS device to use.')
    identity = None
    testing_rsa = os.path.join(
        util.GetChromiumSrcDir(),
        'third_party', 'chromite', 'ssh_keys', 'testing_rsa')
    if os.path.exists(testing_rsa):
      identity = testing_rsa
    group.add_option('--identity',
        dest='cros_ssh_identity',
        default=identity,
        help='The identity file to use when ssh\'ing into the ChromeOS device')
    parser.add_option_group(group)

    # Browser options
    group = optparse.OptionGroup(parser, 'Browser options')
    profile_choices = profile_types.GetProfileTypes()
    group.add_option('--profile-type',
        dest='profile_type',
        type='choice',
        default='clean',
        choices=profile_choices,
        help=('The user profile to use. A clean profile is used by default. '
              'Supported values: ' + ', '.join(profile_choices)))
    group.add_option('--profile-dir',
        dest='profile_dir',
        help='Profile directory to launch the browser with. '
             'A clean profile is used by default')
    group.add_option('--extra-browser-args',
        dest='extra_browser_args_as_string',
        help='Additional arguments to pass to the browser when it starts')
    group.add_option('--extra-wpr-args',
        dest='extra_wpr_args_as_string',
        help=('Additional arguments to pass to Web Page Replay. '
              'See third_party/webpagereplay/replay.py for usage.'))
    group.add_option('--show-stdout',
        action='store_true',
        help='When possible, will display the stdout of the process')
    parser.add_option_group(group)

    # Page set options
    group = optparse.OptionGroup(parser, 'Page set options')
    group.add_option('--pageset-shuffle', action='store_true',
        dest='pageset_shuffle',
        help='Shuffle the order of pages within a pageset.')
    group.add_option('--pageset-shuffle-order-file',
        dest='pageset_shuffle_order_file', default=None,
        help='Filename of an output of a previously run test on the current ' +
        'pageset. The tests will run in the same order again, overriding ' +
        'what is specified by --page-repeat and --pageset-repeat.')
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, 'Web Page Replay options')
    group.add_option('--allow-live-sites',
        dest='allow_live_sites', action='store_true',
        help='Run against live sites if the Web Page Replay archives don\'t '
             'exist. Without this flag, the test will just fail instead '
             'of running against live sites.')
    parser.add_option_group(group)

    # Debugging options
    group = optparse.OptionGroup(parser, 'When things go wrong')
    profiler_choices = profiler_finder.GetAllAvailableProfilers(None)
    group.add_option(
      '--profiler', default=None, type='choice',
      choices=profiler_choices,
      help=('Record profiling data using this tool. Supported values: ' +
            ', '.join(profiler_choices)))
    group.add_option(
      '-v', '--verbose', action='count', dest='verbosity',
      help='Increase verbosity level (repeat as needed)')
    group.add_option('--print-bootstrap-deps',
                     action='store_true',
                     help='Output bootstrap deps list.')
    parser.add_option_group(group)

    # Platform options
    group = optparse.OptionGroup(parser, 'Platform options')
    group.add_option('--no-performance-mode', action='store_true',
        help='Some platforms run on "full performance mode" where the '
        'test is executed at maximum CPU speed in order to minimize noise '
        '(specially important for dashboards / continuous builds). '
        'This option prevents Telemetry from tweaking such platform settings.')
    parser.add_option_group(group)

    # Repeat options
    repeat_options.RepeatOptions.AddCommandLineOptions(parser)

    real_parse = parser.parse_args
    def ParseArgs(args=None):
      defaults = parser.get_default_values()
      for k, v in defaults.__dict__.items():
        if k in self.__dict__ and self.__dict__[k] != None:
          continue
        self.__dict__[k] = v
      ret = real_parse(args, self) # pylint: disable=E1121

      if self.verbosity >= 2:
        logging.basicConfig(level=logging.DEBUG)
      elif self.verbosity:
        logging.basicConfig(level=logging.INFO)
      else:
        logging.basicConfig(level=logging.WARNING)

      if self.browser_executable and not self.browser_type:
        self.browser_type = 'exact'
      if self.browser_type == 'list':
        try:
          types = browser_finder.GetAllAvailableBrowserTypes(self)
        except browser_finder.BrowserFinderException, ex:
          sys.stderr.write('ERROR: ' + str(ex))
          sys.exit(1)
        sys.stdout.write('Available browsers:\n')
        sys.stdout.write('  %s\n' % '\n  '.join(types))
        sys.exit(0)
      if self.extra_browser_args_as_string: # pylint: disable=E1101
        tmp = shlex.split(
          self.extra_browser_args_as_string) # pylint: disable=E1101
        self.extra_browser_args.extend(tmp)
        delattr(self, 'extra_browser_args_as_string')
      if self.extra_wpr_args_as_string: # pylint: disable=E1101
        tmp = shlex.split(
          self.extra_wpr_args_as_string) # pylint: disable=E1101
        self.extra_wpr_args.extend(tmp)
        delattr(self, 'extra_wpr_args_as_string')
      if self.profile_type == 'default':
        self.dont_override_profile = True

      if ((hasattr(self, 'output_format') and self.output_format == 'html') and
          (not hasattr(self, 'output_file') or not self.output_file)):
        self.output_file = os.path.join(util.GetBaseDir(), 'results.html')

      # Parse repeat options
      self.repeat_options.UpdateFromParseResults(self, parser)

      # TODO(jeremy): I'm in the process of adding explicit knowledge of profile
      # directories to Telemetry. As part of this work profile_type needs to be
      # reworked to not override profile_dir.
      if not self.profile_dir:
        self.profile_dir = profile_types.GetProfileDir(self.profile_type)

      return ret
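A standalone sketch of the parse_args wrapping trick used above, with a single illustrative option and none of the Telemetry post-processing:

import optparse

parser = optparse.OptionParser()
parser.add_option('-v', '--verbose', action='count', dest='verbosity', default=0)

real_parse = parser.parse_args

def ParseArgs(args=None):
    # Run the original parser, then post-process the result in one place.
    options, positional = real_parse(args)
    if options.verbosity >= 2:
        print('debug logging would be enabled here')
    return options, positional

parser.parse_args = ParseArgs
options, _ = parser.parse_args(['-v', '-v'])
print(options.verbosity)  # 2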