Example #1
0
 def testFetchPathInitialized(self, binary_manager_mock):
     """FetchPath after init must delegate to the shared BinaryManager."""
     expected = [
         mock.call.binary_manager.BinaryManager(['base_config_object']),
         mock.call.binary_manager.BinaryManager().FetchPath(
             'dep', 'plat_arch')
     ]
     binary_manager.InitDependencyManager(None)
     binary_manager.FetchPath('dep', 'plat', 'arch')
     # assert_call_args is not part of the mock API: modern unittest.mock
     # raises AttributeError for unknown assert_* attributes, and older
     # versions silently passed. assert_has_calls performs the intended check.
     binary_manager_mock.assert_has_calls(expected)
Example #2
0
def ParseArgs(environment, args=None, results_arg_parser=None):
    """Parse the command line for a benchmark run.

    Args:
      environment: A ProjectConfig object describing the benchmark runtime
        environment.
      args: Optional list of arguments to parse; defaults to sys.argv[1:].
      results_arg_parser: Optional parser contributing extra command line
        options for an external results_processor.

    Returns:
      An options object with the values parsed from the command line.
    """
    cli_args = sys.argv[1:] if args is None else args
    if cli_args:
        if cli_args[0] == 'help':
            # The old command line allowed "help" as a command; translate it
            # to "--help" to teach users about the new interface.
            cli_args[0] = '--help'
        elif cli_args[0] not in ('list', 'run', '-h', '--help'):
            # No explicit command given: assume the default one.
            cli_args.insert(0, 'run')

    # TODO(crbug.com/981349): When optparse is gone, this should just call
    # parse_args on the fully formed top level parser. For now we still need
    # to allow unknown args, which are then passed below to the legacy parsers.
    parser, legacy_parsers = _ArgumentParsers(
        environment, cli_args, results_arg_parser)
    known, unknown = parser.parse_known_args(cli_args)

    # TODO(crbug.com/981349): Ideally, most of the following should be moved
    # to after argument parsing is completed and before (or at the time) when
    # arguments are processed.

    # The log level is set in browser_options; clear existing handlers so
    # logging can be configured cleanly here.
    logging.getLogger().handlers = []
    logging.basicConfig(format=DEFAULT_LOG_FORMAT)

    binary_manager.InitDependencyManager(environment.client_configs)

    command = _COMMANDS[known.command]
    legacy_parser = legacy_parsers[known.command]

    # Make the default chrome root available to the legacy parser.
    legacy_parser.set_defaults(chrome_root=environment.default_chrome_root)

    options, positional_args = legacy_parser.parse_args(unknown)
    options.positional_args = positional_args
    command.ProcessCommandLineArgs(legacy_parser, options, environment)

    # Fold the argparse results back into the optparse options object.
    for name in vars(known):
        setattr(options, name, getattr(known, name))
    return options
Example #3
0
def Main(environment, **log_config_kwargs):
    """Record WPR archives for a benchmark or story.

    Returns the number of failed recordings (capped at 255), or 0 after
    printing listings or usage help.
    """
    # The log level is set in browser_options; never let callers override it.
    log_config_kwargs.pop('level', None)
    log_config_kwargs.setdefault('format', DEFAULT_LOG_FORMAT)
    logging.basicConfig(**log_config_kwargs)

    parser = argparse.ArgumentParser(
        usage='Record a benchmark or a story (page set).')
    parser.add_argument(
        'benchmark',
        nargs='?',
        help=('benchmark name. This argument is optional. If both benchmark '
              'name '
              'and story name are specified, this takes precedence as the '
              'target of the recording.'))
    parser.add_argument('--story', help='story (page set) name')
    parser.add_argument(
        '--list-stories', dest='list_stories', action='store_true',
        help='list all story names.')
    parser.add_argument(
        '--list-benchmarks', dest='list_benchmarks', action='store_true',
        help='list all benchmark names.')
    parser.add_argument(
        '--upload', action='store_true', help='upload to cloud storage.')

    args, extra_args = parser.parse_known_args()

    # Listing modes short-circuit the actual recording.
    if args.list_benchmarks or args.list_stories:
        if args.list_benchmarks:
            _PrintAllBenchmarks(environment.top_level_dir, sys.stderr)
        if args.list_stories:
            _PrintAllStories(environment.top_level_dir, sys.stderr)
        return 0

    target = args.benchmark or args.story
    if not target:
        sys.stderr.write(
            'Please specify target (benchmark or story). Please refer '
            'usage below\n\n')
        parser.print_help()
        return 0

    binary_manager.InitDependencyManager(environment.client_configs)

    # TODO(nednguyen): update WprRecorder so that it handles the difference
    # between recording a benchmark vs recording a story better based on
    # the distinction between args.benchmark & args.story
    recorder = WprRecorder(environment.top_level_dir, target, extra_args)
    results = recorder.CreateResults()
    recorder.Record(results)
    recorder.HandleResults(results, args.upload)
    return min(255, results.num_failed)
Example #4
0
def FetchTelemetryDependencies(
    platform=None, client_configs=None, chrome_reference_browser=False):
  """Fetch the binary dependencies Telemetry needs for the given platform.

  Raises if the binary manager was already initialized with other configs.
  """
  # Default to the host platform when none is supplied.
  platform = platform or platform_module.GetHostPlatform()
  if not binary_manager.NeedsInit():
    raise Exception('Binary manager already initialized with other configs.')
  binary_manager.InitDependencyManager(client_configs)
  binary_manager.FetchBinaryDependencies(
      platform, client_configs, chrome_reference_browser)
 def testLocalPathInitialized(self, base_config_mock, dep_manager_mock):
   """LocalPath after init must delegate to the shared DependencyManager."""
   base_config_mock.return_value = 'base_config_object'
   expected = [mock.call.dependency_manager.DependencyManager(
                  ['base_config_object']),
               mock.call.dependency_manager.DependencyManager().LocalPath(
                   'dep', 'plat_arch')]
   binary_manager.InitDependencyManager(None)
   binary_manager.LocalPath('dep', 'plat', 'arch')
   # assert_call_args is not part of the mock API: modern unittest.mock raises
   # AttributeError for unknown assert_* attributes, and older versions
   # silently passed. assert_has_calls performs the intended check.
   dep_manager_mock.assert_has_calls(expected)
   base_config_mock.assert_called_once_with(
       binary_manager.TELEMETRY_PROJECT_CONFIG)
 def _BringUpWpr(self):
     """Start the WPR server on the host and the forwarder on the device."""
     # Python 2 print statement: this module predates the py3 migration.
     print 'Starting WPR on host...'
     # Make sure the archive is available locally before replaying it.
     _DownloadFromCloudStorage(self._WPR_BUCKET, self._wpr_archive_hash)
     # Lazily initialize binary_manager so ReplayServer can locate the wpr
     # binary.
     if binary_manager.NeedsInit():
         binary_manager.InitDependencyManager([])
     # Ports 0/0 let the OS pick free HTTP/HTTPS ports.
     self._wpr_server = webpagereplay_go_server.ReplayServer(
         self._wpr_archive, '127.0.0.1', 0, 0, replay_options=[])
     # StartServer() returns a sequence of ports; the last element is dropped
     # here -- presumably a port we don't forward (TODO confirm against
     # ReplayServer.StartServer).
     ports = self._wpr_server.StartServer()[:-1]
     self._host_http_port = ports[0]
     self._host_https_port = ports[1]
Example #7
0
def main(environment):
    """Top-level entry point: map argv onto a benchmark command and run it.

    Returns the command's integer exit status (1 for an ambiguous command
    name). Note this is Python 2 code (print statements).
    """
    # The log level is set in browser_options.
    # Clear the log handlers to ensure we can set up logging properly here.
    logging.getLogger().handlers = []
    logging.basicConfig(format=DEFAULT_LOG_FORMAT)

    ps_util.EnableListingStrayProcessesUponExitHook()

    # Get the command name from the command line.
    if len(sys.argv) > 1 and sys.argv[1] == '--help':
        sys.argv[1] = 'help'

    # Default to 'run'; the first non-flag argument names the command.
    command_name = 'run'
    for arg in sys.argv[1:]:
        if not arg.startswith('-'):
            command_name = arg
            break

    # TODO(eakuefner): Remove this hack after we port to argparse.
    # Rewrites "help run" into "run --help".
    if command_name == 'help' and len(sys.argv) > 2 and sys.argv[2] == 'run':
        command_name = 'run'
        sys.argv[2] = '--help'

    # Validate and interpret the command name.
    commands = _MatchingCommands(command_name)
    if len(commands) > 1:
        # Ambiguous prefix: list the candidates and bail out.
        print >> sys.stderr, (
            '"%s" is not a %s command. Did you mean one of these?' %
            (command_name, _ScriptName()))
        for command in commands:
            print >> sys.stderr, '  %-10s %s' % (command.Name(),
                                                 command.Description())
        return 1
    if commands:
        command = commands[0]
    else:
        # No match at all: fall back to the Run command.
        command = Run

    binary_manager.InitDependencyManager(environment.client_configs)

    # Parse and run the command.
    parser = command.CreateParser()
    command.AddCommandLineArgs(parser, environment)

    # Set the default chrome root variable.
    parser.set_defaults(chrome_root=environment.default_chrome_root)

    options, args = parser.parse_args()
    if commands:
        # Drop the command name itself from the positional args.
        args = args[1:]
    options.positional_args = args
    command.ProcessCommandLineArgs(parser, options, environment)

    return command().Run(options)
 def testInitializationWithEnvironmentConfig(
     self, base_config_mock, dep_manager_mock):
   """An environment config is wrapped in a BaseConfig and passed first."""
   base_config_mock.side_effect = [
       'base_config_object1', 'base_config_object2']
   config_path = os.path.join('some', 'config', 'path')
   binary_manager.InitDependencyManager(config_path)
   self.assertEqual(
       [mock.call(binary_manager.TELEMETRY_PROJECT_CONFIG),
        mock.call(config_path)],
       base_config_mock.call_args_list)
   # The environment config must come ahead of the telemetry one.
   dep_manager_mock.assert_called_once_with(
       ['base_config_object2', 'base_config_object1'])
def Run(project_config, test_run_options, args):
    """Discover and run a serially-executed browser test suite.

    Returns the number of test failures, or 1 if the requested test class
    cannot be found. Note this is Python 2 code (print statements).
    """
    binary_manager.InitDependencyManager(project_config.client_configs)
    parser = argparse.ArgumentParser(description='Run a browser test suite')
    parser.add_argument('test', type=str, help='Name of the test suite to run')
    parser.add_argument(
        '--write-abbreviated-json-results-to',
        metavar='FILENAME',
        action='store',
        help=(
            'If specified, writes the full results to that path in json form.'
        ))
    option, extra_args = parser.parse_known_args(args)

    for start_dir in project_config.start_dirs:
        modules_to_classes = discover.DiscoverClasses(
            start_dir,
            project_config.top_level_dir,
            base_class=serially_executed_browser_test_case.
            SeriallyBrowserTestCase)
        browser_test_classes = modules_to_classes.values()
        # NOTE(review): only the final start_dir's classes survive this loop;
        # confirm whether accumulation across start_dirs was intended.

    ValidateDistinctNames(browser_test_classes)

    test_class = None
    for cl in browser_test_classes:
        if cl.Name() == option.test:
            test_class = cl
            # NOTE(review): no break, so the last match wins; names are
            # validated distinct above, so at most one class should match.

    if not test_class:
        print 'Cannot find test class with name matched %s' % option.test
        print 'Available tests: %s' % '\n'.join(cl.Name()
                                                for cl in browser_test_classes)
        return 1

    options = ProcessCommandLineOptions(test_class, extra_args)

    suite = unittest.TestSuite()
    for test in LoadTests(test_class, options):
        suite.addTest(test)

    results = unittest.TextTestRunner(
        verbosity=test_run_options.verbosity).run(suite)
    if option.write_abbreviated_json_results_to:
        with open(option.write_abbreviated_json_results_to, 'w') as f:
            # Abbreviated results record only the ids of failing test cases.
            json_results = {'failures': [], 'valid': True}
            for (failed_test_case, _) in results.failures:
                json_results['failures'].append(failed_test_case.id())
            json.dump(json_results, f)
    return len(results.failures)
 def testInitializationNoEnvironmentConfig(self, base_config_mock,
                                           binary_manager_mock):
     """Without an environment config only the two default configs are used."""
     base_config_mock.side_effect = [
         'base_config_object1', 'base_config_object2', 'base_config_object3'
     ]
     binary_manager.InitDependencyManager(None)
     expected = [
         mock.call.base_config.BaseConfig(
             binary_manager.TELEMETRY_PROJECT_CONFIG),
         mock.call.base_config.BaseConfig(
             binary_manager.CHROME_BINARY_CONFIG),
     ]
     base_config_mock.assert_has_calls(expected)
     # Exactly the two default configs, nothing more.
     self.assertEqual(2, base_config_mock.call_count)
     binary_manager_mock.assert_called_once_with(
         ['base_config_object1', 'base_config_object2'])
Example #11
0
def _SetUpProcess(child, context):
    """Per-worker setup hook run by typ in each child process."""
    args = context.finder_options
    if binary_manager.NeedsInit():
        # On windows, typ doesn't keep the DependencyManager initialization in
        # the child processes.
        binary_manager.InitDependencyManager(context.client_configs)
    if args.remote_platform_options.device == 'android':
        devices = android_device.FindAllAvailableDevices(args)
        if not devices:
            raise RuntimeError("No Android device found")
        # Deterministic ordering so each worker gets a distinct device.
        devices.sort(key=lambda d: d.name)
        args.remote_platform_options.device = (
            devices[child.worker_num - 1].guid)
    browser_test_context._global_test_context = context
    context.test_class.SetUpProcess()
Example #12
0
  def main(cls, args=None, stream=None):  # pylint: disable=W0221
    """Parse arguments, run the command, and return its exit status.

    Overrides the superclass so the 'stream' argument can be hooked in.
    """
    parser = cls.CreateParser()
    cls.AddCommandLineArgs(parser, None)
    options, positional = parser.parse_args(args)
    options.positional_args = positional

    # The DependencyManager must be ready before browser_finder.FindBrowser
    # is invoked during argument processing.
    binary_manager.InitDependencyManager(options.client_config)
    cls.ProcessCommandLineArgs(parser, options, None)

    runner = cls()
    if stream is not None:
      runner.stream = stream
    return runner.Run(options)
 def testFetchPathInitialized(self, base_config_mock, binary_manager_mock):
     """FetchPath after init must delegate to the shared BinaryManager."""
     base_config_mock.return_value = 'base_config_object'
     expected = [
         mock.call.binary_manager.BinaryManager(['base_config_object']),
         mock.call.binary_manager.BinaryManager().FetchPath(
             'dep', 'plat_arch')
     ]
     binary_manager.InitDependencyManager(None)
     binary_manager.FetchPath('dep', 'plat', 'arch')
     # assert_call_args is not part of the mock API: modern unittest.mock
     # raises AttributeError for unknown assert_* attributes, and older
     # versions silently passed. assert_has_calls performs the intended check.
     binary_manager_mock.assert_has_calls(expected)
     base_config_mock.assert_has_calls([
         mock.call.base_config.BaseConfig(
             binary_manager.TELEMETRY_PROJECT_CONFIG),
         mock.call.base_config.BaseConfig(
             binary_manager.CHROME_BINARY_CONFIG)
     ])
     self.assertEqual(2, base_config_mock.call_count)
def Main():
  """Generate a browser profile of the requested type; returns 0 on success."""
  binary_manager.InitDependencyManager(None)
  options = browser_options.BrowserFinderOptions()
  parser = options.CreateParser(
      '%%prog <--profile-type-to-generate=...> <--browser=...> <--output-dir>')
  AddCommandLineArgs(parser)
  parser.parse_args()
  ProcessCommandLineArgs(parser, options)

  # Look up the extender class that knows how to build this profile type.
  extenders = _DiscoverProfileExtenderClasses()
  extender_class = extenders[options.profile_type_to_generate]

  generator = ProfileGenerator(extender_class,
                               options.profile_type_to_generate)
  generator.Create(options, options.output_dir)
  return 0
def Main(base_dir):
  """Record WPR archives for a benchmark or story under base_dir.

  Returns the number of failures (capped at 255), or 0 after printing
  listings or usage help.
  """
  parser = argparse.ArgumentParser(
      usage='Record a benchmark or a story (page set).')
  parser.add_argument(
      'benchmark',
      nargs='?',
      help=('benchmark name. This argument is optional. If both benchmark name '
            'and story name are specified, this takes precedence as the '
            'target of the recording.'))
  parser.add_argument('--story', help='story (page set) name')
  parser.add_argument('--list-stories', dest='list_stories',
                      action='store_true', help='list all story names.')
  parser.add_argument('--list-benchmarks', dest='list_benchmarks',
                      action='store_true', help='list all benchmark names.')
  parser.add_argument('--upload', action='store_true',
                      help='upload to cloud storage.')
  args, extra_args = parser.parse_known_args()

  # Listing modes short-circuit the actual recording.
  if args.list_benchmarks or args.list_stories:
    if args.list_benchmarks:
      _PrintAllBenchmarks(base_dir, sys.stderr)
    if args.list_stories:
      _PrintAllStories(base_dir, sys.stderr)
    return 0

  target = args.benchmark or args.story
  if not target:
    sys.stderr.write('Please specify target (benchmark or story). Please refer '
                     'usage below\n\n')
    parser.print_help()
    return 0

  # TODO(aiolos): We should add getting the client config from the
  # benchmark.Environment once it's added. Not currently needed though.
  binary_manager.InitDependencyManager(None)

  # TODO(nednguyen): update WprRecorder so that it handles the difference
  # between recording a benchmark vs recording a story better based on
  # the distinction between args.benchmark & args.story
  recorder = WprRecorder(base_dir, target, extra_args)
  results = recorder.CreateResults()
  recorder.Record(results)
  recorder.HandleResults(results, args.upload)
  return min(255, len(results.failures))
    def __init__(self, archive_path, replay_host, http_port, https_port,
                 replay_options):
        """Initialize ReplayServer.

        Args:
          archive_path: a path to a specific WPR archive (required).
          replay_host: the hostname to serve traffic.
          http_port: an integer port on which to serve HTTP traffic. May be
              zero to let the OS choose an available port.
          https_port: an integer port on which to serve HTTPS traffic. May be
              zero to let the OS choose an available port.
          replay_options: an iterable of option strings to forward to replay.py
        """
        # binary_manager must be ready so the underlying server can locate
        # the wpr binary via FetchPath.
        if binary_manager.NeedsInit():
            binary_manager.InitDependencyManager(None)
        self._wpr_server = webpagereplay_go_server.ReplayServer(
            archive_path, replay_host, http_port, https_port, replay_options,
            binary_manager.FetchPath)
    def _FetchBinaryManagerPath(self, dependency_name):
        """Fetch the path for a dependency via binary_manager.

        Initializes binary_manager with defaults if it is not already
        initialized.

        Args:
          dependency_name: A string containing the name of the dependency.

        Returns:
          A string containing the path to the dependency, or None if it could
          not be found.
        """
        if binary_manager.NeedsInit():
            logging.info('binary_manager was not initialized. Initializing '
                         'with default values.')
            binary_manager.InitDependencyManager(None)
        return binary_manager.FetchPath(
            dependency_name, self._os, self._arch, self._os_version)
def _SetUpProcess(child, context):  # pylint: disable=unused-argument
    """Per-worker setup hook: dependencies, logging, and device selection."""
    ps_util.EnableListingStrayProcessesUponExitHook()
    if binary_manager.NeedsInit():
        # Typ doesn't keep the DependencyManager initialization in the child
        # processes.
        binary_manager.InitDependencyManager(context.client_config)
    args = context
    if not args.disable_logging_config:
        # Reset the handlers in case some other part of telemetry already
        # configured logging.
        logging.getLogger().handlers = []
        logging.basicConfig(
            level=logging.INFO,
            format=('(%(levelname)s) %(asctime)s %(module)s.%(funcName)s'
                    ':%(lineno)d  %(message)s'))
    if args.device and args.device == 'android':
        devices = android_device.FindAllAvailableDevices(args)
        if not devices:
            raise RuntimeError("No Android device found")
        # Deterministic ordering so each worker gets a distinct device.
        devices.sort(key=lambda d: d.name)
        args.device = devices[child.worker_num - 1].guid
    options_for_unittests.Push(args)
Example #19
0
def _SetUpProcess(child, context): # pylint: disable=W0613
  """Per-worker setup hook: dependencies, logging, and device selection."""
  ps_util.EnableListingStrayProcessesUponExitHook()
  if binary_manager.NeedsInit():
    # Typ doesn't keep the DependencyManager initialization in the child
    # processes.
    binary_manager.InitDependencyManager(context.client_config)
  args = context
  # Logging used to be configured unconditionally here and then configured a
  # second time below; that ignored args.disable_logging_config. Configure it
  # once, and only when not disabled (consistent with the sibling
  # _SetUpProcess implementation).
  if not args.disable_logging_config:
    # We need to reset the handlers in case some other parts of telemetry
    # already set it to make this work.
    logging.getLogger().handlers = []
    logging.basicConfig(
        level=logging.INFO,
        format='(%(levelname)s) %(asctime)s %(module)s.%(funcName)s:%(lineno)d'
              '  %(message)s')
  if args.device and args.device == 'android':
    android_devices = device_finder.GetDevicesMatchingOptions(args)
    args.device = android_devices[child.worker_num-1].guid
  options_for_unittests.Push(args)
def RunTests(args):
  """Parse args, find the requested browser test class, and run it via typ.

  Returns the typ runner's exit status, 1 if the test class is not found,
  or 130 on KeyboardInterrupt.
  """
  parser = _CreateTestArgParsers()
  try:
    options, extra_args = parser.parse_known_args(args)
  except arg_parser._Bailout:
    # The parser already reported its error; surface telemetry help too.
    PrintTelemetryHelp()
    return parser.exit_status
  binary_manager.InitDependencyManager(options.client_configs)
  for start_dir in options.start_dirs:
    modules_to_classes = discover.DiscoverClasses(
        start_dir,
        options.top_level_dir,
        base_class=serially_executed_browser_test_case.
        SeriallyExecutedBrowserTestCase)
    browser_test_classes = modules_to_classes.values()
    # NOTE(review): only the final start_dir's classes survive this loop;
    # confirm whether accumulation across start_dirs was intended.

  _ValidateDistinctNames(browser_test_classes)

  test_class = None
  for cl in browser_test_classes:
    if cl.Name() == options.test:
      test_class = cl
      break

  if not test_class:
    print('Cannot find test class with name matching %s' % options.test)
    print('Available tests: %s' % '\n'.join(
        cl.Name() for cl in browser_test_classes))
    return 1

  test_class._typ_runner = typ_runner = typ.Runner()

  # Create test context.
  typ_runner.context = browser_test_context.TypTestContext()
  for c in options.client_configs:
    typ_runner.context.client_configs.append(c)
  typ_runner.context.finder_options = ProcessCommandLineOptions(
      test_class, options, extra_args)
  typ_runner.context.test_class = test_class
  typ_runner.context.expectations_files = options.expectations_files
  test_times = None
  if options.read_abbreviated_json_results_from:
    with open(options.read_abbreviated_json_results_from, 'r') as f:
      abbr_results = json.load(f)
      test_times = abbr_results.get('times')

  # Setup typ.Runner instance.
  typ_runner.args.all = options.all
  typ_runner.args.expectations_files = options.expectations_files
  typ_runner.args.jobs = options.jobs
  typ_runner.args.list_only = options.list_only
  typ_runner.args.metadata = options.metadata
  typ_runner.args.passthrough = options.passthrough
  typ_runner.args.path = options.path
  typ_runner.args.quiet = options.quiet
  typ_runner.args.repeat = options.repeat
  typ_runner.args.repository_absolute_path = options.repository_absolute_path
  typ_runner.args.retry_limit = options.retry_limit
  typ_runner.args.retry_only_retry_on_failure_tests = (
      options.retry_only_retry_on_failure_tests)
  typ_runner.args.skip = options.skip
  typ_runner.args.suffixes = TEST_SUFFIXES
  typ_runner.args.tags = options.tags
  typ_runner.args.test_name_prefix = options.test_name_prefix
  typ_runner.args.test_filter = options.test_filter
  typ_runner.args.test_results_server = options.test_results_server
  typ_runner.args.test_type = options.test_type
  typ_runner.args.top_level_dir = options.top_level_dir
  typ_runner.args.write_full_results_to = options.write_full_results_to
  typ_runner.args.write_trace_to = options.write_trace_to
  typ_runner.args.disable_resultsink = options.disable_resultsink

  typ_runner.classifier = _GetClassifier(typ_runner)
  typ_runner.path_delimiter = test_class.GetJSONResultsDelimiter()
  typ_runner.setup_fn = _SetUpProcess
  typ_runner.teardown_fn = _TearDownProcess

  tests_to_run = LoadTestCasesToBeRun(
      test_class=test_class, finder_options=typ_runner.context.finder_options,
      filter_tests_after_sharding=options.filter_tests_after_sharding,
      total_shards=options.total_shards, shard_index=options.shard_index,
      test_times=test_times,
      debug_shard_distributions=options.debug_shard_distributions,
      typ_runner=typ_runner)
  for t in tests_to_run:
    typ_runner.context.test_case_ids_to_run.add(t.id())
  typ_runner.context.Freeze()
  browser_test_context._global_test_context = typ_runner.context

  # several class level variables are set for GPU tests  when
  # LoadTestCasesToBeRun is called. Functions line ExpectationsFiles and
  # GenerateTags which use these variables should be called after
  # LoadTestCasesToBeRun

  test_class_expectations_files = test_class.ExpectationsFiles()
  # all file paths in test_class_expectations-files must be absolute
  assert all(os.path.isabs(path) for path in test_class_expectations_files)
  typ_runner.args.expectations_files.extend(
      test_class_expectations_files)
  typ_runner.args.ignored_tags.extend(test_class.IgnoredTags())

  # Since sharding logic is handled by browser_test_runner harness by passing
  # browser_test_context.test_case_ids_to_run to subprocess to indicate test
  # cases to be run, we explicitly disable sharding logic in typ.
  typ_runner.args.total_shards = 1
  typ_runner.args.shard_index = 0

  typ_runner.args.timing = True
  typ_runner.args.verbose = options.verbose
  typ_runner.win_multiprocessing = typ.WinMultiprocessing.importable

  try:
    ret, _, _ = typ_runner.run()
  except KeyboardInterrupt:
    print("interrupted, exiting", file=sys.stderr)
    ret = 130
  return ret
        def ParseArgs(args=None):
            """Parse args into this options object (optparse shim).

            Applies side effects: configures log verbosity, exports
            CHROMIUM_OUTPUT_DIR, and handles the 'list' pseudo-values for
            device and browser_type (which print and sys.exit). Note this is
            Python 2 code (print statements).
            """
            # Fill in parser defaults for any attribute not already set.
            defaults = parser.get_default_values()
            for k, v in defaults.__dict__.items():
                if k in self.__dict__ and self.__dict__[k] != None:
                    continue
                self.__dict__[k] = v
            ret = real_parse(args, self)  # pylint: disable=E1121

            # Map -v count onto the root logger level.
            if self.verbosity >= 2:
                logging.getLogger().setLevel(logging.DEBUG)
            elif self.verbosity:
                logging.getLogger().setLevel(logging.INFO)
            else:
                logging.getLogger().setLevel(logging.WARNING)

            if self.chromium_output_dir:
                os.environ['CHROMIUM_OUTPUT_DIR'] = self.chromium_output_dir

            # Parse remote platform options.
            self.BuildRemotePlatformOptions()

            # '--device=list': print available devices and exit.
            if self.remote_platform_options.device == 'list':
                if binary_manager.NeedsInit():
                    binary_manager.InitDependencyManager([])
                devices = device_finder.GetDevicesMatchingOptions(self)
                print 'Available devices:'
                for device in devices:
                    print ' ', device.name
                sys.exit(0)

            if self.browser_executable and not self.browser_type:
                self.browser_type = 'exact'
            # '--browser=list': print browsers per device and exit.
            if self.browser_type == 'list':
                if binary_manager.NeedsInit():
                    binary_manager.InitDependencyManager([])
                devices = device_finder.GetDevicesMatchingOptions(self)
                if not devices:
                    sys.exit(0)
                browser_types = {}
                for device in devices:
                    try:
                        possible_browsers = browser_finder.GetAllAvailableBrowsers(
                            self, device)
                        browser_types[device.name] = sorted([
                            browser.browser_type
                            for browser in possible_browsers
                        ])
                    except browser_finder_exceptions.BrowserFinderException as ex:
                        print >> sys.stderr, 'ERROR: ', ex
                        sys.exit(1)
                print 'Available browsers:'
                if len(browser_types) == 0:
                    print '  No devices were found.'
                for device_name in sorted(browser_types.keys()):
                    print '  ', device_name
                    for browser_type in browser_types[device_name]:
                        print '    ', browser_type
                sys.exit(0)

            # Parse browser options.
            self.browser_options.UpdateFromParseResults(self)

            return ret
Example #22
0
        def ParseArgs(args=None):
            """Parse args into this options object (optparse shim).

            Applies side effects: exports CHROMIUM_OUTPUT_DIR, sets up the
            Android emulator and remote platform options, handles the 'list'
            pseudo-values for device and browser_type (which print and
            sys.exit), resolves the CrOS SSH port, and validates interval
            profiling options. Note this is Python 2 code (print statements).
            """
            # Fill in parser defaults for any attribute not already set.
            defaults = parser.get_default_values()
            for k, v in defaults.__dict__.items():
                if k in self.__dict__ and self.__dict__[k] != None:
                    continue
                self.__dict__[k] = v
            ret = real_parse(args, self)  # pylint: disable=E1121

            if self.chromium_output_dir:
                os.environ['CHROMIUM_OUTPUT_DIR'] = self.chromium_output_dir

            # Set up Android emulator if necessary.
            self.ParseAndroidEmulatorOptions()

            # Parse remote platform options.
            self.BuildRemotePlatformOptions()

            # '--device=list': print available devices and exit.
            if self.remote_platform_options.device == 'list':
                if binary_manager.NeedsInit():
                    binary_manager.InitDependencyManager([])
                devices = device_finder.GetDevicesMatchingOptions(self)
                print 'Available devices:'
                for device in devices:
                    print ' ', device.name
                sys.exit(0)

            if self.browser_executable and not self.browser_type:
                self.browser_type = 'exact'
            # '--browser=list': print browsers per device and exit.
            if self.browser_type == 'list':
                if binary_manager.NeedsInit():
                    binary_manager.InitDependencyManager([])
                devices = device_finder.GetDevicesMatchingOptions(self)
                if not devices:
                    sys.exit(0)
                browser_types = {}
                for device in devices:
                    try:
                        possible_browsers = browser_finder.GetAllAvailableBrowsers(
                            self, device)
                        browser_types[device.name] = sorted([
                            browser.browser_type
                            for browser in possible_browsers
                        ])
                    except browser_finder_exceptions.BrowserFinderException as ex:
                        print >> sys.stderr, 'ERROR: ', ex
                        sys.exit(1)
                print 'Available browsers:'
                if len(browser_types) == 0:
                    print '  No devices were found.'
                for device_name in sorted(browser_types.keys()):
                    print '  ', device_name
                    for browser_type in browser_types[device_name]:
                        print '    ', browser_type
                    if len(browser_types[device_name]) == 0:
                        print '     No browsers found for this device'
                sys.exit(0)

            # Remote CrOS runs need the SSH port; resolve it from the service
            # database when not given on the command line.
            if ((self.browser_type == 'cros-chrome'
                 or self.browser_type == 'lacros-chrome') and self.cros_remote
                    and (self.cros_remote_ssh_port < 0)):
                try:
                    self.cros_remote_ssh_port = socket.getservbyname('ssh')
                except OSError as e:
                    raise RuntimeError(
                        'Running a CrOS test in remote mode, but failed to retrieve port '
                        'used by SSH service. This likely means SSH is not installed on '
                        'the system. Original error: %s' % e)

            # Profiling other periods along with the story_run period leads to running
            # multiple profiling processes at the same time. The effects of performing
            # muliple CPU profiling at the same time is unclear and may generate
            # incorrect profiles so this will not be supported.
            if (len(self.interval_profiling_periods) > 1
                    and 'story_run' in self.interval_profiling_periods):
                print 'Cannot specify other periods along with the story_run period.'
                sys.exit(1)

            self.interval_profiler_options = shlex.split(
                self.interval_profiler_options, posix=(not _IsWin()))

            # Parse browser options.
            self.browser_options.UpdateFromParseResults(self)

            return ret
 def testLocalPathInitialized(self, binary_manager_mock):
     """LocalPath should delegate to the initialized BinaryManager."""
     binary_manager.InitDependencyManager(None)
     # NOTE(review): the call passes ('dep', 'arch', 'plat') while the
     # assertion expects ('dep', 'plat', 'arch', None); presumably
     # binary_manager.LocalPath reorders its arguments -- confirm against its
     # implementation.
     binary_manager.LocalPath('dep', 'arch', 'plat')
     binary_manager_mock.return_value.LocalPath.assert_called_with(
         'dep', 'plat', 'arch', None)
Example #24
0
 def _PrepareDevice(device):
     """Configure the environment and device for perf profiling."""
     # Downstream tooling consults BUILDTYPE; default to Release builds.
     os.environ.setdefault('BUILDTYPE', 'Release')
     binary_manager.InitDependencyManager(None)
     return android_profiling_helper.PrepareDeviceForPerf(device)
Example #25
0
def RunTests(args):
    """Discover browser test classes and run the selected suite via typ.

    Args:
      args: A list of command line arguments to parse.

    Returns:
      The typ runner's exit code, 130 on keyboard interrupt, 1 when no test
      class matches the requested name, or the arg parser's exit status when
      argument parsing bails out.
    """
    parser = _CreateTestArgParsers()
    try:
        options, extra_args = parser.parse_known_args(args)
    except arg_parser._Bailout:
        return parser.exit_status
    binary_manager.InitDependencyManager(options.client_configs)

    # Fix: accumulate discovered classes across all start_dirs. The original
    # rebound browser_test_classes on every iteration, keeping only the last
    # directory's classes, and raised NameError when start_dirs was empty.
    browser_test_classes = []
    for start_dir in options.start_dirs:
        modules_to_classes = discover.DiscoverClasses(
            start_dir,
            options.top_level_dir,
            base_class=serially_executed_browser_test_case.
            SeriallyExecutedBrowserTestCase)
        browser_test_classes.extend(modules_to_classes.values())

    _ValidateDistinctNames(browser_test_classes)

    # Select the class whose Name() matches the requested test suite.
    test_class = None
    for cl in browser_test_classes:
        if cl.Name() == options.test:
            test_class = cl
            break

    if not test_class:
        print 'Cannot find test class with name matching %s' % options.test
        print 'Available tests: %s' % '\n'.join(cl.Name()
                                                for cl in browser_test_classes)
        return 1

    # Create test context.
    context = browser_test_context.TypTestContext()
    for c in options.client_configs:
        context.client_configs.append(c)
    context.finder_options = ProcessCommandLineOptions(test_class, options,
                                                       extra_args)
    context.test_class = test_class
    test_times = None
    if options.read_abbreviated_json_results_from:
        with open(options.read_abbreviated_json_results_from, 'r') as f:
            abbr_results = json.load(f)
            test_times = abbr_results.get('times')
    tests_to_run = LoadTestCasesToBeRun(
        test_class=test_class,
        finder_options=context.finder_options,
        filter_regex_str=options.test_filter,
        filter_tests_after_sharding=options.filter_tests_after_sharding,
        total_shards=options.total_shards,
        shard_index=options.shard_index,
        test_times=test_times,
        debug_shard_distributions=options.debug_shard_distributions)
    for t in tests_to_run:
        context.test_case_ids_to_run.add(t.id())
    context.Freeze()
    browser_test_context._global_test_context = context

    # Setup typ runner.
    runner = typ.Runner()

    runner.context = context
    runner.setup_fn = _SetUpProcess
    runner.teardown_fn = _TearDownProcess

    runner.args.jobs = options.jobs
    runner.args.metadata = options.metadata
    runner.args.passthrough = options.passthrough
    runner.args.path = options.path
    runner.args.retry_limit = options.retry_limit
    runner.args.test_results_server = options.test_results_server
    runner.args.test_type = options.test_type
    runner.args.top_level_dir = options.top_level_dir
    runner.args.write_full_results_to = options.write_full_results_to
    runner.args.write_trace_to = options.write_trace_to
    runner.args.list_only = options.list_only
    runner.classifier = _GetClassifier(options)

    runner.args.suffixes = TEST_SUFFIXES

    # Since sharding logic is handled by browser_test_runner harness by passing
    # browser_test_context.test_case_ids_to_run to subprocess to indicate test
    # cases to be run, we explicitly disable sharding logic in typ.
    runner.args.total_shards = 1
    runner.args.shard_index = 0

    runner.args.timing = True
    runner.args.verbose = options.verbose
    runner.win_multiprocessing = typ.WinMultiprocessing.importable
    try:
        ret, _, _ = runner.run()
    except KeyboardInterrupt:
        print >> sys.stderr, "interrupted, exiting"
        ret = 130
    return ret
Exemple #26
0
def Run(project_config, test_run_options, args, **log_config_kwargs):
    """Discover and run a browser test suite using unittest.

    Args:
      project_config: Provides client_configs, start_dirs and top_level_dir
        used for dependency setup and test discovery.
      test_run_options: Provides the unittest runner verbosity.
      args: Command line arguments to parse.
      **log_config_kwargs: Forwarded to logging.basicConfig; 'level' is
        dropped because the log level is set in browser_options.

    Returns:
      The number of failed or errored tests (0 on success), or 1 when no
      test class matches the requested name.
    """
    # The log level is set in browser_options.
    log_config_kwargs.pop('level', None)
    log_config_kwargs.setdefault('format', DEFAULT_LOG_FORMAT)
    logging.basicConfig(**log_config_kwargs)

    binary_manager.InitDependencyManager(project_config.client_configs)
    parser = argparse.ArgumentParser(description='Run a browser test suite')
    parser.add_argument('test', type=str, help='Name of the test suite to run')
    parser.add_argument(
        '--write-abbreviated-json-results-to',
        metavar='FILENAME',
        action='store',
        help=(
            'If specified, writes the full results to that path in json form.'
        ))
    parser.add_argument(
        '--test-filter',
        type=str,
        default='',
        action='store',
        help='Run only tests whose names match the given filter regexp.')
    parser.add_argument(
        '--total-shards',
        default=1,
        type=int,
        help='Total number of shards being used for this test run. (The user of '
        'this script is responsible for spawning all of the shards.)')
    parser.add_argument(
        '--shard-index',
        default=0,
        type=int,
        help='Shard index (0..total_shards-1) of this test run.')
    parser.add_argument(
        '--filter-tests-after-sharding',
        default=False,
        action='store_true',
        help=(
            'Apply the test filter after tests are split for sharding. Useful '
            'for reproducing bugs related to the order in which tests run.'))
    parser.add_argument(
        '--read-abbreviated-json-results-from',
        metavar='FILENAME',
        action='store',
        help=
        ('If specified, reads abbreviated results from that path in json form. '
         'The file format is that written by '
         '--write-abbreviated-json-results-to. This information is used to more '
         'evenly distribute tests among shards.'))
    parser.add_argument(
        '--debug-shard-distributions',
        action='store_true',
        default=False,
        help='Print debugging information about the shards\' test distributions'
    )

    option, extra_args = parser.parse_known_args(args)

    # Fix: accumulate discovered classes across all start_dirs. The original
    # rebound browser_test_classes on every iteration, keeping only the last
    # directory's classes, and raised NameError when start_dirs was empty.
    browser_test_classes = []
    for start_dir in project_config.start_dirs:
        modules_to_classes = discover.DiscoverClasses(
            start_dir,
            project_config.top_level_dir,
            base_class=serially_executed_browser_test_case.
            SeriallyExecutedBrowserTestCase)
        browser_test_classes.extend(modules_to_classes.values())

    _ValidateDistinctNames(browser_test_classes)

    # Select the class whose Name() matches the requested test suite.
    test_class = None
    for cl in browser_test_classes:
        if cl.Name() == option.test:
            test_class = cl
            break

    if not test_class:
        print 'Cannot find test class with name matching %s' % option.test
        print 'Available tests: %s' % '\n'.join(cl.Name()
                                                for cl in browser_test_classes)
        return 1

    options = ProcessCommandLineOptions(test_class, project_config, extra_args)

    # Previously recorded per-test timings, used for shard balancing.
    test_times = None
    if option.read_abbreviated_json_results_from:
        with open(option.read_abbreviated_json_results_from, 'r') as f:
            abbr_results = json.load(f)
            test_times = abbr_results.get('times')

    suite = unittest.TestSuite()
    for test in _LoadTests(test_class, options, option.test_filter,
                           option.filter_tests_after_sharding,
                           option.total_shards, option.shard_index, test_times,
                           option.debug_shard_distributions):
        suite.addTest(test)

    results = unittest.TextTestRunner(verbosity=test_run_options.verbosity,
                                      resultclass=BrowserTestResult).run(suite)
    if option.write_abbreviated_json_results_to:
        with open(option.write_abbreviated_json_results_to, 'w') as f:
            json_results = {
                'failures': [],
                'successes': [],
                'times': {},
                'valid': True
            }
            # Treat failures and errors identically in the JSON
            # output. Failures are those which cooperatively fail using
            # Python's unittest APIs; errors are those which abort the test
            # case early with an exception.
            failures = []
            for fail, _ in results.failures + results.errors:
                # When errors are thrown in an individual test method or in
                # setUp or tearDown, fail is an instance of unittest.TestCase.
                if isinstance(fail, unittest.TestCase):
                    failures.append(fail.shortName())
                else:
                    # When errors are thrown in setUpClass or tearDownClass, an
                    # instance of _ErrorHolder is placed in results.errors. We
                    # use the id() as the failure name in this case since
                    # shortName() is not available.
                    failures.append(fail.id())
            # sorted() already returns a new list; the extra list() call in the
            # original was redundant.
            failures = sorted(failures)
            for failure_id in failures:
                json_results['failures'].append(failure_id)
            for passed_test_case in results.successes:
                json_results['successes'].append(passed_test_case.shortName())
            json_results['times'].update(results.times)
            json.dump(json_results, f)
    return len(results.failures + results.errors)
def main(environment, extra_commands=None, **log_config_kwargs):
  """Top-level command dispatcher: pick a command from sys.argv and run it.

  Args:
    environment: A project config object providing client_configs and
      default_chrome_root.
    extra_commands: Optional list of additional command classes accepted
      beyond the built-in Help, List and Run.
    **log_config_kwargs: Forwarded to logging.basicConfig; 'level' is
      dropped because the log level is set in browser_options.

  Returns:
    The selected command's exit code, or 1 on an ambiguous command name.
  """
  # The log level is set in browser_options.
  log_config_kwargs.pop('level', None)
  log_config_kwargs.setdefault('format', DEFAULT_LOG_FORMAT)
  logging.basicConfig(**log_config_kwargs)

  ps_util.EnableListingStrayProcessesUponExitHook()

  # Get the command name from the command line.
  if len(sys.argv) > 1 and sys.argv[1] == '--help':
    sys.argv[1] = 'help'

  # The first non-flag argument names the command; default to 'run'.
  command_name = 'run'
  for arg in sys.argv[1:]:
    if not arg.startswith('-'):
      command_name = arg
      break

  # TODO(eakuefner): Remove this hack after we port to argparse.
  # 'help run' is rewritten to 'run --help' so the run parser prints usage.
  if command_name == 'help' and len(sys.argv) > 2 and sys.argv[2] == 'run':
    command_name = 'run'
    sys.argv[2] = '--help'

  if extra_commands is None:
    extra_commands = []
  all_commands = [Help, List, Run] + extra_commands

  # Validate and interpret the command name.
  commands = _MatchingCommands(command_name, all_commands)
  # More than one match means the (possibly abbreviated) name is ambiguous;
  # list the candidates and bail out.
  if len(commands) > 1:
    print >> sys.stderr, ('"%s" is not a %s command. Did you mean one of these?'
                          % (command_name, _ScriptName()))
    for command in commands:
      print >> sys.stderr, '  %-10s %s' % (
          command.Name(), command.Description())
    return 1
  if commands:
    command = commands[0]
  else:
    command = Run

  binary_manager.InitDependencyManager(environment.client_configs)

  # Parse and run the command.
  parser = command.CreateParser()
  command.AddCommandLineArgs(parser, environment)

  # Set the default chrome root variable.
  parser.set_defaults(chrome_root=environment.default_chrome_root)


  # argparse-based commands parse known args (minus the command name itself);
  # legacy optparse commands receive positional args on the options object.
  if isinstance(parser, argparse.ArgumentParser):
    commandline_args = sys.argv[1:]
    options, args = parser.parse_known_args(commandline_args[1:])
    command.ProcessCommandLineArgs(parser, options, args, environment)
  else:
    options, args = parser.parse_args()
    if commands:
      # Drop the command name from the positional args.
      args = args[1:]
    options.positional_args = args
    command.ProcessCommandLineArgs(parser, options, environment)

  # Help needs the full command list so it can describe every command.
  if command == Help:
    command_instance = command(all_commands)
  else:
    command_instance = command()
  if isinstance(command_instance, command_line.OptparseCommand):
    return command_instance.Run(options)
  else:
    return command_instance.Run(options, args)
        def ParseArgs(args=None):
            """Parse command line args onto self, handling 'list' queries.

            NOTE(review): closure over `parser`, `real_parse` and `self` from
            the enclosing scope; exits the process for the 'list' device and
            browser-type queries and on invalid profiling-period combinations.
            """
            # Fill in parser defaults for any attribute not already set on
            # self, so explicit values are never overwritten.
            defaults = parser.get_default_values()
            for k, v in defaults.__dict__.items():
                if k in self.__dict__ and self.__dict__[k] != None:
                    continue
                self.__dict__[k] = v
            ret = real_parse(args, self)  # pylint: disable=E1121

            if self.chromium_output_dir:
                os.environ['CHROMIUM_OUTPUT_DIR'] = self.chromium_output_dir

            # Parse remote platform options.
            self.BuildRemotePlatformOptions()

            # Device name 'list' means: print available devices and exit.
            if self.remote_platform_options.device == 'list':
                if binary_manager.NeedsInit():
                    binary_manager.InitDependencyManager([])
                devices = device_finder.GetDevicesMatchingOptions(self)
                print 'Available devices:'
                for device in devices:
                    print ' ', device.name
                sys.exit(0)

            # An explicit browser executable implies the 'exact' browser type.
            if self.browser_executable and not self.browser_type:
                self.browser_type = 'exact'
            # Browser type 'list' means: print browsers per device and exit.
            if self.browser_type == 'list':
                if binary_manager.NeedsInit():
                    binary_manager.InitDependencyManager([])
                devices = device_finder.GetDevicesMatchingOptions(self)
                if not devices:
                    sys.exit(0)
                browser_types = {}
                for device in devices:
                    try:
                        possible_browsers = browser_finder.GetAllAvailableBrowsers(
                            self, device)
                        browser_types[device.name] = sorted([
                            browser.browser_type
                            for browser in possible_browsers
                        ])
                    except browser_finder_exceptions.BrowserFinderException as ex:
                        print >> sys.stderr, 'ERROR: ', ex
                        sys.exit(1)
                print 'Available browsers:'
                if len(browser_types) == 0:
                    print '  No devices were found.'
                for device_name in sorted(browser_types.keys()):
                    print '  ', device_name
                    for browser_type in browser_types[device_name]:
                        print '    ', browser_type
                    if len(browser_types[device_name]) == 0:
                        print '     No browsers found for this device'
                sys.exit(0)

            # Profiling other periods along with the story_run period leads to
            # running multiple profiling processes at the same time. The effects
            # of performing multiple CPU profiling at the same time is unclear
            # and may generate incorrect profiles so this will not be supported.
            if (len(self.interval_profiling_periods) > 1
                    and 'story_run' in self.interval_profiling_periods):
                print 'Cannot specify other periods along with the story_run period.'
                sys.exit(1)

            # NOTE(review): the sibling variant of this function passes
            # posix=(not _IsWin()) to shlex.split — confirm whether Windows
            # quoting matters for this module too.
            self.interval_profiler_options = shlex.split(
                self.interval_profiler_options)

            # Parse browser options.
            self.browser_options.UpdateFromParseResults(self)

            return ret
Exemple #29
0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License

import telemetry.core
import sys
from telemetry.internal.browser import browser_options
from telemetry.internal.browser import browser_finder

# Initialize the dependency manager
from telemetry.internal.util import binary_manager
from chrome_telemetry_build import chromium_config
binary_manager.InitDependencyManager(
    chromium_config.ChromiumConfig().client_config)

from telemetry.timeline import tracing_config

from json import dumps

options = browser_options.BrowserFinderOptions()
parser = options.CreateParser()
(_, args) = parser.parse_args()

browserFactory = browser_finder.FindBrowser(options)

with browserFactory.Create(options) as browser:
    tab = browser.tabs.New()
    tab.Activate()
    for i in browser.tabs:
    self._perf_instance = None
    self._categories = None

  def __repr__(self):
    """Short label used when this profiler agent appears in logs."""
    label = 'perf profile'
    return label

  @staticmethod
  def IsSupported():
    # Perf profiling is only usable when the android_profiling_helper module
    # is available; presumably the module-level name is set falsy/None when
    # its import failed — TODO confirm at the import site.
    return bool(android_profiling_helper)

  @staticmethod
  def _PrepareDevice(device):
    """Prepare *device* for perf profiling and return the perf binary path.

    Fix: the parameter was corrupted to `presentation.device`, which is not a
    valid Python identifier and made this method a syntax error; restored to
    `device` here and in the body.
    """
    # Default to Release builds when the caller has not set BUILDTYPE.
    if 'BUILDTYPE' not in os.environ:
      os.environ['BUILDTYPE'] = 'Release'
    if binary_manager.NeedsInit():
      binary_manager.InitDependencyManager(None)
    return android_profiling_helper.PrepareDeviceForPerf(device)

  @classmethod
  def GetCategories(cls, device):
    """Return the perf event categories supported by *device*.

    Fix: the parameter was corrupted to `presentation.device`, which is not a
    valid Python identifier and made this method a syntax error; restored to
    `device` here and in the body.
    """
    perf_binary = cls._PrepareDevice(device)
    # Perf binary returns non-zero exit status on "list" command.
    return device.RunShellCommand([perf_binary, 'list'], check_return=False)

  @py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
  def StartAgentTracing(self, config, timeout=None):
    """Start a perf profiling session for the given tracing config.

    Args:
      config: Tracing config from which perf categories are computed.
      timeout: Accepted for interface compatibility; unused here (the
        py_utils.Timeout decorator enforces its own timeout).

    Returns:
      True, indicating the agent started.
    """
    # Derive the set of perf categories to record from the tracing config.
    self._categories = _ComputePerfCategories(config)
    # assumes self._device and self._perf_binary were set by earlier setup —
    # TODO confirm against the enclosing class's initializer.
    self._perf_instance = _PerfProfiler(self._device,
                                        self._perf_binary,
                                        self._categories)
    return True