Example #1
def _GTestRunTest(request):
    arguments = {}
    swarming_extra_args = []

    dimensions = _GetDimensions(request, arguments)

    test = request.get('test')
    if test:
        arguments['test'] = test
        swarming_extra_args.append('--gtest_filter=' + test)

    swarming_extra_args.append('--gtest_repeat=1')

    extra_test_args = request.get('extra_test_args')
    if extra_test_args:
        extra_test_args = json.loads(extra_test_args)
        if not isinstance(extra_test_args, list):
            raise TypeError('extra_test_args must be a list: %s' %
                            extra_test_args)
        arguments['extra_test_args'] = json.dumps(extra_test_args)
        swarming_extra_args += extra_test_args

    swarming_extra_args += _SWARMING_EXTRA_ARGS

    return arguments, quest_module.RunTest(dimensions, swarming_extra_args)
Example #2
def _TelemetryRunTest(request):
    arguments = {}
    swarming_extra_args = []

    dimensions = request.get('dimensions')
    if not dimensions:
        return {}, None
    dimensions = json.loads(dimensions)
    arguments['dimensions'] = json.dumps(dimensions)

    benchmark = request.get('benchmark')
    if not benchmark:
        raise TypeError('Missing "benchmark" argument.')
    arguments['benchmark'] = benchmark
    swarming_extra_args.append(benchmark)

    story = request.get('story')
    if story:
        arguments['story'] = story
        swarming_extra_args += ('--story-filter', story)

    # TODO: Workaround for crbug.com/677843.
    if (benchmark.startswith('startup.warm')
            or benchmark.startswith('start_with_url.warm')):
        swarming_extra_args += ('--pageset-repeat', '2')
    else:
        swarming_extra_args += ('--pageset-repeat', '1')

    browser = request.get('browser')
    if not browser:
        raise TypeError('Missing "browser" argument.')
    arguments['browser'] = browser
    swarming_extra_args += ('--browser', browser)

    extra_test_args = request.get('extra_test_args')
    if extra_test_args:
        extra_test_args = json.loads(extra_test_args)
        if not isinstance(extra_test_args, list):
            raise TypeError('extra_test_args must be a list: %s' %
                            extra_test_args)
        arguments['extra_test_args'] = json.dumps(extra_test_args)
        swarming_extra_args += extra_test_args

    # TODO: Remove `=` in 2018. It was fixed on the chromium side in r496979,
    # but any bisects on commit ranges older than August 25 will still fail.
    swarming_extra_args += ('-v', '--upload-results',
                            '--output-format=histograms', '--results-label',
                            '')
    swarming_extra_args += _SWARMING_EXTRA_ARGS

    return arguments, quest_module.RunTest(dimensions, swarming_extra_args)
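
For illustration, a minimal sketch of how _TelemetryRunTest might be called; the request values below are hypothetical, and _SWARMING_EXTRA_ARGS is assumed to be a module-level list of extra Swarming flags:

import json  # needed by _TelemetryRunTest above

request = {
    'dimensions': '{"pool": "Chrome-perf"}',
    'benchmark': 'speedometer',
    'browser': 'release',
}
arguments, run_test = _TelemetryRunTest(request)
# arguments echoes the canonicalized inputs:
#   {'dimensions': '{"pool": "Chrome-perf"}',
#    'benchmark': 'speedometer', 'browser': 'release'}
# run_test wraps the parsed dimensions plus the extra args
#   ['speedometer', '--pageset-repeat', '1', '--browser', 'release',
#    '-v', '--upload-results', '--output-format=histograms',
#    '--results-label', ''] + _SWARMING_EXTRA_ARGS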
Example #3
  def testMinimumArguments(self):
    arguments = {
        'configuration': 'chromium-rel-mac11-pro',
        'target': 'net_perftests',
        'dimensions': '{}',
    }

    expected_quests = [
        quest.FindIsolate('chromium-rel-mac11-pro', 'net_perftests'),
        quest.RunTest({}, _MIN_GTEST_RUN_TEST_ARGUMENTS),
    ]
    print(quest_generator.GenerateQuests(arguments)[1][1]._extra_args)
    self.assertEqual(quest_generator.GenerateQuests(arguments),
                     (arguments, expected_quests))
Example #4
    def New(cls, configuration, test_suite, test, metric, auto_explore):
        # Get list of quests.
        quests = [quest.FindIsolated(configuration=configuration)]
        if test_suite:
            quests.append(quest.RunTest(test_suite=test_suite, test=test))
        if metric:
            quests.append(quest.ReadValue(metric=metric))

        # Create job.
        return cls(configuration=configuration,
                   test_suite=test_suite,
                   test=test,
                   metric=metric,
                   auto_explore=auto_explore,
                   state=_JobState(quests))
Example #5
    def testAllArguments(self):
        arguments = {
            'configuration': 'chromium-rel-mac11-pro',
            'target': 'net_perftests',
            'dimensions': '{"key": "value"}',
            'test': 'test_name',
        }

        expected_quests = [
            quest.FindIsolate('chromium-rel-mac11-pro', 'net_perftests'),
            quest.RunTest({'key': 'value'}, _ALL_GTEST_RUN_TEST_ARGUMENTS),
        ]
        print(quest_generator.GenerateQuests(arguments)[1][1]._extra_args)
        self.assertEqual(quest_generator.GenerateQuests(arguments),
                         (arguments, expected_quests))
Example #6
  def testStartupBenchmarkRepeatCount(self):
    arguments = {
        'configuration': 'chromium-rel-mac11-pro',
        'target': 'telemetry_perf_tests',
        'dimensions': '{}',
        'benchmark': 'start_with_url.warm.startup_pages',
        'browser': 'release',
    }

    expected_quests = [
        quest.FindIsolate('chromium-rel-mac11-pro', 'telemetry_perf_tests'),
        quest.RunTest({}, _STARTUP_BENCHMARK_RUN_TEST_ARGUMENTS),
    ]
    self.assertEqual(quest_generator.GenerateQuests(arguments),
                     (arguments, expected_quests))
Example #7
  def testMinimumArguments(self):
    arguments = {
        'configuration': 'chromium-rel-mac11-pro',
        'target': 'telemetry_perf_tests',
        'dimensions': '{}',
        'benchmark': 'speedometer',
        'browser': 'release',
    }

    expected_quests = [
        quest.FindIsolate('chromium-rel-mac11-pro', 'telemetry_perf_tests'),
        quest.RunTest({}, _MIN_TELEMETRY_RUN_TEST_ARGUMENTS),
    ]
    self.assertEqual(quest_generator.GenerateQuests(arguments),
                     (arguments, expected_quests))
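
Working backwards through _TelemetryRunTest in Example #2, the _MIN_TELEMETRY_RUN_TEST_ARGUMENTS constant used above must match the extra args that function builds for a minimal request. A plausible definition, derived from that logic rather than copied from the test module, would be:

_MIN_TELEMETRY_RUN_TEST_ARGUMENTS = [
    'speedometer',              # benchmark name always comes first
    '--pageset-repeat', '1',    # non-startup benchmarks repeat once
    '--browser', 'release',
    '-v', '--upload-results',
    '--output-format=histograms',
    '--results-label', '',      # empty label; see the `=` TODO in Example #2
] + _SWARMING_EXTRA_ARGS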
Example #8
  def testAllArguments(self):
    arguments = {
        'configuration': 'chromium-rel-mac11-pro',
        'target': 'net_perftests',
        'dimensions': '{"key": "value"}',
        'chart': 'chart_name',
        'trace': 'trace_name',
    }

    expected_quests = [
        quest.FindIsolate('chromium-rel-mac11-pro', 'net_perftests'),
        quest.RunTest({'key': 'value'}, _MIN_GTEST_RUN_TEST_ARGUMENTS),
        quest.ReadGraphJsonValue('chart_name', 'trace_name'),
    ]
    self.assertEqual(quest_generator.GenerateQuests(arguments),
                     (arguments, expected_quests))
Example #9
  def testAllArguments(self):
    arguments = {
        'configuration': 'chromium-rel-mac11-pro',
        'target': 'telemetry_perf_tests',
        'dimensions': '{"key": "value"}',
        'benchmark': 'speedometer',
        'browser': 'release',
        'story': 'http://www.fifa.com/',
    }

    expected_quests = [
        quest.FindIsolate('chromium-rel-mac11-pro', 'telemetry_perf_tests'),
        quest.RunTest({'key': 'value'}, _ALL_TELEMETRY_RUN_TEST_ARGUMENTS),
    ]
    self.assertEqual(quest_generator.GenerateQuests(arguments),
                     (arguments, expected_quests))
Example #10
  def New(cls, configuration, test_suite, test, metric, auto_explore):
    # Get list of quests.
    quests = [quest_module.FindIsolate(configuration)]
    if test_suite:
      quests.append(quest_module.RunTest(configuration, test_suite, test))
    if metric:
      quests.append(quest_module.ReadValue(metric, test))

    # Create job.
    return cls(
        configuration=configuration,
        test_suite=test_suite,
        test=test,
        metric=metric,
        auto_explore=auto_explore,
        state=_JobState(quests, _DEFAULT_MAX_ATTEMPTS))
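
A minimal usage sketch for this classmethod; the argument values are hypothetical, and Job stands in for whatever class defines New:

job = Job.New(
    configuration='linux',      # bot configuration to build and run on
    test_suite='speedometer',   # non-empty, so a RunTest quest is added
    test=None,
    metric='Total',             # non-empty, so a ReadValue quest is added
    auto_explore=True)
# job's state is a _JobState over [FindIsolate, RunTest, ReadValue],
# capped at _DEFAULT_MAX_ATTEMPTS.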
Example #11
  def post(self):
    # TODO(dtu): Read the parameters from the request object.
    # Not doing it for now because it's easier to run tests this way.
    configuration = 'linux'
    test_suite = 'tab_switching.typical_25'
    test = 'http://www.airbnb.com/'
    metric = 'asdf'
    commits = (('chromium/src', 'a'), ('chromium/src', 'b'))

    # Validate parameters.
    if metric and not test_suite:
      raise ValueError("Specified a metric but there's no test_suite to run.")

    # Convert parameters to canonical internal representation.

    # Get list of changes.
    changes = []
    for repository, git_hash in commits:
      base_commit = change.Dep(repository=repository, git_hash=git_hash)
      changes.append(change.Change(base_commit=base_commit))

    # Get list of quests.
    quests = [quest.FindIsolated(configuration=configuration)]
    if test_suite:
      quests.append(quest.RunTest(test_suite=test_suite, test=test))
    if metric:
      quests.append(quest.ReadTestResults(metric=metric))

    # Create job.
    job = job_module.Job(
        configuration=configuration,
        test_suite=test_suite,
        test=test,
        metric=metric,
        auto_explore=True,
        changes=changes,
        quests=quests)
    job_id = job.put().urlsafe()

    # Start job.
    task = taskqueue.add(queue_name='job-queue', target='pinpoint',
                         url='/run/' + job_id)
    job.task = task.name
    job.put()

    # Show status page.
    self.redirect('/job/' + job_id)
Example #12
def _GTestRunTest(request):
  arguments = {}
  swarming_extra_args = []

  dimensions = request.get('dimensions')
  if not dimensions:
    return {}, None
  dimensions = json.loads(dimensions)
  arguments['dimensions'] = json.dumps(dimensions)

  test = request.get('test')
  if test:
    arguments['test'] = test
    swarming_extra_args.append('--gtest_filter=' + test)

  swarming_extra_args.append('--gtest_repeat=1')

  swarming_extra_args += _SWARMING_EXTRA_ARGS

  return arguments, quest_module.RunTest(dimensions, swarming_extra_args)
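
A minimal sketch of calling this variant; the request values are hypothetical:

request = {
    'dimensions': '{"pool": "Chrome-perf"}',
    'test': 'Net.TcpConnect',
}
arguments, run_test = _GTestRunTest(request)
# arguments == {'dimensions': '{"pool": "Chrome-perf"}',
#               'test': 'Net.TcpConnect'}
# run_test wraps the parsed dimensions plus
# ['--gtest_filter=Net.TcpConnect', '--gtest_repeat=1']
# followed by _SWARMING_EXTRA_ARGS.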
Example #13
    def testAllArguments(self):
        arguments = {
            'configuration': 'chromium-rel-mac11-pro',
            'target': 'telemetry_perf_tests',
            'dimensions': '{"key": "value"}',
            'benchmark': 'speedometer',
            'browser': 'release',
            'tir_label': 'pcv1-cold',
            'chart': 'timeToFirst',
            'trace': 'trace_name',
        }

        expected_quests = [
            quest.FindIsolate('chromium-rel-mac11-pro',
                              'telemetry_perf_tests'),
            quest.RunTest({'key': 'value'}, _MIN_TELEMETRY_RUN_TEST_ARGUMENTS),
            quest.ReadChartJsonValue('timeToFirst', 'pcv1-cold', 'trace_name'),
        ]
        self.assertEqual(quest_generator.GenerateQuests(arguments),
                         (arguments, expected_quests))
Example #14
def _TelemetryRunTest(request):
  arguments = {}
  swarming_extra_args = []

  dimensions = request.get('dimensions')
  if not dimensions:
    return {}, None
  dimensions = json.loads(dimensions)
  arguments['dimensions'] = json.dumps(dimensions)

  benchmark = request.get('benchmark')
  if not benchmark:
    raise TypeError('Missing "benchmark" argument.')
  arguments['benchmark'] = benchmark
  swarming_extra_args.append(benchmark)

  story = request.get('story')
  if story:
    arguments['story'] = story
    swarming_extra_args += ('--story-filter', story)

  # TODO: Workaround for crbug.com/677843.
  if (benchmark.startswith('startup.warm') or
      benchmark.startswith('start_with_url.warm')):
    swarming_extra_args += ('--pageset-repeat', '2')
  else:
    swarming_extra_args += ('--pageset-repeat', '1')

  browser = request.get('browser')
  if not browser:
    raise TypeError('Missing "browser" argument.')
  arguments['browser'] = browser
  swarming_extra_args += ('--browser', browser)

  swarming_extra_args += ('-v', '--upload-results',
                          '--output-format', 'chartjson')
  swarming_extra_args += _SWARMING_EXTRA_ARGS

  return arguments, quest_module.RunTest(dimensions, swarming_extra_args)