Example #1
  def testAllArguments(self):
    arguments = {
        'configuration': 'chromium-rel-mac11-pro',
        'target': 'telemetry_perf_tests',
    }
    expected_quests = [
        quest.FindIsolate('chromium-rel-mac11-pro', 'telemetry_perf_tests'),
    ]
    self.assertEqual(quest_generator.GenerateQuests(arguments),
                     (arguments, expected_quests))
Example #2
def _FindIsolate(request):
  arguments = {}

  configuration = request.get('configuration')
  if not configuration:
    raise TypeError('Missing "configuration" argument.')
  arguments['configuration'] = configuration

  target = request.get('target')
  if not target:
    raise TypeError('Missing "target" argument.')
  arguments['target'] = target

  return arguments, quest_module.FindIsolate(configuration, target)
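For context, here is a minimal sketch of how a GenerateQuests entry point could chain helpers with the same (parsed_arguments, quest) contract as _FindIsolate above to build the (arguments, quests) tuple that the tests assert against. Only _FindIsolate appears in these examples; the generator tuple and the commented _RunTest/_ReadValue names are assumptions for illustration, not the actual Pinpoint implementation.

# Sketch only: each helper validates its slice of the request and returns
# (parsed_arguments, quest). _RunTest and _ReadValue are hypothetical
# placeholders with the same contract as _FindIsolate above.
_QUEST_GENERATORS = (_FindIsolate,)  # e.g. (_FindIsolate, _RunTest, _ReadValue)


def GenerateQuests(request):
  # Accumulate the parsed arguments and the quest produced by each helper.
  arguments = {}
  quests = []
  for generator in _QUEST_GENERATORS:
    generator_arguments, generated_quest = generator(request)
    arguments.update(generator_arguments)
    quests.append(generated_quest)
  return arguments, quests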
Example #3
  def testMinimumArguments(self):
    arguments = {
        'configuration': 'chromium-rel-mac11-pro',
        'target': 'net_perftests',
        'dimensions': '{}',
    }

    expected_quests = [
        quest.FindIsolate('chromium-rel-mac11-pro', 'net_perftests'),
        quest.RunTest({}, _MIN_GTEST_RUN_TEST_ARGUMENTS),
    ]
    print(quest_generator.GenerateQuests(arguments)[1][1]._extra_args)
    self.assertEqual(quest_generator.GenerateQuests(arguments),
                     (arguments, expected_quests))
Example #4
  def testAllArguments(self):
    arguments = {
        'configuration': 'chromium-rel-mac11-pro',
        'target': 'net_perftests',
        'dimensions': '{"key": "value"}',
        'test': 'test_name',
    }

    expected_quests = [
        quest.FindIsolate('chromium-rel-mac11-pro', 'net_perftests'),
        quest.RunTest({'key': 'value'}, _ALL_GTEST_RUN_TEST_ARGUMENTS),
    ]
    print(quest_generator.GenerateQuests(arguments)[1][1]._extra_args)
    self.assertEqual(quest_generator.GenerateQuests(arguments),
                     (arguments, expected_quests))
Example #5
  def testStartupBenchmarkRepeatCount(self):
    arguments = {
        'configuration': 'chromium-rel-mac11-pro',
        'target': 'telemetry_perf_tests',
        'dimensions': '{}',
        'benchmark': 'start_with_url.warm.startup_pages',
        'browser': 'release',
    }

    expected_quests = [
        quest.FindIsolate('chromium-rel-mac11-pro', 'telemetry_perf_tests'),
        quest.RunTest({}, _STARTUP_BENCHMARK_RUN_TEST_ARGUMENTS),
    ]
    self.assertEqual(quest_generator.GenerateQuests(arguments),
                     (arguments, expected_quests))
Example #6
  def testMinimumArguments(self):
    arguments = {
        'configuration': 'chromium-rel-mac11-pro',
        'target': 'telemetry_perf_tests',
        'dimensions': '{}',
        'benchmark': 'speedometer',
        'browser': 'release',
    }

    expected_quests = [
        quest.FindIsolate('chromium-rel-mac11-pro', 'telemetry_perf_tests'),
        quest.RunTest({}, _MIN_TELEMETRY_RUN_TEST_ARGUMENTS),
    ]
    self.assertEqual(quest_generator.GenerateQuests(arguments),
                     (arguments, expected_quests))
Example #7
  def testAllArguments(self):
    arguments = {
        'configuration': 'chromium-rel-mac11-pro',
        'target': 'net_perftests',
        'dimensions': '{"key": "value"}',
        'chart': 'chart_name',
        'trace': 'trace_name',
    }

    expected_quests = [
        quest.FindIsolate('chromium-rel-mac11-pro', 'net_perftests'),
        quest.RunTest({'key': 'value'}, _MIN_GTEST_RUN_TEST_ARGUMENTS),
        quest.ReadGraphJsonValue('chart_name', 'trace_name'),
    ]
    self.assertEqual(quest_generator.GenerateQuests(arguments),
                     (arguments, expected_quests))
Example #8
  def testAllArguments(self):
    arguments = {
        'configuration': 'chromium-rel-mac11-pro',
        'target': 'telemetry_perf_tests',
        'dimensions': '{"key": "value"}',
        'benchmark': 'speedometer',
        'browser': 'release',
        'story': 'http://www.fifa.com/',
    }

    expected_quests = [
        quest.FindIsolate('chromium-rel-mac11-pro', 'telemetry_perf_tests'),
        quest.RunTest({'key': 'value'}, _ALL_TELEMETRY_RUN_TEST_ARGUMENTS),
    ]
    self.assertEqual(quest_generator.GenerateQuests(arguments),
                     (arguments, expected_quests))
Example #9
  def New(cls, configuration, test_suite, test, metric, auto_explore):
    # Get list of quests.
    quests = [quest_module.FindIsolate(configuration)]
    if test_suite:
      quests.append(quest_module.RunTest(configuration, test_suite, test))
    if metric:
      quests.append(quest_module.ReadValue(metric, test))

    # Create job.
    return cls(
        configuration=configuration,
        test_suite=test_suite,
        test=test,
        metric=metric,
        auto_explore=auto_explore,
        state=_JobState(quests, _DEFAULT_MAX_ATTEMPTS))
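A hedged usage sketch of the factory above, assuming it is a classmethod on a Job entity; the Job class name and the argument values are illustrative assumptions, not taken from the examples.

# Illustrative only: creates a job whose quests find an isolate, run the
# net_perftests suite, and read the named metric from its results.
job = Job.New(
    configuration='chromium-rel-mac11-pro',
    test_suite='net_perftests',
    test='test_name',
    metric='timeToFirst',
    auto_explore=True)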
Example #10
  def testAllArguments(self):
    arguments = {
        'configuration': 'chromium-rel-mac11-pro',
        'target': 'telemetry_perf_tests',
        'dimensions': '{"key": "value"}',
        'benchmark': 'speedometer',
        'browser': 'release',
        'tir_label': 'pcv1-cold',
        'chart': 'timeToFirst',
        'trace': 'trace_name',
    }

    expected_quests = [
        quest.FindIsolate('chromium-rel-mac11-pro', 'telemetry_perf_tests'),
        quest.RunTest({'key': 'value'}, _MIN_TELEMETRY_RUN_TEST_ARGUMENTS),
        quest.ReadChartJsonValue('timeToFirst', 'pcv1-cold', 'trace_name'),
    ]
    self.assertEqual(quest_generator.GenerateQuests(arguments),
                     (arguments, expected_quests))