def testMissingArguments(self):
    """GenerateQuests raises TypeError when a required argument is absent."""
    incomplete_argument_dicts = (
        {'target': 'telemetry_perf_tests'},           # configuration is missing.
        {'configuration': 'chromium-rel-mac11-pro'},  # target is missing.
    )
    for arguments in incomplete_argument_dicts:
        with self.assertRaises(TypeError):
            quest_generator.GenerateQuests(arguments)
def testMinimumArguments(self):
    """The minimal gtest argument set yields FindIsolate + RunTest quests."""
    arguments = {
        'configuration': 'chromium-rel-mac11-pro',
        'target': 'net_perftests',
        'dimensions': '{}',
    }
    expected_quests = [
        quest.FindIsolate('chromium-rel-mac11-pro', 'net_perftests'),
        quest.RunTest({}, _MIN_GTEST_RUN_TEST_ARGUMENTS),
    ]
    # NOTE(review): removed a leftover debug `print` of the private
    # RunTest._extra_args attribute; tests should not write to stdout, and
    # GenerateQuests only needs to be called once.
    self.assertEqual(quest_generator.GenerateQuests(arguments),
                     (arguments, expected_quests))
def testAllArguments(self):
    """All gtest arguments (dimensions + test filter) map to the full arg list."""
    arguments = {
        'configuration': 'chromium-rel-mac11-pro',
        'target': 'net_perftests',
        'dimensions': '{"key": "value"}',
        'test': 'test_name',
    }
    expected_quests = [
        quest.FindIsolate('chromium-rel-mac11-pro', 'net_perftests'),
        quest.RunTest({'key': 'value'}, _ALL_GTEST_RUN_TEST_ARGUMENTS),
    ]
    # NOTE(review): removed a leftover debug `print` of the private
    # RunTest._extra_args attribute; tests should not write to stdout, and
    # GenerateQuests only needs to be called once.
    self.assertEqual(quest_generator.GenerateQuests(arguments),
                     (arguments, expected_quests))
def testAllArguments(self):
    """Without dimensions, only a FindIsolate quest is generated."""
    configuration = 'chromium-rel-mac11-pro'
    target = 'telemetry_perf_tests'
    arguments = {'configuration': configuration, 'target': target}
    expected_quests = [quest.FindIsolate(configuration, target)]
    self.assertEqual(quest_generator.GenerateQuests(arguments),
                     (arguments, expected_quests))
def testMissingArguments(self):
    """Telemetry runs need both benchmark and browser; either alone fails."""
    common = {
        'configuration': 'chromium-rel-mac11-pro',
        'target': 'telemetry_perf_tests',
        'dimensions': '{}',
    }
    # Each entry supplies exactly one of the two required Telemetry
    # arguments, leaving the other one missing.
    for extra in ({'browser': 'release'},         # benchmark is missing.
                  {'benchmark': 'speedometer'}):  # browser is missing.
        arguments = dict(common, **extra)
        with self.assertRaises(TypeError):
            quest_generator.GenerateQuests(arguments)
def testInvalidExtraTestArgs(self):
    """A JSON string (not a list) for extra_test_args raises TypeError."""
    arguments = {
        'configuration': 'chromium-rel-mac11-pro',
        'target': 'net_perftests',
        'dimensions': '{"key": "value"}',
        'test': 'test_name',
        'chart': 'chart_name',
    }
    arguments['extra_test_args'] = '"this is a string"'
    with self.assertRaises(TypeError):
        quest_generator.GenerateQuests(arguments)
def testInvalidExtraTestArgs(self):
    """A JSON string (not a list) for extra_test_args raises TypeError."""
    arguments = {
        'configuration': 'chromium-rel-mac11-pro',
        'target': 'telemetry_perf_tests',
        'dimensions': '{}',
        'benchmark': 'speedometer',
        'browser': 'release',
        'extra_test_args': '"this is a string"',
    }
    self.assertRaises(TypeError, quest_generator.GenerateQuests, arguments)
def _CreateJob(self):
    """Start a new Pinpoint job.

    Reads the job parameters from the request, validates them, persists a
    new Job entity, starts it, and writes the job's id and URL as JSON to
    the response.
    """
    repeat_count = self.request.get('repeat_count')
    auto_explore = self.request.get('auto_explore') == '1'
    bug_id = self.request.get('bug_id')
    # The two changes bracketing the bisect range, each described by a
    # repository + git hash pair.
    change_1 = {
        'commits': [{
            'repository': self.request.get('start_repository'),
            'git_hash': self.request.get('start_git_hash')
        }],
    }
    change_2 = {
        'commits': [{
            'repository': self.request.get('end_repository'),
            'git_hash': self.request.get('end_git_hash')
        }]
    }
    # An optional patch applies only to the end change.
    if self.request.get('patch'):
        change_2['patch'] = self.request.get('patch')

    # Validate arguments and convert them to canonical internal representation.
    arguments, quests = quest_generator.GenerateQuests(self.request)
    repeat_count = _ValidateRepeatCount(repeat_count)
    bug_id = _ValidateBugId(bug_id)
    changes = _ValidateChanges(change_1, change_2)

    # Create job.
    job = job_module.Job.New(arguments=arguments,
                             quests=quests,
                             auto_explore=auto_explore,
                             repeat_count=repeat_count,
                             bug_id=bug_id)

    # Add changes.
    for c in changes:
        job.AddChange(c)

    # Put job into datastore.
    # NOTE(review): the first put() assigns the entity a key before Start()
    # runs; the second put() persists any state Start() mutated — presumably
    # intentional, confirm before collapsing into one put.
    job.put()

    # Start job.
    job.Start()
    job.put()

    self.response.out.write(
        json.dumps({
            'jobId': job.job_id,
            'jobUrl': job.url,
        }))
def testMissingArguments(self):
    """GraphJSON reads need both chart and trace; either alone fails."""
    common = {
        'configuration': 'chromium-rel-mac11-pro',
        'target': 'net_perftests',
        'dimensions': '{"key": "value"}',
        'test': 'test_name',
    }
    # Each entry supplies only one of the two required read arguments.
    for extra in ({'trace': 'trace_name'},   # chart is missing.
                  {'chart': 'chart_name'}):  # trace is missing.
        arguments = dict(common, **extra)
        with self.assertRaises(TypeError):
            quest_generator.GenerateQuests(arguments)
def _CreateJob(self):
    """Start a new Pinpoint job.

    Reads the job parameters from the request, validates them, persists a
    new Job entity, starts it, and writes the job's id and URL as JSON to
    the response.
    """
    repeat_count = self.request.get('repeat_count')
    auto_explore = self.request.get('auto_explore') == '1'
    bug_id = self.request.get('bug_id')
    # The two changes bracketing the bisect range; this variant uses the
    # older single 'base_commit' schema rather than a 'commits' list.
    change_1 = {
        'base_commit': {
            'repository': self.request.get('start_repository'),
            'git_hash': self.request.get('start_git_hash')
        }
    }
    change_2 = {
        'base_commit': {
            'repository': self.request.get('end_repository'),
            'git_hash': self.request.get('end_git_hash')
        }
    }

    # Validate arguments and convert them to canonical internal representation.
    arguments, quests = quest_generator.GenerateQuests(self.request)
    repeat_count = _ValidateRepeatCount(repeat_count)
    bug_id = _ValidateBugId(bug_id)
    changes = _ValidateChanges(change_1, change_2)

    # Create job.
    job = job_module.Job.New(arguments=arguments,
                             quests=quests,
                             auto_explore=auto_explore,
                             repeat_count=repeat_count,
                             bug_id=bug_id)

    # Add changes.
    for c in changes:
        job.AddChange(c)

    # Put job into datastore.
    # NOTE(review): the first put() assigns the entity a key before Start()
    # runs; the second put() persists any state Start() mutated — presumably
    # intentional, confirm before collapsing into one put.
    job.put()

    # Start job.
    job.Start()
    job.put()

    # TODO: Figure out if these should be underscores or lowerCamelCase.
    # TODO: They should match the input arguments.
    self.response.out.write(
        json.dumps({
            'jobId': job.job_id,
            'jobUrl': job.url,
        }))
def testStartupBenchmarkRepeatCount(self):
    """Startup benchmarks get the startup-specific run-test arguments."""
    arguments = {
        'configuration': 'chromium-rel-mac11-pro',
        'target': 'telemetry_perf_tests',
        'dimensions': '{}',
        'benchmark': 'start_with_url.warm.startup_pages',
        'browser': 'release',
    }
    expected = (arguments, [
        quest.FindIsolate('chromium-rel-mac11-pro', 'telemetry_perf_tests'),
        quest.RunTest({}, _STARTUP_BENCHMARK_RUN_TEST_ARGUMENTS),
    ])
    self.assertEqual(quest_generator.GenerateQuests(arguments), expected)
def testMinimumArguments(self):
    """The minimal Telemetry argument set yields FindIsolate + RunTest."""
    arguments = dict(
        configuration='chromium-rel-mac11-pro',
        target='telemetry_perf_tests',
        dimensions='{}',
        benchmark='speedometer',
        browser='release',
    )
    expected = (arguments, [
        quest.FindIsolate('chromium-rel-mac11-pro', 'telemetry_perf_tests'),
        quest.RunTest({}, _MIN_TELEMETRY_RUN_TEST_ARGUMENTS),
    ])
    self.assertEqual(quest_generator.GenerateQuests(arguments), expected)
def testAllArguments(self):
    """chart + trace append a ReadGraphJsonValue quest after the run."""
    arguments = {
        'configuration': 'chromium-rel-mac11-pro',
        'target': 'net_perftests',
        'dimensions': '{"key": "value"}',
        'chart': 'chart_name',
        'trace': 'trace_name',
    }
    expected = (arguments, [
        quest.FindIsolate('chromium-rel-mac11-pro', 'net_perftests'),
        quest.RunTest({'key': 'value'}, _MIN_GTEST_RUN_TEST_ARGUMENTS),
        quest.ReadGraphJsonValue('chart_name', 'trace_name'),
    ])
    self.assertEqual(quest_generator.GenerateQuests(arguments), expected)
def testAllArguments(self):
    """Every Telemetry argument (including story) maps to the full arg list."""
    arguments = {
        'configuration': 'chromium-rel-mac11-pro',
        'target': 'telemetry_perf_tests',
        'dimensions': '{"key": "value"}',
        'benchmark': 'speedometer',
        'browser': 'release',
        'story': 'http://www.fifa.com/',
    }
    generated = quest_generator.GenerateQuests(arguments)
    expected_quests = [
        quest.FindIsolate('chromium-rel-mac11-pro', 'telemetry_perf_tests'),
        quest.RunTest({'key': 'value'}, _ALL_TELEMETRY_RUN_TEST_ARGUMENTS),
    ]
    self.assertEqual(generated, (arguments, expected_quests))
def testAllArguments(self):
    """tir_label + chart + trace append a ReadChartJsonValue quest."""
    arguments = {
        'configuration': 'chromium-rel-mac11-pro',
        'target': 'telemetry_perf_tests',
        'dimensions': '{"key": "value"}',
        'benchmark': 'speedometer',
        'browser': 'release',
        'tir_label': 'pcv1-cold',
        'chart': 'timeToFirst',
        'trace': 'trace_name',
    }
    generated = quest_generator.GenerateQuests(arguments)
    expected_quests = [
        quest.FindIsolate('chromium-rel-mac11-pro', 'telemetry_perf_tests'),
        quest.RunTest({'key': 'value'}, _MIN_TELEMETRY_RUN_TEST_ARGUMENTS),
        quest.ReadChartJsonValue('timeToFirst', 'pcv1-cold', 'trace_name'),
    ]
    self.assertEqual(generated, (arguments, expected_quests))