def testPopulateAndEvaluateAdderGraph(self):
  job = job_module.Job.New((), ())
  task_graph = task_module.TaskGraph(
      vertices=[
          task_module.TaskVertex(
              id='input0', vertex_type='constant', payload={'value': 0}),
          task_module.TaskVertex(
              id='input1', vertex_type='constant', payload={'value': 1}),
          task_module.TaskVertex(
              id='plus', vertex_type='operator+', payload={}),
      ],
      edges=[
          task_module.Dependency(from_='plus', to='input0'),
          task_module.Dependency(from_='plus', to='input1'),
      ],
  )
  task_module.PopulateTaskGraph(job, task_graph)

  def AdderEvaluator(task, _, accumulator):
    if task.task_type == 'constant':
      accumulator[task.id] = task.payload.get('value', 0)
    elif task.task_type == 'operator+':
      inputs = [accumulator.get(dep) for dep in task.dependencies]
      accumulator[task.id] = functools.reduce(lambda a, v: a + v, inputs)

  accumulator = task_module.Evaluate(job, {}, AdderEvaluator)
  self.assertEqual(1, accumulator.get('plus'))
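# The test above exercises the core contract of Evaluate: walk the task graph
# dependencies-first, call the evaluator with (task, event, accumulator), and
# let the evaluator record per-task results keyed by task id. Below is a
# minimal, dependency-free sketch of that contract; Task and EvaluateGraph are
# hypothetical stand-ins, not the task_module API:
import collections
import functools

Task = collections.namedtuple('Task', ('id', 'task_type', 'payload',
                                       'dependencies'))


def EvaluateGraph(tasks, evaluator):
  accumulator = {}
  by_id = {task.id: task for task in tasks}
  visited = set()

  def Visit(task):
    # Post-order traversal: dependencies are evaluated before dependents, so
    # their results are already in the accumulator when we need them.
    if task.id in visited:
      return
    visited.add(task.id)
    for dep in task.dependencies:
      Visit(by_id[dep])
    evaluator(task, None, accumulator)

  for task in tasks:
    Visit(task)
  return accumulator


def AdderEvaluator(task, _, accumulator):
  if task.task_type == 'constant':
    accumulator[task.id] = task.payload.get('value', 0)
  elif task.task_type == 'operator+':
    accumulator[task.id] = functools.reduce(
        lambda a, v: a + v, (accumulator[dep] for dep in task.dependencies))


tasks = [
    Task('input0', 'constant', {'value': 0}, ()),
    Task('input1', 'constant', {'value': 1}, ()),
    Task('plus', 'operator+', {}, ('input0', 'input1')),
]
assert EvaluateGraph(tasks, AdderEvaluator)['plus'] == 1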
def testPopulateCycles(self):
  job = job_module.Job.New((), ())
  task_graph = task_module.TaskGraph(
      vertices=[
          task_module.TaskVertex(id='node_0', vertex_type='process',
                                 payload={}),
          task_module.TaskVertex(id='node_1', vertex_type='process',
                                 payload={})
      ],
      edges=[
          task_module.Dependency(from_='node_0', to='node_1'),
          task_module.Dependency(from_='node_1', to='node_0')
      ])
  task_module.PopulateTaskGraph(job, task_graph)
  calls = {}

  def CycleEvaluator(task, event, accumulator):
    logging.debug('Evaluate(%s, %s, %s) called.', task.id, event, accumulator)
    calls[task.id] = calls.get(task.id, 0) + 1
    return None

  task_module.Evaluate(job, 'test', CycleEvaluator)
  self.assertDictEqual({'node_0': 1, 'node_1': 1}, calls)
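# The cycle test asserts that evaluation terminates, with each vertex visited
# exactly once, even though node_0 and node_1 depend on each other. A
# plausible way to get that guarantee is to mark a vertex as seen *before*
# recursing into its dependencies; this is a sketch of the idea, not the
# task_module implementation:
def WalkOnce(graph, visit):
  """Visits every vertex exactly once; `graph` maps id -> dependency ids."""
  seen = set()

  def Walk(vertex):
    if vertex in seen:  # Breaks the node_0 <-> node_1 cycle.
      return
    seen.add(vertex)
    for dep in graph[vertex]:
      Walk(dep)
    visit(vertex)

  for vertex in sorted(graph):
    Walk(vertex)


calls = {}
WalkOnce({'node_0': ['node_1'], 'node_1': ['node_0']},
         lambda v: calls.__setitem__(v, calls.get(v, 0) + 1))
assert calls == {'node_0': 1, 'node_1': 1}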
def testPopulateEvaluateCallCounts(self):
  job = job_module.Job.New((), ())
  task_module.PopulateTaskGraph(
      job,
      task_module.TaskGraph(
          vertices=[
              task_module.TaskVertex(id='leaf_0', vertex_type='node',
                                     payload={}),
              task_module.TaskVertex(id='leaf_1', vertex_type='node',
                                     payload={}),
              task_module.TaskVertex(id='parent', vertex_type='node',
                                     payload={}),
          ],
          edges=[
              task_module.Dependency(from_='parent', to='leaf_0'),
              task_module.Dependency(from_='parent', to='leaf_1'),
          ]))
  calls = {}

  def CallCountEvaluator(task, event, accumulator):
    logging.debug('Evaluate(%s, %s, %s) called.', task.id, event, accumulator)
    calls[task.id] = calls.get(task.id, 0) + 1
    return None

  task_module.Evaluate(job, 'test', CallCountEvaluator)
  self.assertDictEqual({
      'leaf_0': 1,
      'leaf_1': 1,
      'parent': 1,
  }, calls)
def testPopulateEmptyGraph(self):
  job = job_module.Job.New((), ())
  task_graph = task_module.TaskGraph(vertices=[], edges=[])
  task_module.PopulateTaskGraph(job, task_graph)
  evaluator = mock.MagicMock()
  task_module.Evaluate(job, 'test', evaluator)
  # With no vertices in the graph there is nothing to evaluate, so the
  # evaluator must never have been invoked.
  evaluator.assert_not_called()
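# The empty-graph test relies on mock.MagicMock standing in for an evaluator:
# any callable accepting (task, event, accumulator) works, and the mock
# records whether it was ever invoked. A self-contained illustration of the
# same pattern, using the standard-library mock:
from unittest import mock

evaluator = mock.MagicMock()
evaluator.assert_not_called()  # Passes: nothing has called it yet.
evaluator('task', 'event', {})
evaluator.assert_called_once_with('task', 'event', {})  # Passes after one call.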
def testMissingDependency(self):
  job = job_module.Job.New((), ())
  task_module.PopulateTaskGraph(
      job,
      task_module.TaskGraph(
          vertices=[
              task_module.TaskVertex(
                  id='run_test_bbbbbbb_0',
                  vertex_type='run_test',
                  payload={
                      'swarming_server': 'some_server',
                      'dimensions': DIMENSIONS,
                      'extra_args': [],
                  }),
          ],
          edges=[]))
  self.assertEqual(
      {
          'run_test_bbbbbbb_0': {
              'errors': [{
                  'cause': 'DependencyError',
                  'message': mock.ANY
              }]
          }
      },
      task_module.Evaluate(
          job,
          event_module.Event(type='validate', target_task=None, payload={}),
          run_test.Validator()))
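# run_test tasks are expected to depend on a find_isolate task whose output
# feeds them; the test above checks that validation flags a run_test vertex
# with no dependencies as a DependencyError. A minimal sketch of that style
# of structural check (ValidateDependencies is a hypothetical helper, not the
# run_test.Validator implementation):
def ValidateDependencies(vertices, edges):
  """vertices: dict of id -> vertex_type; edges: list of (from_, to)."""
  deps = {}
  for from_, to in edges:
    deps.setdefault(from_, []).append(to)
  errors = {}
  for vertex_id, vertex_type in vertices.items():
    if vertex_type == 'run_test' and not deps.get(vertex_id):
      errors[vertex_id] = {
          'errors': [{
              'cause': 'DependencyError',
              'message': 'run_test task has no dependencies.',
          }]
      }
  return errors


assert 'run_test_bbbbbbb_0' in ValidateDependencies(
    {'run_test_bbbbbbb_0': 'run_test'}, [])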
def setUp(self):
  super(FindIsolateEvaluatorBase, self).setUp()
  self.maxDiff = None  # pylint: disable=invalid-name
  self.job = job_module.Job.New((), ())
  task_module.PopulateTaskGraph(
      self.job,
      find_isolate.CreateGraph(
          find_isolate.TaskOptions(
              builder='Mac Builder',
              target='telemetry_perf_tests',
              bucket='luci.bucket',
              change=change_module.Change.FromDict({
                  'commits': [{
                      'repository': 'chromium',
                      'git_hash': '7c7e90be',
                  }],
              }))))
def setUp(self):
  super(EvaluatorTest, self).setUp()
  self.maxDiff = None
  self.job = job_module.Job.New((), ())
  task_module.PopulateTaskGraph(
      self.job,
      run_test.CreateGraph(
          run_test.TaskOptions(
              build_options=find_isolate.TaskOptions(
                  builder='Some Builder',
                  target='telemetry_perf_tests',
                  bucket='luci.bucket',
                  change=change_module.Change.FromDict({
                      'commits': [{
                          'repository': 'chromium',
                          'git_hash': 'aaaaaaa',
                      }]
                  })),
              swarming_server='some_server',
              dimensions=DIMENSIONS,
              extra_args=[],
              attempts=10)))
def PopulateTaskGraph(self,
                      benchmark=None,
                      chart=None,
                      grouping_label=None,
                      story=None,
                      statistic=None,
                      trace='some_trace',
                      mode='histogram_sets'):
  task_module.PopulateTaskGraph(
      self.job,
      read_value.CreateGraph(
          read_value.TaskOptions(
              test_options=run_test.TaskOptions(
                  build_options=find_isolate.TaskOptions(
                      builder='Some Builder',
                      target='telemetry_perf_tests',
                      bucket='luci.bucket',
                      change=change_module.Change.FromDict({
                          'commits': [{
                              'repository': 'chromium',
                              'git_hash': 'aaaaaaa',
                          }]
                      })),
                  swarming_server='some_server',
                  dimensions=[],
                  extra_args=[],
                  attempts=10),
              benchmark=benchmark,
              histogram_options=read_value.HistogramOptions(
                  grouping_label=grouping_label,
                  story=story,
                  statistic=statistic,
                  histogram_name=chart,
              ),
              graph_json_options=read_value.GraphJsonOptions(
                  chart=chart, trace=trace),
              mode=mode,
          )))
def setUp(self):
  super(EvaluatorTest, self).setUp()
  self.maxDiff = None
  self.job = job_module.Job.New((), ())
  task_module.PopulateTaskGraph(
      self.job,
      task_module.TaskGraph(
          vertices=[
              task_module.TaskVertex(
                  id='build_aaaaaaa',
                  vertex_type='find_isolate',
                  payload={
                      'builder': 'Some Builder',
                      'target': 'telemetry_perf_tests',
                      'bucket': 'luci.bucket',
                      'change': {
                          'commits': [{
                              'repository': 'chromium',
                              'git_hash': 'aaaaaaa',
                          }]
                      }
                  })
          ] + [
              task_module.TaskVertex(
                  id='run_test_aaaaaaa_%s' % (attempt,),
                  vertex_type='run_test',
                  payload={
                      'swarming_server': 'some_server',
                      'dimensions': DIMENSIONS,
                      'extra_args': [],
                  }) for attempt in range(11)
          ],
          edges=[
              task_module.Dependency(
                  from_='run_test_aaaaaaa_%s' % (attempt,),
                  to='build_aaaaaaa') for attempt in range(11)
          ],
      ))
def setUp(self):
  super(EvaluateTest, self).setUp()
  self.maxDiff = None  # pylint: disable=invalid-name
  self.job = job_module.Job.New((), ())
  task_module.PopulateTaskGraph(
      self.job,
      task_module.TaskGraph(
          vertices=[
              task_module.TaskVertex(id='task_0', vertex_type='task',
                                     payload={}),
              task_module.TaskVertex(id='task_1', vertex_type='task',
                                     payload={}),
              task_module.TaskVertex(id='task_2', vertex_type='task',
                                     payload={}),
          ],
          edges=[
              task_module.Dependency(from_='task_2', to='task_0'),
              task_module.Dependency(from_='task_2', to='task_1'),
          ]))
def setUp(self):
  super(FindIsolateEvaluatorBase, self).setUp()
  self.maxDiff = None  # pylint: disable=invalid-name
  self.job = job_module.Job.New((), ())
  task_module.PopulateTaskGraph(
      self.job,
      task_module.TaskGraph(
          vertices=[
              task_module.TaskVertex(
                  id='build_7c7e90be',
                  vertex_type='find_isolate',
                  payload={
                      'builder': 'Mac Builder',
                      'target': 'telemetry_perf_tests',
                      'bucket': 'luci.bucket',
                      'change': {
                          'commits': [{
                              'repository': 'chromium',
                              'git_hash': '7c7e90be',
                          }],
                      },
                  })
          ],
          edges=[]))
def _CreateJob(request):
  """Creates a new Pinpoint job from WebOb request arguments."""
  original_arguments = request.params.mixed()
  logging.debug('Received Params: %s', original_arguments)

  # This call will fail if some of the required arguments are not in the
  # original request.
  _ValidateRequiredParams(original_arguments)

  arguments = _ArgumentsWithConfiguration(original_arguments)
  logging.debug('Updated Params: %s', arguments)

  # Validate arguments and convert them to canonical internal representation.
  quests = _GenerateQuests(arguments)

  # Validate the priority, if it's present.
  priority = _ValidatePriority(arguments.get('priority'))

  # Validate and find the associated issue.
  bug_id, project = _ValidateBugId(
      arguments.get('bug_id'), arguments.get('project', 'chromium'))
  comparison_mode = _ValidateComparisonMode(arguments.get('comparison_mode'))
  comparison_magnitude = _ValidateComparisonMagnitude(
      arguments.get('comparison_magnitude'))
  gerrit_server, gerrit_change_id = _ValidatePatch(
      arguments.get('patch', arguments.get('experiment_patch')))
  name = arguments.get('name')
  pin = _ValidatePin(arguments.get('pin'))
  tags = _ValidateTags(arguments.get('tags'))
  user = _ValidateUser(arguments.get('user'))
  changes = _ValidateChanges(comparison_mode, arguments)

  # If this is a try job, we assume it's higher priority than bisections, so
  # we'll set it at a negative priority.
  if 'priority' not in arguments and comparison_mode == job_state.TRY:
    priority = -1

  # TODO(dberris): Make this the default when we've graduated the beta.
  use_execution_engine = (
      arguments.get('experimental_execution_engine')
      and arguments.get('comparison_mode') == job_state.PERFORMANCE)

  # Ensure that we have the required fields in tryjob requests.
  if comparison_mode == 'try':
    if 'benchmark' not in arguments:
      raise ValueError('Missing required "benchmark" argument.')

    # First we check whether there's a quest that's of type 'RunTelemetryTest'.
    is_telemetry_test = any(
        [isinstance(q, quest_module.RunTelemetryTest) for q in quests])
    if is_telemetry_test and ('story' not in arguments
                              and 'story_tags' not in arguments):
      raise ValueError(
          'Missing either "story" or "story_tags" as arguments for try jobs.')

  # Create job.
  job = job_module.Job.New(
      quests if not use_execution_engine else (),
      changes,
      arguments=original_arguments,
      bug_id=bug_id,
      comparison_mode=comparison_mode,
      comparison_magnitude=comparison_magnitude,
      gerrit_server=gerrit_server,
      gerrit_change_id=gerrit_change_id,
      name=name,
      pin=pin,
      tags=tags,
      user=user,
      priority=priority,
      use_execution_engine=use_execution_engine,
      project=project)

  if use_execution_engine:
    # TODO(dberris): We need to figure out a way to get the arguments to be
    # more structured when they come in from the UI, so that we don't need to
    # do the manual translation of options here.
    # TODO(dberris): Decide whether we can make some of these hard-coded
    # options be part of a template that's available in the UI (or by
    # configuration somewhere else, maybe luci-config?)
    start_change, end_change = changes
    target = arguments.get('target')
    task_options = performance_bisection.TaskOptions(
        build_option_template=performance_bisection.BuildOptionTemplate(
            builder=arguments.get('builder'),
            target=target,
            bucket=arguments.get('bucket', 'master.tryserver.chromium.perf'),
        ),
        test_option_template=performance_bisection.TestOptionTemplate(
            swarming_server=arguments.get('swarming_server'),
            dimensions=arguments.get('dimensions'),
            extra_args=arguments.get('extra_test_args'),
        ),
        read_option_template=performance_bisection.ReadOptionTemplate(
            benchmark=arguments.get('benchmark'),
            histogram_options=read_value.HistogramOptions(
                grouping_label=arguments.get('grouping_label'),
                story=arguments.get('story'),
                statistic=arguments.get('statistic'),
                histogram_name=arguments.get('chart'),
            ),
            graph_json_options=read_value.GraphJsonOptions(
                chart=arguments.get('chart'),
                trace=arguments.get('trace')),
            mode=('histogram_sets'
                  if target in performance_bisection.EXPERIMENTAL_TARGET_SUPPORT
                  else 'graph_json')),
        analysis_options=performance_bisection.AnalysisOptions(
            comparison_magnitude=arguments.get('comparison_magnitude'),
            min_attempts=10,
            max_attempts=60,
        ),
        start_change=start_change,
        end_change=end_change,
        pinned_change=arguments.get('patch'),
    )
    task_module.PopulateTaskGraph(
        job, performance_bisection.CreateGraph(task_options, arguments))
  return job
def _CreateJob(request):
  """Creates a new Pinpoint job from WebOb request arguments."""
  original_arguments = request.params.mixed()
  logging.debug('Received Params: %s', original_arguments)

  arguments = _ArgumentsWithConfiguration(original_arguments)
  logging.debug('Updated Params: %s', arguments)

  # Validate arguments and convert them to canonical internal representation.
  quests = _GenerateQuests(arguments)
  bug_id = _ValidateBugId(arguments.get('bug_id'))
  comparison_mode = _ValidateComparisonMode(arguments.get('comparison_mode'))
  comparison_magnitude = _ValidateComparisonMagnitude(
      arguments.get('comparison_magnitude'))
  gerrit_server, gerrit_change_id = _ValidatePatch(arguments.get('patch'))
  name = arguments.get('name')
  pin = _ValidatePin(arguments.get('pin'))
  tags = _ValidateTags(arguments.get('tags'))
  user = _ValidateUser(arguments.get('user'))
  changes = _ValidateChanges(comparison_mode, arguments)

  # TODO(dberris): Make this the default when we've graduated the beta.
  use_execution_engine = (
      arguments.get('experimental_execution_engine')
      and arguments.get('comparison_mode') == job_state.PERFORMANCE)

  # Create job.
  job = job_module.Job.New(
      quests if not use_execution_engine else (),
      changes,
      arguments=original_arguments,
      bug_id=bug_id,
      comparison_mode=comparison_mode,
      comparison_magnitude=comparison_magnitude,
      gerrit_server=gerrit_server,
      gerrit_change_id=gerrit_change_id,
      name=name,
      pin=pin,
      tags=tags,
      user=user,
      use_execution_engine=use_execution_engine)

  if use_execution_engine:
    # TODO(dberris): We need to figure out a way to get the arguments to be
    # more structured when they come in from the UI, so that we don't need to
    # do the manual translation of options here.
    # TODO(dberris): Decide whether we can make some of these hard-coded
    # options be part of a template that's available in the UI (or by
    # configuration somewhere else, maybe luci-config?)
    start_change, end_change = changes
    target = arguments.get('target')
    task_options = performance_bisection.TaskOptions(
        build_option_template=performance_bisection.BuildOptionTemplate(
            builder=arguments.get('builder'),
            target=target,
            bucket=arguments.get('bucket', 'master.tryserver.chromium.perf'),
        ),
        test_option_template=performance_bisection.TestOptionTemplate(
            swarming_server=arguments.get('swarming_server'),
            dimensions=arguments.get('dimensions'),
            extra_args=arguments.get('extra_test_args'),
        ),
        read_option_template=performance_bisection.ReadOptionTemplate(
            benchmark=arguments.get('benchmark'),
            histogram_options=read_value.HistogramOptions(
                grouping_label=arguments.get('grouping_label'),
                story=arguments.get('story'),
                statistic=arguments.get('statistic'),
                histogram_name=arguments.get('chart'),
            ),
            graph_json_options=read_value.GraphJsonOptions(
                chart=arguments.get('chart'),
                trace=arguments.get('trace')),
            mode=('histogram_sets'
                  if target in performance_bisection.EXPERIMENTAL_TARGET_SUPPORT
                  else 'graph_json')),
        analysis_options=performance_bisection.AnalysisOptions(
            comparison_magnitude=arguments.get('comparison_magnitude'),
            min_attempts=10,
            max_attempts=60,
        ),
        start_change=start_change,
        end_change=end_change,
        pinned_change=arguments.get('patch'),
    )
    task_module.PopulateTaskGraph(
        job, performance_bisection.CreateGraph(task_options, arguments))
  return job
def testPopulateAndEvaluateGrowingGraph(self):
  job = job_module.Job.New((), ())
  task_module.PopulateTaskGraph(
      job,
      task_module.TaskGraph(
          vertices=[
              task_module.TaskVertex(
                  id='rev_0',
                  vertex_type='revision',
                  payload={
                      'revision': '0',
                      'position': 0
                  }),
              task_module.TaskVertex(
                  id='rev_100',
                  vertex_type='revision',
                  payload={
                      'revision': '100',
                      'position': 100
                  }),
              task_module.TaskVertex(
                  id='bisection', vertex_type='bisection', payload={}),
          ],
          edges=[
              task_module.Dependency(from_='bisection', to='rev_0'),
              task_module.Dependency(from_='bisection', to='rev_100'),
          ]))

  def FindMidpoint(a, b):
    offset = (b - a) // 2
    if offset == 0:
      return None
    return a + offset

  def ExplorationEvaluator(task, event, accumulator):
    logging.debug('Evaluating: %s, %s, %s', task, event, accumulator)
    if task.task_type == 'revision':
      accumulator[task.id] = task.payload
      return

    if task.task_type == 'bisection':
      rev_positions = list(
          sorted(
              accumulator.get(dep).get('position')
              for dep in task.dependencies))
      results = list(rev_positions)
      insertion_list = exploration.Speculate(
          rev_positions,
          # Assume we always find a difference between two positions.
          lambda *_: True,
          # Do nothing when we encounter an unknown error.
          lambda _: None,
          # Provide the function that will find the midpoint between two
          # revisions.
          FindMidpoint,
          # Speculate two levels deep in the bisection space.
          levels=2)
      for index, change in insertion_list:
        results.insert(index, change)

      new_positions = set(results) - set(rev_positions)
      if new_positions:

        def GraphExtender(_):
          logging.debug('New revisions: %s', new_positions)
          task_module.ExtendTaskGraph(job, [
              task_module.TaskVertex(
                  id='rev_%s' % (rev,),
                  vertex_type='revision',
                  payload={
                      'revision': '%s' % (rev,),
                      'position': rev
                  }) for rev in new_positions
          ], [
              task_module.Dependency(from_='bisection', to='rev_%s' % (rev,))
              for rev in new_positions
          ])

        return [GraphExtender]

  accumulator = task_module.Evaluate(job, None, ExplorationEvaluator)
  self.assertEqual(
      list(sorted(accumulator)),
      sorted(['rev_%s' % (rev,) for rev in range(0, 101)]))
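# The growing-graph test relies on the bisection eventually saturating the
# revision range: each evaluation pass speculates new midpoints, extends the
# graph, and is re-evaluated until no midpoint remains. A dependency-free
# sketch of that fixed-point loop follows; saturate() is a hypothetical
# helper, not the exploration.Speculate API:
def saturate(positions):
  # Repeatedly insert midpoints until every adjacent pair is 1 apart.
  known = set(positions)
  changed = True
  while changed:
    changed = False
    ordered = sorted(known)
    for a, b in zip(ordered, ordered[1:]):
      mid = a + (b - a) // 2
      if mid != a and mid not in known:
        known.add(mid)
        changed = True
  return sorted(known)


# Starting from the endpoints used in the test, the loop converges on every
# position in [0, 100], matching the asserted 101 'rev_*' vertices.
assert saturate([0, 100]) == list(range(101))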
def testMissingDependencyInputs(self):
  job = job_module.Job.New((), ())
  task_module.PopulateTaskGraph(
      job,
      task_module.TaskGraph(
          vertices=[
              task_module.TaskVertex(
                  id='build_aaaaaaa',
                  vertex_type='find_isolate',
                  payload={
                      'builder': 'Some Builder',
                      'target': 'telemetry_perf_tests',
                      'bucket': 'luci.bucket',
                      'change': {
                          'commits': [{
                              'repository': 'chromium',
                              'git_hash': 'aaaaaaa',
                          }]
                      }
                  }),
              task_module.TaskVertex(
                  id='run_test_aaaaaaa_0',
                  vertex_type='run_test',
                  payload={
                      'swarming_server': 'some_server',
                      'dimensions': DIMENSIONS,
                      'extra_args': [],
                  }),
          ],
          edges=[
              task_module.Dependency(
                  from_='run_test_aaaaaaa_0', to='build_aaaaaaa')
          ],
      ))

  # This time we're fine, there should be no errors.
  self.assertEqual({},
                   task_module.Evaluate(
                       job,
                       event_module.Event(
                           type='validate', target_task=None, payload={}),
                       run_test.Validator()))

  # Send an initiate message then catch that we've not provided the required
  # payload in the task when it's ongoing.
  self.assertEqual(
      {
          'build_aaaaaaa': mock.ANY,
          'run_test_aaaaaaa_0': {
              'errors': [{
                  'cause': 'MissingDependencyInputs',
                  'message': mock.ANY
              }]
          }
      },
      task_module.Evaluate(
          job,
          event_module.Event(type='initiate', target_task=None, payload={}),
          evaluators.FilteringEvaluator(
              predicate=evaluators.TaskTypeEq('find_isolate'),
              delegate=evaluators.SequenceEvaluator(
                  evaluators=(
                      functools.partial(FakeNotFoundIsolate, job),
                      evaluators.TaskPayloadLiftingEvaluator(),
                  )),
              alternative=run_test.Validator()),
      ))
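# The initiate-phase expectation above is built from evaluator combinators: a
# FilteringEvaluator routes tasks matching a predicate to a delegate (here, a
# sequence that fakes a missing isolate and lifts the task payload) and all
# other tasks to an alternative (the run_test validator). A dependency-free
# sketch of that combinator pattern; the class is illustrative, not the
# evaluators module implementation:
class FilteringEvaluator(object):

  def __init__(self, predicate, delegate, alternative):
    self._predicate = predicate
    self._delegate = delegate
    self._alternative = alternative

  def __call__(self, task, event, accumulator):
    # Dispatch on the predicate; both branches share the evaluator signature.
    if self._predicate(task):
      return self._delegate(task, event, accumulator)
    return self._alternative(task, event, accumulator)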
def PopulateSimpleBisectionGraph(self):
  """Helper function to populate a task graph representing a bisection.

  This function will populate the following graph on the associated job
  initialised in the setUp function:

  find_culprit
   |
   +--> read_value(start_cl, [0..min_attempts])
   |     |
   |     +--> run_test(start_cl, [0..min_attempts])
   |           |
   |           +--> find_isolate(start_cl)
   |
   +--> read_value(end_cl, [0..min_attempts])
         |
         +--> run_test(end_cl, [0..min_attempts])
               |
               +--> find_isolate(end_cl)

  This is the starting point for all bisections, on which we expect the
  evaluator implementation to operate. In this specific case, we're setting
  min_attempts at 10 and max_attempts at 100, then using the special
  `commit_0` and `commit_5` git hashes as the range to bisect over. The test
  base class sets up special meanings for these pseudo-hashes and all
  infrastructure related to expanding that range.
  """
  task_module.PopulateTaskGraph(
      self.job,
      performance_bisection.CreateGraph(
          performance_bisection.TaskOptions(
              build_option_template=performance_bisection.BuildOptionTemplate(
                  builder='Some Builder',
                  target='performance_telemetry_test',
                  bucket='luci.bucket'),
              test_option_template=performance_bisection.TestOptionTemplate(
                  swarming_server='some_server',
                  dimensions=[],
                  extra_args=[],
              ),
              read_option_template=performance_bisection.ReadOptionTemplate(
                  benchmark='some_benchmark',
                  histogram_options=read_value.HistogramOptions(
                      grouping_label='some_label',
                      story='some_story',
                      statistic='avg',
                  ),
                  graph_json_options=read_value.GraphJsonOptions(
                      chart='some_chart',
                      trace='some_trace',
                  ),
                  mode='histogram_sets'),
              analysis_options=performance_bisection.AnalysisOptions(
                  comparison_magnitude=1.0,
                  min_attempts=10,
                  max_attempts=100,
              ),
              start_change=change_module.Change.FromDict({
                  'commits': [{
                      'repository': 'chromium',
                      'git_hash': 'commit_0'
                  }]
              }),
              end_change=change_module.Change.FromDict({
                  'commits': [{
                      'repository': 'chromium',
                      'git_hash': 'commit_5'
                  }]
              }),
              pinned_change=None,
          )))
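# Given the diagram in the docstring, the size of the initial bisection graph
# follows directly from min_attempts and the number of changes: one
# find_culprit root, plus one find_isolate vertex and min_attempts run_test
# and read_value vertices per change. A sanity-check sketch; the helper and
# the resulting count are inferred from the docstring, not taken from
# performance_bisection itself:
def ExpectedVertexCount(num_changes, min_attempts):
  per_change = 1 + 2 * min_attempts  # find_isolate + run_test + read_value.
  return 1 + num_changes * per_change  # Plus the find_culprit root.


# For the two changes (commit_0 and commit_5) and min_attempts=10 used above:
assert ExpectedVertexCount(num_changes=2, min_attempts=10) == 43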