def __call__(self, accumulator):
        # Outline:
        #   - Given the job and task, extend the TaskGraph to add new tasks and
        #     dependencies, being careful to filter the IDs from what we already see
        #     in the accumulator to avoid graph amendment errors.
        #   - If we do encounter graph amendment errors, we should log those and
        #     not block progress, because they can only happen when concurrent
        #     updates are being performed with the same actions.
        build_option_template = BuildOptionTemplate(
            **self.task.payload.get('build_option_template'))
        test_option_template = TestOptionTemplate(
            **self.task.payload.get('test_option_template'))

        # The ReadOptionTemplate is special because it has nested structures, so
        # we'll have to reconstitute those accordingly.
        read_option_template_map = self.task.payload.get(
            'read_option_template')
        read_option_template = ReadOptionTemplate(
            benchmark=read_option_template_map.get('benchmark'),
            histogram_options=read_value.HistogramOptions(
                **read_option_template_map.get('histogram_options')),
            graph_json_options=read_value.GraphJsonOptions(
                **read_option_template_map.get('graph_json_options')),
            mode=read_option_template_map.get('mode'))

        analysis_options_dict = self.task.payload.get('analysis_options')
        if self.additional_attempts:
            analysis_options_dict['min_attempts'] = min(
                analysis_options_dict.get('min_attempts', 0) +
                self.additional_attempts,
                analysis_options_dict.get('max_attempts', 100))
        analysis_options = AnalysisOptions(**analysis_options_dict)

        new_subgraph = read_value.CreateGraph(
            _CreateReadTaskOptions(build_option_template, test_option_template,
                                   read_option_template, analysis_options,
                                   self.change,
                                   self.task.payload.get('arguments', {})))
        try:
            task_module.ExtendTaskGraph(
                self.job,
                vertices=[
                    # Add all of the new vertices we do not have in the graph yet.
                    v for v in new_subgraph.vertices if v.id not in accumulator
                ],
                dependencies=[
                    # Only add dependencies to the new 'read_value' tasks.
                    task_module.Dependency(from_=self.task.id, to=v.id)
                    for v in new_subgraph.vertices if v.id not in accumulator
                    and v.vertex_type == 'read_value'
                ])
        except task_module.InvalidAmendment as e:
            logging.error('Failed to amend graph: %s', e)
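
# A minimal, self-contained sketch of the filter-then-extend pattern used in
# __call__ above. The Vertex and Dependency tuples and the accumulator shape
# are assumptions for illustration; they are not the real task_module types.
import collections

Vertex = collections.namedtuple('Vertex', ('id', 'vertex_type', 'payload'))
Dependency = collections.namedtuple('Dependency', ('from_', 'to'))


def ExtendFiltered(task_id, new_vertices, accumulator):
    # Keep only vertices the evaluator has not seen yet, so that a concurrent
    # update which already added them cannot trigger an amendment error.
    fresh = [v for v in new_vertices if v.id not in accumulator]
    dependencies = [
        Dependency(from_=task_id, to=v.id)
        for v in fresh
        if v.vertex_type == 'read_value'
    ]
    return fresh, dependencies


# Usage: one candidate is already present in the accumulator, so only the
# unseen 'read_value' vertex is added and wired to the parent task.
seen = {'read_value_chromium@aaaaaaa_0': {}}
candidates = [
    Vertex('read_value_chromium@aaaaaaa_0', 'read_value', {}),
    Vertex('read_value_chromium@aaaaaaa_1', 'read_value', {}),
]
print(ExtendFiltered('performance_bisection', candidates, seen))
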
def PopulateTaskGraph(self,
                       benchmark=None,
                       chart=None,
                       grouping_label=None,
                       story=None,
                       statistic=None,
                       trace='some_trace',
                       mode='histogram_sets'):
   task_module.PopulateTaskGraph(
       self.job,
       read_value.CreateGraph(
           read_value.TaskOptions(
               test_options=run_test.TaskOptions(
                   build_options=find_isolate.TaskOptions(
                       builder='Some Builder',
                       target='telemetry_perf_tests',
                       bucket='luci.bucket',
                       change=change_module.Change.FromDict({
                           'commits': [{
                               'repository': 'chromium',
                               'git_hash': 'aaaaaaa',
                           }]
                       })),
                   swarming_server='some_server',
                   dimensions=[],
                   extra_args=[],
                   attempts=10),
               benchmark=benchmark,
               histogram_options=read_value.HistogramOptions(
                   grouping_label=grouping_label,
                   story=story,
                   statistic=statistic,
                   histogram_name=chart,
               ),
               graph_json_options=read_value.GraphJsonOptions(
                   chart=chart, trace=trace),
               mode=mode,
           )))
Example #3
def _CreateJob(request):
    """Creates a new Pinpoint job from WebOb request arguments."""
    original_arguments = request.params.mixed()
    logging.debug('Received Params: %s', original_arguments)

    # This call will fail if some of the required arguments are not in the
    # original request.
    _ValidateRequiredParams(original_arguments)

    arguments = _ArgumentsWithConfiguration(original_arguments)
    logging.debug('Updated Params: %s', arguments)

    # Validate arguments and convert them to canonical internal representation.
    quests = _GenerateQuests(arguments)

    # Validate the priority, if it's present.
    priority = _ValidatePriority(arguments.get('priority'))

    # Validate and find the associated issue.
    bug_id, project = _ValidateBugId(arguments.get('bug_id'),
                                     arguments.get('project', 'chromium'))
    comparison_mode = _ValidateComparisonMode(arguments.get('comparison_mode'))
    comparison_magnitude = _ValidateComparisonMagnitude(
        arguments.get('comparison_magnitude'))
    gerrit_server, gerrit_change_id = _ValidatePatch(
        arguments.get('patch', arguments.get('experiment_patch')))
    name = arguments.get('name')
    pin = _ValidatePin(arguments.get('pin'))
    tags = _ValidateTags(arguments.get('tags'))
    user = _ValidateUser(arguments.get('user'))
    changes = _ValidateChanges(comparison_mode, arguments)

    # If this is a try job, we assume it's higher priority than bisections, so
    # we'll set it at a negative priority.
    if 'priority' not in arguments and comparison_mode == job_state.TRY:
        priority = -1

    # TODO(dberris): Make this the default when we've graduated the beta.
    use_execution_engine = (arguments.get('experimental_execution_engine')
                            and arguments.get('comparison_mode')
                            == job_state.PERFORMANCE)

    # Ensure that we have the required fields in tryjob requests.
    if comparison_mode == 'try':
        if 'benchmark' not in arguments:
            raise ValueError('Missing required "benchmark" argument.')

        # First we check whether there's a quest that's of type 'RunTelemetryTest'.
        is_telemetry_test = any(
            isinstance(q, quest_module.RunTelemetryTest) for q in quests)
        if is_telemetry_test and ('story' not in arguments
                                  and 'story_tags' not in arguments):
            raise ValueError(
                'Missing either "story" or "story_tags" as arguments for try jobs.'
            )

    # Create job.
    job = job_module.Job.New(quests if not use_execution_engine else (),
                             changes,
                             arguments=original_arguments,
                             bug_id=bug_id,
                             comparison_mode=comparison_mode,
                             comparison_magnitude=comparison_magnitude,
                             gerrit_server=gerrit_server,
                             gerrit_change_id=gerrit_change_id,
                             name=name,
                             pin=pin,
                             tags=tags,
                             user=user,
                             priority=priority,
                             use_execution_engine=use_execution_engine,
                             project=project)

    if use_execution_engine:
        # TODO(dberris): We need to figure out a way to get the arguments to be more
        # structured when it comes in from the UI, so that we don't need to do the
        # manual translation of options here.
        # TODO(dberris): Decide whether we can make some of these hard-coded options
        # be part of a template that's available in the UI (or by configuration
        # somewhere else, maybe luci-config?)
        start_change, end_change = changes
        target = arguments.get('target')
        task_options = performance_bisection.TaskOptions(
            build_option_template=performance_bisection.BuildOptionTemplate(
                builder=arguments.get('builder'),
                target=target,
                bucket=arguments.get('bucket',
                                     'master.tryserver.chromium.perf'),
            ),
            test_option_template=performance_bisection.TestOptionTemplate(
                swarming_server=arguments.get('swarming_server'),
                dimensions=arguments.get('dimensions'),
                extra_args=arguments.get('extra_test_args'),
            ),
            read_option_template=performance_bisection.ReadOptionTemplate(
                benchmark=arguments.get('benchmark'),
                histogram_options=read_value.HistogramOptions(
                    grouping_label=arguments.get('grouping_label'),
                    story=arguments.get('story'),
                    statistic=arguments.get('statistic'),
                    histogram_name=arguments.get('chart'),
                ),
                graph_json_options=read_value.GraphJsonOptions(
                    chart=arguments.get('chart'),
                    trace=arguments.get('trace')),
                mode=('histogram_sets' if target
                      in performance_bisection.EXPERIMENTAL_TARGET_SUPPORT else
                      'graph_json')),
            analysis_options=performance_bisection.AnalysisOptions(
                comparison_magnitude=arguments.get('comparison_magnitude'),
                min_attempts=10,
                max_attempts=60,
            ),
            start_change=start_change,
            end_change=end_change,
            pinned_change=arguments.get('patch'),
        )
        task_module.PopulateTaskGraph(
            job, performance_bisection.CreateGraph(task_options, arguments))
    return job
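
# A standalone sketch (not the real handler) of the try-job validation that
# _CreateJob performs above: a Telemetry try job must name a benchmark and
# carry at least one of 'story' or 'story_tags'. The argument values below
# are illustrative assumptions.
def CheckTryJobArguments(arguments, is_telemetry_test=True):
    if 'benchmark' not in arguments:
        raise ValueError('Missing required "benchmark" argument.')
    if is_telemetry_test and not ({'story', 'story_tags'} & set(arguments)):
        raise ValueError(
            'Missing either "story" or "story_tags" as arguments for try jobs.')


# Passes: both required pieces are present.
CheckTryJobArguments({'benchmark': 'some_benchmark', 'story': 'some_story'})

# Raises: the story selection is missing for a Telemetry-based try job.
try:
    CheckTryJobArguments({'benchmark': 'some_benchmark'})
except ValueError as e:
    print(e)
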
Example #4
def _CreateJob(request):
    """Creates a new Pinpoint job from WebOb request arguments."""
    original_arguments = request.params.mixed()
    logging.debug('Received Params: %s', original_arguments)

    arguments = _ArgumentsWithConfiguration(original_arguments)
    logging.debug('Updated Params: %s', arguments)

    # Validate arguments and convert them to canonical internal representation.
    quests = _GenerateQuests(arguments)

    bug_id = _ValidateBugId(arguments.get('bug_id'))
    comparison_mode = _ValidateComparisonMode(arguments.get('comparison_mode'))
    comparison_magnitude = _ValidateComparisonMagnitude(
        arguments.get('comparison_magnitude'))
    gerrit_server, gerrit_change_id = _ValidatePatch(arguments.get('patch'))
    name = arguments.get('name')
    pin = _ValidatePin(arguments.get('pin'))
    tags = _ValidateTags(arguments.get('tags'))
    user = _ValidateUser(arguments.get('user'))
    changes = _ValidateChanges(comparison_mode, arguments)

    # TODO(dberris): Make this the default when we've graduated the beta.
    use_execution_engine = (arguments.get('experimental_execution_engine')
                            and arguments.get('comparison_mode')
                            == job_state.PERFORMANCE)

    # Create job.
    job = job_module.Job.New(quests if not use_execution_engine else (),
                             changes,
                             arguments=original_arguments,
                             bug_id=bug_id,
                             comparison_mode=comparison_mode,
                             comparison_magnitude=comparison_magnitude,
                             gerrit_server=gerrit_server,
                             gerrit_change_id=gerrit_change_id,
                             name=name,
                             pin=pin,
                             tags=tags,
                             user=user,
                             use_execution_engine=use_execution_engine)

    if use_execution_engine:
        # TODO(dberris): We need to figure out a way to get the arguments to be more
        # structured when it comes in from the UI, so that we don't need to do the
        # manual translation of options here.
        # TODO(dberris): Decide whether we can make some of these hard-coded options
        # be part of a template that's available in the UI (or by configuration
        # somewhere else, maybe luci-config?)
        start_change, end_change = changes
        target = arguments.get('target')
        task_options = performance_bisection.TaskOptions(
            build_option_template=performance_bisection.BuildOptionTemplate(
                builder=arguments.get('builder'),
                target=target,
                bucket=arguments.get('bucket',
                                     'master.tryserver.chromium.perf'),
            ),
            test_option_template=performance_bisection.TestOptionTemplate(
                swarming_server=arguments.get('swarming_server'),
                dimensions=arguments.get('dimensions'),
                extra_args=arguments.get('extra_test_args'),
            ),
            read_option_template=performance_bisection.ReadOptionTemplate(
                benchmark=arguments.get('benchmark'),
                histogram_options=read_value.HistogramOptions(
                    grouping_label=arguments.get('grouping_label'),
                    story=arguments.get('story'),
                    statistic=arguments.get('statistic'),
                    histogram_name=arguments.get('chart'),
                ),
                graph_json_options=read_value.GraphJsonOptions(
                    chart=arguments.get('chart'),
                    trace=arguments.get('trace')),
                mode=('histogram_sets' if target
                      in performance_bisection.EXPERIMENTAL_TARGET_SUPPORT else
                      'graph_json')),
            analysis_options=performance_bisection.AnalysisOptions(
                comparison_magnitude=arguments.get('comparison_magnitude'),
                min_attempts=10,
                max_attempts=60,
            ),
            start_change=start_change,
            end_change=end_change,
            pinned_change=arguments.get('patch'),
        )
        task_module.PopulateTaskGraph(
            job, performance_bisection.CreateGraph(task_options, arguments))
    return job
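
# A small sketch of the read-mode selection used in both _CreateJob examples
# above: membership in EXPERIMENTAL_TARGET_SUPPORT decides between parsing
# histogram sets and GraphJSON output. The target names and the contents of
# the support set here are illustrative assumptions, not the real constant.
def SelectReadMode(target, experimental_targets):
    return 'histogram_sets' if target in experimental_targets else 'graph_json'


EXPERIMENTAL_TARGETS = {'performance_test_suite'}
assert SelectReadMode('performance_test_suite',
                      EXPERIMENTAL_TARGETS) == 'histogram_sets'
assert SelectReadMode('telemetry_perf_tests',
                      EXPERIMENTAL_TARGETS) == 'graph_json'
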
Example #5
    def PopulateSimpleBisectionGraph(self):
        """Helper function to populate a task graph representing a bisection.

    This function will populate the following graph on the associated job
    initialised in the setUp function:

    find_culprit
     |   |
     |   +--> read_value(start_cl, [0..min_attempts])
     |          |
     |          +--> run_test(start_cl, [0..min_attempts])
     |                 |
     |                 +--> find_isolate(start_cl)
     |
     +--> read_value(end_cl, [0..min_attempts])
            |
            +--> run_test(end_cl, [0..min_attempts])
                   |
                   +--> find_isolate(end_cl)


    This is the starting point for all bisections, and the graph on which we
    expect the evaluator implementation to operate. In this specific case,
    we're setting min_attempts at 10 and max_attempts at 100, then using the
    special `commit_0` and `commit_5` git hashes as the range to bisect over.
    The test base class sets up special meanings for these pseudo-hashes and all
    infrastructure related to expanding that range.
    """

        task_module.PopulateTaskGraph(
            self.job,
            performance_bisection.CreateGraph(
                performance_bisection.TaskOptions(
                    build_option_template=performance_bisection.
                    BuildOptionTemplate(builder='Some Builder',
                                        target='performance_telemetry_test',
                                        bucket='luci.bucket'),
                    test_option_template=performance_bisection.
                    TestOptionTemplate(
                        swarming_server='some_server',
                        dimensions=[],
                        extra_args=[],
                    ),
                    read_option_template=performance_bisection.
                    ReadOptionTemplate(
                        benchmark='some_benchmark',
                        histogram_options=read_value.HistogramOptions(
                            grouping_label='some_label',
                            story='some_story',
                            statistic='avg',
                        ),
                        graph_json_options=read_value.GraphJsonOptions(
                            chart='some_chart',
                            trace='some_trace',
                        ),
                        mode='histogram_sets'),
                    analysis_options=performance_bisection.AnalysisOptions(
                        comparison_magnitude=1.0,
                        min_attempts=10,
                        max_attempts=100,
                    ),
                    start_change=change_module.Change.FromDict({
                        'commits': [{
                            'repository': 'chromium',
                            'git_hash': 'commit_0'
                        }]
                    }),
                    end_change=change_module.Change.FromDict({
                        'commits': [{
                            'repository': 'chromium',
                            'git_hash': 'commit_5'
                        }]
                    }),
                    pinned_change=None,
                )))
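
# A self-contained sketch of the graph shape described in the docstring above,
# using plain tuples instead of the real task_module types; the vertex id
# naming scheme is an illustrative assumption.
def SimpleBisectionEdges(start, end, attempts):
    edges = []
    for change in (start, end):
        for attempt in range(attempts):
            read_id = 'read_value_%s_%d' % (change, attempt)
            run_id = 'run_test_%s_%d' % (change, attempt)
            edges.append(('find_culprit', read_id))
            edges.append((read_id, run_id))
            edges.append((run_id, 'find_isolate_%s' % change))
    return edges


# With min_attempts=10 there are 10 read_value/run_test pairs per change, all
# funnelling into a single find_isolate task for that change: 2 * 10 * 3 = 60
# edges in total.
assert len(SimpleBisectionEdges('commit_0', 'commit_5', 10)) == 60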