Example #1
def _CreateJob(request):
    """Creates a new Pinpoint job from WebOb request arguments."""
    original_arguments = request.params.mixed()
    logging.debug('Received Params: %s', original_arguments)

    arguments = _ArgumentsWithConfiguration(original_arguments)
    logging.debug('Updated Params: %s', arguments)

    # Validate arguments and convert them to canonical internal representation.
    quests = _GenerateQuests(arguments)

    bug_id = _ValidateBugId(arguments.get('bug_id'))
    comparison_mode = _ValidateComparisonMode(arguments.get('comparison_mode'))
    comparison_magnitude = _ValidateComparisonMagnitude(
        arguments.get('comparison_magnitude'))
    gerrit_server, gerrit_change_id = _ValidatePatch(arguments.get('patch'))
    name = arguments.get('name')
    pin = _ValidatePin(arguments.get('pin'))
    tags = _ValidateTags(arguments.get('tags'))
    user = _ValidateUser(arguments.get('user'))
    changes = _ValidateChanges(comparison_mode, arguments)

    # TODO(dberris): Make this the default when we've graduated the beta.
    use_execution_engine = (arguments.get('experimental_execution_engine')
                            and arguments.get('comparison_mode')
                            == job_state.PERFORMANCE)

    # Create job.
    job = job_module.Job.New(quests if not use_execution_engine else (),
                             changes,
                             arguments=original_arguments,
                             bug_id=bug_id,
                             comparison_mode=comparison_mode,
                             comparison_magnitude=comparison_magnitude,
                             gerrit_server=gerrit_server,
                             gerrit_change_id=gerrit_change_id,
                             name=name,
                             pin=pin,
                             tags=tags,
                             user=user,
                             use_execution_engine=use_execution_engine)

    if use_execution_engine:
        # TODO(dberris): We need to figure out a way to get the arguments to be
        # more structured when they come in from the UI, so that we don't need
        # to do the manual translation of options here.
        # TODO(dberris): Decide whether we can make some of these hard-coded options
        # be part of a template that's available in the UI (or by configuration
        # somewhere else, maybe luci-config?)
        start_change, end_change = changes
        target = arguments.get('target')
        task_options = performance_bisection.TaskOptions(
            build_option_template=performance_bisection.BuildOptionTemplate(
                builder=arguments.get('builder'),
                target=target,
                bucket=arguments.get('bucket',
                                     'master.tryserver.chromium.perf'),
            ),
            test_option_template=performance_bisection.TestOptionTemplate(
                swarming_server=arguments.get('swarming_server'),
                dimensions=arguments.get('dimensions'),
                extra_args=arguments.get('extra_test_args'),
            ),
            read_option_template=performance_bisection.ReadOptionTemplate(
                benchmark=arguments.get('benchmark'),
                histogram_options=read_value.HistogramOptions(
                    grouping_label=arguments.get('grouping_label'),
                    story=arguments.get('story'),
                    statistic=arguments.get('statistic'),
                    histogram_name=arguments.get('chart'),
                ),
                graph_json_options=read_value.GraphJsonOptions(
                    chart=arguments.get('chart'),
                    trace=arguments.get('trace')),
                mode=('histogram_sets' if target
                      in performance_bisection.EXPERIMENTAL_TARGET_SUPPORT else
                      'graph_json')),
            analysis_options=performance_bisection.AnalysisOptions(
                comparison_magnitude=arguments.get('comparison_magnitude'),
                min_attempts=10,
                max_attempts=60,
            ),
            start_change=start_change,
            end_change=end_change,
            pinned_change=arguments.get('patch'),
        )
        task_module.PopulateTaskGraph(
            job, performance_bisection.CreateGraph(task_options, arguments))
    return job
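
For reference, a minimal sketch of how this handler might be exercised outside the web framework, assuming plain WebOb; the '/api/new' path and the concrete parameter values are illustrative assumptions, not taken from the Pinpoint codebase:

import webob

# Illustrative only: the parameter names mirror the arguments read by
# _CreateJob above, but the values and the endpoint path are assumptions.
request = webob.Request.blank(
    '/api/new',
    POST={
        'configuration': 'linux-perf',
        'benchmark': 'speedometer2',
        'comparison_mode': 'performance',
        'start_git_hash': 'aaaaaaaa',
        'end_git_hash': 'bbbbbbbb',
    })
job = _CreateJob(request)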
Example #2
def _CreateJob(request):
    """Creates a new Pinpoint job from WebOb request arguments."""
    original_arguments = request.params.mixed()
    logging.debug('Received Params: %s', original_arguments)

    # This call will fail if some of the required arguments are not in the
    # original request.
    _ValidateRequiredParams(original_arguments)

    arguments = _ArgumentsWithConfiguration(original_arguments)
    logging.debug('Updated Params: %s', arguments)

    # Validate arguments and convert them to canonical internal representation.
    quests = _GenerateQuests(arguments)

    # Validate the priority, if it's present.
    priority = _ValidatePriority(arguments.get('priority'))

    # Validate and find the associated issue.
    bug_id, project = _ValidateBugId(arguments.get('bug_id'),
                                     arguments.get('project', 'chromium'))
    comparison_mode = _ValidateComparisonMode(arguments.get('comparison_mode'))
    comparison_magnitude = _ValidateComparisonMagnitude(
        arguments.get('comparison_magnitude'))
    gerrit_server, gerrit_change_id = _ValidatePatch(
        arguments.get('patch', arguments.get('experiment_patch')))
    name = arguments.get('name')
    pin = _ValidatePin(arguments.get('pin'))
    tags = _ValidateTags(arguments.get('tags'))
    user = _ValidateUser(arguments.get('user'))
    changes = _ValidateChanges(comparison_mode, arguments)

    # If this is a try job, we assume it's higher priority than bisections, so
    # we'll set it at a negative priority.
    if 'priority' not in arguments and comparison_mode == job_state.TRY:
        priority = -1

    # TODO(dberris): Make this the default when we've graduated the beta.
    use_execution_engine = (arguments.get('experimental_execution_engine')
                            and arguments.get('comparison_mode')
                            == job_state.PERFORMANCE)

    # Ensure that we have the required fields in tryjob requests.
    if comparison_mode == job_state.TRY:
        if 'benchmark' not in arguments:
            raise ValueError('Missing required "benchmark" argument.')

        # First, check whether any of the quests is a 'RunTelemetryTest'.
        is_telemetry_test = any(
            isinstance(q, quest_module.RunTelemetryTest) for q in quests)
        if is_telemetry_test and ('story' not in arguments
                                  and 'story_tags' not in arguments):
            raise ValueError(
                'Missing either "story" or "story_tags" as arguments for try jobs.'
            )

    # Create job.
    job = job_module.Job.New(quests if not use_execution_engine else (),
                             changes,
                             arguments=original_arguments,
                             bug_id=bug_id,
                             comparison_mode=comparison_mode,
                             comparison_magnitude=comparison_magnitude,
                             gerrit_server=gerrit_server,
                             gerrit_change_id=gerrit_change_id,
                             name=name,
                             pin=pin,
                             tags=tags,
                             user=user,
                             priority=priority,
                             use_execution_engine=use_execution_engine,
                             project=project)

    if use_execution_engine:
        # TODO(dberris): We need to figure out a way to get the arguments to be
        # more structured when they come in from the UI, so that we don't need
        # to do the manual translation of options here.
        # TODO(dberris): Decide whether we can make some of these hard-coded options
        # be part of a template that's available in the UI (or by configuration
        # somewhere else, maybe luci-config?)
        start_change, end_change = changes
        target = arguments.get('target')
        task_options = performance_bisection.TaskOptions(
            build_option_template=performance_bisection.BuildOptionTemplate(
                builder=arguments.get('builder'),
                target=target,
                bucket=arguments.get('bucket',
                                     'master.tryserver.chromium.perf'),
            ),
            test_option_template=performance_bisection.TestOptionTemplate(
                swarming_server=arguments.get('swarming_server'),
                dimensions=arguments.get('dimensions'),
                extra_args=arguments.get('extra_test_args'),
            ),
            read_option_template=performance_bisection.ReadOptionTemplate(
                benchmark=arguments.get('benchmark'),
                histogram_options=read_value.HistogramOptions(
                    grouping_label=arguments.get('grouping_label'),
                    story=arguments.get('story'),
                    statistic=arguments.get('statistic'),
                    histogram_name=arguments.get('chart'),
                ),
                graph_json_options=read_value.GraphJsonOptions(
                    chart=arguments.get('chart'),
                    trace=arguments.get('trace')),
                mode=('histogram_sets' if target
                      in performance_bisection.EXPERIMENTAL_TARGET_SUPPORT else
                      'graph_json')),
            analysis_options=performance_bisection.AnalysisOptions(
                comparison_magnitude=arguments.get('comparison_magnitude'),
                min_attempts=10,
                max_attempts=60,
            ),
            start_change=start_change,
            end_change=end_change,
            pinned_change=arguments.get('patch'),
        )
        task_module.PopulateTaskGraph(
            job, performance_bisection.CreateGraph(task_options, arguments))
    return job
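
The priority handling above relies on a _ValidatePriority helper that is not shown in this example. Below is a hedged sketch of what such a validator might look like, assuming it simply coerces the argument to an integer; the actual Pinpoint implementation may differ:

def _ValidatePriority(priority):
    # Sketch only: the real _ValidatePriority may behave differently.
    if priority is None:
        return None
    try:
        return int(priority)
    except ValueError:
        raise ValueError('Invalid priority: %r' % (priority,))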
Example #3
  def PopulateSimpleBisectionGraph(self):
    """Helper function to populate a task graph representing a bisection.

    This function will populate the following graph on the associated job
    initialised in the setUp function:

    find_culprit
     |   |
     |   +--> read_value(start_cl, [0..min_attempts])
     |          |
     |          +--> run_test(start_cl, [0..min_attempts])
     |                 |
     |                 +--> find_isolate(start_cl)
     |
     +--> read_value(end_cl, [0..min_attempts])
            |
            +--> run_test(end_cl, [0..min_attempts])
                   |
                   +--> find_isolate(end_cl)


    This is the starting point for all bisections, on which we expect the
    evaluator implementation to operate. In this specific case, we set
    min_attempts to 10 and max_attempts to 100, and use the special `commit_0`
    and `commit_5` git hashes as the range to bisect over. The test base class
    gives these pseudo-hashes special meaning and sets up all the
    infrastructure needed to expand that range.
    """

    task_module.PopulateTaskGraph(
        self.job,
        performance_bisection.CreateGraph(
            performance_bisection.TaskOptions(
                build_option_template=performance_bisection.BuildOptionTemplate(
                    builder='Some Builder',
                    target='performance_telemetry_test',
                    bucket='luci.bucket'),
                test_option_template=performance_bisection.TestOptionTemplate(
                    swarming_server='some_server',
                    dimensions=[],
                    extra_args=[],
                ),
                read_option_template=performance_bisection.ReadOptionTemplate(
                    benchmark='some_benchmark',
                    histogram_options=read_value.HistogramOptions(
                        tir_label='some_tir_label',
                        story='some_story',
                        statistic='avg',
                    ),
                    graph_json_options=read_value.GraphJsonOptions(
                        chart='some_chart',
                        trace='some_trace',
                    ),
                    mode='histogram_sets'),
                analysis_options=performance_bisection.AnalysisOptions(
                    comparison_magnitude=1.0,
                    min_attempts=10,
                    max_attempts=100,
                ),
                start_change=change_module.Change.FromDict({
                    'commits': [{
                        'repository': 'chromium',
                        'git_hash': 'commit_0'
                    }]
                }),
                end_change=change_module.Change.FromDict({
                    'commits': [{
                        'repository': 'chromium',
                        'git_hash': 'commit_5'
                    }]
                }),
                pinned_change=None,
            )))
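
Finally, a hedged sketch of how a test built on this helper might drive the populated graph; the 'initiate' event, the event_module.Event constructor, the task_module.Evaluate call, and self.evaluator are all assumptions about the surrounding test fixture and may not match the actual Pinpoint APIs:

  def testEvaluateSimpleBisection(self):
    # Sketch only: Event/Evaluate signatures and self.evaluator are assumed.
    self.PopulateSimpleBisectionGraph()
    task_module.Evaluate(
        self.job,
        event_module.Event(type='initiate', target_task=None, payload={}),
        self.evaluator)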