Example 1
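This excerpt (and Example 2 below) is a version of the Pinpoint job-creation
request handler from the Chromium catapult repository. Both excerpts assume
module-level imports along the following lines; the exact paths are an
assumption based on that repository's layout and are shown for context only.

# Assumed module-level imports (paths per the catapult repo layout).
import logging

from dashboard.pinpoint.models import job as job_module
from dashboard.pinpoint.models import job_state
from dashboard.pinpoint.models import quest as quest_module
from dashboard.pinpoint.models import task as task_module
from dashboard.pinpoint.models.tasks import performance_bisection
from dashboard.pinpoint.models.tasks import read_value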
def _CreateJob(request):
    """Creates a new Pinpoint job from WebOb request arguments."""
    original_arguments = request.params.mixed()
    logging.debug('Received Params: %s', original_arguments)

    # This call will fail if any of the required arguments are missing from
    # the original request.
    _ValidateRequiredParams(original_arguments)

    arguments = _ArgumentsWithConfiguration(original_arguments)
    logging.debug('Updated Params: %s', arguments)

    # Validate arguments and convert them to canonical internal representation.
    quests = _GenerateQuests(arguments)

    # Validate the priority, if it's present.
    priority = _ValidatePriority(arguments.get('priority'))
    bug_id, project = _ValidateBugId(arguments.get('bug_id'),
                                     arguments.get('project', 'chromium'))
    comparison_mode = _ValidateComparisonMode(arguments.get('comparison_mode'))
    comparison_magnitude = _ValidateComparisonMagnitude(
        arguments.get('comparison_magnitude'))
    gerrit_server, gerrit_change_id = _ValidatePatch(arguments.get('patch'))
    name = arguments.get('name')
    pin = _ValidatePin(arguments.get('pin'))
    tags = _ValidateTags(arguments.get('tags'))
    user = _ValidateUser(arguments.get('user'))
    changes = _ValidateChanges(comparison_mode, arguments)

    # If this is a try job, we assume it's higher priority than bisections, so
    # we'll set it at a negative priority.
    if 'priority' not in arguments and comparison_mode == job_state.TRY:
        priority = -1

    # TODO(dberris): Make this the default when we've graduated the beta.
    use_execution_engine = (
        arguments.get('experimental_execution_engine')
        and arguments.get('comparison_mode') == job_state.PERFORMANCE)

    # Ensure that we have the required fields in tryjob requests.
    if comparison_mode == job_state.TRY:
        if 'benchmark' not in arguments:
            raise ValueError('Missing required "benchmark" argument.')

        # Check whether any of the quests is a Telemetry test run.
        is_telemetry_test = any(
            isinstance(q, quest_module.RunTelemetryTest) for q in quests)
        if is_telemetry_test and ('story' not in arguments
                                  and 'story_tags' not in arguments):
            raise ValueError(
                'Missing either "story" or "story_tags" as arguments for try jobs.'
            )

    # Create job.
    job = job_module.Job.New(quests if not use_execution_engine else (),
                             changes,
                             arguments=original_arguments,
                             bug_id=bug_id,
                             comparison_mode=comparison_mode,
                             comparison_magnitude=comparison_magnitude,
                             gerrit_server=gerrit_server,
                             gerrit_change_id=gerrit_change_id,
                             name=name,
                             pin=pin,
                             tags=tags,
                             user=user,
                             priority=priority,
                             use_execution_engine=use_execution_engine,
                             project=project)

    if use_execution_engine:
        # TODO(dberris): We need to figure out a way to get the arguments to be more
        # structured when it comes in from the UI, so that we don't need to do the
        # manual translation of options here.
        # TODO(dberris): Decide whether we can make some of these hard-coded options
        # be part of a template that's available in the UI (or by configuration
        # somewhere else, maybe luci-config?)
        start_change, end_change = changes
        target = arguments.get('target')
        task_options = performance_bisection.TaskOptions(
            build_option_template=performance_bisection.BuildOptionTemplate(
                builder=arguments.get('builder'),
                target=target,
                bucket=arguments.get('bucket',
                                     'master.tryserver.chromium.perf'),
            ),
            test_option_template=performance_bisection.TestOptionTemplate(
                swarming_server=arguments.get('swarming_server'),
                dimensions=arguments.get('dimensions'),
                extra_args=arguments.get('extra_test_args'),
            ),
            read_option_template=performance_bisection.ReadOptionTemplate(
                benchmark=arguments.get('benchmark'),
                histogram_options=read_value.HistogramOptions(
                    grouping_label=arguments.get('grouping_label'),
                    story=arguments.get('story'),
                    statistic=arguments.get('statistic'),
                    histogram_name=arguments.get('chart'),
                ),
                graph_json_options=read_value.GraphJsonOptions(
                    chart=arguments.get('chart'),
                    trace=arguments.get('trace')),
                mode=('histogram_sets'
                      if target in performance_bisection.EXPERIMENTAL_TARGET_SUPPORT
                      else 'graph_json')),
            analysis_options=performance_bisection.AnalysisOptions(
                comparison_magnitude=arguments.get('comparison_magnitude'),
                min_attempts=10,
                max_attempts=60,
            ),
            start_change=start_change,
            end_change=end_change,
            pinned_change=arguments.get('patch'),
        )
        task_module.PopulateTaskGraph(
            job, performance_bisection.CreateGraph(task_options, arguments))
    return job
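For context, here is a minimal sketch of how this handler might be exercised
directly with a WebOb request. The endpoint path and every parameter value are
illustrative only (none are taken from the excerpt), and the call assumes the
full Pinpoint application environment is importable.

import webob

# Hypothetical try-job parameters; the keys match the argument names the
# function reads above, the values are made up for illustration.
request = webob.Request.blank(
    '/api/new',
    POST={
        'comparison_mode': 'try',
        'benchmark': 'speedometer2',
        'story': 'Speedometer2',
        'user': 'user@example.com',
    })
job = _CreateJob(request)  # Raises ValueError when validation fails.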
Example 2
def _CreateJob(request):
  """Creates a new Pinpoint job from WebOb request arguments."""
  original_arguments = request.params.mixed()
  logging.debug('Received Params: %s', original_arguments)

  arguments = _ArgumentsWithConfiguration(original_arguments)
  logging.debug('Updated Params: %s', arguments)

  # Validate arguments and convert them to canonical internal representation.
  quests = _GenerateQuests(arguments)

  bug_id = _ValidateBugId(arguments.get('bug_id'))
  comparison_mode = _ValidateComparisonMode(arguments.get('comparison_mode'))
  comparison_magnitude = _ValidateComparisonMagnitude(
      arguments.get('comparison_magnitude'))
  gerrit_server, gerrit_change_id = _ValidatePatch(arguments.get('patch'))
  name = arguments.get('name')
  pin = _ValidatePin(arguments.get('pin'))
  tags = _ValidateTags(arguments.get('tags'))
  user = _ValidateUser(arguments.get('user'))
  changes = _ValidateChanges(comparison_mode, arguments)

  # TODO(dberris): Make this the default when we've graduated the beta.
  use_execution_engine = (
      arguments.get('experimental_execution_engine') and
      arguments.get('comparison_mode') == job_state.PERFORMANCE)

  # Create job.
  job = job_module.Job.New(
      quests if not use_execution_engine else (),
      changes,
      arguments=original_arguments,
      bug_id=bug_id,
      comparison_mode=comparison_mode,
      comparison_magnitude=comparison_magnitude,
      gerrit_server=gerrit_server,
      gerrit_change_id=gerrit_change_id,
      name=name,
      pin=pin,
      tags=tags,
      user=user,
      use_execution_engine=use_execution_engine)

  if use_execution_engine:
    # TODO(dberris): We need to figure out a way to get the arguments to be more
    # structured when it comes in from the UI, so that we don't need to do the
    # manual translation of options here.
    # TODO(dberris): Decide whether we can make some of these hard-coded options
    # be part of a template that's available in the UI (or by configuration
    # somewhere else, maybe luci-config?)
    start_change, end_change = changes
    target = arguments.get('target')
    task_options = performance_bisection.TaskOptions(
        build_option_template=performance_bisection.BuildOptionTemplate(
            builder=arguments.get('builder'),
            target=target,
            bucket=arguments.get('bucket', 'master.tryserver.chromium.perf'),
        ),
        test_option_template=performance_bisection.TestOptionTemplate(
            swarming_server=arguments.get('swarming_server'),
            dimensions=arguments.get('dimensions'),
            extra_args=arguments.get('extra_test_args'),
        ),
        read_option_template=performance_bisection.ReadOptionTemplate(
            benchmark=arguments.get('benchmark'),
            histogram_options=read_value.HistogramOptions(
                grouping_label=arguments.get('grouping_label'),
                story=arguments.get('story'),
                statistic=arguments.get('statistic'),
                histogram_name=arguments.get('chart'),
            ),
            graph_json_options=read_value.GraphJsonOptions(
                chart=arguments.get('chart'), trace=arguments.get('trace')),
            mode=('histogram_sets'
                  if target in performance_bisection.EXPERIMENTAL_TARGET_SUPPORT
                  else 'graph_json')),
        analysis_options=performance_bisection.AnalysisOptions(
            comparison_magnitude=arguments.get('comparison_magnitude'),
            min_attempts=10,
            max_attempts=60,
        ),
        start_change=start_change,
        end_change=end_change,
        pinned_change=arguments.get('patch'),
    )
    task_module.PopulateTaskGraph(
        job, performance_bisection.CreateGraph(task_options, arguments))
  return job
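Both versions read the request arguments via request.params.mixed(), a WebOb
MultiDict method that collapses repeated keys into lists while leaving
single-valued keys as scalars. A self-contained illustration (the parameter
names and values here are made up):

from webob.multidict import MultiDict

params = MultiDict([('story_tags', 'health_check'),
                    ('story_tags', 'all_platforms'),
                    ('benchmark', 'speedometer2')])
print(params.mixed())
# {'story_tags': ['health_check', 'all_platforms'], 'benchmark': 'speedometer2'}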