def file_issue(testcase,
               issue_tracker,
               security_severity=None,
               user_email=None,
               additional_ccs=None):
    """File an issue for the given test case."""
    logs.log('Filing new issue for testcase: %d' % testcase.key.id())

    policy = issue_tracker_policy.get(issue_tracker.project)
    is_crash = not utils.sub_string_exists_in(NON_CRASH_TYPES,
                                              testcase.crash_type)
    properties = policy.get_new_issue_properties(
        is_security=testcase.security_flag, is_crash=is_crash)

    issue = issue_tracker.new_issue()
    issue.title = data_handler.get_issue_summary(testcase)
    issue.body = data_handler.get_issue_description(testcase,
                                                    reporter=user_email,
                                                    show_reporter=True)

    # Add reproducibility flag label.
    if testcase.one_time_crasher_flag:
        issue.labels.add(policy.label('unreproducible'))
    else:
        issue.labels.add(policy.label('reproducible'))

    # Chromium-specific labels.
    if issue_tracker.project == 'chromium' and testcase.security_flag:
        # Add reward labels if this is from an external fuzzer contribution.
        fuzzer = data_types.Fuzzer.query(
            data_types.Fuzzer.name == testcase.fuzzer_name).get()
        if fuzzer and fuzzer.external_contribution:
            issue.labels.add('reward-topanel')
            issue.labels.add('External-Fuzzer-Contribution')

        update_issue_impact_labels(testcase, issue)

    # Add additional labels from the job definition and fuzzer.
    additional_labels = data_handler.get_additional_values_for_variable(
        'AUTOMATIC_LABELS', testcase.job_type, testcase.fuzzer_name)
    for label in additional_labels:
        issue.labels.add(label)

    # Add additional components from the job definition and fuzzer.
    automatic_components = data_handler.get_additional_values_for_variable(
        'AUTOMATIC_COMPONENTS', testcase.job_type, testcase.fuzzer_name)
    for component in automatic_components:
        issue.components.add(component)

    # Add issue assignee from the job definition and fuzzer.
    automatic_assignee = data_handler.get_additional_values_for_variable(
        'AUTOMATIC_ASSIGNEE', testcase.job_type, testcase.fuzzer_name)
    if automatic_assignee:
        issue.status = policy.status('assigned')
        issue.assignee = automatic_assignee[0]
    else:
        issue.status = properties.status

    # Add additional ccs from the job definition and fuzzer.
    ccs = data_handler.get_additional_values_for_variable(
        'AUTOMATIC_CCS', testcase.job_type, testcase.fuzzer_name)

    # For externally contributed fuzzers, potentially cc the author.
    # Use the fully qualified fuzzer name if one is available.
    fully_qualified_fuzzer_name = (testcase.overridden_fuzzer_name
                                   or testcase.fuzzer_name)
    ccs += external_users.cc_users_for_fuzzer(fully_qualified_fuzzer_name,
                                              testcase.security_flag)
    ccs += external_users.cc_users_for_job(testcase.job_type,
                                           testcase.security_flag)

    # Add the user as a cc if requested, and any default ccs for this job.
    # Include caller-supplied additional ccs, skipping duplicates.
    if additional_ccs:
        ccs += [cc for cc in additional_ccs if cc not in ccs]

    # For user uploads, we assume the uploader is interested in the issue.
    if testcase.uploader_email and testcase.uploader_email not in ccs:
        ccs.append(testcase.uploader_email)

    ccs.extend(properties.ccs)

    # Get view restriction rules for the job.
    issue_restrictions = data_handler.get_value_from_job_definition(
        testcase.job_type, 'ISSUE_VIEW_RESTRICTIONS', 'security')
    should_restrict_issue = (issue_restrictions == 'all'
                             or (issue_restrictions == 'security'
                                 and testcase.security_flag))

    has_accountable_people = bool(ccs)

    # Check for labels with special logic.
    additional_labels = []
    if should_restrict_issue:
        additional_labels.append(policy.label('restrict_view'))

    if has_accountable_people:
        additional_labels.append(policy.label('reported'))

    if testcase.security_flag:
        additional_labels.append(policy.label('security_severity'))

    additional_labels.append(policy.label('os'))

    # Apply label substitutions.
    for label in itertools.chain(properties.labels, additional_labels):
        for result in apply_substitutions(policy, label, testcase,
                                          security_severity):
            issue.labels.add(result)
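    # Note: apply_substitutions expands policy placeholders in label
    # templates. For example (hypothetical values), a template like
    # 'Security_Severity-%SEVERITY%' could yield 'Security_Severity-High';
    # templates without placeholders pass through unchanged. This is an
    # inference from the call shape above, not the verified implementation.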

    issue.body += data_handler.format_issue_information(
        testcase, properties.issue_body_footer)
    if (should_restrict_issue and has_accountable_people
            and policy.deadline_policy_message):
        issue.body += '\n\n' + policy.deadline_policy_message

    for cc in ccs:
        issue.ccs.add(cc)

    # Add additional labels and components from testcase metadata.
    metadata_labels = _get_from_metadata(testcase, 'issue_labels')
    for label in metadata_labels:
        issue.labels.add(label)

    metadata_components = _get_from_metadata(testcase, 'issue_components')
    for component in metadata_components:
        issue.components.add(component)

    issue.reporter = user_email
    issue.save()

    # Update the testcase with this newly created issue.
    testcase.bug_information = str(issue.id)
    testcase.put()

    data_handler.update_group_bug(testcase.group_id)
    return issue.id
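
The `_get_from_metadata` helper used above is not shown here; Example #3 below
performs the same parsing inline, so a plausible sketch (inferred from that
inline form, not the verified implementation) is:

def _get_from_metadata(testcase, name):
    """Return a list parsed from a comma-delimited testcase metadata value."""
    # Mirrors the inline utils.parse_delimited call in Example #3.
    return utils.parse_delimited(
        testcase.get_metadata(name, ''),
        delimiter=',',
        strip=True,
        remove_empty=True)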
Example #2
def get_command_line_for_application(file_to_run='',
                                     user_profile_index=0,
                                     app_path=None,
                                     app_args=None,
                                     needs_http=False,
                                     write_command_line_file=False):
    """Returns the complete command line required to execute application."""
    if app_args is None:
        app_args = environment.get_value('APP_ARGS')
    if app_path is None:
        app_path = environment.get_value('APP_PATH')

    additional_command_line_flags = get_additional_command_line_flags(
        file_to_run)
    app_args_append_testcase = environment.get_value(
        'APP_ARGS_APPEND_TESTCASE')
    app_directory = environment.get_value('APP_DIR')
    app_name = environment.get_value('APP_NAME')
    apps_argument = environment.get_value('APPS_ARG')
    crash_stacks_directory = environment.get_value('CRASH_STACKTRACES_DIR')
    debugger = environment.get_value('DEBUGGER_PATH')
    device_testcases_directory = android.constants.DEVICE_TESTCASES_DIR
    fuzzer_directory = environment.get_value('FUZZER_DIR')
    extension_argument = environment.get_value('EXTENSION_ARG')
    input_directory = environment.get_value('INPUT_DIR')
    plt = environment.platform()
    root_directory = environment.get_value('ROOT_DIR')
    temp_directory = environment.get_value('BOT_TMPDIR')
    user_profile_argument = environment.get_value('USER_PROFILE_ARG')
    window_argument = environment.get_value('WINDOW_ARG')
    user_profile_directory = get_user_profile_directory(user_profile_index)

    # Create user profile directory and setup contents if needed.
    setup_user_profile_directory_if_needed(user_profile_directory)

    # If the application path has spaces, quote it.
    if ' ' in app_path:
        app_path = '"%s"' % app_path

    # Prepend the interpreter to the command if the app is a script.
    interpreter = shell.get_interpreter(app_name)
    if interpreter:
        app_path = '%s %s' % (interpreter, app_path)

    # Start creating the command line.
    command = ''

    launcher = environment.get_value('LAUNCHER_PATH')
    if environment.is_trusted_host() and not launcher:
        # Rebase the file_to_run path to the worker's root (unless we're running
        # under a launcher, which runs on the host).
        from bot.untrusted_runner import file_host
        file_to_run = file_host.rebase_to_worker_root(file_to_run)

    # Default case.
    testcase_path = file_to_run
    testcase_filename = os.path.basename(testcase_path)
    testcase_directory = os.path.dirname(testcase_path)
    testcase_file_url = utils.file_path_to_file_url(testcase_path)
    testcase_http_url = ''

    # Determine where |testcase_file_url| should point depending on platform and
    # whether or not a launcher script is used.
    if file_to_run:
        if launcher:
            # In the case of launcher scripts, the testcase file to be run resides on
            # the host running the launcher script. Thus |testcase_file_url|, which
            # may point to a location on the device for Android job types, does not
            # apply. Instead, the launcher script should be passed the original file
            # to run. By setting |testcase_file_url| to |file_to_run|, we avoid
            # duplicating job definitions solely for supporting launcher scripts.
            testcase_file_url = file_to_run
            # Jobs that have a launcher script which needs to be run on the
            # host will have app_name == launcher. In this case, don't prepend
            # the launcher to the command; just use app_name.
            if os.path.basename(launcher) != app_name:
                command += launcher + ' '
        elif plt in ['ANDROID']:
            # Android-specific testcase path fixup for fuzzers that don't rely on
            # launcher scripts.
            local_testcases_directory = environment.get_value('FUZZ_INPUTS')

            # Check if the file to run is in the fuzzed testcases folder. If
            # so, we can substitute it with a local device path. Otherwise, it
            # is part of a data bundle with resource dependencies, and we need
            # to serve it via the http host forwarder.
            if file_to_run.startswith(local_testcases_directory):
                testcase_relative_path = (
                    file_to_run[len(local_testcases_directory) + 1:])
                testcase_path = os.path.join(device_testcases_directory,
                                             testcase_relative_path)
                testcase_file_url = utils.file_path_to_file_url(testcase_path)
            else:
                # Force use of host_forwarder based on comment above.
                needs_http = True

        # Check if the testcase needs to be loaded over http.
        # TODO(ochang): Make this work for trusted/untrusted.
        http_ip = '127.0.0.1'
        http_port_1 = environment.get_value('HTTP_PORT_1', 8000)
        relative_testcase_path = file_to_run[len(input_directory +
                                                 os.path.sep):]
        relative_testcase_path = relative_testcase_path.replace('\\', '/')
        testcase_http_url = 'http://%s:%d/%s' % (http_ip, http_port_1,
                                                 relative_testcase_path)

        if needs_http:
            # TODO(unassigned): Support https.
            testcase_file_url = testcase_http_url
            testcase_path = testcase_http_url

    # Compose app arguments.
    all_app_args = ''

    if user_profile_argument:
        all_app_args += ' %s=%s' % (user_profile_argument,
                                    user_profile_directory)
    if extension_argument and EXTENSIONS_PREFIX in testcase_filename:
        all_app_args += ' %s=%s' % (extension_argument, testcase_directory)
    if apps_argument and APPS_PREFIX in testcase_filename:
        all_app_args += ' %s=%s' % (apps_argument, testcase_directory)
    if window_argument:
        all_app_args += ' %s' % window_argument
    if additional_command_line_flags:
        all_app_args += ' %s' % additional_command_line_flags.strip()
    if app_args:
        all_app_args += ' %s' % app_args.strip()
    # Append %TESTCASE% at the end if no testcase pattern is found in the app
    # arguments.
    if (app_args_append_testcase and not utils.sub_string_exists_in(
            ['%TESTCASE%', '%TESTCASE_FILE_URL%', '%TESTCASE_HTTP_URL%'],
            all_app_args)):
        all_app_args += ' %TESTCASE%'
    all_app_args = all_app_args.strip()

    # Build the actual command to run now.
    if debugger:
        command += '%s ' % debugger
    if app_path:
        command += app_path
    if all_app_args:
        command += ' %s' % all_app_args
    command = command.replace('%APP_DIR%', app_directory)
    command = command.replace('%CRASH_STACKTRACES_DIR%',
                              crash_stacks_directory)
    command = command.replace('%DEVICE_TESTCASES_DIR%',
                              device_testcases_directory)
    command = command.replace('%FUZZER_DIR%', fuzzer_directory)
    command = command.replace('%INPUT_DIR%', input_directory)
    command = command.replace('%ROOT_DIR%', root_directory)
    command = command.replace('%TESTCASE%', testcase_path)
    command = command.replace('%TESTCASE_FILE_URL%', testcase_file_url)
    command = command.replace('%TESTCASE_HTTP_URL%', testcase_http_url)
    command = command.replace('%TMP_DIR%', temp_directory)
    command = command.replace('%USER_PROFILE_DIR%', user_profile_directory)

    # Though we attempt to pass all flags historically used to run HTML tests
    # in our content shell job types for backwards compatibility, a
    # deprecation warning in recent revisions now causes startup to fail.
    # Remove the --run-layout-test flag to avoid this.
    content_shell_app_names = [
        'content_shell', 'content_shell.exe', 'Content Shell'
    ]
    if (environment.get_value('APP_NAME') in content_shell_app_names
            and environment.get_value('APP_REVISION', 0) >= 558998):
        command = command.replace(' --run-layout-test', '')

    if plt == 'ANDROID' and not launcher:
        # Initial setup phase for command line.
        if write_command_line_file:
            android.adb.write_command_line_file(command, app_path)

        return android.app.get_launch_command(all_app_args, testcase_path,
                                              testcase_file_url)

    # Decide which directory we will run the application from. We use
    # |app_directory| since it helps locate pdbs, other dependencies, etc. in
    # the same directory.
    if os.path.exists(app_directory):
        os.chdir(app_directory)

    return str(command)
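
A minimal usage sketch, following the pattern in Example #4 below (the testcase
path is a hypothetical placeholder, and TEST_TIMEOUT is assumed to hold the run
timeout):

command = get_command_line_for_application(
    file_to_run='/fuzz-inputs/testcase.html',  # hypothetical path
    needs_http=False)
return_code, crash_time, output = process_handler.run_process(
    command,
    timeout=environment.get_value('TEST_TIMEOUT'),
    current_working_directory=environment.get_value('APP_DIR'))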
Example #3
def file_issue(testcase,
               itm,
               security_severity=None,
               user_email=None,
               additional_ccs=None):
  """File an issue for the given test case."""
  issue = Issue()
  issue.summary = data_handler.get_issue_summary(testcase)
  issue.body = data_handler.get_issue_description(
      testcase, reporter=user_email, show_reporter=True)

  # Labels applied by default across all issue trackers.
  issue.status = 'New'
  issue.add_label('ClusterFuzz')

  # Add a label for the memory tool used.
  add_memory_tool_label_if_needed(issue, testcase)

  # Add reproducibility flag label.
  if testcase.one_time_crasher_flag:
    issue.add_label('Unreproducible')
  else:
    issue.add_label('Reproducible')

  # Add security severity flag label.
  add_security_severity_label_if_needed(issue, testcase, security_severity)

  # Get view restriction rules for the job.
  issue_restrictions = data_handler.get_value_from_job_definition(
      testcase.job_type, 'ISSUE_VIEW_RESTRICTIONS', 'security')
  should_restrict_issue = (
      issue_restrictions == 'all' or
      (issue_restrictions == 'security' and testcase.security_flag))

  # Chromium-specific labels.
  if itm.project_name == 'chromium':
    # A different status system is used on the chromium tracker. Since we
    # have already reproduced the crash, we skip the Unconfirmed status.
    issue.status = 'Untriaged'

    # Add OS label.
    if environment.is_chromeos_job(testcase.job_type):
      # ChromeOS fuzzers run on the Linux platform, so use OS-Chrome for
      # correct tracking.
      issue.add_label('OS-Chrome')
    elif testcase.platform_id:
      os_label = 'OS-%s' % ((testcase.platform_id.split(':')[0]).capitalize())
      issue.add_label(os_label)

    # Add view restrictions for internal job types.
    add_view_restrictions_if_needed(issue, testcase)

    if testcase.security_flag:
      # Apply labels specific to security bugs.
      issue.add_label('Restrict-View-SecurityTeam')
      issue.add_label('Type-Bug-Security')

      # Add reward labels if this is from an external fuzzer contribution.
      fuzzer = data_types.Fuzzer.query(
          data_types.Fuzzer.name == testcase.fuzzer_name).get()
      if fuzzer and fuzzer.external_contribution:
        issue.add_label('reward-topanel')
        issue.add_label('External-Fuzzer-Contribution')

      data_handler.update_issue_impact_labels(testcase, issue)
    else:
      # Apply labels for functional (non-security) bugs.
      if utils.sub_string_exists_in(NON_CRASH_TYPES, testcase.crash_type):
        # Non-crashing test cases shouldn't be assigned Pri-1.
        issue.add_label('Pri-2')
        issue.add_label('Type-Bug')
      else:
        # Default functional bug labels.
        issue.add_label('Pri-1')
        issue.add_label('Stability-Crash')
        issue.add_label('Type-Bug')

  # AOSP-specific labels.
  elif itm.project_name == 'android':
    if testcase.security_flag:
      # Security bug labels.
      issue.add_cc('*****@*****.**')
      issue.add_label('Type-Security')
      issue.add_label('Restrict-View-Commit')
    else:
      # Functional bug labels.
      issue.add_label('Type-Defect')

  # OSS-Fuzz specific labels.
  elif itm.project_name == 'oss-fuzz':
    if testcase.security_flag:
      # Security bug labels.
      issue.add_label('Type-Bug-Security')
    else:
      # Functional bug labels.
      issue.add_label('Type-Bug')

    if should_restrict_issue:
      issue.add_label('Restrict-View-Commit')

  # Add additional labels from the job definition and fuzzer.
  additional_labels = data_handler.get_additional_values_for_variable(
      'AUTOMATIC_LABELS', testcase.job_type, testcase.fuzzer_name)
  for label in additional_labels:
    issue.add_label(label)

  # Add additional components from the job definition and fuzzer.
  automatic_components = data_handler.get_additional_values_for_variable(
      'AUTOMATIC_COMPONENTS', testcase.job_type, testcase.fuzzer_name)
  for component in automatic_components:
    issue.add_component(component)

  # Add additional ccs from the job definition and fuzzer.
  ccs = data_handler.get_additional_values_for_variable(
      'AUTOMATIC_CCS', testcase.job_type, testcase.fuzzer_name)

  # For externally contributed fuzzers, potentially cc the author.
  # Use the fully qualified fuzzer name if one is available.
  fully_qualified_fuzzer_name = (
      testcase.overridden_fuzzer_name or testcase.fuzzer_name)
  ccs += external_users.cc_users_for_fuzzer(fully_qualified_fuzzer_name,
                                            testcase.security_flag)
  ccs += external_users.cc_users_for_job(testcase.job_type,
                                         testcase.security_flag)

  # Add the user as a cc if requested, and any default ccs for this job.
  # Include caller-supplied additional ccs, skipping duplicates.
  if additional_ccs:
    ccs += [cc for cc in additional_ccs if cc not in ccs]

  # For user uploads, we assume the uploader is interested in the issue.
  if testcase.uploader_email and testcase.uploader_email not in ccs:
    ccs.append(testcase.uploader_email)

  if itm.project_name == 'oss-fuzz' and ccs:
    # Add a reported label for deadline tracking.
    issue.add_label(reported_label())

    if issue.has_label_matching('Restrict-View-Commit'):
      issue.body += '\n\n' + DEADLINE_NOTE

    issue.body += '\n\n' + FIX_NOTE
    issue.body += '\n\n' + QUESTIONS_NOTE

  for cc in ccs:
    issue.add_cc(cc)

  # Add additional labels from testcase metadata.
  metadata_labels = utils.parse_delimited(
      testcase.get_metadata('issue_labels', ''),
      delimiter=',',
      strip=True,
      remove_empty=True)
  for label in metadata_labels:
    issue.add_label(label)

  issue.itm = itm
  issue.reporter = user_email
  issue.save()

  # Update the testcase with this newly created issue.
  testcase.bug_information = str(issue.id)
  testcase.put()

  data_handler.update_group_bug(testcase.group_id)

  return issue.id
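
The `reported_label()` helper used for OSS-Fuzz deadline tracking above is not
shown; a plausible sketch, assuming it stamps the filing date (the exact label
format and the utils.utcnow() helper are assumptions):

def reported_label():
  """Return a dated label for deadline tracking, e.g. 'Reported-2019-01-31'."""
  # The 'Reported-' prefix is inferred from the deadline-tracking comment
  # above; the date helper is an assumption.
  return 'Reported-' + utils.utcnow().date().isoformat()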
Example #4
def check_for_bad_build(job_type, crash_revision):
    """Return true if the build is bad, i.e. crashes on startup."""
    # Check the bad build check flag to see if we want do this.
    if not environment.get_value('BAD_BUILD_CHECK'):
        return False

    # Create a blank command line with no file to run and no http.
    command = get_command_line_for_application(file_to_run='',
                                               needs_http=False)

    # When checking for bad builds, we use the default window size.
    # We don't want to pick a custom size since it can potentially cause a
    # startup crash, leading to the build being incorrectly flagged as bad.
    default_window_argument = environment.get_value('WINDOW_ARG', '')
    if default_window_argument:
        command = command.replace(' %s' % default_window_argument, '')

    # TSAN is slow, and boots slowly on first startup. Increase the warmup
    # timeout for this case.
    if environment.tool_matches('TSAN', job_type):
        fast_warmup_timeout = environment.get_value('WARMUP_TIMEOUT')
    else:
        fast_warmup_timeout = environment.get_value('FAST_WARMUP_TIMEOUT')

    # Initialize helper variables.
    is_bad_build = False
    build_run_console_output = ''
    app_directory = environment.get_value('APP_DIR')

    # Exit all running instances.
    process_handler.terminate_stale_application_instances()

    # Check if the build is bad.
    return_code, crash_time, output = process_handler.run_process(
        command,
        timeout=fast_warmup_timeout,
        current_working_directory=app_directory)
    crash_result = CrashResult(return_code, crash_time, output)

    # 1. Need to account for startup crashes with no crash state, e.g. failure
    #    to load a shared library. So, ignore state for comparison.
    # 2. Ignore leaks as they don't block a build from reporting regular crashes
    #    and also don't impact regression range calculations.
    if (crash_result.is_crash(ignore_state=True)
            and not crash_result.should_ignore()
            and crash_result.get_type() not in ['Direct-leak', 'Indirect-leak']):
        is_bad_build = True
        build_run_console_output = utils.get_crash_stacktrace_output(
            command, crash_result.get_stacktrace(symbolized=True),
            crash_result.get_stacktrace(symbolized=False))
        logs.log('Bad build for %s detected at r%d.' %
                 (job_type, crash_revision),
                 output=build_run_console_output)

    # Exit all running instances.
    process_handler.terminate_stale_application_instances()

    # Any of the conditions below indicate that the bot is in a bad state and
    # that it is not caused by the build itself. In that case, just exit.
    build_state = data_handler.get_build_state(job_type, crash_revision)
    if is_bad_build and utils.sub_string_exists_in(BAD_STATE_HINTS, output):
        logs.log_fatal_and_exit(
            'Bad bot environment detected, exiting.',
            output=build_run_console_output,
            snapshot=process_handler.get_runtime_snapshot())

    # If none of the other bots have added information about this build,
    # then add it now.
    if (build_state == data_types.BuildState.UNMARKED
            and not crash_result.should_ignore()):
        data_handler.add_build_metadata(job_type, crash_revision, is_bad_build,
                                        build_run_console_output)

    return is_bad_build
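
A minimal sketch of how a caller might gate further work on this check (the
surrounding task flow is an assumption):

def _process_testcase(testcase, job_type, crash_revision):
    # Hypothetical wrapper: skip processing when the build itself is bad.
    if check_for_bad_build(job_type, crash_revision):
        logs.log('Bad build detected at r%d, skipping testcase processing.' %
                 crash_revision)
        return
    # ... continue with testcase reproduction ...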