Example #1
def create_variant_tasks_if_needed(testcase):
  """Creates a variant task if needed."""
  if testcase.duplicate_of:
    # If another testcase exists with the same params, there is no need to
    # spend cycles on calculating variants again.
    return

  testcase_id = testcase.key.id()
  project = data_handler.get_project_name(testcase.job_type)
  jobs = data_types.Job.query(data_types.Job.project == project)
  for job in jobs:
    # The variant needs to be tested with a different job type than ours.
    job_type = job.name
    if testcase.job_type == job_type:
      continue

    # Don't try to reproduce engine fuzzer testcases with blackbox fuzzer
    # jobs and vice versa.
    if (environment.is_engine_fuzzer_job(testcase.job_type) !=
        environment.is_engine_fuzzer_job(job_type)):
      continue

    # Skip experimental jobs.
    job_environment = job.get_environment()
    if utils.string_is_true(job_environment.get('EXPERIMENTAL')):
      continue

    queue = tasks.queue_for_platform(job.platform)
    tasks.add_task('variant', testcase_id, job_type, queue)

    variant = data_handler.get_testcase_variant(testcase_id, job_type)
    variant.status = data_types.TestcaseVariantStatus.PENDING
    variant.put()
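
The selection logic above reduces to a pure filter over jobs. Below is a minimal, self-contained sketch of that filter (not ClusterFuzz code: the Job tuple and the is_engine_fuzzer predicate are stand-ins for the real datastore entities and environment helper, and the EXPERIMENTAL check is simplified):

from collections import namedtuple

Job = namedtuple('Job', ['name', 'environment'])

def eligible_variant_jobs(testcase_job, jobs, is_engine_fuzzer):
    """Yield the job names a variant task should be scheduled against."""
    for job in jobs:
        if job.name == testcase_job:
            continue  # the variant must run under a different job type
        if is_engine_fuzzer(testcase_job) != is_engine_fuzzer(job.name):
            continue  # never mix engine and blackbox fuzzer jobs
        if job.environment.get('EXPERIMENTAL') == 'True':
            continue  # skip experimental jobs
        yield job.name

jobs = [Job('libfuzzer_asan', {}), Job('libfuzzer_msan', {'EXPERIMENTAL': 'True'})]
print(list(eligible_variant_jobs('afl_asan', jobs, lambda name: True)))
# -> ['libfuzzer_asan']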
Example #2
def _get_variant_testcase_for_job(testcase, job_type):
    """Return a testcase entity for variant task use. This changes the fuzz
  target params for a particular fuzzing engine."""
    if testcase.job_type == job_type:
        # Update-stacktrace operation on the same testcase.
        return testcase

    if not environment.is_engine_fuzzer_job(testcase.job_type):
        # For blackbox fuzzer testcases, no change of fuzzer is required.
        return testcase

    engine_name = environment.get_engine_for_job(job_type)
    project = data_handler.get_project_name(job_type)
    binary_name = testcase.get_metadata('fuzzer_binary_name')
    fully_qualified_fuzzer_name = data_types.fuzz_target_fully_qualified_name(
        engine_name, project, binary_name)

    variant_testcase = data_types.clone_entity(testcase)
    variant_testcase.key = testcase.key
    variant_testcase.fuzzer_name = engine_name
    variant_testcase.overridden_fuzzer_name = fully_qualified_fuzzer_name
    variant_testcase.job_type = job_type

    # Replace put() with a no-op so this clone is never written back to the
    # datastore. DO NOT REMOVE THIS.
    variant_testcase.put = lambda: None

    return variant_testcase
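
The key trick above is instance-level method shadowing: assigning a lambda to variant_testcase.put hides the class's put() for that one object. A minimal sketch with a hypothetical Entity class showing why the clone can never be written back:

import copy

class Entity:
    def __init__(self, job_type):
        self.job_type = job_type
        self.saved = False

    def put(self):
        self.saved = True  # stand-in for a datastore write

original = Entity('afl_asan')
variant = copy.copy(original)       # shallow clone, like clone_entity()
variant.job_type = 'libfuzzer_asan'
variant.put = lambda: None          # shadow put() on this instance only

variant.put()                       # no-op; nothing is "written"
original.put()                      # the original still writes normally
print(original.saved, variant.saved)  # -> True False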
Example #3
def initialize_device():
    """Prepares android device for app install."""
    if environment.is_engine_fuzzer_job() or environment.is_kernel_fuzzer_job():
        # These steps are not applicable to libFuzzer and syzkaller jobs and
        # can brick a device when trying to configure device build settings.
        return

    adb.setup_adb()

    # General device configuration settings.
    configure_system_build_properties()
    configure_device_settings()
    add_test_accounts_if_needed()

    # Setup AddressSanitizer if needed.
    sanitizer.setup_asan_if_needed()

    # Reboot the device since the above steps require it; a reboot also
    # brings the device into a good state.
    reboot()

    # Make sure we are running as root after restart.
    adb.run_as_root()

    # Other configuration tasks (only to be done after reboot).
    wifi.configure()
    setup_host_and_device_forwarder_if_needed()
    settings.change_se_linux_to_permissive_mode()
    app.wait_until_optimization_complete()
    ui.clear_notifications()
    ui.unlock_screen()
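
A schematic sketch of the ordering constraint in initialize_device(), with plain callables standing in for the real adb/settings helpers; the point is that some steps are only safe once the reboot has happened:

def initialize(is_engine_or_kernel_job, pre_reboot_steps, reboot,
               post_reboot_steps):
    """Run device setup steps in order; some are only valid after a reboot."""
    if is_engine_or_kernel_job:
        return  # bail out early; these steps could brick the device
    for step in pre_reboot_steps:   # e.g. build properties, settings, ASan
        step()
    reboot()                        # required by the steps above
    for step in post_reboot_steps:  # e.g. wifi, SELinux mode, screen unlock
        step()

trace = []
initialize(False,
           [lambda: trace.append('configure')],
           lambda: trace.append('reboot'),
           [lambda: trace.append('unlock')])
print(trace)  # -> ['configure', 'reboot', 'unlock']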
Example #4
def _setup_x():
    """Start Xvfb and blackbox before running the test application."""
    if environment.platform() != 'LINUX':
        return []

    if environment.is_engine_fuzzer_job():
        # For engine fuzzer jobs like AFL and libFuzzer, Xvfb is not needed
        # since those fuzz targets do not need a UI.
        return []

    environment.set_value('DISPLAY', DISPLAY)

    print('Creating virtual display...')
    xvfb_runner = new_process.ProcessRunner('/usr/bin/Xvfb')
    xvfb_process = xvfb_runner.run(additional_args=[
        DISPLAY, '-screen', '0', '1280x1024x24', '-ac', '-nolisten', 'tcp'
    ])
    time.sleep(PROCESS_START_WAIT_SECONDS)

    blackbox_runner = new_process.ProcessRunner('/usr/bin/blackbox')
    blackbox_process = blackbox_runner.run()
    time.sleep(PROCESS_START_WAIT_SECONDS)

    # Return all handles we create so they can be terminated properly at exit.
    return [xvfb_process, blackbox_process]
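
A stripped-down sketch of the same Xvfb launch using subprocess from the standard library directly; it assumes Xvfb is installed at /usr/bin/Xvfb, and the DISPLAY value and wait time here are placeholders:

import subprocess
import time

DISPLAY = ':99'                 # hypothetical virtual display number
PROCESS_START_WAIT_SECONDS = 5  # assumed warm-up time

def setup_x():
    """Start Xvfb on a virtual display; return handles for cleanup at exit."""
    xvfb = subprocess.Popen([
        '/usr/bin/Xvfb', DISPLAY, '-screen', '0', '1280x1024x24',
        '-ac', '-nolisten', 'tcp'
    ])
    time.sleep(PROCESS_START_WAIT_SECONDS)
    return [xvfb]  # callers should terminate() these handles at exit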
Example #5
def start_web_server_if_needed():
  """Start web server for blackbox fuzzer jobs (non-engine fuzzer jobs)."""
  if environment.is_engine_fuzzer_job():
    return

  try:
    http_server.start()
  except Exception:
    logs.log_error('Failed to start web server, skipping.')
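
A self-contained analogue of the fail-soft startup above, using the standard library's http.server in place of the internal http_server module; the port and request handler are placeholders:

import http.server
import logging
import threading

def start_web_server_if_needed(is_engine_fuzzer_job, port=8000):
    """Start a local web server for blackbox fuzzer jobs; never raise."""
    if is_engine_fuzzer_job:
        return None
    try:
        server = http.server.ThreadingHTTPServer(
            ('127.0.0.1', port), http.server.SimpleHTTPRequestHandler)
        threading.Thread(target=server.serve_forever, daemon=True).start()
        return server
    except Exception:
        # Mirror the fail-soft behavior: log the error and continue.
        logging.exception('Failed to start web server, skipping.')
        return None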
Example #6
def update_job_weights():
    """Update job weights."""
    for job in data_types.Job.query():
        multiplier = DEFAULT_MULTIPLIER
        if environment.is_engine_fuzzer_job(job.name):
            targets_count = ndb.Key(data_types.FuzzTargetsCount,
                                    job.name).get()
            # If the count is 0, it may be due to a bad build or some other issue. Use
            # the default weight in that case to allow for recovery.
            if targets_count and targets_count.count:
                multiplier = targets_count.count
                multiplier = min(multiplier, TARGET_COUNT_WEIGHT_CAP)

        update_job_weight(job.name, multiplier)
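
The weighting decision above boils down to a small pure function. A sketch with assumed constants (the real DEFAULT_MULTIPLIER and TARGET_COUNT_WEIGHT_CAP values are defined elsewhere in ClusterFuzz):

DEFAULT_MULTIPLIER = 1.0         # assumed default weight
TARGET_COUNT_WEIGHT_CAP = 100.0  # assumed cap

def weight_multiplier(is_engine_fuzzer_job, targets_count):
    """Scale an engine job's weight by its fuzz target count, capped, and
    fall back to the default when the count is missing or zero."""
    if not is_engine_fuzzer_job or not targets_count:
        return DEFAULT_MULTIPLIER
    return min(targets_count, TARGET_COUNT_WEIGHT_CAP)

print(weight_multiplier(True, 0))    # -> 1.0 (recoverable bad-build case)
print(weight_multiplier(True, 250))  # -> 100.0 (capped)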
Example #7
def execute_task(testcase_id, job_type):
    """Run a test case with a different job type to see if they reproduce."""
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    if (environment.is_engine_fuzzer_job(testcase.job_type) !=
            environment.is_engine_fuzzer_job(job_type)):
        # We should never reach here. But in case we do, we should bail out as
        # otherwise we will run into exceptions.
        return

    # Use a cloned testcase entity with different fuzz target parameters for
    # a different fuzzing engine.
    original_job_type = testcase.job_type
    testcase = _get_variant_testcase_for_job(testcase, job_type)

    # Setup testcase and its dependencies.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
    if not file_list:
        return

    # Set up a custom or regular build. We explicitly omit the crash revision
    # since we want to test against the latest build here.
    try:
        build_manager.setup_build()
    except errors.BuildNotFoundError:
        logs.log_warn('Matching build not found.')
        return

    # Check if we have an application path. If not, our build failed to set
    # up correctly.
    if not build_manager.check_app_path():
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.ERROR,
            'Build setup failed with job: ' + job_type)
        return

    # Disable gestures if we're running on a different platform from that of
    # the original test case.
    use_gestures = testcase.platform == environment.platform().lower()

    # Reproduce the crash.
    app_path = environment.get_value('APP_PATH')
    command = testcase_manager.get_command_line_for_application(
        testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
    test_timeout = environment.get_value('TEST_TIMEOUT', 10)
    revision = environment.get_value('APP_REVISION')
    result = testcase_manager.test_for_crash_with_retries(
        testcase,
        testcase_file_path,
        test_timeout,
        http_flag=testcase.http_flag,
        use_gestures=use_gestures,
        compare_crash=False)

    if result.is_crash() and not result.should_ignore():
        crash_state = result.get_state()
        crash_type = result.get_type()
        security_flag = result.is_security_issue()

        gestures = testcase.gestures if use_gestures else None
        one_time_crasher_flag = not testcase_manager.test_for_reproducibility(
            testcase.fuzzer_name, testcase.actual_fuzzer_name(),
            testcase_file_path, crash_state, security_flag, test_timeout,
            testcase.http_flag, gestures)
        if one_time_crasher_flag:
            status = data_types.TestcaseVariantStatus.FLAKY
        else:
            status = data_types.TestcaseVariantStatus.REPRODUCIBLE

        crash_comparer = CrashComparer(crash_state, testcase.crash_state)
        is_similar = (crash_comparer.is_similar()
                      and security_flag == testcase.security_flag)

        unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
        symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
        crash_stacktrace_output = utils.get_crash_stacktrace_output(
            command, symbolized_crash_stacktrace,
            unsymbolized_crash_stacktrace)
    else:
        status = data_types.TestcaseVariantStatus.UNREPRODUCIBLE
        is_similar = False
        crash_type = None
        crash_state = None
        security_flag = False
        crash_stacktrace_output = 'No crash occurred.'

    if original_job_type == job_type:
        # This case happens when someone clicks the 'Update last tested
        # stacktrace using trunk build' button.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        testcase.last_tested_crash_stacktrace = (
            data_handler.filter_stacktrace(crash_stacktrace_output))
        testcase.set_metadata('last_tested_crash_revision',
                              revision,
                              update_testcase=True)
    else:
        # Regular case of variant analysis.
        variant = data_handler.get_testcase_variant(testcase_id, job_type)
        variant.status = status
        variant.revision = revision
        variant.crash_type = crash_type
        variant.crash_state = crash_state
        variant.security_flag = security_flag
        variant.is_similar = is_similar
        variant.platform = environment.platform().lower()
        # Explicitly skip the crash stacktrace for now, as it makes entities
        # larger and we plan to use only crash parameters in the UI.
        variant.put()
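
The status and similarity decisions in execute_task() can be isolated into two pure helpers. A sketch (not ClusterFuzz code: the enum mirrors TestcaseVariantStatus in spirit, and plain equality stands in for the fuzzier CrashComparer.is_similar()):

import enum

class VariantStatus(enum.Enum):
    UNREPRODUCIBLE = 1
    FLAKY = 2
    REPRODUCIBLE = 3

def classify_variant(crashed, ignored, reproducible):
    """Map the crash test outcome to a variant status."""
    if not crashed or ignored:
        return VariantStatus.UNREPRODUCIBLE
    return VariantStatus.REPRODUCIBLE if reproducible else VariantStatus.FLAKY

def is_similar(variant_state, variant_security,
               original_state, original_security):
    """A variant is similar if both the crash state and security flag match."""
    return (variant_state == original_state and
            variant_security == original_security)

print(classify_variant(crashed=True, ignored=False, reproducible=False))
# -> VariantStatus.FLAKY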
Example #8
def process_command(task):
  """Figures out what to do with the given task and executes the command."""
  logs.log("Executing command '%s'" % task.payload())
  if not task.payload().strip():
    logs.log_error('Empty task received.')
    return

  # Parse task payload.
  task_name = task.command
  task_argument = task.argument
  job_name = task.job

  environment.set_value('TASK_NAME', task_name)
  environment.set_value('TASK_ARGUMENT', task_argument)
  environment.set_value('JOB_NAME', job_name)
  if job_name != 'none':
    job = data_types.Job.query(data_types.Job.name == job_name).get()
    # Job might be removed. In that case, we don't want an exception
    # raised and causing this task to be retried by another bot.
    if not job:
      logs.log_error("Job '%s' not found." % job_name)
      return

    if not job.platform:
      error_string = "No platform set for job '%s'" % job_name
      logs.log_error(error_string)
      raise errors.BadStateError(error_string)

    # Detect a platform misconfiguration by comparing the job's queue with
    # this bot's queue, and clean up the task if necessary.
    job_queue_suffix = tasks.queue_suffix_for_platform(job.platform)
    bot_queue_suffix = tasks.default_queue_suffix()

    if job_queue_suffix != bot_queue_suffix:
      # This happens rarely; record it as a hard error.
      logs.log_error(
          'Wrong platform for job %s: job queue [%s], bot queue [%s].' %
          (job_name, job_queue_suffix, bot_queue_suffix))

      # Try to recreate the job in the correct task queue.
      new_queue = (
          tasks.high_end_queue() if task.high_end else tasks.regular_queue())
      new_queue += job_queue_suffix

      # Command override is continuously run by a bot. If we keep failing
      # and recreating the task, it will just DoS the entire task queue.
      # So, we don't create any new tasks in that case since it needs
      # manual intervention to fix the override anyway.
      if not task.is_command_override:
        try:
          tasks.add_task(task_name, task_argument, job_name, new_queue)
        except Exception:
          # This can happen on trying to publish on a non-existent topic, e.g.
          # a topic for a high-end bot on another platform. In this case, just
          # give up.
          logs.log_error('Failed to fix platform and re-add task.')

      # Add a wait interval to avoid overflowing task creation.
      failure_wait_interval = environment.get_value('FAIL_WAIT')
      time.sleep(failure_wait_interval)
      return

    if task_name != 'fuzz':
      # Make sure that our platform id matches that of the testcase (for
      # non-fuzz tasks).
      testcase = data_handler.get_entity_by_type_and_id(data_types.Testcase,
                                                        task_argument)
      if testcase:
        current_platform_id = environment.get_platform_id()
        testcase_platform_id = testcase.platform_id

        # This indicates we are trying to run this job on the wrong platform.
        # This can happen when you have different types of devices (e.g.
        # Android) in the same platform group. In this case, we just recreate
        # the task.
        if (task_name != 'variant' and testcase_platform_id and
            not utils.fields_match(testcase_platform_id, current_platform_id)):
          logs.log(
              'Testcase %d platform (%s) does not match with ours (%s), exiting'
              % (testcase.key.id(), testcase_platform_id, current_platform_id))
          tasks.add_task(
              task_name,
              task_argument,
              job_name,
              wait_time=utils.random_number(1, TASK_RETRY_WAIT_LIMIT))
          return

    # Some fuzzers contain additional environment variables that should be
    # set for them. Append these for tests generated by these fuzzers and for
    # the fuzz command itself.
    fuzzer_name = None
    if task_name == 'fuzz':
      fuzzer_name = task_argument
    elif testcase:
      fuzzer_name = testcase.fuzzer_name

    # Get job's environment string.
    environment_string = job.get_environment_string()

    if task_name == 'minimize':
      # Let jobs specify a different job and fuzzer to minimize with.
      job_environment = job.get_environment()
      minimize_job_override = job_environment.get('MINIMIZE_JOB_OVERRIDE')
      if minimize_job_override:
        minimize_job = data_types.Job.query(
            data_types.Job.name == minimize_job_override).get()
        if minimize_job:
          environment.set_value('JOB_NAME', minimize_job_override)
          environment_string = minimize_job.get_environment_string()
          environment_string += '\nORIGINAL_JOB_NAME = %s\n' % job_name
          job_name = minimize_job_override
        else:
          logs.log_error(
              'Job for minimization not found: %s.' % minimize_job_override)
          # Fallback to using own job for minimization.

      minimize_fuzzer_override = job_environment.get('MINIMIZE_FUZZER_OVERRIDE')
      fuzzer_name = minimize_fuzzer_override or fuzzer_name

    if fuzzer_name and not environment.is_engine_fuzzer_job(job_name):
      fuzzer = data_types.Fuzzer.query(
          data_types.Fuzzer.name == fuzzer_name).get()
      additional_default_variables = ''
      additional_variables_for_job = ''
      if (fuzzer and hasattr(fuzzer, 'additional_environment_string') and
          fuzzer.additional_environment_string):
        for line in fuzzer.additional_environment_string.splitlines():
          # Job-specific values may be defined in the fuzzer's additional
          # environment string in the form job_name:VAR_NAME = VALUE.
          if '=' in line and ':' in line.split('=', 1)[0]:
            fuzzer_job_name, environment_definition = line.split(':', 1)
            if fuzzer_job_name == job_name:
              additional_variables_for_job += '\n%s' % environment_definition
            continue

          additional_default_variables += '\n%s' % line

      environment_string += additional_default_variables
      environment_string += additional_variables_for_job

    # Update environment for the job.
    update_environment_for_job(environment_string)

  # Match the cpu architecture with the ones required in the job definition.
  # If they don't match, then bail out and recreate task.
  if not is_supported_cpu_arch_for_job():
    logs.log(
        'Unsupported cpu architecture specified in job definition, exiting.')
    tasks.add_task(
        task_name,
        task_argument,
        job_name,
        wait_time=utils.random_number(1, TASK_RETRY_WAIT_LIMIT))
    return

  # Initial cleanup.
  cleanup_task_state()

  start_web_server_if_needed()

  try:
    run_command(task_name, task_argument, job_name)
  finally:
    # Final clean up.
    cleanup_task_state()
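
The 'job_name:VAR_NAME = VALUE' parsing buried in process_command() is easy to test in isolation. A sketch of that exact split logic as a pure function:

def split_additional_env(env_string, job_name):
    """Split a fuzzer's additional environment string into default lines and
    lines scoped to a single job via the job_name:VAR_NAME = VALUE form."""
    default_vars = ''
    job_vars = ''
    for line in env_string.splitlines():
        if '=' in line and ':' in line.split('=', 1)[0]:
            line_job_name, definition = line.split(':', 1)
            if line_job_name == job_name:
                job_vars += '\n%s' % definition
            continue
        default_vars += '\n%s' % line
    return default_vars, job_vars

env = 'TIMEOUT = 25\nasan_job:EXTRA_FLAG = 1'
print(split_additional_env(env, 'asan_job'))
# -> ('\nTIMEOUT = 25', '\nEXTRA_FLAG = 1')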
Example #9
def execute_task(testcase_id, job_type):
    """Run analyze task."""
    # Reset redzones.
    environment.reset_current_memory_tool_options(redzone_size=128)

    # Unset window location size and position properties so as to use default.
    environment.set_value('WINDOW_ARG', '')

    # Locate the testcase associated with the id.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.STARTED)

    metadata = data_types.TestcaseUploadMetadata.query(
        data_types.TestcaseUploadMetadata.testcase_id == int(testcase_id)).get()
    if not metadata:
        logs.log_error('Testcase %s has no associated upload metadata.' %
                       testcase_id)
        testcase.key.delete()
        return

    is_lsan_enabled = environment.get_value('LSAN')
    if is_lsan_enabled:
        # Create an empty local blacklist so all leaks will be visible to
        # the uploader.
        leak_blacklist.create_empty_local_blacklist()

    # Store the bot name and timestamp in upload metadata.
    bot_name = environment.get_value('BOT_NAME')
    metadata.bot_name = bot_name
    metadata.timestamp = datetime.datetime.utcnow()
    metadata.put()

    # Adjust the test timeout if the user has provided one.
    if metadata.timeout:
        environment.set_value('TEST_TIMEOUT', metadata.timeout)

    # Adjust the number of retries if the user has provided one.
    if metadata.retries is not None:
        environment.set_value('CRASH_RETRIES', metadata.retries)

    # Set up testcase and get absolute testcase path.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
    if not file_list:
        return

    # Set up build.
    setup_build(testcase)

    # Check if we have an application path. If not, our build failed
    # to set up correctly.
    if not build_manager.check_app_path():
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             'Build setup failed')

        if data_handler.is_first_retry_for_task(testcase):
            build_fail_wait = environment.get_value('FAIL_WAIT')
            tasks.add_task('analyze',
                           testcase_id,
                           job_type,
                           wait_time=build_fail_wait)
        else:
            data_handler.close_invalid_uploaded_testcase(
                testcase, metadata, 'Build setup failed')
        return

    # Update initial testcase information.
    testcase.absolute_path = testcase_file_path
    testcase.job_type = job_type
    testcase.binary_flag = utils.is_binary_file(testcase_file_path)
    testcase.queue = tasks.default_queue()
    testcase.crash_state = ''

    # Set initial testcase metadata fields (e.g. build url, etc).
    data_handler.set_initial_testcase_metadata(testcase)

    # Update minimized arguments and use ones provided during user upload.
    if not testcase.minimized_arguments:
        minimized_arguments = environment.get_value('APP_ARGS') or ''
        additional_command_line_flags = testcase.get_metadata(
            'uploaded_additional_args')
        if additional_command_line_flags:
            minimized_arguments += ' %s' % additional_command_line_flags
        environment.set_value('APP_ARGS', minimized_arguments)
        testcase.minimized_arguments = minimized_arguments

    # Update other fields not set at upload time.
    testcase.crash_revision = environment.get_value('APP_REVISION')
    data_handler.set_initial_testcase_metadata(testcase)
    testcase.put()

    # Initialize some variables.
    gestures = testcase.gestures
    http_flag = testcase.http_flag
    test_timeout = environment.get_value('TEST_TIMEOUT')

    # Get the crash output.
    result = testcase_manager.test_for_crash_with_retries(testcase,
                                                          testcase_file_path,
                                                          test_timeout,
                                                          http_flag=http_flag,
                                                          compare_crash=False)

    # If we don't get a crash, try enabling http to see if we can get a crash.
    # Skip engine fuzzer jobs (e.g. libFuzzer, AFL) for which http testcase paths
    # are not applicable.
    if (not result.is_crash() and not http_flag
            and not environment.is_engine_fuzzer_job()):
        result_with_http = testcase_manager.test_for_crash_with_retries(
            testcase,
            testcase_file_path,
            test_timeout,
            http_flag=True,
            compare_crash=False)
        if result_with_http.is_crash():
            logs.log('Testcase needs http flag for crash.')
            http_flag = True
            result = result_with_http

    # Refresh our object.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    # Set application command line with the correct http flag.
    application_command_line = (
        testcase_manager.get_command_line_for_application(
            testcase_file_path, needs_http=http_flag))

    # Get the crash data.
    crashed = result.is_crash()
    crash_time = result.get_crash_time()
    state = result.get_symbolized_data()
    unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)

    # Get crash info object with minidump info. Also, re-generate unsymbolized
    # stacktrace if needed.
    crash_info, _ = (crash_uploader.get_crash_info_and_stacktrace(
        application_command_line, state.crash_stacktrace, gestures))
    if crash_info:
        testcase.minidump_keys = crash_info.store_minidump()

    if not crashed:
        # Could not reproduce the crash.
        log_message = ('Testcase didn\'t crash in %d seconds (with retries)' %
                       test_timeout)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.FINISHED,
                                             log_message)

        # In the general case, we will not attempt to symbolize if we do not detect
        # a crash. For user uploads, we should symbolize anyway to provide more
        # information about what might be happening.
        crash_stacktrace_output = utils.get_crash_stacktrace_output(
            application_command_line, state.crash_stacktrace,
            unsymbolized_crash_stacktrace)
        testcase.crash_stacktrace = data_handler.filter_stacktrace(
            crash_stacktrace_output)

        # For an unreproducible testcase, retry once on another bot to confirm
        # our results and in case this bot is in a bad state which we didn't catch
        # through our usual means.
        if data_handler.is_first_retry_for_task(testcase):
            testcase.status = 'Unreproducible, retrying'
            testcase.put()

            tasks.add_task('analyze', testcase_id, job_type)
            return

        data_handler.close_invalid_uploaded_testcase(testcase, metadata,
                                                     'Unreproducible')

        # A non-reproducing testcase might still impact production branches.
        # Add the impact task to get that information.
        task_creation.create_impact_task_if_needed(testcase)
        return

    # Update testcase crash parameters.
    testcase.http_flag = http_flag
    testcase.crash_type = state.crash_type
    testcase.crash_address = state.crash_address
    testcase.crash_state = state.crash_state
    crash_stacktrace_output = utils.get_crash_stacktrace_output(
        application_command_line, state.crash_stacktrace,
        unsymbolized_crash_stacktrace)
    testcase.crash_stacktrace = data_handler.filter_stacktrace(
        crash_stacktrace_output)

    # Try to guess if the bug is security or not.
    security_flag = crash_analyzer.is_security_issue(state.crash_stacktrace,
                                                     state.crash_type,
                                                     state.crash_address)
    testcase.security_flag = security_flag

    # If it is, guess the severity.
    if security_flag:
        testcase.security_severity = severity_analyzer.get_security_severity(
            state.crash_type, state.crash_stacktrace, job_type, bool(gestures))

    log_message = ('Testcase crashed in %d seconds (r%d)' %
                   (crash_time, testcase.crash_revision))
    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.FINISHED,
                                         log_message)

    # See if we have to ignore this crash.
    if crash_analyzer.ignore_stacktrace(state.crash_stacktrace):
        data_handler.close_invalid_uploaded_testcase(testcase, metadata,
                                                     'Irrelevant')
        return

    # Test for reproducibility.
    one_time_crasher_flag = not testcase_manager.test_for_reproducibility(
        testcase.fuzzer_name, testcase.actual_fuzzer_name(),
        testcase_file_path, state.crash_state, security_flag, test_timeout,
        http_flag, gestures)
    testcase.one_time_crasher_flag = one_time_crasher_flag

    # Check to see if this is a duplicate.
    data_handler.check_uploaded_testcase_duplicate(testcase, metadata)

    # Set testcase and metadata status if not set already.
    if testcase.status == 'Duplicate':
        # For testcases uploaded by bots (with quiet flag), don't create
        # additional tasks.
        if metadata.quiet_flag:
            data_handler.close_invalid_uploaded_testcase(
                testcase, metadata, 'Duplicate')
            return
    else:
        # New testcase.
        testcase.status = 'Processed'
        metadata.status = 'Confirmed'

        # Reset the timestamp as well, to respect
        # data_types.MIN_ELAPSED_TIME_SINCE_REPORT. Otherwise it may get filed
        # by the triage task prematurely without the grouper having a chance
        # to run on this testcase.
        testcase.timestamp = utils.utcnow()

        # Add new leaks to global blacklist to avoid detecting duplicates.
        # Only add if testcase has a direct leak crash and if it's reproducible.
        if is_lsan_enabled:
            leak_blacklist.add_crash_to_global_blacklist_if_needed(testcase)

    # Update the testcase values.
    testcase.put()

    # Update the upload metadata.
    metadata.security_flag = security_flag
    metadata.put()

    _add_default_issue_metadata(testcase)

    # Create tasks to
    # 1. Minimize testcase (minimize).
    # 2. Find regression range (regression).
    # 3. Find testcase impact on production branches (impact).
    # 4. Check whether testcase is fixed (progression).
    # 5. Get second stacktrace from another job in case of
    #    one-time crashes (stack).
    task_creation.create_tasks(testcase)
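
The HTTP fallback in the middle of execute_task() is a small retry policy. A sketch of it as a standalone function, where run_test is any callable returning a truthy value on crash:

def crash_result_with_http_fallback(run_test, is_engine_fuzzer_job, http_flag):
    """Re-run the testcase over HTTP when a plain run does not crash.
    Engine fuzzer jobs never use HTTP testcase paths."""
    result = run_test(http_flag)
    if result or http_flag or is_engine_fuzzer_job:
        return result, http_flag
    result_with_http = run_test(True)
    if result_with_http:
        return result_with_http, True  # testcase needs the http flag to crash
    return result, http_flag

# This fake testcase only crashes when served over HTTP.
print(crash_result_with_http_fallback(lambda http: http, False, False))
# -> (True, True)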
Example #10
    def do_post(self):
        """Upload a testcase."""
        email = helpers.get_user_email()
        testcase_id = request.get('testcaseId')
        uploaded_file = self.get_upload()
        if testcase_id and not uploaded_file:
            testcase = helpers.get_testcase(testcase_id)
            if not access.can_user_access_testcase(testcase):
                raise helpers.AccessDeniedException()

            # Use minimized testcase for upload (if available).
            key = (testcase.minimized_keys if testcase.minimized_keys
                   and testcase.minimized_keys != 'NA' else
                   testcase.fuzzed_keys)

            uploaded_file = blobs.get_blob_info(key)

            # Extract filename part from blob.
            uploaded_file.filename = os.path.basename(
                uploaded_file.filename.replace('\\', os.sep))

        job_type = request.get('job')
        if not job_type:
            raise helpers.EarlyExitException('Missing job name.', 400)

        job = data_types.Job.query(data_types.Job.name == job_type).get()
        if not job:
            raise helpers.EarlyExitException('Invalid job name.', 400)

        fuzzer_name = request.get('fuzzer')
        job_type_lowercase = job_type.lower()

        for engine in fuzzing.ENGINES:
            if engine.lower() in job_type_lowercase:
                fuzzer_name = engine

        is_engine_job = fuzzer_name and environment.is_engine_fuzzer_job(
            job_type)
        target_name = request.get('target')
        if not is_engine_job and target_name:
            raise helpers.EarlyExitException(
                'Target name is not applicable to non-engine jobs.', 400)

        if is_engine_job and not target_name:
            raise helpers.EarlyExitException(
                'Missing target name for engine job (AFL, libFuzzer).', 400)

        if (target_name
                and not data_types.Fuzzer.VALID_NAME_REGEX.match(target_name)):
            raise helpers.EarlyExitException('Invalid target name.', 400)

        fully_qualified_fuzzer_name = ''
        if is_engine_job and target_name:
            if job.is_external():
                # External jobs don't run fuzz_task, so FuzzTarget entities
                # are not set there. Set one here instead.
                fuzz_target = (data_handler.record_fuzz_target(
                    fuzzer_name, target_name, job_type))
                fully_qualified_fuzzer_name = fuzz_target.fully_qualified_name()
                target_name = fuzz_target.binary
            else:
                fully_qualified_fuzzer_name, target_name = find_fuzz_target(
                    fuzzer_name, target_name, job_type)

        if (not access.has_access(need_privileged_access=False,
                                  job_type=job_type,
                                  fuzzer_name=(fully_qualified_fuzzer_name
                                               or fuzzer_name))
                and not _is_uploader_allowed(email)):
            raise helpers.AccessDeniedException()

        multiple_testcases = bool(request.get('multiple'))
        http_flag = bool(request.get('http'))
        high_end_job = bool(request.get('highEnd'))
        bug_information = request.get('issue')
        crash_revision = request.get('revision')
        timeout = request.get('timeout')
        retries = request.get('retries')
        bug_summary_update_flag = bool(request.get('updateIssue'))
        quiet_flag = bool(request.get('quiet'))
        additional_arguments = request.get('args')
        app_launch_command = request.get('cmd')
        platform_id = request.get('platform')
        issue_labels = request.get('issue_labels')
        gestures = request.get('gestures') or '[]'
        stacktrace = request.get('stacktrace')

        crash_data = None
        if job.is_external():
            if not stacktrace:
                raise helpers.EarlyExitException(
                    'Stacktrace required for external jobs.', 400)

            if not crash_revision:
                raise helpers.EarlyExitException(
                    'Revision required for external jobs.', 400)

            crash_data = stack_analyzer.get_crash_data(
                stacktrace,
                fuzz_target=target_name,
                symbolize_flag=False,
                already_symbolized=True,
                detect_ooms_and_hangs=True)
        elif stacktrace:
            raise helpers.EarlyExitException(
                'Should not specify stacktrace for non-external jobs.', 400)

        testcase_metadata = request.get('metadata', {})
        if testcase_metadata:
            try:
                testcase_metadata = json.loads(testcase_metadata)
            except Exception as e:
                raise helpers.EarlyExitException('Invalid metadata JSON.',
                                                 400) from e
            if not isinstance(testcase_metadata, dict):
                raise helpers.EarlyExitException(
                    'Metadata is not a JSON object.', 400)
        if issue_labels:
            testcase_metadata['issue_labels'] = issue_labels

        try:
            gestures = ast.literal_eval(gestures)
        except Exception as e:
            raise helpers.EarlyExitException('Failed to parse gestures.',
                                             400) from e

        archive_state = 0
        bundled = False
        file_path_input = ''

        # Certain modifications, such as the app launch command and issue
        # updates, are only allowed for privileged users.
        privileged_user = access.has_access(need_privileged_access=True)
        if not privileged_user:
            if bug_information or bug_summary_update_flag:
                raise helpers.EarlyExitException(
                    'You are not privileged to update existing issues.', 400)

            need_privileged_access = utils.string_is_true(
                data_handler.get_value_from_job_definition(
                    job_type, 'PRIVILEGED_ACCESS'))
            if need_privileged_access:
                raise helpers.EarlyExitException(
                    'You are not privileged to run this job type.', 400)

            if app_launch_command:
                raise helpers.EarlyExitException(
                    'You are not privileged to run arbitrary launch commands.',
                    400)

            if (testcase_metadata
                    and not _allow_unprivileged_metadata(testcase_metadata)):
                raise helpers.EarlyExitException(
                    'You are not privileged to set testcase metadata.', 400)

            if additional_arguments:
                raise helpers.EarlyExitException(
                    'You are not privileged to add command-line arguments.',
                    400)

            if gestures:
                raise helpers.EarlyExitException(
                    'You are not privileged to run arbitrary gestures.', 400)

        if crash_revision and crash_revision.isdigit():
            crash_revision = int(crash_revision)
        else:
            crash_revision = 0

        if bug_information == '0':  # Auto-recover from this bad input.
            bug_information = None
        if bug_information and not bug_information.isdigit():
            raise helpers.EarlyExitException('Bug is not a number.', 400)

        if not timeout:
            timeout = 0
        elif not timeout.isdigit() or timeout == '0':
            raise helpers.EarlyExitException(
                'Testcase timeout must be a number greater than 0.', 400)
        else:
            timeout = int(timeout)
            if timeout > 120:
                raise helpers.EarlyExitException(
                    'Testcase timeout may not be greater than 120 seconds.',
                    400)

        if retries:
            if retries.isdigit():
                retries = int(retries)
            else:
                retries = None

            if retries is None or retries > MAX_RETRIES:
                raise helpers.EarlyExitException(
                    'Testcase retries must be a number less than %d.' %
                    MAX_RETRIES, 400)
        else:
            retries = None

        job_queue = tasks.queue_for_job(job_type, is_high_end=high_end_job)

        if uploaded_file is not None:
            filename = ''.join([
                x for x in uploaded_file.filename
                if x not in ' ;/?:@&=+$,{}|<>()\\'
            ])
            key = str(uploaded_file.key())
            if archive.is_archive(filename):
                archive_state = data_types.ArchiveStatus.FUZZED
            if archive_state:
                if multiple_testcases:
                    # Create a job to unpack an archive.
                    metadata = data_types.BundledArchiveMetadata()
                    metadata.blobstore_key = key
                    metadata.timeout = timeout
                    metadata.job_queue = job_queue
                    metadata.job_type = job_type
                    metadata.http_flag = http_flag
                    metadata.archive_filename = filename
                    metadata.uploader_email = email
                    metadata.gestures = gestures
                    metadata.crash_revision = crash_revision
                    metadata.additional_arguments = additional_arguments
                    metadata.bug_information = bug_information
                    metadata.platform_id = platform_id
                    metadata.app_launch_command = app_launch_command
                    metadata.fuzzer_name = fuzzer_name
                    metadata.overridden_fuzzer_name = fully_qualified_fuzzer_name
                    metadata.fuzzer_binary_name = target_name
                    metadata.put()

                    tasks.add_task('unpack',
                                   str(metadata.key.id()),
                                   job_type,
                                   queue=tasks.queue_for_job(job_type))

                    # Create a testcase metadata object to show the user their upload.
                    upload_metadata = data_types.TestcaseUploadMetadata()
                    upload_metadata.timestamp = datetime.datetime.utcnow()
                    upload_metadata.filename = filename
                    upload_metadata.blobstore_key = key
                    upload_metadata.original_blobstore_key = key
                    upload_metadata.status = 'Pending'
                    upload_metadata.bundled = True
                    upload_metadata.uploader_email = email
                    upload_metadata.retries = retries
                    upload_metadata.bug_summary_update_flag = bug_summary_update_flag
                    upload_metadata.quiet_flag = quiet_flag
                    upload_metadata.additional_metadata_string = json.dumps(
                        testcase_metadata)
                    upload_metadata.bug_information = bug_information
                    upload_metadata.put()

                    helpers.log('Uploaded multiple testcases.',
                                helpers.VIEW_OPERATION)
                    return self.render_json({'multiple': True})

                file_path_input = guess_input_file(uploaded_file, filename)
                if not file_path_input:
                    raise helpers.EarlyExitException((
                        "Unable to detect which file to launch. The main file\'s name "
                        'must contain either of %s.' % str(RUN_FILE_PATTERNS)),
                                                     400)

        else:
            raise helpers.EarlyExitException('Please select a file to upload.',
                                             400)

        testcase_id = data_handler.create_user_uploaded_testcase(
            key,
            key,
            archive_state,
            filename,
            file_path_input,
            timeout,
            job,
            job_queue,
            http_flag,
            gestures,
            additional_arguments,
            bug_information,
            crash_revision,
            email,
            platform_id,
            app_launch_command,
            fuzzer_name,
            fully_qualified_fuzzer_name,
            target_name,
            bundled,
            retries,
            bug_summary_update_flag,
            quiet_flag,
            additional_metadata=testcase_metadata,
            crash_data=crash_data)

        if not quiet_flag:
            testcase = data_handler.get_testcase_by_id(testcase_id)
            issue = issue_tracker_utils.get_issue_for_testcase(testcase)
            if issue:
                report_url = data_handler.TESTCASE_REPORT_URL.format(
                    domain=data_handler.get_domain(), testcase_id=testcase_id)

                comment = ('ClusterFuzz is analyzing your testcase. '
                           'Developers can follow the progress at %s.' %
                           report_url)
                issue.save(new_comment=comment)

        helpers.log('Uploaded testcase %s' % testcase_id,
                    helpers.VIEW_OPERATION)
        return self.render_json({'id': '%s' % testcase_id})
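
Two of the input-validation steps in do_post() are self-contained enough to sketch directly; the blocked character set and the 120-second cap are taken from the handler above, while the function and constant names here are hypothetical:

BLOCKED_CHARS = ' ;/?:@&=+$,{}|<>()\\'
MAX_TIMEOUT = 120  # seconds, matching the handler above

def sanitize_filename(filename):
    """Drop characters that are unsafe in queue payloads and shell commands."""
    return ''.join(ch for ch in filename if ch not in BLOCKED_CHARS)

def parse_timeout(raw):
    """Validate a user-supplied timeout string the same way the handler does."""
    if not raw:
        return 0
    if not raw.isdigit() or raw == '0':
        raise ValueError('Testcase timeout must be a number greater than 0.')
    timeout = int(raw)
    if timeout > MAX_TIMEOUT:
        raise ValueError('Testcase timeout may not be greater than 120 seconds.')
    return timeout

print(sanitize_filename('my crash (1).html'))  # -> 'mycrash1.html'
print(parse_timeout('45'))                     # -> 45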