def test_syzkaller_kasan_android_with_env(self):
        """Test syzkaller kasan."""
        environment.set_value('OS_OVERRIDE', 'ANDROID_KERNEL')
        environment.set_bot_environment()
        self._real_read_data_from_file = utils.read_data_from_file
        test_helpers.patch(self, [
            'clusterfuzz._internal.platforms.android.fetch_artifact.get',
            'clusterfuzz._internal.platforms.android.kernel_utils.get_kernel_hash_and_build_id',
            'clusterfuzz._internal.platforms.android.kernel_utils.get_kernel_name',
            'clusterfuzz._internal.platforms.android.settings.get_product_brand',
            'clusterfuzz._internal.google_cloud_utils.storage.get_file_from_cache_if_exists',
            'clusterfuzz._internal.google_cloud_utils.storage.store_file_in_cache',
            'clusterfuzz._internal.base.utils.write_data_to_file',
            'clusterfuzz._internal.base.utils.read_data_from_file'
        ])
        self.mock.get.side_effect = _mock_fetch_artifact_get
        self.mock.get_kernel_hash_and_build_id.return_value = '40e9b2ff3a2', '12345'
        self.mock.get_kernel_name.return_value = 'device_kernel'
        self.mock.get_product_brand.return_value = 'google'
        self.mock.get_file_from_cache_if_exists.return_value = False
        self.mock.store_file_in_cache.return_value = None
        self.mock.write_data_to_file = None
        self.mock.read_data_from_file.side_effect = self._mock_read_data_from_file

        data = self._read_test_data('kasan_syzkaller_android.txt')
        expected_stack = self._read_test_data(
            'kasan_syzkaller_android_linkified.txt')
        actual_state = stack_analyzer.get_crash_data(data)
        self.assertEqual(actual_state.crash_stacktrace, expected_stack)
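The Android tests in this listing lean on module-level helpers that are not shown, such as _mock_fetch_artifact_get. A minimal sketch of such a stub, with a hypothetical signature since the real helper lives elsewhere in the test file:

import os

def _mock_fetch_artifact_get(bid, target, regex, output_directory,
                             output_filename_override=None):
    """Stub that pretends the kernel artifact download already succeeded."""
    del bid, target, regex  # Unused by the stub.
    path = os.path.join(output_directory,
                        output_filename_override or 'artifact')
    with open(path, 'w') as file_handle:
        file_handle.write('')  # Empty placeholder artifact.
    return path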
Example #2
    def is_still_crashing(st_index, stacktrace):
        """Check if the given stacktrace indicates that the testcase is
        still crashing."""
        state = stack_analyzer.get_crash_data(stacktrace,
                                              fuzz_target=fuzz_target_name,
                                              symbolize_flag=False,
                                              already_symbolized=True,
                                              detect_ooms_and_hangs=True)

        crash_comparer = CrashComparer(state.crash_state, testcase.crash_state)
        if not crash_comparer.is_similar():
            return False

        logs.log(f'State for trial {st_index} of {testcase_id} '
                 f'remains similar '
                 f'(old_state={testcase.crash_state}, '
                 f'new_state={state.crash_state}).')

        is_security = crash_analyzer.is_security_issue(state.crash_stacktrace,
                                                       state.crash_type,
                                                       state.crash_address)
        if is_security != testcase.security_flag:
            return False

        logs.log(f'Security flag for trial {st_index} of {testcase_id} '
                 f'still matches '
                 f'({testcase.security_flag}).')
        return True
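A minimal usage sketch for the helper above, assuming a hypothetical stacktraces list collected by the caller during re-test trials:

still_crashing = any(
    is_still_crashing(st_index, stacktrace)
    for st_index, stacktrace in enumerate(stacktraces))
if not still_crashing:
    logs.log(f'No trial for {testcase_id} still crashes.')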
Example #3
    def get_unsymbolized_data(self):
        """Compute unsymbolized crash data if necessary or return cached result."""
        if self._unsymbolized_crash_data:
            return self._unsymbolized_crash_data

        self._unsymbolized_crash_data = stack_analyzer.get_crash_data(
            self.output, symbolize_flag=False)
        return self._unsymbolized_crash_data
Example #4
    def get_symbolized_data(self) -> CrashInfo:
        """Compute symbolized crash data if necessary or return cached result."""
        if self._symbolized_crash_data:
            return self._symbolized_crash_data

        self._symbolized_crash_data = stack_analyzer.get_crash_data(
            self.output, symbolize_flag=True)
        return self._symbolized_crash_data
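Examples #3 and #4 hand-roll the same memoization pattern. On Python 3.8+, functools.cached_property expresses it more compactly; a sketch assuming self.output never changes after construction (the class name is hypothetical):

import functools

class CrashOutput:

    def __init__(self, output):
        self.output = output

    @functools.cached_property
    def unsymbolized_crash_data(self):
        # Computed on first access, then cached on the instance.
        return stack_analyzer.get_crash_data(self.output, symbolize_flag=False)

    @functools.cached_property
    def symbolized_crash_data(self):
        return stack_analyzer.get_crash_data(self.output, symbolize_flag=True)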
Example #5
    def test_parse_output_to_processed_report(self):
        """Tests that the given output parses to the expected symbolized
        stack bytes."""
        self.needs_file_delete = False
        state = stack_analyzer.get_crash_data(SAMPLE_OUTPUT_TO_PARSE)
        actual_report_bytes = crash_uploader.get_symbolized_stack_bytes(
            state.crash_type, state.crash_address, state.frames)
        with open(EXPECTED_PROCESSED_REPORT_PATH, 'rb') as expected_report:
            expected_report_bytes = expected_report.read()

        self.assertEqual(actual_report_bytes, expected_report_bytes)
Example #6
def handle_update(testcase, revision, stacktrace, error):
    """Handle update."""
    logs.log('Got external update for testcase.',
             testcase_id=testcase.key.id())
    if error:
        _mark_errored(testcase, revision, error)
        return

    last_tested_revision = (testcase.get_metadata('last_tested_revision')
                            or testcase.crash_revision)

    if revision < last_tested_revision:
        logs.log_warn(f'Revision {revision} less than previously tested '
                      f'revision {last_tested_revision}.')
        return

    fuzz_target = testcase.get_fuzz_target()
    if fuzz_target:
        fuzz_target_name = fuzz_target.binary
        # Record use of the fuzz target to avoid garbage collection (since
        # fuzz_task does not run). Guarded so a missing fuzz target does not
        # raise an AttributeError.
        data_handler.record_fuzz_target(fuzz_target.engine, fuzz_target.binary,
                                        testcase.job_type)
    else:
        fuzz_target_name = None

    state = stack_analyzer.get_crash_data(stacktrace,
                                          fuzz_target=fuzz_target_name,
                                          symbolize_flag=False,
                                          already_symbolized=True,
                                          detect_ooms_and_hangs=True)
    crash_comparer = CrashComparer(state.crash_state, testcase.crash_state)
    if not crash_comparer.is_similar():
        logs.log(f'State no longer similar ('
                 f'testcase_id={testcase.key.id()}, '
                 f'old_state={testcase.crash_state}, '
                 f'new_state={state.crash_state})')
        _mark_as_fixed(testcase, revision)
        return

    is_security = crash_analyzer.is_security_issue(state.crash_stacktrace,
                                                   state.crash_type,
                                                   state.crash_address)
    if is_security != testcase.security_flag:
        logs.log(f'Security flag for {testcase.key.id()} no longer matches.')
        _mark_as_fixed(testcase, revision)
        return

    logs.log(f'{testcase.key.id()} still crashes.')
    testcase.last_tested_crash_stacktrace = stacktrace
    data_handler.update_progression_completion_metadata(testcase,
                                                        revision,
                                                        is_crash=True)
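handle_update calls outcome helpers (_mark_as_fixed, _mark_errored) that are not shown. A hypothetical sketch of one, purely to illustrate the expected contract; the real implementation lives next to handle_update and likely updates more state:

def _mark_as_fixed(testcase, revision):
    """Close out a testcase that no longer reproduces at |revision|."""
    testcase.open = False
    testcase.fixed = f'{revision}:{revision}'  # Assumed "start:end" range format.
    testcase.put()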
Example #7
  def post(self):
    """Handle a post request."""
    if not auth.get_current_user():
      raise helpers.AccessDeniedException()

    project = request.get('project')
    fuzz_target = request.get('fuzz_target')
    stacktrace = request.get('stacktrace')

    state = stack_analyzer.get_crash_data(
        stacktrace,
        symbolize_flag=False,
        fuzz_target=fuzz_target,
        already_symbolized=True,
        detect_ooms_and_hangs=True)
    security_flag = crash_analyzer.is_security_issue(
        state.crash_stacktrace, state.crash_type, state.crash_address)

    result = {
        'state': state.crash_state,
        'type': state.crash_type,
        'security': security_flag,
    }

    duplicate_testcase = data_handler.find_testcase(
        project, state.crash_type, state.crash_state, security_flag)
    if duplicate_testcase:
      result['result'] = 'duplicate'
      result['duplicate_id'] = duplicate_testcase.key.id()

      bug_id = (
          duplicate_testcase.bug_information or
          duplicate_testcase.group_bug_information)
      if bug_id:
        result['bug_id'] = str(bug_id)
    else:
      result['result'] = 'new'

    return self.render_json(result)
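A hypothetical client call for the handler above. Only the form fields come from the snippet; the endpoint URL and auth header are assumptions:

import requests

response = requests.post(
    'https://clusterfuzz.example.com/crash-query',  # Hypothetical endpoint.
    data={
        'project': 'my-project',
        'fuzz_target': 'my_fuzzer',
        'stacktrace': open('crash.txt').read(),
    },
    headers={'Authorization': 'Bearer <token>'})
print(response.json())  # e.g. {'state': ..., 'type': ..., 'security': False, 'result': 'new'}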
Example #8
  def test_lkl_linkification(self):
    """Test lkl linkification."""
    environment.set_bot_environment()
    self._real_read_data_from_file = utils.read_data_from_file
    test_helpers.patch(self, [
        'clusterfuzz._internal.platforms.android.fetch_artifact.get',
        'clusterfuzz._internal.google_cloud_utils.storage.get_file_from_cache_if_exists',
        'clusterfuzz._internal.google_cloud_utils.storage.store_file_in_cache',
        'clusterfuzz._internal.base.utils.write_data_to_file',
        'clusterfuzz._internal.base.utils.read_data_from_file'
    ])
    self.mock.get.side_effect = _mock_fetch_artifact_get
    self.mock.get_file_from_cache_if_exists.return_value = False
    self.mock.store_file_in_cache.return_value = None
    self.mock.write_data_to_file = None
    self.mock.read_data_from_file.side_effect = self._mock_read_data_from_file

    data = self._read_test_data('lkl_libfuzzer_symbolized.txt')
    expected_stack = self._read_test_data(
        'lkl_libfuzzer_symbolized_linkified.txt')
    actual_state = stack_analyzer.get_crash_data(data)
    self.assertEqual(actual_state.crash_stacktrace, expected_stack)
Example #9
    def do_post(self):
        """Upload a testcase."""
        email = helpers.get_user_email()
        testcase_id = request.get('testcaseId')
        uploaded_file = self.get_upload()
        if testcase_id and not uploaded_file:
            testcase = helpers.get_testcase(testcase_id)
            if not access.can_user_access_testcase(testcase):
                raise helpers.AccessDeniedException()

            # Use minimized testcase for upload (if available).
            key = (testcase.minimized_keys if testcase.minimized_keys
                   and testcase.minimized_keys != 'NA' else
                   testcase.fuzzed_keys)

            uploaded_file = blobs.get_blob_info(key)

            # Extract filename part from blob.
            uploaded_file.filename = os.path.basename(
                uploaded_file.filename.replace('\\', os.sep))

        job_type = request.get('job')
        if not job_type:
            raise helpers.EarlyExitException('Missing job name.', 400)

        job = data_types.Job.query(data_types.Job.name == job_type).get()
        if not job:
            raise helpers.EarlyExitException('Invalid job name.', 400)

        fuzzer_name = request.get('fuzzer')
        job_type_lowercase = job_type.lower()

        for engine in fuzzing.ENGINES:
            if engine.lower() in job_type_lowercase:
                fuzzer_name = engine

        is_engine_job = fuzzer_name and environment.is_engine_fuzzer_job(
            job_type)
        target_name = request.get('target')
        if not is_engine_job and target_name:
            raise helpers.EarlyExitException(
                'Target name is not applicable to non-engine jobs (AFL, libFuzzer).',
                400)

        if is_engine_job and not target_name:
            raise helpers.EarlyExitException(
                'Missing target name for engine job (AFL, libFuzzer).', 400)

        if (target_name
                and not data_types.Fuzzer.VALID_NAME_REGEX.match(target_name)):
            raise helpers.EarlyExitException('Invalid target name.', 400)

        fully_qualified_fuzzer_name = ''
        if is_engine_job and target_name:
            if job.is_external():
                # External jobs don't run and set FuzzTarget entities as part of
                # fuzz_task. Set it here instead.
                fuzz_target = (data_handler.record_fuzz_target(
                    fuzzer_name, target_name, job_type))
                fully_qualified_fuzzer_name = fuzz_target.fully_qualified_name()
                target_name = fuzz_target.binary
            else:
                fully_qualified_fuzzer_name, target_name = find_fuzz_target(
                    fuzzer_name, target_name, job_type)

        if (not access.has_access(need_privileged_access=False,
                                  job_type=job_type,
                                  fuzzer_name=(fully_qualified_fuzzer_name
                                               or fuzzer_name))
                and not _is_uploader_allowed(email)):
            raise helpers.AccessDeniedException()

        multiple_testcases = bool(request.get('multiple'))
        http_flag = bool(request.get('http'))
        high_end_job = bool(request.get('highEnd'))
        bug_information = request.get('issue')
        crash_revision = request.get('revision')
        timeout = request.get('timeout')
        retries = request.get('retries')
        bug_summary_update_flag = bool(request.get('updateIssue'))
        quiet_flag = bool(request.get('quiet'))
        additional_arguments = request.get('args')
        app_launch_command = request.get('cmd')
        platform_id = request.get('platform')
        issue_labels = request.get('issue_labels')
        gestures = request.get('gestures') or '[]'
        stacktrace = request.get('stacktrace')

        crash_data = None
        if job.is_external():
            if not stacktrace:
                raise helpers.EarlyExitException(
                    'Stacktrace required for external jobs.', 400)

            if not crash_revision:
                raise helpers.EarlyExitException(
                    'Revision required for external jobs.', 400)

            crash_data = stack_analyzer.get_crash_data(
                stacktrace,
                fuzz_target=target_name,
                symbolize_flag=False,
                already_symbolized=True,
                detect_ooms_and_hangs=True)
        elif stacktrace:
            raise helpers.EarlyExitException(
                'Should not specify stacktrace for non-external jobs.', 400)

        testcase_metadata = request.get('metadata', {})
        if testcase_metadata:
            try:
                testcase_metadata = json.loads(testcase_metadata)
            except Exception as e:
                raise helpers.EarlyExitException('Invalid metadata JSON.',
                                                 400) from e
            if not isinstance(testcase_metadata, dict):
                raise helpers.EarlyExitException(
                    'Metadata is not a JSON object.', 400)
        if issue_labels:
            testcase_metadata['issue_labels'] = issue_labels

        try:
            gestures = ast.literal_eval(gestures)
        except Exception as e:
            raise helpers.EarlyExitException('Failed to parse gestures.',
                                             400) from e

        archive_state = 0
        bundled = False
        file_path_input = ''

        # Certain modifications such as app launch command, issue updates are only
        # allowed for privileged users.
        privileged_user = access.has_access(need_privileged_access=True)
        if not privileged_user:
            if bug_information or bug_summary_update_flag:
                raise helpers.EarlyExitException(
                    'You are not privileged to update existing issues.', 400)

            need_privileged_access = utils.string_is_true(
                data_handler.get_value_from_job_definition(
                    job_type, 'PRIVILEGED_ACCESS'))
            if need_privileged_access:
                raise helpers.EarlyExitException(
                    'You are not privileged to run this job type.', 400)

            if app_launch_command:
                raise helpers.EarlyExitException(
                    'You are not privileged to run arbitrary launch commands.',
                    400)

            if (testcase_metadata
                    and not _allow_unprivileged_metadata(testcase_metadata)):
                raise helpers.EarlyExitException(
                    'You are not privileged to set testcase metadata.', 400)

            if additional_arguments:
                raise helpers.EarlyExitException(
                    'You are not privileged to add command-line arguments.',
                    400)

            if gestures:
                raise helpers.EarlyExitException(
                    'You are not privileged to run arbitrary gestures.', 400)

        if crash_revision and crash_revision.isdigit():
            crash_revision = int(crash_revision)
        else:
            crash_revision = 0

        if bug_information == '0':  # Auto-recover from this bad input.
            bug_information = None
        if bug_information and not bug_information.isdigit():
            raise helpers.EarlyExitException('Bug is not a number.', 400)

        if not timeout:
            timeout = 0
        elif not timeout.isdigit() or timeout == '0':
            raise helpers.EarlyExitException(
                'Testcase timeout must be a number greater than 0.', 400)
        else:
            timeout = int(timeout)
            if timeout > 120:
                raise helpers.EarlyExitException(
                    'Testcase timeout may not be greater than 120 seconds.',
                    400)

        if retries:
            if retries.isdigit():
                retries = int(retries)
            else:
                retries = None

            if retries is None or retries > MAX_RETRIES:
                raise helpers.EarlyExitException(
                    'Testcase retries must be a number no greater than %d.' %
                    MAX_RETRIES, 400)
        else:
            retries = None

        job_queue = tasks.queue_for_job(job_type, is_high_end=high_end_job)

        if uploaded_file is not None:
            filename = ''.join([
                x for x in uploaded_file.filename
                if x not in ' ;/?:@&=+$,{}|<>()\\'
            ])
            key = str(uploaded_file.key())
            if archive.is_archive(filename):
                archive_state = data_types.ArchiveStatus.FUZZED
            if archive_state:
                if multiple_testcases:
                    # Create a job to unpack an archive.
                    metadata = data_types.BundledArchiveMetadata()
                    metadata.blobstore_key = key
                    metadata.timeout = timeout
                    metadata.job_queue = job_queue
                    metadata.job_type = job_type
                    metadata.http_flag = http_flag
                    metadata.archive_filename = filename
                    metadata.uploader_email = email
                    metadata.gestures = gestures
                    metadata.crash_revision = crash_revision
                    metadata.additional_arguments = additional_arguments
                    metadata.bug_information = bug_information
                    metadata.platform_id = platform_id
                    metadata.app_launch_command = app_launch_command
                    metadata.fuzzer_name = fuzzer_name
                    metadata.overridden_fuzzer_name = fully_qualified_fuzzer_name
                    metadata.fuzzer_binary_name = target_name
                    metadata.put()

                    tasks.add_task('unpack',
                                   str(metadata.key.id()),
                                   job_type,
                                   queue=tasks.queue_for_job(job_type))

                    # Create a testcase metadata object to show the user their upload.
                    upload_metadata = data_types.TestcaseUploadMetadata()
                    upload_metadata.timestamp = datetime.datetime.utcnow()
                    upload_metadata.filename = filename
                    upload_metadata.blobstore_key = key
                    upload_metadata.original_blobstore_key = key
                    upload_metadata.status = 'Pending'
                    upload_metadata.bundled = True
                    upload_metadata.uploader_email = email
                    upload_metadata.retries = retries
                    upload_metadata.bug_summary_update_flag = bug_summary_update_flag
                    upload_metadata.quiet_flag = quiet_flag
                    upload_metadata.additional_metadata_string = json.dumps(
                        testcase_metadata)
                    upload_metadata.bug_information = bug_information
                    upload_metadata.put()

                    helpers.log('Uploaded multiple testcases.',
                                helpers.VIEW_OPERATION)
                    return self.render_json({'multiple': True})

                file_path_input = guess_input_file(uploaded_file, filename)
                if not file_path_input:
                    raise helpers.EarlyExitException(
                        'Unable to detect which file to launch. The main '
                        "file's name must contain one of %s." %
                        str(RUN_FILE_PATTERNS), 400)

        else:
            raise helpers.EarlyExitException('Please select a file to upload.',
                                             400)

        testcase_id = data_handler.create_user_uploaded_testcase(
            key,
            key,
            archive_state,
            filename,
            file_path_input,
            timeout,
            job,
            job_queue,
            http_flag,
            gestures,
            additional_arguments,
            bug_information,
            crash_revision,
            email,
            platform_id,
            app_launch_command,
            fuzzer_name,
            fully_qualified_fuzzer_name,
            target_name,
            bundled,
            retries,
            bug_summary_update_flag,
            quiet_flag,
            additional_metadata=testcase_metadata,
            crash_data=crash_data)

        if not quiet_flag:
            testcase = data_handler.get_testcase_by_id(testcase_id)
            issue = issue_tracker_utils.get_issue_for_testcase(testcase)
            if issue:
                report_url = data_handler.TESTCASE_REPORT_URL.format(
                    domain=data_handler.get_domain(), testcase_id=testcase_id)

                comment = ('ClusterFuzz is analyzing your testcase. '
                           'Developers can follow the progress at %s.' %
                           report_url)
                issue.save(new_comment=comment)

        helpers.log('Uploaded testcase %s' % testcase_id,
                    helpers.VIEW_OPERATION)
        return self.render_json({'id': '%s' % testcase_id})
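For reference, a hypothetical upload request against the handler above. The field names mirror the request.get() calls in do_post; the URL and auth are assumptions:

import requests

with open('crash_input.bin', 'rb') as testcase_file:
    response = requests.post(
        'https://clusterfuzz.example.com/upload-testcase/upload',  # Hypothetical.
        data={
            'job': 'libfuzzer_asan_my_project',
            'target': 'my_fuzzer',
            'revision': '12345',
            'timeout': '30',
        },
        files={'file': testcase_file},
        headers={'Authorization': 'Bearer <token>'})
print(response.json())  # {'id': '<testcase id>'} on success.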
Example #10
    def process_bad_units(self, bad_units_path, quarantine_corpus_path,
                          crashes):
        """Process bad units found during merge."""
        # TODO(ochang): A lot of this function is similar to parts of fuzz_task.
        # Ideally fuzz_task can be refactored in a way that lets us share the common
        # code.

        environment.reset_current_memory_tool_options(
            redzone_size=DEFAULT_REDZONE)
        self.runner.process_sanitizer_options()

        logs.log('Processing bad units.')
        corpus_file_paths = _get_corpus_file_paths(bad_units_path)
        num_bad_units = 0

        # Run each corpus item individually.
        for i, unit_path in enumerate(corpus_file_paths, 1):
            if i % 100 == 0:
                logs.log('Up to %d' % i)

            unit_name = os.path.basename(unit_path)
            if unit_name.startswith(('timeout-', 'oom-')):
                # Don't waste time re-running timeout or OOM testcases.
                self._quarantine_unit(unit_path, quarantine_corpus_path)
                num_bad_units += 1
                continue

            try:
                result = self._run_single_unit(unit_path)
            except TimeoutError:
                # Slow unit. Quarantine it.
                self._quarantine_unit(unit_path, quarantine_corpus_path)
                num_bad_units += 1
                continue

            if not crash_analyzer.is_memory_tool_crash(result.output):
                # Didn't crash.
                continue

            # Get memory tool crash information.
            state = stack_analyzer.get_crash_data(result.output,
                                                  symbolize_flag=True)

            # Crashed or caused a leak. Quarantine it.
            unit_path = self._quarantine_unit(unit_path,
                                              quarantine_corpus_path)
            num_bad_units += 1

            if crash_analyzer.ignore_stacktrace(state.crash_stacktrace):
                continue

            # Local de-duplication.
            if state.crash_state not in crashes:
                security_flag = crash_analyzer.is_security_issue(
                    state.crash_stacktrace, state.crash_type,
                    state.crash_address)
                crashes[state.crash_state] = CorpusCrash(
                    state.crash_state, state.crash_type, state.crash_address,
                    state.crash_stacktrace, unit_path, security_flag)

        logs.log('Found %d bad units, %d unique crashes.' %
                 (num_bad_units, len(crashes)))
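process_bad_units stores results in a CorpusCrash record defined elsewhere in the module. A sketch of a compatible definition, with the field order inferred from the constructor call above:

import collections

CorpusCrash = collections.namedtuple('CorpusCrash', [
    'crash_state', 'crash_type', 'crash_address', 'crash_stacktrace',
    'unit_path', 'security_flag'
])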