Example #1
0
def sanitizer_substitution(label, testcase, _):
    """Sanitizer substitution.

    Expands the '%SANITIZER%' placeholder in |label| once for every memory
    tool label detected in the testcase's stacktrace, returning one label
    per detected tool.
    """
    trace = data_handler.get_stacktrace(testcase)
    substituted = []
    for tool_label in get_memory_tool_labels(trace):
        substituted.append(label.replace('%SANITIZER%', tool_label))
    return substituted
Example #2
0
def _prepare_predator_message(testcase):
    """Prepare the json sent to the Predator service for the given test case.

    Builds the Predator request payload from the testcase's crash revision and
    (when available) its regression range, then wraps it in a pubsub.Message.

    Returns:
        A pubsub.Message with the UTF-8 encoded JSON payload, or None if the
        testcase is not a valid Predator candidate or any component revisions
        lookup fails (in which case an error result is recorded on the
        testcase via _set_predator_result_with_error).
    """
    result, error_message = _is_predator_testcase(testcase)
    if not result:
        _set_predator_result_with_error(testcase, error_message)
        return None

    crash_revisions_dict, crash_revision_hash = _prepare_component_revisions_dict(
        testcase.crash_revision, testcase.job_type)
    # Do a None check since we can return {} for revision = 0.
    if crash_revisions_dict is None:
        _set_predator_result_with_error(
            testcase, 'Failed to fetch component revisions for revision %s.' %
            testcase.crash_revision)
        return None

    dependency_rolls = []
    start_revision_hash = end_revision_hash = None
    # A regression of the form "start:end" gives us a range to compute
    # dependency rolls over; anything else (e.g. 'NA') is skipped.
    if ':' in testcase.regression:
        regression_parts = testcase.regression.split(':', 1)
        start_revision = int(regression_parts[0])
        end_revision = int(regression_parts[1])

        start_revisions_dict, start_revision_hash = (
            _prepare_component_revisions_dict(start_revision,
                                              testcase.job_type))
        # Do a None check since we can return {} for revision = 0.
        if start_revisions_dict is None:
            _set_predator_result_with_error(
                testcase,
                'Failed to fetch component revisions for revision %s.' %
                start_revision)
            return None

        end_revisions_dict, end_revision_hash = (
            _prepare_component_revisions_dict(end_revision, testcase.job_type))
        # Do a None check since we can return {} for revision = 0.
        if end_revisions_dict is None:
            _set_predator_result_with_error(
                testcase,
                'Failed to fetch component revisions for revision %s.' %
                end_revision)
            return None

        # start_revision == 0 means there is no meaningful lower bound, so
        # rolls cannot be computed.
        if start_revision != 0:
            dependency_rolls = _compute_rolls(start_revisions_dict,
                                              end_revisions_dict)

    # Put the current revisions dictionary in the format predator expects.
    crash_revision_component_revisions_list = (
        _format_component_revisions_for_predator(crash_revisions_dict))

    # In addition to the start and end revisions, Predator expects the regression
    # range to include the dependency path and repository URL in the same way that
    # they would be included in the dependency rolls. Note that we do not take
    # this from the rolls dict directly as it may not be available.
    # NOTE(review): this assumes a 'src' entry is always present; [0] raises
    # IndexError otherwise — confirm against _format_component_revisions_for_predator.
    src_entry = [
        entry for entry in crash_revision_component_revisions_list
        if entry['dep_path'] == 'src'
    ][0]

    # TODO(mbarbella): This is a hack since ClusterFuzz relies on "src" as a
    # special-cased path, but this is only going to be the correct repository
    # root path some of the time. For certain cases, we must update it.
    repo_url = src_entry['repo_url']
    real_dep_path = SRC_COMPONENT_OVERRIDES.get(repo_url, 'src')
    if real_dep_path != 'src':
        # Rewrite the first 'src' entry in each list in place (the entries are
        # shared dicts, so src_entry above reflects this change too).
        for dependency_list in [
                dependency_rolls, crash_revision_component_revisions_list
        ]:
            for entry in dependency_list:
                if entry['dep_path'] == 'src':
                    entry['dep_path'] = real_dep_path
                    break

    # old/new revisions remain None when no regression range was available.
    regression_range = {
        'dep_path': real_dep_path,
        'repo_url': repo_url,
        'old_revision': start_revision_hash,
        'new_revision': end_revision_hash,
    }

    crash_stacktrace = _filter_stacktrace(
        data_handler.get_stacktrace(testcase))

    return pubsub.Message(data=json.dumps({
        'stack_trace': crash_stacktrace,
        'crash_revision': crash_revision_hash,
        'customized_data': {
            'regression_range': regression_range,
            'dependency_rolls': dependency_rolls,
            'dependencies': crash_revision_component_revisions_list,
            'crash_type': testcase.crash_type,
            'crash_address': testcase.crash_address,
            'sanitizer': environment.get_memory_tool_name(testcase.job_type),
            'security_flag': testcase.security_flag,
            'job_type': testcase.job_type,
            'testcase_id': testcase.key.id()
        },
        'platform': testcase.platform,
        'client_id': 'clusterfuzz',
        'signature': testcase.crash_state,
    }).encode('utf-8'))
Example #3
0
def execute_task(testcase_id, job_type):
    """Attempt to find if the testcase affects release branches on Chromium.

    Args:
        testcase_id: Datastore id of the testcase to analyze.
        job_type: Name of the job, used for testcase setup and impact lookups.

    Bails out early for non-Chromium deployments, already-analyzed fixed or
    unreproducible testcases, one-time crashers, and custom-binary jobs.
    Otherwise computes impacts either from the regression range (when no
    production builds are configured) or by reproducing against production
    builds, then stores them on the testcase.
    """
    # This shouldn't ever get scheduled, but check just in case.
    if not utils.is_chromium():
        return

    # Locate the testcase associated with the id.
    testcase = data_handler.get_testcase_by_id(testcase_id)

    # If this testcase is fixed, we should no longer be doing impact testing.
    if testcase.fixed and testcase.is_impact_set_flag:
        return

    # For testcases with status unreproducible, we just do impact analysis just
    # once.
    if testcase.is_status_unreproducible() and testcase.is_impact_set_flag:
        return

    # Update comments only after checking the above bailout conditions.
    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.STARTED)

    # This task is not applicable to unreproducible testcases.
    if testcase.one_time_crasher_flag:
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.ERROR,
            'Not applicable for unreproducible testcases')
        return

    # This task is not applicable for custom binaries. We cannot remove the
    # creation of such tasks specifically for custom binary testcase in cron,
    # so exit gracefully.
    if build_manager.is_custom_binary():
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.FINISHED,
            'Not applicable for custom binaries')
        return

    # If we don't have a stable or beta build url pattern, we try to use build
    # information url to make a guess.
    if not build_manager.has_production_builds():
        if not testcase.regression:
            data_handler.update_testcase_comment(
                testcase, data_types.TaskState.FINISHED,
                'Cannot run without regression range, will re-run once regression '
                'task finishes')
            return

        impacts = get_impacts_from_url(testcase.regression, testcase.job_type)
        # Re-fetch the entity to avoid clobbering updates made elsewhere
        # while impacts were being computed.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        set_testcase_with_impacts(testcase, impacts)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.FINISHED)
        return

    # Setup testcase and its dependencies.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
    if not file_list:
        return

    # Setup extended stable, stable, beta builds
    # and get impact and crash stacktrace.
    try:
        impacts = get_impacts_on_prod_builds(testcase, testcase_file_path)
    except BuildFailedException as error:
        # Record the failure and reschedule the task after a delay.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             str(error))
        tasks.add_task('impact',
                       testcase_id,
                       job_type,
                       wait_time=environment.get_value('FAIL_WAIT'))
        return

    testcase = data_handler.get_testcase_by_id(testcase_id)
    set_testcase_with_impacts(testcase, impacts)

    # Set stacktrace in case we have a unreproducible crash on trunk,
    # but it crashes on one of the production builds.
    if testcase.is_status_unreproducible() and impacts.get_extra_trace():
        testcase.crash_stacktrace = data_handler.filter_stacktrace(
            '%s\n\n%s' %
            (data_handler.get_stacktrace(testcase), impacts.get_extra_trace()))

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.FINISHED)
Example #4
0
def get_testcase_detail(testcase):
    """Get testcase detail for rendering the testcase detail page.

    Args:
        testcase: The testcase entity to render.

    Returns:
        A dict of template values: crash parameters, formatted stacktraces,
        regression/fixed revision ranges, pending-task flags, severity,
        auto-close/delete timestamps, and various display helpers.
    """
    config = db_config.get()
    crash_address = testcase.crash_address
    crash_state = testcase.crash_state
    crash_state_lines = crash_state.strip().splitlines()
    crash_type = data_handler.get_crash_type_string(testcase)
    external_user = not access.has_access(job_type=testcase.job_type)
    issue_url = issue_tracker_utils.get_issue_url(testcase)
    metadata = testcase.get_metadata()
    original_testcase_size = _get_blob_size_string(testcase.fuzzed_keys)
    minimized_testcase_size = _get_blob_size_string(testcase.minimized_keys)
    has_issue_tracker = bool(data_handler.get_issue_tracker_name())

    fuzzer_display = data_handler.get_fuzzer_display(testcase)

    formatted_reproduction_help = _format_reproduction_help(
        data_handler.get_formatted_reproduction_help(testcase))
    # When we have a HELP_TEMPLATE, ignore any default values set for HELP_URL.
    if not formatted_reproduction_help:
        reproduction_help_url = data_handler.get_reproduction_help_url(
            testcase, config)
    else:
        reproduction_help_url = None

    # Regression display: pending, not applicable, or a revision range link.
    if not testcase.regression:
        regression = 'Pending'
    elif testcase.regression == 'NA':
        regression = 'NA'
    else:
        regression = _get_revision_range_html_from_string(
            testcase.job_type, testcase.platform_id, testcase.regression)

    # Fixed display: fixed_full holds the revision range markup only when
    # testcase.fixed is an actual range (not 'Yes'/'NA'/empty).
    fixed_full = None
    if 'progression_pending' in metadata:
        fixed = 'Pending'
    elif not testcase.fixed:
        fixed = 'NO'
    elif testcase.fixed == 'NA':
        fixed = 'NA'
    elif testcase.fixed == 'Yes':
        fixed = 'YES'
    else:
        fixed = 'YES'
        fixed_full = _get_revision_range_html_from_string(
            testcase.job_type, testcase.platform_id, testcase.fixed)

    last_tested = None
    last_tested_revision = (metadata.get('last_tested_revision')
                            or testcase.crash_revision)
    if last_tested_revision:
        last_tested = _get_revision_range_html(testcase.job_type,
                                               testcase.platform_id,
                                               last_tested_revision)

    crash_revision = testcase.crash_revision
    crash_revisions_dict = revisions.get_component_revisions_dict(
        crash_revision, testcase.job_type, platform_id=testcase.platform_id)
    crash_stacktrace = data_handler.get_stacktrace(testcase)
    crash_stacktrace = filter_stacktrace(crash_stacktrace, testcase.crash_type,
                                         crash_revisions_dict,
                                         testcase.platform, testcase.job_type)
    crash_stacktrace = convert_to_lines(crash_stacktrace, crash_state_lines,
                                        crash_type)

    last_tested_crash_revision = metadata.get('last_tested_crash_revision')
    last_tested_crash_revisions_dict = revisions.get_component_revisions_dict(
        last_tested_crash_revision,
        testcase.job_type,
        platform_id=testcase.platform_id)
    last_tested_crash_stacktrace = data_handler.get_stacktrace(
        testcase, stack_attribute='last_tested_crash_stacktrace')
    last_tested_crash_stacktrace = filter_stacktrace(
        last_tested_crash_stacktrace, testcase.crash_type,
        last_tested_crash_revisions_dict, testcase.platform, testcase.job_type)
    last_tested_crash_stacktrace = convert_to_lines(
        last_tested_crash_stacktrace, crash_state_lines, crash_type)

    privileged_user = access.has_access(need_privileged_access=True)

    # Fix build url link. |storage.cloud.google.com| takes care of using the
    # right set of authentication credentials needed to access the link.
    if 'build_url' in metadata:
        metadata['build_url'] = metadata['build_url'].replace(
            'gs://', 'https://storage.cloud.google.com/')

    # Pending-task flags drive the page's auto-refresh behavior below.
    pending_blame_task = (testcase.has_blame() and 'blame_pending' in metadata
                          and metadata['blame_pending'])
    pending_impact_task = (testcase.has_impacts()
                           and not testcase.is_impact_set_flag)
    pending_minimize_task = not testcase.minimized_keys
    pending_progression_task = ('progression_pending' in metadata
                                and metadata['progression_pending'])
    pending_regression_task = not testcase.regression
    pending_stack_task = testcase.last_tested_crash_stacktrace == 'Pending'
    needs_refresh = (testcase.status == 'Pending' or (
        (testcase.status == 'Processed' or testcase.status == 'Duplicate') and
        (pending_blame_task or pending_impact_task or pending_minimize_task
         or pending_progression_task or pending_regression_task
         or pending_stack_task)))

    if data_types.SecuritySeverity.is_valid(testcase.security_severity):
        security_severity = severity_analyzer.severity_to_string(
            testcase.security_severity)
    else:
        security_severity = None

    auto_delete_timestamp = None
    auto_close_timestamp = None

    if testcase.one_time_crasher_flag:
        last_crash_time = (crash_stats.get_last_crash_time(testcase)
                           or testcase.timestamp)

        # Set auto-delete timestamp for unreproducible testcases with
        # no associated bug.
        if not testcase.bug_information:
            auto_delete_timestamp = utils.utc_datetime_to_timestamp(
                last_crash_time + datetime.timedelta(
                    days=data_types.UNREPRODUCIBLE_TESTCASE_NO_BUG_DEADLINE))

        # Set auto-close timestamp for unreproducible testcases with
        # an associated bug.
        if testcase.open and testcase.bug_information:
            auto_close_timestamp = utils.utc_datetime_to_timestamp(
                last_crash_time + datetime.timedelta(
                    days=data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE))

    # Display string has the form 'label: value' (e.g. a sanitizer name).
    memory_tool_display_string = environment.get_memory_tool_display_string(
        testcase.job_type)
    memory_tool_display_label = memory_tool_display_string.split(':')[0]
    memory_tool_display_value = memory_tool_display_string.split(
        ':')[1].strip()

    helpers.log('Testcase %s' % testcase.key.id(), helpers.VIEW_OPERATION)
    return {
        'id': testcase.key.id(),
        'crash_type': crash_type,
        'crash_address': crash_address,
        'crash_state': crash_state,  # Used by reproduce tool.
        'crash_state_lines': crash_state_lines,
        'crash_revision': testcase.crash_revision,
        'csrf_token': form.generate_csrf_token(),
        'external_user': external_user,
        'footer': testcase.comments,
        'formatted_reproduction_help': formatted_reproduction_help,
        'fixed': fixed,
        'fixed_full': fixed_full,
        'issue_url': issue_url,
        'is_admin': auth.is_current_user_admin(),
        'metadata': metadata,
        'minimized_testcase_size': minimized_testcase_size,
        'needs_refresh': needs_refresh,
        'original_testcase_size': original_testcase_size,
        'privileged_user': privileged_user,
        'regression': regression,
        'crash_stacktrace': {
            'lines':
            crash_stacktrace,
            'revision':
            revisions.get_real_revision(crash_revision,
                                        testcase.job_type,
                                        display=True,
                                        platform_id=testcase.platform_id)
        },
        'last_tested_crash_stacktrace': {
            'lines':
            last_tested_crash_stacktrace,
            'revision':
            revisions.get_real_revision(last_tested_crash_revision,
                                        testcase.job_type,
                                        display=True,
                                        platform_id=testcase.platform_id)
        },
        'security_severity': security_severity,
        'security_severities': data_types.SecuritySeverity.list(),
        'stats': {
            'min_hour': crash_stats.get_min_hour(),
            'max_hour': crash_stats.get_max_hour(),
        },
        'suspected_cls': _parse_suspected_cls(metadata.get('predator_result')),
        'testcase': testcase,
        'timestamp': utils.utc_datetime_to_timestamp(testcase.timestamp),
        'show_blame': testcase.has_blame(),
        'show_impact': testcase.has_impacts(),
        'impacts_production': testcase.impacts_production(),
        'find_similar_issues_options': FIND_SIMILAR_ISSUES_OPTIONS,
        'auto_delete_timestamp': auto_delete_timestamp,
        'auto_close_timestamp': auto_close_timestamp,
        'memory_tool_display_label': memory_tool_display_label,
        'memory_tool_display_value': memory_tool_display_value,
        'last_tested': last_tested,
        'is_admin_or_not_oss_fuzz': is_admin_or_not_oss_fuzz(),
        'has_issue_tracker': has_issue_tracker,
        'reproduction_help_url': reproduction_help_url,
        'is_local_development':
        environment.is_running_on_app_engine_development(),
        'fuzzer_display': fuzzer_display._asdict(),
    }
Example #5
0
def execute_task(testcase_id, job_type):
    """Execute a symbolize command.

    Re-runs the testcase against symbolized builds to obtain a better
    stacktrace, optionally searching larger ASAN redzones for a more
    accurate crash type/state, then updates the testcase entity.

    Args:
        testcase_id: Datastore id of the testcase to symbolize.
        job_type: Job name used for testcase setup and tool matching.

    Fixes vs. previous revision:
      - The ASAN redzone-search loop now actually applies the loop's
        |redzone| value (it previously passed |testcase.redzone| every
        iteration, so the halving loop re-tested the same configuration).
      - |redzone| is halved with integer division so redzone sizes stay
        integral (true division produced floats in Python 3).
    """
    # Locate the testcase associated with the id.
    testcase = data_handler.get_testcase_by_id(testcase_id)

    # We should atleast have a symbolized debug or release build.
    if not build_manager.has_symbolized_builds():
        return

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.STARTED)

    # Setup testcase and its dependencies.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
    if not file_list:
        return

    # Initialize variables.
    build_fail_wait = environment.get_value('FAIL_WAIT')

    old_crash_stacktrace = data_handler.get_stacktrace(testcase)
    sym_crash_type = testcase.crash_type
    sym_crash_address = testcase.crash_address
    sym_crash_state = testcase.crash_state
    sym_redzone = DEFAULT_REDZONE
    warmup_timeout = environment.get_value('WARMUP_TIMEOUT')

    # Decide which build revision to use.
    if testcase.crash_stacktrace == 'Pending':
        # This usually happen when someone clicked the 'Update stacktrace from
        # trunk' button on the testcase details page. In this case, we are forced
        # to use trunk. No revision -> trunk build.
        build_revision = None
    else:
        build_revision = testcase.crash_revision

    # Set up a custom or regular build based on revision.
    build_manager.setup_build(build_revision)

    # Get crash revision used in setting up build.
    crash_revision = environment.get_value('APP_REVISION')

    if not build_manager.check_app_path():
        # Build setup failed; record the error and retry after a delay.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             'Build setup failed')
        tasks.add_task('symbolize',
                       testcase_id,
                       job_type,
                       wait_time=build_fail_wait)
        return

    # ASAN tool settings (if the tool is used).
    # See if we can get better stacks with higher redzone sizes.
    # A UAF might actually turn out to be OOB read/write with a bigger redzone.
    if environment.tool_matches('ASAN', job_type) and testcase.security_flag:
        redzone = MAX_REDZONE
        while redzone >= MIN_REDZONE:
            # Apply the redzone size currently being probed (was previously
            # testcase.redzone, which made the halving loop a no-op).
            environment.reset_current_memory_tool_options(
                redzone_size=redzone,
                disable_ubsan=testcase.disable_ubsan)

            process_handler.terminate_stale_application_instances()
            command = testcase_manager.get_command_line_for_application(
                testcase_file_path, needs_http=testcase.http_flag)
            return_code, crash_time, output = (process_handler.run_process(
                command, timeout=warmup_timeout, gestures=testcase.gestures))
            crash_result = CrashResult(return_code, crash_time, output)

            if crash_result.is_crash() and 'AddressSanitizer' in output:
                state = crash_result.get_symbolized_data()
                security_flag = crash_result.is_security_issue()

                # Accept the new parameters only when the crash still matches
                # the original (same security flag and type) but the redzone
                # revealed a different type or state.
                if (not crash_analyzer.ignore_stacktrace(
                        state.crash_stacktrace)
                        and security_flag == testcase.security_flag
                        and state.crash_type == testcase.crash_type
                        and (state.crash_type != sym_crash_type
                             or state.crash_state != sym_crash_state)):
                    logs.log(
                        'Changing crash parameters.\nOld : %s, %s, %s' %
                        (sym_crash_type, sym_crash_address, sym_crash_state))

                    sym_crash_type = state.crash_type
                    sym_crash_address = state.crash_address
                    sym_crash_state = state.crash_state
                    sym_redzone = redzone
                    old_crash_stacktrace = state.crash_stacktrace

                    logs.log(
                        '\nNew : %s, %s, %s' %
                        (sym_crash_type, sym_crash_address, sym_crash_state))
                    break

            # Integer halving keeps redzone sizes integral (powers of two).
            redzone //= 2

    # We should have atleast a symbolized debug or a release build.
    symbolized_builds = build_manager.setup_symbolized_builds(crash_revision)
    if (not symbolized_builds
            or (not build_manager.check_app_path()
                and not build_manager.check_app_path('APP_PATH_DEBUG'))):
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             'Build setup failed')
        tasks.add_task('symbolize',
                       testcase_id,
                       job_type,
                       wait_time=build_fail_wait)
        return

    # Increase malloc_context_size to get all stack frames. Default is 30.
    environment.reset_current_memory_tool_options(
        redzone_size=sym_redzone,
        malloc_context_size=STACK_FRAME_COUNT,
        symbolize_inline_frames=True,
        disable_ubsan=testcase.disable_ubsan)

    # TSAN tool settings (if the tool is used).
    if environment.tool_matches('TSAN', job_type):
        environment.set_tsan_max_history_size()

    # Do the symbolization if supported by this application.
    result, sym_crash_stacktrace = (get_symbolized_stacktraces(
        testcase_file_path, testcase, old_crash_stacktrace, sym_crash_state))

    # Update crash parameters.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    testcase.crash_type = sym_crash_type
    testcase.crash_address = sym_crash_address
    testcase.crash_state = sym_crash_state
    testcase.crash_stacktrace = (
        data_handler.filter_stacktrace(sym_crash_stacktrace))

    if not result:
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.ERROR,
            'Unable to reproduce crash, skipping '
            'stacktrace update')
    else:
        # Switch build url to use the less-optimized symbolized build with better
        # stacktrace.
        build_url = environment.get_value('BUILD_URL')
        if build_url:
            testcase.set_metadata('build_url',
                                  build_url,
                                  update_testcase=False)

        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.FINISHED)

    testcase.symbolized = True
    testcase.crash_revision = crash_revision
    testcase.put()

    # We might have updated the crash state. See if we need to marked as duplicate
    # based on other testcases.
    data_handler.handle_duplicate_entry(testcase)

    task_creation.create_blame_task_if_needed(testcase)

    # Switch current directory before builds cleanup.
    root_directory = environment.get_value('ROOT_DIR')
    os.chdir(root_directory)

    # Cleanup symbolized builds which are space-heavy.
    symbolized_builds.delete()