def sanitizer_substitution(label, testcase, _):
  """Sanitizer substitution."""
  stacktrace = data_handler.get_stacktrace(testcase)
  memory_tool_labels = get_memory_tool_labels(stacktrace)

  return [
      label.replace('%SANITIZER%', memory_tool)
      for memory_tool in memory_tool_labels
  ]
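# Usage sketch (hypothetical label and return value; assumes the stacktrace
# parses to an AddressSanitizer report):
#
#   sanitizer_substitution('Stability-%SANITIZER%', testcase, None)
#   # -> ['Stability-Memory-AddressSanitizer']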
def execute_task(testcase_id, job_type):
  """Attempt to find if the testcase affects release branches on Chromium."""
  # This shouldn't ever get scheduled, but check just in case.
  if not utils.is_chromium():
    return

  # Locate the testcase associated with the id.
  testcase = data_handler.get_testcase_by_id(testcase_id)

  # If this testcase is fixed, we should no longer be doing impact testing.
  if testcase.fixed and testcase.is_impact_set_flag:
    return

  # For testcases with status unreproducible, we do impact analysis just once.
  if testcase.is_status_unreproducible() and testcase.is_impact_set_flag:
    return

  # Update comments only after checking the above bailout conditions.
  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)

  # This task is not applicable to unreproducible testcases.
  if testcase.one_time_crasher_flag:
    data_handler.update_testcase_comment(
        testcase, data_types.TaskState.ERROR,
        'Not applicable for unreproducible testcases')
    return

  # This task is not applicable for custom binaries. We cannot remove the
  # creation of such tasks specifically for custom binary testcases in cron,
  # so exit gracefully.
  if build_manager.is_custom_binary():
    data_handler.update_testcase_comment(
        testcase, data_types.TaskState.FINISHED,
        'Not applicable for custom binaries')
    return

  # If we don't have a stable or beta build url pattern, we try to use the
  # build information url to make a guess.
  if not build_manager.has_production_builds():
    if not testcase.regression:
      data_handler.update_testcase_comment(
          testcase, data_types.TaskState.FINISHED,
          'Cannot run without regression range, will re-run once regression '
          'task finishes')
      return

    impacts = get_impacts_from_url(testcase.regression, testcase.job_type)
    testcase = data_handler.get_testcase_by_id(testcase_id)
    set_testcase_with_impacts(testcase, impacts)
    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.FINISHED)
    return

  # Setup testcase and its dependencies.
  file_list, _, testcase_file_path = setup.setup_testcase(testcase)
  if not file_list:
    return

  # Setup stable, beta builds and get impact and crash stacktrace.
  try:
    impacts = get_impacts_on_prod_builds(testcase, testcase_file_path)
  except BuildFailedException as error:
    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         error.message)
    tasks.add_task(
        'impact',
        testcase_id,
        job_type,
        wait_time=environment.get_value('FAIL_WAIT'))
    return

  testcase = data_handler.get_testcase_by_id(testcase_id)
  set_testcase_with_impacts(testcase, impacts)

  # Set stacktrace in case we have an unreproducible crash on trunk,
  # but it crashes on one of the production builds.
  if testcase.is_status_unreproducible() and impacts.get_extra_trace():
    testcase.crash_stacktrace = data_handler.filter_stacktrace(
        '%s\n\n%s' % (data_handler.get_stacktrace(testcase),
                      impacts.get_extra_trace()))

  data_handler.update_testcase_comment(testcase, data_types.TaskState.FINISHED)
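# Illustration of the inputs consumed above (values hypothetical):
# testcase.regression is a 'start:end' revision-range string, e.g.
# '123456:123460', and get_impacts_from_url() maps that range onto the
# stable/beta milestones it falls into:
#
#   impacts = get_impacts_from_url('123456:123460', 'linux_asan_chrome')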
def add_memory_tool_label_if_needed(issue, testcase):
  """Find memory tool used and add corresponding labels to the issue."""
  stacktrace = data_handler.get_stacktrace(testcase)
  memory_tool_labels = label_utils.get_memory_tool_labels(stacktrace)
  for label in memory_tool_labels:
    issue.add_label(label)
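# Usage sketch (hypothetical label; assumes |issue| is the issue tracker
# wrapper object used elsewhere in this module):
#
#   add_memory_tool_label_if_needed(issue, testcase)
#   # For an ASAN stacktrace this might add 'Stability-Memory-AddressSanitizer'.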
def _prepare_predator_message(testcase):
  """Prepare the json sent to the Predator service for the given test case."""
  result, error_message = _is_predator_testcase(testcase)
  if not result:
    _set_predator_result_with_error(testcase, error_message)
    return None

  crash_revisions_dict, crash_revision_hash = (
      _prepare_component_revisions_dict(testcase.crash_revision,
                                        testcase.job_type))
  # Do a None check since we can return {} for revision = 0.
  if crash_revisions_dict is None:
    _set_predator_result_with_error(
        testcase, 'Failed to fetch component revisions for revision %s.' %
        testcase.crash_revision)
    return None

  dependency_rolls = []
  start_revision_hash = end_revision_hash = None
  if ':' in testcase.regression:
    regression_parts = testcase.regression.split(':', 1)
    start_revision = int(regression_parts[0])
    end_revision = int(regression_parts[1])

    start_revisions_dict, start_revision_hash = (
        _prepare_component_revisions_dict(start_revision, testcase.job_type))
    # Do a None check since we can return {} for revision = 0.
    if start_revisions_dict is None:
      _set_predator_result_with_error(
          testcase, 'Failed to fetch component revisions for revision %s.' %
          start_revision)
      return None

    end_revisions_dict, end_revision_hash = (
        _prepare_component_revisions_dict(end_revision, testcase.job_type))
    # Do a None check since we can return {} for revision = 0.
    if end_revisions_dict is None:
      _set_predator_result_with_error(
          testcase, 'Failed to fetch component revisions for revision %s.' %
          end_revision)
      return None

    if start_revision != 0:
      dependency_rolls = _compute_rolls(start_revisions_dict,
                                        end_revisions_dict)

  # Put the current revisions dictionary in the format predator expects.
  crash_revision_component_revisions_list = (
      _format_component_revisions_for_predator(crash_revisions_dict))

  # In addition to the start and end revisions, Predator expects the
  # regression range to include the dependency path and repository URL in the
  # same way that they would be included in the dependency rolls. Note that we
  # do not take this from the rolls dict directly as it may not be available.
  src_entry = [
      entry for entry in crash_revision_component_revisions_list
      if entry['dep_path'] == 'src'
  ][0]

  # TODO(mbarbella): This is a hack since ClusterFuzz relies on "src" as a
  # special-cased path, but this is only going to be the correct repository
  # root path some of the time. For certain cases, we must update it.
  repo_url = src_entry['repo_url']
  real_dep_path = SRC_COMPONENT_OVERRIDES.get(repo_url, 'src')
  if real_dep_path != 'src':
    for dependency_list in [
        dependency_rolls, crash_revision_component_revisions_list
    ]:
      for entry in dependency_list:
        if entry['dep_path'] == 'src':
          entry['dep_path'] = real_dep_path
          break

  regression_range = {
      'dep_path': real_dep_path,
      'repo_url': repo_url,
      'old_revision': start_revision_hash,
      'new_revision': end_revision_hash,
  }

  crash_stacktrace = _filter_stacktrace(data_handler.get_stacktrace(testcase))

  return pubsub.Message(data=json.dumps({
      'stack_trace': crash_stacktrace,
      'crash_revision': crash_revision_hash,
      'customized_data': {
          'regression_range': regression_range,
          'dependency_rolls': dependency_rolls,
          'dependencies': crash_revision_component_revisions_list,
          'crash_type': testcase.crash_type,
          'crash_address': testcase.crash_address,
          'sanitizer': environment.get_memory_tool_name(testcase.job_type),
          'security_flag': testcase.security_flag,
          'job_type': testcase.job_type,
          'testcase_id': testcase.key.id()
      },
      'platform': testcase.platform,
      'client_id': 'clusterfuzz',
      'signature': testcase.crash_state,
  }).encode('utf-8'))
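# Sketch of the resulting Pub/Sub payload (field values hypothetical, shape as
# built above):
#
#   {
#     'stack_trace': '...symbolized trace...',
#     'crash_revision': 'deadbeef',
#     'customized_data': {
#       'regression_range': {'dep_path': 'src', 'repo_url': '...',
#                            'old_revision': 'abc123',
#                            'new_revision': 'def456'},
#       ...
#     },
#     'platform': 'linux',
#     'client_id': 'clusterfuzz',
#     'signature': '<crash state>',
#   }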
def get_testcase_detail(testcase):
  """Get testcase detail for rendering the testcase detail page."""
  config = db_config.get()
  crash_address = testcase.crash_address
  crash_state = testcase.crash_state
  crash_state_lines = crash_state.strip().splitlines()
  crash_type = data_handler.get_crash_type_string(testcase)
  reproduction_help_url = data_handler.get_reproduction_help_url(
      testcase, config)
  external_user = not access.has_access(job_type=testcase.job_type)
  issue_url = issue_tracker_utils.get_issue_url(testcase)
  metadata = testcase.get_metadata()
  original_testcase_size = _get_blob_size_string(testcase.fuzzed_keys)
  minimized_testcase_size = _get_blob_size_string(testcase.minimized_keys)
  has_issue_tracker = bool(data_handler.get_issue_tracker_name())

  if not testcase.regression:
    regression = 'Pending'
  elif testcase.regression == 'NA':
    regression = 'NA'
  else:
    regression = _get_revision_range_html_from_string(testcase.job_type,
                                                      testcase.regression)

  fixed_full = None
  if 'progression_pending' in metadata:
    fixed = 'Pending'
  elif not testcase.fixed:
    fixed = 'NO'
  elif testcase.fixed == 'NA':
    fixed = 'NA'
  elif testcase.fixed == 'Yes':
    fixed = 'YES'
  else:
    fixed = 'YES'
    fixed_full = _get_revision_range_html_from_string(testcase.job_type,
                                                      testcase.fixed)

  last_tested = None
  last_tested_revision = (
      metadata.get('last_tested_revision') or testcase.crash_revision)
  if last_tested_revision:
    last_tested = _get_revision_range_html(testcase.job_type,
                                           last_tested_revision)

  crash_revision = testcase.crash_revision
  crash_revisions_dict = revisions.get_component_revisions_dict(
      crash_revision, testcase.job_type)
  crash_stacktrace = data_handler.get_stacktrace(testcase)
  crash_stacktrace = filter_stacktrace(crash_stacktrace, testcase.crash_type,
                                       crash_revisions_dict)
  crash_stacktrace = convert_to_lines(crash_stacktrace, crash_state_lines,
                                      crash_type)
  crash_stacktrace_preview_lines = _preview_stacktrace(crash_stacktrace)

  second_crash_stacktrace_revision = metadata.get(
      'second_crash_stacktrace_revision')
  second_crash_stacktrace_revisions_dict = (
      revisions.get_component_revisions_dict(second_crash_stacktrace_revision,
                                             testcase.job_type))
  second_crash_stacktrace = data_handler.get_stacktrace(
      testcase, stack_attribute='second_crash_stacktrace')
  second_crash_stacktrace = filter_stacktrace(
      second_crash_stacktrace, testcase.crash_type,
      second_crash_stacktrace_revisions_dict)
  second_crash_stacktrace = convert_to_lines(second_crash_stacktrace,
                                             crash_state_lines, crash_type)
  second_crash_stacktrace_preview_lines = _preview_stacktrace(
      second_crash_stacktrace)

  last_tested_crash_revision = metadata.get('last_tested_crash_revision')
  last_tested_crash_revisions_dict = revisions.get_component_revisions_dict(
      last_tested_crash_revision, testcase.job_type)
  last_tested_crash_stacktrace = data_handler.get_stacktrace(
      testcase, stack_attribute='last_tested_crash_stacktrace')
  last_tested_crash_stacktrace = filter_stacktrace(
      last_tested_crash_stacktrace, testcase.crash_type,
      last_tested_crash_revisions_dict)
  last_tested_crash_stacktrace = convert_to_lines(last_tested_crash_stacktrace,
                                                  crash_state_lines,
                                                  crash_type)
  last_tested_crash_stacktrace_preview_lines = _preview_stacktrace(
      last_tested_crash_stacktrace)

  privileged_user = access.has_access(need_privileged_access=True)

  # Fix build url link. |storage.cloud.google.com| takes care of using the
  # right set of authentication credentials needed to access the link.
  if 'build_url' in metadata:
    metadata['build_url'] = metadata['build_url'].replace(
        'gs://', 'https://storage.cloud.google.com/')

  pending_blame_task = (
      testcase.has_blame() and 'blame_pending' in metadata and
      metadata['blame_pending'])
  pending_impact_task = (
      testcase.has_impacts() and not testcase.is_impact_set_flag)
  pending_minimize_task = not testcase.minimized_keys
  pending_progression_task = ('progression_pending' in metadata and
                              metadata['progression_pending'])
  pending_regression_task = not testcase.regression
  pending_stack_task = testcase.last_tested_crash_stacktrace == 'Pending'
  needs_refresh = (
      testcase.status == 'Pending' or
      ((testcase.status == 'Processed' or testcase.status == 'Duplicate') and
       (pending_blame_task or pending_impact_task or pending_minimize_task or
        pending_progression_task or pending_regression_task or
        pending_stack_task)))

  if data_types.SecuritySeverity.is_valid(testcase.security_severity):
    security_severity = severity_analyzer.severity_to_string(
        testcase.security_severity)
  else:
    security_severity = None

  auto_delete_timestamp = None
  auto_close_timestamp = None

  if testcase.one_time_crasher_flag:
    last_crash_time = (
        crash_stats.get_last_crash_time(testcase) or testcase.timestamp)

    # Set auto-delete timestamp for unreproducible testcases with
    # no associated bug.
    if not testcase.bug_information:
      auto_delete_timestamp = utils.utc_datetime_to_timestamp(
          last_crash_time + datetime.timedelta(
              days=data_types.UNREPRODUCIBLE_TESTCASE_NO_BUG_DEADLINE))

    # Set auto-close timestamp for unreproducible testcases with
    # an associated bug.
    if testcase.open and testcase.bug_information:
      auto_close_timestamp = utils.utc_datetime_to_timestamp(
          last_crash_time + datetime.timedelta(
              days=data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE))

  memory_tool_display_string = environment.get_memory_tool_display_string(
      testcase.job_type)
  memory_tool_display_label = memory_tool_display_string.split(':')[0]
  memory_tool_display_value = memory_tool_display_string.split(':')[1].strip()

  helpers.log('Testcase %s' % testcase.key.id(), helpers.VIEW_OPERATION)

  return {
      'id': testcase.key.id(),
      'crash_type': crash_type,
      'crash_address': crash_address,
      'crash_state': crash_state,  # Used by reproduce tool.
      'crash_state_lines': crash_state_lines,
      'crash_revision': testcase.crash_revision,
      'csrf_token': form.generate_csrf_token(),
      'external_user': external_user,
      'footer': testcase.comments,
      'fixed': fixed,
      'fixed_full': fixed_full,
      'issue_url': issue_url,
      'is_admin': auth.is_current_user_admin(),
      'metadata': metadata,
      'minimized_testcase_size': minimized_testcase_size,
      'needs_refresh': needs_refresh,
      'original_testcase_size': original_testcase_size,
      'privileged_user': privileged_user,
      'regression': regression,
      'crash_stacktrace': {
          'lines': crash_stacktrace,
          'preview_lines': crash_stacktrace_preview_lines,
          'revision': revisions.get_real_revision(
              crash_revision, testcase.job_type, display=True)
      },
      'second_crash_stacktrace': {
          'lines': second_crash_stacktrace,
          'preview_lines': second_crash_stacktrace_preview_lines,
          'revision': revisions.get_real_revision(
              second_crash_stacktrace_revision, testcase.job_type,
              display=True)
      },
      'last_tested_crash_stacktrace': {
          'lines': last_tested_crash_stacktrace,
          'preview_lines': last_tested_crash_stacktrace_preview_lines,
          'revision': revisions.get_real_revision(
              last_tested_crash_revision, testcase.job_type, display=True)
      },
      'security_severity': security_severity,
      'security_severities': data_types.SecuritySeverity.list(),
      'stats': {
          'min_hour': crash_stats.get_min_hour(),
          'max_hour': crash_stats.get_max_hour(),
      },
      'suspected_cls': _parse_suspected_cls(metadata.get('predator_result')),
      'testcase': testcase,
      'timestamp': utils.utc_datetime_to_timestamp(testcase.timestamp),
      'show_blame': testcase.has_blame(),
      'show_impact': testcase.has_impacts(),
      'impacts_production': testcase.impacts_production(),
      'find_similar_issues_options': FIND_SIMILAR_ISSUES_OPTIONS,
      'auto_delete_timestamp': auto_delete_timestamp,
      'auto_close_timestamp': auto_close_timestamp,
      'memory_tool_display_label': memory_tool_display_label,
      'memory_tool_display_value': memory_tool_display_value,
      'last_tested': last_tested,
      'is_admin_or_not_oss_fuzz': is_admin_or_not_oss_fuzz(),
      'has_issue_tracker': has_issue_tracker,
      'reproduction_help_url': reproduction_help_url,
      'is_local_development':
          environment.is_running_on_app_engine_development(),
  }
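# Illustration of the memory tool display parsing above (string format
# assumed): a display string such as 'Sanitizer: address (ASAN)' splits on the
# first ':' into label 'Sanitizer' and value 'address (ASAN)'.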
def execute_task(testcase_id, job_type):
  """Execute a symbolize command."""
  # Locate the testcase associated with the id.
  testcase = data_handler.get_testcase_by_id(testcase_id)

  # We should have at least a symbolized debug or release build.
  if not build_manager.has_symbolized_builds():
    return

  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)

  # Setup testcase and its dependencies.
  file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
  if not file_list:
    return

  # Initialize variables.
  build_fail_wait = environment.get_value("FAIL_WAIT")
  old_crash_stacktrace = data_handler.get_stacktrace(testcase)
  sym_crash_type = testcase.crash_type
  sym_crash_address = testcase.crash_address
  sym_crash_state = testcase.crash_state
  sym_redzone = DEFAULT_REDZONE
  warmup_timeout = environment.get_value("WARMUP_TIMEOUT")

  # Decide which build revision to use.
  if testcase.crash_stacktrace == "Pending":
    # This usually happens when someone clicked the 'Update stacktrace from
    # trunk' button on the testcase details page. In this case, we are forced
    # to use trunk. No revision -> trunk build.
    build_revision = None
  else:
    build_revision = testcase.crash_revision

  # Set up a custom or regular build based on revision.
  build_manager.setup_build(build_revision)

  # Get crash revision used in setting up build.
  crash_revision = environment.get_value("APP_REVISION")

  if not build_manager.check_app_path():
    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         "Build setup failed")
    tasks.add_task(
        "symbolize", testcase_id, job_type, wait_time=build_fail_wait)
    return

  # ASAN tool settings (if the tool is used).
  # See if we can get better stacks with higher redzone sizes.
  # A UAF might actually turn out to be OOB read/write with a bigger redzone.
  if environment.tool_matches("ASAN", job_type) and testcase.security_flag:
    redzone = MAX_REDZONE
    while redzone >= MIN_REDZONE:
      # Use the current loop redzone so each iteration actually tests a
      # different size.
      environment.reset_current_memory_tool_options(
          redzone_size=redzone, disable_ubsan=testcase.disable_ubsan)

      process_handler.terminate_stale_application_instances()
      command = testcase_manager.get_command_line_for_application(
          testcase_file_path, needs_http=testcase.http_flag)
      return_code, crash_time, output = process_handler.run_process(
          command, timeout=warmup_timeout, gestures=testcase.gestures)
      crash_result = CrashResult(return_code, crash_time, output)

      if crash_result.is_crash() and "AddressSanitizer" in output:
        state = crash_result.get_symbolized_data()
        security_flag = crash_result.is_security_issue()

        if (not crash_analyzer.ignore_stacktrace(state.crash_stacktrace) and
            security_flag == testcase.security_flag and
            state.crash_type == testcase.crash_type and
            (state.crash_type != sym_crash_type or
             state.crash_state != sym_crash_state)):
          logs.log("Changing crash parameters.\nOld : %s, %s, %s" %
                   (sym_crash_type, sym_crash_address, sym_crash_state))

          sym_crash_type = state.crash_type
          sym_crash_address = state.crash_address
          sym_crash_state = state.crash_state
          sym_redzone = redzone
          old_crash_stacktrace = state.crash_stacktrace

          logs.log("\nNew : %s, %s, %s" %
                   (sym_crash_type, sym_crash_address, sym_crash_state))
          break

      redzone //= 2

  # We should have at least a symbolized debug or a release build.
  symbolized_builds = build_manager.setup_symbolized_builds(crash_revision)
  if not symbolized_builds or (not build_manager.check_app_path() and
                               not build_manager.check_app_path(
                                   "APP_PATH_DEBUG")):
    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         "Build setup failed")
    tasks.add_task(
        "symbolize", testcase_id, job_type, wait_time=build_fail_wait)
    return

  # Increase malloc_context_size to get all stack frames. Default is 30.
  environment.reset_current_memory_tool_options(
      redzone_size=sym_redzone,
      malloc_context_size=STACK_FRAME_COUNT,
      symbolize_inline_frames=True,
      disable_ubsan=testcase.disable_ubsan,
  )

  # TSAN tool settings (if the tool is used).
  if environment.tool_matches("TSAN", job_type):
    environment.set_tsan_max_history_size()

  # Do the symbolization if supported by this application.
  result, sym_crash_stacktrace = get_symbolized_stacktraces(
      testcase_file_path, testcase, old_crash_stacktrace, sym_crash_state)

  # Update crash parameters.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  testcase.crash_type = sym_crash_type
  testcase.crash_address = sym_crash_address
  testcase.crash_state = sym_crash_state
  testcase.crash_stacktrace = data_handler.filter_stacktrace(
      sym_crash_stacktrace)

  if not result:
    data_handler.update_testcase_comment(
        testcase,
        data_types.TaskState.ERROR,
        "Unable to reproduce crash, skipping stacktrace update",
    )
  else:
    # Switch build url to use the less-optimized symbolized build with better
    # stacktrace.
    build_url = environment.get_value("BUILD_URL")
    if build_url:
      testcase.set_metadata("build_url", build_url, update_testcase=False)

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.FINISHED)

  testcase.symbolized = True
  testcase.crash_revision = crash_revision
  testcase.put()

  # We might have updated the crash state. See if we need to mark this as a
  # duplicate based on other testcases.
  data_handler.handle_duplicate_entry(testcase)

  task_creation.create_blame_task_if_needed(testcase)

  # Switch current directory before builds cleanup.
  root_directory = environment.get_value("ROOT_DIR")
  os.chdir(root_directory)

  # Cleanup symbolized builds which are space-heavy.
  symbolized_builds.delete()
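# Sketch of the redzone search above (bounds come from MIN_REDZONE/MAX_REDZONE;
# the concrete values here are hypothetical): starting at MAX_REDZONE and
# halving each iteration, e.g. 256 -> 128 -> 64 -> ... -> MIN_REDZONE, and
# stopping early once a run yields the same crash type and security flag but a
# changed symbolized state.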
def get_testcase_detail(testcase):
  """Get testcase detail for rendering the testcase detail page."""
  config = db_config.get()
  crash_address = testcase.crash_address
  crash_state = testcase.crash_state
  crash_state_lines = crash_state.strip().splitlines()
  crash_type = data_handler.get_crash_type_string(testcase)
  external_user = not access.has_access(job_type=testcase.job_type)
  issue_url = issue_tracker_utils.get_issue_url(testcase)
  metadata = testcase.get_metadata()
  original_testcase_size = _get_blob_size_string(testcase.fuzzed_keys)
  minimized_testcase_size = _get_blob_size_string(testcase.minimized_keys)
  has_issue_tracker = bool(data_handler.get_issue_tracker_name())
  fuzzer_display = data_handler.get_fuzzer_display(testcase)

  formatted_reproduction_help = _format_reproduction_help(
      data_handler.get_formatted_reproduction_help(testcase))
  # When we have a HELP_TEMPLATE, ignore any default values set for HELP_URL.
  if not formatted_reproduction_help:
    reproduction_help_url = data_handler.get_reproduction_help_url(
        testcase, config)
  else:
    reproduction_help_url = None

  if not testcase.regression:
    regression = "Pending"
  elif testcase.regression == "NA":
    regression = "NA"
  else:
    regression = _get_revision_range_html_from_string(testcase.job_type,
                                                      testcase.regression)

  fixed_full = None
  if "progression_pending" in metadata:
    fixed = "Pending"
  elif not testcase.fixed:
    fixed = "NO"
  elif testcase.fixed == "NA":
    fixed = "NA"
  elif testcase.fixed == "Yes":
    fixed = "YES"
  else:
    fixed = "YES"
    fixed_full = _get_revision_range_html_from_string(testcase.job_type,
                                                      testcase.fixed)

  last_tested = None
  last_tested_revision = (metadata.get("last_tested_revision") or
                          testcase.crash_revision)
  if last_tested_revision:
    last_tested = _get_revision_range_html(testcase.job_type,
                                           last_tested_revision)

  crash_revision = testcase.crash_revision
  crash_revisions_dict = revisions.get_component_revisions_dict(
      crash_revision, testcase.job_type)
  crash_stacktrace = data_handler.get_stacktrace(testcase)
  crash_stacktrace = filter_stacktrace(crash_stacktrace, testcase.crash_type,
                                       crash_revisions_dict)
  crash_stacktrace = convert_to_lines(crash_stacktrace, crash_state_lines,
                                      crash_type)
  crash_stacktrace_preview_lines = _preview_stacktrace(crash_stacktrace)

  last_tested_crash_revision = metadata.get("last_tested_crash_revision")
  last_tested_crash_revisions_dict = revisions.get_component_revisions_dict(
      last_tested_crash_revision, testcase.job_type)
  last_tested_crash_stacktrace = data_handler.get_stacktrace(
      testcase, stack_attribute="last_tested_crash_stacktrace")
  last_tested_crash_stacktrace = filter_stacktrace(
      last_tested_crash_stacktrace,
      testcase.crash_type,
      last_tested_crash_revisions_dict,
  )
  last_tested_crash_stacktrace = convert_to_lines(
      last_tested_crash_stacktrace, crash_state_lines, crash_type)
  last_tested_crash_stacktrace_preview_lines = _preview_stacktrace(
      last_tested_crash_stacktrace)

  privileged_user = access.has_access(need_privileged_access=True)

  # Fix build url link. |storage.cloud.google.com| takes care of using the
  # right set of authentication credentials needed to access the link.
if "build_url" in metadata: metadata["build_url"] = metadata["build_url"].replace( "gs://", "https://storage.cloud.google.com/") pending_blame_task = (testcase.has_blame() and "blame_pending" in metadata and metadata["blame_pending"]) pending_impact_task = testcase.has_impacts( ) and not testcase.is_impact_set_flag pending_minimize_task = not testcase.minimized_keys pending_progression_task = ("progression_pending" in metadata and metadata["progression_pending"]) pending_regression_task = not testcase.regression pending_stack_task = testcase.last_tested_crash_stacktrace == "Pending" needs_refresh = testcase.status == "Pending" or ( (testcase.status == "Processed" or testcase.status == "Duplicate") and (pending_blame_task or pending_impact_task or pending_minimize_task or pending_progression_task or pending_regression_task or pending_stack_task)) if data_types.SecuritySeverity.is_valid(testcase.security_severity): security_severity = severity_analyzer.severity_to_string( testcase.security_severity) else: security_severity = None auto_delete_timestamp = None auto_close_timestamp = None if testcase.one_time_crasher_flag: last_crash_time = (crash_stats.get_last_crash_time(testcase) or testcase.timestamp) # Set auto-delete timestamp for unreproducible testcases with # no associated bug. if not testcase.bug_information: auto_delete_timestamp = utils.utc_datetime_to_timestamp( last_crash_time + datetime.timedelta( days=data_types.UNREPRODUCIBLE_TESTCASE_NO_BUG_DEADLINE)) # Set auto-close timestamp for unreproducible testcases with # an associated bug. if testcase.open and testcase.bug_information: auto_close_timestamp = utils.utc_datetime_to_timestamp( last_crash_time + datetime.timedelta( days=data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE)) memory_tool_display_string = environment.get_memory_tool_display_string( testcase.job_type) memory_tool_display_label = memory_tool_display_string.split(":")[0] memory_tool_display_value = memory_tool_display_string.split( ":")[1].strip() helpers.log("Testcase %s" % testcase.key.id(), helpers.VIEW_OPERATION) return { "id": testcase.key.id(), "crash_type": crash_type, "crash_address": crash_address, "crash_state": crash_state, # Used by reproduce tool. 
"crash_state_lines": crash_state_lines, "crash_revision": testcase.crash_revision, "csrf_token": form.generate_csrf_token(), "external_user": external_user, "footer": testcase.comments, "formatted_reproduction_help": formatted_reproduction_help, "fixed": fixed, "fixed_full": fixed_full, "issue_url": issue_url, "is_admin": auth.is_current_user_admin(), "metadata": metadata, "minimized_testcase_size": minimized_testcase_size, "needs_refresh": needs_refresh, "original_testcase_size": original_testcase_size, "privileged_user": privileged_user, "regression": regression, "crash_stacktrace": { "lines": crash_stacktrace, "preview_lines": crash_stacktrace_preview_lines, "revision": revisions.get_real_revision(crash_revision, testcase.job_type, display=True), }, "last_tested_crash_stacktrace": { "lines": last_tested_crash_stacktrace, "preview_lines": last_tested_crash_stacktrace_preview_lines, "revision": revisions.get_real_revision(last_tested_crash_revision, testcase.job_type, display=True), }, "security_severity": security_severity, "security_severities": data_types.SecuritySeverity.list(), "stats": { "min_hour": crash_stats.get_min_hour(), "max_hour": crash_stats.get_max_hour(), }, "suspected_cls": _parse_suspected_cls(metadata.get("predator_result")), "testcase": testcase, "timestamp": utils.utc_datetime_to_timestamp(testcase.timestamp), "show_blame": testcase.has_blame(), "show_impact": testcase.has_impacts(), "impacts_production": testcase.impacts_production(), "find_similar_issues_options": FIND_SIMILAR_ISSUES_OPTIONS, "auto_delete_timestamp": auto_delete_timestamp, "auto_close_timestamp": auto_close_timestamp, "memory_tool_display_label": memory_tool_display_label, "memory_tool_display_value": memory_tool_display_value, "last_tested": last_tested, "is_admin_or_not_oss_fuzz": is_admin_or_not_oss_fuzz(), "has_issue_tracker": has_issue_tracker, "reproduction_help_url": reproduction_help_url, "is_local_development": environment.is_running_on_app_engine_development(), "fuzzer_display": vars(fuzzer_display), }
def execute_task(testcase_id, job_type):
  """Attempt to minimize a given testcase."""
  # Get deadline to finish this task.
  deadline = tasks.get_task_completion_deadline()

  # Locate the testcase associated with the id.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  if not testcase:
    return

  # Update comments to reflect bot information.
  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)

  # Setup testcase and its dependencies.
  file_list, input_directory, testcase_file_path = setup.setup_testcase(
      testcase)
  if not file_list:
    return

  # Initialize variables.
  max_timeout = environment.get_value('TEST_TIMEOUT', 10)
  app_arguments = environment.get_value('APP_ARGS')

  # Set up a custom or regular build based on revision.
  last_tested_crash_revision = testcase.get_metadata(
      'last_tested_crash_revision')
  crash_revision = last_tested_crash_revision or testcase.crash_revision
  build_manager.setup_build(crash_revision)

  # Check if we have an application path. If not, our build failed to set up
  # correctly.
  app_path = environment.get_value('APP_PATH')
  if not app_path:
    logs.log_error('Unable to setup build for minimization.')
    build_fail_wait = environment.get_value('FAIL_WAIT')

    if environment.get_value('ORIGINAL_JOB_NAME'):
      _skip_minimization(testcase, 'Failed to setup build for overridden job.')
    else:
      # Only recreate task if this isn't an overridden job. It's possible that
      # a revision exists for the original job, but doesn't exist for the
      # overridden job.
      tasks.add_task(
          'minimize', testcase_id, job_type, wait_time=build_fail_wait)

    return

  if environment.is_libfuzzer_job():
    do_libfuzzer_minimization(testcase, testcase_file_path)
    return

  max_threads = utils.maximum_parallel_processes_allowed()

  # Prepare the test case runner.
  crash_retries = environment.get_value('CRASH_RETRIES')
  warmup_timeout = environment.get_value('WARMUP_TIMEOUT')
  required_arguments = environment.get_value('REQUIRED_APP_ARGS', '')

  # Add any testcase-specific required arguments if needed.
  additional_required_arguments = testcase.get_metadata(
      'additional_required_app_args')
  if additional_required_arguments:
    required_arguments = '%s %s' % (required_arguments,
                                    additional_required_arguments)

  test_runner = TestRunner(testcase, testcase_file_path, file_list,
                           input_directory, app_arguments, required_arguments,
                           max_threads, deadline)

  # Verify the crash with a long timeout.
  warmup_crash_occurred = False
  result = test_runner.run(timeout=warmup_timeout, log_command=True)
  if result.is_crash():
    warmup_crash_occurred = True
    logs.log('Warmup crash occurred in %d seconds.' % result.crash_time)

  saved_unsymbolized_crash_state, flaky_stack, crash_times = (
      check_for_initial_crash(test_runner, crash_retries, testcase))

  # If the warmup crash occurred but we couldn't reproduce this with multiple
  # processes running in parallel, try to minimize single threaded.
  if (len(crash_times) < tests.REPRODUCIBILITY_FACTOR * crash_retries and
      warmup_crash_occurred and max_threads > 1):
    logs.log('Attempting to continue single-threaded.')

    max_threads = 1
    test_runner = TestRunner(testcase, testcase_file_path, file_list,
                             input_directory, app_arguments,
                             required_arguments, max_threads, deadline)

    saved_unsymbolized_crash_state, flaky_stack, crash_times = (
        check_for_initial_crash(test_runner, crash_retries, testcase))

  if flaky_stack:
    testcase.flaky_stack = flaky_stack
    testcase.put()

  if len(crash_times) < tests.REPRODUCIBILITY_FACTOR * crash_retries:
    if not crash_times:
      # We didn't crash at all, so try again. This might be a legitimately
      # unreproducible test case, so it will get marked as such after being
      # retried on other bots.
      testcase = data_handler.get_testcase_by_id(testcase_id)
      data_handler.update_testcase_comment(testcase,
                                           data_types.TaskState.ERROR,
                                           'Unable to reproduce crash')
      task_creation.mark_unreproducible_if_flaky(testcase, True)
    else:
      # We reproduced this crash at least once. It's too flaky to minimize,
      # but maybe we'll have more luck in the other jobs.
      testcase = data_handler.get_testcase_by_id(testcase_id)
      testcase.minimized_keys = 'NA'
      error_message = (
          'Unable to reproduce crash reliably, skipping '
          'minimization (crashed %d/%d)' % (len(crash_times), crash_retries))
      data_handler.update_testcase_comment(testcase,
                                           data_types.TaskState.ERROR,
                                           error_message)
      create_additional_tasks(testcase)
    return

  # If we've made it this far, the test case appears to be reproducible. Clear
  # metadata from previous runs in case it was marked as potentially flaky.
  task_creation.mark_unreproducible_if_flaky(testcase, False)

  test_runner.set_test_expectations(testcase.security_flag, flaky_stack,
                                    saved_unsymbolized_crash_state)

  # Use the max crash time unless this would be greater than the max timeout.
  test_timeout = min(max(crash_times), max_timeout) + 1
  logs.log('Using timeout %d (was %d)' % (test_timeout, max_timeout))
  test_runner.timeout = test_timeout

  logs.log('Starting minimization.')

  if should_attempt_phase(testcase, MinimizationPhase.GESTURES):
    gestures = minimize_gestures(test_runner, testcase)

    # We can't call
    # check_deadline_exceeded_and_store_partial_minimized_testcase at this
    # point because we do not have a test case to store.
    testcase = data_handler.get_testcase_by_id(testcase.key.id())

    if testcase.security_flag and len(testcase.gestures) != len(gestures):
      # Re-run security severity analysis since gestures affect the severity.
      testcase.security_severity = severity_analyzer.get_security_severity(
          testcase.crash_type, data_handler.get_stacktrace(testcase), job_type,
          bool(gestures))

    testcase.gestures = gestures
    testcase.set_metadata('minimization_phase', MinimizationPhase.MAIN_FILE)

    if time.time() > test_runner.deadline:
      tasks.add_task('minimize', testcase.key.id(), job_type)
      return

  # Minimize the main file.
  data = utils.get_file_contents_with_fatal_error_on_failure(
      testcase_file_path)
  if should_attempt_phase(testcase, MinimizationPhase.MAIN_FILE):
    data = minimize_main_file(test_runner, testcase_file_path, data)

    if check_deadline_exceeded_and_store_partial_minimized_testcase(
        deadline, testcase_id, job_type, input_directory, file_list, data,
        testcase_file_path):
      return

    testcase.set_metadata('minimization_phase', MinimizationPhase.FILE_LIST)

  # Minimize the file list.
  if should_attempt_phase(testcase, MinimizationPhase.FILE_LIST):
    if environment.get_value('MINIMIZE_FILE_LIST', True):
      file_list = minimize_file_list(test_runner, file_list, input_directory,
                                     testcase_file_path)

      if check_deadline_exceeded_and_store_partial_minimized_testcase(
          deadline, testcase_id, job_type, input_directory, file_list, data,
          testcase_file_path):
        return
    else:
      logs.log('Skipping minimization of file list.')

    testcase.set_metadata('minimization_phase', MinimizationPhase.RESOURCES)

  # Minimize any files remaining in the file list.
  if should_attempt_phase(testcase, MinimizationPhase.RESOURCES):
    if environment.get_value('MINIMIZE_RESOURCES', True):
      for dependency in file_list:
        minimize_resource(test_runner, dependency, input_directory,
                          testcase_file_path)

        if check_deadline_exceeded_and_store_partial_minimized_testcase(
            deadline, testcase_id, job_type, input_directory, file_list, data,
            testcase_file_path):
          return
    else:
      logs.log('Skipping minimization of resources.')

    testcase.set_metadata('minimization_phase', MinimizationPhase.ARGUMENTS)

  if should_attempt_phase(testcase, MinimizationPhase.ARGUMENTS):
    app_arguments = minimize_arguments(test_runner, app_arguments)

    # Arguments must be stored here in case we time out below.
    testcase.minimized_arguments = app_arguments
    testcase.put()

    if check_deadline_exceeded_and_store_partial_minimized_testcase(
        deadline, testcase_id, job_type, input_directory, file_list, data,
        testcase_file_path):
      return

  command = tests.get_command_line_for_application(
      testcase_file_path, app_args=app_arguments,
      needs_http=testcase.http_flag)
  last_crash_result = test_runner.last_failing_result

  store_minimized_testcase(testcase, input_directory, file_list, data,
                           testcase_file_path)
  finalize_testcase(
      testcase_id, command, last_crash_result, flaky_stack=flaky_stack)
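# The phases above run in a fixed order, checkpointed through the
# 'minimization_phase' metadata key so a re-queued task resumes where it left
# off (phase names from MinimizationPhase, ordering as implemented above):
#
#   GESTURES -> MAIN_FILE -> FILE_LIST -> RESOURCES -> ARGUMENTS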