def _get_application_arguments(testcase, job_type, task_name):
  """Get application arguments to use for setting up |testcase|. Use minimized
  arguments if available. For variant task, where we run a testcase against
  another job type, use both minimized arguments and application arguments
  from job."""
  testcase_args = testcase.minimized_arguments
  if not testcase_args:
    return None

  if task_name != 'variant':
    return testcase_args

  # TODO(aarya): Use %TESTCASE% explicitly since it will not exist with new
  # engine impl libFuzzer testcases and AFL's launcher.py requires it as the
  # first argument. Remove once AFL is migrated to the new engine impl.
  if environment.is_afl_job(job_type):
    return '%TESTCASE%'

  job_args = data_handler.get_value_from_job_definition(
      job_type, 'APP_ARGS', default='')
  job_args_list = shlex.split(job_args)
  testcase_args_list = shlex.split(testcase_args)
  testcase_args_filtered_list = [
      arg for arg in testcase_args_list if arg not in job_args_list
  ]

  app_args = ' '.join(testcase_args_filtered_list)
  if job_args:
    if app_args:
      app_args += ' '
    app_args += job_args

  return app_args
def _get_application_arguments(testcase, task_name):
  """Get application arguments to use for setting up |testcase|. Use minimized
  arguments if available. For variant task, where we run a testcase against
  another job type, use both minimized arguments and application arguments
  from job."""
  testcase_args = testcase.minimized_arguments
  if not testcase_args:
    return None

  if task_name != 'variant':
    return testcase_args

  job_args = data_handler.get_value_from_job_definition(
      testcase.job_type, 'APP_ARGS', default='')
  job_args_list = shlex.split(job_args)
  testcase_args_list = shlex.split(testcase_args)
  testcase_args_filtered_list = [
      arg for arg in testcase_args_list if arg not in job_args_list
  ]

  app_args = ' '.join(testcase_args_filtered_list)
  if job_args:
    if app_args:
      app_args += ' '
    app_args += job_args

  return app_args
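# Illustrative sketch (not from the original source): how the variant-task
# merge in _get_application_arguments behaves on hypothetical argument
# strings. Job APP_ARGS and the testcase's minimized arguments are split with
# shlex, arguments already present in the job args are dropped from the
# testcase side, and the job args are appended at the end.
import shlex


def _merge_arguments_example():
  job_args = '--enable-logging --no-sandbox'  # hypothetical APP_ARGS value
  testcase_args = '--no-sandbox --single-process'  # hypothetical minimized args

  job_args_list = shlex.split(job_args)
  testcase_args_filtered = [
      arg for arg in shlex.split(testcase_args) if arg not in job_args_list
  ]
  # Returns '--single-process --enable-logging --no-sandbox'.
  return ' '.join(testcase_args_filtered + job_args_list)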
def _check_commits(testcase, bisect_type, old_commit, new_commit):
  """Check old and new commit validity."""
  if old_commit != new_commit or build_manager.is_custom_binary():
    return old_commit, new_commit

  # Something went wrong during bisection if the same commit was chosen for
  # both the start and end of the range. Get the bisection infrastructure to
  # re-bisect.
  if environment.is_running_on_app_engine():
    bucket_path = data_handler.get_value_from_job_definition(
        testcase.job_type, 'RELEASE_BUILD_BUCKET_PATH')
  else:
    bucket_path = build_manager.get_primary_bucket_path()

  revision_list = build_manager.get_revisions_list(bucket_path)

  last_tested_revision = testcase.get_metadata('last_tested_crash_revision')
  known_crash_revision = last_tested_revision or testcase.crash_revision

  if bisect_type == 'fixed':
    # Narrowest range: last crashing revision up to the latest build.
    return _get_commits(
        str(known_crash_revision) + ':' + str(revision_list[-1]),
        testcase.job_type)

  if bisect_type == 'regressed':
    # Narrowest range: first build to the first crashing revision.
    return _get_commits(
        str(revision_list[0]) + ':' + str(testcase.crash_revision),
        testcase.job_type)

  raise ValueError('Invalid bisection type: ' + bisect_type)
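# Illustrative sketch (hypothetical values, not from the original source): the
# '<start>:<end>' range strings that _check_commits builds for _get_commits
# when asking the bisection infrastructure to re-bisect.
def _rebisect_ranges_example():
  revision_list = [1000, 1005, 1010, 1020]  # hypothetical builds, oldest first
  crash_revision = 1005  # hypothetical first crashing revision
  last_tested_crash_revision = 1010  # hypothetical testcase metadata value

  known_crash_revision = last_tested_crash_revision or crash_revision
  # 'fixed' bisection: last known crashing revision up to the latest build.
  fixed_range = str(known_crash_revision) + ':' + str(revision_list[-1])
  # 'regressed' bisection: first available build to the first crash.
  regressed_range = str(revision_list[0]) + ':' + str(crash_revision)
  return fixed_range, regressed_range  # ('1010:1020', '1000:1005')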
def _get_revision_vars_url_format(job_type):
  """Return REVISION_VARS_URL from job environment if available. Otherwise,
  default to one set in project.yaml. For custom binary jobs, this is not
  applicable."""
  if job_type is None:
    # Force it to use env attribute in project.yaml.
    return local_config.ProjectConfig().get('env.REVISION_VARS_URL')

  custom_binary = data_handler.get_value_from_job_definition(
      job_type, 'CUSTOM_BINARY')
  if utils.string_is_true(custom_binary):
    return None

  return data_handler.get_value_from_job_definition_or_environment(
      job_type, 'REVISION_VARS_URL')
def _get_performance_report(fuzzer_name, job_type, performance_report_data):
  """Return performance report."""
  bucket_name = data_handler.get_value_from_job_definition(
      job_type, 'FUZZ_LOGS_BUCKET')

  # Load performance data as JSON.
  performance_report = json.loads(performance_report_data)

  # Get logs directory path containing the analyzed logs.
  logs_directory = fuzzer_logs.get_logs_directory(bucket_name, fuzzer_name,
                                                  job_type)

  # Add other display metadata in report.
  for issue in performance_report['issues']:
    # Linkify the examples column.
    # TODO(mmoroz): build this in polymer using dom-repeat.
    issue['examples'] = '<br/>'.join([
        _get_link_html(logs_directory, log_relative_path)
        for log_relative_path in issue['examples']
    ])

    # Add the solutions column explicitly.
    issue['solutions'] = constants.ISSUE_TYPE_SOLUTIONS_MAP[issue['type']]

  return performance_report
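# Illustrative sketch (assumption inferred from the fields read above): the
# minimal JSON shape that _get_performance_report expects in
# |performance_report_data| -- an 'issues' list whose entries carry a 'type'
# key (looked up in ISSUE_TYPE_SOLUTIONS_MAP) and an 'examples' list of log
# paths relative to the logs directory. The 'slow_unit' type and the log path
# are hypothetical.
import json

_example_performance_report_data = json.dumps({
    'issues': [{
        'type': 'slow_unit',
        'examples': ['2019-01-01/01:00:00.log'],
    }]
})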
def file_issue(testcase,
               issue_tracker,
               security_severity=None,
               user_email=None,
               additional_ccs=None):
  """File an issue for the given test case."""
  logs.log('Filing new issue for testcase: %d' % testcase.key.id())

  policy = issue_tracker_policy.get(issue_tracker.project)
  is_crash = not utils.sub_string_exists_in(NON_CRASH_TYPES,
                                            testcase.crash_type)
  properties = policy.get_new_issue_properties(
      is_security=testcase.security_flag, is_crash=is_crash)

  issue = issue_tracker.new_issue()
  issue.title = data_handler.get_issue_summary(testcase)
  issue.body = data_handler.get_issue_description(
      testcase, reporter=user_email, show_reporter=True)

  # Add reproducibility flag label.
  if testcase.one_time_crasher_flag:
    issue.labels.add(policy.label('unreproducible'))
  else:
    issue.labels.add(policy.label('reproducible'))

  # Chromium-specific labels.
  if issue_tracker.project == 'chromium' and testcase.security_flag:
    # Add reward labels if this is from an external fuzzer contribution.
    fuzzer = data_types.Fuzzer.query(
        data_types.Fuzzer.name == testcase.fuzzer_name).get()
    if fuzzer and fuzzer.external_contribution:
      issue.labels.add('reward-topanel')
      issue.labels.add('External-Fuzzer-Contribution')

    update_issue_impact_labels(testcase, issue)

  # Add additional labels from the job definition and fuzzer.
  additional_labels = data_handler.get_additional_values_for_variable(
      'AUTOMATIC_LABELS', testcase.job_type, testcase.fuzzer_name)
  for label in additional_labels:
    issue.labels.add(label)

  # Add additional components from the job definition and fuzzer.
  automatic_components = data_handler.get_additional_values_for_variable(
      'AUTOMATIC_COMPONENTS', testcase.job_type, testcase.fuzzer_name)
  for component in automatic_components:
    issue.components.add(component)

  # Add issue assignee from the job definition and fuzzer.
  automatic_assignee = data_handler.get_additional_values_for_variable(
      'AUTOMATIC_ASSIGNEE', testcase.job_type, testcase.fuzzer_name)
  if automatic_assignee:
    issue.status = policy.status('assigned')
    issue.assignee = automatic_assignee[0]
  else:
    issue.status = properties.status

  # Add additional ccs from the job definition and fuzzer.
  ccs = data_handler.get_additional_values_for_variable(
      'AUTOMATIC_CCS', testcase.job_type, testcase.fuzzer_name)

  # For externally contributed fuzzers, potentially cc the author.
  # Use fully qualified fuzzer name if one is available.
  fully_qualified_fuzzer_name = (
      testcase.overridden_fuzzer_name or testcase.fuzzer_name)
  ccs += external_users.cc_users_for_fuzzer(fully_qualified_fuzzer_name,
                                            testcase.security_flag)
  ccs += external_users.cc_users_for_job(testcase.job_type,
                                         testcase.security_flag)

  # Add the user as a cc if requested, and any default ccs for this job.
  # Check for additional ccs or labels from the job definition.
  if additional_ccs:
    ccs += [cc for cc in additional_ccs if cc not in ccs]

  # For user uploads, we assume the uploader is interested in the issue.
  if testcase.uploader_email and testcase.uploader_email not in ccs:
    ccs.append(testcase.uploader_email)

  ccs.extend(properties.ccs)

  # Get view restriction rules for the job.
  issue_restrictions = data_handler.get_value_from_job_definition(
      testcase.job_type, 'ISSUE_VIEW_RESTRICTIONS', 'security')
  should_restrict_issue = (
      issue_restrictions == 'all' or
      (issue_restrictions == 'security' and testcase.security_flag))

  has_accountable_people = bool(ccs)

  # Check for labels with special logic.
  additional_labels = []
  if should_restrict_issue:
    additional_labels.append(policy.label('restrict_view'))

  if has_accountable_people:
    additional_labels.append(policy.label('reported'))

  if testcase.security_flag:
    additional_labels.append(policy.label('security_severity'))

  additional_labels.append(policy.label('os'))

  # Apply label substitutions.
  for label in itertools.chain(properties.labels, additional_labels):
    for result in apply_substitutions(policy, label, testcase,
                                      security_severity):
      issue.labels.add(result)

  issue.body += data_handler.format_issue_information(
      testcase, properties.issue_body_footer)
  if (should_restrict_issue and has_accountable_people and
      policy.deadline_policy_message):
    issue.body += '\n\n' + policy.deadline_policy_message

  for cc in ccs:
    issue.ccs.add(cc)

  # Add additional labels and components from testcase metadata.
  metadata_labels = _get_from_metadata(testcase, 'issue_labels')
  for label in metadata_labels:
    issue.labels.add(label)

  metadata_components = _get_from_metadata(testcase, 'issue_components')
  for component in metadata_components:
    issue.components.add(component)

  issue.reporter = user_email
  issue.save()

  # Update the testcase with this newly created issue.
  testcase.bug_information = str(issue.id)
  testcase.put()

  data_handler.update_group_bug(testcase.group_id)
  return issue.id
def file_issue(testcase,
               itm,
               security_severity=None,
               user_email=None,
               additional_ccs=None):
  """File an issue for the given test case."""
  issue = Issue()
  issue.summary = data_handler.get_issue_summary(testcase)
  issue.body = data_handler.get_issue_description(
      testcase, reporter=user_email, show_reporter=True)

  # Labels applied by default across all issue trackers.
  issue.status = 'New'
  issue.add_label('ClusterFuzz')

  # Add label on memory tool used.
  add_memory_tool_label_if_needed(issue, testcase)

  # Add reproducibility flag label.
  if testcase.one_time_crasher_flag:
    issue.add_label('Unreproducible')
  else:
    issue.add_label('Reproducible')

  # Add security severity flag label.
  add_security_severity_label_if_needed(issue, testcase, security_severity)

  # Get view restriction rules for the job.
  issue_restrictions = data_handler.get_value_from_job_definition(
      testcase.job_type, 'ISSUE_VIEW_RESTRICTIONS', 'security')
  should_restrict_issue = (
      issue_restrictions == 'all' or
      (issue_restrictions == 'security' and testcase.security_flag))

  # Chromium-specific labels.
  if itm.project_name == 'chromium':
    # A different status system is used on the chromium tracker. Since we
    # have already reproduced the crash, we skip the Unconfirmed status.
    issue.status = 'Untriaged'

    # Add OS label.
    if environment.is_chromeos_job(testcase.job_type):
      # ChromeOS fuzzers run on Linux platform, so use correct OS-Chrome for
      # tracking.
      issue.add_label('OS-Chrome')
    elif testcase.platform_id:
      os_label = 'OS-%s' % ((testcase.platform_id.split(':')[0]).capitalize())
      issue.add_label(os_label)

    # Add view restrictions for internal job types.
    add_view_restrictions_if_needed(issue, testcase)

    if testcase.security_flag:
      # Apply labels specific to security bugs.
      issue.add_label('Restrict-View-SecurityTeam')
      issue.add_label('Type-Bug-Security')

      # Add reward labels if this is from an external fuzzer contribution.
      fuzzer = data_types.Fuzzer.query(
          data_types.Fuzzer.name == testcase.fuzzer_name).get()
      if fuzzer and fuzzer.external_contribution:
        issue.add_label('reward-topanel')
        issue.add_label('External-Fuzzer-Contribution')

      data_handler.update_issue_impact_labels(testcase, issue)
    else:
      # Apply labels for functional (non-security) bugs.
      if utils.sub_string_exists_in(NON_CRASH_TYPES, testcase.crash_type):
        # Non-crashing test cases shouldn't be assigned Pri-1.
        issue.add_label('Pri-2')
        issue.add_label('Type-Bug')
      else:
        # Default functional bug labels.
        issue.add_label('Pri-1')
        issue.add_label('Stability-Crash')
        issue.add_label('Type-Bug')

  # AOSP-specific labels.
  elif itm.project_name == 'android':
    if testcase.security_flag:
      # Security bug labels.
      issue.add_cc('*****@*****.**')
      issue.add_label('Type-Security')
      issue.add_label('Restrict-View-Commit')
    else:
      # Functional bug labels.
      issue.add_label('Type-Defect')

  # OSS-Fuzz specific labels.
  elif itm.project_name == 'oss-fuzz':
    if testcase.security_flag:
      # Security bug labels.
      issue.add_label('Type-Bug-Security')
    else:
      # Functional bug labels.
      issue.add_label('Type-Bug')

    if should_restrict_issue:
      issue.add_label('Restrict-View-Commit')

  # Add additional labels from the job definition and fuzzer.
  additional_labels = data_handler.get_additional_values_for_variable(
      'AUTOMATIC_LABELS', testcase.job_type, testcase.fuzzer_name)
  for label in additional_labels:
    issue.add_label(label)

  # Add additional components from the job definition and fuzzer.
  automatic_components = data_handler.get_additional_values_for_variable(
      'AUTOMATIC_COMPONENTS', testcase.job_type, testcase.fuzzer_name)
  for component in automatic_components:
    issue.add_component(component)

  # Add additional ccs from the job definition and fuzzer.
  ccs = data_handler.get_additional_values_for_variable(
      'AUTOMATIC_CCS', testcase.job_type, testcase.fuzzer_name)

  # For externally contributed fuzzers, potentially cc the author.
  # Use fully qualified fuzzer name if one is available.
  fully_qualified_fuzzer_name = (
      testcase.overridden_fuzzer_name or testcase.fuzzer_name)
  ccs += external_users.cc_users_for_fuzzer(fully_qualified_fuzzer_name,
                                            testcase.security_flag)
  ccs += external_users.cc_users_for_job(testcase.job_type,
                                         testcase.security_flag)

  # Add the user as a cc if requested, and any default ccs for this job.
  # Check for additional ccs or labels from the job definition.
  if additional_ccs:
    ccs += [cc for cc in additional_ccs if cc not in ccs]

  # For user uploads, we assume the uploader is interested in the issue.
  if testcase.uploader_email and testcase.uploader_email not in ccs:
    ccs.append(testcase.uploader_email)

  if itm.project_name == 'oss-fuzz' and ccs:
    # Add a reported label for deadline tracking.
    issue.add_label(reported_label())

    if issue.has_label_matching('Restrict-View-Commit'):
      issue.body += '\n\n' + DEADLINE_NOTE

    issue.body += '\n\n' + FIX_NOTE
    issue.body += '\n\n' + QUESTIONS_NOTE

  for cc in ccs:
    issue.add_cc(cc)

  # Add additional labels from testcase metadata.
  metadata_labels = utils.parse_delimited(
      testcase.get_metadata('issue_labels', ''),
      delimiter=',',
      strip=True,
      remove_empty=True)
  for label in metadata_labels:
    issue.add_label(label)

  issue.itm = itm
  issue.reporter = user_email
  issue.save()

  # Update the testcase with this newly created issue.
  testcase.bug_information = str(issue.id)
  testcase.put()

  data_handler.update_group_bug(testcase.group_id)
  return issue.id
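# Illustrative sketch (hypothetical platform_id, not from the original
# source): how the chromium OS label in file_issue above is derived from a
# testcase's platform id of the form '<os>:<variant>'.
def _os_label_example():
  platform_id = 'linux:trusty'  # hypothetical platform id
  return 'OS-%s' % platform_id.split(':')[0].capitalize()  # 'OS-Linux'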
def do_post(self):
  """Upload a testcase."""
  testcase_id = self.request.get('testcaseId')
  uploaded_file = self.get_upload()
  if testcase_id and not uploaded_file:
    testcase = helpers.get_testcase(testcase_id)
    if not access.can_user_access_testcase(testcase):
      raise helpers.AccessDeniedException()

    # Use minimized testcase for upload (if available).
    key = (testcase.minimized_keys if testcase.minimized_keys and
           testcase.minimized_keys != 'NA' else testcase.fuzzed_keys)
    uploaded_file = blobs.get_blob_info(key)

    # Extract filename part from blob.
    uploaded_file.filename = os.path.basename(
        uploaded_file.filename.replace('\\', os.sep))

  job_type = self.request.get('job')
  if not job_type:
    raise helpers.EarlyExitException('Missing job name.', 400)

  if (not data_types.Job.VALID_NAME_REGEX.match(job_type) or
      not data_types.Job.query(data_types.Job.name == job_type).get()):
    raise helpers.EarlyExitException('Invalid job name.', 400)

  fuzzer_name = ''
  job_type_lowercase = job_type.lower()
  if 'libfuzzer' in job_type_lowercase:
    fuzzer_name = 'libFuzzer'
  elif 'afl' in job_type_lowercase:
    fuzzer_name = 'afl'

  target_name = self.request.get('target')
  if not fuzzer_name and target_name:
    raise helpers.EarlyExitException(
        'Target name is not applicable to non-engine jobs (AFL, libFuzzer).',
        400)

  if fuzzer_name and not target_name:
    raise helpers.EarlyExitException(
        'Missing target name for engine job (AFL, libFuzzer).', 400)

  if (target_name and
      not data_types.Fuzzer.VALID_NAME_REGEX.match(target_name)):
    raise helpers.EarlyExitException('Invalid target name.', 400)

  fully_qualified_fuzzer_name = ''
  if fuzzer_name and target_name:
    fully_qualified_fuzzer_name, target_name = find_fuzz_target(
        fuzzer_name, target_name, job_type)
    if not fully_qualified_fuzzer_name:
      raise helpers.EarlyExitException('Target does not exist.', 400)

  if not access.has_access(
      need_privileged_access=False,
      job_type=job_type,
      fuzzer_name=(fully_qualified_fuzzer_name or fuzzer_name)):
    raise helpers.AccessDeniedException()

  multiple_testcases = bool(self.request.get('multiple'))
  http_flag = bool(self.request.get('http'))
  high_end_job = bool(self.request.get('highEnd'))
  bug_information = self.request.get('issue')
  crash_revision = self.request.get('revision')
  timeout = self.request.get('timeout')
  retries = self.request.get('retries')
  bug_summary_update_flag = bool(self.request.get('updateIssue'))
  additional_arguments = self.request.get('args')
  app_launch_command = self.request.get('cmd')
  platform_id = self.request.get('platform')

  testcase_metadata = self.request.get('metadata')
  if testcase_metadata:
    try:
      testcase_metadata = json.loads(testcase_metadata)
    except Exception:
      raise helpers.EarlyExitException('Invalid metadata JSON.', 400)
    if not isinstance(testcase_metadata, dict):
      raise helpers.EarlyExitException('Metadata is not a JSON object.', 400)

  archive_state = 0
  bundled = False
  file_path_input = ''
  email = helpers.get_user_email()

  # If we have an AFL or libFuzzer target, use that for arguments.
  # Launch command looks like
  # python launcher.py {testcase_path} {target_name}
  if target_name:
    additional_arguments = '%%TESTCASE%% %s' % target_name

  # Certain modifications such as app launch command, issue updates are only
  # allowed for privileged users.
  privileged_user = access.has_access(need_privileged_access=True)
  if not privileged_user:
    if bug_information or bug_summary_update_flag:
      raise helpers.EarlyExitException(
          'You are not privileged to update existing issues.', 400)

    need_privileged_access = utils.string_is_true(
        data_handler.get_value_from_job_definition(job_type,
                                                   'PRIVILEGED_ACCESS'))
    if need_privileged_access:
      raise helpers.EarlyExitException(
          'You are not privileged to run this job type.', 400)

    if app_launch_command:
      raise helpers.EarlyExitException(
          'You are not privileged to run arbitrary launch commands.', 400)

    if testcase_metadata:
      raise helpers.EarlyExitException(
          'You are not privileged to set testcase metadata.', 400)

  if crash_revision and crash_revision.isdigit():
    crash_revision = int(crash_revision)
  else:
    crash_revision = 0

  if bug_information and not bug_information.isdigit():
    raise helpers.EarlyExitException('Bug is not a number.', 400)

  if not timeout:
    timeout = 0
  elif not timeout.isdigit() or timeout == '0':
    raise helpers.EarlyExitException(
        'Testcase timeout must be a number greater than 0.', 400)
  else:
    timeout = int(timeout)
    if timeout > 120:
      raise helpers.EarlyExitException(
          'Testcase timeout may not be greater than 120 seconds.', 400)

  if retries:
    if retries.isdigit():
      retries = int(retries)
    else:
      retries = None

    if retries is None or retries > MAX_RETRIES:
      raise helpers.EarlyExitException(
          'Testcase retries must be a number less than %d.' % MAX_RETRIES, 400)
  else:
    retries = None

  try:
    gestures = ast.literal_eval(self.request.get('gestures'))
  except Exception:
    gestures = []
  if not gestures:
    gestures = []

  job_queue = tasks.queue_for_job(job_type, is_high_end=high_end_job)

  if uploaded_file is not None:
    filename = ''.join(
        [x for x in uploaded_file.filename if x not in ' ;/?:@&=+$,{}|<>()\\'])
    key = str(uploaded_file.key())
    if archive.is_archive(filename):
      archive_state = data_types.ArchiveStatus.FUZZED

    if archive_state:
      if multiple_testcases:
        if testcase_metadata:
          raise helpers.EarlyExitException(
              'Testcase metadata not supported with multiple testcases.', 400)
        # Create a job to unpack an archive.
        metadata = data_types.BundledArchiveMetadata()
        metadata.blobstore_key = key
        metadata.timeout = timeout
        metadata.job_queue = job_queue
        metadata.job_type = job_type
        metadata.http_flag = http_flag
        metadata.archive_filename = filename
        metadata.uploader_email = email
        metadata.gestures = gestures
        metadata.crash_revision = crash_revision
        metadata.additional_arguments = additional_arguments
        metadata.bug_information = bug_information
        metadata.platform_id = platform_id
        metadata.app_launch_command = app_launch_command
        metadata.fuzzer_name = fuzzer_name
        metadata.overridden_fuzzer_name = fully_qualified_fuzzer_name
        metadata.fuzzer_binary_name = target_name
        metadata.put()

        tasks.add_task(
            'unpack',
            str(metadata.key.id()),
            job_type,
            queue=tasks.queue_for_job(job_type))

        # Create a testcase metadata object to show the user their upload.
        upload_metadata = data_types.TestcaseUploadMetadata()
        upload_metadata.timestamp = datetime.datetime.utcnow()
        upload_metadata.filename = filename
        upload_metadata.blobstore_key = key
        upload_metadata.original_blobstore_key = key
        upload_metadata.status = 'Pending'
        upload_metadata.bundled = True
        upload_metadata.uploader_email = email
        upload_metadata.retries = retries
        upload_metadata.bug_summary_update_flag = bug_summary_update_flag
        upload_metadata.put()

        helpers.log('Uploaded multiple testcases.', helpers.VIEW_OPERATION)
        self.render_json({'multiple': True})
        return

      file_path_input = guess_input_file(uploaded_file, filename)
      if not file_path_input:
        raise helpers.EarlyExitException(
            ("Unable to detect which file to launch. The main file's name "
             'must contain either of %s.' % str(RUN_FILE_PATTERNS)), 400)
  else:
    raise helpers.EarlyExitException('Please select a file to upload.', 400)

  testcase_id = data_handler.create_user_uploaded_testcase(
      key, key, archive_state, filename, file_path_input, timeout, job_type,
      job_queue, http_flag, gestures, additional_arguments, bug_information,
      crash_revision, email, platform_id, app_launch_command, fuzzer_name,
      fully_qualified_fuzzer_name, target_name, bundled, retries,
      bug_summary_update_flag, additional_metadata=testcase_metadata)

  testcase = data_handler.get_testcase_by_id(testcase_id)
  issue = issue_tracker_utils.get_issue_for_testcase(testcase)
  if issue:
    report_url = data_handler.TESTCASE_REPORT_URL.format(
        domain=data_handler.get_domain(), testcase_id=testcase_id)

    comment = ('ClusterFuzz is analyzing your testcase. '
               'Developers can follow the progress at %s.' % report_url)
    issue.save(new_comment=comment)

  helpers.log('Uploaded testcase %s' % testcase_id, helpers.VIEW_OPERATION)
  self.render_json({'id': '%s' % testcase_id})
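# Illustrative sketch (hypothetical filename, not from the original source):
# the sanitization in do_post above strips whitespace plus shell- and
# URL-significant characters from the uploaded filename before it is stored.
def _sanitize_filename_example():
  uploaded_name = 'crash (1);copy.html'  # hypothetical uploaded filename
  # Returns 'crash1copy.html'.
  return ''.join(
      x for x in uploaded_name if x not in ' ;/?:@&=+$,{}|<>()\\')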
def mark_issue_as_closed_if_testcase_is_fixed(policy, testcase, issue):
  """Mark an issue as fixed if all of its associated reproducible testcases
  are fixed."""
  verified_label = policy.label('verified')
  if not verified_label:
    return

  # If there is no associated issue, then bail out.
  if not issue or not testcase.bug_information:
    return

  # If the issue is closed in a status other than Fixed, like Duplicate,
  # WontFix or Archived, we shouldn't change it. Bail out.
  if not issue.is_open and issue.status != policy.status('fixed'):
    return

  # Check testcase status, so as to skip unreproducible uploads.
  if testcase.status not in ['Processed', 'Duplicate']:
    return

  # If the testcase is still open, no work needs to be done. Bail out.
  if testcase.open:
    return

  # FIXME: Find a better solution to skip over reproducible tests that are now
  # showing up as flaky (especially when we are unable to reproduce the crash
  # in the original crash revision).
  if testcase.fixed == 'NA':
    return

  # We can only verify fixed issues for reproducible testcases. If the
  # testcase is unreproducible, bail out. Exception is if we explicitly marked
  # this as fixed.
  if testcase.one_time_crasher_flag and testcase.fixed != 'Yes':
    return

  # Make sure that no other testcases associated with this issue are open.
  similar_testcase = data_types.Testcase.query(
      data_types.Testcase.bug_information == testcase.bug_information,
      ndb_utils.is_true(data_types.Testcase.open),
      ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get()
  if similar_testcase:
    return

  # As a last check, do the expensive call of actually checking all issue
  # comments to make sure we didn't do the verification already and we didn't
  # get called out on issue mistriage.
  if (issue_tracker_utils.was_label_added(issue, verified_label) or
      issue_tracker_utils.was_label_added(issue, policy.label('wrong'))):
    return

  issue.labels.add(verified_label)
  comment = 'ClusterFuzz testcase %d is verified as fixed' % testcase.key.id()

  fixed_range_url = data_handler.get_fixed_range_url(testcase)
  if fixed_range_url:
    comment += ' in ' + fixed_range_url
  else:
    comment += '.'

  if utils.is_oss_fuzz():
    comment += OSS_FUZZ_INCORRECT_COMMENT
  else:
    comment = _append_generic_incorrect_comment(comment, policy, issue,
                                                ' and re-open the issue.')

  skip_auto_close = data_handler.get_value_from_job_definition(
      testcase.job_type, 'SKIP_AUTO_CLOSE_ISSUE')
  if not skip_auto_close:
    issue.status = policy.status('verified')

  issue.save(new_comment=comment, notify=True)
  logs.log('Mark issue %d as verified for fixed testcase %d.' %
           (issue.id, testcase.key.id()))
def mark_unreproducible_testcase_and_issue_as_closed_after_deadline(
    policy, testcase, issue):
  """Closes an unreproducible testcase and its associated issue after a
  certain time period."""
  # If the testcase is already closed, no more work to do.
  if not testcase.open:
    return

  # Check testcase status, so as to skip unreproducible uploads.
  if testcase.status not in ['Processed', 'Duplicate']:
    return

  # Make sure that this testcase is an unreproducible bug. If not, bail out.
  if not testcase.one_time_crasher_flag:
    return

  # Make sure that this testcase has an associated bug. If not, bail out.
  if not testcase.bug_information:
    return

  # If this testcase was manually uploaded, don't change issue state as our
  # reproduction result might be incorrect.
  if testcase.uploader_email:
    return

  # Make sure that there is an associated bug and it is in open state.
  if not issue or not issue.is_open:
    return

  # Skip closing if flag is set.
  skip_auto_close = data_handler.get_value_from_job_definition(
      testcase.job_type, 'SKIP_AUTO_CLOSE_ISSUE')
  if skip_auto_close:
    return

  # Check if any reproducible open testcases are associated with this bug.
  # If yes, return.
  similar_testcase = data_types.Testcase.query(
      data_types.Testcase.bug_information == testcase.bug_information,
      ndb_utils.is_true(data_types.Testcase.open),
      ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get()
  if similar_testcase:
    return

  # Make sure that the testcase is at least older than
  # |UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE|, otherwise it will be seen in
  # crash stats anyway.
  if (testcase.timestamp and not dates.time_has_expired(
      testcase.timestamp,
      days=data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE)):
    return

  # Handle testcase that turned from reproducible to unreproducible. Account
  # for the recent progression task run time.
  last_tested_crash_time = testcase.get_metadata('last_tested_crash_time')
  if (last_tested_crash_time and not dates.time_has_expired(
      last_tested_crash_time,
      days=data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE)):
    return

  # Make sure that no crash was seen in the deadline period.
  if get_crash_occurrence_platforms(
      testcase, data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE):
    return

  # As a last check, do the expensive call of actually checking all issue
  # comments to make sure we didn't get called out on issue mistriage.
  if issue_tracker_utils.was_label_added(issue, policy.label('wrong')):
    return

  # Close associated issue and testcase.
  comment = ('ClusterFuzz testcase %d is flaky and no longer crashes, '
             'so closing issue.' % testcase.key.id())
  if utils.is_oss_fuzz():
    comment += OSS_FUZZ_INCORRECT_COMMENT
  else:
    comment = _append_generic_incorrect_comment(comment, policy, issue,
                                                ' and re-open the issue.')

  issue.status = policy.status('wontfix')
  issue.save(new_comment=comment, notify=True)
  testcase.fixed = 'NA'
  testcase.open = False
  testcase.put()

  logs.log('Closed unreproducible testcase %d and associated issue.' %
           testcase.key.id())
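# Illustrative sketch (assumed semantics for dates.time_has_expired, not the
# actual implementation): the deadline checks above only proceed once the
# testcase timestamp and the last tested crash time are older than the
# configured number of days.
import datetime


def _time_has_expired_example(timestamp, days):
  """Days-based expiry check, for illustration only."""
  return datetime.datetime.utcnow() - timestamp > datetime.timedelta(days=days)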
def _get_logs_bucket_from_job(self, job_type):
  """Get logs bucket from job."""
  return data_handler.get_value_from_job_definition(job_type,
                                                    'FUZZ_LOGS_BUCKET')
def do_post(self):
  """Upload a testcase."""
  email = helpers.get_user_email()
  testcase_id = request.get('testcaseId')
  uploaded_file = self.get_upload()
  if testcase_id and not uploaded_file:
    testcase = helpers.get_testcase(testcase_id)
    if not access.can_user_access_testcase(testcase):
      raise helpers.AccessDeniedException()

    # Use minimized testcase for upload (if available).
    key = (testcase.minimized_keys if testcase.minimized_keys and
           testcase.minimized_keys != 'NA' else testcase.fuzzed_keys)
    uploaded_file = blobs.get_blob_info(key)

    # Extract filename part from blob.
    uploaded_file.filename = os.path.basename(
        uploaded_file.filename.replace('\\', os.sep))

  job_type = request.get('job')
  if not job_type:
    raise helpers.EarlyExitException('Missing job name.', 400)

  job = data_types.Job.query(data_types.Job.name == job_type).get()
  if not job:
    raise helpers.EarlyExitException('Invalid job name.', 400)

  fuzzer_name = request.get('fuzzer')
  job_type_lowercase = job_type.lower()
  if 'libfuzzer' in job_type_lowercase:
    fuzzer_name = 'libFuzzer'
  elif 'afl' in job_type_lowercase:
    fuzzer_name = 'afl'
  elif 'honggfuzz' in job_type_lowercase:
    fuzzer_name = 'honggfuzz'

  is_engine_job = fuzzer_name and environment.is_engine_fuzzer_job(job_type)
  target_name = request.get('target')
  if not is_engine_job and target_name:
    raise helpers.EarlyExitException(
        'Target name is not applicable to non-engine jobs (AFL, libFuzzer).',
        400)

  if is_engine_job and not target_name:
    raise helpers.EarlyExitException(
        'Missing target name for engine job (AFL, libFuzzer).', 400)

  if (target_name and
      not data_types.Fuzzer.VALID_NAME_REGEX.match(target_name)):
    raise helpers.EarlyExitException('Invalid target name.', 400)

  fully_qualified_fuzzer_name = ''
  if is_engine_job and target_name:
    if job.is_external():
      # External jobs don't run and set FuzzTarget entities as part of
      # fuzz_task. Set it here instead.
      fuzz_target = data_handler.record_fuzz_target(fuzzer_name, target_name,
                                                    job_type)
      fully_qualified_fuzzer_name = fuzz_target.fully_qualified_name()
      target_name = fuzz_target.binary
    else:
      fully_qualified_fuzzer_name, target_name = find_fuzz_target(
          fuzzer_name, target_name, job_type)

  if (not access.has_access(
      need_privileged_access=False,
      job_type=job_type,
      fuzzer_name=(fully_qualified_fuzzer_name or fuzzer_name)) and
      not _is_uploader_allowed(email)):
    raise helpers.AccessDeniedException()

  multiple_testcases = bool(request.get('multiple'))
  http_flag = bool(request.get('http'))
  high_end_job = bool(request.get('highEnd'))
  bug_information = request.get('issue')
  crash_revision = request.get('revision')
  timeout = request.get('timeout')
  retries = request.get('retries')
  bug_summary_update_flag = bool(request.get('updateIssue'))
  quiet_flag = bool(request.get('quiet'))
  additional_arguments = request.get('args')
  app_launch_command = request.get('cmd')
  platform_id = request.get('platform')
  issue_labels = request.get('issue_labels')
  gestures = request.get('gestures') or '[]'
  stacktrace = request.get('stacktrace')

  crash_data = None
  if job.is_external():
    if not stacktrace:
      raise helpers.EarlyExitException(
          'Stacktrace required for external jobs.', 400)

    if not crash_revision:
      raise helpers.EarlyExitException(
          'Revision required for external jobs.', 400)

    crash_data = stack_analyzer.get_crash_data(
        stacktrace,
        fuzz_target=target_name,
        symbolize_flag=False,
        already_symbolized=True,
        detect_ooms_and_hangs=True)
  elif stacktrace:
    raise helpers.EarlyExitException(
        'Should not specify stacktrace for non-external jobs.', 400)

  testcase_metadata = request.get('metadata', {})
  if testcase_metadata:
    try:
      testcase_metadata = json.loads(testcase_metadata)
    except Exception as e:
      raise helpers.EarlyExitException('Invalid metadata JSON.', 400) from e
    if not isinstance(testcase_metadata, dict):
      raise helpers.EarlyExitException('Metadata is not a JSON object.', 400)
  if issue_labels:
    testcase_metadata['issue_labels'] = issue_labels

  try:
    gestures = ast.literal_eval(gestures)
  except Exception as e:
    raise helpers.EarlyExitException('Failed to parse gestures.', 400) from e

  archive_state = 0
  bundled = False
  file_path_input = ''

  # Certain modifications such as app launch command, issue updates are only
  # allowed for privileged users.
  privileged_user = access.has_access(need_privileged_access=True)
  if not privileged_user:
    if bug_information or bug_summary_update_flag:
      raise helpers.EarlyExitException(
          'You are not privileged to update existing issues.', 400)

    need_privileged_access = utils.string_is_true(
        data_handler.get_value_from_job_definition(job_type,
                                                   'PRIVILEGED_ACCESS'))
    if need_privileged_access:
      raise helpers.EarlyExitException(
          'You are not privileged to run this job type.', 400)

    if app_launch_command:
      raise helpers.EarlyExitException(
          'You are not privileged to run arbitrary launch commands.', 400)

    if (testcase_metadata and
        not _allow_unprivileged_metadata(testcase_metadata)):
      raise helpers.EarlyExitException(
          'You are not privileged to set testcase metadata.', 400)

    if additional_arguments:
      raise helpers.EarlyExitException(
          'You are not privileged to add command-line arguments.', 400)

    if gestures:
      raise helpers.EarlyExitException(
          'You are not privileged to run arbitrary gestures.', 400)

  # TODO(aarya): Remove once AFL is migrated to engine pipeline.
  if target_name:
    additional_arguments = '%TESTCASE%'

  if crash_revision and crash_revision.isdigit():
    crash_revision = int(crash_revision)
  else:
    crash_revision = 0

  if bug_information == '0':  # Auto-recover from this bad input.
    bug_information = None
  if bug_information and not bug_information.isdigit():
    raise helpers.EarlyExitException('Bug is not a number.', 400)

  if not timeout:
    timeout = 0
  elif not timeout.isdigit() or timeout == '0':
    raise helpers.EarlyExitException(
        'Testcase timeout must be a number greater than 0.', 400)
  else:
    timeout = int(timeout)
    if timeout > 120:
      raise helpers.EarlyExitException(
          'Testcase timeout may not be greater than 120 seconds.', 400)

  if retries:
    if retries.isdigit():
      retries = int(retries)
    else:
      retries = None

    if retries is None or retries > MAX_RETRIES:
      raise helpers.EarlyExitException(
          'Testcase retries must be a number less than %d.' % MAX_RETRIES, 400)
  else:
    retries = None

  job_queue = tasks.queue_for_job(job_type, is_high_end=high_end_job)

  if uploaded_file is not None:
    filename = ''.join(
        [x for x in uploaded_file.filename if x not in ' ;/?:@&=+$,{}|<>()\\'])
    key = str(uploaded_file.key())
    if archive.is_archive(filename):
      archive_state = data_types.ArchiveStatus.FUZZED

    if archive_state:
      if multiple_testcases:
        # Create a job to unpack an archive.
        metadata = data_types.BundledArchiveMetadata()
        metadata.blobstore_key = key
        metadata.timeout = timeout
        metadata.job_queue = job_queue
        metadata.job_type = job_type
        metadata.http_flag = http_flag
        metadata.archive_filename = filename
        metadata.uploader_email = email
        metadata.gestures = gestures
        metadata.crash_revision = crash_revision
        metadata.additional_arguments = additional_arguments
        metadata.bug_information = bug_information
        metadata.platform_id = platform_id
        metadata.app_launch_command = app_launch_command
        metadata.fuzzer_name = fuzzer_name
        metadata.overridden_fuzzer_name = fully_qualified_fuzzer_name
        metadata.fuzzer_binary_name = target_name
        metadata.put()

        tasks.add_task(
            'unpack',
            str(metadata.key.id()),
            job_type,
            queue=tasks.queue_for_job(job_type))

        # Create a testcase metadata object to show the user their upload.
        upload_metadata = data_types.TestcaseUploadMetadata()
        upload_metadata.timestamp = datetime.datetime.utcnow()
        upload_metadata.filename = filename
        upload_metadata.blobstore_key = key
        upload_metadata.original_blobstore_key = key
        upload_metadata.status = 'Pending'
        upload_metadata.bundled = True
        upload_metadata.uploader_email = email
        upload_metadata.retries = retries
        upload_metadata.bug_summary_update_flag = bug_summary_update_flag
        upload_metadata.quiet_flag = quiet_flag
        upload_metadata.additional_metadata_string = json.dumps(
            testcase_metadata)
        upload_metadata.bug_information = bug_information
        upload_metadata.put()

        helpers.log('Uploaded multiple testcases.', helpers.VIEW_OPERATION)
        return self.render_json({'multiple': True})

      file_path_input = guess_input_file(uploaded_file, filename)
      if not file_path_input:
        raise helpers.EarlyExitException(
            ("Unable to detect which file to launch. The main file's name "
             'must contain either of %s.' % str(RUN_FILE_PATTERNS)), 400)
  else:
    raise helpers.EarlyExitException('Please select a file to upload.', 400)

  testcase_id = data_handler.create_user_uploaded_testcase(
      key,
      key,
      archive_state,
      filename,
      file_path_input,
      timeout,
      job,
      job_queue,
      http_flag,
      gestures,
      additional_arguments,
      bug_information,
      crash_revision,
      email,
      platform_id,
      app_launch_command,
      fuzzer_name,
      fully_qualified_fuzzer_name,
      target_name,
      bundled,
      retries,
      bug_summary_update_flag,
      quiet_flag,
      additional_metadata=testcase_metadata,
      crash_data=crash_data)

  if not quiet_flag:
    testcase = data_handler.get_testcase_by_id(testcase_id)
    issue = issue_tracker_utils.get_issue_for_testcase(testcase)
    if issue:
      report_url = data_handler.TESTCASE_REPORT_URL.format(
          domain=data_handler.get_domain(), testcase_id=testcase_id)

      comment = ('ClusterFuzz is analyzing your testcase. '
                 'Developers can follow the progress at %s.' % report_url)
      issue.save(new_comment=comment)

  helpers.log('Uploaded testcase %s' % testcase_id, helpers.VIEW_OPERATION)
  return self.render_json({'id': '%s' % testcase_id})
def do_post(self):
    """Upload a testcase."""
    email = helpers.get_user_email()
    testcase_id = self.request.get("testcaseId")
    uploaded_file = self.get_upload()
    if testcase_id and not uploaded_file:
        testcase = helpers.get_testcase(testcase_id)
        if not access.can_user_access_testcase(testcase):
            raise helpers.AccessDeniedException()

        # Use minimized testcase for upload (if available).
        key = (testcase.minimized_keys if testcase.minimized_keys and
               testcase.minimized_keys != "NA" else testcase.fuzzed_keys)
        uploaded_file = blobs.get_blob_info(key)

        # Extract filename part from blob.
        uploaded_file.filename = os.path.basename(
            uploaded_file.filename.replace("\\", os.sep))

    job_type = self.request.get("job")
    if not job_type:
        raise helpers.EarlyExitException("Missing job name.", 400)

    if (not data_types.Job.VALID_NAME_REGEX.match(job_type) or
            not data_types.Job.query(data_types.Job.name == job_type).get()):
        raise helpers.EarlyExitException("Invalid job name.", 400)

    fuzzer_name = ""
    job_type_lowercase = job_type.lower()
    if "libfuzzer" in job_type_lowercase:
        fuzzer_name = "libFuzzer"
    elif "afl" in job_type_lowercase:
        fuzzer_name = "afl"

    target_name = self.request.get("target")
    if not fuzzer_name and target_name:
        raise helpers.EarlyExitException(
            "Target name is not applicable to non-engine jobs (AFL, libFuzzer).",
            400,
        )

    if fuzzer_name and not target_name:
        raise helpers.EarlyExitException(
            "Missing target name for engine job (AFL, libFuzzer).", 400)

    if target_name and not data_types.Fuzzer.VALID_NAME_REGEX.match(
            target_name):
        raise helpers.EarlyExitException("Invalid target name.", 400)

    fully_qualified_fuzzer_name = ""
    if fuzzer_name and target_name:
        fully_qualified_fuzzer_name, target_name = find_fuzz_target(
            fuzzer_name, target_name, job_type)
        if not fully_qualified_fuzzer_name:
            raise helpers.EarlyExitException("Target does not exist.", 400)

    if not access.has_access(
            need_privileged_access=False,
            job_type=job_type,
            fuzzer_name=(fully_qualified_fuzzer_name or fuzzer_name),
    ) and not _is_uploader_allowed(email):
        raise helpers.AccessDeniedException()

    multiple_testcases = bool(self.request.get("multiple"))
    http_flag = bool(self.request.get("http"))
    high_end_job = bool(self.request.get("highEnd"))
    bug_information = self.request.get("issue")
    crash_revision = self.request.get("revision")
    timeout = self.request.get("timeout")
    retries = self.request.get("retries")
    bug_summary_update_flag = bool(self.request.get("updateIssue"))
    quiet_flag = bool(self.request.get("quiet"))
    additional_arguments = self.request.get("args")
    app_launch_command = self.request.get("cmd")
    platform_id = self.request.get("platform")
    issue_labels = self.request.get("issue_labels")
    gestures = self.request.get("gestures") or "[]"

    testcase_metadata = self.request.get("metadata", {})
    if testcase_metadata:
        try:
            testcase_metadata = json.loads(testcase_metadata)
        except Exception:
            raise helpers.EarlyExitException("Invalid metadata JSON.", 400)
        if not isinstance(testcase_metadata, dict):
            raise helpers.EarlyExitException("Metadata is not a JSON object.",
                                             400)
    if issue_labels:
        testcase_metadata["issue_labels"] = issue_labels

    try:
        gestures = ast.literal_eval(gestures)
    except Exception:
        raise helpers.EarlyExitException("Failed to parse gestures.", 400)

    archive_state = 0
    bundled = False
    file_path_input = ""

    # Certain modifications such as app launch command, issue updates are only
    # allowed for privileged users.
    privileged_user = access.has_access(need_privileged_access=True)
    if not privileged_user:
        if bug_information or bug_summary_update_flag:
            raise helpers.EarlyExitException(
                "You are not privileged to update existing issues.", 400)

        need_privileged_access = utils.string_is_true(
            data_handler.get_value_from_job_definition(
                job_type, "PRIVILEGED_ACCESS"))
        if need_privileged_access:
            raise helpers.EarlyExitException(
                "You are not privileged to run this job type.", 400)

        if app_launch_command:
            raise helpers.EarlyExitException(
                "You are not privileged to run arbitrary launch commands.",
                400)

        if testcase_metadata and not _allow_unprivileged_metadata(
                testcase_metadata):
            raise helpers.EarlyExitException(
                "You are not privileged to set testcase metadata.", 400)

        if additional_arguments:
            raise helpers.EarlyExitException(
                "You are not privileged to add command-line arguments.", 400)

        if gestures:
            raise helpers.EarlyExitException(
                "You are not privileged to run arbitrary gestures.", 400)

    # TODO(aarya): Remove once AFL is migrated to engine pipeline.
    if target_name:
        additional_arguments = "%TESTCASE%"

    if crash_revision and crash_revision.isdigit():
        crash_revision = int(crash_revision)
    else:
        crash_revision = 0

    if bug_information == "0":  # Auto-recover from this bad input.
        bug_information = None
    if bug_information and not bug_information.isdigit():
        raise helpers.EarlyExitException("Bug is not a number.", 400)

    if not timeout:
        timeout = 0
    elif not timeout.isdigit() or timeout == "0":
        raise helpers.EarlyExitException(
            "Testcase timeout must be a number greater than 0.", 400)
    else:
        timeout = int(timeout)
        if timeout > 120:
            raise helpers.EarlyExitException(
                "Testcase timeout may not be greater than 120 seconds.", 400)

    if retries:
        if retries.isdigit():
            retries = int(retries)
        else:
            retries = None

        if retries is None or retries > MAX_RETRIES:
            raise helpers.EarlyExitException(
                "Testcase retries must be a number less than %d." %
                MAX_RETRIES, 400)
    else:
        retries = None

    job_queue = tasks.queue_for_job(job_type, is_high_end=high_end_job)

    if uploaded_file is not None:
        filename = "".join(x for x in uploaded_file.filename
                           if x not in " ;/?:@&=+$,{}|<>()\\")
        key = str(uploaded_file.key())
        if archive.is_archive(filename):
            archive_state = data_types.ArchiveStatus.FUZZED

        if archive_state:
            if multiple_testcases:
                # Create a job to unpack an archive.
                metadata = data_types.BundledArchiveMetadata()
                metadata.blobstore_key = key
                metadata.timeout = timeout
                metadata.job_queue = job_queue
                metadata.job_type = job_type
                metadata.http_flag = http_flag
                metadata.archive_filename = filename
                metadata.uploader_email = email
                metadata.gestures = gestures
                metadata.crash_revision = crash_revision
                metadata.additional_arguments = additional_arguments
                metadata.bug_information = bug_information
                metadata.platform_id = platform_id
                metadata.app_launch_command = app_launch_command
                metadata.fuzzer_name = fuzzer_name
                metadata.overridden_fuzzer_name = fully_qualified_fuzzer_name
                metadata.fuzzer_binary_name = target_name
                metadata.put()

                tasks.add_task(
                    "unpack",
                    str(metadata.key.id()),
                    job_type,
                    queue=tasks.queue_for_job(job_type),
                )

                # Create a testcase metadata object to show the user their
                # upload.
                upload_metadata = data_types.TestcaseUploadMetadata()
                upload_metadata.timestamp = datetime.datetime.utcnow()
                upload_metadata.filename = filename
                upload_metadata.blobstore_key = key
                upload_metadata.original_blobstore_key = key
                upload_metadata.status = "Pending"
                upload_metadata.bundled = True
                upload_metadata.uploader_email = email
                upload_metadata.retries = retries
                upload_metadata.bug_summary_update_flag = bug_summary_update_flag
                upload_metadata.quiet_flag = quiet_flag
                upload_metadata.additional_metadata_string = json.dumps(
                    testcase_metadata)
                upload_metadata.put()

                helpers.log("Uploaded multiple testcases.",
                            helpers.VIEW_OPERATION)
                self.render_json({"multiple": True})
                return

            file_path_input = guess_input_file(uploaded_file, filename)
            if not file_path_input:
                raise helpers.EarlyExitException(
                    ("Unable to detect which file to launch. The main file's "
                     "name must contain either of %s." %
                     str(RUN_FILE_PATTERNS)),
                    400,
                )
    else:
        raise helpers.EarlyExitException("Please select a file to upload.",
                                         400)

    testcase_id = data_handler.create_user_uploaded_testcase(
        key,
        key,
        archive_state,
        filename,
        file_path_input,
        timeout,
        job_type,
        job_queue,
        http_flag,
        gestures,
        additional_arguments,
        bug_information,
        crash_revision,
        email,
        platform_id,
        app_launch_command,
        fuzzer_name,
        fully_qualified_fuzzer_name,
        target_name,
        bundled,
        retries,
        bug_summary_update_flag,
        quiet_flag,
        additional_metadata=testcase_metadata,
    )

    if not quiet_flag:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        issue = issue_tracker_utils.get_issue_for_testcase(testcase)
        if issue:
            report_url = data_handler.TESTCASE_REPORT_URL.format(
                domain=data_handler.get_domain(), testcase_id=testcase_id)

            comment = ("ClusterFuzz is analyzing your testcase. "
                       "Developers can follow the progress at %s." %
                       report_url)
            issue.save(new_comment=comment)

    helpers.log("Uploaded testcase %s" % testcase_id, helpers.VIEW_OPERATION)
    self.render_json({"id": "%s" % testcase_id})