def _allowed_entities_for_user(user_email, entity_kind):
  """Return the entity names that the given user can access.

  Args:
    user_email: The email of the user.
    entity_kind: The type (data_types.PermissionEntityKind) of the entity.

  Returns:
    A sorted list of entity names that the user has access to.
  """
  if not user_email:
    return []

  allowed = []
  permissions = _get_permissions_query_for_user(user_email, entity_kind)

  if entity_kind == data_types.PermissionEntityKind.FUZZER:
    all_names = data_handler.get_all_fuzzer_names_including_children()
  else:
    all_names = data_handler.get_all_job_type_names()

  # Build the set once so the membership test below is O(1) per permission
  # instead of a linear scan of all names. Keep the original sequence for
  # _expand_prefix, which may depend on its ordering.
  all_names_set = set(all_names)

  for permission in permissions:
    if permission.is_prefix:
      allowed.extend(_expand_prefix(all_names, permission.entity_name))
    elif permission.entity_name in all_names_set:
      allowed.append(permission.entity_name)

  return sorted(allowed)
def get(self):
  """Handle a get request."""
  logs_bucket = fuzzer_logs.get_bucket()
  all_fuzzers = list(data_types.Fuzzer.query().order(data_types.Fuzzer.name))
  job_names = data_handler.get_all_job_type_names()

  corpus_names = []
  for bundle in data_types.DataBundle.query().order(
      data_types.DataBundle.name):
    corpus_names.append(bundle.name)

  is_privileged = access.has_access(need_privileged_access=True)

  # Unprivileged users can't download fuzzers, so hide the download keys.
  if not is_privileged:
    for fuzzer_entity in all_fuzzers:
      fuzzer_entity.blobstore_key = ''

  field_values = {
      'corpora': corpus_names,
      'jobs': job_names,
      'uploadInfo': gcs.prepare_blob_upload()._asdict(),
      'csrfToken': form.generate_csrf_token(),
  }
  template_values = {
      'privileged': is_privileged,
      'fuzzers': all_fuzzers,
      'fuzzerLogsBucket': logs_bucket,
      'fieldValues': field_values,
  }
  return self.render('fuzzers.html', template_values)
def get(self):
  """Handles get request."""
  user_email = helpers.get_user_email()
  if not user_email:
    raise helpers.AccessDeniedException()

  is_privileged_or_domain_user = access.has_access(
      need_privileged_access=False)
  sees_all_names = (
      is_privileged_or_domain_user or _is_uploader_allowed(user_email))

  if sees_all_names:
    # Privileged, domain and upload users can see all job and fuzzer names.
    jobs = data_handler.get_all_job_type_names()
    fuzzers = data_handler.get_all_fuzzer_names_including_children(
        include_parents=True)
  else:
    # Check if this is an external user with access to certain fuzzers/jobs.
    jobs = external_users.allowed_jobs_for_user(user_email)
    fuzzers = external_users.allowed_fuzzers_for_user(
        user_email, include_from_jobs=True)

  if not fuzzers and not jobs:
    raise helpers.AccessDeniedException()

  has_issue_tracker = bool(data_handler.get_issue_tracker_name())
  result, params = get_result()

  targets_by_engine = {
      engine: filter_target_names(fuzzers, engine)
      for engine in fuzzing.ENGINES
  }
  field_values = {
      'blackboxFuzzers': filter_blackbox_fuzzers(fuzzers),
      'jobs': jobs,
      'targets': targets_by_engine,
      'isChromium': utils.is_chromium(),
      'sandboxedJobs': data_types.INTERNAL_SANDBOXED_JOB_TYPES,
      'csrfToken': form.generate_csrf_token(),
      'isExternalUser': not is_privileged_or_domain_user,
      'uploadInfo': gcs.prepare_blob_upload()._asdict(),
      'hasIssueTracker': has_issue_tracker,
  }
  return self.render('upload.html', {
      'fieldValues': field_values,
      'params': params,
      'result': result,
  })
def get(self):
  """Get and render the testcase list in HTML."""
  result, params = get_result()
  return self.render(
      'testcase-list.html', {
          'fieldValues': {
              'projects': data_handler.get_all_project_names(),
              'fuzzers':
                  data_handler.get_all_fuzzer_names_including_children(
                      include_parents=True),
              'jobs': data_handler.get_all_job_type_names(),
              'shouldShowImpact': utils.is_chromium(),
          },
          'result': result,
          'params': params,
      })
def get(self):
  """Handle a GET request."""
  project = request.get('project')

  if access.has_access():
    # User is an internal user of ClusterFuzz (eg: ClusterFuzz developer).
    # Show all projects in the list, since this allows user to pick another
    # project as needed.
    projects = data_handler.get_all_project_names()

    # Filter fuzzers and job list if a project is provided.
    fuzzers = data_handler.get_all_fuzzer_names_including_children(
        include_parents=True, project=project)
    jobs = data_handler.get_all_job_type_names(project=project)
  else:
    # User is an external user of ClusterFuzz (eg: non-Chrome dev who
    # submitted a fuzzer or someone with a project in OSS-Fuzz).
    user_email = helpers.get_user_email()

    # TODO(aarya): Filter fuzzer and job if |project| is provided.
    fuzzers = sorted(
        external_users.allowed_fuzzers_for_user(
            user_email, include_from_jobs=True, include_parents=True))
    if not fuzzers:
      # User doesn't actually have access to any fuzzers.
      raise helpers.AccessDeniedException(
          "You don't have access to any fuzzers.")

    jobs = sorted(external_users.allowed_jobs_for_user(user_email))
    projects = sorted(
        {data_handler.get_project_name(job) for job in jobs})

  return self.render_json({
      'projects': projects,
      'fuzzers': fuzzers,
      'jobs': jobs,
  })
def get(self):
  """Get and render the crash stats in HTML."""
  result, params = get_result()
  return self.render(
      'crash-stats.html', {
          'result': result,
          'fieldValues': {
              'fuzzers':
                  data_handler.get_all_fuzzer_names_including_children(
                      include_parents=True),
              'jobs': data_handler.get_all_job_type_names(),
              'platforms': get_all_platforms(),
              'projects': data_handler.get_all_project_names(),
              'minHour': crash_stats_common.get_min_hour(),
              'maxHour': crash_stats_common.get_max_hour(),
          },
          'params': params,
      })
def get(self):
  """Handle a GET request."""
  # pylint: disable=unexpected-keyword-arg
  # Warm the memoization caches for project and job names.
  data_handler.get_all_project_names(__memoize_force__=True)
  data_handler.get_all_job_type_names(__memoize_force__=True)

  # Warm both variants of get_all_fuzzer_names_including_children.
  data_handler.get_all_fuzzer_names_including_children(
      include_parents=True, __memoize_force__=True)
  data_handler.get_all_fuzzer_names_including_children(__memoize_force__=True)

  # Warm the expensive per-testcase blob size lookups.
  for testcase_id in data_handler.get_open_testcase_id_iterator():
    try:
      testcase = data_handler.get_testcase_by_id(testcase_id)
    except errors.InvalidTestcaseError:
      # Already deleted.
      continue

    for blob_key in (testcase.fuzzed_keys, testcase.minimized_keys):
      blobs.get_blob_size(blob_key)
def get(self):
  """Handle a get request.

  Runs the triage cron: groups open testcases, then walks every open
  testcase and files an issue for each one that passes all the triage
  checks (job still exists and isn't excluded, no pending progression,
  no bug filed yet, crash is important, critical tasks done, grouping
  delay elapsed, no similar bug already filed).
  """
  try:
    grouper.group_testcases()
  except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are not
    # swallowed. Grouping is best-effort; log the failure and bail out.
    logs.log_error('Error occurred while grouping test cases.')
    return

  # Free up memory after group task run.
  utils.python_gc()

  # Get a list of jobs excluded from bug filing.
  excluded_jobs = _get_excluded_jobs()

  # Get a list of all jobs. This is used to filter testcases whose jobs have
  # been removed.
  all_jobs = data_handler.get_all_job_type_names()

  for testcase_id in data_handler.get_open_testcase_id_iterator():
    try:
      testcase = data_handler.get_testcase_by_id(testcase_id)
    except errors.InvalidTestcaseError:
      # Already deleted.
      continue

    # Skip if testcase's job is removed.
    if testcase.job_type not in all_jobs:
      continue

    # Skip if testcase's job is in exclusions list.
    if testcase.job_type in excluded_jobs:
      continue

    # Skip if we are running progression task at this time.
    if testcase.get_metadata('progression_pending'):
      continue

    # If the testcase has a bug filed already, no triage is needed.
    if _is_bug_filed(testcase):
      continue

    # Check if the crash is important, i.e. it is either a reproducible crash
    # or an unreproducible crash happening frequently.
    if not _is_crash_important(testcase):
      continue

    # Require that all tasks like minimizaton, regression testing, etc have
    # finished.
    if not data_handler.critical_tasks_completed(testcase):
      continue

    # For testcases that are not part of a group, wait an additional time to
    # make sure it is grouped.
    # The grouper runs prior to this step in the same cron, but there is a
    # window of time where new testcases can come in after the grouper starts.
    # This delay needs to be longer than the maximum time the grouper can take
    # to account for that.
    # FIXME: In future, grouping might be dependent on regression range, so we
    # would have to add an additional wait time.
    if not testcase.group_id and not dates.time_has_expired(
        testcase.timestamp, hours=data_types.MIN_ELAPSED_TIME_SINCE_REPORT):
      continue

    # If this project does not have an associated issue tracker, we cannot
    # file this crash anywhere.
    issue_tracker = issue_tracker_utils.get_issue_tracker_for_testcase(
        testcase)
    if not issue_tracker:
      issue_filer.notify_issue_update(testcase, 'new')
      continue

    # If there are similar issues to this test case already filed or recently
    # closed, skip filing a duplicate bug.
    if _check_and_update_similar_bug(testcase, issue_tracker):
      continue

    # Clean up old triage messages that would be not applicable now.
    testcase.delete_metadata(TRIAGE_MESSAGE_KEY, update_testcase=False)

    # File the bug first and then create filed bug metadata.
    try:
      issue_filer.file_issue(testcase, issue_tracker)
    except Exception as e:
      logs.log_error('Failed to file issue for testcase %d.' % testcase_id)
      _add_triage_message(testcase,
                         f'Failed to file issue due to exception: {str(e)}')
      continue

    _create_filed_bug_metadata(testcase)
    issue_filer.notify_issue_update(testcase, 'new')

    logs.log('Filed new issue %s for testcase %d.' %
             (testcase.bug_information, testcase_id))