def summarize_profile(profile, enrollments=None, term_gpas=None):
    """Reduce a full student profile to summary form, in place.

    Lifts selected SIS fields to the top level, merges in the singleton term
    enrollment (when present in ``enrollments``) and the per-term GPA map.
    """
    # Strip SIS details to lighten the API load.
    sis_profile = profile.pop('sisProfile', None)
    if sis_profile:
        for field in ('academicCareerStatus', 'cumulativeGPA', 'cumulativeUnits', 'currentTerm', 'degree', 'expectedGraduationTerm'):
            profile[field] = sis_profile.get(field)
        profile['level'] = _get_sis_level_description(sis_profile)
        profile['majors'] = _get_active_plan_descriptions(sis_profile)
        profile['matriculation'] = sis_profile.get('matriculation')
        profile['transfer'] = sis_profile.get('transfer')
        withdrawal_cancel = sis_profile.get('withdrawalCancel')
        if withdrawal_cancel:
            profile['withdrawalCancel'] = withdrawal_cancel
            # Backfill a term id when SIS omits it; the profile sees the same dict.
            if not withdrawal_cancel.get('termId'):
                withdrawal_cancel['termId'] = current_term_id()
    profile['hasCurrentTermEnrollments'] = False
    if enrollments:
        # Add the singleton term.
        term = enrollments.get(profile['sid'])
        if term:
            if not current_user.can_access_canvas_data:
                _suppress_canvas_sites(term)
            profile['term'] = term
            if term['termId'] == current_term_id() and len(term['enrollments']) > 0:
                profile['hasCurrentTermEnrollments'] = True
    if term_gpas:
        profile['termGpa'] = term_gpas.get(profile['sid'])
def update_all_for_term(cls, term_id):
    """Regenerate alerts for every enrollment in a term.

    For the current term only (and when the corresponding feature flags are
    on), also regenerate hold and withdrawal-cancel alerts.
    """
    app.logger.info('Starting alert update')
    enrollments_for_term = data_loch.get_enrollments_for_term(str(term_id))
    no_activity_alerts_enabled = cls.no_activity_alerts_enabled()
    infrequent_activity_alerts_enabled = cls.infrequent_activity_alerts_enabled()
    for row in enrollments_for_term:
        term_feed = json.loads(row['enrollment_term'])
        for enrollment in term_feed.get('enrollments', []):
            cls.update_alerts_for_enrollment(
                row['sid'],
                term_id,
                enrollment,
                no_activity_alerts_enabled,
                infrequent_activity_alerts_enabled,
            )
    is_current_term = str(term_id) == current_term_id()
    if app.config['ALERT_HOLDS_ENABLED'] and is_current_term:
        for row in data_loch.get_sis_holds():
            hold_feed = json.loads(row['feed'])
            cls.update_hold_alerts(row['sid'], term_id, hold_feed.get('type'), hold_feed.get('reason'))
    if app.config['ALERT_WITHDRAWAL_ENABLED'] and is_current_term:
        for row in data_loch.get_student_profiles():
            profile_feed = json.loads(row['profile'])
            if 'withdrawalCancel' in (profile_feed.get('sisProfile') or {}):
                cls.update_withdrawal_cancel_alerts(row['sid'], term_id)
    app.logger.info('Alert update complete')
def get_summary_student_profiles(sids, term_id=None):
    """Return summary student profiles for the given SIDs.

    Merges the requested term's enrollment feed (default: current term) and
    per-term GPAs into each profile.
    """
    if not sids:
        return []
    benchmark = get_benchmarker('get_summary_student_profiles')
    benchmark('begin')
    # TODO It's probably more efficient to store summary profiles in the loch, rather than distilling them
    # on the fly from full profiles.
    profiles = get_full_student_profiles(sids)
    # TODO Many views require no term enrollment information other than a units count. This datum too should be
    # stored in the loch without BOAC having to crunch it.
    if not term_id:
        term_id = current_term_id()
    benchmark('begin enrollments query')
    enrollment_rows = data_loch.get_enrollments_for_term(term_id, sids)
    benchmark('end enrollments query')
    enrollments_by_sid = {r['sid']: json.loads(r['enrollment_term']) for r in enrollment_rows}
    benchmark('begin term GPA query')
    term_gpas = get_term_gpas_by_sid(sids)
    benchmark('end term GPA query')
    benchmark('begin profile transformation')
    for profile in profiles:
        # Strip SIS details to lighten the API load.
        sis_profile = profile.pop('sisProfile', None)
        if sis_profile:
            profile['cumulativeGPA'] = sis_profile.get('cumulativeGPA')
            profile['cumulativeUnits'] = sis_profile.get('cumulativeUnits')
            profile['currentTerm'] = sis_profile.get('currentTerm')
            profile['expectedGraduationTerm'] = sis_profile.get('expectedGraduationTerm')
            profile['level'] = _get_sis_level_description(sis_profile)
            profile['majors'] = sorted(plan.get('description') for plan in sis_profile.get('plans', []))
            profile['transfer'] = sis_profile.get('transfer')
            if sis_profile.get('withdrawalCancel'):
                profile['withdrawalCancel'] = sis_profile['withdrawalCancel']
        # Add the singleton term.
        term = enrollments_by_sid.get(profile['sid'])
        profile['hasCurrentTermEnrollments'] = False
        if term:
            profile['analytics'] = term.pop('analytics', None)
            profile['term'] = term
            if term['termId'] == current_term_id() and len(term['enrollments']) > 0:
                profile['hasCurrentTermEnrollments'] = True
        profile['termGpa'] = term_gpas.get(profile['sid'])
    benchmark('end')
    return profiles
def load_term(term_id=None):
    """Refresh alerts for a term; for the current term, also reload cohort data.

    :param term_id: SIS term id, 'all' to load every term, or None for the
        current term.

    Fix: the default was previously ``berkeley.current_term_id()``, which is
    evaluated once at import time — the default term would silently go stale
    in a long-running process. It is now resolved at call time.
    """
    if term_id is None:
        term_id = berkeley.current_term_id()
    if term_id == 'all':
        load_all_terms()
        return
    JobProgress().update(f'About to refresh alerts for term {term_id}')
    refresh_alerts(term_id)
    if term_id == berkeley.current_term_id():
        JobProgress().update('About to load filtered cohort counts')
        load_filtered_cohort_counts()
        JobProgress().update('About to update curated cohort memberships')
        update_curated_cohort_lists()
def current_alerts_for_sid(cls, viewer_id, sid):
    """Return active current-term alerts for one student, split into 'shown'
    and 'dismissed' buckets based on the viewer's dismissals."""
    query = text("""
        SELECT alerts.*, alert_views.dismissed_at
        FROM alerts
        LEFT JOIN alert_views
            ON alert_views.alert_id = alerts.id
            AND alert_views.viewer_id = :viewer_id
        WHERE alerts.active = true
            AND alerts.key LIKE :key
            AND alerts.sid = :sid
        ORDER BY alerts.created_at
    """)
    params = {'viewer_id': viewer_id, 'key': current_term_id() + '_%', 'sid': sid}
    results = db.session.execute(query, params)

    def summarize(row):
        return {camelize(key): row[key] for key in ['id', 'alert_type', 'key', 'message']}

    feed = {'dismissed': [], 'shown': []}
    for row in results:
        bucket = 'dismissed' if row['dismissed_at'] else 'shown'
        feed[bucket].append(summarize(row))
    return feed
def refresh_alert_counts_for_owner(cls, owner_id):
    """Recompute each of this owner's cohorts' counts of active, undismissed
    current-term alerts, writing them back to cohort_filters.alert_count."""
    query = text("""UPDATE cohort_filters
        SET alert_count = updated_cohort_counts.alert_count
        FROM (
            SELECT cohort_filters.id AS cohort_filter_id, count(*) AS alert_count
            FROM alerts
            JOIN cohort_filters
                ON alerts.sid = ANY(cohort_filters.sids)
                AND alerts.key LIKE :key
                AND alerts.active IS TRUE
            JOIN cohort_filter_owners
                ON cohort_filters.id = cohort_filter_owners.cohort_filter_id
                AND cohort_filter_owners.user_id = :owner_id
            LEFT JOIN alert_views
                ON alert_views.alert_id = alerts.id
                AND alert_views.viewer_id = :owner_id
            WHERE alert_views.dismissed_at IS NULL
            GROUP BY cohort_filters.id
        ) updated_cohort_counts
        WHERE cohort_filters.id = updated_cohort_counts.cohort_filter_id""")
    params = {'owner_id': owner_id, 'key': current_term_id() + '_%'}
    result = db.session.execute(query, params)
    std_commit()
    return result
def current_alert_counts_for_sids(cls, viewer_id, sids, count_only=False, offset=None, limit=None):
    """Count undismissed, active current-term alerts per SID for a viewer.

    Optional offset/limit paginate the per-SID counts.
    """
    query = """
        SELECT alerts.sid, count(*) as alert_count
        FROM alerts
        LEFT JOIN alert_views
            ON alert_views.alert_id = alerts.id
            AND alert_views.viewer_id = :viewer_id
        WHERE alerts.active = true
            AND alerts.key LIKE :key
            AND alerts.sid = ANY(:sids)
            AND alert_views.dismissed_at IS NULL
        GROUP BY alerts.sid
        ORDER BY alert_count DESC, alerts.sid
    """
    # Pagination clauses are appended only when requested.
    if offset:
        query += ' OFFSET :offset'
    if limit:
        query += ' LIMIT :limit'
    params = {
        'viewer_id': viewer_id,
        'key': current_term_id() + '_%',
        'sids': sids,
        'offset': offset,
        'limit': limit,
    }
    return cls.alert_counts_by_query(query, params, count_only=count_only)
def load_term(term_id=None):
    """Refresh alerts for a term; for the current term, also refresh
    department memberships, CalNet attributes, and cohort/group data.

    :param term_id: SIS term id, 'all' for every term, or None for the
        current term.

    Fix: the default was previously ``berkeley.current_term_id()``, which is
    evaluated once at import time — the default term would silently go stale
    in a long-running process. It is now resolved at call time.
    """
    if term_id is None:
        term_id = berkeley.current_term_id()
    if term_id == 'all':
        load_all_terms()
        return
    JobProgress().update(f'About to refresh alerts for term {term_id}')
    refresh_alerts(term_id)
    if term_id == berkeley.current_term_id():
        JobProgress().update('About to refresh department memberships')
        refresh_department_memberships()
        JobProgress().update('About to refresh CalNet attributes for active users')
        refresh_calnet_attributes()
        JobProgress().update('About to load filtered cohort counts')
        load_filtered_cohort_counts()
        JobProgress().update('About to update curated group memberships')
        update_curated_group_lists()
def no_activity_alerts_enabled(cls):
    """Decide whether no-activity alerts should be generated.

    Requires the feature flag, a non-summer Canvas enrollment term, and that
    the regular undergraduate session is far enough along.
    """
    session = data_loch.get_regular_undergraduate_session(current_term_id())[0]
    # Days elapsed since the session began, relative to today.
    days_into_session = (datetime.date(datetime.today()) - session['session_begins']).days
    return (
        app.config['ALERT_NO_ACTIVITY_ENABLED']
        and not app.config['CANVAS_CURRENT_ENROLLMENT_TERM'].startswith('Summer')
        and days_into_session >= app.config['ALERT_NO_ACTIVITY_DAYS_INTO_SESSION']
    )
def current_alert_counts_for_viewer(cls, viewer_id):
    """Count this viewer's undismissed, active current-term alerts per SID."""
    query = """
        SELECT alerts.sid, count(*) as alert_count
        FROM alerts
        LEFT JOIN alert_views
            ON alert_views.alert_id = alerts.id
            AND alert_views.viewer_id = :viewer_id
        WHERE alerts.active = true
            AND alerts.key LIKE :key
            AND alert_views.dismissed_at IS NULL
        GROUP BY alerts.sid
    """
    params = {
        'viewer_id': viewer_id,
        'key': current_term_id() + '_%',
    }
    return cls.alert_counts_by_query(query, params)
def get_student_and_terms(uid):
    """Provide external data for student-specific view."""
    student = data_loch.get_student_for_uid_and_scope(uid, get_student_query_scope())
    if not student:
        return
    profiles = get_full_student_profiles([student['sid']])
    if not profiles or not profiles[0]:
        return
    profile = profiles[0]
    rows = data_loch.get_enrollments_for_sid(student['sid'], latest_term_id=current_term_id())
    profile['enrollmentTerms'] = [json.loads(r['enrollment_term']) for r in rows]
    profile['hasCurrentTermEnrollments'] = False
    for term in profile['enrollmentTerms']:
        if term['termId'] == current_term_id():
            profile['hasCurrentTermEnrollments'] = len(term['enrollments']) > 0
        else:
            # Omit dropped sections for past terms.
            term.pop('droppedSections', None)
    return profile
def get_summary_student_profiles(sids, include_historical=False, term_id=None):
    """Return summary student profiles for the given SIDs.

    Merges the requested term's enrollments (default: current term) and
    per-term GPAs into each profile. When ``include_historical`` is True,
    SIDs missing from the live loch are supplemented from historical tables.

    Idiom fixes: set comprehension instead of ``set([...])``, plain
    truthiness instead of ``len(...) and``, single dict literal instead of
    ``{**a, **{...}}``.
    """
    if not sids:
        return []
    benchmark = get_benchmarker('get_summary_student_profiles')
    benchmark('begin')
    # TODO It's probably more efficient to store summary profiles in the loch, rather than distilling them
    # on the fly from full profiles.
    profiles = get_full_student_profiles(sids)
    # TODO Many views require no term enrollment information other than a units count. This datum too should be
    # stored in the loch without BOAC having to crunch it.
    if not term_id:
        term_id = current_term_id()
    benchmark('begin enrollments query')
    enrollments_for_term = data_loch.get_enrollments_for_term(term_id, sids)
    benchmark('end enrollments query')
    enrollments_by_sid = {row['sid']: json.loads(row['enrollment_term']) for row in enrollments_for_term}
    benchmark('begin term GPA query')
    term_gpas = get_term_gpas_by_sid(sids)
    benchmark('end term GPA query')
    # SIDs with no full profile in the loch; candidates for historical lookup.
    remaining_sids = list(set(sids) - {p.get('sid') for p in profiles})
    if remaining_sids and include_historical:
        benchmark('begin historical profile supplement')
        historical_profile_rows = data_loch.get_historical_student_profiles_for_sids(remaining_sids)

        def _historicize_profile(row):
            # Flag the profile so the front end knows full data is pending.
            return {**json.loads(row['profile']), 'fullProfilePending': True}

        historical_profiles = [_historicize_profile(row) for row in historical_profile_rows]
        # We don't expect photo information to show for historical profiles, but we still need a placeholder element
        # in the feed so the front end can show the proper fallback.
        _merge_photo_urls(historical_profiles)
        for historical_profile in historical_profiles:
            ManuallyAddedAdvisee.find_or_create(historical_profile['sid'])
        profiles += historical_profiles
        historical_enrollments_for_term = data_loch.get_historical_enrollments_for_term(term_id, remaining_sids)
        for row in historical_enrollments_for_term:
            enrollments_by_sid[row['sid']] = json.loads(row['enrollment_term'])
        benchmark('end historical profile supplement')
    benchmark('begin profile transformation')
    for profile in profiles:
        summarize_profile(profile, enrollments=enrollments_by_sid, term_gpas=term_gpas)
    benchmark('end')
    return profiles
def refresh_term(term_id=None, continuation=False):
    """Rebuild cached data for a term via the staging table.

    :param term_id: SIS term id; None means the current term.
    :param continuation: when True and a staging table already exists, reuse
        it rather than dropping and recreating.

    Fix: the default was previously ``berkeley.current_term_id()``, which is
    evaluated once at import time and goes stale; now resolved per call.
    """
    if term_id is None:
        term_id = berkeley.current_term_id()
    if not continuation or not json_cache.staging_table_exists():
        JobProgress().update('About to drop/create staging table')
        json_cache.drop_staging_table()
        json_cache.create_staging_table(exclusions_for_term(term_id))
    json_cache.set_staging(True)
    load_term(term_id)
    JobProgress().update('About to refresh from staging table')
    refresh_count = json_cache.refresh_from_staging(inclusions_for_term(term_id))
    # TODO Currently we're not looping anything into the staging table, so we expect refresh count to be zero.
    # If a more considered set of cache entries comes back into the loop, this error message should come back
    # too.
    # if refresh_count == 0:
    #     JobProgress().update('ERROR: No cache entries copied from staging')
    JobProgress().update(f'{refresh_count} cache entries copied from staging')
def _merge_enrollment_terms(profile, enrollment_results):
    """Parse enrollment-term rows and attach them to the profile as 'enrollmentTerms'.

    Also sets 'hasCurrentTermEnrollments'; past terms are cleaned of dropped
    sections and zombie waitlist entries, and emptied terms are skipped.
    """
    profile['hasCurrentTermEnrollments'] = False
    filtered_enrollment_terms = []
    for row in enrollment_results:
        term = json.loads(row['enrollment_term'])
        if term['termId'] == current_term_id():
            profile['hasCurrentTermEnrollments'] = len(term['enrollments']) > 0
        else:
            # Omit dropped sections for non-current terms.
            term.pop('droppedSections', None)
            # Filter out the now-empty term if all classes were dropped.
            enrollments = term.get('enrollments')
            if not enrollments:
                continue
            # Omit zombie waitlisted enrollments for past terms.
            # TODO Even for current terms, it may be a mistake when SIS data sources show both active and waitlisted
            # section enrollments for a single class, but that needs confirmation.
            if enrollments and term['termId'] < current_term_id():
                for course in enrollments:
                    sections = course['sections']
                    if sections:
                        # Keep only non-waitlisted section enrollments.
                        fixed_sections = []
                        for enrollment in sections:
                            if enrollment.get('enrollmentStatus') != 'W':
                                fixed_sections.append(enrollment)
                        if not fixed_sections:
                            # All enrollments were waitlisted: keep them, but log the anomaly.
                            app.logger.warn(f'SIS provided only waitlisted enrollments in a past term: {term}')
                        else:
                            course['sections'] = fixed_sections
        # Hide Canvas site data from viewers without Canvas access.
        if not current_user.can_access_canvas_data:
            _suppress_canvas_sites(term)
        filtered_enrollment_terms.append(term)
    profile['enrollmentTerms'] = filtered_enrollment_terms
def _course_search(search_phrase, params, order_by):
    """Search the current term's enrolled primary sections by course code or
    compressed free text; at most 50 rows are rendered."""
    term_id = current_term_id()
    course_rows = []

    def _alnum_only(text):
        # Drop every non-alphanumeric character.
        return ''.join(ch for ch in text if ch.isalnum())

    words = search_phrase.rsplit(' ', 1)
    if len(words) == 1:
        candidate_subject_area, candidate_catalog_id = None, words[0]
    else:
        candidate_subject_area, candidate_catalog_id = words
    # If the search phrase appears to contain a catalog id, set up the course search that way.
    if any(ch.isdigit() for ch in candidate_catalog_id):
        subject_area = candidate_subject_area and _alnum_only(candidate_subject_area).upper()
        catalog_id = candidate_catalog_id.upper()
        course_rows = get_enrolled_primary_sections_for_parsed_code(term_id, subject_area, catalog_id)
    # Otherwise just compress the search phrase to alphanumeric characters and look for a simple match.
    else:
        compressed_search_phrase = _alnum_only(search_phrase)
        if compressed_search_phrase:
            course_rows = get_enrolled_primary_sections(term_id, compressed_search_phrase.upper())
    courses = []
    if course_rows:
        for row in islice(course_rows, 50):
            courses.append({
                'termId': row['term_id'],
                'sectionId': row['sis_section_id'],
                'courseName': row['sis_course_name'],
                'courseTitle': row['sis_course_title'],
                'instructionFormat': row['sis_instruction_format'],
                'sectionNum': row['sis_section_num'],
                'instructors': row['instructors'],
            })
    return {
        'courses': courses,
        'totalCourseCount': len(course_rows),
    }
def _grad_terms():
    """Return menu options for expected graduation terms, with a divider entry
    inserted before the first term earlier than the current one.

    Fix: when no term precedes the current term, ``next(..., None)`` yields
    None and ``terms.insert(None, ...)`` raised TypeError; the divider is now
    inserted only when a past term exists.
    """
    term_ids = [r['expected_grad_term'] for r in data_loch.get_expected_graduation_terms()]
    terms = [
        {
            # Reverse the word order of the term name (e.g. season/year swap) for display.
            'name': ' '.join(term_name_for_sis_id(term_id).split()[::-1]),
            'value': term_id,
        }
        for term_id in term_ids
    ]
    first_previous_term_index = next(
        (i for i, term in enumerate(terms) if term['value'] < current_term_id()),
        None,
    )
    if first_previous_term_index is not None:
        terms.insert(first_previous_term_index, {'name': 'divider', 'value': 'divider'})
    return terms
def _construct_student_profile(student):
    """Build the detailed student profile: SIS level description, merged
    enrollment terms, and withdrawal-cancel status."""
    if not student:
        return
    profiles = get_full_student_profiles([student['sid']])
    if not profiles or not profiles[0]:
        return
    profile = profiles[0]
    sis_profile = profile.get('sisProfile', None)
    if sis_profile and 'level' in sis_profile:
        sis_profile['level']['description'] = _get_sis_level_description(sis_profile)
    enrollment_results = data_loch.get_enrollments_for_sid(student['sid'], latest_term_id=future_term_id())
    _merge_enrollment_terms(profile, enrollment_results)
    withdrawal_cancel = sis_profile.get('withdrawalCancel') if sis_profile else None
    if withdrawal_cancel:
        profile['withdrawalCancel'] = withdrawal_cancel
        # Backfill a term id when SIS omits it; the profile sees the same dict.
        if not withdrawal_cancel.get('termId'):
            withdrawal_cancel['termId'] = current_term_id()
    return profile
def current_alerts_for_sid(cls, viewer_id, sid):
    """Return active current-term alerts for one student, each annotated with
    the viewer's dismissal time and created/updated timestamps."""
    query = text("""
        SELECT alerts.*, alert_views.dismissed_at
        FROM alerts
        LEFT JOIN alert_views
            ON alert_views.alert_id = alerts.id
            AND alert_views.viewer_id = :viewer_id
        WHERE alerts.active = true
            AND alerts.key LIKE :key
            AND alerts.sid = :sid
        ORDER BY alerts.created_at
    """)
    params = {'viewer_id': viewer_id, 'key': current_term_id() + '_%', 'sid': sid}
    results = db.session.execute(query, params)
    time_format = '%Y-%m-%d %H:%M:%S'
    feed = []
    for result in results:
        dismissed_at = result['dismissed_at']
        alert = {camelize(key): result[key] for key in ['id', 'alert_type', 'key', 'message']}
        # 'dismissed' is None/falsy when the viewer has not dismissed the alert.
        alert['dismissed'] = dismissed_at and dismissed_at.strftime(time_format)
        alert['createdAt'] = result['created_at'].strftime(time_format)
        alert['updatedAt'] = result['updated_at'].strftime(time_format)
        feed.append(alert)
    return feed
def _construct_student_profile(student):
    """Build the detailed student profile with SIS level description and raw
    enrollment terms up to the future term."""
    if not student:
        return
    profiles = get_full_student_profiles([student['sid']])
    if not profiles or not profiles[0]:
        return
    profile = profiles[0]
    sis_profile = profile.get('sisProfile', None)
    if sis_profile and 'level' in sis_profile:
        sis_profile['level']['description'] = _get_sis_level_description(sis_profile)
    rows = data_loch.get_enrollments_for_sid(student['sid'], latest_term_id=future_term_id())
    profile['enrollmentTerms'] = [json.loads(r['enrollment_term']) for r in rows]
    profile['hasCurrentTermEnrollments'] = False
    for term in profile['enrollmentTerms']:
        if term['termId'] == current_term_id():
            profile['hasCurrentTermEnrollments'] = len(term['enrollments']) > 0
        else:
            # Omit dropped sections for past terms.
            term.pop('droppedSections', None)
    return profile
def low_assignment_scores(term_id=None):
    """Survey a term's enrollments for students with low Canvas assignment scores.

    Returns a report dict of examined SIDs, low scorers (<= 25th percentile),
    repeat low scorers, low raw scorers, and primary-section counts.
    NOTE(review): nesting below reconstructed from a collapsed source — the
    percentile/raw-score checks are taken to sit inside the courseDeciles
    branch; confirm against upstream history.
    """
    if not term_id:
        term_id = current_term_id()
    examined_sids = set()
    low_sids = set()
    multiple_low_sids = set()
    # Many students in a low percentile may have received a reasonably high score.
    # Since instructors rarely grade on a curve, it may be fine to receive a score of 85
    # even if all other students received 90 or above.
    sids_with_low_raw_scores = set()
    primary_sections = set()
    primary_sections_with_scored_assignments = set()
    primary_sections_with_plottable_assignments = set()
    enrollments_for_term = data_loch.get_enrollments_for_term(term_id)
    enrollments_by_sid = {row['sid']: json.loads(row['enrollment_term']) for row in enrollments_for_term}
    itr = iter(enrollments_by_sid.items())
    for (sid, term) in itr:
        examined_sids.add(sid)
        for enr in term['enrollments']:
            # Only primary sections (identified by the first section) are surveyed.
            first_section = enr['sections'][0]
            if not first_section.get('primary'):
                continue
            ccn = first_section['ccn']
            primary_sections.add(ccn)
            for site in enr['canvasSites']:
                score_info = site['analytics']['currentScore']
                if score_info['courseDeciles']:
                    primary_sections_with_scored_assignments.add(ccn)
                    if score_info['boxPlottable']:
                        primary_sections_with_plottable_assignments.add(ccn)
                    pct = score_info['student']['roundedUpPercentile']
                    if pct is not None:
                        if pct <= 25:
                            # A second low score for the same SID marks a repeat low scorer.
                            if sid in low_sids:
                                multiple_low_sids.add(sid)
                            low_sids.add(sid)
                            # Tenth decile is the course maximum score.
                            max_score = score_info['courseDeciles'][9]
                            if score_info['student']['raw'] < (max_score * 0.7):
                                sids_with_low_raw_scores.add(sid)
    app.logger.warn(f'Total of {len(examined_sids)} students in classes. {len(low_sids)} with low scores in a class.')
    app.logger.warn(f'Low scorers: {sorted(low_sids)}')
    app.logger.warn(f' {len(multiple_low_sids)} Low scorers in multiple sites: {sorted(multiple_low_sids)}')
    app.logger.warn(f' {len(sids_with_low_raw_scores)} Low scorers with raw score < 70% of max: {sorted(sids_with_low_raw_scores)}')
    app.logger.warn(
        f'Total of {len(primary_sections)} primary sections. '
        f'{len(primary_sections_with_scored_assignments)} have scores. '
        f'{len(primary_sections_with_plottable_assignments)} have a reasonable range of scores.'
    )
    return {
        'sids': sorted(examined_sids),
        'low_sids': sorted(low_sids),
        'multiple_low_sids': sorted(multiple_low_sids),
        'sids_with_low_raw_scores': sorted(sids_with_low_raw_scores),
        'primary_sections_count': len(primary_sections),
        'sections_scored_count': len(primary_sections_with_scored_assignments),
        'sections_with_range_of_scores_count': len(primary_sections_with_plottable_assignments),
    }
def get_students_query(  # noqa
    advisor_plan_mappings=None,
    coe_advisor_ldap_uids=None,
    coe_ethnicities=None,
    coe_genders=None,
    coe_prep_statuses=None,
    coe_probation=None,
    coe_underrepresented=None,
    ethnicities=None,
    entering_terms=None,
    expected_grad_terms=None,
    genders=None,
    gpa_ranges=None,
    group_codes=None,
    in_intensive_cohort=None,
    is_active_asc=None,
    is_active_coe=None,
    last_name_ranges=None,
    last_term_gpa_ranges=None,
    levels=None,
    majors=None,
    midpoint_deficient_grade=None,
    scope=(),
    search_phrase=None,
    sids=(),
    transfer=None,
    underrepresented=None,
    unit_ranges=None,
):
    """Translate cohort-filter criteria into SQL fragments.

    Returns a ``(query_tables, query_filter, query_bindings)`` triple — the
    FROM/JOIN clause, the WHERE clause, and the bound parameters — or
    ``(None, None, None)`` when the scope yields no queryable tables.
    User-supplied values are passed via bindings, not interpolated.

    Fixes: regex patterns are now raw strings (``'\\W'`` was an invalid
    escape sequence, deprecated and slated to be a syntax error); stray
    ``f``-prefixes on placeholder-free strings removed.
    """
    # If no specific scope is required by criteria, default to the admin view.
    if not scope:
        scope = ['ADMIN']
    query_tables = _student_query_tables_for_scope(scope)
    if not query_tables:
        return None, None, None
    query_filter = ' WHERE true'
    query_bindings = {}
    # Name or SID search
    if search_phrase:
        words = search_phrase.upper().split()
        # A numeric string indicates an SID search.
        if len(words) == 1 and re.match(r'^\d+$', words[0]):
            query_filter += ' AND (sas.sid LIKE :sid_phrase)'
            query_bindings.update({'sid_phrase': f'{words[0]}%'})
        # If a single word, search on both name and email.
        elif len(words) == 1:
            name_string = ''.join(re.split(r'\W', words[0]))
            email_string = search_phrase.lower()
            query_tables += f"""
                LEFT JOIN {student_schema()}.student_names n
                    ON n.name LIKE :name_string
                    AND n.sid = sas.sid"""
            query_filter += ' AND (sas.email_address LIKE :email_string OR n.name IS NOT NULL)'
            query_bindings.update({'email_string': f'{email_string}%', 'name_string': f'{name_string}%'})
        # If multiple words, search name only.
        else:
            for i, word in enumerate(words):
                query_tables += f"""
                    JOIN {student_schema()}.student_names n{i}
                        ON n{i}.name LIKE :name_phrase_{i}
                        AND n{i}.sid = sas.sid"""
                word = ''.join(re.split(r'\W', word))
                query_bindings.update({f'name_phrase_{i}': f'{word}%'})
    if ethnicities:
        query_tables += f' JOIN {student_schema()}.ethnicities e ON e.sid = sas.sid'
    if genders or underrepresented is not None:
        query_tables += f' JOIN {student_schema()}.demographics d ON d.sid = sas.sid'
    if sids:
        query_filter += ' AND sas.sid = ANY(:sids)'
        query_bindings.update({'sids': sids})
    # Generic SIS criteria
    if gpa_ranges:
        sql_ready_gpa_ranges = [f"numrange({gpa_range['min']}, {gpa_range['max']}, '[]')" for gpa_range in gpa_ranges]
        query_filter += _number_ranges_to_sql('sas.gpa', sql_ready_gpa_ranges)
    if last_term_gpa_ranges:
        sql_ready_term_gpa_ranges = [f"numrange({gpa_range['min']}, {gpa_range['max']}, '[]')" for gpa_range in last_term_gpa_ranges]
        query_filter += _number_ranges_to_sql('set.term_gpa', sql_ready_term_gpa_ranges)
        # Last-term GPA lives in the previous term's enrollment row.
        query_tables += f"""
            JOIN {student_schema()}.student_enrollment_terms set
                ON set.sid = sas.sid
                AND set.term_id = :previous_term_id"""
        query_bindings.update({'previous_term_id': previous_term_id(current_term_id())})
    query_filter += _number_ranges_to_sql('sas.units', unit_ranges) if unit_ranges else ''
    if last_name_ranges:
        query_filter += _last_name_ranges_to_sql(last_name_ranges)
    if entering_terms:
        query_filter += ' AND sas.entering_term = ANY(:entering_terms)'
        query_bindings.update({'entering_terms': entering_terms})
    if ethnicities:
        query_filter += ' AND e.ethnicity = ANY(:ethnicities)'
        query_bindings.update({'ethnicities': ethnicities})
    if expected_grad_terms:
        query_filter += ' AND sas.expected_grad_term = ANY(:expected_grad_terms)'
        query_bindings.update({'expected_grad_terms': expected_grad_terms})
    if underrepresented is not None:
        query_filter += ' AND d.minority IS :underrepresented'
        query_bindings.update({'underrepresented': underrepresented})
    if genders:
        query_filter += ' AND d.gender = ANY(:genders)'
        query_bindings.update({'genders': genders})
    if levels:
        query_filter += ' AND sas.level = ANY(:levels)'
        query_bindings.update({'levels': [level_to_code(level) for level in levels]})
    if majors:
        # Only modify the majors list clone
        _majors = majors.copy()
        major_filters = []
        # Afaik, no student can declare a major and remain undeclared. However, in the interest of surfacing
        # front-end bugs we do not use an 'if...else' below. We expect the front-end to be smart.
        if tolerant_remove(_majors, 'Declared'):
            major_filters.append('NOT maj.major ~* \'undeclared\'')
        if tolerant_remove(_majors, 'Undeclared'):
            major_filters.append('maj.major ~* \'undeclared\'')
        if _majors:
            major_filters.append('maj.major = ANY(:majors)')
        query_filter += ' AND (' + ' OR '.join(major_filters) + ')'
        query_tables += f' LEFT JOIN {student_schema()}.student_majors maj ON maj.sid = sas.sid'
        query_bindings.update({'majors': _majors})
    if midpoint_deficient_grade is True:
        query_tables += f"""
            JOIN {student_schema()}.student_enrollment_terms ser
                ON ser.sid = sas.sid
                AND ser.term_id = :term_id
                AND ser.midpoint_deficient_grade = TRUE"""
        query_bindings.update({'term_id': current_term_id()})
    if transfer is True:
        query_filter += ' AND sas.transfer = TRUE'
    if advisor_plan_mappings:
        advisor_plan_filters = []
        for idx, mapping in enumerate(advisor_plan_mappings):
            advisor_sid = mapping['advisor_sid']
            query_bindings.update({f'advisor_sid_{idx}': advisor_sid})
            if mapping['academic_plan_code'] == '*':
                # Wildcard: any plan advised by this advisor.
                advisor_plan_filters.append(f'advs.advisor_sid = :advisor_sid_{idx}')
            else:
                academic_plan_code = mapping['academic_plan_code']
                query_bindings.update({f'academic_plan_code_{idx}': academic_plan_code})
                advisor_plan_filters.append(
                    f'(advs.advisor_sid = :advisor_sid_{idx} AND advs.academic_plan_code = :academic_plan_code_{idx})',
                )
        query_tables += f"""
            JOIN {advisor_schema()}.advisor_students advs ON advs.student_sid = sas.sid"""
        query_tables += ' AND (' + ' OR '.join(advisor_plan_filters) + ')'
    # ASC criteria
    query_filter += f' AND s.active IS {is_active_asc}' if is_active_asc is not None else ''
    query_filter += f' AND s.intensive IS {in_intensive_cohort}' if in_intensive_cohort is not None else ''
    if group_codes:
        query_filter += ' AND s.group_code = ANY(:group_codes)'
        query_bindings.update({'group_codes': group_codes})
    # COE criteria
    if coe_advisor_ldap_uids:
        query_filter += ' AND s.advisor_ldap_uid = ANY(:coe_advisor_ldap_uids)'
        query_bindings.update({'coe_advisor_ldap_uids': coe_advisor_ldap_uids})
    if coe_ethnicities:
        query_filter += ' AND s.ethnicity = ANY(:coe_ethnicities)'
        query_bindings.update({'coe_ethnicities': coe_ethnicities})
    if coe_genders:
        query_filter += ' AND s.gender = ANY(:coe_genders)'
        query_bindings.update({'coe_genders': coe_genders})
    if coe_prep_statuses:
        query_filter += ' AND (' + ' OR '.join([f's.{cps} IS TRUE' for cps in coe_prep_statuses]) + ')'
    if coe_probation is not None:
        query_filter += f' AND s.probation IS {coe_probation}'
    if coe_underrepresented is not None:
        query_filter += f' AND s.minority IS {coe_underrepresented}'
    if is_active_coe is False:
        query_filter += " AND s.status IN ('D','P','U','W','X','Z')"
    elif is_active_coe is True:
        query_filter += " AND s.status NOT IN ('D','P','U','W','X','Z')"
    return query_tables, query_filter, query_bindings
def term():
    """Return the term id from the request's 'term' arg, defaulting to the current term."""
    return request.args.get('term') or berkeley.current_term_id()
def _get_current_term_start():
    """Return the begin date of the current undergraduate term."""
    term = data_loch.get_undergraduate_term(current_term_id())[0]
    return term['term_begins']
def refresh_term(term_id=None, continuation=False):
    """Reload cached data for a term (default: the current term).

    :param continuation: accepted for signature compatibility; unused here.

    Fix: the default was previously ``berkeley.current_term_id()``, which is
    evaluated once at import time and goes stale; now resolved per call.
    """
    if term_id is None:
        term_id = berkeley.current_term_id()
    load_term(term_id)
def refresh_external_data():
    """Trigger a full (not load-only) refresh of external data for the current term."""
    # Imported locally to avoid circular imports at module load.
    from boac.api import cache_utils
    from boac.lib import berkeley
    term_id = berkeley.current_term_id()
    cache_utils.refresh_request_handler(term_id, load_only=False)