def test_current_term_id_caching(self):
    """Fetches current term ID from the loch instead of cache when asked."""
    import json
    from boac.models import json_cache
    from boac.models.json_cache import JsonCache
    # Doctor the cached term index so that cache and loch now disagree.
    cached_index = JsonCache.query.filter_by(key='current_term_index').first()
    cached_index.json = json.loads('{"current_term_name": "Spring 2020"}')
    json_cache.update_jsonb_row(cached_index)
    # Default (cached) lookup reflects the doctored cache; use_cache=False hits the loch.
    assert sis_terms.current_term_id() == '2202'
    assert sis_terms.current_term_id(use_cache=False) == '2178'
def app_config():
    """Return the front-end runtime configuration as JSON."""
    config = app.config
    # Degree category options exclude placeholder/campus enums, then re-append the
    # single 'Campus Requirements' option at the end.
    degree_category_options = [
        t for t in degree_progress_category_type.enums
        if 'Placeholder' not in t and 'Campus' not in t
    ]
    return tolerant_jsonify({
        'academicStandingDescriptions': ACADEMIC_STANDING_DESCRIPTIONS,
        'apptDeskRefreshInterval': config['APPT_DESK_REFRESH_INTERVAL'],
        'boacEnv': config['BOAC_ENV'],
        'currentEnrollmentTerm': current_term_name(),
        'currentEnrollmentTermId': int(current_term_id()),
        'degreeCategoryTypeOptions': degree_category_options + ['Campus Requirements'],
        'degreeProgressColorCodes': ACCENT_COLOR_CODES,
        'disableMatrixViewThreshold': config['DISABLE_MATRIX_VIEW_THRESHOLD'],
        'devAuthEnabled': config['DEVELOPER_AUTH_ENABLED'],
        'ebEnvironment': config.get('EB_ENVIRONMENT'),
        'featureFlagAdmittedStudents': config['FEATURE_FLAG_ADMITTED_STUDENTS'],
        'featureFlagDegreeCheck': config['FEATURE_FLAG_DEGREE_CHECK'],
        'fixedWarningOnAllPages': config['FIXED_WARNING_ON_ALL_PAGES'],
        'googleAnalyticsId': config['GOOGLE_ANALYTICS_ID'],
        'isDemoModeAvailable': config['DEMO_MODE_AVAILABLE'],
        'maxAttachmentsPerNote': config['NOTES_ATTACHMENTS_MAX_PER_NOTE'],
        'pingFrequency': config['PING_FREQUENCY'],
        'supportEmailAddress': config['BOAC_SUPPORT_EMAIL'],
        'timezone': config['TIMEZONE'],
    })
def current_alert_counts_for_sids(cls, viewer_id, sids, count_only=False, offset=None, limit=None):
    """Per-SID counts of undismissed current-term alerts, for the given viewer and SIDs."""
    sql_parts = ["""
        SELECT alerts.sid, count(*) as alert_count
        FROM alerts
        LEFT JOIN alert_views
            ON alert_views.alert_id = alerts.id
            AND alert_views.viewer_id = :viewer_id
        WHERE alerts.deleted_at IS NULL
            AND alerts.key LIKE :key
            AND alerts.sid = ANY(:sids)
            AND alert_views.dismissed_at IS NULL
        GROUP BY alerts.sid
        ORDER BY alert_count DESC, alerts.sid
    """]
    if offset:
        sql_parts.append(' OFFSET :offset')
    if limit:
        sql_parts.append(' LIMIT :limit')
    bindings = {
        'key': current_term_id() + '_%',  # Alert keys are prefixed by term id.
        'limit': limit,
        'offset': offset,
        'sids': sids,
        'viewer_id': viewer_id,
    }
    return cls.alert_counts_by_query(''.join(sql_parts), bindings, count_only=count_only)
def summarize_profile(profile, enrollments=None, academic_standing=None, term_gpas=None):
    """Condense a full student profile in place for API responses.

    Pops 'sisProfile' and hoists selected SIS fields to the top level; optionally
    merges in the student's term enrollment, academic standing, and term GPA from
    the supplied per-SID mappings.
    """
    # Strip SIS details to lighten the API load.
    sis_profile = profile.pop('sisProfile', None)
    if sis_profile:
        profile['academicCareerStatus'] = sis_profile.get('academicCareerStatus')
        profile['cumulativeGPA'] = sis_profile.get('cumulativeGPA')
        profile['cumulativeUnits'] = sis_profile.get('cumulativeUnits')
        profile['currentTerm'] = sis_profile.get('currentTerm')
        profile['degree'] = sis_profile.get('degree')
        profile['expectedGraduationTerm'] = sis_profile.get('expectedGraduationTerm')
        profile['level'] = _get_sis_level_description(sis_profile)
        profile['majors'] = _get_active_plan_descriptions(sis_profile)
        profile['matriculation'] = sis_profile.get('matriculation')
        profile['termsInAttendance'] = sis_profile.get('termsInAttendance')
        profile['transfer'] = sis_profile.get('transfer')
        if sis_profile.get('withdrawalCancel'):
            profile['withdrawalCancel'] = sis_profile['withdrawalCancel']
            if not sis_profile['withdrawalCancel'].get('termId'):
                # Default a missing termId to the current term. Note: this writes through
                # sis_profile, but profile['withdrawalCancel'] aliases the same dict, so
                # the value is visible in the returned profile too.
                sis_profile['withdrawalCancel']['termId'] = current_term_id()
    if enrollments:
        # Add the singleton term.
        term = enrollments.get(profile['sid'])
        if term:
            if not current_user.can_access_canvas_data:
                _suppress_canvas_sites(term)
            profile['term'] = term
    if academic_standing:
        profile['academicStanding'] = academic_standing.get(profile['sid'])
    if term_gpas:
        profile['termGpa'] = term_gpas.get(profile['sid'])
def refresh_alert_counts_for_owner(cls, owner_id):
    """Recompute the cached alert_count for every cohort owned by ``owner_id``."""
    sql = text("""
        UPDATE cohort_filters
        SET alert_count = updated_cohort_counts.alert_count
        FROM (
            SELECT cohort_filters.id AS cohort_filter_id, count(*) AS alert_count
            FROM alerts
            JOIN cohort_filters
                ON alerts.sid = ANY(cohort_filters.sids)
                AND alerts.key LIKE :key
                AND alerts.deleted_at IS NULL
                AND cohort_filters.owner_id = :owner_id
            LEFT JOIN alert_views
                ON alert_views.alert_id = alerts.id
                AND alert_views.viewer_id = :owner_id
            WHERE alert_views.dismissed_at IS NULL
            GROUP BY cohort_filters.id
        ) updated_cohort_counts
        WHERE cohort_filters.id = updated_cohort_counts.cohort_filter_id
    """)
    bindings = {
        'key': current_term_id() + '_%',  # Restrict to current-term alert keys.
        'owner_id': owner_id,
    }
    result = db.session.execute(sql, bindings)
    std_commit()
    return result
def _construct_student_profile(student):
    """Assemble the full API profile for one student row, or None if unavailable.

    Merges level description, academic standing, enrollment terms (through the
    future term), withdrawal-cancel info, and special-cases the Haas advisor row.
    """
    if not student:
        return
    profiles = get_full_student_profiles([student['sid']])
    if not profiles or not profiles[0]:
        return
    profile = profiles[0]
    sis_profile = profile.get('sisProfile', None)
    if sis_profile and 'level' in sis_profile:
        sis_profile['level']['description'] = _get_sis_level_description(sis_profile)
    academic_standing = get_academic_standing_by_sid([student['sid']], as_dicts=False)
    if academic_standing:
        profile['academicStanding'] = academic_standing.get(student['sid'])
    # Re-key standing by termId for the enrollment-term merge below.
    # NOTE(review): if academic_standing was falsy, profile['academicStanding'] may be
    # absent here and this raises KeyError — confirm upstream always populates it.
    academic_standing = {term['termId']: term['status'] for term in profile['academicStanding']}
    enrollment_results = data_loch.get_enrollments_for_sid(student['sid'], latest_term_id=future_term_id())
    profile['enrollmentTerms'] = merge_enrollment_terms(enrollment_results, academic_standing=academic_standing)
    if sis_profile and sis_profile.get('withdrawalCancel'):
        profile['withdrawalCancel'] = sis_profile['withdrawalCancel']
        if not sis_profile['withdrawalCancel'].get('termId'):
            # Same dict object as profile['withdrawalCancel']; the write is shared.
            sis_profile['withdrawalCancel']['termId'] = current_term_id()
    advisors = profile.get('advisors', [])
    for index, advisor in enumerate(advisors):
        if advisor.get('sid') == 'UCBUGADHAAS':
            # Haas undergraduate program uses a shared departmental identity.
            profile['advisors'][index]['firstName'] = 'Haas Undergraduate Program'
            profile['advisors'][index]['email'] = '*****@*****.**'
    return profile
def load_term(term_id=None):
    """Refresh alerts for one term; for the current term, also refresh memberships,
    CalNet attributes, cohort counts, and curated group lists.

    :param term_id: SIS term id, 'all' to load every term, or None for the current
        term (uncached lookup). The old signature evaluated
        ``current_term_id(use_cache=False)`` in the default argument, which Python
        runs once at import time — a long-running process would keep using the term
        that was current when the module loaded. Resolving the default at call time
        fixes that while remaining backward-compatible for all existing callers.
    """
    if term_id is None:
        term_id = current_term_id(use_cache=False)
    if term_id == 'all':
        load_all_terms()
        return
    JobProgress().update(f'About to refresh alerts for term {term_id}')
    refresh_alerts(term_id)
    if term_id == current_term_id():
        JobProgress().update('About to refresh department memberships')
        refresh_department_memberships()
        JobProgress().update('About to refresh CalNet attributes for active users')
        refresh_calnet_attributes()
        JobProgress().update('About to load filtered cohort counts')
        load_filtered_cohort_counts()
        JobProgress().update('About to update curated group memberships')
        update_curated_group_lists()
def grading_terms():
    """Term options from the active term through the future term, labeled as such."""
    active_term_id = current_term_id()
    options = []
    for term_id in term_ids_range(active_term_id, future_term_id()):
        suffix = ' (active)' if term_id == active_term_id else ' (future)'
        options.append({
            'name': term_name_for_sis_id(term_id) + suffix,
            'value': term_id,
        })
    return options
def current_alert_counts_for_viewer(cls, viewer_id):
    """Counts of undismissed current-term alerts, grouped by SID, for one viewer."""
    bindings = {
        'key': current_term_id() + '_%',  # Alert keys are prefixed by term id.
        'viewer_id': viewer_id,
    }
    sql = """
        SELECT alerts.sid, count(*) as alert_count
        FROM alerts
        LEFT JOIN alert_views
            ON alert_views.alert_id = alerts.id
            AND alert_views.viewer_id = :viewer_id
        WHERE alerts.deleted_at IS NULL
            AND alerts.key LIKE :key
            AND alert_views.dismissed_at IS NULL
        GROUP BY alerts.sid
    """
    return cls.alert_counts_by_query(sql, bindings)
def _get_enrollment_sections(sid):
    """Flatten a student's merged enrollment terms into a list of section dicts.

    Each section is annotated with its course displayName and termId so it can
    stand alone. (Fix: the original looped with ``enumerate`` but never used the
    index; the loop-invariant term/course lookups are also hoisted.)
    """
    sections = []
    enrollments = data_loch.get_enrollments_for_sid(
        sid=sid,
        latest_term_id=current_term_id(),
    )
    for term in merge_enrollment_terms(enrollments):
        term_id = term['termId']
        for enrollment in term.get('enrollments', []):
            display_name = enrollment['displayName']
            for section in enrollment['sections']:
                # Denormalize course-level context onto each section row.
                section['displayName'] = display_name
                section['termId'] = term_id
                sections.append(section)
    return sections
def search_for_students(
    search_phrase=None,
    order_by=None,
    offset=0,
    limit=None,
):
    """Search the loch for students matching a free-text phrase.

    Returns {'students': [...], 'totalStudentCount': n}. A purely numeric phrase
    with zero current matches falls back to historical student search.
    """
    benchmark = get_benchmarker('search_for_students')
    benchmark('begin')
    query_tables, query_filter, query_bindings = data_loch.get_students_query(search_phrase=search_phrase)
    if not query_tables:
        # No parseable query: empty result rather than an unfiltered scan.
        return {
            'students': [],
            'totalStudentCount': 0,
        }
    o, o_secondary, o_tertiary, o_direction, supplemental_query_tables = data_loch.get_students_ordering(
        current_term_id=current_term_id(),
        order_by=order_by,
    )
    if supplemental_query_tables:
        query_tables += supplemental_query_tables
    benchmark('begin SID query')
    # Count matches first, before fetching the (possibly paged) detail rows.
    result = data_loch.safe_execute_rds(f'SELECT DISTINCT(sas.sid) {query_tables} {query_filter}', **query_bindings)
    benchmark('end SID query')
    total_student_count = len(result)
    # In the special case of a numeric search phrase that returned no matches, fall back to historical student search.
    if total_student_count == 0 and search_phrase and re.match(r'^\d+$', search_phrase):
        return search_for_student_historical(search_phrase)
    sql = f"""SELECT sas.sid
        {query_tables}
        {query_filter}
        GROUP BY sas.sid
        ORDER BY MIN({o}) {o_direction} NULLS FIRST, MIN({o_secondary}) NULLS FIRST, MIN({o_tertiary}) NULLS FIRST"""
    if o_tertiary != 'sas.sid':
        # Ensure a deterministic total ordering by appending the SID tiebreaker.
        sql += ', sas.sid'
    sql += f' OFFSET {offset}'
    if limit and limit < 100:
        # Sanity check large limits
        sql += ' LIMIT :limit'
        query_bindings['limit'] = limit
    benchmark('begin student query')
    result = data_loch.safe_execute_rds(sql, **query_bindings)
    benchmark('begin profile collection')
    students = get_summary_student_profiles([row['sid'] for row in result])
    benchmark('end')
    return {
        'students': students,
        'totalStudentCount': total_student_count,
    }
def merge_enrollment_terms(enrollment_results, academic_standing=None):
    """Deserialize per-term enrollment rows into a filtered, annotated term list.

    Past terms with neither enrollments nor drops are skipped and have zombie
    waitlist entries pruned; every kept term gets an academicYear (and, when
    provided, academicStanding); Canvas sites are suppressed for users without
    Canvas access. If no row covered the current term, an empty placeholder term
    is appended.

    (Fix: the original called ``current_term_id()`` up to twice per row and
    ``current_term_name()`` twice at the end — loop-invariant lookups are now
    hoisted; the nested ``else: if`` is flattened to ``elif``.)
    """
    current_term_id_ = current_term_id()
    current_term_found = False
    filtered_enrollment_terms = []
    for row in enrollment_results:
        term = json.loads(row['enrollment_term'])
        term_id = term['termId']
        if term_id == current_term_id_:
            current_term_found = True
        elif term_id < current_term_id_:
            # Skip past terms with no enrollments or drops.
            if not term.get('enrollments') and not term.get('droppedSections'):
                continue
            # Filter out old waitlisted enrollments from past terms.
            if term.get('enrollments'):
                _omit_zombie_waitlisted_enrollments(term)
        term_name = term.get('termName')
        term['academicYear'] = academic_year_for_term_name(term_name)
        if academic_standing:
            term['academicStanding'] = {
                'status': academic_standing.get(term_id),
                'termId': term_id,
            }
        if not current_user.can_access_canvas_data:
            _suppress_canvas_sites(term)
        filtered_enrollment_terms.append(term)
    if not current_term_found:
        # Synthesize an empty current term so the feed always includes it.
        current_term_name_ = current_term_name()
        filtered_enrollment_terms.append({
            'academicYear': academic_year_for_term_name(current_term_name_),
            'enrolledUnits': 0,
            'enrollments': [],
            'termId': current_term_id_,
            'termName': current_term_name_,
        })
    return filtered_enrollment_terms
def get_current_user_profile():
    """API profile of the logged-in user, with their cohorts, groups, and preferences."""
    my_cohorts = []
    for cohort in CohortFilter.get_cohorts(current_user.get_id()):
        # Everything returned here belongs to the current user by construction.
        cohort['isOwnedByCurrentUser'] = True
        my_cohorts.append(cohort)
    profile = current_user.to_api_json()
    profile['myCohorts'] = my_cohorts
    profile['myCuratedGroups'] = get_my_curated_groups()
    profile['preferences'] = {
        'admitSortBy': 'last_name',
        'sortBy': 'last_name',
        'termId': current_term_id(),
    }
    return profile
def get_student_profile_summaries(sids, term_id=None):
    """Condensed student profiles for SIDs, with photos, ASC/COE data, and one term's enrollments.

    :param sids: SIDs whose ordering is preserved in the returned list.
    :param term_id: enrollment term to merge; defaults to the current term.
    """
    if not sids:
        return []
    benchmark = get_benchmarker('get_student_profile_summaries')
    benchmark('begin')
    profile_results = data_loch.get_student_profile_summaries(sids)
    if not profile_results:
        return []
    profiles_by_sid = _get_profiles_by_sid(profile_results)
    # Rebuild the list in the caller's SID order, dropping SIDs with no profile.
    profiles = []
    for sid in sids:
        profile = profiles_by_sid.get(sid)
        if profile:
            profiles.append(profile)
    benchmark('begin photo merge')
    _merge_photo_urls(profiles)
    benchmark('end photo merge')
    scope = get_student_query_scope()
    benchmark('begin ASC profile merge')
    _merge_asc_student_profile_data(profiles_by_sid, scope)
    benchmark('end ASC profile merge')
    if 'COENG' in scope or 'ADMIN' in scope:
        benchmark('begin COE profile merge')
        _merge_coe_student_profile_data(profiles_by_sid)
        benchmark('end COE profile merge')
    # TODO Many views require no term enrollment information other than a units count. This datum too should be
    # stored in the loch without BOAC having to crunch it.
    if not term_id:
        term_id = current_term_id()
    benchmark('begin enrollments query')
    enrollments_for_term = data_loch.get_enrollments_for_term(term_id, sids)
    benchmark('end enrollments query')
    enrollments_by_sid = {row['sid']: json.loads(row['enrollment_term']) for row in enrollments_for_term}
    for profile in profiles:
        _merge_enrollments(profile, enrollments=enrollments_by_sid)
    benchmark('end')
    return profiles
def _course_search(search_phrase):
    """Search current-term course sections matching a free-text phrase.

    Heuristic: if the final word of the phrase contains a digit, it is treated as
    a catalog id (with anything before it as a subject area); otherwise the whole
    phrase, compressed to alphanumerics, is matched against course codes.
    """
    term_id = current_term_id()
    course_rows = []

    def _compress_to_alphanumeric(s):
        return ''.join(e for e in s if e.isalnum())

    # Split off the last word only; 'COMPSCI 61A' -> ['COMPSCI', '61A'].
    words = search_phrase.rsplit(' ', 1)
    if len(words) == 1:
        candidate_subject_area = None
        candidate_catalog_id = words[0]
    else:
        candidate_subject_area = words[0]
        candidate_catalog_id = words[1]
    # If the search phrase appears to contain a catalog id, set up the course search that way.
    if any(c.isdigit() for c in candidate_catalog_id):
        subject_area = candidate_subject_area and _compress_to_alphanumeric(candidate_subject_area).upper()
        catalog_id = candidate_catalog_id.upper()
        course_rows = get_enrolled_primary_sections_for_parsed_code(term_id, subject_area, catalog_id)
    # Otherwise just compress the search phrase to alphanumeric characters and look for a simple match.
    else:
        compressed_search_phrase = _compress_to_alphanumeric(search_phrase)
        if compressed_search_phrase:
            course_rows = get_enrolled_primary_sections(term_id, compressed_search_phrase.upper())
    courses = []
    if course_rows:
        # Feed is capped at 50 courses; totalCourseCount still reflects all matches.
        for row in islice(course_rows, 50):
            courses.append({
                'termId': row['term_id'],
                'sectionId': row['sis_section_id'],
                'courseName': row['sis_course_name'],
                'courseTitle': row['sis_course_title'],
                'instructionFormat': row['sis_instruction_format'],
                'sectionNum': row['sis_section_num'],
                'instructors': row['instructors'],
            })
    return {
        'courses': courses,
        'totalCourseCount': len(course_rows),
    }
def grad_terms():
    """Expected-graduation term options, grouped into 'Future' and 'Past'."""
    current_term_id_ = current_term_id()
    option_groups = {
        'Future': [],
        'Past': [],
    }
    expected_terms = [r['expected_grad_term'] for r in data_loch.get_expected_graduation_terms()]
    for term_id in expected_terms:
        group = option_groups['Past'] if term_id < current_term_id_ else option_groups['Future']
        # Display term name with word order reversed (e.g. year before season).
        display_name = ' '.join(reversed(term_name_for_sis_id(term_id).split()))
        group.append({
            'name': display_name,
            'value': term_id,
        })
    return option_groups
def update_all_for_term(cls, term_id):
    """Regenerate alerts for all students enrolled in ``term_id``.

    Three passes: (1) activity alerts per enrollment, (2) withdrawal-cancel
    alerts — current term only, when ALERT_WITHDRAWAL_ENABLED — and (3) academic
    standing alerts for DIS/PRO/SUB statuses in this term.
    """
    app.logger.info('Starting alert update')
    enrollments_for_term = data_loch.get_enrollments_for_term(str(term_id))
    # Feature toggles are looked up once, outside the per-enrollment loop.
    no_activity_alerts_enabled = cls.no_activity_alerts_enabled()
    infrequent_activity_alerts_enabled = cls.infrequent_activity_alerts_enabled()
    for row in enrollments_for_term:
        enrollments = json.loads(row['enrollment_term']).get('enrollments', [])
        for enrollment in enrollments:
            cls.update_alerts_for_enrollment(
                sid=row['sid'],
                term_id=term_id,
                enrollment=enrollment,
                no_activity_alerts_enabled=no_activity_alerts_enabled,
                infrequent_activity_alerts_enabled=infrequent_activity_alerts_enabled,
            )
    profiles = data_loch.get_student_profiles()
    if app.config['ALERT_WITHDRAWAL_ENABLED'] and str(term_id) == current_term_id():
        for row in profiles:
            sis_profile_feed = json.loads(row['profile']).get('sisProfile') or {}
            if sis_profile_feed.get('withdrawalCancel', {}).get('termId') == str(term_id):
                cls.update_withdrawal_cancel_alerts(row['sid'], term_id)
    sids = [p['sid'] for p in profiles]
    for sid, academic_standing_list in get_academic_standing_by_sid(sids).items():
        # Only the standing entry for this specific term is relevant.
        standing = next((s for s in academic_standing_list if s['termId'] == str(term_id)), None)
        if standing and standing['status'] in ('DIS', 'PRO', 'SUB'):
            cls.update_academic_standing_alerts(
                action_date=standing['actionDate'],
                sid=standing['sid'],
                status=standing['status'],
                term_id=term_id,
            )
    app.logger.info('Alert update complete')
def get_summary_student_profiles(sids, include_historical=False, term_id=None):
    """Summary profiles for SIDs, merged with one term's enrollments, standing, and GPAs.

    :param include_historical: when True, SIDs missing from current data are
        supplemented from historical profiles and enrollments.
    :param term_id: enrollment term to merge; defaults to the current term.
    """
    if not sids:
        return []
    benchmark = get_benchmarker('get_summary_student_profiles')
    benchmark('begin')
    # TODO It's probably more efficient to store summary profiles in the loch, rather than distilling them
    # on the fly from full profiles.
    profiles = get_full_student_profiles(sids)
    # TODO Many views require no term enrollment information other than a units count. This datum too should be
    # stored in the loch without BOAC having to crunch it.
    if not term_id:
        term_id = current_term_id()
    benchmark('begin enrollments query')
    enrollments_for_term = data_loch.get_enrollments_for_term(term_id, sids)
    benchmark('end enrollments query')
    enrollments_by_sid = {row['sid']: json.loads(row['enrollment_term']) for row in enrollments_for_term}
    benchmark('begin academic standing query')
    academic_standing = get_academic_standing_by_sid(sids)
    benchmark('end academic standing query')
    benchmark('begin term GPA query')
    term_gpas = get_term_gpas_by_sid(sids)
    benchmark('end term GPA query')
    # SIDs with no current profile may still exist in historical data.
    remaining_sids = list(set(sids) - set([p.get('sid') for p in profiles]))
    if len(remaining_sids) and include_historical:
        benchmark('begin historical profile supplement')
        historical_profiles = get_historical_student_profiles(remaining_sids)
        profiles += historical_profiles
        historical_enrollments_for_term = data_loch.get_historical_enrollments_for_term(str(term_id), remaining_sids)
        for row in historical_enrollments_for_term:
            enrollments_by_sid[row['sid']] = json.loads(row['enrollment_term'])
        benchmark('end historical profile supplement')
    benchmark('begin profile transformation')
    for profile in profiles:
        # Mutates each profile in place; see summarize_profile.
        summarize_profile(profile, enrollments=enrollments_by_sid, academic_standing=academic_standing, term_gpas=term_gpas)
    benchmark('end')
    return profiles
def current_alerts_for_sid(cls, viewer_id, sid):
    """All undeleted current-term alerts for one student, with the viewer's dismissal state."""
    sql = text("""
        SELECT alerts.*, alert_views.dismissed_at
        FROM alerts
        LEFT JOIN alert_views
            ON alert_views.alert_id = alerts.id
            AND alert_views.viewer_id = :viewer_id
        WHERE alerts.deleted_at IS NULL
            AND alerts.key LIKE :key
            AND alerts.sid = :sid
        ORDER BY alerts.created_at
    """)
    rows = db.session.execute(sql, {
        'key': current_term_id() + '_%',
        'sid': sid,
        'viewer_id': viewer_id,
    })
    timestamp_format = '%Y-%m-%d %H:%M:%S'
    feed = []
    for row in rows:
        dismissed_at = row['dismissed_at']
        alert = {camelize(column): row[column] for column in ['id', 'alert_type', 'key', 'message']}
        # dismissed is None/falsy when the viewer never dismissed this alert.
        alert['dismissed'] = dismissed_at and dismissed_at.strftime(timestamp_format)
        alert['createdAt'] = row['created_at'].strftime(timestamp_format)
        alert['updatedAt'] = row['updated_at'].strftime(timestamp_format)
        feed.append(alert)
    return feed
def term():
    """Term ID from the request args, defaulting to the uncached current term."""
    requested_term_id = request.args.get('term')
    return requested_term_id or current_term_id(use_cache=False)
def _get_current_term_start():
    """Begin date of the current term, per the loch's undergraduate term data."""
    term_rows = data_loch.get_undergraduate_term(current_term_id())
    return term_rows[0]['term_begins']
def test_current_term_id(self):
    """Returns the current term ID."""
    # Cached lookup of the fixture data's current term.
    assert sis_terms.current_term_id() == '2178'
def test_current_term_id_from_config(self, app):
    """Falls back on configured current term ID when not set to auto."""
    # With an explicit term name configured, the SIS id is derived from it.
    with override_config(app, 'CANVAS_CURRENT_ENROLLMENT_TERM', 'Summer 1969'):
        assert sis_terms.current_term_id() == '1695'
def refresh_external_data():
    """Trigger a cache refresh for the current term."""
    # Imports are local to avoid import-time cycles.
    from boac.api import cache_utils
    from boac.merged.sis_terms import current_term_id
    term_id = current_term_id()
    cache_utils.refresh_request_handler(term_id)
def _get_partitioned_courses_json(self):
    """Partition this degree check's courses into (assigned, ignored, unassigned) API feeds.

    Existing DegreeProgressCourse rows are bucketed by a composite key; the
    student's SIS enrollments are then walked, matching sections to buckets and
    creating new unassigned DegreeProgressCourse rows (a DB side effect) for
    gradable primary sections not yet tracked. Leftover buckets (e.g. manually
    created courses) are partitioned last.
    """
    assigned_courses = []
    ignored_courses = []
    unassigned_courses = []
    degree_progress_courses = {}
    sid = self.student_sid
    # Sort courses by created_at (asc) so "copied" courses come after the primary assigned course.
    degree_courses = DegreeProgressCourse.find_by_sid(degree_check_id=self.id, sid=sid)
    for course in sorted(degree_courses, key=lambda c: c.created_at):
        key = f'{course.section_id}_{course.term_id}_{course.manually_created_at}_{course.manually_created_by}'
        if key not in degree_progress_courses:
            degree_progress_courses[key] = []
        degree_progress_courses[key].append(course)
    enrollments = data_loch.get_enrollments_for_sid(
        sid=sid,
        latest_term_id=current_term_id(),
    )

    def _organize_course_and_its_copies(course_key, units_original_value=None):
        # Pops the bucket so each key is partitioned exactly once; created_at sort
        # above guarantees index 0 is the primary course and the rest are copies.
        for idx, course_ in enumerate(degree_progress_courses.pop(course_key)):
            api_json = {
                **course_.to_api_json(),
                **{
                    'sis': {
                        # If user edits degreeCheck.units then we alert the user of diff with original sis.units.
                        'units': units_original_value,
                    },
                },
                'isCopy': idx > 0,
            }
            if api_json['categoryId']:
                assigned_courses.append(api_json)
            elif api_json['ignore']:
                ignored_courses.append(api_json)
            else:
                unassigned_courses.append(api_json)

    for index, term in enumerate(merge_enrollment_terms(enrollments)):
        for enrollment in term.get('enrollments', []):
            for section in enrollment['sections']:
                section_id = section['ccn']
                term_id = term['termId']
                units = section['units']
                # Non-manual courses have None for both manually_created_* fields.
                key = f'{section_id}_{term_id}_{None}_{None}'
                if key in degree_progress_courses:
                    _organize_course_and_its_copies(key, units_original_value=units)
                else:
                    grade = section['grade']
                    # Only graded primary sections with units become trackable courses.
                    if section.get('primary') and grade and units:
                        course = DegreeProgressCourse.create(
                            degree_check_id=self.id,
                            display_name=enrollment['displayName'],
                            grade=grade,
                            section_id=section_id,
                            sid=sid,
                            term_id=term_id,
                            units=units,
                        )
                        unassigned_courses.append({
                            **course.to_api_json(),
                            **{
                                'sis': {
                                    'units': units,
                                },
                            },
                            'isCopy': False,
                        })
    # Partition whatever buckets remain (no matching SIS section in the loop above).
    for key in list(degree_progress_courses.keys()):
        _organize_course_and_its_copies(key)
    return assigned_courses, ignored_courses, unassigned_courses
def query_students(
    academic_standings=None,
    advisor_plan_mappings=None,
    coe_advisor_ldap_uids=None,
    coe_ethnicities=None,
    coe_genders=None,
    coe_prep_statuses=None,
    coe_probation=None,
    coe_underrepresented=None,
    colleges=None,
    curated_group_ids=None,
    entering_terms=None,
    epn_cpn_grading_terms=None,
    ethnicities=None,
    expected_grad_terms=None,
    genders=None,
    gpa_ranges=None,
    group_codes=None,
    in_intensive_cohort=None,
    include_profiles=False,
    intended_majors=None,
    is_active_asc=None,
    is_active_coe=None,
    last_name_ranges=None,
    last_term_gpa_ranges=None,
    levels=None,
    limit=50,
    majors=None,
    midpoint_deficient_grade=None,
    minors=None,
    offset=0,
    order_by=None,
    sids=(),
    sids_only=False,
    student_holds=None,
    term_id=None,
    transfer=None,
    underrepresented=None,
    unit_ranges=None,
    visa_types=None,
):
    """Query the loch for students matching cohort filter criteria.

    Returns a summary dict with 'sids' and 'totalStudentCount' (always), plus
    'students' with full or distilled profiles unless ``sids_only``. Returns
    None if the underlying SID query fails.
    """
    # Only the department-specific criteria participate in scope determination.
    criteria = {
        'advisor_plan_mappings': advisor_plan_mappings,
        'coe_advisor_ldap_uids': coe_advisor_ldap_uids,
        'coe_ethnicities': coe_ethnicities,
        'coe_genders': coe_genders,
        'coe_prep_statuses': coe_prep_statuses,
        'coe_probation': coe_probation,
        'coe_underrepresented': coe_underrepresented,
        'epn_cpn_grading_terms': epn_cpn_grading_terms,
        'ethnicities': ethnicities,
        'genders': genders,
        'group_codes': group_codes,
        'in_intensive_cohort': in_intensive_cohort,
        'is_active_asc': is_active_asc,
        'is_active_coe': is_active_coe,
        'underrepresented': underrepresented,
        'visa_types': visa_types,
    }
    # Cohorts pull from all students in BOA unless they include a department-specific criterion.
    scope = scope_for_criteria(**criteria)
    query_tables, query_filter, query_bindings = data_loch.get_students_query(
        academic_standings=academic_standings,
        advisor_plan_mappings=advisor_plan_mappings,
        coe_advisor_ldap_uids=coe_advisor_ldap_uids,
        coe_ethnicities=coe_ethnicities,
        coe_genders=coe_genders,
        coe_prep_statuses=coe_prep_statuses,
        coe_probation=coe_probation,
        coe_underrepresented=coe_underrepresented,
        colleges=colleges,
        curated_group_ids=curated_group_ids,
        current_term_id=current_term_id(),
        entering_terms=entering_terms,
        epn_cpn_grading_terms=epn_cpn_grading_terms,
        ethnicities=ethnicities,
        expected_grad_terms=expected_grad_terms,
        genders=genders,
        gpa_ranges=gpa_ranges,
        group_codes=group_codes,
        in_intensive_cohort=in_intensive_cohort,
        intended_majors=intended_majors,
        is_active_asc=is_active_asc,
        is_active_coe=is_active_coe,
        last_name_ranges=last_name_ranges,
        last_term_gpa_ranges=last_term_gpa_ranges,
        levels=levels,
        majors=majors,
        midpoint_deficient_grade=midpoint_deficient_grade,
        minors=minors,
        scope=scope,
        sids=sids,
        transfer=transfer,
        underrepresented=underrepresented,
        unit_ranges=unit_ranges,
        visa_types=visa_types,
        student_holds=student_holds,
    )
    if not query_tables:
        return {
            'sids': [],
            'students': [],
            'totalStudentCount': 0,
        }
    # First, get total_count of matching students
    sids_result = data_loch.safe_execute_rds(f'SELECT DISTINCT(sas.sid) {query_tables} {query_filter}', **query_bindings)
    if sids_result is None:
        return None
    # Upstream logic may require the full list of SIDs even if we're only returning full results for a particular
    # paged slice.
    summary = {
        'sids': [row['sid'] for row in sids_result],
        'totalStudentCount': len(sids_result),
    }
    if not sids_only:
        o, o_secondary, o_tertiary, o_direction, supplemental_query_tables = data_loch.get_students_ordering(
            current_term_id=current_term_id(),
            order_by=order_by,
            group_codes=group_codes,
            majors=majors,
            scope=scope,
        )
        if supplemental_query_tables:
            query_tables += supplemental_query_tables
        # These sort columns push NULLs to the end; all others sort NULLs first.
        nulls_last = ('entering_term', 'group_name', 'term_gpa', 'terms_in_attendance', 'units')
        o_null_order = 'NULLS LAST' if any(s in o for s in nulls_last) else 'NULLS FIRST'
        sql = f"""SELECT sas.sid, MIN({o}), MIN({o_secondary}), MIN({o_tertiary})
            {query_tables}
            {query_filter}
            GROUP BY sas.sid
            ORDER BY MIN({o}) {o_direction} {o_null_order}, MIN({o_secondary}) NULLS FIRST, MIN({o_tertiary}) NULLS FIRST"""
        if o_tertiary != 'sas.sid':
            # Append a SID tiebreaker for a deterministic total ordering.
            sql += ', sas.sid'
        sql += ' OFFSET :offset'
        query_bindings['offset'] = offset
        if limit and limit < 100:
            # Sanity check large limits
            query_bindings['limit'] = limit
            sql += ' LIMIT :limit'
        students_result = data_loch.safe_execute_rds(sql, **query_bindings)
        if include_profiles:
            summary['students'] = get_summary_student_profiles([row['sid'] for row in students_result], term_id=term_id)
        else:
            summary['students'] = get_distilled_student_profiles([row['sid'] for row in students_result])
    return summary
def response_with_students_csv_download(sids, fieldnames, benchmark):
    """Build a CSV download of student data for the given SIDs and column names.

    Each fieldname maps to a getter over the merged student profile; rows are
    sorted by last name, first name, SID. SIDs absent from current data are
    supplemented from historical profiles.
    """
    # The two most recent completed terms, for per-term GPA columns.
    term_id_last = previous_term_id(current_term_id())
    term_id_previous = previous_term_id(term_id_last)
    rows = []
    # Column name -> extractor over a (mutated) student profile dict.
    getters = {
        'first_name': lambda profile: profile.get('firstName'),
        'last_name': lambda profile: profile.get('lastName'),
        'sid': lambda profile: profile.get('sid'),
        'email': lambda profile: profile.get('sisProfile', {}).get('emailAddress'),
        'phone': lambda profile: profile.get('sisProfile', {}).get('phoneNumber'),
        'majors': lambda profile: ';'.join(
            [
                plan.get('description') for plan in profile.get('sisProfile', {}).get('plans', [])
                if plan.get('status') == 'Active'
            ],
        ),
        'intended_majors': lambda profile: ';'.join(
            [
                major.get('description') for major in profile.get('sisProfile', {}).get('intendedMajors')
            ],
        ),
        'level_by_units': lambda profile: profile.get('sisProfile', {}).get('level', {}).get('description'),
        'minors': lambda profile: ';'.join(
            [
                plan.get('description') for plan in profile.get('sisProfile', {}).get('plansMinor', [])
                if plan.get('status') == 'Active'
            ],
        ),
        'subplans': lambda profile: ';'.join([subplan for subplan in profile.get('sisProfile', {}).get('subplans', [])]),
        'terms_in_attendance': lambda profile: profile.get('sisProfile', {}).get('termsInAttendance'),
        'expected_graduation_term': lambda profile: profile.get('sisProfile', {}).get('expectedGraduationTerm', {}).get('name'),
        'units_completed': lambda profile: profile.get('sisProfile', {}).get('cumulativeUnits'),
        f'term_gpa_{term_id_previous}': lambda profile: profile.get('termGpa', {}).get(term_id_previous),
        f'term_gpa_{term_id_last}': lambda profile: profile.get('termGpa', {}).get(term_id_last),
        'cumulative_gpa': lambda profile: profile.get('sisProfile', {}).get('cumulativeGPA'),
        'program_status': lambda profile: ';'.join(
            list(
                set(
                    [
                        plan.get('status') for plan in profile.get('sisProfile', {}).get('plans', [])
                    ],
                ),
            ),
        ),
        'academic_standing': lambda profile: profile.get('academicStanding'),
        'transfer': lambda profile: 'Yes' if profile.get('sisProfile', {}).get('transfer') else '',
        'intended_major': lambda profile: ', '.join(
            [
                major.get('description') for major in (profile.get('sisProfile', {}).get('intendedMajors') or [])
            ]
        ),
        'units_in_progress': lambda profile: profile.get('enrolledUnits', {}),
    }
    academic_standing = get_academic_standing_by_sid(sids, as_dicts=True)
    term_gpas = get_term_gpas_by_sid(sids, as_dicts=True)
    term_units = get_term_units_by_sid(current_term_id(), sids)

    def _get_last_element(results):
        # Value under the greatest key (most recent term), or None when empty.
        return results[sorted(results)[-1]] if results else None

    def _add_row(student_profile):
        student_profile['academicStanding'] = _get_last_element(academic_standing.get(student_profile['sid']))
        student_profile['termGpa'] = term_gpas.get(student_profile['sid'], {})
        # NOTE(review): direct indexing will KeyError if a SID has no term-units
        # entry — confirm get_term_units_by_sid covers every requested SID.
        student_profile['enrolledUnits'] = term_units[student_profile['sid']]
        row = {}
        for fieldname in fieldnames:
            row[fieldname] = getters[fieldname](student_profile)
        rows.append(row)

    students = get_student_profiles(sids=sids)
    for student in students:
        profile = student.get('profile')
        if profile:
            _add_row(json.loads(profile))
    # Fall back to historical data for SIDs with no current profile.
    remaining_sids = list(set(sids) - set([s.get('sid') for s in students]))
    if remaining_sids:
        for profile in get_historical_student_profiles(remaining_sids):
            _add_row(profile)
    benchmark('end')
    return response_with_csv_download(
        rows=sorted(rows, key=lambda r: (_norm(r, 'last_name'), _norm(r, 'first_name'), _norm(r, 'sid'))),
        filename_prefix='cohort',
        fieldnames=fieldnames,
    )
def low_assignment_scores(term_id=None):
    """Diagnostic scan for students with low assignment scores in a term.

    :param term_id: SIS term to examine; defaults to the current term.
    :returns: dict of sorted SID lists and section counts, also logged via warnings.
    """
    if not term_id:
        term_id = current_term_id()
    examined_sids = set()
    low_sids = set()
    multiple_low_sids = set()
    # Many students in a low percentile may have received a reasonably high score.
    # Since instructors rarely grade on a curve, it may be fine to receive a score of 85
    # even if all other students received 90 or above.
    sids_with_low_raw_scores = set()
    primary_sections = set()
    primary_sections_with_scored_assignments = set()
    primary_sections_with_plottable_assignments = set()
    enrollments_for_term = data_loch.get_enrollments_for_term(term_id)
    enrollments_by_sid = {row['sid']: json.loads(row['enrollment_term']) for row in enrollments_for_term}
    # NOTE(review): the iter() wrapper is redundant — for-loops iterate items() directly.
    itr = iter(enrollments_by_sid.items())
    for (sid, term) in itr:
        examined_sids.add(sid)
        for enr in term['enrollments']:
            # Only the first section's primary flag decides whether the course counts.
            first_section = enr['sections'][0]
            if not first_section.get('primary'):
                continue
            ccn = first_section['ccn']
            primary_sections.add(ccn)
            for site in enr['canvasSites']:
                score_info = site['analytics']['currentScore']
                if score_info['courseDeciles']:
                    primary_sections_with_scored_assignments.add(ccn)
                if score_info['boxPlottable']:
                    primary_sections_with_plottable_assignments.add(ccn)
                pct = score_info['student']['roundedUpPercentile']
                if pct is not None:
                    if pct <= 25:
                        # Second low score for this SID lands them in multiple_low_sids.
                        if sid in low_sids:
                            multiple_low_sids.add(sid)
                        low_sids.add(sid)
                        # courseDeciles[9] is the top-decile (max) score for the course.
                        max_score = score_info['courseDeciles'][9]
                        if score_info['student']['raw'] < (max_score * 0.7):
                            sids_with_low_raw_scores.add(sid)
    app.logger.warn(f'Total of {len(examined_sids)} students in classes. {len(low_sids)} with low scores in a class.')
    app.logger.warn(f'Low scorers: {sorted(low_sids)}')
    app.logger.warn(f'  {len(multiple_low_sids)} Low scorers in multiple sites: {sorted(multiple_low_sids)}')
    app.logger.warn(f'  {len(sids_with_low_raw_scores)} Low scorers with raw score < 70% of max: {sorted(sids_with_low_raw_scores)}')
    app.logger.warn(
        f'Total of {len(primary_sections)} primary sections. '
        f'{len(primary_sections_with_scored_assignments)} have scores. '
        f'{len(primary_sections_with_plottable_assignments)} have a reasonable range of scores.'
    )
    return {
        'sids': sorted(examined_sids),
        'low_sids': sorted(low_sids),
        'multiple_low_sids': sorted(multiple_low_sids),
        'sids_with_low_raw_scores': sorted(sids_with_low_raw_scores),
        'primary_sections_count': len(primary_sections),
        'sections_scored_count': len(primary_sections_with_scored_assignments),
        'sections_with_range_of_scores_count': len(primary_sections_with_plottable_assignments),
    }
def _term():
    """Term ID from the request args, defaulting to the (cached) current term."""
    requested_term_id = request.args.get('term')
    return requested_term_id or current_term_id()