def download_csv_per_filters():
    """Stream a CSV of students matching ad-hoc cohort filters in the POST body.

    Requires 'filters' unless a non-default domain is given; enforces per-department
    and per-domain authorization before building a phantom (unsaved) cohort.
    """
    benchmark = get_benchmarker('cohort download_csv_per_filters')
    benchmark('begin')
    params = request.get_json()
    filters = get_param(params, 'filters', [])
    fieldnames = get_param(params, 'csvColumnsSelected', [])
    domain = get_param(params, 'domain', 'default')
    if (domain == 'default' and not filters) or filters is None:
        raise BadRequestError('API requires \'filters\'')
    filter_keys = list(map(lambda f: f['key'], filters))
    if is_unauthorized_search(filter_keys):
        raise ForbiddenRequestError(
            'You are unauthorized to access student data managed by other departments'
        )
    # Fix: 'domain' was redundantly re-read from params here; the value fetched above is identical.
    if is_unauthorized_domain(domain):
        raise ForbiddenRequestError(
            f'You are unauthorized to query the \'{domain}\' domain')
    cohort = _construct_phantom_cohort(
        domain=domain,
        filters=filters,
        offset=0,
        limit=None,
        include_profiles=False,
        include_sids=True,
        include_students=False,
    )
    return _response_with_csv_download(benchmark, domain, fieldnames, cohort['sids'])
def search_for_admitted_students(
    search_phrase=None,
    order_by='last_name',
):
    """Search admitted-student records; return at most 50 admits plus the total match count."""
    benchmark = get_benchmarker('search_for_admitted_students')
    query_tables, query_filter, query_bindings = data_loch.get_admitted_students_query(
        search_phrase=search_phrase,
    )
    # NOTE(review): order_by is interpolated into SQL — callers must pass trusted column names only.
    sql = f"""
        SELECT DISTINCT(sa.cs_empl_id), sa.first_name, sa.middle_name, sa.last_name, sa.current_sir,
            sa.special_program_cep, sa.reentry_status, sa.first_generation_college, sa.urem,
            sa.application_fee_waiver_flag, sa.residency_category, sa.freshman_or_transfer, sa.updated_at
        {query_tables}
        {query_filter}
        ORDER BY sa.{order_by}, sa.last_name, sa.first_name, sa.middle_name, sa.cs_empl_id"""
    benchmark('begin admit search query')
    admits = data_loch.safe_execute_rds(sql, **query_bindings)
    benchmark('end')
    # Cap the feed at 50 admits but report the full match count.
    admit_feed = [_to_api_json(row) for row in islice(admits, 50)] if admits else None
    return {
        'admits': admit_feed,
        'totalAdmitCount': len(admits),
    }
def get_cohort_per_filters():
    """Return a phantom (unsaved) cohort built from filters posted in the request body."""
    benchmark = get_benchmarker('cohort get_students_per_filters')
    benchmark('begin')
    params = request.get_json()
    filters = get_param(params, 'filters', [])
    if not filters:
        raise BadRequestError('API requires \'filters\'')
    include_students = to_bool(get_param(params, 'includeStudents'))
    if include_students is None:
        # Default to including students when the param is absent.
        include_students = True
    order_by = get_param(params, 'orderBy', None)
    offset = get_param(params, 'offset', 0)
    limit = get_param(params, 'limit', 50)
    filter_keys = [f['key'] for f in filters]
    if is_unauthorized_search(filter_keys, order_by):
        raise ForbiddenRequestError(
            'You are unauthorized to access student data managed by other departments'
        )
    benchmark('begin phantom cohort query')
    cohort = _construct_phantom_cohort(
        filters=filters,
        order_by=order_by,
        offset=int(offset),
        limit=int(limit),
        include_alerts_for_user_id=current_user.get_id(),
        include_profiles=True,
        include_students=include_students,
    )
    _decorate_cohort(cohort)
    benchmark('end')
    return tolerant_jsonify(cohort)
def get_cohort(cohort_id):
    """Return a saved cohort (with students and alerts) if the current user may view it."""
    benchmark = get_benchmarker(f'cohort {cohort_id} get_cohort')
    benchmark('begin')
    filter_keys = list(request.args.keys())
    order_by = get_param(request.args, 'orderBy', None)
    if is_unauthorized_search(filter_keys, order_by):
        raise ForbiddenRequestError(
            'You are unauthorized to access student data managed by other departments'
        )
    include_students = to_bool(get_param(request.args, 'includeStudents'))
    if include_students is None:
        # Default to including students when the param is absent.
        include_students = True
    offset = get_param(request.args, 'offset', 0)
    limit = get_param(request.args, 'limit', 50)
    benchmark('begin cohort filter query')
    cohort = CohortFilter.find_by_id(
        int(cohort_id),
        order_by=order_by,
        offset=int(offset),
        limit=int(limit),
        include_alerts_for_user_id=current_user.get_id(),
        include_profiles=True,
        include_students=include_students,
    )
    if not (cohort and _can_current_user_view_cohort(cohort)):
        raise ResourceNotFoundError(f'No cohort found with identifier: {cohort_id}')
    _decorate_cohort(cohort)
    benchmark('end')
    return tolerant_jsonify(cohort)
def _curated_group_with_complete_student_profiles(curated_group_id, order_by='last_name', offset=0, limit=50):
    """Return a curated-group feed with full student profiles and alert counts (owner only)."""
    benchmark = get_benchmarker(f'curated group {curated_group_id} with student profiles')
    benchmark('begin')
    curated_group = CuratedGroup.find_by_id(curated_group_id)
    if not curated_group:
        raise ResourceNotFoundError(f'Sorry, no curated group found with id {curated_group_id}.')
    if curated_group.owner_id != current_user.get_id():
        raise ForbiddenRequestError(
            f'Current user, {current_user.get_uid()}, does not own curated group {curated_group.id}'
        )
    api_json = curated_group.to_api_json(order_by=order_by, offset=offset, limit=limit)
    member_sids = [student['sid'] for student in api_json['students']]
    benchmark('begin profile query')
    # Replace the lightweight member records with full summary profiles.
    api_json['students'] = get_summary_student_profiles(member_sids)
    benchmark('begin alerts query')
    Alert.include_alert_counts_for_students(viewer_user_id=current_user.get_id(), group=api_json)
    benchmark('end')
    return api_json
def students_with_alerts(cohort_id):
    """Return summary profiles of the cohort's students who currently have alerts."""
    benchmark = get_benchmarker(f'cohort {cohort_id} students_with_alerts')
    benchmark('begin')
    offset = get_param(request.args, 'offset', 0)
    limit = get_param(request.args, 'limit', 50)
    cohort = CohortFilter.find_by_id(
        cohort_id,
        include_alerts_for_user_id=current_user.get_id(),
        include_students=False,
        alert_offset=offset,
        alert_limit=limit,
    )
    benchmark('fetched cohort')
    if not (cohort and _can_current_user_view_cohort(cohort)):
        raise ResourceNotFoundError(f'No cohort found with identifier: {cohort_id}')
    _decorate_cohort(cohort)
    students = cohort.get('alerts', [])
    alert_profiles = get_summary_student_profiles([s['sid'] for s in students])
    benchmark('fetched student profiles')
    profiles_by_sid = {p['sid']: p for p in alert_profiles}
    for student in students:
        student.update(profiles_by_sid[student['sid']])
        # The enrolled units count is the one piece of term data we want to preserve.
        term = student.get('term')
        if term:
            student['term'] = {'enrolledUnits': term.get('enrolledUnits')}
    benchmark('end')
    return tolerant_jsonify(students)
def get_full_student_profiles(sids):
    """Fetch full SIS profiles for sids and merge in photo, ASC and COE data per user scope."""
    benchmark = get_benchmarker('get_full_student_profiles')
    benchmark('begin')
    if not sids:
        return []
    benchmark('begin SIS profile query')
    profile_results = data_loch.get_student_profiles(sids)
    benchmark('end SIS profile query')
    if not profile_results:
        return []
    profiles_by_sid = _get_profiles_by_sid(profile_results)
    # Preserve the caller's sid ordering, dropping sids with no profile.
    profiles = [profiles_by_sid[sid] for sid in sids if profiles_by_sid.get(sid)]
    benchmark('begin photo merge')
    _merge_photo_urls(profiles)
    benchmark('end photo merge')
    scope = get_student_query_scope()
    benchmark('begin ASC profile merge')
    _merge_asc_student_profile_data(profiles_by_sid, scope)
    benchmark('end ASC profile merge')
    if 'COENG' in scope or 'ADMIN' in scope:
        benchmark('begin COE profile merge')
        _merge_coe_student_profile_data(profiles_by_sid)
        benchmark('end COE profile merge')
    return profiles
def get_students_with_alerts(curated_group_id):
    """Return summary profiles, with alert counts, for curated-group students who have alerts."""
    offset = get_param(request.args, 'offset', 0)
    limit = get_param(request.args, 'limit', 50)
    benchmark = get_benchmarker(f'curated group {curated_group_id} students_with_alerts')
    benchmark('begin')
    curated_group = CuratedGroup.find_by_id(curated_group_id)
    if not curated_group:
        raise ResourceNotFoundError(f'Sorry, no curated group found with id {curated_group_id}.')
    if not _can_current_user_view_curated_group(curated_group):
        raise ForbiddenRequestError(
            f'Current user, {current_user.get_uid()}, cannot view curated group {curated_group.id}'
        )
    students = Alert.include_alert_counts_for_students(
        benchmark=benchmark,
        viewer_user_id=current_user.get_id(),
        group={'sids': CuratedGroup.get_all_sids(curated_group_id)},
        count_only=True,
        offset=offset,
        limit=limit,
    )
    # Keep only students with at least one alert; remember each count by sid.
    alert_count_per_sid = {s.get('sid'): s.get('alertCount') for s in students if s.get('alertCount') > 0}
    benchmark('begin profile query')
    students_with_alerts = get_student_profile_summaries(sids=list(alert_count_per_sid.keys()))
    benchmark('end profile query')
    for student in students_with_alerts:
        student['alertCount'] = alert_count_per_sid[student['sid']]
    benchmark('end')
    return tolerant_jsonify(students_with_alerts)
def get_advising_notes(sid):
    """Aggregate advising notes for a student from every source, flagging the ones already read."""
    benchmark = get_benchmarker(f'get_advising_notes {sid}')
    benchmark('begin')
    notes_by_id = {}
    # Each source returns a dict keyed by note id; later sources win on key collision.
    sources = (
        ('SIS advising notes', get_sis_advising_notes),
        ('ASC advising notes', get_asc_advising_notes),
        ('Data Science advising notes', get_data_science_advising_notes),
        ('E&I advising notes', get_e_i_advising_notes),
        ('History Dept advising notes', get_history_dept_advising_notes),
        ('non legacy advising notes', get_non_legacy_advising_notes),
        ('SIS late drop eforms', get_sis_late_drop_eforms),
    )
    for label, fetch in sources:
        benchmark(f'begin {label} query')
        notes_by_id.update(fetch(sid))
    if not notes_by_id.values():
        return None
    notes_read = NoteRead.get_notes_read_by_user(current_user.get_id(), notes_by_id.keys())
    for note_read in notes_read:
        note_feed = notes_by_id.get(note_read.note_id)
        if note_feed:
            note_feed['read'] = True
        else:
            app.logger.error(f'DB query mismatch for note id {note_read.note_id}')
    benchmark('end')
    return list(notes_by_id.values())
def to_api_json(self, include_students, order_by='last_name', offset=0, limit=50):
    """Serialize this curated group to an API feed.

    When include_students is true, a page of student records (per order_by,
    offset, limit) is added under 'students'; admitted-student groups use the
    admit query instead of the general student query.
    """
    benchmark = get_benchmarker(f'CuratedGroup {self.id} to_api_json')
    benchmark('begin')
    sids = CuratedGroupStudent.get_sids(curated_group_id=self.id)
    feed = {
        'domain': self.domain,
        'id': self.id,
        'name': self.name,
        'ownerId': self.owner_id,
        'sids': sids,
        'totalStudentCount': len(sids),
    }
    if include_students:
        if sids:
            if self.domain == 'admitted_students':
                feed['students'] = get_admitted_students_by_sids(
                    limit=limit,
                    offset=offset,
                    order_by=order_by,
                    sids=sids,
                )
            else:
                result = query_students(
                    sids=sids,
                    # Fix: ('all') is just the string 'all'; a one-element tuple needs the comma.
                    academic_career_status=('all',),
                    include_profiles=False,
                    order_by=order_by,
                    offset=offset,
                    limit=limit,
                )
                feed['students'] = result['students']
        else:
            feed['students'] = []
    benchmark('end')
    return feed
def _curated_group_with_complete_student_profiles(curated_group_id, order_by='last_name', term_id=None, offset=0, limit=50):
    """Return a curated-group feed with full profiles, alert counts and referencing cohort ids."""
    benchmark = get_benchmarker(f'curated group {curated_group_id} with student profiles')
    benchmark('begin')
    curated_group = CuratedGroup.find_by_id(curated_group_id)
    if not curated_group:
        raise ResourceNotFoundError(f'Sorry, no curated group found with id {curated_group_id}.')
    if not _can_current_user_view_curated_group(curated_group):
        raise ForbiddenRequestError(
            f'Current user, {current_user.get_uid()}, cannot view curated group {curated_group.id}'
        )
    api_json = curated_group.to_api_json(order_by=order_by, offset=offset, limit=limit)
    member_sids = [student['sid'] for student in api_json['students']]
    benchmark('begin profile query')
    # Replace the lightweight member records with summary profiles (historical included).
    api_json['students'] = get_summary_student_profiles(member_sids, term_id=term_id, include_historical=True)
    benchmark('begin alerts query')
    Alert.include_alert_counts_for_students(viewer_user_id=current_user.get_id(), group=api_json)
    benchmark('end')
    benchmark('begin get_referencing_cohort_ids')
    api_json['referencingCohortIds'] = curated_group.get_referencing_cohort_ids()
    benchmark('end')
    return api_json
def search_advising_appointments(
    search_phrase,
    advisor_csid=None,
    advisor_uid=None,
    student_csid=None,
    topic=None,
    datetime_from=None,
    datetime_to=None,
    offset=0,
    limit=20,
):
    """Search advising appointments, merging local (BOA) results with loch results.

    Local appointments are queried first; if they already fill the page (limit),
    the loch query is skipped entirely. Otherwise loch results top up the page,
    with offset shifted down by the number of local matches already consumed.
    """
    benchmark = get_benchmarker('search_advising_appointments')
    benchmark('begin')
    if search_phrase:
        # Tokenize the phrase (deduplicated) and join with '&' for full-text search.
        search_terms = list({t.group(0) for t in list(re.finditer(TEXT_SEARCH_PATTERN, search_phrase)) if t})
        search_phrase = ' & '.join(search_terms)
    else:
        search_terms = []
    # Resolve advisor UID from CSID only when a UID was not supplied directly.
    advisor_uid = get_uid_for_csid(app, advisor_csid) if (not advisor_uid and advisor_csid) else advisor_uid
    benchmark('begin local appointments query')
    appointments_feed = Appointment.search(
        search_phrase=search_phrase,
        advisor_uid=advisor_uid,
        student_csid=student_csid,
        topic=topic,
        datetime_from=datetime_from,
        datetime_to=datetime_to,
        limit=limit,
        offset=offset,
    )
    benchmark('end local appointments query')
    local_appointments_count = len(appointments_feed)
    if local_appointments_count == limit:
        # Page already full from local results; skip the loch entirely.
        return appointments_feed
    benchmark('begin loch appointments query')
    loch_results = data_loch.search_advising_appointments(
        search_phrase=search_phrase,
        advisor_uid=advisor_uid,
        advisor_csid=advisor_csid,
        student_csid=student_csid,
        topic=topic,
        datetime_from=datetime_from,
        datetime_to=datetime_to,
        # Shift the loch offset by however many local matches were already consumed.
        offset=max(0, offset - local_appointments_count),
        limit=(limit - local_appointments_count),
    )
    benchmark('end loch appointments query')
    benchmark('begin loch appointments parsing')
    appointments_feed += _get_loch_appointments_search_results(loch_results, search_terms)
    benchmark('end loch appointments parsing')
    return appointments_feed
def create_notes():
    """Create an advising note — or a batch of notes — from multipart form data."""
    benchmark = get_benchmarker('create_notes')
    params = request.form
    sids = _get_sids_for_note_creation()
    benchmark(f'SID count: {len(sids)}')
    body = params.get('body', None)
    is_private = to_bool_or_none(params.get('isPrivate', False))
    subject = params.get('subject', None)
    topics = get_note_topics_from_http_post()
    if not sids or not subject:
        benchmark('end (BadRequest)')
        raise BadRequestError('Note creation requires \'subject\' and \'sids\'')
    dept_codes = dept_codes_where_advising(current_user)
    if current_user.is_admin or not len(dept_codes):
        benchmark('end (Forbidden)')
        raise ForbiddenRequestError('Sorry, only advisors can create advising notes')
    if is_private and not current_user.can_access_private_notes:
        benchmark('end (Forbidden)')
        raise ForbiddenRequestError('Sorry, you are not authorized to manage note privacy.')
    attachments = get_note_attachments_from_http_post(tolerate_none=True)
    benchmark(f'Attachment count: {len(attachments)}')
    body = process_input_from_rich_text_editor(body)
    template_attachment_ids = get_template_attachment_ids_from_http_post()
    # Keyword args shared by single-note and batch creation.
    common = dict(
        **_get_author_profile(),
        attachments=attachments,
        body=body,
        is_private=is_private,
        subject=subject,
        template_attachment_ids=template_attachment_ids,
        topics=topics,
    )
    if len(sids) == 1:
        note = Note.create(sid=sids[0], **common)
        response = tolerant_jsonify(_boa_note_to_compatible_json(note, note_read=True))
    else:
        response = tolerant_jsonify(
            Note.create_batch(
                author_id=current_user.to_api_json()['id'],
                sids=sids,
                **common,
            ),
        )
    benchmark('end')
    return response
def get_summary_student_profiles(sids, term_id=None):
    """Return lightweight student profiles: key SIS fields plus term enrollments and GPA."""
    if not sids:
        return []
    benchmark = get_benchmarker('get_summary_student_profiles')
    benchmark('begin')
    # TODO It's probably more efficient to store summary profiles in the loch, rather than distilling them
    # on the fly from full profiles.
    profiles = get_full_student_profiles(sids)
    # TODO Many views require no term enrollment information other than a units count. This datum too should be
    # stored in the loch without BOAC having to crunch it.
    term_id = term_id or current_term_id()
    benchmark('begin enrollments query')
    enrollments_for_term = data_loch.get_enrollments_for_term(term_id, sids)
    benchmark('end enrollments query')
    enrollments_by_sid = {row['sid']: json.loads(row['enrollment_term']) for row in enrollments_for_term}
    benchmark('begin term GPA query')
    term_gpas = get_term_gpas_by_sid(sids)
    benchmark('end term GPA query')
    benchmark('begin profile transformation')
    for profile in profiles:
        # Strip SIS details to lighten the API load.
        sis_profile = profile.pop('sisProfile', None)
        if sis_profile:
            profile['cumulativeGPA'] = sis_profile.get('cumulativeGPA')
            profile['cumulativeUnits'] = sis_profile.get('cumulativeUnits')
            profile['currentTerm'] = sis_profile.get('currentTerm')
            profile['expectedGraduationTerm'] = sis_profile.get('expectedGraduationTerm')
            profile['level'] = _get_sis_level_description(sis_profile)
            profile['majors'] = sorted(plan.get('description') for plan in sis_profile.get('plans', []))
            profile['transfer'] = sis_profile.get('transfer')
            if sis_profile.get('withdrawalCancel'):
                profile['withdrawalCancel'] = sis_profile['withdrawalCancel']
        # Add the singleton term.
        term = enrollments_by_sid.get(profile['sid'])
        profile['hasCurrentTermEnrollments'] = False
        if term:
            profile['analytics'] = term.pop('analytics', None)
            profile['term'] = term
            if term['termId'] == current_term_id() and len(term['enrollments']) > 0:
                profile['hasCurrentTermEnrollments'] = True
        profile['termGpa'] = term_gpas.get(profile['sid'])
    benchmark('end')
    return profiles
def search_for_students(
    include_profiles=False,
    search_phrase=None,
    order_by=None,
    offset=0,
    limit=None,
):
    """Search students by phrase; return a page of students plus the total match count.

    When include_profiles is true, summary profiles are attached to each result;
    otherwise a lighter API feed is returned.
    """
    benchmark = get_benchmarker('search_for_students')
    benchmark('begin')
    query_tables, query_filter, query_bindings = data_loch.get_students_query(search_phrase=search_phrase)
    if not query_tables:
        return {
            'students': [],
            'totalStudentCount': 0,
        }
    o, o_secondary, o_tertiary, supplemental_query_tables = data_loch.get_students_ordering(order_by=order_by)
    if supplemental_query_tables:
        query_tables += supplemental_query_tables
    benchmark('begin SID query')
    result = data_loch.safe_execute_rds(f'SELECT DISTINCT(sas.sid) {query_tables} {query_filter}', **query_bindings)
    benchmark('end SID query')
    total_student_count = len(result)
    sql = f"""SELECT sas.sid
        {query_tables}
        {query_filter}
        GROUP BY sas.sid
        ORDER BY MIN({o}) NULLS FIRST, MIN({o_secondary}) NULLS FIRST, MIN({o_tertiary}) NULLS FIRST"""
    if o_tertiary != 'sas.sid':
        sql += ', sas.sid'
    sql += f' OFFSET {offset}'
    if limit and limit < 100:  # Sanity check large limits
        # Fix: this fragment has no interpolation, so a plain string (not an f-string) is correct.
        sql += ' LIMIT :limit'
        query_bindings['limit'] = limit
    benchmark('begin student query')
    result = data_loch.safe_execute_rds(sql, **query_bindings)
    if include_profiles:
        benchmark('begin profile collection')
        students = get_summary_student_profiles([row['sid'] for row in result])
        benchmark('end profile collection')
    else:
        students = get_api_json([row['sid'] for row in result])
    benchmark('end')
    return {
        'students': students,
        'totalStudentCount': total_student_count,
    }
def create_batch_degree_checks(template_id, sids):
    """Clone a degree template into a degree check for each sid; return new ids keyed by sid."""
    benchmark = get_benchmarker(f'create_batch_degree_checks template_id={template_id}')
    benchmark('begin')
    template = fetch_degree_template(template_id)
    created_by = current_user.get_id()
    benchmark(f'creating {len(sids)} clones')
    results_by_sid = {sid: clone(template, created_by, sid=sid).id for sid in sids}
    benchmark('end')
    return results_by_sid
def search_for_students(
    search_phrase=None,
    order_by=None,
    offset=0,
    limit=None,
):
    """Search students by phrase; return summary profiles plus the total match count.

    Falls back to a historical-student search when a purely numeric phrase
    matches no current students.
    """
    benchmark = get_benchmarker('search_for_students')
    benchmark('begin')
    query_tables, query_filter, query_bindings = data_loch.get_students_query(search_phrase=search_phrase)
    if not query_tables:
        return {
            'students': [],
            'totalStudentCount': 0,
        }
    o, o_secondary, o_tertiary, o_direction, supplemental_query_tables = data_loch.get_students_ordering(
        current_term_id=current_term_id(),
        order_by=order_by,
    )
    if supplemental_query_tables:
        query_tables += supplemental_query_tables
    benchmark('begin SID query')
    # First pass counts total distinct matches before pagination is applied.
    result = data_loch.safe_execute_rds(f'SELECT DISTINCT(sas.sid) {query_tables} {query_filter}', **query_bindings)
    benchmark('end SID query')
    total_student_count = len(result)
    # In the special case of a numeric search phrase that returned no matches, fall back to historical student search.
    if total_student_count == 0 and search_phrase and re.match(r'^\d+$', search_phrase):
        return search_for_student_historical(search_phrase)
    sql = f"""SELECT sas.sid {query_tables} {query_filter} GROUP BY sas.sid ORDER BY MIN({o}) {o_direction} NULLS FIRST, MIN({o_secondary}) NULLS FIRST, MIN({o_tertiary}) NULLS FIRST"""
    # Append sid as the final tiebreaker unless the tertiary ordering already is sid.
    if o_tertiary != 'sas.sid':
        sql += ', sas.sid'
    sql += f' OFFSET {offset}'
    if limit and limit < 100:  # Sanity check large limits
        sql += ' LIMIT :limit'
        query_bindings['limit'] = limit
    benchmark('begin student query')
    result = data_loch.safe_execute_rds(sql, **query_bindings)
    benchmark('begin profile collection')
    students = get_summary_student_profiles([row['sid'] for row in result])
    benchmark('end')
    return {
        'students': students,
        'totalStudentCount': total_student_count,
    }
def download_csv(curated_group_id):
    """Stream a CSV download of all students in a curated group owned by the current user."""
    benchmark = get_benchmarker(f'curated group {curated_group_id} download_csv')
    benchmark('begin')
    curated_group = CuratedGroup.find_by_id(curated_group_id)
    if not curated_group:
        raise ResourceNotFoundError(f'No curated group found with id: {curated_group_id}')
    if curated_group.owner_id != current_user.get_id():
        raise ForbiddenRequestError(
            f'Current user, {current_user.get_uid()}, does not own curated group {curated_group.id}'
        )
    all_sids = CuratedGroup.get_all_sids(curated_group_id)
    return response_with_students_csv_download(sids=all_sids, benchmark=benchmark)
def get_summary_student_profiles(sids, include_historical=False, term_id=None):
    """Return summary profiles for sids, optionally supplementing with historical student data."""
    if not sids:
        return []
    benchmark = get_benchmarker('get_summary_student_profiles')
    benchmark('begin')
    # TODO It's probably more efficient to store summary profiles in the loch, rather than distilling them
    # on the fly from full profiles.
    profiles = get_full_student_profiles(sids)
    # TODO Many views require no term enrollment information other than a units count. This datum too should be
    # stored in the loch without BOAC having to crunch it.
    term_id = term_id or current_term_id()
    benchmark('begin enrollments query')
    enrollments_for_term = data_loch.get_enrollments_for_term(term_id, sids)
    benchmark('end enrollments query')
    enrollments_by_sid = {row['sid']: json.loads(row['enrollment_term']) for row in enrollments_for_term}
    benchmark('begin term GPA query')
    term_gpas = get_term_gpas_by_sid(sids)
    benchmark('end term GPA query')
    # Sids that came back with no current profile may still exist as historical students.
    remaining_sids = list(set(sids) - {p.get('sid') for p in profiles})
    if remaining_sids and include_historical:
        benchmark('begin historical profile supplement')
        historical_profile_rows = data_loch.get_historical_student_profiles_for_sids(remaining_sids)
        historical_profiles = [
            {**json.loads(row['profile']), 'fullProfilePending': True}
            for row in historical_profile_rows
        ]
        # We don't expect photo information to show for historical profiles, but we still need a placeholder element
        # in the feed so the front end can show the proper fallback.
        _merge_photo_urls(historical_profiles)
        for historical_profile in historical_profiles:
            ManuallyAddedAdvisee.find_or_create(historical_profile['sid'])
        profiles += historical_profiles
        historical_enrollments_for_term = data_loch.get_historical_enrollments_for_term(term_id, remaining_sids)
        for row in historical_enrollments_for_term:
            enrollments_by_sid[row['sid']] = json.loads(row['enrollment_term'])
        benchmark('end historical profile supplement')
    benchmark('begin profile transformation')
    for profile in profiles:
        summarize_profile(profile, enrollments=enrollments_by_sid, term_gpas=term_gpas)
    benchmark('end')
    return profiles
def create_batch(
    cls,
    author_id,
    author_uid,
    author_name,
    author_role,
    author_dept_codes,
    sids,
    subject,
    body,
    topics=(),
    attachments=(),
    template_attachment_ids=(),
):
    """Create one note per sid, attach topics and attachments, then refresh the search index.

    Returns a dict mapping sid to the id of its newly created note.
    """
    sid_count = len(sids)
    benchmark = get_benchmarker(
        'begin note creation' if sid_count == 1 else f'begin creation of {sid_count} notes')
    ids_by_sid = _create_notes(
        author_id=author_id,
        author_uid=author_uid,
        author_name=author_name,
        author_role=author_role,
        author_dept_codes=author_dept_codes,
        body=body,
        sids=sids,
        subject=subject,
    )
    note_ids = list(ids_by_sid.values())
    topic_count = len(topics)
    benchmark('begin add 1 topic' if topic_count == 1 else f'begin add {topic_count} topics')
    _add_topics_to_notes(author_uid=author_uid, note_ids=note_ids, topics=topics)
    attachment_count = len(attachments)
    benchmark('begin add 1 attachment' if attachment_count == 1 else f'begin add {attachment_count} attachments')
    _add_attachments_to_notes(
        attachments=attachments,
        template_attachment_ids=template_attachment_ids,
        author_uid=author_uid,
        note_ids=note_ids,
    )
    benchmark('begin refresh search index')
    cls.refresh_search_index()
    benchmark('end note creation' if sid_count == 1 else f'end creation of {sid_count} notes')
    return ids_by_sid
def get_student_profile_summaries(sids, term_id=None):
    """Return profile summaries (lighter than full profiles) with term enrollments merged in."""
    if not sids:
        return []
    benchmark = get_benchmarker('get_student_profile_summaries')
    benchmark('begin')
    profile_results = data_loch.get_student_profile_summaries(sids)
    if not profile_results:
        return []
    profiles_by_sid = _get_profiles_by_sid(profile_results)
    # Preserve the caller's sid ordering, dropping sids with no profile.
    profiles = [profiles_by_sid[sid] for sid in sids if profiles_by_sid.get(sid)]
    benchmark('begin photo merge')
    _merge_photo_urls(profiles)
    benchmark('end photo merge')
    scope = get_student_query_scope()
    benchmark('begin ASC profile merge')
    _merge_asc_student_profile_data(profiles_by_sid, scope)
    benchmark('end ASC profile merge')
    if 'COENG' in scope or 'ADMIN' in scope:
        benchmark('begin COE profile merge')
        _merge_coe_student_profile_data(profiles_by_sid)
        benchmark('end COE profile merge')
    # TODO Many views require no term enrollment information other than a units count. This datum too should be
    # stored in the loch without BOAC having to crunch it.
    term_id = term_id or current_term_id()
    benchmark('begin enrollments query')
    enrollments_for_term = data_loch.get_enrollments_for_term(term_id, sids)
    benchmark('end enrollments query')
    enrollments_by_sid = {row['sid']: json.loads(row['enrollment_term']) for row in enrollments_for_term}
    for profile in profiles:
        _merge_enrollments(profile, enrollments=enrollments_by_sid)
    benchmark('end')
    return profiles
def download_csv(curated_group_id):
    """Stream a CSV of curated-group students, limited to caller-selected columns."""
    benchmark = get_benchmarker(f'curated group {curated_group_id} download_csv')
    benchmark('begin')
    curated_group = CuratedGroup.find_by_id(curated_group_id)
    fieldnames = get_param(request.get_json(), 'csvColumnsSelected', [])
    if not curated_group:
        raise ResourceNotFoundError(f'No curated group found with id: {curated_group_id}')
    if not _can_current_user_view_curated_group(curated_group):
        raise ForbiddenRequestError(
            f'Current user, {current_user.get_uid()}, cannot view curated group {curated_group.id}'
        )
    return response_with_students_csv_download(
        sids=CuratedGroup.get_all_sids(curated_group_id),
        fieldnames=fieldnames,
        benchmark=benchmark,
    )
def get_section(term_id, section_id):
    """Return a course-section feed with student profiles and alert counts merged in."""
    benchmark = util.get_benchmarker(f'course section {section_id} in term {term_id}')
    benchmark('begin')
    if not current_user.can_access_canvas_data:
        raise ForbiddenRequestError('Unauthorized to view course data')
    offset = util.get(request.args, 'offset', None)
    offset = int(offset) if offset else offset
    limit = util.get(request.args, 'limit', None)
    limit = int(limit) if limit else limit
    featured = util.get(request.args, 'featured', None)
    section = get_sis_section(term_id, section_id)
    if not section:
        raise ResourceNotFoundError(f'No section {section_id} in term {term_id}')
    student_profiles = get_course_student_profiles(term_id, section_id, offset=offset, limit=limit, featured=featured)
    section.update(student_profiles)
    Alert.include_alert_counts_for_students(
        benchmark=benchmark,
        viewer_user_id=current_user.get_id(),
        group=student_profiles,
    )
    benchmark('end')
    return tolerant_jsonify(section)
def download_csv_per_filters():
    """Stream a CSV of students matching ad-hoc cohort filters in the POST body."""
    benchmark = get_benchmarker('cohort download_csv_per_filters')
    benchmark('begin')
    filters = get_param(request.get_json(), 'filters', [])
    if not filters:
        raise BadRequestError('API requires \'filters\'')
    if is_unauthorized_search([f['key'] for f in filters]):
        raise ForbiddenRequestError(
            'You are unauthorized to access student data managed by other departments'
        )
    # Build a phantom (unsaved) cohort just to resolve the matching sids.
    cohort = CohortFilter.construct_phantom_cohort(
        filters=filters,
        offset=0,
        limit=None,
        include_profiles=False,
        include_sids=True,
        include_students=False,
    )
    return response_with_students_csv_download(sids=cohort['sids'], benchmark=benchmark)
def find_advisors_by_name(cls, tokens, limit=None):
    """Find distinct advisor name/UID pairs whose names match every token (case-insensitive)."""
    benchmark = get_benchmarker('appointments find_advisors_by_name')
    benchmark('begin')
    params = {}
    joins = []
    for idx, token in enumerate(tokens):
        # Each token contributes a self-join, so all tokens must match the same advisor.
        joins.append(
            f"""JOIN appointments a{idx}
                ON UPPER(a{idx}.advisor_name) LIKE :token_{idx}
                AND a{idx}.advisor_uid = a.advisor_uid""",
        )
        params[f'token_{idx}'] = f'%{token}%'
    sql = f"""SELECT DISTINCT a.advisor_name, a.advisor_uid
        FROM appointments a
        {' '.join(joins)}
        ORDER BY a.advisor_name"""
    if limit:
        sql += f' LIMIT {limit}'
    benchmark('execute query')
    results = db.session.execute(sql, params)
    benchmark('end')
    return results
def download_cohort_csv():
    """Stream a CSV download for a saved cohort the current user is allowed to view."""
    benchmark = get_benchmarker('cohort download_csv')
    benchmark('begin')
    params = request.get_json()
    cohort_id = int(get_param(params, 'cohortId'))
    cohort = CohortFilter.find_by_id(
        cohort_id,
        offset=0,
        limit=None,
        include_profiles=False,
        include_sids=True,
        include_students=False,
    )
    if not (cohort and _can_current_user_view_cohort(cohort)):
        raise ResourceNotFoundError(f'No cohort found with identifier: {cohort_id}')
    fieldnames = get_param(params, 'csvColumnsSelected', [])
    sids = CohortFilter.get_sids(cohort['id'])
    return _response_with_csv_download(benchmark, cohort['domain'], fieldnames, sids)
def get_advising_appointments(sid):
    """Aggregate a student's advising appointments from all sources, flagging read ones."""
    benchmark = get_benchmarker(f'get_advising_appointments {sid}')
    benchmark('begin')
    appointments_by_id = {}
    # Each source returns a dict keyed by appointment id; later sources win on collision.
    sources = (
        ('SIS advising appointments', get_sis_advising_appointments),
        ('non legacy advising appointments', get_non_legacy_advising_appointments),
        ('YCBM advising appointments', get_ycbm_advising_appointments),
    )
    for label, fetch in sources:
        benchmark(f'begin {label} query')
        appointments_by_id.update(fetch(sid))
    if not appointments_by_id.values():
        return None
    appointments_read = AppointmentRead.get_appointments_read_by_user(current_user.get_id(), appointments_by_id.keys())
    for appointment_read in appointments_read:
        appointment_feed = appointments_by_id.get(appointment_read.appointment_id)
        if appointment_feed:
            appointment_feed['read'] = True
        else:
            app.logger.error(f'DB query mismatch for appointment id {appointment_read.appointment_id}')
    benchmark('end')
    return list(appointments_by_id.values())
def get_full_student_profiles(sids):
    """Fetch full profiles for sids, merging photos plus ASC and COE data per the user's scope."""
    benchmark = get_benchmarker('get_full_student_profiles')
    benchmark('begin')
    if not sids:
        return []
    benchmark('begin SIS profile query')
    profile_results = data_loch.get_student_profiles(sids)
    benchmark('end SIS profile query')
    if not profile_results:
        return []
    profiles_by_sid = _get_profiles_by_sid(profile_results)
    # Preserve the caller's sid ordering, dropping sids with no profile.
    profiles = [profiles_by_sid[sid] for sid in sids if profiles_by_sid.get(sid)]
    benchmark('begin photo merge')
    _merge_photo_urls(profiles)
    benchmark('end photo merge')
    scope = get_student_query_scope()
    if 'UWASC' in scope or 'ADMIN' in scope:
        benchmark('begin ASC profile merge')
        for row in data_loch.get_athletics_profiles(sids):
            target = profiles_by_sid.get(row['sid'])
            if target:
                target['athleticsProfile'] = json.loads(row['profile'])
        benchmark('end ASC profile merge')
    if 'COENG' in scope or 'ADMIN' in scope:
        benchmark('begin COE profile merge')
        coe_profiles = data_loch.get_coe_profiles(sids)
        if coe_profiles:
            for coe_profile in coe_profiles:
                _merge_coe_student_profile_data(profiles_by_sid.get(coe_profile['sid']), coe_profile)
        benchmark('end COE profile merge')
    return profiles
def get_summary_student_profiles(sids, include_historical=False, term_id=None):
    """Return summary profiles with term enrollments, academic standing and term GPAs merged in."""
    if not sids:
        return []
    benchmark = get_benchmarker('get_summary_student_profiles')
    benchmark('begin')
    # TODO It's probably more efficient to store summary profiles in the loch, rather than distilling them
    # on the fly from full profiles.
    profiles = get_full_student_profiles(sids)
    # TODO Many views require no term enrollment information other than a units count. This datum too should be
    # stored in the loch without BOAC having to crunch it.
    term_id = term_id or current_term_id()
    benchmark('begin enrollments query')
    enrollments_for_term = data_loch.get_enrollments_for_term(term_id, sids)
    benchmark('end enrollments query')
    enrollments_by_sid = {row['sid']: json.loads(row['enrollment_term']) for row in enrollments_for_term}
    benchmark('begin academic standing query')
    academic_standing = get_academic_standing_by_sid(sids)
    benchmark('end academic standing query')
    benchmark('begin term GPA query')
    term_gpas = get_term_gpas_by_sid(sids)
    benchmark('end term GPA query')
    # Sids with no current profile may still exist as historical students.
    remaining_sids = list(set(sids) - {p.get('sid') for p in profiles})
    if remaining_sids and include_historical:
        benchmark('begin historical profile supplement')
        profiles += get_historical_student_profiles(remaining_sids)
        for row in data_loch.get_historical_enrollments_for_term(str(term_id), remaining_sids):
            enrollments_by_sid[row['sid']] = json.loads(row['enrollment_term'])
        benchmark('end historical profile supplement')
    benchmark('begin profile transformation')
    for profile in profiles:
        summarize_profile(profile, enrollments=enrollments_by_sid, academic_standing=academic_standing, term_gpas=term_gpas)
    benchmark('end')
    return profiles
def get_my_curated_groups():
    """Return the current user's curated groups, each with an alert count and membership summary."""
    benchmark = get_benchmarker('my_curated_groups')
    user_id = current_user.get_id()
    curated_groups = []
    for group in CuratedGroup.get_curated_groups(owner_id=user_id):
        students = [{'sid': sid} for sid in CuratedGroup.get_all_sids(group.id)]
        students_with_alerts = Alert.include_alert_counts_for_students(
            benchmark=benchmark,
            viewer_user_id=user_id,
            group={'students': students},
            count_only=True,
        )
        api_json = group.to_api_json(include_students=False)
        curated_groups.append({
            **api_json,
            'alertCount': sum(s['alertCount'] for s in students_with_alerts),
            'sids': [student['sid'] for student in students],
            'totalStudentCount': len(students),
        })
    return curated_groups