def test_auto_terms(self, app, current_term_index):
    all_term_ids = set(berkeley.reverse_term_ids(include_future_terms=True, include_legacy_terms=True))
    canvas_integrated_term_ids = set(berkeley.reverse_term_ids())
    assert canvas_integrated_term_ids < all_term_ids
    assert berkeley.current_term_id() == '2182'
    assert berkeley.future_term_id() == '2188'
    assert berkeley.s3_canvas_data_path_current_term() == 'canvas-data/term/spring-2018'
def test_term_id_lists(self, app):
    all_term_ids = set(berkeley.reverse_term_ids(include_future_terms=True, include_legacy_terms=True))
    canvas_integrated_term_ids = set(berkeley.reverse_term_ids())
    future_term_ids = set(berkeley.future_term_ids())
    legacy_term_ids = set(berkeley.legacy_term_ids())
    assert canvas_integrated_term_ids < all_term_ids
    assert berkeley.sis_term_id_for_name(app.config['EARLIEST_LEGACY_TERM']) in all_term_ids
    assert berkeley.sis_term_id_for_name(app.config['EARLIEST_TERM']) in all_term_ids
    assert berkeley.sis_term_id_for_name(app.config['CURRENT_TERM']) in all_term_ids
    assert berkeley.sis_term_id_for_name(app.config['FUTURE_TERM']) in all_term_ids
    assert berkeley.current_term_id() in canvas_integrated_term_ids
    assert berkeley.earliest_term_id() in canvas_integrated_term_ids
    assert future_term_ids.isdisjoint(canvas_integrated_term_ids)
    assert future_term_ids < all_term_ids
    assert berkeley.future_term_id() in future_term_ids
    assert legacy_term_ids.isdisjoint(canvas_integrated_term_ids)
    assert legacy_term_ids < all_term_ids
    assert berkeley.earliest_legacy_term_id() in legacy_term_ids
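
# Illustration only, not part of the berkeley module under test: the assertions above rely on
# the SIS term-id encoding in which '2182' corresponds to Spring 2018 (see the
# 'canvas-data/term/spring-2018' path check) and '2188' to Fall 2018. A minimal sketch of that
# decoding, assuming the usual century digit / two-digit year / season digit layout;
# decode_term_id is a hypothetical helper written here for clarity.
def decode_term_id(term_id):
    seasons = {'2': 'Spring', '5': 'Summer', '8': 'Fall'}
    year = 1900 + 100 * (int(term_id[0]) - 1) + int(term_id[1:3])
    return f'{seasons[term_id[3]]} {year}'

assert decode_term_id('2182') == 'Spring 2018'
assert decode_term_id('2188') == 'Fall 2018'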
def app_config():
    def _to_api_key(key):
        chunks = key.split('_')
        return f"{chunks[0].lower()}{''.join(chunk.title() for chunk in chunks[1:])}"
    return tolerant_jsonify(
        {
            **dict((_to_api_key(key), app.config[key] if key in app.config else None) for key in PUBLIC_CONFIGS),
            **{
                'currentEnrollmentTerm': current_term_name(),
                'currentEnrollmentTermId': int(current_term_id()),
                'futureTermId': int(future_term_id()),
            },
        },
    )
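
# For reference: _to_api_key above converts UPPER_SNAKE_CASE config names to lowerCamelCase
# keys for the JSON feed. A standalone copy of the same one-liner with illustrative inputs;
# _to_api_key_example exists only for this sketch and is not part of the controller.
def _to_api_key_example(key):
    chunks = key.split('_')
    return f"{chunks[0].lower()}{''.join(chunk.title() for chunk in chunks[1:])}"

assert _to_api_key_example('EB_ENVIRONMENT') == 'ebEnvironment'
assert _to_api_key_example('CURRENT_TERM') == 'currentTerm'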
def app_config():
    current_term_name = berkeley.current_term_name()
    current_term_id = berkeley.current_term_id()
    future_term_id = berkeley.future_term_id()
    return tolerant_jsonify({
        'currentEnrollmentTerm': current_term_name,
        'currentEnrollmentTermId': int(current_term_id),
        'futureTermId': int(future_term_id),
        'ebEnvironment': app.config['EB_ENVIRONMENT'] if 'EB_ENVIRONMENT' in app.config else None,
        'nessieEnv': app.config['NESSIE_ENV'],
    })
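
# Illustrative response shape only, assuming the Spring 2018 fixture used in the tests above;
# the 'ebEnvironment' and 'nessieEnv' values below are hypothetical and depend on deployment config:
#
# {
#     'currentEnrollmentTerm': 'Spring 2018',
#     'currentEnrollmentTermId': 2182,
#     'futureTermId': 2188,
#     'ebEnvironment': None,          # populated only when EB_ENVIRONMENT is set
#     'nessieEnv': 'development',     # hypothetical value of NESSIE_ENV
# }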
def generate_feeds(self):
    # Translation between canvas_user_id and UID/SID is needed to merge Canvas analytics data
    # and SIS enrollment-based data.
    advisees_by_canvas_id = {}
    advisees_by_sid = {}
    self.successes = []
    self.failures = []
    profile_tables = self.generate_student_profile_tables(advisees_by_canvas_id, advisees_by_sid)
    if not profile_tables:
        raise BackgroundJobError('Failed to generate student profile tables.')

    feed_path = app.config['LOCH_S3_BOAC_ANALYTICS_DATA_PATH'] + '/feeds/'
    s3.upload_json(advisees_by_canvas_id, feed_path + 'advisees_by_canvas_id.json')
    upload_student_term_maps(advisees_by_sid)

    # Avoid processing Canvas analytics data for future terms and pre-CS terms.
    for term_id in (future_term_ids() + legacy_term_ids()):
        enrollment_term_map = s3.get_object_json(feed_path + f'enrollment_term_map_{term_id}.json')
        if enrollment_term_map:
            GenerateMergedEnrollmentTerm().refresh_student_enrollment_term(term_id, enrollment_term_map)

    canvas_integrated_term_ids = reverse_term_ids()
    app.logger.info(f'Will queue analytics generation for {len(canvas_integrated_term_ids)} terms on worker nodes.')
    result = queue_merged_enrollment_term_jobs(self.job_id, canvas_integrated_term_ids)
    if not result:
        raise BackgroundJobError('Failed to queue enrollment term jobs.')

    refresh_all_from_staging(profile_tables)
    self.update_redshift_academic_standing()
    self.update_rds_profile_indexes()
    app.logger.info('Profile generation complete; waiting for enrollment term generation to finish.')

    while True:
        sleep(1)
        enrollment_results = get_merged_enrollment_term_job_status(self.job_id)
        if not enrollment_results:
            raise BackgroundJobError('Failed to refresh RDS indexes.')
        any_pending_job = next(
            (row for row in enrollment_results if row['status'] == 'created' or row['status'] == 'started'),
            None,
        )
        if not any_pending_job:
            break

    app.logger.info('Exporting analytics data for archival purposes.')
    unload_enrollment_terms([current_term_id(), future_term_id()])

    app.logger.info('Refreshing enrollment terms in RDS.')
    with rds.transaction() as transaction:
        if self.refresh_rds_enrollment_terms(None, transaction):
            transaction.commit()
            app.logger.info('Refreshed RDS enrollment terms.')
        else:
            transaction.rollback()
            raise BackgroundJobError('Failed to refresh RDS enrollment terms.')

    status_string = f'Generated merged profiles ({len(self.successes)} successes, {len(self.failures)} failures).'
    errored = False
    for row in enrollment_results:
        status_string += f" {row['details']}"
        if row['status'] == 'error':
            errored = True

    truncate_staging_table('student_enrollment_terms')
    if errored:
        raise BackgroundJobError(status_string)
    else:
        return status_string
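
# The wait loop in generate_feeds polls until no enrollment-term job row reported by
# get_merged_enrollment_term_job_status is still 'created' or 'started'. A minimal standalone
# sketch of that completion check, assuming rows shaped like {'status': ..., 'details': ...};
# _all_term_jobs_finished is a hypothetical helper, not part of the job class.
def _all_term_jobs_finished(rows):
    return all(row['status'] not in ('created', 'started') for row in rows)

assert _all_term_jobs_finished([{'status': 'succeeded'}, {'status': 'error'}])
assert not _all_term_jobs_finished([{'status': 'succeeded'}, {'status': 'started'}])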