def refresh_request_handler(term_id, load_only=False):
    """Handle a start refresh admin request by returning an error status or starting the job on a background thread."""
    job_state = JobProgress().get()
    if job_state is None or (not job_state['start']) or job_state['end']:
        job_type = 'Load' if load_only else 'Refresh'
        JobProgress().start({
            'job_type': job_type,
            'term_id': term_id,
        })
        app.logger.warn('About to start background thread')
        thread = Thread(
            target=background_thread_refresh,
            daemon=True,
            kwargs={
                'app_arg': app._get_current_object(),
                'term_id': term_id,
                'job_type': job_type,
            },
        )
        thread.start()
        return {
            'progress': JobProgress().get(),
        }
    else:
        return {
            'error': 'Cannot start a new refresh job',
            'progress': job_state,
        }

def test_start_and_end(self):
    assert JobProgress().get() is None
    progress = JobProgress().start()
    assert progress['start'].startswith(str(date.today()))
    assert progress['end'] is None
    progress = JobProgress().end()
    assert progress['end'].startswith(str(date.today()))

def continue_request_handler():
    """Continue an interrupted cache refresh or load job (skipping the optional ASC import)."""
    # WARNING: There is currently no protection against duplicate continuation requests. Admins need to ensure
    # that the refresh job is currently inactive.
    job_state = JobProgress().get()
    if job_state and job_state['start'] and not job_state['end']:
        term_id = job_state['term_id']
        thread = Thread(
            target=background_thread_refresh,
            daemon=True,
            kwargs={
                'app_arg': app._get_current_object(),
                'term_id': term_id,
                'continuation': True,
            },
        )
        thread.start()
        return {
            'progress': JobProgress().get(),
        }
    else:
        return {
            'error': 'Cannot continue this refresh job',
            'progress': job_state,
        }

def test_start_with_stored_properties(self):
    assert JobProgress().get() is None
    JobProgress().start({'job_type': 'refresh', 'term_id': '2178'})
    progress = JobProgress().get()
    assert progress['start'].startswith(str(date.today()))
    assert progress['job_type'] == 'refresh'
    assert progress['term_id'] == '2178'

def refresh_request_handler(term_id, load_only=False):
    """Handle a start refresh admin request by returning an error status or starting the job on a background thread."""
    job_state = JobProgress().get()
    if job_state and job_state['start'] and not job_state['end']:
        app.logger.error(f'Previous refresh job did not finish normally: {job_state}')
        JobProgress().delete()
    job_type = 'Load' if load_only else 'Refresh'
    JobProgress().start({
        'job_type': job_type,
        'term_id': term_id,
    })
    app.logger.warn('About to start background thread')
    thread = Thread(
        target=background_thread_refresh,
        daemon=True,
        kwargs={
            'app_arg': app._get_current_object(),
            'term_id': term_id,
            'job_type': job_type,
        },
    )
    thread.start()
    return {
        'progress': JobProgress().get(),
    }

def cancel_refresh_in_progress(term_id):
    progress = JobProgress().get()
    if progress and progress['job_type'] == 'Refresh':
        # Drop the staging table.
        json_cache.drop_staging_table()
    progress = JobProgress().delete()
    return {
        'progressDeleted': progress,
    }

def test_updates_a_started_job(self):
    assert JobProgress().start()
    progress = JobProgress().update('First step')
    assert len(progress['steps']) == 1
    assert progress['steps'][0].startswith(str(date.today()))
    assert progress['steps'][0].endswith('First step')
    progress = JobProgress().update('Next step')
    assert len(progress['steps']) == 2
    assert progress['steps'][1].endswith('Next step')

def background_thread_refresh(app_arg, term_id):
    with app_arg.app_context():
        try:
            load_term(term_id)
            JobProgress().end()
        except Exception as e:
            app.logger.exception(e)
            app.logger.error('Background thread is stopping')
            JobProgress().update(f'An unexpected error occurred: {e}')
            raise e

def load_all_terms():
    job_progress = JobProgress().get()
    terms_done = job_progress.get('terms_done', [])
    all_terms = berkeley.all_term_ids()
    while terms_done != all_terms:
        if len(terms_done) == len(all_terms):
            app.logger.error(f'Unexpected terms_done value; stopping load: {terms_done}')
            return
        term_id = next(t for t in all_terms if t not in terms_done)
        load_term(term_id)
        terms_done.append(term_id)
        JobProgress().update(f'Term {term_id} loaded', properties={'terms_done': terms_done})

def background_thread_refresh(app_arg, term_id, job_type, continuation=False):
    with app_arg.app_context():
        try:
            if job_type == 'Refresh':
                refresh_term(term_id, continuation)
            else:
                load_term(term_id)
            JobProgress().end()
        except Exception as e:
            app.logger.exception(e)
            app.logger.error('Background thread is stopping')
            JobProgress().update(f'An unexpected error occurred: {e}')
            raise e

def load_term(term_id=berkeley.current_term_id()):
    if term_id == 'all':
        load_all_terms()
        return
    JobProgress().update(f'About to refresh alerts for term {term_id}')
    refresh_alerts(term_id)
    if term_id == berkeley.current_term_id():
        JobProgress().update('About to load filtered cohort counts')
        load_filtered_cohort_counts()
        JobProgress().update('About to update curated cohort memberships')
        update_curated_cohort_lists()

def refresh_term(term_id=berkeley.current_term_id(), continuation=False):
    if not continuation or not json_cache.staging_table_exists():
        JobProgress().update('About to drop/create staging table')
        json_cache.drop_staging_table()
        json_cache.create_staging_table(exclusions_for_term(term_id))
    json_cache.set_staging(True)
    load_term(term_id)
    JobProgress().update('About to refresh from staging table')
    refresh_count = json_cache.refresh_from_staging(inclusions_for_term(term_id))
    # TODO Currently we're not looping anything into the staging table, so we expect refresh count to be zero.
    # If a more considered set of cache entries comes back into the loop, this error message should come back
    # too.
    # if refresh_count == 0:
    #     JobProgress().update('ERROR: No cache entries copied from staging')
    JobProgress().update(f'{refresh_count} cache entries copied from staging')

def load_term(term_id=berkeley.current_term_id()):
    if term_id == 'all':
        load_all_terms()
        return
    JobProgress().update(f'About to refresh alerts for term {term_id}')
    refresh_alerts(term_id)
    if term_id == berkeley.current_term_id():
        JobProgress().update('About to refresh department memberships')
        refresh_department_memberships()
        JobProgress().update('About to refresh CalNet attributes for active users')
        refresh_calnet_attributes()
        JobProgress().update('About to load filtered cohort counts')
        load_filtered_cohort_counts()
        JobProgress().update('About to update curated group memberships')
        update_curated_group_lists()

def clear_cachejob():
    progress = JobProgress().delete()
    return tolerant_jsonify({
        'progressDeleted': progress,
    })

def get_cachejob_status():
    progress = JobProgress().get()
    return tolerant_jsonify({
        'progress': progress,
    })

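# The handlers above return plain dicts or tolerant_jsonify() responses, which suggests they are meant to
# sit behind admin API endpoints. The blueprint name, URL paths, and view function names below are
# illustrative assumptions only; nothing in the code above specifies them. Only refresh_request_handler,
# continue_request_handler, get_cachejob_status, and tolerant_jsonify come from the functions shown here.
from flask import Blueprint

cachejob_api = Blueprint('cachejob_api', __name__)  # hypothetical blueprint


@cachejob_api.route('/api/admin/cachejob/refresh/<term_id>', methods=['POST'])  # assumed path
def start_refresh(term_id):
    # Kick off a full refresh for the given term; the handler decides whether a job may start.
    return tolerant_jsonify(refresh_request_handler(term_id))


@cachejob_api.route('/api/admin/cachejob/continue', methods=['POST'])  # assumed path
def continue_refresh():
    # Resume an interrupted job based on the stored JobProgress state.
    return tolerant_jsonify(continue_request_handler())


@cachejob_api.route('/api/admin/cachejob/status')  # assumed path
def cachejob_status():
    # Report the current job progress without changing it.
    return get_cachejob_status()
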
def test_multiple_job_names(self):
    assert JobProgress('alphonse').start()
    assert JobProgress('alphonse').start() is False
    assert JobProgress('gaston').start()
    assert JobProgress('alphonse').delete()
    assert JobProgress('gaston').start() is False

def test_delete_job(self):
    assert JobProgress().start()
    assert JobProgress().start() is False
    assert JobProgress().delete()
    assert JobProgress().start()

def test_does_not_update_an_ended_job(self):
    assert JobProgress().start()
    assert JobProgress().end()
    progress = JobProgress().update('Into the wall')
    assert progress is False

def test_does_not_update_an_unstarted_job(self):
    assert JobProgress().get() is None
    progress = JobProgress().update('False step')
    assert progress is False

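# A minimal in-memory sketch of the JobProgress contract exercised by the tests and handlers above:
# get/start/end/update/delete, optional job names, stored properties, and timestamped steps. The real
# class almost certainly persists progress to a shared store (for example, the json_cache) so that web
# workers and background threads see the same state; the module-level dict here is only an assumption
# used to keep the sketch self-contained and runnable.
from datetime import datetime

_PROGRESS_STORE = {}  # hypothetical stand-in for a shared cache


class JobProgress:
    def __init__(self, name='job'):
        self.name = name

    def get(self):
        # Return the stored progress dict, or None if no job has been recorded under this name.
        return _PROGRESS_STORE.get(self.name)

    def start(self, properties=None):
        # Refuse to start if a job with this name is already underway.
        progress = self.get()
        if progress and progress['start'] and not progress['end']:
            return False
        progress = {
            'start': str(datetime.now()),
            'end': None,
            'steps': [],
        }
        progress.update(properties or {})
        _PROGRESS_STORE[self.name] = progress
        return progress

    def update(self, step_description, properties=None):
        # Append a timestamped step; only valid while a job is started and not yet ended.
        progress = self.get()
        if not progress or not progress['start'] or progress['end']:
            return False
        progress['steps'].append(f'{datetime.now()} : {step_description}')
        progress.update(properties or {})
        return progress

    def end(self):
        # Mark the job as finished; no further updates are accepted afterward.
        progress = self.get()
        if not progress or not progress['start'] or progress['end']:
            return False
        progress['end'] = str(datetime.now())
        return progress

    def delete(self):
        # Remove and return the stored progress (None, and therefore falsy, if nothing was stored).
        return _PROGRESS_STORE.pop(self.name, None)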