def update_job_schedule(job_id):
    """Update, pause, or delete the schedule definition for one job.

    DELETE removes the job's schedule and returns the remaining schedule as a
    list of job dicts. Any other method expects a JSON body of cron-trigger
    kwargs and reschedules the job; an empty JSON object pauses the job.

    Raises BadRequestError when the job id is unknown, the request body cannot
    be parsed, or APScheduler rejects the trigger arguments.
    """
    sched = get_scheduler()
    job_id = job_id.upper()
    job = sched.get_job(job_id)
    if not job:
        raise BadRequestError(f'No job found for job id: {job_id}')
    if request.method == 'DELETE':
        # logger.warn is a deprecated alias of logger.warning.
        app.logger.warning(
            f'About to delete schedule definition for job id: {job_id}')
        sched.remove_job(job_id)
        return tolerant_jsonify([job_to_dict(job) for job in sched.get_jobs()])
    else:
        # If JSON properties are present, they will be evaluated by APScheduler's cron trigger API.
        # https://apscheduler.readthedocs.io/en/latest/modules/triggers/cron.html#module-apscheduler.triggers.cron
        try:
            args = request.get_json(force=True)
        except Exception as e:
            raise BadRequestError(str(e))
        if args:
            try:
                job.reschedule(trigger='cron', **args)
            except Exception as e:
                raise BadRequestError(f'Error rescheduling job: {e}')
        # Passing an empty JSON object will pause this job.
        else:
            job.pause()
        job = sched.get_job(job_id)
        return tolerant_jsonify(job_to_dict(job))
def background_job_status():
    """Report background-job status rows for the requested (or current) date."""
    iso_date = request.args.get('date')
    if iso_date:
        date = dateutil.parser.parse(iso_date)
    else:
        date = datetime.today()
    rows = metadata.background_job_status_by_date(created_date=date) or []
    rows.sort(key=lambda r: r.get('created_at'))

    def to_api_json(row):
        started = row['created_at']
        finished = row['updated_at']
        return {
            'id': row['job_id'],
            'status': row['status'],
            'instanceId': row['instance_id'],
            'details': row['details'],
            'started': started.isoformat(),
            # An unchanged updated_at means the job has not finished yet.
            'finished': None if finished == started else finished.isoformat(),
        }
    return tolerant_jsonify([to_api_json(r) for r in rows])
def logout():
    """End the session and return a CAS logout URL pointing at the referrer's origin."""
    parts = urlparse(request.referrer)
    origin = f'{parts.scheme}://{parts.netloc}'
    logout_user()
    return tolerant_jsonify({
        'casLogoutURL': _cas_client().get_logout_url(origin),
    })
def xkcd():
    """Fetch a random xkcd comic's metadata; serve a canned comic on any failure."""
    fallback = {
        'alt': '40% of OpenBSD installs lead to shark attacks.',
        'img': 'https://imgs.xkcd.com/comics/success.png',
    }
    try:
        comic_url = http.build_url(
            f'https://xkcd.com/{randint(1, 2427)}/info.0.json')
        payload = http.request(comic_url).json()
    except Exception:
        # Deliberately best-effort: this endpoint should never fail.
        payload = fallback
    return tolerant_jsonify(payload)
def analyze_student(sid):
    """Run the SIS student import for one SID under both EDL and SIS feature flags."""
    _safety_check()
    result = {}
    for source in ('edl', 'sis'):
        with _override_edl_feature_flag(source == 'edl'):
            rows, failure_count = ImportSisStudentApi().load(all_sids=[sid])
            result[source] = {
                'failureCount': failure_count,
                'rows': rows,
            }
    return tolerant_jsonify(result)
def app_version():
    """Report the running version, plus build metadata when a build summary exists."""
    payload = {'version': version}
    build_stats = load_json('config/build-summary.json')
    payload.update(build_stats if build_stats else {'build': None})
    return tolerant_jsonify(payload)
def app_config():
    """Expose the current term and environment names to the front end."""
    term_name = app.config['CURRENT_TERM']
    term_id = sis_term_id_for_name(term_name)
    return tolerant_jsonify({
        'currentEnrollmentTerm': term_name,
        'currentEnrollmentTermId': int(term_id),
        'ebEnvironment': app.config.get('EB_ENVIRONMENT'),
        'nessieEnv': app.config['NESSIE_ENV'],
    })
def app_config():
    """Expose public config values (keys camelCased) plus current/future term info."""
    def _to_api_key(key):
        # FOO_BAR_BAZ -> fooBarBaz
        chunks = key.split('_')
        return f"{chunks[0].lower()}{''.join(chunk.title() for chunk in chunks[1:])}"
    return tolerant_jsonify(
        {
            # Dict comprehension replaces dict(generator); .get() replaces the
            # explicit membership test (missing keys still map to None).
            **{_to_api_key(key): app.config.get(key) for key in PUBLIC_CONFIGS},
            **{
                'currentEnrollmentTerm': current_term_name(),
                'currentEnrollmentTermId': int(current_term_id()),
                'futureTermId': int(future_term_id()),
            },
        },
    )
def console_available_jobs():
    """List the registered /api/job/ endpoints for the admin console."""
    def describe(rule):
        return {
            'name': rule.endpoint.replace('_', ' ').capitalize(),
            'path': rule.rule,
            'required': list(rule.arguments),
            'methods': list(rule.methods),
        }
    job_api_endpoints = [
        describe(rule)
        for rule in app.url_map.iter_rules()
        if isinstance(rule.rule, str) and rule.rule.startswith('/api/job/')
    ]
    job_api_endpoints.sort(key=lambda row: row.get('name'))
    return tolerant_jsonify(job_api_endpoints)
def app_status():
    """Health check: report reachability of the app itself, RDS, and Redshift."""
    def db_status():
        try:
            db.session.execute('SELECT 1')
        except Exception:
            app.logger.exception('Failed to connect to RDS database')
            return False
        return True
    redshift_row = redshift.fetch('SELECT 1', silent=True)
    return tolerant_jsonify({
        'app': True,
        'rds': db_status(),
        'redshift': redshift_row is not None,
    })
def analyze_term_gpa(term_id, sid):
    """Collect the merged hist-enr enrollment feed for one student/term under
    both EDL and SIS sources, keyed by source name."""
    _safety_check()
    result = {}

    class MockFeedFile:
        # Stands in for a real feed file; write() closes over the loop
        # variable `key` below, so output is bucketed by the source currently
        # being tested — presumably write() fires during
        # collect_merged_enrollments within each iteration (TODO confirm).
        def write(self, tsv):
            result[key] = tsv
    for key in ('edl', 'sis'):
        with _override_edl_feature_flag(key == 'edl'):
            GenerateMergedHistEnrFeeds().collect_merged_enrollments(
                sids=[sid],
                term_id=term_id,
                feed_file=MockFeedFile(),
            )
    return tolerant_jsonify(result)
def app_config():
    """Expose current/future term info and environment names to the front end."""
    return tolerant_jsonify({
        'currentEnrollmentTerm': berkeley.current_term_name(),
        'currentEnrollmentTermId': int(berkeley.current_term_id()),
        'futureTermId': int(berkeley.future_term_id()),
        'ebEnvironment': app.config.get('EB_ENVIRONMENT'),
        'nessieEnv': app.config['NESSIE_ENV'],
    })
def analyze_edl_registration_data(sid):
    """Collect registration data for one SID under both EDL and SIS feature flags."""
    # TODO: All 'analyze_edl' API endpoints must start with safety_check().
    _safety_check()
    result = {}

    class MockRegistrationsJob(AbstractRegistrationsJob):
        # Satisfies the abstract interface without running a real import.
        def run(self, load_mode='new'):
            pass
    job = MockRegistrationsJob()
    # NOTE(review): demographics_key is derived from the flag's value *before*
    # the per-source override below, so both the 'edl' and 'sis' results share
    # one key name — confirm this is intentional and should not move inside
    # the loop.
    demographics_key = 'demographics' if feature_flag_edl() else 'api_demographics'
    for key in ('edl', 'sis'):
        with _override_edl_feature_flag(key == 'edl'):
            result[key] = {
                'term_gpas': [],
                'last_registrations': [],
                demographics_key: [],
            }
            job.get_registration_data_per_sids(result[key], [sid])
    return tolerant_jsonify(result)
def update_scheduled_job_args(job_id):
    """Merge JSON args from the request into a scheduled job's stored args.

    The job's stored args are positional; the first two elements are preserved
    and the incoming JSON is merged over the third (when present) or used as
    the third outright.

    Raises BadRequestError on an unparseable or empty body, an unknown job id,
    or any failure while modifying the job.
    """
    try:
        args = request.get_json(force=True)
    except Exception as e:
        raise BadRequestError(str(e))
    if not args:
        # Plain string: the original f-string had no placeholders (F541).
        raise BadRequestError('Could not parse args from request')
    sched = get_scheduler()
    job_id = job_id.upper()
    job = sched.get_job(job_id)
    if not job:
        raise BadRequestError(f'No job found for job id: {job_id}')
    try:
        existing_args = job.args
        if len(existing_args) > 2:
            # Copy before updating so the job's current args are not mutated.
            new_args = dict(existing_args[2])
            new_args.update(args)
        else:
            new_args = args
        job.modify(args=[existing_args[0], existing_args[1], new_args])
    except Exception as e:
        raise BadRequestError(f'Error updating job args: {e}')
    job = sched.get_job(job_id)
    return tolerant_jsonify(job_to_dict(job))
def get_job_schedule():
    """Return the full schedule as a list of job dicts."""
    scheduler = get_scheduler()
    jobs = scheduler.get_jobs()
    return tolerant_jsonify([job_to_dict(j) for j in jobs])
def cas_login_url():
    """Return the CAS login URL, using the request referrer as the post-login target."""
    client = _cas_client(request.referrer)
    return tolerant_jsonify({'casLoginURL': client.get_login_url()})
def respond_with_status(job_started):
    """Translate a job-launch boolean into a JSON status payload."""
    status = 'started' if job_started else 'errored'
    return tolerant_jsonify({'status': status})
def cas_login_url():
    """Return the CAS login URL, targeting the referrer when one is present."""
    # Normalize a missing or empty referrer to None.
    return tolerant_jsonify({
        'casLoginURL': _cas_client(request.referrer or None).get_login_url(),
    })
def to_json(self):
    """Serialize as a JSON message response, or an empty body when there is no message."""
    if not self.message:
        return ''
    return tolerant_jsonify({'message': self.message})
def logout():
    """Log out the current user and return the CAS logout URL."""
    logout_user()
    payload = {'casLogoutURL': _cas_client().get_logout_url()}
    return tolerant_jsonify(payload)
def handle_unexpected_error(error):
    """Log the unexpected exception and respond with a generic 500."""
    app.logger.exception(error)
    body = {'message': 'An unexpected server error occurred.'}
    return tolerant_jsonify(body), 500
def my_profile():
    """Return the authenticated user's UID, or null for anonymous visitors."""
    if current_user.is_authenticated:
        me = {'uid': current_user.get_id()}
    else:
        me = None
    return tolerant_jsonify(me)