def create_user(
    data: UserCreate,
    background_tasks: BackgroundTasks,
    db: Session = Depends(deps.get_db),
    rdc: RedisCache = Depends(deps.get_redis),
) -> Any:
    """Endpoint to create a user."""
    user = crud_user.get_user_by_email(db, data.email)
    if user:
        raise HTTPException(status_code=400,
                            detail='Email address already registered!')
    user = crud_user.create(db, data)
    rdc.invalidate_cache_prefix("providers-list")
    token = security.generate_token(str(user.id), "activate",
                                    datetime.utcnow() + timedelta(days=31))
    background_tasks.add_task(mail.send_account_activation_email, user.name,
                              user.email, token)
    return {
        'detail':
        'We sent you an email to confirm your registration; please check your inbox.'
    }

def handle_decline_mission(db: Session, mission_uuid: str, volunteer_id: int,
                           error_state: models.MissionErrorState,
                           background_tasks: BackgroundTasks):
    # fetch model instances
    mission = db.query(
        models.Mission).filter(models.Mission.uuid == mission_uuid).first()
    volunteer_mission = db.query(models.VolunteerMission).filter(
        and_(models.VolunteerMission.mission_id == mission.id,
             models.VolunteerMission.volunteer_id == volunteer_id)).first()
    volunteer_mission.state = VolunteerMissionState.declined
    if error_state == MissionErrorState.no_one_answered_call:
        if mission.error_state == MissionErrorState.no_one_answered_call:
            # the mission already had no answer, so mark it as an error
            mission.state = MissionState.error
            db.commit()
        else:
            # schedule a retry in 60 minutes; keep in mind that the other
            # volunteers will still have the chance to call the elder
            mission.error_state = MissionErrorState.no_one_answered_call
            set_mission_state(db, mission, MissionState.approved)
    elif error_state == MissionErrorState.not_relevant_anymore:
        # mission will be handled by the call center
        set_mission_error(db, mission, MissionErrorState.not_relevant_anymore)
    elif error_state == MissionErrorState.refuse_mission:
        # mission was refused by the volunteer, rerun the search now
        set_mission_state(db, mission, MissionState.approved)
        background_tasks.add_task(search_task_caller, mission.uuid)

def _tasks_from_event(self, event: Hashable, *args: Any,
                      **kwargs: Any) -> BackgroundTasks:
    tasks = BackgroundTasks()
    for f in list(self._handlers[event].values()):
        tasks.add_task(f, *args, **kwargs)
    return tasks

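# Hedged usage sketch (not from the source): a minimal emitter built around
# _tasks_from_event, assuming handlers live in a dict-of-dicts registry as the
# lookup self._handlers[event].values() suggests. The EventEmitter name and
# the on()/emit_response() methods are illustrative assumptions.
from typing import Any, Callable, Dict, Hashable

from starlette.background import BackgroundTasks
from starlette.responses import PlainTextResponse


class EventEmitter:
    def __init__(self) -> None:
        self._handlers: Dict[Hashable, Dict[int, Callable[..., Any]]] = {}

    def on(self, event: Hashable, handler: Callable[..., Any]) -> None:
        # register a handler under a stable key
        self._handlers.setdefault(event, {})[id(handler)] = handler

    def _tasks_from_event(self, event: Hashable, *args: Any,
                          **kwargs: Any) -> BackgroundTasks:
        tasks = BackgroundTasks()
        for f in list(self._handlers[event].values()):
            tasks.add_task(f, *args, **kwargs)
        return tasks

    def emit_response(self, event: Hashable, body: str) -> PlainTextResponse:
        # the handlers run only after the response body has been sent
        return PlainTextResponse(body,
                                 background=self._tasks_from_event(event))
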
async def end(self):
    """Ends the given match."""
    match = await self.get()
    if match.error:
        return match
    values = {
        "match_id": self.match_id,
    }
    query = """UPDATE scoreboard_total SET status = 0 WHERE match_id = :match_id"""
    await self.current_league.obj.database.execute(query=query, values=values)
    # We just delete the map pool for the given match.
    query = "DELETE FROM map_pool WHERE match_id = :match_id"
    await self.current_league.obj.database.execute(query=query, values=values)
    background_tasks = BackgroundTasks()
    league_details = await self.current_league.details()
    if not league_details.error:
        background_tasks.add_task(
            Webhook(uri=league_details["websocket_endpoint"],
                    data=match.data).send)
        background_tasks.add_task(
            self.current_league.obj.server(
                server_id=match.data["server_id"]).stop)
    return response(data=match.data, background=background_tasks)

async def sso_callback(request: Request,
                       background_tasks: BackgroundTasks,
                       sso_type: str = Path(default=None,
                                            description="social media type",
                                            example="google")):
    """Generate a login token after a successful social media login."""
    existing_user, user_details, access_token = await Authentication.verify_and_process(
        request, sso_type)
    if not existing_user and Utility.email_conf["email"]["enable"]:
        background_tasks.add_task(
            Utility.format_and_send_mail,
            mail_type='password_generated',
            email=user_details['email'],
            first_name=user_details['first_name'],
            password=user_details['password'].get_secret_value())
    return {
        "data": {
            "access_token": access_token,
            "token_type": "bearer"
        },
        "message": "It is your responsibility to keep the token secret. "
                   "If it is leaked, others may gain access to your system.",
    }

async def resolve_get_user(user, obj, info):
    task = BackgroundTasks()
    task.add_task(test_func)
    task.add_task(testing_func_two, "I work now")
    # stash the tasks on the request state so they run after the response
    request = info.context["request"]
    request.state.background = task
    return True

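# Hedged sketch (not from the source): request.state.background only takes
# effect if something copies it onto the outgoing response. A middleware
# along these lines (the class name is an assumption) is one way to wire
# that up in a Starlette app.
from starlette.middleware.base import BaseHTTPMiddleware


class BackgroundTaskMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request, call_next):
        response = await call_next(request)
        tasks = getattr(request.state, "background", None)
        if tasks is not None:
            # the queued tasks run after the response has been sent
            response.background = tasks
        return response
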
def create_test_run(cls, background_tasks: BackgroundTasks):
    """Creates a new test run in the DB, ensuring that no other tests are
    still running, and starts a background task to run it.

    :returns dict:
    """
    output = {
        'created': False,
        'id': None
    }
    running = cls.get_currently_running()
    if running.count():
        print('test already running')
        output['id'] = running[0].id
        return output
    # No tests are still running.
    new_run = cls().save()
    background_tasks.add_task(new_run.execute_pytest)
    output['id'] = new_run.id
    output['created'] = True
    return output

def register(
    *,
    db: Session = Depends(get_db),
    password: str = Body(...),
    email: EmailStr = Body(...),
    full_name: str = Body(None),
    background_tasks: BackgroundTasks,
):
    """Register a new user."""
    # Why not accept a UserCreate schema directly? Because then the caller
    # could set fields on themselves, such as becoming a superuser.
    # Registration is only allowed when USERS_OPEN_REGISTRATION is True.
    if not settings.USERS_OPEN_REGISTRATION:
        raise HTTPException(status_code=403, detail="forbidden for register")
    user = crud.user.get_by_email(db, email=email)
    if user:
        raise HTTPException(status_code=400, detail="User already exists")
    user_in = schemas.UserCreate(password=password, email=email,
                                 full_name=full_name)
    user = crud.user.create(db, obj=user_in)
    # send a confirmation email
    if settings.EMAILS_ENABLED and user.email:
        confirm_token = create_access_token(
            subject=email,
            expires_delta=timedelta(settings.EMAIL_CONFIRM_TOKEN_EXPIRE))
        background_tasks.add_task(send_confirm_email,
                                  email_to=user.email,
                                  token=confirm_token)
    return user

def predict(request: Any = Body(..., media_type="application/json"),
            debug=False):
    api = local_cache["api"]
    predictor_impl = local_cache["predictor_impl"]
    debug_obj("payload", request, debug)
    prediction = predictor_impl.predict(request)
    debug_obj("prediction", prediction, debug)
    try:
        json_string = json.dumps(prediction)
    except Exception:
        # fall back to an encoder that handles non-standard types
        json_string = util.json_tricks_encoder().encode(prediction)
    response = Response(content=json_string, media_type="application/json")
    if api.tracker is not None:
        try:
            predicted_value = api.tracker.extract_predicted_value(prediction)
            api.post_tracker_metrics(predicted_value)
            if (api.tracker.model_type == "classification"
                    and predicted_value not in local_cache["class_set"]):
                tasks = BackgroundTasks()
                tasks.add_task(api.upload_class, class_name=predicted_value)
                local_cache["class_set"].add(predicted_value)
                response.background = tasks
        except Exception:
            cx_logger().warn("unable to record prediction metric",
                             exc_info=True)
    return response

def predict(request: Any = Body(..., media_type="application/json"),
            debug=False):
    api = local_cache["api"]
    predictor_impl = local_cache["predictor_impl"]
    debug_obj("payload", request, debug)
    prediction = predictor_impl.predict(request)
    try:
        json_string = json.dumps(prediction)
    except Exception as e:
        raise UserRuntimeException(
            "the return value of predict() or one of its nested values is not JSON serializable",
            str(e),
        ) from e
    debug_obj("prediction", json_string, debug)
    response = Response(content=json_string, media_type="application/json")
    if api.tracker is not None:
        try:
            predicted_value = api.tracker.extract_predicted_value(prediction)
            api.post_tracker_metrics(predicted_value)
            if (api.tracker.model_type == "classification"
                    and predicted_value not in local_cache["class_set"]):
                tasks = BackgroundTasks()
                tasks.add_task(api.upload_class, class_name=predicted_value)
                local_cache["class_set"].add(predicted_value)
                response.background = tasks
        except Exception:
            cx_logger().warn("unable to record prediction metric",
                             exc_info=True)
    return response

def image_response(background_tasks: BackgroundTasks, image_id: str):
    """Serves the encoded image and deletes it shortly after."""
    path = os.sep.join((TMP, image_id)) + ".png"
    if os.path.isfile(path):
        # delete the file once the response has been sent
        background_tasks.add_task(remove_file, path)
        return FileResponse(path)
    return "Encoded pictures are available only once"

async def encode_img(background_tasks: BackgroundTasks,
                     key: str = Form(...),
                     secret: str = Form(...)):
    pxl = Rothko(key).encode_to_img(secret, scale=True)
    save_path = pxl.save(TMP)
    background_tasks.add_task(remove_file, save_path)
    return FileResponse(save_path)

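# Hedged sketch (not from the source): the two endpoints above assume a
# remove_file helper; a minimal version that tolerates a file that is
# already gone might look like this.
import os


def remove_file(path: str) -> None:
    """Delete a temporary file, ignoring the case where it no longer exists."""
    try:
        os.remove(path)
    except FileNotFoundError:
        pass
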
async def put_stack_instance(
        background_tasks: BackgroundTasks,
        stack_instance_update: StackInstanceUpdate,
        document_manager: DocumentManager = Depends(get_document_manager),
        stack_manager: StackManager = Depends(get_stack_manager),
        redis=Depends(get_redis)):
    """Updates a stack instance by using a StackInstanceUpdate object."""
    logger.info("[StackInstances PUT] Received PUT request")
    to_be_deleted = stack_manager.check_delete_services(stack_instance_update)
    (stack_instance, return_result) = stack_manager.process_stack_request(
        stack_instance_update, "update")
    if stack_instance is None:
        raise HTTPException(422, return_result)
    # Perform invocations
    if not stack_instance_update.disable_invocation:
        for service in to_be_deleted:
            background_tasks.add_task(create_job_per_service, service,
                                      document_manager, "delete", redis,
                                      stack_instance, to_be_deleted)
        copy_stack_instance = stack_instance.copy(deep=True)
        delete_services(to_be_deleted, copy_stack_instance)
        background_tasks.add_task(create_job_for_agent, copy_stack_instance,
                                  "update", redis)
    document_manager.write_stack_instance(stack_instance)
    return return_result

async def app(scope, receive, send):
    tasks = BackgroundTasks()
    tasks.add_task(increment)
    tasks.add_task(increment)
    response = Response("tasks initiated", media_type="text/plain",
                        background=tasks)
    await response(scope, receive, send)

async def clear_cache(name: str, background_tasks: BackgroundTasks) -> None:
    cache = Cache(Cache.REDIS,
                  endpoint=app_settings.CACHE_HOST,
                  port=app_settings.CACHE_PORT)
    if name == "all":
        background_tasks.add_task(cache.delete, "/*")
    else:
        background_tasks.add_task(cache.delete, f"/{name}*")

async def email_by_gmail2(request: Request, mailing_list: SendEmail,
                          background_tasks: BackgroundTasks):
    t = time()
    background_tasks.add_task(
        send_email,
        mailing_list=mailing_list.email_to
    )
    print("+*+*" * 30)
    print(str(round((time() - t) * 1000, 5)) + "ms")
    print("+*+*" * 30)
    return MessageOk()

async def xls2html(request: Request, back_ground_tasks: BackgroundTasks):
    form_data = await request.form()
    file = form_data.get("file").file._file
    if not file.getvalue():
        return templates.TemplateResponse(
            "web/failure.html",
            {"request": request, "message": "No file was uploaded!"})
    back_ground_tasks.add_task(send_html, file)
    return templates.TemplateResponse(
        "web/success.html",
        {"request": request, "email_address": "*****@*****.**"})

async def create_summary(
        payload: SummaryPayloadSchema,
        background_tasks: BackgroundTasks) -> SummaryResponseSchema:
    summary_id = await crud.post(payload)
    background_tasks.add_task(generate_summary, summary_id, payload.url)
    response_object = {"id": summary_id, "url": payload.url}
    return response_object

async def asgi(receive, send):
    tasks = BackgroundTasks()
    tasks.add_task(increment, amount=1)
    tasks.add_task(increment, amount=2)
    tasks.add_task(increment, amount=3)
    response = Response("tasks initiated", media_type="text/plain",
                        background=tasks)
    await response(receive, send)

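# Hedged sketch (not from the source): the ASGI test apps above assume an
# increment helper that mutates some shared counter; a minimal stand-in
# (names assumed) could look like this.
TASK_COUNTER = 0


def increment(amount: int = 1) -> None:
    """Bump a module-level counter so a test can assert the tasks ran."""
    global TASK_COUNTER
    TASK_COUNTER += amount
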
def predict(self, request, tasks: BackgroundTasks):
    payload = request['payload']
    # use np.round to squeeze to binary {0,1}
    results = list(np.round(self.model.predict(payload)))
    tasks.add_task(log_to_arize, results, self.arize_client, self.model_id,
                   self.model_version)
    response = {'result': results}
    return response

async def background_jobs(request):
    def sync_task():
        print("Doing the sync task ! ✨")

    async def async_task():
        print("Doing the async task ! 🎉")

    tasks = BackgroundTasks()
    tasks.add_task(sync_task)
    tasks.add_task(async_task)

    return PlainTextResponse("Triggering background jobs", background=tasks)

async def add_up(
        background_tasks: BackgroundTasks,
        url: HttpUrl = Query(...),
        up_repo: UpRepository = Depends(get_repository(UpRepository))
) -> UpInDB:
    up_info = await get_up_info(url=url)
    create = get_create_from_dict(CREATE_MODEL=UpInCreate)(up_info)
    up_in_db = await up_repo.create(CREATE_MODEL=UpInCreate,
                                    RETURN_MODEL=UpInDB)(create)
    background_tasks.add_task(update_up_info, url=url, up_repo=up_repo)
    return up_in_db

async def register_dicom(request):
    """Endpoint for registering newly received DICOM files. Called by the
    getdcmtags module."""
    payload = dict(await request.form())
    filename = payload.get("filename", "")
    file_uid = payload.get("file_uid", "")
    series_uid = payload.get("series_uid", "")
    query = dicom_files.insert().values(
        filename=filename,
        file_uid=file_uid,
        series_uid=series_uid,
        time=datetime.datetime.now(),
    )
    tasks = BackgroundTasks()
    tasks.add_task(execute_db_operation, operation=query)
    return JSONResponse({'ok': ''}, background=tasks)

async def background_jobs(request):
    def sync_noop():
        pass

    async def async_noop():
        pass

    tasks = BackgroundTasks()
    tasks.add_task(sync_noop)
    tasks.add_task(async_noop)

    return PlainTextResponse("Triggering background jobs", background=tasks)

async def create_propagation_task(
        token: str,
        request: Request,
        background_tasks: BackgroundTasks,
        jessigod_config=fastapi.Depends(get_jessigod_config),
):
    # reject requests that do not carry the bot's secret token
    if not compare_digest(jessigod_config.bots.telegram_bot.token, token):
        return 'not ok'
    body = await request.body()
    background_tasks.add_task(core.handle_telegram_update, jessigod_config,
                              json.loads(body))
    return 'ok'

async def post_webgui_event(request):
    """Endpoint for logging relevant events of the webgui."""
    payload = dict(await request.form())
    sender = payload.get("sender", "Unknown")
    event = payload.get("event", monitor.w_events.UNKNOWN)
    user = payload.get("user", "UNKNOWN")
    description = payload.get("description", "")
    query = webgui_events.insert().values(
        sender=sender,
        event=event,
        user=user,
        description=description,
        time=datetime.datetime.now(),
    )
    tasks = BackgroundTasks()
    tasks.add_task(execute_db_operation, operation=query)
    return JSONResponse({'ok': ''}, background=tasks)

async def post_mercure_event(request):
    """Endpoint for receiving mercure system events."""
    payload = dict(await request.form())
    sender = payload.get("sender", "Unknown")
    event = payload.get("event", monitor.h_events.UNKNOWN)
    severity = int(payload.get("severity", monitor.severity.INFO))
    description = payload.get("description", "")
    query = mercure_events.insert().values(
        sender=sender,
        event=event,
        severity=severity,
        description=description,
        time=datetime.datetime.now(),
    )
    tasks = BackgroundTasks()
    tasks.add_task(execute_db_operation, operation=query)
    return JSONResponse({'ok': ''}, background=tasks)

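# Hedged sketch (not from the source): register_dicom, post_webgui_event, and
# post_mercure_event all hand their insert statements to an
# execute_db_operation helper. Assuming the `databases` package with a shared
# connection (the DSN here is a placeholder), it might look like this.
import databases

database = databases.Database("postgresql://localhost/mercure")  # assumed DSN


async def execute_db_operation(operation) -> None:
    """Run a single SQLAlchemy Core statement on the shared connection."""
    await database.execute(operation)
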
def predict(request: Request):
    tasks = BackgroundTasks()
    api = local_cache["api"]
    predictor_impl = local_cache["predictor_impl"]
    dynamic_batcher = local_cache["dynamic_batcher"]
    kwargs = build_predict_kwargs(request)

    if dynamic_batcher:
        prediction = dynamic_batcher.predict(**kwargs)
    else:
        prediction = predictor_impl.predict(**kwargs)

    if isinstance(prediction, bytes):
        response = Response(content=prediction,
                            media_type="application/octet-stream")
    elif isinstance(prediction, str):
        response = Response(content=prediction, media_type="text/plain")
    elif isinstance(prediction, Response):
        response = prediction
    else:
        try:
            json_string = json.dumps(prediction)
        except Exception as e:
            raise UserRuntimeException(
                str(e),
                "please return an object that is JSON serializable (including its nested fields), a bytes object, "
                "a string, or a starlette.response.Response object",
            ) from e
        response = Response(content=json_string,
                            media_type="application/json")

    if local_cache["provider"] != "local" and api.monitoring is not None:
        try:
            predicted_value = api.monitoring.extract_predicted_value(
                prediction)
            api.post_monitoring_metrics(predicted_value)
            if (api.monitoring.model_type == "classification"
                    and predicted_value not in local_cache["class_set"]):
                tasks.add_task(api.upload_class, class_name=predicted_value)
                local_cache["class_set"].add(predicted_value)
        except Exception:
            logger().warn("unable to record prediction metric", exc_info=True)

    if util.has_method(predictor_impl, "post_predict"):
        kwargs = build_post_predict_kwargs(prediction, request)
        request_thread_pool.submit(predictor_impl.post_predict, **kwargs)

    if len(tasks.tasks) > 0:
        response.background = tasks

    return response

def create_appointments(
        data: AppointmentCreate,
        background_tasks: BackgroundTasks,
        user: User = Depends(deps.get_user),
        db: Session = Depends(deps.get_db),
        rdc: RedisCache = Depends(deps.get_redis)
) -> Any:
    """Endpoint to create an appointment."""
    db_provider = crud_user.get_user_by_id(db, str(data.provider_id))
    if not db_provider:
        raise HTTPException(status_code=404, detail="Hairdresser not found")
    current_date = datetime.now()
    compare_date = data.date.replace(tzinfo=None)
    if compare_date < current_date:
        raise HTTPException(
            status_code=400,
            detail="You cannot book an appointment on a past date")
    if data.date.hour < 8 or data.date.hour > 17:
        raise HTTPException(
            status_code=400,
            detail="You can only create appointments between 8:00 and 17:00")
    if data.provider_id == user.id:
        raise HTTPException(
            status_code=400,
            detail="You cannot book an appointment with yourself")
    validate_date = crud_appointment.get_appointment_by_date(
        db, data.provider_id, data.date)
    if validate_date:
        raise HTTPException(status_code=400,
                            detail="This time slot is already booked")
    appointment = crud_appointment.create(db, data, user)
    msg = (f"New appointment from {user.name} {user.surname} "
           f"for {date.format_date(data.date)}")
    background_tasks.add_task(crud_notification.create, str(data.provider_id),
                              msg)
    date_time = data.date
    rdc.invalidate_cache(
        f"providers-appointments:{data.provider_id}:{date_time.year}:{date_time.month}:{date_time.day}"
    )
    rdc.invalidate_cache(f"user-appointments:{user.id}")
    return appointment

async def get_revisions(page_id: int, bg_tasks: BackgroundTasks) -> WikiPage:
    """Return revisions data for a given page ID."""
    page = await _try_cached_page(page_id)
    if page:
        return page
    page = await _perform_revisions_request(page_id)
    # cache the freshly fetched page after the response is sent
    bg_tasks.add_task(
        ctx.redis.set,
        page_id,
        pickle.dumps(page),
        expire=config.CACHE_TTL_IN_SECONDS,
    )
    return page

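# Hedged sketch (not from the source): a matching _try_cached_page helper,
# assuming the same ctx.redis connection and pickle serialization used above,
# could look like this.
import pickle


async def _try_cached_page(page_id: int):
    """Return the cached page for page_id, or None on a cache miss."""
    raw = await ctx.redis.get(page_id)
    if raw is None:
        return None
    return pickle.loads(raw)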