Esempio n. 1
0
def create_user(
    data: UserCreate,
    background_tasks: BackgroundTasks,
    db: Session = Depends(deps.get_db),
    rdc: RedisCache = Depends(deps.get_redis)
) -> Any:
    """
    Create a new user account and e-mail an activation link.

    Raises HTTPException(400) when the e-mail address is already registered.
    The activation mail is sent in the background so the request is not
    blocked by SMTP latency.
    """
    user = crud_user.get_user_by_email(db, data.email)
    if user:
        # Fixed typo in the user-facing message ("registrador" -> "registrado").
        raise HTTPException(status_code=400,
                            detail='Endereço de email já registrado!')

    user = crud_user.create(db, data)
    # A new user invalidates any cached provider listings.
    rdc.invalidate_cache_prefix("providers-list")
    # Activation token stays valid for 31 days.
    token = security.generate_token(str(user.id), "activate",
                                    datetime.utcnow() + timedelta(days=31))
    background_tasks.add_task(mail.send_account_activation_email, user.name,
                              user.email, token)

    return {
        'detail':
        'Enviamos um e-mail para você confirma seu cadastro, por favor verifique sua caixa de entrada.'
    }
Esempio n. 2
0
def predict(
        request: Any = Body(..., media_type="application/json"), debug=False):
    """Run a single prediction and return it as a JSON response.

    Raises UserRuntimeException when predict() returns something that is not
    JSON serializable. Tracker-metric failures are logged but never fail the
    request; newly seen class labels are uploaded after the response is sent.
    """
    api = local_cache["api"]
    predictor_impl = local_cache["predictor_impl"]

    debug_obj("payload", request, debug)
    prediction = predictor_impl.predict(request)

    try:
        json_string = json.dumps(prediction)
    except Exception as e:
        # Plain string literal (was an f-string with no placeholders).
        raise UserRuntimeException(
            "the return value of predict() or one of its nested values is not JSON serializable",
            str(e),
        ) from e

    debug_obj("prediction", json_string, debug)

    response = Response(content=json_string, media_type="application/json")

    if api.tracker is not None:
        try:
            predicted_value = api.tracker.extract_predicted_value(prediction)
            api.post_tracker_metrics(predicted_value)
            if (api.tracker.model_type == "classification"
                    and predicted_value not in local_cache["class_set"]):
                # Upload the newly observed class after the response is sent.
                tasks = BackgroundTasks()
                tasks.add_task(api.upload_class, class_name=predicted_value)
                local_cache["class_set"].add(predicted_value)
                response.background = tasks
        except Exception:  # narrowed from bare except; metrics are best-effort
            cx_logger().warning("unable to record prediction metric",
                                exc_info=True)

    return response
Esempio n. 3
0
    def _tasks_from_event(self, event: Hashable, *args: Any,
                          **kwargs: Any) -> BackgroundTasks:
        """Bundle every handler registered for *event* into one task set."""
        bundle = BackgroundTasks()
        handlers = list(self._handlers[event].values())
        for handler in handlers:
            bundle.add_task(handler, *args, **kwargs)
        return bundle
Esempio n. 4
0
def predict(
        request: Any = Body(..., media_type="application/json"), debug=False):
    """Run a prediction and return it as JSON.

    Falls back to the json_tricks encoder for values the stdlib encoder
    cannot serialize (e.g. numpy types). Tracker-metric failures are logged
    but never fail the request.
    """
    api = local_cache["api"]
    predictor_impl = local_cache["predictor_impl"]

    debug_obj("payload", request, debug)
    prediction = predictor_impl.predict(request)
    debug_obj("prediction", prediction, debug)

    try:
        json_string = json.dumps(prediction)
    except (TypeError, ValueError):
        # json.dumps raises TypeError for unserializable objects and
        # ValueError for circular references; anything else should propagate.
        json_string = util.json_tricks_encoder().encode(prediction)

    response = Response(content=json_string, media_type="application/json")

    if api.tracker is not None:
        try:
            predicted_value = api.tracker.extract_predicted_value(prediction)
            api.post_tracker_metrics(predicted_value)
            if (api.tracker.model_type == "classification"
                    and predicted_value not in local_cache["class_set"]):
                # Upload the newly observed class after the response is sent.
                tasks = BackgroundTasks()
                tasks.add_task(api.upload_class, class_name=predicted_value)
                local_cache["class_set"].add(predicted_value)
                response.background = tasks
        except Exception:  # narrowed from bare except; metrics are best-effort
            cx_logger().warning("unable to record prediction metric",
                                exc_info=True)

    return response
Esempio n. 5
0
def handle_decline_mission(db: Session, mission_uuid: str, volunteer_id: int,
                           error_state: models.MissionErrorState,
                           background_tasks: BackgroundTasks):
    """Record a volunteer declining a mission and update the mission state.

    Depending on *error_state*: a repeated no-answer marks the mission as
    error; a first no-answer re-approves it so other volunteers may retry;
    "not relevant" escalates to the call center via set_mission_error; an
    outright refusal re-approves the mission and re-runs the volunteer
    search in the background.
    """
    # fetch models instances
    # NOTE(review): .first() may return None for an unknown uuid/volunteer;
    # the attribute accesses below would then raise AttributeError -- confirm
    # callers guarantee both rows exist.
    mission = db.query(
        models.Mission).filter(models.Mission.uuid == mission_uuid).first()
    volunteer_mission = db.query(models.VolunteerMission).filter(
        and_(models.VolunteerMission.mission_id == mission.id,
             models.VolunteerMission.volunteer_id == volunteer_id)).first()

    volunteer_mission.state = VolunteerMissionState.declined

    if error_state == MissionErrorState.no_one_answered_call:
        if mission.error_state == MissionErrorState.no_one_answered_call:
            # mission had already no answer, just set the state as error
            mission.state = MissionState.error
            db.commit()
        else:
            # schedule retry in 60 minutes; the other volunteers will still
            # have the chance to call the elder.
            # (presumably set_mission_state commits the session -- verify)
            mission.error_state = MissionErrorState.no_one_answered_call
            set_mission_state(db, mission, MissionState.approved)
    elif error_state == MissionErrorState.not_relevant_anymore:
        # mission will be handled by call center
        set_mission_error(db, mission, MissionErrorState.not_relevant_anymore)
    elif error_state == MissionErrorState.refuse_mission:
        # mission was refused by volunteer, rerun search now
        set_mission_state(db, mission, MissionState.approved)
        background_tasks.add_task(search_task_caller, mission.uuid)
Esempio n. 6
0
async def sso_callback(request: Request,
                       background_tasks: BackgroundTasks,
                       sso_type: str = Path(default=None,
                                            description="social media type",
                                            example="google")):
    """Issue a login token once a social-media (SSO) login has succeeded."""
    existing_user, user_details, access_token = await Authentication.verify_and_process(
        request, sso_type)
    is_new_user = not existing_user
    if is_new_user and Utility.email_conf["email"]["enable"]:
        # First social login: mail the auto-generated password asynchronously.
        background_tasks.add_task(
            Utility.format_and_send_mail,
            mail_type='password_generated',
            email=user_details['email'],
            first_name=user_details['first_name'],
            password=user_details['password'].get_secret_value())
    token_payload = {"access_token": access_token, "token_type": "bearer"}
    return {
        "data": token_payload,
        "message":
        """It is your responsibility to keep the token secret.
        If leaked then other may have access to your system.""",
    }
Esempio n. 7
0
    def create_test_run(cls, background_tasks: BackgroundTasks):
        """Create a new test run unless one is already executing.

        Ensures only a single run is active at a time, persists a new run
        row, and schedules pytest execution as a background task.

        :returns dict:
        """
        result = {'created': False, 'id': None}

        active = cls.get_currently_running()
        if active.count():
            print('test already running')
            result['id'] = active[0].id
            return result

        # Nothing running: persist a fresh run and kick off pytest.
        run = cls().save()
        background_tasks.add_task(run.execute_pytest)

        result['id'] = run.id
        result['created'] = True
        return result
Esempio n. 8
0
def register(
        *,
        db: Session = Depends(get_db),
        password: str = Body(...),
        email: EmailStr = Body(...),
        full_name: str = Body(None),
        background_tasks: BackgroundTasks,
):
    """register a new user"""

    # UserCreate is deliberately not taken from the request body here: it
    # would let a caller set is_superuser on themselves.

    # Registration is only allowed while USERS_OPEN_REGISTRATION is enabled.
    if not settings.USERS_OPEN_REGISTRATION:
        raise HTTPException(status_code=403, detail="forbidden for register")

    if crud.user.get_by_email(db, email=email):
        raise HTTPException(status_code=400, detail="User already exists")

    new_user = crud.user.create(
        db,
        obj=schemas.UserCreate(password=password,
                               email=email,
                               full_name=full_name))

    # Queue the confirmation e-mail without blocking the response.
    if settings.EMAILS_ENABLED and new_user.email:
        token = create_access_token(
            subject=email,
            expires_delta=timedelta(settings.EMAIL_CONFIRM_TOKEN_EXPIRE))
        background_tasks.add_task(send_confirm_email,
                                  email_to=new_user.email,
                                  token=token)

    return new_user
Esempio n. 9
0
def image_response(background_tasks: BackgroundTasks, image_id: str):
    """Serve the encoded image once, deleting the file after the response.

    Returns a plain message when the file no longer exists (it has already
    been served and removed).
    """
    # os.path.join is the portable way to build the path (was os.sep.join).
    path = os.path.join(TMP, image_id) + ".png"
    if os.path.isfile(path):
        # Delete the file right after the response is sent: one-shot delivery.
        background_tasks.add_task(remove_file, path)
        return FileResponse(path)
    # Fixed typo in the user-facing message ("avaible" -> "available").
    return "Encoded pictures are available only once"
Esempio n. 10
0
async def encode_img(background_tasks: BackgroundTasks,
                     key: str = Form(...),
                     secret: str = Form(...)):
    """Encode *secret* into an image using *key* and serve the file once."""
    encoded = Rothko(key).encode_to_img(secret, scale=True)
    file_path = encoded.save(TMP)
    # Remove the temporary image after the response has been sent.
    background_tasks.add_task(remove_file, file_path)
    return FileResponse(file_path)
Esempio n. 11
0
async def put_stack_instance(
    background_tasks: BackgroundTasks,
    stack_instance_update: StackInstanceUpdate,
    document_manager: DocumentManager = Depends(get_document_manager),
    stack_manager: StackManager = Depends(get_stack_manager),
    redis=Depends(get_redis)):
    """
    Updates a stack instance by using a StackInstanceUpdate object.

    Schedules delete jobs for removed services and an update job for the
    agent, then persists the updated instance.
    Raises HTTPException(422) when the update request cannot be processed.
    """
    logger.info("[StackInstances PUT] Received PUT request")
    to_be_deleted = stack_manager.check_delete_services(stack_instance_update)
    (stack_instance, return_result) = stack_manager.process_stack_request(
        stack_instance_update, "update")
    if stack_instance is None:
        # Bug fix: the exception used to be *returned*, which serialises it
        # into a 200 response instead of producing a 422 error response.
        raise HTTPException(422, return_result)

    # Perform invocations
    if not stack_instance_update.disable_invocation:
        for service in to_be_deleted:
            background_tasks.add_task(create_job_per_service, service,
                                      document_manager, "delete", redis,
                                      stack_instance, to_be_deleted)
        # The agent must see the instance without the removed services, so
        # work on a deep copy and strip them before handing it off.
        copy_stack_instance = stack_instance.copy(deep=True)
        delete_services(to_be_deleted, copy_stack_instance)
        background_tasks.add_task(create_job_for_agent, copy_stack_instance,
                                  "update", redis)

    document_manager.write_stack_instance(stack_instance)

    return return_result
Esempio n. 12
0
 async def app(scope, receive, send):
     """ASGI app that schedules two increment tasks to run after the response."""
     task_group = BackgroundTasks()
     for _ in range(2):
         task_group.add_task(increment)
     plain = Response("tasks initiated",
                      media_type="text/plain",
                      background=task_group)
     await plain(scope, receive, send)
Esempio n. 13
0
async def clear_cache(name: str, background_tasks: BackgroundTasks) -> None:
    """Schedule deletion of cached entries for *name* ("all" wipes every key)."""
    redis_cache = Cache(Cache.REDIS,
                        endpoint=app_settings.CACHE_HOST,
                        port=app_settings.CACHE_PORT)
    pattern = "/*" if name == "all" else f"/{name}*"
    # Deletion happens after the response; the caller never waits on Redis.
    background_tasks.add_task(redis_cache.delete, pattern)
Esempio n. 14
0
async def xls2html(request: Request, back_ground_tasks: BackgroundTasks):
    """Accept an uploaded spreadsheet and e-mail it as HTML in the background."""
    form_data = await request.form()
    upload = form_data.get("file").file._file
    if not upload.getvalue():
        # Empty upload: render the failure page instead of queueing work.
        return templates.TemplateResponse("web/failure.html",
                                          {"request": request, "message": "没有文件上传!"})
    back_ground_tasks.add_task(send_html, upload)
    return templates.TemplateResponse("web/success.html",
                                      {"request": request, "email_address": "*****@*****.**"})
Esempio n. 15
0
async def create_summary(
        payload: SummaryPayloadSchema,
        background_tasks: BackgroundTasks) -> SummaryResponseSchema:
    """Persist a new summary record and generate its text asynchronously."""
    new_id = await crud.post(payload)

    # Summary generation is slow, so it runs after the response is returned.
    background_tasks.add_task(generate_summary, new_id, payload.url)

    return {"id": new_id, "url": payload.url}
Esempio n. 16
0
async def email_by_gmail2(request: Request, mailing_list: SendEmail, background_tasks: BackgroundTasks):
    """Queue the mailing as a background task and print the dispatch latency."""
    started = time()
    background_tasks.add_task(
        send_email, mailing_list=mailing_list.email_to
    )
    elapsed_ms = round((time() - started) * 1000, 5)
    print("+*+*" * 30)
    print(str(elapsed_ms) + "ms")
    print("+*+*" * 30)
    return MessageOk()
Esempio n. 17
0
    def predict(self, request, tasks: BackgroundTasks):
        """Return binary predictions for the payload and log them to Arize."""
        payload = request['payload']
        # np.round squeezes the raw model outputs to binary {0,1} labels.
        predictions = list(np.round(self.model.predict(payload)))

        # Ship results to Arize after the response, off the request path.
        tasks.add_task(log_to_arize, predictions, self.arize_client,
                       self.model_id, self.model_version)

        return {'result': predictions}
async def add_up(
    background_tasks: BackgroundTasks,
    url: HttpUrl = Query(...),
    up_repo: UpRepository = Depends(get_repository(UpRepository))
) -> UpInDB:
    """Create an "up" record from *url* and refresh its info in the background."""
    info = await get_up_info(url=url)
    to_create = get_create_from_dict(CREATE_MODEL=UpInCreate)(info)
    stored = await up_repo.create(CREATE_MODEL=UpInCreate,
                                  RETURN_MODEL=UpInDB)(to_create)
    # Keep the stored info fresh without delaying the response.
    background_tasks.add_task(update_up_info, url=url, up_repo=up_repo)

    return stored
Esempio n. 19
0
async def register_dicom(request):
    """Endpoint for registering newly received DICOM files. Called by the getdcmtags module."""
    form = dict(await request.form())

    insert_query = dicom_files.insert().values(
        filename=form.get("filename", ""),
        file_uid=form.get("file_uid", ""),
        series_uid=form.get("series_uid", ""),
        time=datetime.datetime.now(),
    )
    # Run the insert after the response so the caller is not blocked on the DB.
    tasks = BackgroundTasks()
    tasks.add_task(execute_db_operation, operation=insert_query)
    return JSONResponse({'ok': ''}, background=tasks)
Esempio n. 20
0
async def post_mercure_event(request):
    """Endpoint for receiving mercure system events."""
    form = dict(await request.form())

    insert_query = mercure_events.insert().values(
        sender=form.get("sender", "Unknown"),
        event=form.get("event", monitor.h_events.UNKNOWN),
        severity=int(form.get("severity", monitor.severity.INFO)),
        description=form.get("description", ""),
        time=datetime.datetime.now(),
    )
    # Persist asynchronously; the sender only needs an acknowledgement.
    tasks = BackgroundTasks()
    tasks.add_task(execute_db_operation, operation=insert_query)
    return JSONResponse({'ok': ''}, background=tasks)
Esempio n. 21
0
async def post_webgui_event(request):
    """Endpoint for logging relevant events of the webgui."""
    form = dict(await request.form())

    insert_query = webgui_events.insert().values(
        sender=form.get("sender", "Unknown"),
        event=form.get("event", monitor.w_events.UNKNOWN),
        user=form.get("user", "UNKNOWN"),
        description=form.get("description", ""),
        time=datetime.datetime.now(),
    )
    # Persist asynchronously; the sender only needs an acknowledgement.
    tasks = BackgroundTasks()
    tasks.add_task(execute_db_operation, operation=insert_query)
    return JSONResponse({'ok': ''}, background=tasks)
Esempio n. 22
0
async def create_propagation_task(
        token: str,
        request: Request,
        background_tasks: BackgroundTasks,
        jessigod_config=fastapi.Depends(get_jessigod_config),
):
    """Accept a Telegram webhook update and process it in the background.

    The bot token is compared in constant time to resist timing attacks.
    """
    if not compare_digest(jessigod_config.bots.telegram_bot.token, token):
        return 'not ok'

    raw_body = await request.body()
    update = json.loads(raw_body)
    background_tasks.add_task(core.handle_telegram_update, jessigod_config,
                              update)

    return 'ok'
Esempio n. 23
0
def predict(request: Request):
    """Handle a prediction request and adapt the result to a response.

    Supports bytes, str, Response, and JSON-serializable return values from
    predict(). Monitoring-metric failures are logged but never fail the
    request; post_predict (when defined) runs on the request thread pool.
    """
    tasks = BackgroundTasks()
    api = local_cache["api"]
    predictor_impl = local_cache["predictor_impl"]
    dynamic_batcher = local_cache["dynamic_batcher"]
    kwargs = build_predict_kwargs(request)

    if dynamic_batcher:
        prediction = dynamic_batcher.predict(**kwargs)
    else:
        prediction = predictor_impl.predict(**kwargs)

    # Pick the response representation from the prediction's type.
    if isinstance(prediction, bytes):
        response = Response(content=prediction,
                            media_type="application/octet-stream")
    elif isinstance(prediction, str):
        response = Response(content=prediction, media_type="text/plain")
    elif isinstance(prediction, Response):
        response = prediction
    else:
        try:
            json_string = json.dumps(prediction)
        except Exception as e:
            raise UserRuntimeException(
                str(e),
                "please return an object that is JSON serializable (including its nested fields), a bytes object, "
                "a string, or a starlette.response.Response object",
            ) from e
        response = Response(content=json_string, media_type="application/json")

    if local_cache["provider"] != "local" and api.monitoring is not None:
        try:
            predicted_value = api.monitoring.extract_predicted_value(
                prediction)
            api.post_monitoring_metrics(predicted_value)
            if (api.monitoring.model_type == "classification"
                    and predicted_value not in local_cache["class_set"]):
                tasks.add_task(api.upload_class, class_name=predicted_value)
                local_cache["class_set"].add(predicted_value)
        except Exception:  # narrowed from bare except; metrics are best-effort
            logger().warning("unable to record prediction metric",
                             exc_info=True)

    if util.has_method(predictor_impl, "post_predict"):
        kwargs = build_post_predict_kwargs(prediction, request)
        request_thread_pool.submit(predictor_impl.post_predict, **kwargs)

    # Attach background tasks only when at least one was registered.
    if tasks.tasks:
        response.background = tasks

    return response
Esempio n. 24
0
def create_appointments(
    data: AppointmentCreate,
    background_tasks: BackgroundTasks,
    user: User = Depends(deps.get_user),
    db: Session = Depends(deps.get_db),
    rdc: RedisCache = Depends(deps.get_redis)
) -> Any:
    """
    Endpoint for create appointment.

    Validates the provider, the requested date, business hours and
    ownership rules, persists the appointment, notifies the provider in
    the background, and invalidates the related cache entries.
    """
    db_provider = crud_user.get_user_by_id(db, str(data.provider_id))
    if not db_provider:
        raise HTTPException(status_code=404,
                            detail="Cabeleireiro não encontrado")

    # Compare as naive datetimes: the incoming date is stripped of tzinfo so
    # it can be compared with datetime.now().
    # NOTE(review): assumes client and server share the same timezone -- confirm.
    current_date = datetime.now()
    compare_date = data.date.replace(tzinfo=None)
    if compare_date < current_date:
        raise HTTPException(
            status_code=400,
            detail="Você não pode marcar agendamento em datas passadas")

    # Business hours: only hours 8..17 are accepted (so up to 17:59,
    # even though the message says 17:00).
    if data.date.hour < 8 or data.date.hour > 17:
        raise HTTPException(
            status_code=400,
            detail="Você só pode cria agendamentos entre 8:00 e 17:00")

    # Users cannot book an appointment with themselves.
    if data.provider_id == user.id:
        raise HTTPException(
            status_code=400,
            detail="Você não pode marca agendamento consigo mesmo")

    # Reject double-booking of the same provider/time slot.
    validate_date = crud_appointment.get_appointment_by_date(
        db, data.provider_id, data.date)
    if validate_date:
        raise HTTPException(status_code=400,
                            detail="Este horario já esta agendado")

    appointment = crud_appointment.create(db, data, user)
    msg = f"Novo agendamento de {user.name} {user.surname} para o {date.format_date(data.date)}"
    # Notify the provider asynchronously so the request is not delayed.
    background_tasks.add_task(crud_notification.create, str(data.provider_id),
                              msg)
    date_time = data.date
    # Drop cached schedules affected by the new appointment.
    rdc.invalidate_cache(
        f"providers-appointments:{data.provider_id}:{date_time.year}:{date_time.month}:{date_time.day}"
    )
    rdc.invalidate_cache(f"user-appointments:{user.id}")

    return appointment
Esempio n. 25
0
def post_pong(
    digits_input: DigitsInput,
    background_tasks: BackgroundTasks,
    aiohttp_client: aiohttp.ClientSession = Depends(aiohttp_client_dep),
    history: ResponseHistory = Depends(response_history_dep),
) -> DigitsOutput:
    """Compute avg/min/max of the digits and fire a follow-up ping afterwards."""
    digits = digits_input.digits or []

    average, lowest, highest = get_avg_min_max(digits)

    # The ping back to the peer runs after the response has been sent.
    background_tasks.add_task(
        ping, digits_list=digits, aiohttp_client=aiohttp_client, history=history
    )

    return DigitsOutput(digits=digits, min=lowest, max=highest, avg=average)
Esempio n. 26
0
async def get_revisions(page_id: int, bg_tasks: BackgroundTasks) -> WikiPage:
    """Return revisions data for a given page ID, serving from cache when possible."""
    cached = await _try_cached_page(page_id)
    if cached:
        return cached

    fresh = await _perform_revisions_request(page_id)

    # Cache the pickled page after responding; entries expire automatically.
    bg_tasks.add_task(
        ctx.redis.set,
        page_id,
        pickle.dumps(fresh),
        expire=config.CACHE_TTL_IN_SECONDS,
    )
    return fresh
Esempio n. 27
0
async def solve_dependencies(
    *,
    request: Request,
    dependant: Dependant,
    body: Optional[Dict[str, Any]] = None,
    background_tasks: Optional[BackgroundTasks] = None,
) -> Tuple[Dict[str, Any], List[ErrorWrapper], Optional[BackgroundTasks]]:
    """Resolve all dependencies of *dependant* for the given request.

    Recursively solves sub-dependencies first (threading one shared
    BackgroundTasks instance through the tree), then extracts
    path/query/header/cookie and body parameters, and finally injects the
    special parameters (request, background tasks, security scopes) when
    the dependant declares them.

    Returns a tuple of (resolved values by parameter name, validation
    errors, the shared BackgroundTasks instance or None).
    """
    values: Dict[str, Any] = {}
    errors: List[ErrorWrapper] = []
    for sub_dependant in dependant.dependencies:
        # Depth-first: each sub-dependency's own dependencies are solved in
        # the recursive call; background_tasks is passed down and back up so
        # every level shares the same instance.
        sub_values, sub_errors, background_tasks = await solve_dependencies(
            request=request,
            dependant=sub_dependant,
            body=body,
            background_tasks=background_tasks,
        )
        if sub_errors:
            # Collect the errors but keep solving the remaining dependencies.
            errors.extend(sub_errors)
            continue
        assert sub_dependant.call is not None, "sub_dependant.call must be a function"
        if is_coroutine_callable(sub_dependant.call):
            solved = await sub_dependant.call(**sub_values)
        else:
            # Sync dependencies run in a thread pool so they don't block the loop.
            solved = await run_in_threadpool(sub_dependant.call, **sub_values)
        assert sub_dependant.name is not None, "Subdependants always have a name"
        values[sub_dependant.name] = solved
    path_values, path_errors = request_params_to_args(
        dependant.path_params, request.path_params
    )
    query_values, query_errors = request_params_to_args(
        dependant.query_params, request.query_params
    )
    header_values, header_errors = request_params_to_args(
        dependant.header_params, request.headers
    )
    cookie_values, cookie_errors = request_params_to_args(
        dependant.cookie_params, request.cookies
    )
    values.update(path_values)
    values.update(query_values)
    values.update(header_values)
    values.update(cookie_values)
    errors += path_errors + query_errors + header_errors + cookie_errors
    if dependant.body_params:
        body_values, body_errors = await request_body_to_args(  # type: ignore # body_params checked above
            dependant.body_params, body
        )
        values.update(body_values)
        errors.extend(body_errors)
    if dependant.request_param_name:
        values[dependant.request_param_name] = request
    if dependant.background_tasks_param_name:
        if background_tasks is None:
            # Created lazily: only when some dependant actually asks for it.
            background_tasks = BackgroundTasks()
        values[dependant.background_tasks_param_name] = background_tasks
    if dependant.security_scopes_param_name:
        values[dependant.security_scopes_param_name] = SecurityScopes(
            scopes=dependant.security_scopes
        )
    return values, errors, background_tasks
    async def handle_graphql(self, request: Request) -> Response:
        """Serve a GraphQL request over GET/HEAD (query params) or POST.

        Renders GraphiQL for HTML GETs when enabled; otherwise extracts the
        query, executes it, and returns a JSON response with a 400 status
        when execution produced errors. Background tasks registered through
        the execution context are attached to the response.
        """
        if request.method in ("GET", "HEAD"):
            if "text/html" in request.headers.get("Accept", ""):
                if not self.graphiql:
                    return PlainTextResponse(
                        "Not Found", status_code=status.HTTP_404_NOT_FOUND)
                return await self.handle_graphiql(request)

            data = request.query_params  # type: typing.Mapping[str, typing.Any]

        elif request.method == "POST":
            content_type = request.headers.get("Content-Type", "")

            if "application/json" in content_type:
                data = await request.json()
            elif "application/graphql" in content_type:
                body = await request.body()
                text = body.decode()
                data = {"query": text}
            elif "query" in request.query_params:
                data = request.query_params
            else:
                return PlainTextResponse(
                    "Unsupported Media Type",
                    status_code=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
                )

        else:
            return PlainTextResponse(
                "Method Not Allowed",
                status_code=status.HTTP_405_METHOD_NOT_ALLOWED)

        try:
            query = data["query"]
            variables = data.get("variables")
            operation_name = data.get("operationName")
        except KeyError:
            return PlainTextResponse(
                "No GraphQL query found in the request",
                status_code=status.HTTP_400_BAD_REQUEST,
            )

        background = BackgroundTasks()
        context = {"request": request, "background": background}

        result = await self.execute(query,
                                    variables=variables,
                                    context=context,
                                    operation_name=operation_name)
        error_data = ([format_error(err)
                       for err in result.errors] if result.errors else None)
        response_data = {"data": result.data}
        if error_data:
            response_data["errors"] = error_data
        status_code = (status.HTTP_400_BAD_REQUEST
                       if result.errors else status.HTTP_200_OK)
        # Removed a leftover debug print of (status_code, response_data).
        return JSONResponse(response_data,
                            status_code=status_code,
                            background=background)
Esempio n. 29
0
    async def _get_response(self, request: Request, data: QueryParams,
                            variables: typing.Optional[dict]) -> Response:
        """Execute the GraphQL query found in *data* and wrap it in JSON."""
        if "query" not in data:
            return PlainTextResponse("No GraphQL query found in the request",
                                     400)
        query = data["query"]

        config = get_graphql_config(request)
        background = BackgroundTasks()
        context = {"req": request, "background": background, **config.context}

        engine: Engine = config.engine
        execution: dict = await engine.execute(
            query,
            context=context,
            variables=variables,
            operation_name=data.get("operationName"),
        )

        payload = {"data": execution["data"]}
        if "errors" in execution:
            # Errors map to a 400 status and a formatted "errors" field.
            payload["errors"] = format_errors(execution["errors"])
            status = 400
        else:
            status = 200

        return JSONResponse(content=payload,
                            status_code=status,
                            background=background)
Esempio n. 30
0
    async def handle_http_request(self, http_request: Request):
        """Parse a JSON-RPC HTTP request, dispatch it, and build the response.

        A shared BackgroundTasks instance and a scratch sub_response are
        handed to the body handler so it can register tasks and override
        headers/status; both are merged into the final response here.
        Parse failures are converted into an error response via the
        entrypoint's exception handler.
        """
        background_tasks = BackgroundTasks()

        # Scratch response the handler may mutate (headers, status code only).
        # noinspection PyTypeChecker
        sub_response = Response(
            content=None,
            status_code=None,
            headers=None,
            media_type=None,
            background=None,
        )

        try:
            body = await self.parse_body(http_request)
        except Exception as exc:
            # Malformed body: turn the exception into a JSON-RPC error payload.
            resp, _ = await self.entrypoint.handle_exception_to_resp(exc)
            response = self.response_class(content=resp, background=background_tasks)
        else:
            try:
                resp = await self.handle_body(http_request, background_tasks, sub_response, body)
            except NoContent:
                # no content for successful notifications
                response = Response(media_type='application/json', background=background_tasks)
            else:
                response = self.response_class(content=resp, background=background_tasks)

        # Merge anything the handler set on the scratch sub_response.
        response.headers.raw.extend(sub_response.headers.raw)
        if sub_response.status_code:
            response.status_code = sub_response.status_code

        return response