def delete_project(
    name: str,
    deletion_strategy: schemas.DeletionStrategy = fastapi.Header(
        schemas.DeletionStrategy.default(),
        alias=schemas.HeaderNames.deletion_strategy,
    ),
    projects_role: typing.Optional[schemas.ProjectsRole] = fastapi.Header(
        None, alias=schemas.HeaderNames.projects_role
    ),
    # TODO: we're in a http request context here, therefore it doesn't make sense that by default it will hold the
    # request until the process will be completed - after UI supports waiting - change default to False
    wait_for_completion: bool = fastapi.Query(True, alias="wait-for-completion"),
    iguazio_session: typing.Optional[str] = fastapi.Cookie(None, alias="session"),
    db_session: Session = fastapi.Depends(deps.get_db_session),
):
    """Delete a project.

    Responds 202 while the deletion is still running in the background,
    204 once it has completed synchronously.
    """
    running_in_background = get_project_member().delete_project(
        db_session,
        name,
        deletion_strategy,
        projects_role,
        iguazio_session,
        wait_for_completion=wait_for_completion,
    )
    if running_in_background:
        return fastapi.Response(status_code=HTTPStatus.ACCEPTED.value)
    return fastapi.Response(status_code=HTTPStatus.NO_CONTENT.value)
def delete_project(
    name: str,
    deletion_strategy: mlrun.api.schemas.DeletionStrategy = fastapi.Header(
        mlrun.api.schemas.DeletionStrategy.default(),
        alias=mlrun.api.schemas.HeaderNames.deletion_strategy,
    ),
    # TODO: we're in a http request context here, therefore it doesn't make sense that by default it will hold the
    # request until the process will be completed - after UI supports waiting - change default to False
    wait_for_completion: bool = fastapi.Query(True, alias="wait-for-completion"),
    auth_verifier: mlrun.api.api.deps.AuthVerifierDep = fastapi.Depends(
        mlrun.api.api.deps.AuthVerifierDep
    ),
    db_session: sqlalchemy.orm.Session = fastapi.Depends(
        mlrun.api.api.deps.get_db_session
    ),
):
    """Delete a project.

    Responds 202 while the deletion runs in the background, 204 when done.
    """
    running_in_background = get_project_member().delete_project(
        db_session,
        name,
        deletion_strategy,
        auth_verifier.auth_info.projects_role,
        auth_verifier.auth_info,
        wait_for_completion=wait_for_completion,
    )
    if running_in_background:
        return fastapi.Response(status_code=http.HTTPStatus.ACCEPTED.value)
    return fastapi.Response(status_code=http.HTTPStatus.NO_CONTENT.value)
async def weather(loc: Location = Depends(), units: Optional[str] = 'metric'):
    """Return the weather report for the requested location."""
    try:
        report = await get_report_async(loc.city, loc.state, loc.country, units)
    except ValidationError as ve:
        # Propagate the validation failure with its own status code.
        return fastapi.Response(ve.error_msg, ve.status_code)
    except Exception as e:
        # Any other failure is surfaced verbatim as a 500.
        return fastapi.Response(content=str(e), status_code=500)
    return report
async def weather(loc: Location = Depends(), units: Optional[str] = 'metric'):
    """Return the current weather for the requested location."""
    try:
        current = await openweather.get_current_weather(
            loc.city, loc.province, loc.country, units
        )
    except ValidationError as ve:
        return fastapi.Response(content=ve.error_msg, status_code=ve.status_code)
    except Exception as x:
        return fastapi.Response(content=str(x), status_code=500)
    return current
def add_user_secrets(
    secrets: schemas.UserSecretCreationRequest,
):
    """Store a user's secrets; only the Vault provider is supported.

    Returns 400 for any other provider, 201 on success.
    """
    if secrets.provider != schemas.SecretProviderName.vault:
        return fastapi.Response(
            # Bug fix: was `HTTPStatus.BAD_REQUEST.vault` — HTTPStatus members
            # have no `vault` attribute, so this raised AttributeError instead
            # of returning 400. `.value` is the numeric status code.
            status_code=HTTPStatus.BAD_REQUEST.value,
            content=f"Invalid secrets provider {secrets.provider}",
        )
    add_vault_user_secrets(secrets.user, secrets.secrets)
    return fastapi.Response(status_code=HTTPStatus.CREATED.value)
def queryset_delete(queryset: str, session=Depends(get_session)):
    """
    Deletes the target queryset (does not delete any data)
    """
    existing = session.query(models.Queryset).get(queryset)
    if existing is None:
        # Nothing to delete.
        return fastapi.Response(status_code=404)
    session.delete(existing)
    session.commit()
    return fastapi.Response(status_code=204)
async def weather(loc: Location = Depends(), units: Optional[str] = 'metric'):
    """Return the weather report for the requested location.

    Validation failures map to their own status code; any other failure is
    logged and reported as a generic 500 (no internals leaked to the client).
    """
    try:
        return await openweather_service.get_report_async(
            loc.city, loc.state, loc.country, units)
    except ValidationError as ve:
        return fastapi.Response(content=ve.error_msg, status_code=ve.status_code)
    except Exception as e:
        print(f'Server crashed while processing request: {e}')
        # Typo fix in the user-facing body: "procesing" -> "processing".
        return fastapi.Response(content='Error processing your request', status_code=500)
async def weather(loc: Location = Depends(), units: Optional[str] = "metric"):
    """Return the weather report for the requested location."""
    try:
        return await openweather.get_report(loc.city, loc.state, loc.country, units)
    except ValidationError as ve:
        # Client-side problem: reuse the error's own status code and message.
        return fastapi.Response(content=ve.error_message, status_code=ve.status_code)
    except Exception as exc:
        print(f"Server crashed while processing request: {exc}")
        return fastapi.Response(content="Error processing request.", status_code=500)
async def get_weather(
        loc: Location = Depends(), units: Optional[str] = 'metric'):
    """Return the weather report for the requested location."""
    try:
        report = await openweather.get_report(loc.city, loc.state, loc.country, units)
    except ValidationError as bad_request:
        return fastapi.Response(content=bad_request.error_msg,
                                status_code=bad_request.status_code)
    except Exception:
        # Generic failure: never leak internals to the client.
        return fastapi.Response(content='Error processing your request.', status_code=500)
    return report
async def weather(
    location: Location = Depends(), units: str = 'metric'
) -> Union[Optional[Dict[str, Any]], fastapi.Response]:
    """Returns a city weather route."""
    try:
        report = await openweather.report(location.city, location.country, units)
    except ValidationError as bad_input:
        return fastapi.Response(content=bad_input.error_message,
                                status_code=bad_input.status_code)
    except Exception as failure:
        return fastapi.Response(
            content=str(failure),
            status_code=int(http.HTTPStatus.INTERNAL_SERVER_ERROR),
        )
    return report
def edit_multiple_involve(
    ls: dependencies.LoginSession = f.Depends(
        dependencies.dependency_login_session),
    album_ids: List[int] = f.Query(
        ..., description="The ids of the albums to involve the person with."),
    person_id: int = f.Query(..., description="The ids of the person to involve."),
    role_id: int = f.Query(
        ..., description="The id of the role of the involvement."),
):
    """
    Connect the specified person to the specified albums detailing a role as the role of their involvement.

    For example, "[Luis Fonsi](https://en.wikipedia.org/wiki/Luis_Fonsi)" should be involved with the album
    "[Vida](https://en.wikipedia.org/wiki/Vida_(Luis_Fonsi_album))" with the role "Artist".

    Non-existing `album_ids` passed to the method will be silently skipped, while a 404 error will be raised for
    non-existing people or roles.

    Trying to create an involvement that already exists will result in that involvement being skipped.
    """
    # Resolve the role and person up front (404 for missing ones happens here).
    involvement_role = ls.get(tables.Role, role_id)
    involved_person = ls.get(tables.Person, person_id)
    for album in ls.group(tables.Album, album_ids):
        tables.AlbumInvolvement.make(
            session=ls.session,
            role=involvement_role,
            album=album,
            person=involved_person,
        )
        ls.log("album.edit.multiple.involve", obj=album.id)
    ls.session.commit()
    return f.Response(status_code=204)
def start_migration(
    background_tasks: fastapi.BackgroundTasks,
    response: fastapi.Response,
):
    """Trigger the DB migration process as a background task.

    Behaviour depends on the API state:
    - migrations already in progress: return the existing background task (202);
    - migrations previously failed: raise 412 (restart the API to retry);
    - not waiting for migrations: nothing to do, return 200;
    - otherwise: start the migration task and return it (202).
    """
    # we didn't yet decide who should have permissions to such actions, therefore no authorization at the moment
    # note in api.py we do declare to use the authenticate_request dependency - meaning we do have authentication
    global current_migration_background_task_name
    if mlrun.mlconf.httpdb.state == mlrun.api.schemas.APIStates.migrations_in_progress:
        # A migration is already running - hand back the same task so the
        # client can keep polling it instead of starting a second one.
        background_task = mlrun.api.utils.background_tasks.Handler(
        ).get_background_task(current_migration_background_task_name)
        response.status_code = http.HTTPStatus.ACCEPTED.value
        return background_task
    elif mlrun.mlconf.httpdb.state == mlrun.api.schemas.APIStates.migrations_failed:
        raise mlrun.errors.MLRunPreconditionFailedError(
            "Migrations were already triggered and failed. Restart the API to retry"
        )
    elif (
        mlrun.mlconf.httpdb.state != mlrun.api.schemas.APIStates.waiting_for_migrations
    ):
        # API is not waiting for migrations - nothing needs to run.
        return fastapi.Response(status_code=http.HTTPStatus.OK.value)
    logger.info("Starting the migration process")
    background_task = mlrun.api.utils.background_tasks.Handler(
    ).create_background_task(
        background_tasks,
        _perform_migration,
    )
    # Remember the task name so concurrent/subsequent requests can find it.
    current_migration_background_task_name = background_task.metadata.name
    response.status_code = http.HTTPStatus.ACCEPTED.value
    return background_task
def get_log(
    project: str,
    uid: str,
    size: int = -1,
    offset: int = 0,
    auth_verifier: mlrun.api.api.deps.AuthVerifierDep = fastapi.Depends(
        mlrun.api.api.deps.AuthVerifierDep
    ),
    db_session: sqlalchemy.orm.Session = fastapi.Depends(
        mlrun.api.api.deps.get_db_session
    ),
):
    """Return a run's log (or a slice of it) as plain text."""
    # Authorization first: the caller must be allowed to read this run's logs.
    mlrun.api.utils.clients.opa.Client().query_project_resource_permissions(
        mlrun.api.schemas.AuthorizationResourceTypes.log,
        project,
        uid,
        mlrun.api.schemas.AuthorizationAction.read,
        auth_verifier.auth_info,
    )
    run_state, log = mlrun.api.crud.Logs().get_logs(
        db_session, project, uid, size, offset
    )
    headers = {
        "x-mlrun-run-state": run_state,
        # pod_status was changed x-mlrun-run-state in 0.5.3, keeping it here for backwards compatibility (so <0.5.3
        # clients will work with the API)
        # TODO: remove this in 0.7.0
        "pod_status": run_state,
    }
    return fastapi.Response(content=log, media_type="text/plain", headers=headers)
async def recorded_video(video_id: str, range: str = fastapi.Header(None)):
    """Serve a recorded video, honouring HTTP Range requests (RFC 7233).

    Always replies 206 Partial Content with a `Content-Range` header.
    Raises FileNotFoundError when the recording does not exist.
    """
    log.info(f"Recorded video requested : '{video_id}'")
    video_path = pintu.config.recordings_dir / video_id
    if not video_path.is_file():
        log.error(f"No such file: '{video_path}'")
        raise FileNotFoundError(video_id)
    filesize = video_path.stat().st_size
    if range:
        # "bytes=start-end": both bounds are inclusive; end is optional.
        start_str, end_str = range.replace("bytes=", "").split("-")
        start = int(start_str)
        end = int(end_str) if end_str else start + CHUNK_SIZE
        # Bug fix: clamp to the last valid byte index so the header never
        # advertises bytes past EOF.
        end = min(end, filesize - 1)
    else:
        start = 0
        end = filesize - 1
    with video_path.open("rb") as video:
        video.seek(start)
        # Bug fix: the range is inclusive, so bytes start..end is
        # end - start + 1 bytes (previously read one byte too few).
        data = video.read(end - start + 1)
    headers = {
        "Content-Range": f"bytes {start}-{end}/{filesize}",
        "Accept-Ranges": "bytes",
    }
    return fastapi.Response(
        data,
        status_code=206,
        headers=headers,
        # media_type="video/mp4",
        media_type="video/webm",
    )
async def handler(request: fastapi.Request) -> fastapi.Response:
    """Internal-redirect handler for nginx prerender caching.

    Replies with an `x-accel-redirect` header pointing nginx at the prerender
    service, plus the cache key/signature headers it needs. When the cached
    prerender entry is stale, it is purged and flagged invalid in the headers.
    """
    config: 'AppConfig' = request.app.config
    cache: Cache = request.app.cache
    prerender_internal_url = config.nginx_prerender_internal_url
    # Rebuild the requested URL from the `url` path param plus the query string.
    url_with_args = f'{request.path_params.get("url", "")}?{request.query_params}'
    prerender_url = f'{prerender_internal_url}{url_with_args}'
    prerender_cache_key_secret = config.nginx_prerender_cache_key_secret
    prerender_cache_key = cache.get_prerender_cache_key_by_url(str(
        request.url))
    is_prerender_cache_valid = await cache.check_prerender_key_keys_hashes(
        prerender_cache_key)
    headers = {
        # nginx consumes this header to perform an internal redirect.
        'x-accel-redirect': prerender_url,
        'x-prerender-cache-url': str(request.url),
        'x-prerender-cache-key': prerender_cache_key,
        # Signature of the cache key derived with the secret — presumably
        # validated on the nginx side; TODO confirm.
        'x-prerender-cache-key-sec':
        cache.get_prerender_cache_key_sec(prerender_cache_key,
                                          prerender_cache_key_secret),
    }
    if not is_prerender_cache_valid:
        # Drop the stale entry and tell nginx to treat the cache as invalid.
        await cache.delete_prerender_key_keys_hashes(prerender_cache_key)
        headers.update({
            'x-prerender-cache-invalid': 'yes',
        })
    return fastapi.Response(headers=headers)
def edit_multiple_uninvolve(
    ls: dependencies.LoginSession = f.Depends(
        dependencies.dependency_login_session),
    album_ids: List[int] = f.Query(
        ..., description="The ids of the albums to uninvolve the person from."),
    person_id: int = f.Query(
        ..., description="The ids of the person to uninvolve."),
    role_id: int = f.Query(
        ..., description="The id of the role of the involvement."),
):
    """
    The opposite of _involve_: delete the connection between the specified person and the specified albums that has
    the specified role.

    Non-existing `album_ids` passed to the method will be silently skipped, while a 404 error will be raised for
    non-existing people or roles.

    Involvements that don't exist will be silently ignored.
    """
    role = ls.get(tables.Role, role_id)
    person = ls.get(tables.Person, person_id)
    # NOTE(review): the loop variable and the `song=` keyword hold Album rows,
    # while the companion involve endpoint passes `album=` to `make`. Confirm
    # that AlbumInvolvement.unmake really takes a `song` keyword — this looks
    # like a copy-paste from the song endpoints.
    for song in ls.group(tables.Album, album_ids):
        tables.AlbumInvolvement.unmake(session=ls.session, role=role,
                                       song=song, person=person)
        ls.log("album.edit.multiple.uninvolve", obj=song.id)
    ls.session.commit()
    # 204: success with no response body.
    return f.Response(status_code=204)
def patch_project(
    project: dict,
    name: str,
    patch_mode: schemas.PatchMode = fastapi.Header(
        schemas.PatchMode.replace, alias=schemas.HeaderNames.patch_mode
    ),
    projects_role: typing.Optional[schemas.ProjectsRole] = fastapi.Header(
        None, alias=schemas.HeaderNames.projects_role
    ),
    # TODO: we're in a http request context here, therefore it doesn't make sense that by default it will hold the
    # request until the process will be completed - after UI supports waiting - change default to False
    wait_for_completion: bool = fastapi.Query(True, alias="wait-for-completion"),
    auth_verifier: deps.AuthVerifier = fastapi.Depends(deps.AuthVerifier),
    db_session: Session = fastapi.Depends(deps.get_db_session),
):
    """Partially update a project.

    Returns the patched project, or 202 while the patch is still running in
    the background.
    """
    patched_project, running_in_background = get_project_member().patch_project(
        db_session,
        name,
        project,
        patch_mode,
        projects_role,
        auth_verifier.auth_info.session,
        wait_for_completion=wait_for_completion,
    )
    if running_in_background:
        return fastapi.Response(status_code=HTTPStatus.ACCEPTED.value)
    return patched_project
async def delete_queue_freeze(
    application: application_mod.Application = fastapi.Depends(  # noqa: B008
        security.get_application
    ),
    queue_name: rules.QueueName = fastapi.Path(  # noqa: B008
        ..., description="The name of the queue"
    ),
    repository_ctxt: context.Repository = fastapi.Depends(  # noqa: B008
        security.get_repository_context
    ),
) -> fastapi.Response:
    """Unfreeze a queue; 404 when it does not exist or is not frozen."""
    queue_freeze = freeze.QueueFreeze(
        repository=repository_ctxt,
        name=queue_name,
        application_name=application.name,
        application_id=application.id,
    )
    deleted = await queue_freeze.delete()
    if not deleted:
        raise fastapi.HTTPException(
            status_code=404,
            detail=f'The queue "{queue_name}" does not exist or is not currently frozen.',
        )
    return fastapi.Response(status_code=HTTP_204_NO_CONTENT)
def patch_project(
    project: dict,
    name: str,
    patch_mode: mlrun.api.schemas.PatchMode = fastapi.Header(
        mlrun.api.schemas.PatchMode.replace,
        alias=mlrun.api.schemas.HeaderNames.patch_mode,
    ),
    # TODO: we're in a http request context here, therefore it doesn't make sense that by default it will hold the
    # request until the process will be completed - after UI supports waiting - change default to False
    wait_for_completion: bool = fastapi.Query(True, alias="wait-for-completion"),
    auth_info: mlrun.api.schemas.AuthInfo = fastapi.Depends(
        mlrun.api.api.deps.authenticate_request
    ),
    db_session: sqlalchemy.orm.Session = fastapi.Depends(
        mlrun.api.api.deps.get_db_session
    ),
):
    """Partially update a project.

    Returns the patched project, or 202 while the patch still runs in the
    background.
    """
    patched_project, running_in_background = get_project_member().patch_project(
        db_session,
        name,
        project,
        patch_mode,
        auth_info.projects_role,
        auth_info.session,
        wait_for_completion=wait_for_completion,
    )
    if running_in_background:
        return fastapi.Response(status_code=http.HTTPStatus.ACCEPTED.value)
    return patched_project
def lfg_delete(
    *,
    ls: LoginSession = f.Depends(dep_loginsession),
    aid: int = f.Path(..., description="The aid of the LFG to delete."),
):
    """
    Quietly delete a LFG without triggering any webhook or notification.

    Follows the `DELETE` specification: it will return a success even if the LFG does not exist.

    Requires the `delete:lfg_admin` scope.
    """
    if "delete:lfg_admin" not in ls.cu.permissions:
        raise f.HTTPException(403, "Missing `delete:lfg_admin` scope.")
    lfg = ls.session.execute(
        ss.select(database.Announcement).where(database.Announcement.aid == aid)
    ).scalar()
    if lfg is not None:
        # Bug fix: capture the state *before* delete/commit. After commit the
        # ORM instance is expired, and refreshing an already-deleted row would
        # fail (the old code read lfg.state after the commit).
        state = lfg.state
        ls.session.delete(lfg)
        ls.session.commit()
        if state == database.AnnouncementState.PLANNED:
            planned_event.set()
        elif state == database.AnnouncementState.OPEN:
            open_event.set()
    return f.Response(status_code=204)
def pdf_to_audio_ep(url: str):
    """turns a pdf into an audiofile"""
    # NOTE(review): `url` is accepted but never passed to pdfToVoice() —
    # confirm whether the conversion is meant to operate on this URL.
    audio_path = pdfToVoice()
    # An empty audio_path signals conversion failure; reply with an empty body.
    return fastapi.Response(
        getAudioFromFile(audio_path) if audio_path != "" else "",
        media_type="audio/mpeg",
    )
def merge(
    ls: dependencies.LoginSession = f.Depends(dependencies.dependency_login_session),
    ss: sqlalchemy.orm.Session = f.Depends(dependencies.dependency_db_session_serializable),
    # Fix: the OpenAPI description said "genres" — this endpoint merges songs.
    song_ids: List[int] = f.Query(..., description="The ids of the songs to merge."),
):
    """
    Move the layers of all the specified songs into a single one, which will have the metadata of the first song
    specified.
    """
    if len(song_ids) < 2:
        raise f.HTTPException(400, "Not enough songs specified")
    # The first song keeps its metadata and receives all the layers.
    main_song = ss.query(tables.Song).get(song_ids[0])
    ls.log("song.merge.to", obj=main_song.id)
    # Fetch the songs being merged away.
    other_songs = ss.query(tables.Song).filter(tables.Song.id.in_(song_ids[1:])).all()
    # Reattach their layers to the main song, then delete them.
    for merged_song in other_songs:
        for layer in merged_song.layers:
            layer.song = main_song
        ls.log("song.merge.from", obj=merged_song.id)
        ss.delete(merged_song)
    ss.commit()
    ss.close()
    ls.session.commit()
    return f.Response(status_code=204)
async def generate_image(request: fastapi.Request):
    """Queue an artist-image generation job and return its job id."""
    post_data: dict = await request.json()
    # All three fields are mandatory.
    if any(field not in post_data.keys()
           for field in ["artist", "image_url", "predefined_image"]):
        return fastapi.Response("Invalid Request", 400)
    job_id = generate_job_id()
    await sqlite.add_job(job_id)
    # Fire-and-forget: the actual generation runs in the background.
    asyncio.ensure_future(
        lyrics.artist_to_image(
            job_id,
            post_data["artist"],
            post_data["image_url"],
            post_data["predefined_image"],
        )
    )
    return fastapi.Response(job_id)
def response(template_file: str, mimetype='text/html', status_code=200, **template_data):
    """Render a template and wrap the result in a fastapi.Response."""
    rendered = render(template_file, **template_data)
    return fastapi.Response(
        content=rendered,
        media_type=mimetype,
        status_code=status_code,
    )
def queryset_detail(queryset: str, session=Depends(get_session)):
    """
    Get details about a queryset
    """
    # Use a separate local so the `queryset` name parameter isn't shadowed.
    record = session.query(models.Queryset).get(queryset)
    if record is None:
        return fastapi.Response(status_code=404)
    return record.dict()
async def snippet_ocr(snippet_ocr_request: SnippetOCRRequest) -> Dict[str, Any]:
    """Run OCR on the requested screen selection.

    Returns the recognised text and its confidence, or 204 when nothing was
    recognised.
    """
    ensure_ocr_enabled()
    ocr_results = ocr_on_selection(
        snippet_ocr_request.selection,
        threshold=snippet_ocr_request.threshold,
    )
    if not ocr_results:
        return fastapi.Response(status_code=204)
    return {"text": ocr_results[0], "confidence": ocr_results[1]}
def database_reset(
    ls: dependencies.LoginSession = f.Depends(
        dependencies.dependency_login_session),
):
    """
    **Drop** and **recreate** all tables declared using the `DeclarativeBase` in `mandarin.database.base`.

    # THIS DELETES ALL DATA FROM THE DATABASE!
    """
    # Drop everything first, then rebuild the schema from scratch.
    Base.metadata.drop_all(bind=lazy_engine.evaluate())
    create_all()
    return f.Response(status_code=204)
def _delete_runtime_resources( db_session: sqlalchemy.orm.Session, auth_info: mlrun.api.schemas.AuthInfo, project: str, label_selector: typing.Optional[str] = None, kind: typing.Optional[str] = None, object_id: typing.Optional[str] = None, force: bool = False, grace_period: int = mlrun.mlconf.runtime_resources_deletion_grace_period, return_body: bool = True, ) -> typing.Union[ mlrun.api.schemas.GroupedByProjectRuntimeResourcesOutput, fastapi.Response ]: ( allowed_projects, grouped_by_project_runtime_resources_output, is_non_project_runtime_resource_exists, ) = _get_runtime_resources_allowed_projects( project, auth_info, label_selector, kind, object_id, mlrun.api.schemas.AuthorizationAction.delete, ) # if nothing allowed, simply return empty response if allowed_projects: permissions_label_selector = _generate_label_selector_for_allowed_projects( allowed_projects ) if label_selector: computed_label_selector = ",".join( [label_selector, permissions_label_selector] ) else: computed_label_selector = permissions_label_selector mlrun.api.crud.RuntimeResources().delete_runtime_resources( db_session, kind, object_id, computed_label_selector, force, grace_period, ) if is_non_project_runtime_resource_exists: # delete one more time, without adding the allowed projects selector mlrun.api.crud.RuntimeResources().delete_runtime_resources( db_session, kind, object_id, label_selector, force, grace_period, ) if return_body: filtered_projects = copy.deepcopy(allowed_projects) if is_non_project_runtime_resource_exists: filtered_projects.append("") return mlrun.api.crud.RuntimeResources().filter_and_format_grouped_by_project_runtime_resources_output( grouped_by_project_runtime_resources_output, filtered_projects, mlrun.api.schemas.ListRuntimeResourcesGroupByField.project, ) else: return fastapi.Response(status_code=http.HTTPStatus.NO_CONTENT.value)
def get_variable_value(loa: str, var: str, agg: str, session=Depends(get_sess)):
    """Fetch one aggregated variable at the given level of analysis (loa).

    `var` is "<table>.<column>". The result is indexed by the loa's index
    columns, sorted, and returned as a parquet byte stream.
    """
    network = query_planning.join_network(metadata.tables)
    table, variable = var.split(".")
    logger.info("Composing query")
    try:
        query = query_planning.query_with_ops(session.query(), query_planning.compose_join,
                network, loa, table, variable, settings.index_columns(loa), agg)
    except exceptions.QueryError as qe:
        # Bad request: the query could not be planned from the user's inputs.
        logger.error("exceptions.QueryError: %s", str(qe))
        return fastapi.Response(str(qe), status_code=400)
    except exceptions.ConfigError as ce:
        # Server-side misconfiguration.
        logger.error("exceptions.ConfigError: %s", str(ce))
        return fastapi.Response(str(ce), status_code=500)
    bytes_buffer = io.BytesIO()
    logger.debug("Executing %s", str(query))
    logger.info("Fetching data")
    dataframe = pd.read_sql_query(query.statement, session.connection())
    logger.info("Got %s rows", str(dataframe.shape[0]))
    try:
        # Index columns are named "<table>_<column>".
        loa_indices = [
            "_".join((tbl, col)) for tbl, col in settings.index_columns(loa)
        ]
        dataframe.set_index(loa_indices, inplace=True)
    except KeyError:
        # NOTE(review): if index_columns itself raised KeyError, loa_indices
        # would be unbound here and this handler would raise NameError —
        # confirm only set_index is expected to fail.
        missing_idx = set(loa_indices).difference(dataframe.columns)
        logger.error("Missing index columns: %s", ", ".join(missing_idx))
        return fastapi.Response("Couldn't set index.", status_code=500)
    logger.debug("Sorting dataframe")
    dataframe.sort_index(inplace=True)
    dataframe.to_parquet(bytes_buffer)
    return fastapi.Response(bytes_buffer.getvalue(),
            media_type="application/octet-stream")
async def StackOverflowBadge(request: Request, userID: str):
    """Render a Stack Overflow reputation badge SVG for the given user."""
    try:
        data_dict = await stackoverflow_service.StackUserRequestAsync(userID)
    except ValidationError as bad_request:
        return fastapi.Response(content=bad_request.error_msg,
                                status_code=bad_request.status_code)
    except Exception as failure:
        return fastapi.Response(content=str(failure), status_code=500)
    # Make sure .svg resolves to the right MIME type before templating.
    mimetypes.add_type("image/svg+xml", ".svg")
    context = {
        "request": request,
        "rep": str(data_dict["rep"]),
        "gold": str(data_dict["gold"]),
        "silver": str(data_dict["silver"]),
        "bronze": str(data_dict["bronze"]),
    }
    return templates.TemplateResponse("badge1.svg", context, media_type="image/svg+xml")