def delete_project(
    name: str,
    deletion_strategy: mlrun.api.schemas.DeletionStrategy = fastapi.Header(
        mlrun.api.schemas.DeletionStrategy.default(),
        alias=mlrun.api.schemas.HeaderNames.deletion_strategy,
    ),
    # TODO: we're in a http request context here, therefore it doesn't make sense that by default it will hold the
    # request until the process will be completed - after UI supports waiting - change default to False
    wait_for_completion: bool = fastapi.Query(True, alias="wait-for-completion"),
    auth_info: mlrun.api.schemas.AuthInfo = fastapi.Depends(
        mlrun.api.api.deps.authenticate_request),
    db_session: sqlalchemy.orm.Session = fastapi.Depends(
        mlrun.api.api.deps.get_db_session),
):
    """Delete a project.

    Responds with 202 while the deletion still runs in the background, and
    with 204 once it has completed.
    """
    running_in_background = get_project_member().delete_project(
        db_session,
        name,
        deletion_strategy,
        auth_info.projects_role,
        auth_info,
        wait_for_completion=wait_for_completion,
    )
    if running_in_background:
        status_code = http.HTTPStatus.ACCEPTED.value
    else:
        status_code = http.HTTPStatus.NO_CONTENT.value
    return fastapi.Response(status_code=status_code)
async def fetch_remote_snippet(
    source_url: str,
    force_renew: bool = False,
    storage_actor=fastapi.Depends(helpers.storage_dep),
    parser_actor=fastapi.Depends(helpers.html_parser_dep),
    comebacker_actor=fastapi.Depends(helpers.comebacker_dep),
):
    """Fetch snippet from url and store it in db.

    Serves the cached copy when one exists (unless ``force_renew`` is set);
    otherwise parses the remote page, caches the result best-effort, and
    schedules a comeback task when a comebacker is configured.
    """
    result_store: typing.Optional[models.SnippetAnswer] = None
    await storage_actor.provide_url(source_url)
    if not force_renew and await storage_actor.exists():
        result_store = models.SnippetAnswer(
            source_url=source_url, payload=await storage_actor.fetch()
        )
    if not result_store:
        try:
            extracted_meta: dict = await parser_actor.setup(source_url).fetch_and_extract()
        except exceptions.ParserFetchException as error_obj:
            LOGGER_OBJ.exception(f"Exception happens during snippet extraction, url: {source_url}")
            return models.SnippetAnswer(
                source_url=source_url,
                result=models.Status.JOB_FAIL,
                result_info=str(error_obj),
            )
        try:
            await storage_actor.save(extracted_meta)
        except exceptions.StoreSaveException:
            # An error during cache storing is bad, but must not be fatal for the main flow.
            LOGGER_OBJ.error(f"Cant store cache for url {source_url}")
        result_store = models.SnippetAnswer(source_url=source_url, payload=extracted_meta)
        if comebacker_actor:
            # Fire-and-forget: the comeback notification runs concurrently.
            asyncio.create_task(comebacker_actor(source_url, extracted_meta))
            result_store.is_comeback_goes_on = True
    # Bug fix: the cast result was previously discarded (a no-op statement
    # followed by a plain return); bind it so the narrowed type is returned.
    return typing.cast(models.SnippetAnswer, result_store)
async def refresh_branch(
    owner_login: github_types.GitHubLogin,
    repo_name: github_types.GitHubRepositoryName,
    branch: str,
    redis_cache: utils.RedisCache = fastapi.Depends(  # noqa: B008
        redis.get_redis_cache),
    redis_stream: utils.RedisStream = fastapi.Depends(  # noqa: B008
        redis.get_redis_stream),
) -> responses.Response:
    """Queue a refresh for the given branch of the repository."""
    installation_json = await github.get_installation_from_login(owner_login)
    async with github.aget_client(installation_json) as http_client:
        try:
            repo_data = await http_client.item(f"/repos/{owner_login}/{repo_name}")
        except http.HTTPNotFound:
            return responses.JSONResponse(status_code=404, content="repository not found")
    branch_ref = github_types.GitHubRefType(f"refs/heads/{branch}")
    await utils.send_branch_refresh(
        redis_cache,
        redis_stream,
        repo_data,
        action="user",
        source="API",
        ref=branch_ref,
    )
    return responses.Response("Refresh queued", status_code=202)
def check_import_token(
    auth: fastapi.security.HTTPBearer = fastapi.Depends(http_bearer),
    settings: settings.Settings = fastapi.Depends(dependendies.get_settings),
) -> None:
    """Check if token has permission to import data.

    Raises:
        fastapi.HTTPException: 403 when the bearer token does not match the
            configured import token.
    """
    token_is_valid = auth.credentials == settings.import_token  # type: ignore
    if token_is_valid:
        return
    raise fastapi.HTTPException(status_code=403, detail='Invalid token.')
def create_search(response: responses.Response,
                  search_input: schemas.search.SearchBase,
                  search_repository: repository.search_repository.
                  SearchRepository = fastapi.Depends(),
                  fs_repo: repository.financing_statement_repository.
                  FinancingStatementRepository = fastapi.Depends(),
                  user: auth.authentication.User = fastapi.Depends(
                      auth.authentication.get_current_user),
                  payment: schemas.payment.Payment = fastapi.Depends(
                      services.payment_service.get_payment)):
    """Submit a new search, persist it, and respond with 201 Created."""
    exact_matches = []
    similar_matches = []
    # Normalize the optional search criteria value.
    raw_value = search_input.criteria.get('value')
    criteria_value = raw_value.strip() if raw_value is not None else None
    if search_input.type == schemas.search.SearchType.REGISTRATION_NUMBER.value:
        fs_event = fs_repo.find_event_by_registration_number(criteria_value)
        if fs_event:
            exact_matches = [fs_event.registration_number]
    search_model = search_repository.create_search(search_input, exact_matches,
                                                  similar_matches, user, payment)
    response.status_code = status.HTTP_201_CREATED
    return search_model
async def all_todos(
    current_user: auth.User = fastapi.Depends(get_current_active_user),
    todo_service: todo.TodoService = fastapi.Depends(
        service_locator.default().todo_service),
) -> typing.List[response.TodoResponse]:
    """Return every todo belonging to the current user as response models."""
    user_todos = todo_service.all(current_user.user_id)
    return list(map(response.TodoResponse.from_domain, user_todos))
def get_log(
    project: str,
    uid: str,
    size: int = -1,
    offset: int = 0,
    auth_verifier: mlrun.api.api.deps.AuthVerifierDep = fastapi.Depends(
        mlrun.api.api.deps.AuthVerifierDep),
    db_session: sqlalchemy.orm.Session = fastapi.Depends(
        mlrun.api.api.deps.get_db_session),
):
    """Return the log of a run as plain text, with the run state in headers.

    The OPA permission query runs first and is expected to raise on denial,
    so the log is only read for authorized callers.
    """
    mlrun.api.utils.clients.opa.Client().query_project_resource_permissions(
        mlrun.api.schemas.AuthorizationResourceTypes.log,
        project,
        uid,
        mlrun.api.schemas.AuthorizationAction.read,
        auth_verifier.auth_info,
    )
    run_state, log = mlrun.api.crud.Logs().get_logs(db_session, project, uid, size, offset)
    headers = {
        "x-mlrun-run-state": run_state,
        # pod_status was changed x-mlrun-run-state in 0.5.3, keeping it here for backwards compatibility (so <0.5.3
        # clients will work with the API)
        # TODO: remove this in 0.7.0
        "pod_status": run_state,
    }
    return fastapi.Response(content=log, media_type="text/plain", headers=headers)
async def update_yearly_todo(
    todo_id: int,
    description: typing.Optional[str] = None,
    start_date: typing.Optional[datetime.date] = None,
    month: typing.Optional[int] = None,
    day: typing.Optional[int] = None,
    note: typing.Optional[str] = None,
    current_user: auth.User = fastapi.Depends(get_current_active_user),
    todo_service: todo.TodoService = fastapi.Depends(
        service_locator.default().todo_service),
) -> response.TodoResponse:
    """Update the provided fields of a yearly todo.

    Only non-None parameters are applied to the stored todo.
    """
    updates: typing.Dict[str, typing.Any] = {}
    if description is not None:
        updates["description"] = description
    if month is not None:
        updates["month"] = month
    if day is not None:
        # Bug fix: this previously assigned `month`, silently clobbering the day.
        updates["day"] = day
    if note is not None:
        updates["note"] = note
    if start_date is not None:
        # NOTE(review): the key here is "date" while the monthly updater uses
        # "start_date" — presumably intentional per todo kind; confirm.
        updates["date"] = start_date
    return update_todo(
        user_id=current_user.user_id,
        todo_id=todo_id,
        todo_service=todo_service,
        updates=updates,
    )
async def delete_todo(
    todo_id: int,
    current_user: auth.User = fastapi.Depends(get_current_active_user),
    todo_service: todo.TodoService = fastapi.Depends(
        service_locator.default().todo_service),
) -> None:
    """Delete the given todo on behalf of the current user."""
    todo_service.delete_todo(todo_id=todo_id, user_id=current_user.user_id)
async def add_daily_todo(
    description: str,
    note: typing.Optional[str] = None,
    start_date: typing.Optional[datetime.date] = None,
    current_user: auth.User = fastapi.Depends(get_current_active_user),
    todo_service: todo.TodoService = fastapi.Depends(
        service_locator.default().todo_service),
) -> response.TodoResponse:
    """Create a daily todo for the current user and return it."""
    # Fall back to "today" / empty note when the caller omits them.
    effective_start = datetime.date.today() if start_date is None else start_date
    effective_note = "" if note is None else note
    new_item = todo.Daily(
        advance_days=0,
        category=core.TodoCategory.Todo,
        date_added=datetime.date.today(),
        date_completed=None,
        description=description,
        note=effective_note,
        start_date=effective_start,
        todo_id=-1,
        user_id=current_user.user_id,
    )
    stored = todo_service.add_todo(user_id=current_user.user_id, todo=new_item)
    return response.TodoResponse.from_domain(stored)
async def update_monthly_todo(
    todo_id: int,
    description: typing.Optional[str] = None,
    advance_days: typing.Optional[int] = None,
    month_day: typing.Optional[int] = None,
    note: typing.Optional[str] = None,
    start_date: typing.Optional[datetime.date] = None,
    current_user: auth.User = fastapi.Depends(get_current_active_user),
    todo_service: todo.TodoService = fastapi.Depends(
        service_locator.default().todo_service),
) -> response.TodoResponse:
    """Update the provided fields of a monthly todo.

    Only non-None parameters are applied. ``month_day`` is capped at 28 so the
    day exists in every month (February included).

    Raises:
        fastapi.HTTPException: 400 when month_day is out of range.
    """
    # Bug fix: range(1, 28) excluded 28, contradicting the error message below;
    # range(1, 29) accepts 1..28 inclusive.
    if month_day and month_day not in range(1, 29):
        raise fastapi.HTTPException(
            status_code=HTTP_400_BAD_REQUEST,
            detail="month_day must be between 1 and 28",
        )
    updates: typing.Dict[str, typing.Any] = {}
    if description is not None:
        updates["description"] = description
    if advance_days is not None:
        updates["advance_days"] = advance_days
    if month_day is not None:
        updates["month_day"] = month_day
    if note is not None:
        updates["note"] = note
    if start_date is not None:
        updates["start_date"] = start_date
    return update_todo(
        user_id=current_user.user_id,
        todo_id=todo_id,
        todo_service=todo_service,
        updates=updates,
    )
async def add_monthly_todo(
    description: str,
    advance_days: int,
    month_day: int,
    note: typing.Optional[str] = None,
    start_date: typing.Optional[datetime.date] = None,
    current_user: auth.User = fastapi.Depends(get_current_active_user),
    todo_service: todo.TodoService = fastapi.Depends(
        service_locator.default().todo_service),
) -> response.TodoResponse:
    """Create a monthly todo for the current user and return it.

    ``month_day`` is capped at 28 so the day exists in every month
    (February included).

    Raises:
        fastapi.HTTPException: 400 when month_day is out of range.
    """
    # Bug fix: range(1, 28) excluded 28, contradicting the error message below;
    # range(1, 29) accepts 1..28 inclusive.
    if month_day not in range(1, 29):
        raise fastapi.HTTPException(
            status_code=HTTP_400_BAD_REQUEST,
            detail="month_day must be between 1 and 28",
        )
    if start_date is None:
        start_date = datetime.date.today()
    if note is None:
        note = ""
    monthly_todo = todo.Monthly(
        advance_days=advance_days,
        category=core.TodoCategory.Todo,
        date_added=datetime.date.today(),
        date_completed=None,
        description=description,
        note=note,
        start_date=start_date,
        todo_id=-1,
        user_id=current_user.user_id,
        month_day=month_day,
    )
    new_todo = todo_service.add_todo(user_id=current_user.user_id, todo=monthly_todo)
    return response.TodoResponse.from_domain(new_todo)
def delete_project(
    name: str,
    deletion_strategy: schemas.DeletionStrategy = fastapi.Header(
        schemas.DeletionStrategy.default(),
        alias=schemas.HeaderNames.deletion_strategy),
    projects_role: typing.Optional[schemas.ProjectsRole] = fastapi.Header(
        None, alias=schemas.HeaderNames.projects_role),
    # TODO: we're in a http request context here, therefore it doesn't make sense that by default it will hold the
    # request until the process will be completed - after UI supports waiting - change default to False
    wait_for_completion: bool = fastapi.Query(True, alias="wait-for-completion"),
    auth_verifier: deps.AuthVerifier = fastapi.Depends(deps.AuthVerifier),
    db_session: Session = fastapi.Depends(deps.get_db_session),
):
    """Delete a project.

    Responds with 202 while the deletion still runs in the background, and
    with 204 once it has completed.
    """
    running_in_background = get_project_member().delete_project(
        db_session,
        name,
        deletion_strategy,
        projects_role,
        auth_verifier.auth_info.session,
        wait_for_completion=wait_for_completion,
    )
    if running_in_background:
        return fastapi.Response(status_code=HTTPStatus.ACCEPTED.value)
    return fastapi.Response(status_code=HTTPStatus.NO_CONTENT.value)
def create_search(response: responses.Response,
                  search_input: schemas.search.SearchBase,
                  search_repository: repository.search_repository.
                  SearchRepository = fastapi.Depends(),
                  fs_repo: repository.financing_statement_repository.
                  FinancingStatementRepository = fastapi.Depends(),
                  user: auth.authentication.User = fastapi.Depends(
                      auth.authentication.get_current_user),
                  search_exec_service: services.search_execution_service.
                  SearchExecutionService = fastapi.Depends(),
                  payment_service: services.payment_service.
                  PaymentService = fastapi.Depends()):
    """Submit and execute a new search."""
    exact_matches = []
    similar_matches = []
    # Normalize the optional search criteria value.
    raw_value = search_input.criteria.get('value')
    criteria_value = raw_value.strip() if raw_value is not None else None
    search_type = search_input.type
    if search_type == schemas.search.SearchType.REGISTRATION_NUMBER.value:
        match = search_exec_service.find_latest_event_number_for_registration_number(
            criteria_value)
        exact_matches = [match] if match else []
    elif search_type == schemas.search.SearchType.MHR_NUMBER.value:
        exact_matches = search_exec_service.find_latest_event_numbers_for_mhr_number(
            criteria_value)
    payment = payment_service.create_payment(
        services.payment_service.FilingCode.SEARCH)
    search_model = search_repository.create_search(search_input, exact_matches,
                                                  similar_matches, user, payment)
    response.status_code = status.HTTP_201_CREATED
    return search_model.as_schema()
async def delete_queue_freeze(
    application: application_mod.Application = fastapi.Depends(  # noqa: B008
        security.get_application
    ),
    queue_name: rules.QueueName = fastapi.Path(  # noqa: B008
        ..., description="The name of the queue"
    ),
    repository_ctxt: context.Repository = fastapi.Depends(  # noqa: B008
        security.get_repository_context
    ),
) -> fastapi.Response:
    """Unfreeze the named queue; 404 when it does not exist or is not frozen."""
    queue_freeze = freeze.QueueFreeze(
        repository=repository_ctxt,
        name=queue_name,
        application_name=application.name,
        application_id=application.id,
    )
    deleted = await queue_freeze.delete()
    if deleted:
        return fastapi.Response(status_code=HTTP_204_NO_CONTENT)
    raise fastapi.HTTPException(
        status_code=404,
        detail=f'The queue "{queue_name}" does not exist or is not currently frozen.',
    )
def merge(
    ls: dependencies.LoginSession = f.Depends(dependencies.dependency_login_session),
    ss: sqlalchemy.orm.Session = f.Depends(dependencies.dependency_db_session_serializable),
    # NOTE(review): the user-facing description says "genres" but this endpoint merges
    # songs — looks like a copy-paste from a genres endpoint; confirm before changing it.
    song_ids: List[int] = f.Query(..., description="The ids of the genres to merge."),
):
    """
    Move the layers of all the specified songs into a single one, which will have the metadata of the first song
    specified.
    """
    if len(song_ids) < 2:
        raise f.HTTPException(400, "Not enough songs specified")
    # Get the first song: it is the merge target and keeps its metadata
    main_song = ss.query(tables.Song).get(song_ids[0])
    ls.log("song.merge.to", obj=main_song.id)
    # Get the other songs
    other_songs = ss.query(tables.Song).filter(tables.Song.id.in_(song_ids[1:])).all()
    # Move every layer onto the target song, then delete the now-empty songs
    for merged_song in other_songs:
        for layer in merged_song.layers:
            layer.song = main_song
        ls.log("song.merge.from", obj=merged_song.id)
        ss.delete(merged_song)
    ss.commit()
    ss.close()
    # Persist the audit-log entries recorded on the login session
    ls.session.commit()
    return f.Response(status_code=204)
def get_project(
    name: str,
    db_session: Session = fastapi.Depends(deps.get_db_session),
    auth_verifier: deps.AuthVerifier = fastapi.Depends(deps.AuthVerifier),
):
    """Return the named project."""
    session_token = auth_verifier.auth_info.session
    return get_project_member().get_project(db_session, name, session_token)
def lfg_post(
    *,
    ls: LoginSession = f.Depends(dep_loginsession),
    session: so.Session = f.Depends(database.DatabaseSession),
    user: t.Optional[str] = f.Query(None, description="The user on behalf of which you are acting."),
    data: models.AnnouncementEditable = f.Body(..., description="The data of the LFG you are creating."),
):
    """
    Create a new LFG with the passed data.

    Requires the `create:lfg` scope, or the `create:lfg_sudo` scope if you're creating a LFG on behalf of another
    user.
    """
    if "create:lfg" not in ls.cu.permissions:
        raise f.HTTPException(403, "Missing `create:lfg` scope.")
    # Default to acting as the logged-in user when no user is specified.
    if user is None:
        user = ls.cu.sub
    # Acting on behalf of someone else additionally requires the sudo scope.
    if "create:lfg_sudo" not in ls.cu.permissions and user != ls.cu.sub:
        raise f.HTTPException(403, "Missing `create:lfg_sudo` scope.")
    # noinspection PyArgumentList
    lfg = database.Announcement(**data.dict(), creator_id=user)
    session.add(lfg)
    session.commit()
    # Side effects after commit: wake the planner and broadcast the creation event.
    planned_event.set()
    send_message(session, models.EventAnnouncement(
        type="create",
        announcement=models.AnnouncementFull.from_orm(lfg),
    ).json())
    return lfg
async def post_message(
    message: dto_models.ReceivedMessage,
    auth: dto_models.AuthUser = fastapi.Depends(refs.UserAuthProto),
    database: sql_api.DatabaseHandler = fastapi.Depends(refs.DatabaseProto),
    metadata: utilities.Metadata = fastapi.Depends(utilities.Metadata),
) -> fastapi.responses.Response:
    """Store a new message for the authenticated user.

    Returns the stored message as JSON, with Location/Content-Location headers
    pointing at its private URI. Responds 400 on database data errors.
    """
    # Translate the relative expire_after into an absolute UTC expiry timestamp.
    expire_at: typing.Optional[datetime.datetime] = None
    if message.expire_after:
        expire_at = datetime.datetime.now(
            tz=datetime.timezone.utc) + message.expire_after
    try:
        result = await database.set_message(
            expire_at=expire_at,
            is_transient=message.is_transient,
            text=message.text,
            title=message.title,
            user_id=auth.id,
        )
    except sql_api.DataError as exc:
        # `from None` keeps the internal DataError out of the exception chain.
        raise fastapi.exceptions.HTTPException(400, detail=str(exc)) from None
    response = dto_models.Message.from_orm(result)
    response.with_paths(metadata, recursive=False)
    uri = metadata.message_private_uri(response.id)
    return fastapi.responses.Response(response.json(), headers={
        LOCATION: uri,
        CONTENT_LOCATION: uri
    }, media_type=JSON)
async def user_auth_message(
    message_id: uuid.UUID = fastapi.Path(...),
    auth: dto_models.AuthUser = fastapi.Depends(refs.UserAuthProto),
    database: sql_api.DatabaseHandler = fastapi.Depends(refs.DatabaseProto),
) -> dao_protos.Message:
    """Fetch a message owned by the authenticated user.

    NOTE(review): when no message matches, control falls off the end and this
    returns None despite the Message return annotation — presumably a 404
    should be raised here, or the rest of the function lies outside this
    chunk; confirm against the full source.
    """
    if stored_message := await database.get_message(message_id, auth.id):
        return stored_message
def delete_runtime_resources(
    project: str,
    label_selector: typing.Optional[str] = fastapi.Query(
        None, alias="label-selector"),
    kind: typing.Optional[str] = None,
    object_id: typing.Optional[str] = fastapi.Query(None, alias="object-id"),
    force: bool = False,
    grace_period: int = fastapi.Query(
        mlrun.mlconf.runtime_resources_deletion_grace_period, alias="grace-period"),
    auth_info: mlrun.api.schemas.AuthInfo = fastapi.Depends(
        mlrun.api.api.deps.authenticate_request),
    db_session: sqlalchemy.orm.Session = fastapi.Depends(
        mlrun.api.api.deps.get_db_session),
):
    """Delete a project's runtime resources, filtered by the query parameters.

    Thin endpoint wrapper: all the work is delegated to
    ``_delete_runtime_resources``.
    """
    return _delete_runtime_resources(
        db_session,
        auth_info,
        project,
        label_selector,
        kind,
        object_id,
        force,
        grace_period,
    )
async def refresh_pull(
    owner: github_types.GitHubLogin,
    repo_name: github_types.GitHubRepositoryName,
    pull_request_number: github_types.GitHubPullRequestNumber,
    action: github_types.GitHubEventRefreshActionType = "user",
    redis_cache: utils.RedisCache = fastapi.Depends(  # noqa: B008
        redis.get_redis_cache),
    redis_stream: utils.RedisStream = fastapi.Depends(  # noqa: B008
        redis.get_redis_stream),
) -> responses.Response:
    """Queue a refresh for a single pull request."""
    # Validate the action against the schema before queuing anything.
    action = RefreshActionSchema(action)
    async with github.aget_client(owner_name=owner) as http_client:
        try:
            repo_data = await http_client.item(f"/repos/{owner}/{repo_name}")
        except http.HTTPNotFound:
            return responses.JSONResponse(
                status_code=404, content="repository not found")
    await utils.send_refresh(
        redis_cache,
        redis_stream,
        repo_data,
        pull_request_number=pull_request_number,
        action=action,
    )
    return responses.Response("Refresh queued", status_code=202)
def patch_project(
    project: dict,
    name: str,
    patch_mode: mlrun.api.schemas.PatchMode = fastapi.Header(
        mlrun.api.schemas.PatchMode.replace,
        alias=mlrun.api.schemas.HeaderNames.patch_mode,
    ),
    # TODO: we're in a http request context here, therefore it doesn't make sense that by default it will hold the
    # request until the process will be completed - after UI supports waiting - change default to False
    wait_for_completion: bool = fastapi.Query(True, alias="wait-for-completion"),
    auth_verifier: mlrun.api.api.deps.AuthVerifierDep = fastapi.Depends(
        mlrun.api.api.deps.AuthVerifierDep),
    db_session: sqlalchemy.orm.Session = fastapi.Depends(
        mlrun.api.api.deps.get_db_session),
):
    """Patch a project.

    Returns the patched project, or a 202 response while the patch still runs
    in the background.
    """
    patched_project, running_in_background = get_project_member().patch_project(
        db_session,
        name,
        project,
        patch_mode,
        auth_verifier.auth_info.projects_role,
        auth_verifier.auth_info.session,
        wait_for_completion=wait_for_completion,
    )
    if not running_in_background:
        return patched_project
    return fastapi.Response(status_code=http.HTTPStatus.ACCEPTED.value)
async def refresh_branch(
    owner: github_types.GitHubLogin,
    repo_name: github_types.GitHubRepositoryName,
    branch: str,
    redis_cache: utils.RedisCache = fastapi.Depends(  # noqa: B008
        redis.get_redis_cache
    ),
    redis_stream: utils.RedisStream = fastapi.Depends(  # noqa: B008
        redis.get_redis_stream
    ),
) -> responses.Response:
    """Queue a refresh for the given branch of the repository."""
    async with github.aget_client(owner_name=owner) as http_client:
        try:
            repo_data = await http_client.item(f"/repos/{owner}/{repo_name}")
        except http.HTTPNotFound:
            return responses.JSONResponse(
                status_code=404, content="repository not found"
            )
    branch_ref = github_types.GitHubRefType(f"refs/heads/{branch}")
    await github_events.send_refresh(
        redis_cache,
        redis_stream,
        repo_data,
        ref=branch_ref,
    )
    return responses.Response("Refresh queued", status_code=202)
async def new_team(
        schema: NewTeam,
        user: ijik.Registrant = fastapi.Depends(editor.get_auth),
        db: Session = fastapi.Depends(self.sessionmanager.get_session)):
    """Create a team owned by the authenticated registrant and return it."""
    created = ijik.Team(registrant=user, **schema.dict())
    self.entitymanager.session(db).add(created)
    return TeamInfo.from_orm(created)
async def get_shared_message(
    _: dto_models.LinkAuth = fastapi.Depends(refs.LinkAuthProto),
    message_id: uuid.UUID = fastapi.Path(...),
    database: sql_api.DatabaseHandler = fastapi.Depends(refs.DatabaseProto),
    metadata: utilities.Metadata = fastapi.Depends(utilities.Metadata),
) -> dto_models.Message:
    """Fetch a message through a share link (link auth, no owner check).

    NOTE(review): when the message does not exist, control falls off the end
    and this returns None despite the Message annotation — presumably a 404
    belongs here, or the remainder of the function lies outside this chunk;
    confirm against the full source.
    """
    if message := await database.get_message(message_id):
        return await get_message(message, database, metadata)
async def get_message_views(
    message: dao_protos.Message = fastapi.Depends(user_auth_message),
    database: sql_api.DatabaseHandler = fastapi.Depends(refs.DatabaseProto),
) -> list[dto_models.View]:
    """Return the views recorded for the given message.

    NOTE(review): only the empty-result short-circuit is visible here — when
    views DO exist, control falls off the end (returning None); the conversion
    of `view_daos` into DTOs appears to be truncated from this chunk. Confirm
    against the full source before relying on this block.
    """
    # Avoid unnecessary extra-lookups if there's no views
    if not (view_daos := list(await database.iter_views().filter(
            "eq", ("message_id", message.id)))):
        return []
async def new_member(
        schema: NewMember,
        user: ijik.Registrant = fastapi.Depends(editor.get_auth),
        db: Session = fastapi.Depends(self.sessionmanager.get_session)):
    """Create a member owned by the authenticated registrant and return it."""
    created = ijik.Member(registrant=user, **schema.dict())
    self.entitymanager.session(db).add(created)
    return MemberInfo.from_orm(created)
async def delete_member(
        id: int,
        user: ijik.Registrant = fastapi.Depends(editor.get_auth),
        db: Session = fastapi.Depends(self.sessionmanager.get_session)):
    """Delete one of the registrant's own members.

    Filtering by registrant_id prevents deleting another registrant's member.
    The `or abort(404)` idiom relies on `abort` raising for the missing case —
    presumably it does (flask-style helper); confirm in the full source.
    """
    member = db.query(ijik.Member).filter_by(
        id=id, registrant_id=user.id).one_or_none() or abort(404)
    self.entitymanager.session(db).delete(member)
async def update_user(
        schema: UpdateUser,
        user: ijik.Registrant = fastapi.Depends(editor.get_auth),
        db: Session = fastapi.Depends(self.sessionmanager.get_session)):
    """Apply the non-None fields of the schema to the authenticated user."""
    changes = filter_none(schema.dict())
    self.entitymanager.session(db).update(user, **changes)
    return UserInfo.from_orm(user)