async def job_submissions_agent_pending(
    token_payload: TokenPayload = Depends(guard.lockdown(Permissions.JOB_SUBMISSIONS_VIEW)),
):
    """
    Get a list of pending job submissions for the cluster-agent.
    """
    identity_claims = IdentityClaims.from_token_payload(token_payload)
    if identity_claims.cluster_id is None:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=
            "Access token does not contain a `cluster_id`. Cannot fetch pending submissions",
        )

    logger.info(
        f"Fetching newly created job_submissions for cluster_id: {identity_claims.cluster_id}"
    )

    query = (
        select(
            columns=[
                job_submissions_table.c.id,
                job_submissions_table.c.job_submission_name,
                job_submissions_table.c.job_submission_owner_email,
                job_scripts_table.c.job_script_name,
                job_scripts_table.c.job_script_data_as_string,
                applications_table.c.application_name,
            ]
        )
        .select_from(job_submissions_table.join(job_scripts_table).join(applications_table))
        .where(job_submissions_table.c.status == JobSubmissionStatus.CREATED)
        .where(job_submissions_table.c.cluster_id == identity_claims.cluster_id)
    )

    rows = await database.fetch_all(query)
    return rows
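# A minimal sketch of how a cluster-agent might poll the endpoint above. The base
# URL, route path, and token handling shown here are assumptions, not taken from
# this code.
import httpx


async def fetch_pending_submissions(base_url: str, access_token: str) -> list:
    """Fetch the pending job submissions for the agent's cluster."""
    async with httpx.AsyncClient(base_url=base_url) as client:
        response = await client.get(
            "/jobbergate/job-submissions/agent/pending",  # assumed route path
            headers={"Authorization": f"Bearer {access_token}"},
        )
        response.raise_for_status()
        return response.json()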
async def job_submission_list(
    pagination: Pagination = Depends(),
    all: Optional[bool] = Query(
        None,
        description=
        "If supplied, do not limit job_submissions to only the current user",
    ),
    slurm_job_ids: Optional[str] = Query(
        None,
        description=
        "Comma-separated list of slurm-job-ids to match active job_submissions",
    ),
    submit_status: Optional[JobSubmissionStatus] = Query(
        None,
        description="Limit results to those with matching status",
    ),
    search: Optional[str] = Query(None),
    sort_field: Optional[str] = Query(None),
    sort_ascending: bool = Query(True),
    token_payload: TokenPayload = Depends(
        guard.lockdown(Permissions.JOB_SUBMISSIONS_VIEW)),
):
    """
    List job_submissions for the authenticated user.
    """
    logger.debug("Fetching job submissions")
    identity_claims = IdentityClaims.from_token_payload(token_payload)
    logger.debug(f"Extracted identity claims from token: {identity_claims}")
    query = job_submissions_table.select()

    logger.debug("Building query")
    if submit_status:
        query = query.where(job_submissions_table.c.status == submit_status)

    if not all:
        query = query.where(job_submissions_table.c.job_submission_owner_email
                            == identity_claims.user_email)

    if slurm_job_ids is not None and slurm_job_ids != "":
        try:
            job_ids = [int(i) for i in slurm_job_ids.split(",")]
        except Exception:
            raise HTTPException(
                status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
                detail=
                "Invalid slurm_job_ids param. Must be a comma-separated list of integers",
            )
        query = query.where(job_submissions_table.c.slurm_job_id.in_(job_ids))
    if search is not None:
        query = query.where(search_clause(search, searchable_fields))
    if sort_field is not None:
        query = query.order_by(
            sort_clause(sort_field, sortable_fields, sort_ascending))

    logger.debug(f"Query built as: {render_sql(query)}")

    logger.debug("Awaiting query and response package")
    response = await package_response(JobSubmissionResponse, query, pagination)
    logger.debug(f"Response built as: {response}")
    return response
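# search_clause and sort_clause are helpers defined elsewhere in the project. A
# minimal sketch of what they might look like, assuming searchable_fields and
# sortable_fields are collections of table columns (an assumption based only on
# how they are called above):
from fastapi import HTTPException, status
from sqlalchemy import or_


def search_clause(search_terms: str, searchable_fields):
    """OR together case-insensitive partial matches over every searchable column."""
    return or_(*[
        col.ilike(f"%{term}%")
        for term in search_terms.split()
        for col in searchable_fields
    ])


def sort_clause(sort_field: str, sortable_fields, sort_ascending: bool):
    """Resolve the requested column by name and apply the sort direction."""
    columns_by_name = {col.name: col for col in sortable_fields}
    if sort_field not in columns_by_name:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"Invalid sort field requested: {sort_field}",
        )
    column = columns_by_name[sort_field]
    return column.asc() if sort_ascending else column.desc()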
async def applications_create(
    application: ApplicationCreateRequest,
    token_payload: TokenPayload = Depends(
        guard.lockdown(Permissions.APPLICATIONS_EDIT)),
):
    """
    Create new applications using an authenticated user token.
    """
    identity_claims = IdentityClaims.from_token_payload(token_payload)
    create_dict = dict(
        **application.dict(exclude_unset=True),
        application_owner_email=identity_claims.user_email,
    )

    try:
        insert_query = applications_table.insert().returning(
            applications_table)
        application_data = await database.fetch_one(query=insert_query,
                                                    values=create_dict)

    except INTEGRITY_CHECK_EXCEPTIONS as e:
        raise HTTPException(status_code=status.HTTP_409_CONFLICT,
                            detail=str(e))

    return application_data
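# INTEGRITY_CHECK_EXCEPTIONS is imported from elsewhere in the project. A plausible
# definition (an assumption, not taken from this code) is a tuple of asyncpg
# integrity errors, so unique and foreign-key violations can be mapped to HTTP errors:
from asyncpg.exceptions import ForeignKeyViolationError, UniqueViolationError

INTEGRITY_CHECK_EXCEPTIONS = (UniqueViolationError, ForeignKeyViolationError)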
async def applications_list(
    user: bool = Query(False),
    all: bool = Query(False),
    search: Optional[str] = Query(None),
    sort_field: Optional[str] = Query(None),
    sort_ascending: bool = Query(True),
    pagination: Pagination = Depends(),
    token_payload: TokenPayload = Depends(
        guard.lockdown(Permissions.APPLICATIONS_VIEW)),
):
    """
    List applications. By default, only applications with an identifier are included; pass ``all``
    to include every application, or ``user`` to limit results to the requesting user's applications.
    """
    identity_claims = IdentityClaims.from_token_payload(token_payload)
    query = applications_table.select()
    if user:
        query = query.where(applications_table.c.application_owner_email ==
                            identity_claims.user_email)
    if not all:
        query = query.where(
            not_(applications_table.c.application_identifier.is_(None)))
    if search is not None:
        query = query.where(search_clause(search, searchable_fields))
    if sort_field is not None:
        query = query.order_by(
            sort_clause(sort_field, sortable_fields, sort_ascending))

    return await package_response(ApplicationResponse, query, pagination)
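# package_response is defined elsewhere in the project. A rough sketch of the
# behavior implied by its usage: apply the pagination window to the query, run it,
# and wrap each row in the given response model. The Pagination attribute names
# (start, limit) are assumptions, and the real helper likely also returns paging
# metadata alongside the rows.
async def package_response(response_model, query, pagination):
    if pagination.limit is not None:
        query = query.limit(pagination.limit).offset(pagination.start * pagination.limit)
    rows = await database.fetch_all(query)
    return [response_model.parse_obj(row) for row in rows]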
async def job_script_list(
    pagination: Pagination = Depends(),
    all: Optional[bool] = Query(False),
    search: Optional[str] = Query(None),
    sort_field: Optional[str] = Query(None),
    sort_ascending: bool = Query(True),
    token_payload: TokenPayload = Depends(
        guard.lockdown(Permissions.JOB_SCRIPTS_VIEW)),
):
    """
    List job_scripts for the authenticated user.

    Note::

       Use ``responses`` instead of ``response_model`` to skip a second round of validation and
       serialization. That work already happens in the ``package_response`` method, so ``responses``
       lets FastAPI generate the correct OpenAPI spec without post-processing the response.
    """
    query = job_scripts_table.select()
    identity_claims = IdentityClaims.from_token_payload(token_payload)
    if not all:
        query = query.where(job_scripts_table.c.job_script_owner_email ==
                            identity_claims.user_email)
    if search is not None:
        query = query.where(search_clause(search, searchable_fields))
    if sort_field is not None:
        query = query.order_by(
            sort_clause(sort_field, sortable_fields, sort_ascending))
    return await package_response(JobScriptResponse, query, pagination)
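# The docstring note above refers to how this route is registered. A minimal sketch
# of the intended pattern (the router object and path are assumptions): declaring
# ``responses`` documents the schema in the OpenAPI spec, while omitting
# ``response_model`` skips a second validation/serialization pass on the return value.
from fastapi import APIRouter

router = APIRouter()
router.get(
    "/job-scripts",  # assumed path
    responses={200: {"description": "List of job scripts", "model": JobScriptResponse}},
)(job_script_list)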
async def job_submission_create(
    job_submission: JobSubmissionCreateRequest,
    token_payload: TokenPayload = Depends(
        guard.lockdown(Permissions.JOB_SUBMISSIONS_EDIT)),
):
    """
    Create a new job submission.

    Make a post request to this endpoint with the required values to create a new job submission.
    """
    identity_claims = IdentityClaims.from_token_payload(token_payload)
    cluster_id = job_submission.cluster_id or identity_claims.cluster_id
    if cluster_id is None:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=
            "Could not find a cluster_id in the request body or auth token.",
        )

    create_dict = dict(
        **job_submission.dict(exclude_unset=True),
        job_submission_owner_email=identity_claims.user_email,
        status=JobSubmissionStatus.CREATED,
    )
    if job_submission.cluster_id is None:
        create_dict.update(cluster_id=cluster_id)

    exec_dir = create_dict.pop("execution_directory", None)
    if exec_dir is not None:
        create_dict.update(execution_directory=str(exec_dir))

    select_query = job_scripts_table.select().where(
        job_scripts_table.c.id == job_submission.job_script_id)
    raw_job_script = await database.fetch_one(select_query)

    if not raw_job_script:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"JobScript id={job_submission.job_script_id} not found.",
        )

    async with database.transaction():
        try:
            insert_query = job_submissions_table.insert().returning(
                job_submissions_table)
            job_submission_data = await database.fetch_one(query=insert_query,
                                                           values=create_dict)

        except INTEGRITY_CHECK_EXCEPTIONS as e:
            raise HTTPException(
                status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
                detail=str(e))

    return job_submission_data
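# A sketch of a request body this endpoint accepts, based only on the fields
# referenced above. JobSubmissionCreateRequest may define additional fields, and
# the values shown here are illustrative.
example_job_submission_payload = {
    "job_script_id": 101,                      # must reference an existing job_script row
    "cluster_id": "my-cluster",                # optional; falls back to the token's cluster_id
    "execution_directory": "/home/user/run",   # optional; coerced to str before insert
}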
async def job_submission_agent_update(
    job_submission_id: int,
    new_status: str = Body(..., embed=True),
    slurm_job_id: Optional[int] = Body(None, embed=True),
    token_payload: TokenPayload = Depends(
        guard.lockdown(Permissions.JOB_SUBMISSIONS_EDIT)),
):
    """
    Update a job_submission with a new status.

    Make a put request to this endpoint with the new status to update a job_submission.
    """
    identity_claims = IdentityClaims.from_token_payload(token_payload)
    if identity_claims.cluster_id is None:
        logger.error("Access token does not contain a cluster_id")
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=
            "Access token does not contain a `cluster_id`. Cannot update job_submission",
        )

    logger.info(f"Setting status to: {new_status} "
                f"for job_submission: {job_submission_id} "
                f"on cluster_id: {identity_claims.cluster_id}")

    update_values: Dict[str, Any] = dict(status=new_status)
    if slurm_job_id is not None:
        update_values.update(slurm_job_id=slurm_job_id)

    update_query = (
        job_submissions_table.update()
        .where(job_submissions_table.c.id == job_submission_id)
        .where(job_submissions_table.c.cluster_id == identity_claims.cluster_id)
        .values(**update_values)
        .returning(job_submissions_table)
    )
    result = await database.fetch_one(update_query)

    if result is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=(f"JobSubmission with id={job_submission_id} "
                    "and cluster_id={identity_claims.cluster_id} not found."),
        )

    return result
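# Because both body parameters are declared with ``embed=True``, the agent sends
# them as keys of a single JSON object. A sketch of the call (base URL and route
# path are assumptions):
import httpx


def report_submission_status(base_url: str, access_token: str, job_submission_id: int) -> dict:
    """Report a new status (and optionally the slurm_job_id) for a submission."""
    response = httpx.put(
        f"{base_url}/jobbergate/job-submissions/agent/{job_submission_id}",  # assumed path
        headers={"Authorization": f"Bearer {access_token}"},
        json={"new_status": "SUBMITTED", "slurm_job_id": 12345},
    )
    response.raise_for_status()
    return response.json()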
async def job_submissions_agent_active(
    token_payload: TokenPayload = Depends(guard.lockdown(Permissions.JOB_SUBMISSIONS_VIEW)),
):
    """
    Get a list of active job submissions for the cluster-agent.
    """
    identity_claims = IdentityClaims.from_token_payload(token_payload)
    if identity_claims.cluster_id is None:
        logger.error("Access token does not contain a cluster_id")
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Access token does not contain a `cluster_id`. Cannot fetch active submissions",
        )

    logger.info(
        f"Fetching active job_submissions for cluster_id: {identity_claims.cluster_id}"
    )

    query = (
        job_submissions_table.select()
        .where(job_submissions_table.c.status == JobSubmissionStatus.SUBMITTED)
        .where(job_submissions_table.c.cluster_id == identity_claims.cluster_id)
    )

    rows = await database.fetch_all(query)
    return rows
async def job_script_create(
    job_script: JobScriptCreateRequest,
    token_payload: TokenPayload = Depends(
        guard.lockdown(Permissions.JOB_SCRIPTS_EDIT)),
):
    """
    Create a new job script.

    Make a post request to this endpoint with the required values to create a new job script.
    """
    logger.debug(f"Creating job_script with: {job_script}")
    select_query = applications_table.select().where(
        applications_table.c.id == job_script.application_id)
    raw_application = await database.fetch_one(select_query)

    if not raw_application:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=
            f"Application with id={job_script.application_id} not found.",
        )
    application = ApplicationResponse.parse_obj(raw_application)
    logger.debug("Fetching application tarfile")
    s3_application_tar = get_s3_object_as_tarfile(application.id)

    identity_claims = IdentityClaims.from_token_payload(token_payload)

    create_dict = dict(
        **{
            k: v
            for (k, v) in job_script.dict(exclude_unset=True).items()
            if k != "param_dict"
        },
        job_script_owner_email=identity_claims.user_email,
    )

    # Use application_config from the application as a baseline of defaults
    print("APP CONFIG: ", application.application_config)
    param_dict = safe_load(application.application_config)

    # User supplied param dict is optional and may override defaults
    param_dict.update(**job_script.param_dict)

    logger.debug("Rendering job_script data as string")
    job_script_data_as_string = build_job_script_data_as_string(
        s3_application_tar, param_dict)

    sbatch_params = create_dict.pop("sbatch_params", [])
    create_dict["job_script_data_as_string"] = inject_sbatch_params(
        job_script_data_as_string, sbatch_params)

    logger.debug("Inserting job_script")
    try:
        insert_query = job_scripts_table.insert().returning(job_scripts_table)
        job_script_data = await database.fetch_one(query=insert_query,
                                                   values=create_dict)

    except INTEGRITY_CHECK_EXCEPTIONS as e:
        raise HTTPException(status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
                            detail=str(e))

    logger.debug(f"Created job_script={job_script_data}")
    return job_script_data
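# A small illustration of the param_dict merge performed in job_script_create above:
# the application's application_config supplies defaults, and the user-supplied
# param_dict overrides them. The config keys and values here are purely illustrative.
from yaml import safe_load

application_config = "partition: compute\ntime_limit: 60\n"
param_dict = safe_load(application_config)   # {"partition": "compute", "time_limit": 60}
param_dict.update(partition="debug")         # user-supplied value wins over the default
assert param_dict == {"partition": "debug", "time_limit": 60}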