Example #1
0
def reset_problem_config(
    lakefs_reset: schemas.LakeFSReset,
    problem: models.Problem = Depends(parse_problem),
) -> StandardResponse[Empty]:
    """Reset the problem's LakeFS config branch as described by *lakefs_reset*."""
    LakeFSProblemConfig(problem).reset(lakefs_reset)
    return StandardResponse()
Example #2
0
def get_file_or_directory_info_in_uncommitted_problem_config(
    path: str = Path(...),
    problem: models.Problem = Depends(parse_problem),
) -> StandardResponse[FileInfo]:
    """Return metadata for the file or directory at *path* in the problem's
    uncommitted config branch."""
    config = LakeFSProblemConfig(problem)
    info = config.get_file_info(pathlib.Path(path))
    return StandardResponse(info)
Example #3
0
def delete_file_from_uncommitted_problem_config(
    path: str = Path(...),
    problem: models.Problem = Depends(parse_problem),
) -> StandardResponse[FileInfo]:
    """Delete the file at *path* from the problem's uncommitted config branch
    and return the deleted file's metadata."""
    config = LakeFSProblemConfig(problem)
    return StandardResponse(config.delete_file(pathlib.Path(path)))
Example #4
0
def upload_file_to_problem_config(
    file: UploadFile = File(...),
    problem: models.Problem = Depends(parse_problem),
    path: str = Depends(parse_file_path),
) -> StandardResponse[FileInfo]:
    """Upload *file* to *path* in the problem's uncommitted config branch.

    Returns:
        Metadata of the uploaded file.
    """
    # Parameter indentation normalized to 4 spaces for consistency with the
    # other handlers in this file; behavior is unchanged.
    problem_config = LakeFSProblemConfig(problem)
    file_info = problem_config.upload_file(pathlib.Path(path), file.file)
    return StandardResponse(file_info)
Example #5
0
def update_problem_config_by_archive(
    file: UploadFile = File(...),
    problem: models.Problem = Depends(parse_problem),
) -> StandardResponse[Empty]:
    """Replace the problem's uncommitted config with the contents of the
    uploaded archive *file*.
    """
    # Parameter indentation normalized to 4 spaces for consistency with the
    # other handlers in this file; behavior is unchanged.
    # NOTE(review): UploadFile.filename is Optional[str] — presumably
    # upload_archive tolerates None or the route guarantees a filename;
    # confirm upstream before relying on it.
    logger.info("problem config archive name: %s", file.filename)
    problem_config = LakeFSProblemConfig(problem)
    problem_config.upload_archive(file.filename, file.file)
    return StandardResponse()
Example #6
0
def delete_directory_from_uncommitted_problem_config(
    path: str = Path(...),
    problem: models.Problem = Depends(parse_problem),
    recursive: bool = Query(
        False,
        description="Act as -r in the rm command. "
        "If false, only empty directory can be deleted.",
    ),
) -> StandardResponse[FileInfo]:
    """Delete the directory at *path* from the problem's uncommitted config
    branch; non-empty directories require ``recursive=True``."""
    config = LakeFSProblemConfig(problem)
    deleted = config.delete_directory(pathlib.Path(path), recursive)
    return StandardResponse(deleted)
Example #7
0
async def create_problem(
    problem_create: schemas.ProblemCreate,
    background_tasks: BackgroundTasks,
    domain: models.Domain = Depends(parse_domain_from_auth),
    user: models.User = Depends(parse_user_from_auth),
    session: AsyncSession = Depends(db_session_dependency),
) -> StandardResponse[schemas.ProblemDetail]:
    """Create a new problem (and its containing problem group) in the domain.

    The LakeFS branch for the problem is ensured in a background task so the
    request does not block on object storage.

    Raises:
        Exception: any error from the database commit is logged and re-raised.
    """
    try:
        problem_group = models.ProblemGroup()
        session.sync_session.add(problem_group)
        logger.info(f"problem group created: {problem_group}")
        # NOTE(review): problem_group.id is read before an explicit flush/commit —
        # presumably the model generates its id client-side; confirm, otherwise
        # problem_group_id may be None here.
        problem = models.Problem(
            **problem_create.dict(),
            domain_id=domain.id,
            owner_id=user.id,
            problem_group_id=problem_group.id,
        )
        session.sync_session.add(problem)
        logger.info(f"problem created: {problem}")
        await session.commit()
        await session.refresh(problem)
    except Exception:
        logger.exception(f"problem creation failed: {problem_create}")
        # Bare `raise` preserves the original traceback; `raise e` would
        # re-anchor it at this line.
        raise
    lakefs_problem_config = LakeFSProblemConfig(problem)
    background_tasks.add_task(lakefs_problem_config.ensure_branch)
    return StandardResponse(problem)
Example #8
0
def download_file_in_problem_config(
    path: str = Path(...),
    problem: models.Problem = Depends(parse_problem),
    config: Optional[models.ProblemConfig] = Depends(parse_problem_config),
) -> Any:
    """Stream a single config file: from the committed ref when *config* is
    given, otherwise from the uncommitted branch.

    Returns:
        A StreamingResponse with a Content-Disposition attachment header.
    """
    problem_config = LakeFSProblemConfig(problem)
    # Download from the config's commit when available, else the working branch.
    ref: Optional[str] = config.commit_id if config is not None else None
    file = problem_config.download_file(pathlib.Path(path), ref)
    response = StreamingResponse(file)
    filename = pathlib.Path(path).name
    # Bug fix: the header previously contained a literal placeholder instead of
    # interpolating the computed filename (cf. the archive handler below, which
    # interpolates file_path.name correctly).
    response.content_disposition = f'attachment; filename="{filename}"'
    return response
Example #9
0
def download_problem_config_archive(
    temp_dir: pathlib.Path = Depends(TemporaryDirectory()),
    archive_format: ArchiveType = Query(ArchiveType.zip),
    problem: models.Problem = Depends(parse_problem),
    config: Optional[models.ProblemConfig] = Depends(parse_problem_config),
) -> Any:
    """Build an archive of the problem config via LakeFS and stream it back.

    Uses the committed ref when *config* is given, else the working branch.
    """
    # use lakefs to sync and zip files
    ref: Optional[str] = None if config is None else config.commit_id
    lakefs_config = LakeFSProblemConfig(problem)
    archive_path = lakefs_config.download_archive(temp_dir, archive_format, ref)
    # TODO: cache the archive
    response = StreamingResponse(iter_file(archive_path))
    response.content_disposition = f'attachment; filename="{archive_path.name}"'
    return response
Example #10
0
async def claim_record_by_judge(
    judge_claim: schemas.JudgeClaim,
    record: models.Record = Depends(parse_record_judger),
    user: models.User = Depends(parse_user_from_auth),
) -> StandardResponse[schemas.JudgeCredentials]:
    """Let a judger worker claim *record* and return LakeFS credentials for it.

    The caller must present the record's current celery task_id; a mismatch is
    rejected so only the worker holding the live task can claim the record.
    On success the record is marked "fetched", the judger is granted LakeFS
    access, and repo/commit coordinates plus access keys are returned.

    Raises:
        BizError: if the task_id does not match, or the record has no problem
            or problem config to judge against.
    """
    # task_id can only be obtained by listening to the celery task queue
    # we give the worker with task_id the chance to claim the record
    # celery tasks can be retried, only one worker can hold the task_id at the same time
    # if a rejudge is scheduled, task_id changes, so previous task will be ineffective
    # TODO: we probably need a lock to handle race condition of rejudge and claim
    if record.task_id is None or record.task_id != judge_claim.task_id:
        raise BizError(ErrorCode.Error)
    # if record.state not in (schemas.RecordState.queueing, schemas.RecordState.retrying):
    #     raise BizError(ErrorCode.Error)
    # we can mark task failed if no problem config is available
    if record.problem_config is None or record.problem is None:
        raise BizError(ErrorCode.Error)

    # we always reset the state to "fetched", for both first attempt and retries
    record.judger_id = user.id
    record.state = schemas.RecordState.fetched
    await record.save_model()
    logger.info("judger claim record: {}", record)

    # initialize the permission of the judger to lakefs
    # the user have read access to all problems in the problem group,
    # actually only the access to one branch is necessary,
    # but it will create too many policies, so we grant all for simplicity
    # the user have read/write access to all records in the problem,
    # because the judger will write test result to the repo
    await record.fetch_related("problem")
    access_key = await models.UserAccessKey.get_lakefs_access_key(user)
    lakefs_problem_config = LakeFSProblemConfig(record.problem)
    lakefs_record = LakeFSRecord(record.problem, record)

    # policy grants are synchronous lakefs calls; run them off the event loop
    def sync_func() -> None:
        lakefs_problem_config.ensure_user_policy(user, "read")
        lakefs_record.ensure_user_policy(user, "all")

    await run_in_threadpool(sync_func)

    judge_credentials = schemas.JudgeCredentials(
        access_key_id=access_key.access_key_id,
        secret_access_key=access_key.secret_access_key,
        problem_config_repo_name=lakefs_problem_config.repo_name,
        problem_config_commit_id=record.problem_config.commit_id,
        record_repo_name=lakefs_record.repo_name,
        record_commit_id=record.commit_id,
    )
    return StandardResponse(judge_credentials)
Example #11
0
 def sync_func() -> Commit:
     """Commit the problem config in LakeFS and return the resulting Commit."""
     # `problem` and `commit` are captured from the enclosing scope — this is a
     # nested helper, presumably run in a threadpool; confirm at the call site.
     lakefs_problem_config = LakeFSProblemConfig(problem)
     # manager = Manager(logger, lakefs_problem_config.storage)
     # manager.validate_source()
     return lakefs_problem_config.commit(commit.message)