Example 1
def delete(job_id: int, user_id: int, workspace_id: Optional[int] = None):
    job = get(job_id, user_id, workspace_id)

    remove_job_schedule(job_id)
    storage.delete(storage.Type.Job, job_id)

    get_db_session().delete(job)
    db_commit()
    return job_id
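
A minimal caller sketch, assuming the function above is importable from a job_service module; the module name and the ids are illustrative assumptions, not part of the example:

# Hypothetical usage; job_service, the job id and the user id are assumed for illustration.
import job_service

deleted_id = job_service.delete(job_id=42, user_id=7)                  # default workspace
deleted_id = job_service.delete(job_id=42, user_id=7, workspace_id=3)  # explicit workspace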
Example 2
def _create_log_entry(log_msg: str, job_id: int, job_run_id: int,
                      user_id: int):
    now = datetime.utcnow()
    job_run_log = JobRunLog(job_run_id=job_run_id,
                            timestamp=now,
                            message=log_msg)

    send_update(
        'logs', {
            'job_id': str(job_id),
            'job_run_id': str(job_run_id),
            'message': log_msg,
            'timestamp': str(now)
        }, user_id)

    get_db_session().add(job_run_log)
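
A usage sketch: the helper only stages the JobRunLog on the session, so the caller commits afterwards (as _trigger_job_run does in Example 4 below). db_commit comes from the surrounding codebase and the ids are illustrative assumptions:

# Hypothetical caller; db_commit is assumed from the surrounding codebase.
for line in ["starting job", "job finished"]:
    _create_log_entry(line, job_id=42, job_run_id=101, user_id=7)
db_commit()  # persist the staged JobRunLog rows in one transaction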
Example 3
def get_user_id_from_job(job_id: int):
    job = get_db_session().query(Job).filter_by(id=job_id).one_or_none()

    if job is None:
        raise JobNotFoundException("Job Not Found")
    return job.user_id
Example 4
def _trigger_job_run(job: Job, trigger_type: str,
                     user_id: int) -> Optional[int]:
    job_run = JobRun(job_id=job.id, type=trigger_type)
    get_db_session().add(job_run)
    db_commit()  # we need to have an id generated before we start writing logs

    send_update(
        'status', {
            'job_id': str(job.id),
            'job_run_id': str(job_run.id),
            'status': job_run.status
        }, user_id)

    job_entrypoint = job.entrypoint or constants.DEFAULT_ENTRYPOINT
    job_requirements = job.requirements or constants.DEFAULT_REQUIREMENTS

    path_to_job_files = storage.get_path_to_files(storage.Type.Job, job.id)

    # Stream the executor output into per-line log entries, then read the exit code.
    try:
        with executor.execute(path_to_job_files, job_entrypoint,
                              job.get_parameters_as_dict(), job_requirements,
                              _generate_container_name(str(
                                  job.id), user_id)) as executor_result:
            logs, get_exit_code = executor_result.output, executor_result.get_exit_code
            for line in logs:
                _create_log_entry(line, job.id, job_run.id, user_id)
            exit_code = get_exit_code()
    except ExecutorBuildException as exc:
        # A build failure is surfaced as a single log line with exit code 1.
        logs, get_exit_code = (el for el in [str(exc)]), lambda: 1
        for line in logs:
            _create_log_entry(line, job.id, job_run.id, user_id)
        exit_code = get_exit_code()

    if exit_code == 0:
        job_run.status = JobRunStatus.Ok.value
    else:
        job_run.status = JobRunStatus.Failed.value
    db_commit()

    send_update(
        'status', {
            'job_id': str(job.id),
            'job_run_id': str(job_run.id),
            'status': job_run.status
        }, user_id)

    return exit_code
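
The function relies on executor.execute returning a context manager whose result exposes an output iterable of log lines and a get_exit_code callable. A minimal stand-in illustrating that contract; everything except the output and get_exit_code names is an assumption:

# Hypothetical stand-in satisfying the contract _trigger_job_run consumes.
from contextlib import contextmanager

class _FakeExecutorResult:
    def __init__(self, lines, exit_code):
        self.output = iter(lines)                # iterable of log lines
        self.get_exit_code = lambda: exit_code   # callable returning the exit status

@contextmanager
def execute(path, entrypoint, parameters, requirements, container_name):
    # A real executor would build and run the container here.
    yield _FakeExecutorResult(["hello from the job"], 0)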
Example 5
def get(job_id: int, user_id: int, workspace_id: Optional[int] = None) -> Job:
    if not workspace_id:
        workspace_id = workspace_service.get_default_workspace(user_id).id
    job = get_db_session().query(Job).filter_by(
        id=job_id, user_id=user_id, workspace_id=workspace_id).one_or_none()

    if job is None:
        raise JobNotFoundException("Job Not Found")
    return job
Example 6
def get_job_by_name(name: str,
                    user_id: int,
                    workspace_id: Optional[int] = None):
    if not workspace_id:
        workspace_id = workspace_service.get_default_workspace(user_id).id
    job = get_db_session().query(Job).filter_by(
        name=name, user_id=user_id, workspace_id=workspace_id).one_or_none()
    if not job:
        raise JobNotFoundException("Job Not Found")
    return job
Example 7
def get_user_info():
    email = session['profile']['email']
    try:
        user = User.get_user_from_email(email, get_db_session())
        if user:
            return jsonify({'email': user.email, 'api_key': user.api_key}), 200
        return jsonify({'message': 'Unable to find a user'}), 404
    except Exception as e:
        logging.exception(e)
        logging.exception(request.data.decode('utf-8'))
        return jsonify({'message': 'Unable to get an api key'}), 500
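
A sketch of how this view might be registered, assuming a Flask app and a session populated by the authentication layer; the route path is an assumption:

# Hypothetical wiring; the route path and app setup are assumptions.
from flask import Flask

app = Flask(__name__)
app.add_url_rule('/api/v1/user', view_func=get_user_info, methods=['GET'])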
Example 8
def _create_job_in_db(name, cron, entrypoint, requirements, user_id,
                      schedule_is_active, workspace_id):
    job_attributes = {
        'name': name,
        'user_id': user_id,
        'entrypoint': entrypoint,
        'requirements': requirements,
        'workspace_id': workspace_id
    }
    if cron:
        aws_cron, human_cron = parse_cron(cron)
        job_attributes.update({
            "cron": cron,
            "aws_cron": aws_cron,
            "human_cron": human_cron,
            "schedule_is_active": schedule_is_active
        })

    job = Job(**job_attributes)
    get_db_session().add(job)
    return job
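
A hypothetical call; parse_cron, the Job model and the session helpers come from the surrounding codebase, and committing is left to the caller (see publish in Example 10):

# Hypothetical call; the argument values are illustrative.
job = _create_job_in_db(name="nightly-report",
                        cron="0 2 * * *",
                        entrypoint="main.py",
                        requirements="requirements.txt",
                        user_id=7,
                        schedule_is_active=True,
                        workspace_id=3)
db_commit()  # the helper only adds the Job to the session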
Example 9
def add_parameters_to_job(job_id: int, user_id: int,
                          parameters: List[Tuple[str, Optional[str]]]):
    job = get(job_id, user_id)
    if len(list(job.parameters)) + len(parameters) > PARAMETERS_LIMIT_PER_JOB:
        raise JobsParametersLimitExceededException(
            f"You cannot have more than {PARAMETERS_LIMIT_PER_JOB} "
            f"Parameters per single Job.")
    for key, value in parameters:
        parameter = JobParameter(job_id=job.id, key=key, value=value)
        get_db_session().add(parameter)
    try:
        db_commit()
    except IntegrityError as e:
        # Potential duplicate Key value. Let's check.
        existing_keys = {parameter.key for parameter in job.parameters}
        new_keys = {key for key, _ in parameters}
        duplicate_keys = set.intersection(existing_keys, new_keys)
        if len(duplicate_keys) > 0:
            raise DuplicateParameterKeyException(
                "Parameter with the same Key already exists.")
        else:
            raise e
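
A hypothetical call showing the expected parameters format, a list of (key, value) tuples where value may be None; the ids and values are illustrative:

# Hypothetical usage; the job id, user id and parameter values are illustrative.
add_parameters_to_job(job_id=42, user_id=7,
                      parameters=[("API_TOKEN", "secret"), ("DRY_RUN", None)])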
Example 10
def publish(name: str,
            cron: str,
            entrypoint: str,
            requirements: str,
            user: User,
            project_file: io.BytesIO,
            workspace_id: str,
            schedule_is_active=True):
    existing_job = get_db_session().query(Job).filter_by(
        name=name, user_id=user.id).one_or_none()
    if existing_job:
        job = _update_job(existing_job, cron, entrypoint, requirements)
    else:
        _check_user_quotas_for_job_creation(user)
        job = _create_job_in_db(name, cron, entrypoint, requirements, user.id,
                                schedule_is_active, workspace_id)

    db_commit()
    job.schedule_job()

    storage.save(project_file, storage.Type.Job, job.id)

    return job, bool(existing_job)
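
A hypothetical call sketch; the project archive, the User instance (current_user) and the workspace id are illustrative assumptions:

# Hypothetical usage; current_user is assumed to be a User instance.
import io

with open("project.tar.gz", "rb") as fh:
    archive = io.BytesIO(fh.read())

job, already_existed = publish(name="nightly-report",
                               cron="0 2 * * *",
                               entrypoint="main.py",
                               requirements="requirements.txt",
                               user=current_user,
                               project_file=archive,
                               workspace_id="3",
                               schedule_is_active=True)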