Example #1
def cache_job(self):
    """Cache a job."""
    workflow = Session.query(Workflow).filter_by(
        id_=self.workflow_uuid).one_or_none()
    access_times = calculate_file_access_time(workflow.workspace_path)
    prepared_job_cache = JobCache()
    prepared_job_cache.job_id = self.job_id
    prepared_job_cache.access_times = access_times
    Session.add(prepared_job_cache)
    Session.commit()
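
For reference, calculate_file_access_time maps every file in the workspace to its last access time; a minimal sketch of such a helper (the actual REANA implementation may differ) could be:

import os


def calculate_file_access_time(workspace_path):
    """Map each file in the workspace to its last access time (sketch)."""
    access_times = {}
    for root, _, files in os.walk(workspace_path):
        for name in files:
            file_path = os.path.join(root, name)
            access_times[file_path] = os.path.getatime(file_path)
    return access_times
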
def test_workspace_deletion(app, session, default_user,
                            sample_yadage_workflow_in_db,
                            tmp_shared_volume_path, workspace, hard_delete):
    """Test workspace deletion."""
    workflow = sample_yadage_workflow_in_db
    create_workflow_workspace(sample_yadage_workflow_in_db.get_workspace())
    absolute_workflow_workspace = os.path.join(tmp_shared_volume_path,
                                               workflow.get_workspace())

    # create a job for the workflow
    workflow_job = Job(id_=uuid.uuid4(), workflow_uuid=workflow.id_)
    job_cache_entry = JobCache(job_id=workflow_job.id_)
    session.add(workflow_job)
    session.add(job_cache_entry)
    session.commit()

    # create cached workspace
    cache_dir_path = os.path.abspath(
        os.path.join(absolute_workflow_workspace, os.pardir, 'archive',
                     str(workflow_job.id_)))
    os.makedirs(cache_dir_path)

    # check that the workflow workspace exists
    assert os.path.exists(absolute_workflow_workspace)
    assert os.path.exists(cache_dir_path)
    _delete_workflow(workflow, hard_delete=hard_delete, workspace=workspace)
    if hard_delete or workspace:
        assert not os.path.exists(absolute_workflow_workspace)

    # check that all cache entries for jobs
    # of the deleted workflow are removed
    cache_entries_after_delete = JobCache.query.filter_by(
        job_id=workflow_job.id_).all()
    assert not cache_entries_after_delete
    assert not os.path.exists(cache_dir_path)
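
The workspace and hard_delete arguments of the test above are presumably supplied by parametrization; a typical pytest setup driving all four combinations (a sketch, not the project's actual test configuration) would be:

import pytest


@pytest.mark.parametrize("hard_delete", [True, False])
@pytest.mark.parametrize("workspace", [True, False])
def test_workspace_deletion(app, session, default_user,
                            sample_yadage_workflow_in_db,
                            tmp_shared_volume_path, workspace, hard_delete):
    ...
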
def test_workspace_deletion(app,
                            session,
                            default_user,
                            yadage_workflow_with_name,
                            tmp_shared_volume_path,
                            workspace,
                            hard_delete):
    """Test workspace deletion."""
    with app.test_client() as client:
        res = client.post(url_for('api.create_workflow'),
                          query_string={
                              "user": default_user.id_},
                          content_type='application/json',
                          data=json.dumps(yadage_workflow_with_name))
        assert res.status_code == 201
        response_data = json.loads(res.get_data(as_text=True))

        workflow = Workflow.query.filter(
            Workflow.id_ == response_data.get('workflow_id')).first()
        assert workflow

        absolute_workflow_workspace = os.path.join(
            tmp_shared_volume_path,
            workflow.get_workspace())

        # create a job for the workflow
        workflow_job = Job(id_=uuid.uuid4(), workflow_uuid=workflow.id_)
        job_cache_entry = JobCache(job_id=workflow_job.id_)
        session.add(workflow_job)
        session.add(job_cache_entry)
        session.commit()

        # check that the workflow workspace exists
        assert os.path.exists(absolute_workflow_workspace)
        with app.test_client() as client:
            res = client.put(
                url_for('api.set_workflow_status',
                        workflow_id_or_name=workflow.id_),
                query_string={
                    'user': default_user.id_,
                    'status': 'deleted'
                },
                content_type='application/json',
                data=json.dumps({'hard_delete': hard_delete,
                                 'workspace': workspace}))
        if hard_delete or workspace:
            assert not os.path.exists(absolute_workflow_workspace)

        # check that all cache entries for jobs
        # of the deleted workflow are removed
        cache_entries_after_delete = JobCache.query.filter_by(
            job_id=workflow_job.id_).all()
        assert not cache_entries_after_delete
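
Outside the Flask test client, the same deletion can be triggered with a plain HTTP call; a hedged equivalent using the requests library (host, port, and route are assumptions derived from the api.set_workflow_status endpoint name) is:

import json

import requests

# Assumed controller address and route; adjust to the actual deployment.
response = requests.put(
    "http://localhost:5000/api/workflows/{id}/status".format(
        id="cdcf48b1-c2f3-4693-8230-b066e088c6ac"),
    params={"user": "00000000-0000-0000-0000-000000000000",
            "status": "deleted"},
    headers={"Content-Type": "application/json"},
    data=json.dumps({"hard_delete": True, "workspace": True}),
)
print(response.status_code, response.json())
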
def test_workspace_deletion(
    mock_update_user_quota,
    mock_update_workflow_quota,
    app,
    session,
    default_user,
    sample_yadage_workflow_in_db,
    workspace,
):
    """Test workspace deletion."""
    workflow = sample_yadage_workflow_in_db
    create_workflow_workspace(sample_yadage_workflow_in_db.workspace_path)

    # Add a file to the workspace
    file_size = 123
    file_path = os.path.join(sample_yadage_workflow_in_db.workspace_path, "temp.txt")
    with open(file_path, "w") as f:
        f.write("A" * file_size)

    # Get disk usage
    disk_usage = get_disk_usage_or_zero(sample_yadage_workflow_in_db.workspace_path)
    assert disk_usage

    # Update disk quotas
    store_workflow_disk_quota(sample_yadage_workflow_in_db)
    update_users_disk_quota(sample_yadage_workflow_in_db.owner)

    # create a job for the workflow
    workflow_job = Job(id_=uuid.uuid4(), workflow_uuid=workflow.id_)
    job_cache_entry = JobCache(job_id=workflow_job.id_)
    session.add(workflow_job)
    session.add(job_cache_entry)
    session.commit()

    # create cached workspace
    cache_dir_path = os.path.join(
        sample_yadage_workflow_in_db.workspace_path,
        "..",
        "archive",
        str(workflow_job.id_),
    )

    os.makedirs(cache_dir_path)

    # check that the workflow workspace exists
    assert os.path.exists(sample_yadage_workflow_in_db.workspace_path)
    assert os.path.exists(cache_dir_path)
    delete_workflow(workflow, workspace=workspace)
    if workspace:
        assert not os.path.exists(sample_yadage_workflow_in_db.workspace_path)
        mock_update_user_quota.assert_called_once_with(
            sample_yadage_workflow_in_db.owner,
            bytes_to_sum=-disk_usage,
            override_policy_checks=True,
        )
        mock_update_workflow_quota.assert_called_once_with(
            sample_yadage_workflow_in_db,
            bytes_to_sum=-disk_usage,
            override_policy_checks=True,
        )
    else:
        assert not mock_update_user_quota.called
        assert not mock_update_workflow_quota.called

    # check that all cache entries for jobs
    # of the deleted workflow are removed
    cache_entries_after_delete = JobCache.query.filter_by(job_id=workflow_job.id_).all()
    assert not cache_entries_after_delete
    assert not os.path.exists(cache_dir_path)
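
The mock_update_user_quota and mock_update_workflow_quota arguments imply the test above is wrapped in stacked unittest.mock.patch decorators; a sketch with assumed patch targets (the real dotted paths live in the workflow-controller code) is:

from unittest.mock import patch

import pytest


@pytest.mark.parametrize("workspace", [True, False])
# Stacked @patch decorators inject mocks bottom-up, so the bottom one
# becomes the first mock argument; the dotted paths are illustrative.
@patch("reana_workflow_controller.rest.utils.update_workflow_disk_quota")
@patch("reana_workflow_controller.rest.utils.update_users_disk_quota")
def test_workspace_deletion(
    mock_update_user_quota,
    mock_update_workflow_quota,
    app,
    session,
    default_user,
    sample_yadage_workflow_in_db,
    workspace,
):
    ...
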
Example #5
def create_job():  # noqa
    r"""Create a new job.

    ---
    post:
      summary: Creates a new job.
      description: >-
        This resource is expecting JSON data with all the necessary information
        of a new job.
      operationId: create_job
      consumes:
       - application/json
      produces:
       - application/json
      parameters:
       - name: job
         in: body
         description: Information needed to instantiate a Job
         required: true
         schema:
           $ref: '#/definitions/JobRequest'
      responses:
        201:
          description: Request succeeded. The job has been launched.
          schema:
            type: object
            properties:
              job_id:
                type: string
          examples:
            application/json:
              {
                "job_id": "cdcf48b1-c2f3-4693-8230-b066e088c6ac"
              }
        400:
          description: >-
            Request failed. The incoming data specification seems malformed.
        500:
          description: >-
            Request failed. Internal controller error. The job could probably
            not have been allocated.
    """
    json_data = request.get_json()
    if not json_data:
        return jsonify({'message': 'Empty request'}), 400

    # Validate and deserialize input
    job_request, errors = job_request_schema.load(json_data)
    if errors:
        return jsonify(errors), 400
    job_parameters = dict(
        job_id=str(job_request['job_id']),
        workflow_workspace=str(job_request['workflow_workspace']),
        docker_img=job_request['docker_img'],
        cmd=job_request['cmd'],
        cvmfs_mounts=job_request['cvmfs_mounts'],
        env_vars=job_request['env_vars'],
        shared_file_system=job_request['shared_file_system'],
        job_type=job_request.get('job_type'))
    job_obj = k8s_instantiate_job(**job_parameters)
    if job_obj:
        job = copy.deepcopy(job_request)
        job['status'] = 'started'
        job['restart_count'] = 0
        job['max_restart_count'] = 3
        job['deleted'] = False
        job['obj'] = job_obj
        JOB_DB[str(job['job_id'])] = job

        job_db_entry = JobTable(
            id_=job['job_id'],
            workflow_uuid=None,
            # The workflow_uuid is populated by the workflow-controller
            status=job['status'],
            job_type=job_request.get('job_type'),
            cvmfs_mounts=job_request['cvmfs_mounts'],
            shared_file_system=job_request['shared_file_system'],
            docker_img=job_request['docker_img'],
            experiment=job_request['experiment'],
            cmd=job_request['cmd'],
            env_vars=json.dumps(job_request['env_vars']),
            restart_count=job['restart_count'],
            max_restart_count=job['max_restart_count'],
            deleted=job['deleted'],
            name=job_request['job_name'],
            prettified_cmd=job_request['prettified_cmd'])
        Session.add(job_db_entry)
        Session.commit()
        access_times = calculate_file_access_time(
            json_data['workflow_workspace'])
        prepared_job_cache = JobCache()
        prepared_job_cache.job_id = job['job_id']
        prepared_job_cache.access_times = access_times
        Session.add(prepared_job_cache)
        Session.commit()

        return jsonify({'job_id': job['job_id']}), 201
    else:
        return jsonify({'job': 'Could not be allocated'}), 500
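
For completeness, a minimal client call against this endpoint; the route and the payload fields are assumptions modelled on the JobRequest handling above, not a verified deployment:

import requests

# Illustrative JobRequest payload; all values are placeholders.
job_spec = {
    "job_id": "cdcf48b1-c2f3-4693-8230-b066e088c6ac",
    "workflow_workspace": "/var/reana/users/00000000/workflows/1",
    "docker_img": "busybox",
    "cmd": "echo hello",
    "prettified_cmd": "echo hello",
    "job_name": "hello",
    "cvmfs_mounts": "false",
    "env_vars": {},
    "shared_file_system": True,
    "job_type": "kubernetes",
    "experiment": "default",
}

# Assumed route for the create_job endpoint.
response = requests.post("http://localhost:5000/jobs", json=job_spec)
if response.status_code == 201:
    print("Job launched:", response.json()["job_id"])
else:
    print("Job creation failed:", response.status_code, response.text)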