def test_workspace_deletion(app, session, default_user,
                            sample_yadage_workflow_in_db,
                            tmp_shared_volume_path, workspace, hard_delete):
    """Test workspace deletion."""
    wf = sample_yadage_workflow_in_db
    create_workflow_workspace(wf.get_workspace())
    workspace_abs_path = os.path.join(tmp_shared_volume_path,
                                      wf.get_workspace())

    # Attach one job plus its cache entry to the workflow.
    job = Job(id_=uuid.uuid4(), workflow_uuid=wf.id_)
    cache_entry = JobCache(job_id=job.id_)
    session.add(job)
    session.add(cache_entry)
    session.commit()

    # Build the cached workspace directory that lives next to the
    # workflow workspace, under ``archive/<job id>``.
    archive_dir = os.path.abspath(
        os.path.join(workspace_abs_path, os.pardir, 'archive',
                     str(job.id_)))
    os.makedirs(archive_dir)

    # Both directories must exist before the deletion is triggered.
    assert os.path.exists(workspace_abs_path)
    assert os.path.exists(archive_dir)

    _delete_workflow(wf, hard_delete=hard_delete, workspace=workspace)
    # The workspace directory is gone whenever its removal was requested.
    if hard_delete or workspace:
        assert not os.path.exists(workspace_abs_path)

    # All cache entries for the deleted workflow's jobs are removed,
    # together with the cached workspace directory.
    assert not JobCache.query.filter_by(job_id=job.id_).all()
    assert not os.path.exists(archive_dir)
# Example #2
    def add_kubernetes_jobs_to_workflow_callable(workflow,
                                                 backend=None,
                                                 num_jobs=2,
                                                 status=None):
        """Add Kubernetes jobs to a given workflow.

        :param workflow: Workflow which the jobs should belong to.
        :param backend: Backend of the created jobs.
        :param num_jobs: Number of jobs to create.
        :param status: String representing the status of the created jobs,
            by default ``running``.
        :raises ValueError: If ``status`` is not a valid ``JobStatus`` name.
        :return: List of the created ``Job`` objects.
        """
        jobs = []
        if status and status not in JobStatus.__members__:
            raise ValueError("Unknown status {} use one of {}".format(
                status, JobStatus.__members__))

        status = status or JobStatus.running.name
        # NOTE(review): ``backend`` is normalised but never passed to
        # ``Job`` — presumably it should be; confirm against the Job model.
        backend = backend or "kubernetes"
        # One bucket per tracked status; created jobs are registered
        # under the requested ``status`` bucket only.
        progress_dict = {
            name: {"job_ids": [], "total": 0}
            for name in ("total",
                         JobStatus.running.name,
                         JobStatus.failed.name,
                         JobStatus.finished.name)
        }
        for _ in range(num_jobs):
            job = Job(
                id_=uuid.uuid4(),
                backend_job_id=str(uuid.uuid4()),
                workflow_uuid=workflow.id_,
                # Fix: honour the requested status instead of always
                # creating the jobs as ``running``, matching the
                # documented contract and the progress bookkeeping below.
                status=JobStatus[status],
            )
            progress_dict[status]["job_ids"].append(str(job.id_))
            progress_dict[status]["total"] += 1
            session.add(job)
            jobs.append(job)
        workflow.job_progress = progress_dict
        session.add(workflow)
        session.commit()
        return jobs
def test_workspace_deletion(app,
                            session,
                            default_user,
                            yadage_workflow_with_name,
                            tmp_shared_volume_path,
                            workspace,
                            hard_delete):
    """Test workspace deletion.

    Creates a workflow through the REST API, attaches a job with a cache
    entry, then deletes the workflow via ``set_workflow_status`` and checks
    that the workspace directory and the job-cache entries are removed
    according to the ``hard_delete`` / ``workspace`` flags.
    """
    with app.test_client() as client:
        # Create the workflow through the API on behalf of the default user.
        res = client.post(url_for('api.create_workflow'),
                          query_string={
                              "user": default_user.id_},
                          content_type='application/json',
                          data=json.dumps(yadage_workflow_with_name))
        assert res.status_code == 201
        response_data = json.loads(res.get_data(as_text=True))

        # The created workflow must now be present in the database.
        workflow = Workflow.query.filter(
            Workflow.id_ == response_data.get('workflow_id')).first()
        assert workflow

        absolute_workflow_workspace = os.path.join(
            tmp_shared_volume_path,
            workflow.get_workspace())

        # create a job for the workflow
        workflow_job = Job(id_=uuid.uuid4(), workflow_uuid=workflow.id_)
        job_cache_entry = JobCache(job_id=workflow_job.id_)
        session.add(workflow_job)
        session.add(job_cache_entry)
        session.commit()

        # check that the workflow workspace exists
        assert os.path.exists(absolute_workflow_workspace)
        # Delete the workflow through the status endpoint; the request body
        # carries the ``hard_delete`` / ``workspace`` flags under test.
        with app.test_client() as client:
            res = client.put(
                url_for('api.set_workflow_status',
                        workflow_id_or_name=workflow.id_),
                query_string={
                    'user': default_user.id_,
                    'status': 'deleted'
                },
                content_type='application/json',
                data=json.dumps({'hard_delete': hard_delete,
                                 'workspace': workspace}))
        # The workspace directory is gone whenever its removal was requested.
        if hard_delete or workspace:
            assert not os.path.exists(absolute_workflow_workspace)

        # check that all cache entries for jobs
        # of the deleted workflow are removed
        cache_entries_after_delete = JobCache.query.filter_by(
            job_id=workflow_job.id_).all()
        assert not cache_entries_after_delete
def test_workspace_deletion(
    mock_update_user_quota,
    mock_update_workflow_quota,
    app,
    session,
    default_user,
    sample_yadage_workflow_in_db,
    workspace,
):
    """Test workspace deletion.

    Checks that deleting a workflow removes its workspace and cached
    directories, purges job-cache entries, and updates the user and
    workflow disk quotas (only) when the workspace is actually deleted.
    """
    workflow = sample_yadage_workflow_in_db
    create_workflow_workspace(sample_yadage_workflow_in_db.workspace_path)

    # Add a file to the workspace so it has non-zero disk usage.
    file_size = 123
    file_path = os.path.join(sample_yadage_workflow_in_db.workspace_path, "temp.txt")
    with open(file_path, "w") as f:
        f.write("A" * file_size)

    # Get disk usage
    disk_usage = get_disk_usage_or_zero(sample_yadage_workflow_in_db.workspace_path)
    assert disk_usage

    # Update disk quotas
    store_workflow_disk_quota(sample_yadage_workflow_in_db)
    update_users_disk_quota(sample_yadage_workflow_in_db.owner)

    # Create a job with a cache entry for the workflow; a single commit
    # persists both rows, consistent with the sibling deletion tests.
    workflow_job = Job(id_=uuid.uuid4(), workflow_uuid=workflow.id_)
    job_cache_entry = JobCache(job_id=workflow_job.id_)
    session.add(workflow_job)
    session.add(job_cache_entry)
    session.commit()

    # Create the cached workspace directory next to the workflow workspace.
    cache_dir_path = os.path.join(
        sample_yadage_workflow_in_db.workspace_path,
        "..",
        "archive",
        str(workflow_job.id_),
    )
    os.makedirs(cache_dir_path)

    # check that the workflow workspace exists
    assert os.path.exists(sample_yadage_workflow_in_db.workspace_path)
    assert os.path.exists(cache_dir_path)
    delete_workflow(workflow, workspace=workspace)
    if workspace:
        # Workspace removed and both quotas decreased by its disk usage.
        assert not os.path.exists(sample_yadage_workflow_in_db.workspace_path)
        mock_update_user_quota.assert_called_once_with(
            sample_yadage_workflow_in_db.owner,
            bytes_to_sum=-disk_usage,
            override_policy_checks=True,
        )
        mock_update_workflow_quota.assert_called_once_with(
            sample_yadage_workflow_in_db,
            bytes_to_sum=-disk_usage,
            override_policy_checks=True,
        )
    else:
        # Workspace kept: quotas must not be touched.
        assert not mock_update_user_quota.called
        assert not mock_update_workflow_quota.called

    # check that all cache entries for jobs
    # of the deleted workflow are removed
    cache_entries_after_delete = JobCache.query.filter_by(job_id=workflow_job.id_).all()
    assert not cache_entries_after_delete
    assert not os.path.exists(cache_dir_path)