async def check_pipeline_state() -> ComputationTaskGet:
    # NOTE: closes over `client`, `url`, `user_id`, `project_uuid` and
    # `wait_for_states` from the enclosing scope
    response = await client.get(url, params={"user_id": user_id})
    assert (
        response.status_code == status.HTTP_202_ACCEPTED
    ), f"response code is {response.status_code}, error: {response.text}"
    task_out = ComputationTaskGet.parse_obj(response.json())
    assert task_out.id == project_uuid
    assert task_out.url == f"{client.base_url}/v2/computations/{project_uuid}"
    print(f"Pipeline '{project_uuid=}' current task out is '{task_out=}'")
    assert wait_for_states
    assert (
        task_out.state in wait_for_states
    ), f"current task state is '{task_out.state}', not in any of {wait_for_states}"
    return task_out


async def test_nodeports_integration(
    # pylint: disable=too-many-arguments
    minimal_configuration: None,
    cleanup_services_and_networks: None,
    update_project_workbench_with_comp_tasks: Callable,
    async_client: httpx.AsyncClient,
    db_manager: DBManager,
    user_db: Dict,
    current_study: ProjectAtDB,
    services_endpoint: Dict[str, URL],
    workbench_dynamic_services: Dict[str, Node],
    services_node_uuids: ServicesNodeUUIDs,
    fake_dy_success: Dict[str, Any],
    fake_dy_published: Dict[str, Any],
    temp_dir: Path,
    mocker: MockerFixture,
) -> None:
    """
    Creates a new project where the following connections
    are defined: `sleeper:1.0.0` ->
    `dy-static-file-server-dynamic-sidecar:2.0.0` ->
    `dy-static-file-server-dynamic-sidecar-compose-spec:2.0.0`.

    Both `dy-static-file-server-*` services are able to map the
    inputs of the service to the outputs. Both services also
    generate an internal state which is to be persisted
    between runs.

    Execution steps:
    1. start all the dynamic services and make sure they are running
    2. run the computational pipeline & trigger port retrievals
    3. check that the outputs of the `sleeper` are the same as the
        outputs of the `dy-static-file-server-dynamic-sidecar-compose-spec`
    4. fetch the "state" via `docker/aioboto` for both dynamic services
    5. start the dynamic-services and fetch the "state" via
        `storage-data_manager API/aioboto` for both dynamic services
    6. start the dynamic-services again, fetch the "state" via
        `docker/aioboto` for both dynamic services
    7. finally check that all states for both dynamic services match

    NOTE: when the services are started using S3 as a backend
    for saving the state, the state files are recovered via
    `aioboto` instead of `docker` or `storage-data_manager API`.
    """

    # STEP 1

    dynamic_services_urls: Dict[str, str] = await _wait_for_dynamic_services_to_be_running(
        director_v2_client=async_client,
        user_id=user_db["id"],
        workbench_dynamic_services=workbench_dynamic_services,
        current_study=current_study,
    )

    # STEP 2

    response = await create_pipeline(
        async_client,
        project=current_study,
        user_id=user_db["id"],
        start_pipeline=True,
        expected_response_status_code=status.HTTP_201_CREATED,
    )
    task_out = ComputationTaskGet.parse_obj(response.json())

    # check the content is correct: a pipeline that just started gets PUBLISHED
    await assert_computation_task_out_obj(
        async_client,
        task_out,
        project=current_study,
        exp_task_state=RunningState.PUBLISHED,
        exp_pipeline_details=PipelineDetails.parse_obj(fake_dy_published),
    )
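    # expected lifecycle exercised below: PUBLISHED -> STARTED -> SUCCESS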

    # wait for the computation to start
    await assert_and_wait_for_pipeline_status(
        async_client,
        task_out.url,
        user_db["id"],
        current_study.uuid,
        wait_for_states=[RunningState.STARTED],
    )

    # wait for the computation to finish (by success, failure or abort)
    task_out = await assert_and_wait_for_pipeline_status(
        async_client, task_out.url, user_db["id"], current_study.uuid)

    await assert_computation_task_out_obj(
        async_client,
        task_out,
        project=current_study,
        exp_task_state=RunningState.SUCCESS,
        exp_pipeline_details=PipelineDetails.parse_obj(fake_dy_success),
    )

    update_project_workbench_with_comp_tasks(str(current_study.uuid))

    # Trigger inputs pulling & outputs pushing on dynamic services

    # Since there is no webserver monitoring postgres notifications,
    # trigger the call manually

    # dump logs from the started containers before retrieve
    await _print_dynamic_sidecars_containers_logs_and_get_containers(
        dynamic_services_urls)

    await _assert_retrieve_completed(
        director_v2_client=async_client,
        service_uuid=services_node_uuids.dy,
        dynamic_services_urls=dynamic_services_urls,
    )

    await _assert_retrieve_completed(
        director_v2_client=async_client,
        service_uuid=services_node_uuids.dy_compose_spec,
        dynamic_services_urls=dynamic_services_urls,
    )
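    # A hypothetical sketch of the trigger behind `_assert_retrieve_completed`
    # (the real helper is defined elsewhere in this module and also waits for
    # completion); director-v2 exposes a ":retrieve" endpoint per dynamic
    # service, roughly:
    #
    #   response = await async_client.post(
    #       f"/v2/dynamic_services/{service_uuid}:retrieve",
    #       json={"port_keys": []},  # empty list -> retrieve all ports (assumption)
    #   )
    #   assert response.status_code == status.HTTP_200_OK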

    # STEP 3
    # pull data via nodeports

    # storage's config.py resolves env vars at import time, unlike the newer
    # settings-library configuration; patch the module with the correct url
    mocker.patch(
        "simcore_sdk.node_ports_common.config.STORAGE_ENDPOINT",
        str(services_endpoint["storage"]).replace("http://", ""),
    )
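    # Illustration of that difference (hypothetical minimal example, not the
    # actual modules):
    #
    #   # legacy config.py style: resolved once, at import time
    #   STORAGE_ENDPOINT = os.environ.get("STORAGE_ENDPOINT", "storage:8080")
    #
    #   # settings-library style: resolved when the settings object is created
    #   class StorageSettings(BaseSettings):
    #       STORAGE_ENDPOINT: str = "storage:8080"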

    mapped_nodeports_values = await _get_mapped_nodeports_values(
        user_db["id"], str(current_study.uuid), current_study.workbench,
        db_manager)
    await _assert_port_values(mapped_nodeports_values, services_node_uuids)

    # STEP 4

    # pylint: disable=protected-access
    app_settings: AppSettings = async_client._transport.app.state.settings
    r_clone_settings: RCloneSettings = (
        app_settings.DYNAMIC_SERVICES.DYNAMIC_SIDECAR.DYNAMIC_SIDECAR_R_CLONE_SETTINGS
    )

    if app_settings.DIRECTOR_V2_DEV_FEATURES_ENABLED:
        await sleep_for(
            WAIT_FOR_R_CLONE_VOLUME_TO_SYNC_DATA,
            "Waiting for rclone to sync data from the docker volume",
        )

    if app_settings.DIRECTOR_V2_DEV_FEATURES_ENABLED:
        dy_path_volume_before = await _fetch_data_via_aioboto(
            r_clone_settings=r_clone_settings,
            dir_tag="dy",
            temp_dir=temp_dir,
            node_id=services_node_uuids.dy,
            project_id=current_study.uuid,
        )
        dy_compose_spec_path_volume_before = await _fetch_data_via_aioboto(
            r_clone_settings=r_clone_settings,
            dir_tag="dy_compose_spec",
            temp_dir=temp_dir,
            node_id=services_node_uuids.dy_compose_spec,
            project_id=current_study.uuid,
        )
    else:
        dy_path_volume_before = await _fetch_data_from_container(
            dir_tag="dy",
            service_uuid=services_node_uuids.dy,
            temp_dir=temp_dir,
        )
        dy_compose_spec_path_volume_before = await _fetch_data_from_container(
            dir_tag="dy_compose_spec",
            service_uuid=services_node_uuids.dy_compose_spec,
            temp_dir=temp_dir,
        )

    # STEP 5

    # stop the services to make sure the data is saved to storage
    await asyncio.gather(*(assert_stop_service(
        director_v2_client=async_client,
        service_uuid=service_uuid,
    ) for service_uuid in workbench_dynamic_services))

    await _wait_for_dy_services_to_fully_stop(async_client)

    if app_settings.DIRECTOR_V2_DEV_FEATURES_ENABLED:
        await sleep_for(
            WAIT_FOR_R_CLONE_VOLUME_TO_SYNC_DATA,
            "Waiting for rclone to sync data from the docker volume",
        )

    if app_settings.DIRECTOR_V2_DEV_FEATURES_ENABLED:
        dy_path_data_manager_before = await _fetch_data_via_aioboto(
            r_clone_settings=r_clone_settings,
            dir_tag="dy",
            temp_dir=temp_dir,
            node_id=services_node_uuids.dy,
            project_id=current_study.uuid,
        )
        dy_compose_spec_path_data_manager_before = await _fetch_data_via_aioboto(
            r_clone_settings=r_clone_settings,
            dir_tag="dy_compose_spec",
            temp_dir=temp_dir,
            node_id=services_node_uuids.dy_compose_spec,
            project_id=current_study.uuid,
        )
    else:
        dy_path_data_manager_before = await _fetch_data_via_data_manager(
            dir_tag="dy",
            user_id=user_db["id"],
            project_id=str(current_study.uuid),
            service_uuid=services_node_uuids.dy,
            temp_dir=temp_dir,
        )
        dy_compose_spec_path_data_manager_before = await _fetch_data_via_data_manager(
            dir_tag="dy_compose_spec",
            user_id=user_db["id"],
            project_id=str(current_study.uuid),
            service_uuid=services_node_uuids.dy_compose_spec,
            temp_dir=temp_dir,
        )

    # STEP 6

    await _wait_for_dynamic_services_to_be_running(
        director_v2_client=async_client,
        user_id=user_db["id"],
        workbench_dynamic_services=workbench_dynamic_services,
        current_study=current_study,
    )

    if app_settings.DIRECTOR_V2_DEV_FEATURES_ENABLED:
        dy_path_volume_after = await _fetch_data_via_aioboto(
            r_clone_settings=r_clone_settings,
            dir_tag="dy",
            temp_dir=temp_dir,
            node_id=services_node_uuids.dy,
            project_id=current_study.uuid,
        )
        dy_compose_spec_path_volume_after = await _fetch_data_via_aioboto(
            r_clone_settings=r_clone_settings,
            dir_tag="dy_compose_spec",
            temp_dir=temp_dir,
            node_id=services_node_uuids.dy_compose_spec,
            project_id=current_study.uuid,
        )
    else:
        dy_path_volume_after = await _fetch_data_from_container(
            dir_tag="dy",
            service_uuid=services_node_uuids.dy,
            temp_dir=temp_dir,
        )
        dy_compose_spec_path_volume_after = await _fetch_data_from_container(
            dir_tag="dy_compose_spec",
            service_uuid=services_node_uuids.dy_compose_spec,
            temp_dir=temp_dir,
        )

    # STEP 7

    _assert_same_set(
        _get_file_hashes_in_path(dy_path_volume_before),
        _get_file_hashes_in_path(dy_path_data_manager_before),
        _get_file_hashes_in_path(dy_path_volume_after),
    )

    _assert_same_set(
        _get_file_hashes_in_path(dy_compose_spec_path_volume_before),
        _get_file_hashes_in_path(dy_compose_spec_path_data_manager_before),
        _get_file_hashes_in_path(dy_compose_spec_path_volume_after),
    )
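

# A minimal, hypothetical sketch of the two comparison helpers used in STEP 7
# above (`_get_file_hashes_in_path`, `_assert_same_set`); the real helpers are
# defined elsewhere in this test module, so treat the signatures below as
# assumptions for illustration:
import hashlib
from pathlib import Path
from typing import Set, Tuple


def _get_file_hashes_in_path(path: Path) -> Set[Tuple[Path, str]]:
    # hash every file under `path`, keyed by its path relative to `path`,
    # so snapshots downloaded to different temp dirs stay comparable
    return {
        (f.relative_to(path), hashlib.md5(f.read_bytes()).hexdigest())
        for f in path.rglob("*")
        if f.is_file()
    }


def _assert_same_set(*sets_of_hashes: Set[Tuple[Path, str]]) -> None:
    # every snapshot must contain exactly the same (relative path, hash) pairs
    first, *rest = sets_of_hashes
    for other in rest:
        assert first == other, f"{first=} != {other=}"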
async def test_update_and_delete_computation(
    minimal_configuration: None,
    async_client: httpx.AsyncClient,
    registered_user: Callable,
    project: Callable,
    fake_workbench_without_outputs: Dict[str, Any],
    fake_workbench_computational_pipeline_details_not_started: PipelineDetails,
    fake_workbench_computational_pipeline_details: PipelineDetails,
):
    user = registered_user()
    sleepers_project = project(user, workbench=fake_workbench_without_outputs)
    # send a valid project with sleepers
    response = await create_pipeline(
        async_client,
        project=sleepers_project,
        user_id=user["id"],
        start_pipeline=False,
        expected_response_status_code=status.HTTP_201_CREATED,
    )
    task_out = ComputationTaskGet.parse_obj(response.json())

    # check the content is correct
    await assert_computation_task_out_obj(
        async_client,
        task_out,
        project=sleepers_project,
        exp_task_state=RunningState.NOT_STARTED,
        exp_pipeline_details=fake_workbench_computational_pipeline_details_not_started,
    )

    # update the pipeline
    response = await create_pipeline(
        async_client,
        project=sleepers_project,
        user_id=user["id"],
        start_pipeline=False,
        expected_response_status_code=status.HTTP_201_CREATED,
    )
    task_out = ComputationTaskGet.parse_obj(response.json())

    # check the content is correct
    await assert_computation_task_out_obj(
        async_client,
        task_out,
        project=sleepers_project,
        exp_task_state=RunningState.NOT_STARTED,
        exp_pipeline_details=fake_workbench_computational_pipeline_details_not_started,
    )

    # update the pipeline
    response = await create_pipeline(
        async_client,
        project=sleepers_project,
        user_id=user["id"],
        start_pipeline=False,
        expected_response_status_code=status.HTTP_201_CREATED,
    )
    task_out = ComputationTaskGet.parse_obj(response.json())

    # check the content is correct
    await assert_computation_task_out_obj(
        async_client,
        task_out,
        project=sleepers_project,
        exp_task_state=RunningState.NOT_STARTED,
        exp_pipeline_details=fake_workbench_computational_pipeline_details_not_started,
    )

    # start it now
    response = await create_pipeline(
        async_client,
        project=sleepers_project,
        user_id=user["id"],
        start_pipeline=True,
        expected_response_status_code=status.HTTP_201_CREATED,
    )
    task_out = ComputationTaskGet.parse_obj(response.json())
    # check the content is correct
    await assert_computation_task_out_obj(
        async_client,
        task_out,
        project=sleepers_project,
        exp_task_state=RunningState.PUBLISHED,
        exp_pipeline_details=fake_workbench_computational_pipeline_details,
    )

    # wait until the pipeline is started
    task_out = await assert_and_wait_for_pipeline_status(
        async_client,
        task_out.url,
        user["id"],
        sleepers_project.uuid,
        wait_for_states=[RunningState.STARTED],
    )
    assert (
        task_out.state == RunningState.STARTED
    ), f"pipeline is not in the expected starting state but in {task_out.state}"

    # now try to update the pipeline; this is expected to be forbidden
    response = await create_pipeline(
        async_client,
        project=sleepers_project,
        user_id=user["id"],
        start_pipeline=False,
        expected_response_status_code=status.HTTP_403_FORBIDDEN,
    )

    # try to delete the pipeline; this is expected to be forbidden if the force parameter is false (the default)
    response = await async_client.request("DELETE",
                                          task_out.url,
                                          json={"user_id": user["id"]})
    assert (
        response.status_code == status.HTTP_403_FORBIDDEN
    ), f"response code is {response.status_code}, error: {response.text}"

    # try again with force=True; this should abort and delete the pipeline
    response = await async_client.request("DELETE",
                                          task_out.url,
                                          json={
                                              "user_id": user["id"],
                                              "force": True
                                          })
    assert (
        response.status_code == status.HTTP_204_NO_CONTENT
    ), f"response code is {response.status_code}, error: {response.text}"
async def test_abort_computation(
    minimal_configuration: None,
    async_client: httpx.AsyncClient,
    registered_user: Callable,
    project: Callable,
    fake_workbench_without_outputs: Dict[str, Any],
    fake_workbench_computational_pipeline_details: PipelineDetails,
):
    user = registered_user()
    # we need long-running tasks to ensure cancellation is done properly
    for node in fake_workbench_without_outputs.values():
        if "sleeper" in node["key"]:
            node["inputs"].setdefault("in_2", 120)
            # only override plain values; a dict here is a link to another port
            if not isinstance(node["inputs"]["in_2"], dict):
                node["inputs"]["in_2"] = 120
    sleepers_project = project(user, workbench=fake_workbench_without_outputs)
    # send a valid project with sleepers
    response = await create_pipeline(
        async_client,
        project=sleepers_project,
        user_id=user["id"],
        start_pipeline=True,
        expected_response_status_code=status.HTTP_201_CREATED,
    )
    task_out = ComputationTaskGet.parse_obj(response.json())

    # check the content is correct
    await assert_computation_task_out_obj(
        async_client,
        task_out,
        project=sleepers_project,
        exp_task_state=RunningState.PUBLISHED,
        exp_pipeline_details=fake_workbench_computational_pipeline_details,
    )

    # wait until the pipeline is started
    task_out = await assert_and_wait_for_pipeline_status(
        async_client,
        task_out.url,
        user["id"],
        sleepers_project.uuid,
        wait_for_states=[RunningState.STARTED],
    )
    assert (
        task_out.state == RunningState.STARTED
    ), f"pipeline is not in the expected starting state but in {task_out.state}"
    assert (task_out.url ==
            f"{async_client.base_url}/v2/computations/{sleepers_project.uuid}")
    assert (
        task_out.stop_url ==
        f"{async_client.base_url}/v2/computations/{sleepers_project.uuid}:stop"
    )
    # wait a bit till it has some momentum
    await asyncio.sleep(5)

    # now abort the pipeline
    response = await async_client.post(f"{task_out.stop_url}",
                                       json={"user_id": user["id"]})
    assert (
        response.status_code == status.HTTP_202_ACCEPTED
    ), f"response code is {response.status_code}, error: {response.text}"
    task_out = ComputationTaskGet.parse_obj(response.json())
    assert (str(task_out.url) ==
            f"{async_client.base_url}/v2/computations/{sleepers_project.uuid}")
    assert task_out.stop_url is None

    # check that the pipeline is aborted/stopped
    task_out = await assert_and_wait_for_pipeline_status(
        async_client,
        task_out.url,
        user["id"],
        sleepers_project.uuid,
        wait_for_states=[RunningState.ABORTED],
    )
    assert task_out.state == RunningState.ABORTED
async def test_run_computation(
    minimal_configuration: None,
    async_client: httpx.AsyncClient,
    registered_user: Callable,
    project: Callable,
    fake_workbench_without_outputs: Dict[str, Any],
    update_project_workbench_with_comp_tasks: Callable,
    fake_workbench_computational_pipeline_details: PipelineDetails,
    fake_workbench_computational_pipeline_details_completed: PipelineDetails,
):
    user = registered_user()
    sleepers_project = project(user, workbench=fake_workbench_without_outputs)
    # send a valid project with sleepers
    response = await create_pipeline(
        async_client,
        project=sleepers_project,
        user_id=user["id"],
        start_pipeline=True,
        expected_response_status_code=status.HTTP_201_CREATED,
    )
    task_out = ComputationTaskGet.parse_obj(response.json())

    # check the content is correct: a pipeline that just started gets PUBLISHED
    await assert_computation_task_out_obj(
        async_client,
        task_out,
        project=sleepers_project,
        exp_task_state=RunningState.PUBLISHED,
        exp_pipeline_details=fake_workbench_computational_pipeline_details,
    )

    # wait for the computation to start
    await assert_and_wait_for_pipeline_status(
        async_client,
        task_out.url,
        user["id"],
        sleepers_project.uuid,
        wait_for_states=[RunningState.STARTED],
    )

    # wait for the computation to finish (by success, failure or abort)
    task_out = await assert_and_wait_for_pipeline_status(
        async_client, task_out.url, user["id"], sleepers_project.uuid)

    await assert_computation_task_out_obj(
        async_client,
        task_out,
        project=sleepers_project,
        exp_task_state=RunningState.SUCCESS,
        exp_pipeline_details=fake_workbench_computational_pipeline_details_completed,
    )

    # FIXME: currently the webserver is the one updating the projects table so we need to fake this by copying the run_hash
    update_project_workbench_with_comp_tasks(str(sleepers_project.uuid))
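    # What the fixture above is presumably doing (hypothetical sketch, not the
    # real implementation): copy each task's run_hash from the comp_tasks table
    # into the corresponding workbench node of the projects table, mimicking
    # what the webserver would normally persist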
    # running again should return a 422 because everything is up-to-date
    response = await create_pipeline(
        async_client,
        project=sleepers_project,
        user_id=user["id"],
        start_pipeline=True,
        expected_response_status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
    )

    # now force run again
    # the tasks are up-to-date but we force-run them
    expected_pipeline_details_forced = deepcopy(
        fake_workbench_computational_pipeline_details_completed)
    for node_id, node_data in expected_pipeline_details_forced.node_states.items():
        node_data.current_status = (
            fake_workbench_computational_pipeline_details.node_states[node_id].current_status
        )
    response = await create_pipeline(
        async_client,
        project=sleepers_project,
        user_id=user["id"],
        start_pipeline=True,
        expected_response_status_code=status.HTTP_201_CREATED,
        force_restart=True,
    )
    task_out = ComputationTaskGet.parse_obj(response.json())
    # check the content is correct
    await assert_computation_task_out_obj(
        async_client,
        task_out,
        project=sleepers_project,
        exp_task_state=RunningState.PUBLISHED,
        # NOTE: here the pipeline already ran so its states are different
        exp_pipeline_details=expected_pipeline_details_forced,
    )

    # wait for the computation to finish
    task_out = await assert_and_wait_for_pipeline_status(
        async_client, task_out.url, user["id"], sleepers_project.uuid)
    await assert_computation_task_out_obj(
        async_client,
        task_out,
        project=sleepers_project,
        exp_task_state=RunningState.SUCCESS,
        exp_pipeline_details=fake_workbench_computational_pipeline_details_completed,
    )
async def test_run_partial_computation(
    minimal_configuration: None,
    async_client: httpx.AsyncClient,
    registered_user: Callable,
    project: Callable,
    update_project_workbench_with_comp_tasks: Callable,
    fake_workbench_without_outputs: Dict[str, Any],
    params: PartialComputationParams,
):
    user = registered_user()
    sleepers_project: ProjectAtDB = project(
        user, workbench=fake_workbench_without_outputs)

    def _convert_to_pipeline_details(
        project: ProjectAtDB,
        exp_pipeline_adj_list: Dict[int, List[int]],
        exp_node_states: Dict[int, Dict[str, Any]],
    ) -> PipelineDetails:
        workbench_node_uuids = list(project.workbench.keys())
        converted_adj_list: Dict[NodeID, List[NodeID]] = {}
        for node_key, next_nodes in exp_pipeline_adj_list.items():
            converted_adj_list[NodeID(workbench_node_uuids[node_key])] = [
                NodeID(workbench_node_uuids[n]) for n in next_nodes
            ]
        converted_node_states: Dict[NodeID, NodeState] = {
            NodeID(workbench_node_uuids[n]): NodeState(
                modified=s["modified"],
                dependencies={
                    workbench_node_uuids[dep_n]
                    for dep_n in s["dependencies"]
                },
                currentStatus=s.get("currentStatus", RunningState.NOT_STARTED),
            )
            for n, s in exp_node_states.items()
        }
        return PipelineDetails(adjacency_list=converted_adj_list,
                               node_states=converted_node_states)

    # convert the ids to the node uuids from the project
    expected_pipeline_details = _convert_to_pipeline_details(
        sleepers_project, params.exp_pipeline_adj_list, params.exp_node_states)
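    # e.g. with workbench keys [uuid_a, uuid_b], exp_pipeline_adj_list={0: [1]}
    # converts to {NodeID(uuid_a): [NodeID(uuid_b)]}, and exp_node_states uses
    # the same integer indices (hypothetical values for illustration)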

    # send a valid project with sleepers
    response = await create_pipeline(
        async_client,
        project=sleepers_project,
        user_id=user["id"],
        start_pipeline=True,
        expected_response_status_code=status.HTTP_201_CREATED,
        subgraph=[
            str(node_id)
            for index, node_id in enumerate(sleepers_project.workbench)
            if index in params.subgraph_elements
        ],
    )
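    # NOTE: the subgraph above selects nodes by their position in the
    # workbench's insertion order, e.g. subgraph_elements=(0, 2) would pick
    # the first and third node uuids (illustrative values)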
    task_out = ComputationTaskGet.parse_obj(response.json())
    # check the content is correct
    await assert_computation_task_out_obj(
        async_client,
        task_out,
        project=sleepers_project,
        exp_task_state=RunningState.PUBLISHED,
        exp_pipeline_details=expected_pipeline_details,
    )

    # now wait for the computation to finish
    task_out = await assert_and_wait_for_pipeline_status(
        async_client, task_out.url, user["id"], sleepers_project.uuid)
    expected_pipeline_details_after_run = _convert_to_pipeline_details(
        sleepers_project, params.exp_pipeline_adj_list,
        params.exp_node_states_after_run)
    await assert_computation_task_out_obj(
        async_client,
        task_out,
        project=sleepers_project,
        exp_task_state=RunningState.SUCCESS,
        exp_pipeline_details=expected_pipeline_details_after_run,
    )

    # run it a second time. the tasks are all up-to-date, nothing should be run
    # FIXME: currently the webserver is the one updating the projects table so we need to fake this by copying the run_hash
    update_project_workbench_with_comp_tasks(str(sleepers_project.uuid))

    response = await create_pipeline(
        async_client,
        project=sleepers_project,
        user_id=user["id"],
        start_pipeline=True,
        expected_response_status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
        subgraph=[
            str(node_id)
            for index, node_id in enumerate(sleepers_project.workbench)
            if index in params.subgraph_elements
        ],
    )

    # force run it this time
    # the tasks are up-to-date but we force-run them
    expected_pipeline_details_forced = _convert_to_pipeline_details(
        sleepers_project,
        params.exp_pipeline_adj_list_after_force_run,
        params.exp_node_states_after_force_run,
    )
    response = await create_pipeline(
        async_client,
        project=sleepers_project,
        user_id=user["id"],
        start_pipeline=True,
        expected_response_status_code=status.HTTP_201_CREATED,
        subgraph=[
            str(node_id)
            for index, node_id in enumerate(sleepers_project.workbench)
            if index in params.subgraph_elements
        ],
        force_restart=True,
    )
    task_out = ComputationTaskGet.parse_obj(response.json())

    await assert_computation_task_out_obj(
        async_client,
        task_out,
        project=sleepers_project,
        exp_task_state=RunningState.PUBLISHED,
        exp_pipeline_details=expected_pipeline_details_forced,
    )

    # now wait for the computation to finish
    task_out = await assert_and_wait_for_pipeline_status(
        async_client, task_out.url, user["id"], sleepers_project.uuid)