Example #1
# Imports below are assumptions based on Ray Serve's pipeline modules of this
# era; exact paths may vary across Ray versions.
import json

import ray
from ray.experimental.dag import DAGNode
from ray.serve.pipeline.generate import (
    extract_deployments_from_serve_dag,
    transform_ray_dag_to_serve_dag,
)
from ray.serve.pipeline.json_serde import DAGNodeEncoder, dagnode_from_json


def _test_deployment_json_serde_helper(ray_dag: DAGNode,
                                       input=None,
                                       expected_num_deployments=None):
    """Helper function for DeploymentNode and DeploymentMethodNode calls, checks
    the following:
        1) Transform ray dag to serve dag, and ensure serve dag is JSON
            serializable.
        2) Serve dag JSON and be deserialized back to serve dag.
        3) Deserialized serve dag can extract correct number and definition of
            serve deployments.
    """
    serve_root_dag = ray_dag.apply_recursive(transform_ray_dag_to_serve_dag)
    json_serialized = json.dumps(serve_root_dag, cls=DAGNodeEncoder)
    deserialized_serve_root_dag_node = json.loads(
        json_serialized, object_hook=dagnode_from_json)
    deserialized_deployments = extract_deployments_from_serve_dag(
        deserialized_serve_root_dag_node)
    assert len(deserialized_deployments) == expected_num_deployments
    # Deploy the deserialized version to ensure JSON serde correctness
    for model in deserialized_deployments:
        model.deploy()
    if input is None:
        assert ray.get(ray_dag.execute()) == ray.get(serve_root_dag.execute())
    else:
        assert ray.get(ray_dag.execute(input)) == ray.get(
            serve_root_dag.execute(input))
    return serve_root_dag, deserialized_serve_root_dag_node
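
A minimal sketch of driving this helper, assuming a hypothetical `Model` deployment class; the class name, its weight argument, and `expected_num_deployments=1` are illustrative assumptions, not part of the helper itself.

from ray import serve
from ray.experimental.dag import InputNode

# Assumes ray.init() and serve.start() have already been called, as the
# helper calls .deploy() on the extracted deployments.


@serve.deployment
class Model:
    def __init__(self, weight: int):
        self.weight = weight

    def forward(self, inp: int) -> int:
        return self.weight * inp


with InputNode() as dag_input:
    model = Model.bind(2)
    ray_dag = model.forward.bind(dag_input)

# Assumption: a single-model DAG extracts exactly one serve deployment.
_test_deployment_json_serde_helper(
    ray_dag, input=5, expected_num_deployments=1)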
Example #2
# Imports below are assumptions; the paths shown match Ray Serve's pipeline
# code of this era and may vary across Ray versions.
from typing import List

from ray.experimental.dag import DAGNode
from ray.serve.api import Deployment
from ray.serve.pipeline.generate import (
    extract_deployments_from_serve_dag,
    process_ingress_deployment_in_serve_dag,
    transform_ray_dag_to_serve_dag,
)


def build(ray_dag_root_node: DAGNode) -> List[Deployment]:
    """Do all the DAG transformation, extraction and generation needed to
    produce a runnable and deployable serve pipeline application from a valid
    DAG authored with Ray DAG API.

    This should be the only user facing API that user interacts with.

    Assumptions:
        The following requirements are enforced only when generating and
        applying the pipeline artifact; they are not blockers for local
        development and testing.

        - ALL args and kwargs used in DAG building should be JSON serializable.
            To ensure your pipeline application can run on a remote cluster,
            potentially with a different runtime environment, there are three
            options:

                1) Bind in-memory objects.
                2) Rely on pickling.
                3) Enforce JSON serializability on all args used.

            Options 1) and 2) rely on unstable in-memory objects or
            cross-version pickling / closure capture, whereas JSON
            serialization provides the right contract needed for proper
            deployment.

        - ALL classes and methods used should be visible at the top level of
            the file and importable via a fully qualified name; thus no inline
            class or function definitions should be used.

    Args:
        ray_dag_root_node: DAGNode acting as the root of a Ray-authored DAG.
            It should be executable via `ray_dag_root_node.execute(user_input)`
            and should contain an `InputNode`.

    Returns:
        deployments: All deployments needed for an e2e runnable serve pipeline,
            accessible via a Python .remote() call.

    Examples:
        >>> with InputNode() as dag_input:
        ...    m1 = Model.bind(1)
        ...    m2 = Model.bind(2)
        ...    m1_output = m1.forward.bind(dag_input[0])
        ...    m2_output = m2.forward.bind(dag_input[1])
        ...    ray_dag = ensemble.bind(m1_output, m2_output)

        Suppose we used non-JSON-serializable or inline-defined classes or
        functions during local pipeline development.

        >>> from ray.serve.api import build as build_app
        >>> deployments = build_app(ray_dag) # it can be a method node
        >>> deployments = build_app(m1) # or just a regular node
    """
    serve_root_dag = ray_dag_root_node.apply_recursive(
        transform_ray_dag_to_serve_dag)
    deployments = extract_deployments_from_serve_dag(serve_root_dag)
    deployments_with_http = process_ingress_deployment_in_serve_dag(
        deployments)

    return deployments_with_http
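
A sketch of the intended end-to-end flow, reusing the `ray_dag` from the docstring example above. Deploying each returned deployment follows the Returns section; treating the last deployment as the ingress is an assumption, not documented behavior.

import ray
from ray import serve

# Sketch only: assumes ray.init() and serve.start() were called, and that
# ray_dag is the ensemble DAG from the docstring example above.
deployments = build(ray_dag)
for deployment in deployments:
    deployment.deploy()

# Assumption: the ingress produced by process_ingress_deployment_in_serve_dag
# is the last element of the returned list.
ingress = deployments[-1]
handle = ingress.get_handle()
print(ray.get(handle.remote([1, 2])))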