Example 1
def build_graph_output(op_name, out):
    assert isinstance(out, Tensor)

    output_conf = (
        oneflow._oneflow_internal.oneflow.core.operator.op_conf.FetchOutputOpConf()
    )

    output_op = oneflow._oneflow_internal.one.FetchOutputOpExpr(
        op_name, output_conf, ["in_0"], ["out_0"]
    )
    fake_eager_out = _C.dispatch_fetch_output(output_op, out)

    return fake_eager_out
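
The helper above is shown without its surrounding imports. A minimal sketch of what it appears to depend on follows; the `_C` alias in particular is an assumption about how the enclosing module names OneFlow's dispatch API, not something the snippet itself shows.

import oneflow
import oneflow._C as _C      # assumed alias for OneFlow's functional/dispatch namespace
from oneflow import Tensor   # assumed source of the Tensor type checked by the assert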
Example 2
def build_graph_output(op_name, out):
    assert isinstance(out, Tensor)

    output_conf = oneflow.core.operator.op_conf_pb2.FetchOutputOpConf()
    output_conf.in_0 = "in_0"  # Set default values, otherwise text-format parsing fails
    output_conf.out_0 = "out_0"
    output_conf_str = text_format.MessageToString(output_conf)

    output_op = oneflow._oneflow_internal.one.FetchOutputOpExpr(
        op_name, output_conf_str, ["in_0"], ["out_0"])
    fake_eager_out = _C.dispatch_fetch_output(output_op, out)

    return fake_eager_out
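
Compared with Example 1, this variant builds the conf as a protobuf message from the generated `op_conf_pb2` module, fills the required fields with placeholder values (hence the comment about parsing), and serializes the message to protobuf text format before constructing the op expression. The self-contained round trip below illustrates just that serialization step, using only the field names already visible in the snippet; treat the proto layout as an assumption about this OneFlow version.

from google.protobuf import text_format
import oneflow.core.operator.op_conf_pb2 as op_conf_pb2

conf = op_conf_pb2.FetchOutputOpConf()
conf.in_0 = "in_0"    # placeholder values so the text-format string parses back
conf.out_0 = "out_0"
conf_str = text_format.MessageToString(conf)
parsed = text_format.Parse(conf_str, op_conf_pb2.FetchOutputOpConf())
assert parsed.in_0 == "in_0" and parsed.out_0 == "out_0"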
Example 3
def test_fetch_output_tensor(test_case):
    test_case.assertTrue(oneflow.env.is_multi_client())
    test_case.assertTrue(
        oneflow.framework.env_util.HasAllMultiClientEnvVars())
    x = flow.Tensor(1, 1, 10, 10)
    flow.nn.init.uniform_(x, a=-1.0, b=1.0)
    session = session_ctx.GetDefaultSession()
    test_case.assertTrue(isinstance(session, MultiClientSession))
    session.TryInit()
    with oneflow._oneflow_internal.lazy_mode.guard(True):
        # Open a job build-and-infer context and attach a job config to it.
        oneflow._oneflow_internal.JobBuildAndInferCtx_Open(
            "cc_test_output_op_expr_job")
        job_conf = (oneflow._oneflow_internal.oneflow.core.job.job_conf.
                    JobConfigProto())
        job_conf.set_job_name("cc_test_output_op_expr_job")
        job_conf.mutable_predict_conf()
        c_api_util.CurJobBuildAndInferCtx_SetJobConf(job_conf)
        # Build the feed-input and fetch-output op expressions.
        input_conf = (oneflow._oneflow_internal.oneflow.core.operator.
                      op_conf.FeedInputOpConf())
        input_conf.set_in_0("EagerTensorInput")
        input_conf.set_out_0("out_0")
        input_op = oneflow._oneflow_internal.one.FeedInputOpExpr(
            "cc_Input_0", input_conf, ["in_0"], ["out_0"])
        output_conf = (oneflow._oneflow_internal.oneflow.core.operator.
                       op_conf.FetchOutputOpConf())
        output_conf.set_in_0("LazyTensorInput")
        output_conf.set_out_0("out_0")
        output_op = oneflow._oneflow_internal.one.FetchOutputOpExpr(
            "cc_Output_0", output_conf, ["in_0"], ["out_0"])
        # Feeding the eager tensor into the graph yields a lazy tensor.
        lazy_tensor = _C.dispatch_feed_input(input_op, x)
        test_case.assertEqual(lazy_tensor.shape, (1, 1, 10, 10))
        test_case.assertTrue(lazy_tensor.is_lazy)
        test_case.assertTrue(lazy_tensor.is_local)
        # Fetching the lazy output turns it back into an eager tensor.
        eager_tensor = _C.dispatch_fetch_output(output_op, lazy_tensor)
        test_case.assertEqual(eager_tensor.shape, (1, 1, 10, 10))
        test_case.assertTrue(not eager_tensor.is_lazy)
        test_case.assertTrue(eager_tensor.is_local)
        oneflow._oneflow_internal.JobBuildAndInferCtx_Close()
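
The test refers to several helpers (`session_ctx`, `MultiClientSession`, `c_api_util`, `_C`) without showing where they come from, and the `test_case` parameter suggests it is meant to run as a method of a test class. The sketch below lists the imports the test presumably uses and one way to attach the function to a `unittest` case; the module paths, the `_C` alias, and the class name are assumptions based on OneFlow's usual test layout rather than code taken from the snippet.

import unittest

import oneflow
import oneflow as flow
import oneflow._C as _C                                  # assumed dispatch-API alias
import oneflow.framework.c_api_util as c_api_util        # assumed module path
import oneflow.framework.session_context as session_ctx  # assumed module path
from oneflow.framework.multi_client_session import MultiClientSession  # assumed path


class TestFetchOutputOpExpr(unittest.TestCase):  # hypothetical class name
    def test_fetch_output_tensor(self):
        # Delegate to the module-level function shown above.
        test_fetch_output_tensor(self)


if __name__ == "__main__":
    unittest.main()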
Example 4
def _test_user_op_graph(test_case, is_cuda):
    test_case.assertTrue(oneflow.framework.env_util.HasAllMultiClientEnvVars())

    x0 = flow.tensor(np.random.rand(20, 30), dtype=flow.float32)
    weight0 = flow.tensor(np.random.rand(30, 50), dtype=flow.float32)
    x1 = flow.tensor(np.random.rand(50, 70), dtype=flow.float32)

    if is_cuda:
        x0 = x0.to(device=flow.device("cuda"))
        weight0 = weight0.to(device=flow.device("cuda"))
        x1 = x1.to(device=flow.device("cuda"))

    # NOTE(chengcheng): this tiny net is:
    #    x0 * weight0 -> out0
    #    relu(out0) -> y0
    #    y0 * x1 -> out1
    #    relu(out1) -> y1

    session = session_ctx.GetDefaultSession()
    test_case.assertTrue(isinstance(session, MultiClientSession))
    session.TryInit()

    with oneflow._oneflow_internal.lazy_mode.guard(True):

        oneflow._oneflow_internal.JobBuildAndInferCtx_Open(
            "cc_test_user_op_expr_job_with_cuda" + str(is_cuda))
        job_conf = oneflow._oneflow_internal.oneflow.core.job.job_conf.JobConfigProto(
        )
        job_conf.set_job_name("cc_test_user_op_expr_job_with_cuda" +
                              str(is_cuda))
        job_conf.mutable_predict_conf()
        c_api_util.CurJobBuildAndInferCtx_SetJobConf(job_conf)

        # input_conf.set_in_0("EagerTensorInput")
        # input_conf.set_out_0("out_0")

        x0_conf = (oneflow._oneflow_internal.oneflow.core.operator.op_conf.
                   FeedInputOpConf())
        x0_op = oneflow._oneflow_internal.one.FeedInputOpExpr(
            "cc_Input_0", x0_conf, ["in_0"], ["out_0"])
        x1_conf = (oneflow._oneflow_internal.oneflow.core.operator.op_conf.
                   FeedInputOpConf())
        x1_op = oneflow._oneflow_internal.one.FeedInputOpExpr(
            "cc_Input_1", x1_conf, ["in_0"], ["out_0"])
        weight0_conf = (oneflow._oneflow_internal.oneflow.core.operator.
                        op_conf.FeedVariableOpConf())
        weight0_op = oneflow._oneflow_internal.one.FeedVariableOpExpr(
            "cc_Variable_0", weight0_conf, ["in_0"], ["out_0"])
        output_conf = (oneflow._oneflow_internal.oneflow.core.operator.op_conf.
                       FetchOutputOpConf())
        output_op = oneflow._oneflow_internal.one.FetchOutputOpExpr(
            "cc_Output_0", output_conf, ["in_0"], ["out_0"])

        x0_lazy_tensor = _C.dispatch_feed_input(x0_op, x0)
        x1_lazy_tensor = _C.dispatch_feed_input(x1_op, x1)
        weight0_lazy_tensor = _C.dispatch_feed_input(weight0_op, weight0)

        test_case.assertEqual(x0_lazy_tensor.shape, (20, 30))
        test_case.assertTrue(x0_lazy_tensor.is_lazy)

        test_case.assertEqual(weight0_lazy_tensor.shape, (30, 50))
        test_case.assertTrue(weight0_lazy_tensor.is_lazy)
        test_case.assertEqual(x1_lazy_tensor.shape, (50, 70))
        test_case.assertTrue(x1_lazy_tensor.is_lazy)

        out0 = flow._C.matmul(x0_lazy_tensor, weight0_lazy_tensor)
        test_case.assertEqual(out0.shape, (20, 50))
        test_case.assertTrue(out0.is_lazy)

        y0 = flow._C.relu(out0)
        test_case.assertEqual(y0.shape, (20, 50))
        test_case.assertTrue(y0.is_lazy)

        out1 = flow._C.matmul(y0, x1_lazy_tensor)
        test_case.assertEqual(out1.shape, (20, 70))
        test_case.assertTrue(out1.is_lazy)

        y1 = flow._C.relu(out1)
        test_case.assertEqual(y1.shape, (20, 70))
        test_case.assertTrue(y1.is_lazy)

        eager_output = _C.dispatch_fetch_output(output_op, y1)
        test_case.assertEqual(eager_output.shape, (20, 70))
        test_case.assertTrue(not eager_output.is_lazy)

        if is_cuda:
            test_case.assertTrue(x0_lazy_tensor.is_cuda)
            test_case.assertTrue(x1_lazy_tensor.is_cuda)
            test_case.assertTrue(weight0_lazy_tensor.is_cuda)
            test_case.assertTrue(out0.is_cuda)
            test_case.assertTrue(y0.is_cuda)
            test_case.assertTrue(out1.is_cuda)
            test_case.assertTrue(y1.is_cuda)

        oneflow._oneflow_internal.JobBuildAndInferCtx_Close()
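
The builder above is parameterized on `is_cuda` but is not a test method itself (it additionally assumes the imports sketched after Example 3, plus `numpy as np`). Below is a hedged sketch of how it might be driven from a `unittest` case, with the CUDA variant skipped when no device is available; the class name and the skip condition are illustrative assumptions, not code taken from the snippet.

import unittest

import oneflow as flow


class TestUserOpGraph(unittest.TestCase):  # hypothetical class name
    def test_user_op_graph_cpu(self):
        _test_user_op_graph(self, is_cuda=False)

    @unittest.skipUnless(flow.cuda.device_count() > 0, "requires a CUDA device")
    def test_user_op_graph_cuda(self):
        _test_user_op_graph(self, is_cuda=True)


if __name__ == "__main__":
    unittest.main()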