def test_feed_input_tensor(test_case):
    """Feed an eager tensor into a lazy job via FeedInputOpExpr.

    Opens a JobBuildAndInferCtx under lazy mode, configures a predict job,
    builds a FeedInput op expression and dispatches an eager tensor through
    it, asserting the resulting tensor is lazy, local, and shape-preserving.

    Fix: removed the unused local `attrs = MutableCfgAttrMap()` — it was
    never passed to any call.
    """
    test_case.assertTrue(oneflow.framework.env_util.HasAllMultiClientEnvVars())
    x = flow.Tensor(1, 1, 10, 10)
    flow.nn.init.uniform_(x, a=-1.0, b=1.0)
    session = session_ctx.GetDefaultSession()
    test_case.assertTrue(isinstance(session, MultiClientSession))
    session.TryInit()
    with oneflow._oneflow_internal.lazy_mode.guard(True):
        oneflow._oneflow_internal.JobBuildAndInferCtx_Open(
            "cc_test_input_op_expr_job")
        # Job config uses the C++-backed cfg proto (set_* / mutable_* API).
        job_conf = (oneflow._oneflow_internal.oneflow.core.job.job_conf.
                    JobConfigProto())
        job_conf.set_job_name("cc_test_input_op_expr_job")
        job_conf.mutable_predict_conf()
        c_api_util.CurJobBuildAndInferCtx_SetJobConf(job_conf)
        op_name = "cc_Input_0"
        input_conf = (oneflow._oneflow_internal.oneflow.core.operator.
                      op_conf.FeedInputOpConf())
        input_conf.set_in_0("EagerTensorInput")
        input_conf.set_out_0("out_0")
        input_op = oneflow._oneflow_internal.one.FeedInputOpExpr(
            op_name, input_conf, ["in_0"], ["out_0"])
        out_tensor = _C.dispatch_feed_input(input_op, x)
        test_case.assertEqual(out_tensor.shape, (1, 1, 10, 10))
        test_case.assertTrue(out_tensor.is_lazy)
        test_case.assertTrue(out_tensor.is_local)
        oneflow._oneflow_internal.JobBuildAndInferCtx_Close()
def test_feed_var_tensor(test_case):
    """Feed an eager tensor into a lazy job via FeedVariableOpExpr.

    Builds a predict job inside a JobBuildAndInferCtx, constructs a
    FeedVariable op expression from a text-serialized pb proto, dispatches
    an eager tensor through it, and checks the output is a lazy, local
    tensor of the same shape.
    """
    test_case.assertTrue(oneflow.framework.env_util.HasAllMultiClientEnvVars())
    tensor = flow.Tensor(1, 1, 10, 10)
    flow.nn.init.uniform_(tensor, a=-1.0, b=1.0)
    sess = session_ctx.GetDefaultSession()
    test_case.assertTrue(isinstance(sess, MultiClientSession))
    sess.TryInit()
    with oneflow._oneflow_internal.lazy_mode.guard(True):
        oneflow._oneflow_internal.JobBuildAndInferCtx_Open(
            "cc_test_variable_op_expr_job")
        # This test drives the pure-python pb2 proto (attribute assignment),
        # then hands the job conf to the current build-and-infer context.
        conf = oneflow.core.job.job_conf_pb2.JobConfigProto()
        conf.job_name = "cc_test_variable_op_expr_job"
        conf.predict_conf.SetInParent()
        c_api_util.CurJobBuildAndInferCtx_SetJobConf(conf)
        proto = oneflow.core.operator.op_conf_pb2.FeedVariableOpConf()
        proto.in_0 = "EagerTensorInput"
        proto.out_0 = "out_0"
        serialized = text_format.MessageToString(proto)
        feed_var_op = oneflow._oneflow_internal.one.FeedVariableOpExpr(
            "cc_Variable_0", serialized, ["in_0"], ["out_0"])
        lazy_out = _C.dispatch_feed_variable(feed_var_op, tensor, l2=0)
        test_case.assertEqual(lazy_out.shape, (1, 1, 10, 10))
        test_case.assertTrue(lazy_out.is_lazy)
        test_case.assertTrue(lazy_out.is_local)
        oneflow._oneflow_internal.JobBuildAndInferCtx_Close()
def open(self, job_name, signature=None, batch_size=None):
    """Generator context manager that opens a build-and-infer context for
    `job_name`, applies optional signature / batch size, installs the job
    conf and an initial CPU scope, and yields this session.

    Args:
        job_name: name of the job to open.
        signature: optional job signature; applied when not None.
        batch_size: optional int batch size; applied only when an int.

    Fix: the original ran cleanup (`cur_job_name_` reset and
    JobBuildAndInferCtx_Close) only on the normal path after `yield`; an
    exception raised in the caller's body would leak the open context.
    Cleanup now runs in a `finally` block.
    """
    self._check_status(self.SessionStatus.OPEN)
    c_api_util.JobBuildAndInferCtx_Open(job_name)
    if signature is not None:
        self.set_job_signature(job_name, signature)
    if isinstance(batch_size, int):
        self.set_job_batch_size(job_name, batch_size)
    job_conf = self._get_job_conf(job_name)
    c_api_util.CurJobBuildAndInferCtx_SetJobConf(job_conf)
    # NOTE(chengcheng): placement_util is unavailable.
    # tag_and_dev_ids = placement_util.GetDefaultMachineDeviceIds(
    #     self.config_proto_.resource
    # )
    scope = scope_util.MakeInitialScope(job_conf, "cpu", ["0:0"], None,
                                        self.is_mirrored_)
    with runtime_mode.ModeScope(runtime_mode.GLOBAL_MODE):
        with scope_util.ScopeContext(scope):
            self.cur_job_name_ = job_name
            try:
                yield self
            finally:
                # Always reset state and close the context, even if the
                # caller's body raised.
                self.cur_job_name_ = None
                oneflow._oneflow_internal.JobBuildAndInferCtx_Close()
def __enter__(self):
    """Open a build-and-infer context for this object's job and make its
    job conf current.
    """
    job_name = self._job_conf.job_name()
    c_api_util.JobBuildAndInferCtx_Open(job_name)
    c_api_util.CurJobBuildAndInferCtx_SetJobConf(self._job_conf)
def _test_user_op_graph(test_case, is_cuda):
    """Build a small lazy graph (matmul -> relu -> matmul -> relu) out of
    feed/fetch op expressions and check shapes, laziness, and (optionally)
    CUDA placement of every intermediate tensor.

    Args:
        test_case: the unittest.TestCase driving the assertions.
        is_cuda: when True, inputs are moved to CUDA and the graph tensors
            are asserted to be CUDA tensors.

    Fix: `weight0_op` is a FeedVariableOpExpr but was dispatched through
    `_C.dispatch_feed_input`; it now uses `_C.dispatch_feed_variable`
    (with l2=0), consistent with the variable-feeding path used elsewhere
    in this file. Also removed commented-out dead code.
    """
    test_case.assertTrue(oneflow.framework.env_util.HasAllMultiClientEnvVars())
    x0 = flow.tensor(np.random.rand(20, 30), dtype=flow.float32)
    weight0 = flow.tensor(np.random.rand(30, 50), dtype=flow.float32)
    x1 = flow.tensor(np.random.rand(50, 70), dtype=flow.float32)
    if is_cuda:
        x0 = x0.to(device=flow.device("cuda"))
        weight0 = weight0.to(device=flow.device("cuda"))
        x1 = x1.to(device=flow.device("cuda"))
    # NOTE(chengcheng): this tiny net is:
    #    x0 * weight0 -> out0
    #    relu(out0) -> y0
    #    y0 * x1 -> out1
    #    relu(out1) -> y1
    session = session_ctx.GetDefaultSession()
    test_case.assertTrue(isinstance(session, MultiClientSession))
    session.TryInit()
    with oneflow._oneflow_internal.lazy_mode.guard(True):
        oneflow._oneflow_internal.JobBuildAndInferCtx_Open(
            "cc_test_user_op_expr_job_with_cuda" + str(is_cuda))
        job_conf = (oneflow._oneflow_internal.oneflow.core.job.job_conf.
                    JobConfigProto())
        job_conf.set_job_name("cc_test_user_op_expr_job_with_cuda" +
                              str(is_cuda))
        job_conf.mutable_predict_conf()
        c_api_util.CurJobBuildAndInferCtx_SetJobConf(job_conf)
        x0_conf = (oneflow._oneflow_internal.oneflow.core.operator.op_conf.
                   FeedInputOpConf())
        x0_op = oneflow._oneflow_internal.one.FeedInputOpExpr(
            "cc_Input_0", x0_conf, ["in_0"], ["out_0"])
        x1_conf = (oneflow._oneflow_internal.oneflow.core.operator.op_conf.
                   FeedInputOpConf())
        x1_op = oneflow._oneflow_internal.one.FeedInputOpExpr(
            "cc_Input_1", x1_conf, ["in_0"], ["out_0"])
        weight0_conf = (oneflow._oneflow_internal.oneflow.core.operator.
                        op_conf.FeedVariableOpConf())
        weight0_op = oneflow._oneflow_internal.one.FeedVariableOpExpr(
            "cc_Variable_0", weight0_conf, ["in_0"], ["out_0"])
        output_conf = (oneflow._oneflow_internal.oneflow.core.operator.
                       op_conf.FetchOutputOpConf())
        output_op = oneflow._oneflow_internal.one.FetchOutputOpExpr(
            "cc_Output_0", output_conf, ["in_0"], ["out_0"])
        x0_lazy_tensor = _C.dispatch_feed_input(x0_op, x0)
        x1_lazy_tensor = _C.dispatch_feed_input(x1_op, x1)
        # Variables go through the variable dispatch, not the input one.
        weight0_lazy_tensor = _C.dispatch_feed_variable(weight0_op, weight0,
                                                        l2=0)
        test_case.assertEqual(x0_lazy_tensor.shape, (20, 30))
        test_case.assertTrue(x0_lazy_tensor.is_lazy)
        test_case.assertEqual(weight0_lazy_tensor.shape, (30, 50))
        test_case.assertTrue(weight0_lazy_tensor.is_lazy)
        test_case.assertEqual(x1_lazy_tensor.shape, (50, 70))
        test_case.assertTrue(x1_lazy_tensor.is_lazy)
        out0 = flow._C.matmul(x0_lazy_tensor, weight0_lazy_tensor)
        test_case.assertEqual(out0.shape, (20, 50))
        test_case.assertTrue(out0.is_lazy)
        y0 = flow._C.relu(out0)
        test_case.assertEqual(y0.shape, (20, 50))
        test_case.assertTrue(y0.is_lazy)
        out1 = flow._C.matmul(y0, x1_lazy_tensor)
        test_case.assertEqual(out1.shape, (20, 70))
        test_case.assertTrue(out1.is_lazy)
        y1 = flow._C.relu(out1)
        test_case.assertEqual(y1.shape, (20, 70))
        test_case.assertTrue(y1.is_lazy)
        # Fetching materializes an eager tensor from the lazy graph output.
        eager_output = _C.dispatch_fetch_output(output_op, y1)
        test_case.assertEqual(eager_output.shape, (20, 70))
        test_case.assertTrue(not eager_output.is_lazy)
        if is_cuda:
            test_case.assertTrue(x0_lazy_tensor.is_cuda)
            test_case.assertTrue(x1_lazy_tensor.is_cuda)
            test_case.assertTrue(weight0_lazy_tensor.is_cuda)
            test_case.assertTrue(out0.is_cuda)
            test_case.assertTrue(y0.is_cuda)
            test_case.assertTrue(out1.is_cuda)
            test_case.assertTrue(y1.is_cuda)
        oneflow._oneflow_internal.JobBuildAndInferCtx_Close()