Example #1
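A TensorFlow model with an intermediate output: intermediate_layers=1 makes ModelOp return the activations of layer index 1 alongside the final prediction, keyed as 'embedding' and 'y'. The forward pass is checked eagerly and then twice more through a tf.function-compiled wrapper.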
 def test_tf_multi_output_int(self):
     model = fe.build(model_fn=multi_layer_tf_model, optimizer_fn="adam")
     op = ModelOp(inputs='x',
                  outputs=['y', 'embedding'],
                  model=model,
                  intermediate_layers=1)
     op.build(framework='tf', device=None)
     with self.subTest("Eager"):
         y, embedding = op.forward(data=self.tf_input_data_big,
                                   state=self.state)
         self.assertTrue(np.allclose(y.numpy(), self.output_big, atol=1e-4))
         self.assertTrue(
             np.allclose(embedding.numpy(),
                         self.embedding_output,
                         atol=1e-4))
     forward_fn = tf.function(op.forward)
     with self.subTest("Static Call 1"):
         y, embedding = forward_fn(data=self.tf_input_data_big,
                                   state=self.state)
         self.assertTrue(np.allclose(y.numpy(), self.output_big, atol=1e-4))
         self.assertTrue(
             np.allclose(embedding.numpy(),
                         self.embedding_output,
                         atol=1e-4))
     with self.subTest("Static Call 2"):
         y, embedding = forward_fn(data=self.tf_input_data_big,
                                   state=self.state)
         self.assertTrue(np.allclose(y.numpy(), self.output_big, atol=1e-4))
         self.assertTrue(
             np.allclose(embedding.numpy(),
                         self.embedding_output,
                         atol=1e-4))
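All four snippets are methods of the same unittest TestCase and rely on a common set of imports and fixtures that the listing omits. A minimal sketch of what they assume is below; the module path used for is_equal and the location of the model builders (multi_layer_tf_model, OneLayerTorchModel, and friends) are assumptions, since the snippets do not show them.

 # Sketch of the shared setup assumed by the snippets (not part of the original listing).
 import numpy as np
 import tensorflow as tf
 import torch

 import fastestimator as fe
 from fastestimator.op.tensorop.model import ModelOp
 # is_equal is assumed to come from FastEstimator's test utilities:
 from fastestimator.test.unittest_util import is_equal

 # The model builders (one_layer_tf_model, multi_layer_tf_model,
 # OneLayerTorchModel, MultiLayerTorchModel) and the fixtures referenced as
 # self.tf_input_data, self.torch_input_data, self.state, self.output, etc.
 # are defined elsewhere in the test suite and are not reproduced here.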
Example #2
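A single-layer PyTorch model: both the input tensor and the model are moved to the GPU when one is available, ModelOp is built against a matching torch.device, and the forward result is brought back to the CPU for comparison with the expected output.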
 def test_torch_input(self):
     model = fe.build(model_fn=OneLayerTorchModel, optimizer_fn="adam")
     self.torch_input_data = self.torch_input_data.to(
         "cuda:0" if torch.cuda.is_available() else "cpu")
     model.to("cuda:0" if torch.cuda.is_available() else "cpu")
     op = ModelOp(inputs='x', outputs='x', model=model)
     op.build(framework='torch',
              device=torch.device(
                  "cuda:0" if torch.cuda.is_available() else "cpu"))
     output = op.forward(data=self.torch_input_data, state=self.state)
     output = output.to("cpu")
     self.assertTrue(is_equal(output.detach().numpy(), self.output))
Example #3
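The TensorFlow counterpart of the previous test: a single-layer model whose forward pass is verified eagerly and then through two calls of a tf.function-wrapped forward, confirming that the traced graph produces the same result as eager execution.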
 def test_tf_input(self):
     model = fe.build(model_fn=one_layer_tf_model, optimizer_fn="adam")
     op = ModelOp(inputs='x', outputs='x', model=model)
     op.build(framework='tf', device=None)
     with self.subTest("Eager"):
         output = op.forward(data=self.tf_input_data, state=self.state)
         self.assertTrue(is_equal(output.numpy(), self.output))
     forward_fn = tf.function(op.forward)
     with self.subTest("Static Call 1"):
         output = forward_fn(data=self.tf_input_data, state=self.state)
         self.assertTrue(is_equal(output.numpy(), self.output))
     with self.subTest("Static Call 2"):
         output = forward_fn(data=self.tf_input_data, state=self.state)
         self.assertTrue(is_equal(output.numpy(), self.output))
Example #4
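The PyTorch counterpart of Example #1: a multi-layer model with intermediate_layers=1, run on the GPU when available. Both the final output and the intermediate embedding are moved back to the CPU and compared against the expected values with an absolute tolerance of 1e-4.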
 def test_torch_multi_output_int(self):
     model = fe.build(model_fn=MultiLayerTorchModel, optimizer_fn="adam")
     self.torch_input_data_big = self.torch_input_data_big.to(
         "cuda:0" if torch.cuda.is_available() else "cpu")
     model.to("cuda:0" if torch.cuda.is_available() else "cpu")
     op = ModelOp(inputs='x',
                  outputs=['y', 'embedding'],
                  model=model,
                  intermediate_layers=1)
     op.build(framework='torch',
              device=torch.device(
                  "cuda:0" if torch.cuda.is_available() else "cpu"))
     y, embedding = op.forward(data=self.torch_input_data_big,
                               state=self.state)
     y = y.to("cpu")
     embedding = embedding.to("cpu")
     self.assertTrue(
         np.allclose(y.detach().numpy(), self.output_big, atol=1e-4))
     self.assertTrue(
         np.allclose(embedding.detach().numpy(),
                     self.embedding_output,
                     atol=1e-4))
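In the tests above, ModelOp.forward is called directly so the op can be exercised in isolation. In ordinary FastEstimator usage the op is instead placed inside a Network, which drives the forward pass itself; a minimal sketch under that assumption (one_layer_tf_model stands in for any model builder):

 # Typical usage: let the Network invoke ModelOp rather than calling forward() by hand.
 import fastestimator as fe
 from fastestimator.op.tensorop.model import ModelOp

 model = fe.build(model_fn=one_layer_tf_model, optimizer_fn="adam")
 network = fe.Network(ops=[
     ModelOp(model=model, inputs="x", outputs="y_pred"),
 ])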