Example #1
  def build_test_residual_graph(self, backend):
    """Create a residual model.

    The graph contains a residual connection: the outputs of conv0 and conv2
    are added, and the result then flows through multiply, concat, and split
    ops before being recombined."""

    np_dtype = test_backend_dtypes[backend]
    self.expected_dtype = datatypes.np_to_smaug_type[np_dtype]
    with Graph(name="test_residual_graph", backend=backend) as graph:
      input_tensor = Tensor(
          data_layout=types_pb2.NCHW,
          tensor_data=np.random.rand(1, 1, 28, 28).astype(np_dtype))
      filter_tensor0 = Tensor(
          data_layout=types_pb2.NCHW,
          tensor_data=np.random.rand(64, 1, 3, 3).astype(np_dtype))
      filter_tensor1 = Tensor(
          data_layout=types_pb2.NCHW,
          tensor_data=np.random.rand(64, 1, 3, 3).astype(np_dtype))
      filter_tensor2 = Tensor(
          data_layout=types_pb2.NCHW,
          tensor_data=np.random.rand(64, 64, 3, 3).astype(np_dtype))
      bn_mean_tensor = Tensor(
          data_layout=types_pb2.NC,
          tensor_data=np.random.rand(1, 64).astype(np_dtype))
      bn_var_tensor = Tensor(
          data_layout=types_pb2.NC,
          tensor_data=np.random.rand(1, 64).astype(np_dtype))
      bn_gamma_tensor = Tensor(
          data_layout=types_pb2.NC,
          tensor_data=np.random.rand(1, 64).astype(np_dtype))
      bn_beta_tensor = Tensor(
          data_layout=types_pb2.NC,
          tensor_data=np.random.rand(1, 64).astype(np_dtype))

      act = data_op.input_data(input_tensor, "input")
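      # Skip branch: conv0 consumes the input directly and feeds the residual add.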
      x = nn_ops.convolution(
          act, filter_tensor0, stride=[1, 1], padding="same", name="conv0")
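      # Main branch: conv1 -> batch norm -> ReLU -> conv2.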
      out = nn_ops.convolution(
          act, filter_tensor1, stride=[1, 1], padding="same", name="conv1")
      out = nn_ops.batch_norm(
          out, bn_mean_tensor, bn_var_tensor, bn_gamma_tensor, bn_beta_tensor,
          name="bn")
      out = activation_ops.relu(out, "relu")
      out = nn_ops.convolution(
          out, filter_tensor2, stride=[1, 1], padding="same", name="conv2")
      out = math_ops.add(x, out, "add")
      out = math_ops.mul(x, out, "mul")
      # Concatenate the channel dimension of x and out.
      axis = 1 if out.shape.layout == types_pb2.NCHW else 3
      out = array_ops.concat([x, out], axis, "concat")
      # Evenly split the tensor into 4 over the channel dimension.
      out0, out1, out2, out3 = array_ops.split(out, 4, axis, "split")
      out = math_ops.mul(
          math_ops.add(out0, out1, "add1"), math_ops.add(out2, out3, "add2"),
          "mul1")

    self.test_graph, _ = graph.to_proto()
    self.backend = backend
    self.alignment = global_vars.backend_alignment[
        self.test_graph.backend]
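
Note on the axis selection above: the concat/split axis depends on the data layout. A minimal helper capturing that logic (a sketch, assuming only the types_pb2 enum already used in this example):

  # NCHW stores channels at index 1, NHWC at index 3 (see the axis pick above).
  def channel_axis(layout):
    return 1 if layout == types_pb2.NCHW else 3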
Example #2
 def func_true(a, b):
   minus_one = Tensor(
       data_layout=types_pb2.N, tensor_data=np.array([-1], dtype=self.dtype))
   res = control_flow_ops.cond(
       math_ops.less(a, b),
       lambda: math_ops.add(a, math_ops.mul(b, minus_one)),
       lambda: math_ops.add(a, b))[0]
   # Use the cond results before returning.
   return math_ops.mul(res, res)
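
Note that control_flow_ops.cond returns its merged branch outputs as a sequence, hence the [0] above; compare Example #6 below, which returns the sequence unmodified. A pure-numpy check of what this helper computes when a < b (values are illustrative):

 import numpy as np
 # With a < b, the true branch yields a + b * (-1) = a - b, which is then squared.
 a, b = np.array([2.0]), np.array([5.0])
 assert ((a - b) * (a - b))[0] == 9.0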
Example #3
  def test_mul(self):
    batch = 4
    channels = 32
    tf_a = tf.Variable(initializer(shape=[batch, channels], dtype=self.dtype))
    tf_b = tf.Variable(initializer(shape=[batch, channels], dtype=self.dtype))
    tf_result = tf.math.multiply(tf_a, tf_b)

    a = Tensor(data_layout=types_pb2.NC, tensor_data=tf_a.numpy())
    b = Tensor(data_layout=types_pb2.NC, tensor_data=tf_b.numpy())
    with Graph(name=self.graph_name, backend=self.backend) as graph:
      math_ops.mul(a, b)
    self.runAndValidate(graph, tf_result, decimal=3)
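
The `initializer` referenced above is defined elsewhere in the test module; a plausible stand-in (an assumption, not the original definition) is a TensorFlow random-normal initializer:

  import tensorflow as tf
  # Hypothetical replacement for the module-level `initializer` used above.
  initializer = tf.random_normal_initializer(stddev=0.1)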
Example #4
 def test_parent_use_child_outputs(self):
   with Graph(parent_graph_name, backend) as parent_graph:
     with Graph(child_graph_name, backend) as child_graph:
       z = math_ops.add(x, y, name="add")
       w = math_ops.add(z, z, name="add_1")
     u = math_ops.mul(z, z, name="mul")
     out = math_ops.mul(w, u, name="mul_1")
   self.assertGraphContains(parent_graph, {"add", "add_1", "mul", "mul_1"})
   self.assertNodesConnected(
       parent_graph, {
           "add_1": ["add", "add"],
           "mul": ["add", "add"],
           "mul_1": ["add_1", "mul"]
       })
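
This test relies on module-level fixtures (graph names, backend, and the x/y tensors) defined outside the snippet. A hedged sketch of what they might look like; all values here are assumptions:

 # Hypothetical fixtures; the real ones live in the surrounding test module.
 parent_graph_name = "parent_graph"
 child_graph_name = "child_graph"
 backend = "Reference"  # backend string is an assumption
 x = Tensor(data_layout=types_pb2.N, tensor_data=np.array([1]))
 y = Tensor(data_layout=types_pb2.N, tensor_data=np.array([1]))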
Example #5
 def test_shared_data_op(self):
   with Graph(graph_name, backend) as test_graph:
     x = Tensor(data_layout=types_pb2.N, tensor_data=np.array([1]))
     y = Tensor(data_layout=types_pb2.N, tensor_data=np.array([1]))
     res = math_ops.add(x, y)
     res = math_ops.mul(x, res)
   self.assertEqual(get_num_data_nodes(test_graph), 2)
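
get_num_data_nodes is a test helper, not part of the API shown here. A hedged sketch of one possible implementation; the proto field and op-type names are assumptions:

 # Hypothetical helper: count data nodes in the serialized graph, relying on
 # graph.to_proto() returning (graph_proto, tensor_data) as in Example #1.
 def get_num_data_nodes(graph):
   graph_proto, _ = graph.to_proto()
   return len([node for node in graph_proto.nodes  # field name is an assumption
               if node.op == types_pb2.Data])  # enum name is an assumption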
Example #6
 def func_true(a, b):
   minus_one = Tensor(
       data_layout=types_pb2.N, tensor_data=np.array([-1], dtype=self.dtype))
   return control_flow_ops.cond(
       math_ops.less(a, b),
       lambda: math_ops.add(a, math_ops.mul(b, minus_one)),
       lambda: math_ops.add(a, b))
Example #7
 def test_use_existing_data_op_in_parent_graph(self):
   with Graph(graph_name, backend) as parent_graph:
     x = Tensor(data_layout=types_pb2.N, tensor_data=np.array([1]))
     y = Tensor(data_layout=types_pb2.N, tensor_data=np.array([1]))
     res = math_ops.mul(x, y)
     with Graph(graph_name + "_subgraph", backend) as child_graph:
       res = math_ops.add(x, y)
   self.assertEqual(get_num_data_nodes(parent_graph), 2)
Example #8
  def step(self, input_tensor, timestep):
    """Invoke this cell for a single timestep.

    Args:
      input_tensor: An input tensor of shape [batch, depth].
      timestep: The current timestep, used to name this step's output tensors.

    Returns:
      A pair of tensors:
      1) The output tensor of shape [batch, depth].
      2) The updated cell state of the LSTM.
    """
    x = input_tensor
    name_pfx = self.name + "step%d:" % timestep

    z = nn_ops.mat_mul(x, self.kernel, name=name_pfx + "mm_f")
    z = math_ops.add(
        z, nn_ops.mat_mul(self.h, self.recurrent_kernel, name=name_pfx + "mm_u"),
        name=name_pfx + "add_z")
    # i = input_gate, c = new_input, f = forget_gate, o = output_gate
    zi, zf, zc, zo = array_ops.split(z, num_or_size_splits=4, axis=1)
    i = activation_ops.sigmoid(zi, name=name_pfx + "sigmoid_i")
    f = activation_ops.sigmoid(zf, name=name_pfx + "sigmoid_f")
    c = math_ops.add(
        math_ops.mul(f, self.c, name=name_pfx + "mul_f"),
        math_ops.mul(
            i,
            self.activation(
                zc, **self.activation_params, name=name_pfx + "act0"),
            name=name_pfx + "mul_i"), name=name_pfx + "add_c")
    o = activation_ops.sigmoid(zo, name=name_pfx + "sigmoid_o")
    h = math_ops.mul(
        o, self.activation(c, **self.activation_params, name=name_pfx + "act1"),
        name=name_pfx + "mul_h")
    self.c = c
    self.h = h
    return self.h, self.c
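
Because step() threads self.h and self.c through the cell, unrolling over a sequence is a plain loop. A minimal sketch, where `cell` (an instance of the enclosing class) and `inputs` (a list of [batch, depth] tensors) are assumptions:

  # Hedged unrolling sketch over a sequence of timesteps.
  outputs = []
  for timestep, x in enumerate(inputs):
    h, c = cell.step(x, timestep)  # h is the step output, c the cell state
    outputs.append(h)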
Example #9
 def test_cond_op_simple_func(self):
   with Graph(name=self.graph_name, backend=self.backend) as graph:
     x0 = Tensor(
         data_layout=types_pb2.N, tensor_data=np.array([2], dtype=self.dtype))
     x1 = Tensor(
         data_layout=types_pb2.N, tensor_data=np.array([5], dtype=self.dtype))
     y = Tensor(
         data_layout=types_pb2.N, tensor_data=np.array([10], dtype=self.dtype))
     z = Tensor(
         data_layout=types_pb2.N, tensor_data=np.array([20], dtype=self.dtype))
     expected_res = Tensor(
         data_layout=types_pb2.N, tensor_data=np.array([30], dtype=self.dtype))
     # res = y + z if x0 < x1 else y * z
     res = control_flow_ops.cond(
         math_ops.less(x0, x1), lambda: math_ops.add(y, z),
         lambda: math_ops.mul(y, z))
   self.runAndValidate(graph, expected_res.tensor_data)
Example #10
  def test_cond_op_func_call(self):
    def func(a, b):
      minus_three = Tensor(
          data_layout=types_pb2.N, tensor_data=np.array([-3], dtype=self.dtype))
      return math_ops.add(a, math_ops.mul(b, minus_three))

    with Graph(name=self.graph_name, backend=self.backend) as graph:
      x0 = Tensor(
          data_layout=types_pb2.N, tensor_data=np.array([2], dtype=self.dtype))
      x1 = Tensor(
          data_layout=types_pb2.N, tensor_data=np.array([5], dtype=self.dtype))
      y = Tensor(
          data_layout=types_pb2.N, tensor_data=np.array([10], dtype=self.dtype))
      z = Tensor(
          data_layout=types_pb2.N, tensor_data=np.array([20], dtype=self.dtype))
      expected_res = Tensor(
          data_layout=types_pb2.N, tensor_data=np.array([-50],
                                                        dtype=self.dtype))
      # res = y - 3z if x0 < x1 else y * z
      res = control_flow_ops.cond(
          math_ops.less(x0, x1), lambda: func(y, z), lambda: math_ops.mul(y, z))
    self.runAndValidate(graph, expected_res.tensor_data)
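
A quick numpy check of the expected value: with x0 < x1 the true branch runs, so res = y + (-3) * z = 10 - 60 = -50.

  import numpy as np
  assert (np.array([10]) + np.array([-3]) * np.array([20]))[0] == -50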
Example #11
 def test_user_supplied_names0(self):
   with Graph(graph_name, backend) as test_graph:
     res = math_ops.add(x, y, name="add")
     res = math_ops.mul(res, res, name="mul")
   self.assertEqual(get_node_names(test_graph), {"add", "mul"})
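
get_node_names is another test-module helper. A hedged sketch that skips the auto-created data nodes so only the user-visible ops are compared; proto field and enum names are assumptions:

 # Hypothetical helper: collect non-data node names from the graph proto.
 def get_node_names(graph):
   graph_proto, _ = graph.to_proto()
   return set(node.name for node in graph_proto.nodes  # field name is an assumption
              if node.op != types_pb2.Data)  # enum name is an assumption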
Example #12
 def test_default_names(self):
   with Graph(graph_name, backend) as test_graph:
     res = math_ops.add(x, y)
     res = math_ops.mul(x, res)
   self.assertEqual(get_node_names(test_graph), {"add", "mul"})
Example #13
 def func_false(a, b):
   two = Tensor(
       data_layout=types_pb2.N, tensor_data=np.array([2], dtype=self.dtype))
   return control_flow_ops.cond(
       math_ops.greater(a, b), lambda: math_ops.mul(a, two),
       lambda: math_ops.mul(b, two))
Example #14
 def func(a, b):
   minus_three = Tensor(
       data_layout=types_pb2.N, tensor_data=np.array([-3], dtype=self.dtype))
   return math_ops.add(a, math_ops.mul(b, minus_three))