Example #1
  def build_test_residual_graph(self, backend):
    """Create a residual model.

    The graph contains a residual connection: the outputs of conv0 and conv2
    are added together, and the sum then feeds a multiply, a concat, and a
    split."""

    np_dtype = test_backend_dtypes[backend]
    self.expected_dtype = datatypes.np_to_smaug_type[np_dtype]
    with Graph(name="test_residual_graph", backend=backend) as graph:
      input_tensor = Tensor(
          data_layout=types_pb2.NCHW,
          tensor_data=np.random.rand(1, 1, 28, 28).astype(np_dtype))
      filter_tensor0 = Tensor(
          data_layout=types_pb2.NCHW,
          tensor_data=np.random.rand(64, 1, 3, 3).astype(np_dtype))
      filter_tensor1 = Tensor(
          data_layout=types_pb2.NCHW,
          tensor_data=np.random.rand(64, 1, 3, 3).astype(np_dtype))
      filter_tensor2 = Tensor(
          data_layout=types_pb2.NCHW,
          tensor_data=np.random.rand(64, 64, 3, 3).astype(np_dtype))
      bn_mean_tensor = Tensor(
          data_layout=types_pb2.NC,
          tensor_data=np.random.rand(1, 64).astype(np_dtype))
      bn_var_tensor = Tensor(
          data_layout=types_pb2.NC,
          tensor_data=np.random.rand(1, 64).astype(np_dtype))
      bn_gamma_tensor = Tensor(
          data_layout=types_pb2.NC,
          tensor_data=np.random.rand(1, 64).astype(np_dtype))
      bn_beta_tensor = Tensor(
          data_layout=types_pb2.NC,
          tensor_data=np.random.rand(1, 64).astype(np_dtype))

      act = data_op.input_data(input_tensor, "input")
      x = nn_ops.convolution(
          act, filter_tensor0, stride=[1, 1], padding="same", name="conv0")
      out = nn_ops.convolution(
          act, filter_tensor1, stride=[1, 1], padding="same", name="conv1")
      out = nn_ops.batch_norm(
          out, bn_mean_tensor, bn_var_tensor, bn_gamma_tensor, bn_beta_tensor,
          name="bn")
      out = activation_ops.relu(out, "relu")
      out = nn_ops.convolution(
          out, filter_tensor2, stride=[1, 1], padding="same", name="conv2")
      out = math_ops.add(x, out, "add")
      out = math_ops.mul(x, out, "mul")
      # Concatenate the channel dimension of x and out.
      axis = 1 if out.shape.layout == types_pb2.NCHW else 3
      out = array_ops.concat([x, out], axis, "concat")
      # Evenly split the tensor into 4 over the channel dimension.
      out0, out1, out2, out3 = array_ops.split(out, 4, axis, "split")
      out = math_ops.mul(
          math_ops.add(out0, out1, "add1"), math_ops.add(out2, out3, "add2"),
          "mul1")

    self.test_graph, _ = graph.to_proto()
    self.backend = backend
    self.alignment = global_vars.backend_alignment[
        self.test_graph.backend]
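All of these listings appear to be usage examples of math_ops.add from the SMAUG Python frontend: tensors are declared with a data layout and a NumPy payload, ops invoked inside a Graph context are recorded into that graph, and to_proto() serializes the result. A minimal, self-contained sketch of that workflow follows; the import paths and the "Reference" backend name are assumptions about the SMAUG package layout, not taken from the examples themselves:

import numpy as np
from smaug.core import types_pb2
from smaug.python.graph import Graph
from smaug.python.tensor import Tensor
from smaug.python.ops import math_ops

# Declare two 1-D inputs; the N layout marks a flat, batch-only shape.
x = Tensor(data_layout=types_pb2.N,
           tensor_data=np.array([1.0], dtype=np.float32))
y = Tensor(data_layout=types_pb2.N,
           tensor_data=np.array([2.0], dtype=np.float32))

# Ops called inside the context manager are recorded into this graph.
with Graph(name="minimal_graph", backend="Reference") as graph:
  out = math_ops.add(x, y, name="add")

# Serialize the graph topology and its tensor data to protobufs.
graph_proto, tensor_data = graph.to_proto()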
Example #2
 def test_user_supplied_names2(self):
     with Graph(graph_name, backend) as test_graph:
         res = math_ops.add(x, y, name="add")
         res = math_ops.add(res, res, name="add_1")
         res = math_ops.add(res, res, name="add_1")
     self.assertEqual(get_node_names(test_graph),
                      {"add", "add_1", "add_1_1"})
Example #3
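 # Presumably defined inside a test method, so self.dtype is in scope.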
 def func_true(a, b):
   minus_one = Tensor(
       data_layout=types_pb2.N, tensor_data=np.array([-1], dtype=self.dtype))
   return control_flow_ops.cond(
       math_ops.less(a, b),
       lambda: math_ops.add(a, math_ops.mul(b, minus_one)),
       lambda: math_ops.add(a, b))
Example #4
 def test_subgraph_merge(self):
   with Graph(parent_graph_name, backend) as parent_graph:
     with Graph(child_graph_name, backend) as child_graph:
       z = math_ops.add(x, y, name="add")
       w = math_ops.add(z, z, name="add_1")
   self.assertGraphContains(parent_graph, {"add", "add_1"})
   self.assertNodesConnected(parent_graph, {"add_1": ["add", "add"]})
Example #5
 def func_true(a, b):
   minus_one = Tensor(
       data_layout=types_pb2.N, tensor_data=np.array([-1], dtype=self.dtype))
   res = control_flow_ops.cond(
       math_ops.less(a, b),
       lambda: math_ops.add(a, math_ops.mul(b, minus_one)),
       lambda: math_ops.add(a, b))[0]
   # Use the cond results before returning.
   return math_ops.mul(res, res)
Example #6
  def test_add(self):
    batch = 4
    channels = 32
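    # `initializer` is assumed to be a TF initializer instance created in
    # setUp(), e.g. tf.random_normal_initializer().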
    tf_a = tf.Variable(initializer(shape=[batch, channels], dtype=self.dtype))
    tf_b = tf.Variable(initializer(shape=[batch, channels], dtype=self.dtype))
    tf_result = tf.math.add(tf_a, tf_b)

    a = Tensor(data_layout=types_pb2.NC, tensor_data=tf_a.numpy())
    b = Tensor(data_layout=types_pb2.NC, tensor_data=tf_b.numpy())
    with Graph(name=self.graph_name, backend=self.backend) as graph:
      math_ops.add(a, b)
    self.runAndValidate(graph, tf_result, decimal=3)
Example #7
 def test_nested_subgraphs(self):
   with Graph(parent_graph_name, backend) as parent_graph:
     z = math_ops.add(x, y, name="add")
     with Graph(child_graph_name, backend) as child_graph:
       w = math_ops.add(z, z, name="add_1")
       with Graph(grandchild_graph_name, backend) as grandchild_graph:
         u = math_ops.add(z, w, name="add_2")
   self.assertGraphContains(parent_graph, {"add", "add_1", "add_2"})
   self.assertNodesConnected(
       parent_graph, {
           "add_1": ["add", "add"],
           "add_2": ["add", "add_1"]
       })
Example #8
 def test_parent_use_child_outputs(self):
   with Graph(parent_graph_name, backend) as parent_graph:
     with Graph(child_graph_name, backend) as child_graph:
       z = math_ops.add(x, y, name="add")
       w = math_ops.add(z, z, name="add_1")
     u = math_ops.mul(z, z, name="mul")
     out = math_ops.mul(w, u, name="mul_1")
   self.assertGraphContains(parent_graph, {"add", "add_1", "mul", "mul_1"})
   self.assertNodesConnected(
       parent_graph, {
           "add_1": ["add", "add"],
           "mul": ["add", "add"],
           "mul_1": ["add_1", "mul"]
       })
Example #9
 def test_shared_data_op(self):
   with Graph(graph_name, backend) as test_graph:
     x = Tensor(data_layout=types_pb2.N, tensor_data=np.array([1]))
     y = Tensor(data_layout=types_pb2.N, tensor_data=np.array([1]))
     res = math_ops.add(x, y)
     res = math_ops.mul(x, res)
   self.assertEqual(get_num_data_nodes(test_graph), 2)
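The get_num_data_nodes helper is likewise not shown; under the same schema assumptions as the get_node_names sketch above, it could look like:

def get_num_data_nodes(graph):
  # Count the data nodes created for the graph's input tensors.
  graph_proto, _ = graph.to_proto()
  return len([node for node in graph_proto.nodes
              if node.op == types_pb2.Data])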
Example #10
 def test_use_existing_data_op(self):
   with Graph(graph_name, backend) as test_graph:
     x = Tensor(data_layout=types_pb2.N, tensor_data=np.array([1]))
     y = Tensor(data_layout=types_pb2.N, tensor_data=np.array([1]))
     # Explicitly creating the data op for x means add() below reuses it
     # instead of creating a second data node for x.
     x_ = data_op.input_data(x)
     res = math_ops.add(x, y)
   self.assertEqual(get_num_data_nodes(test_graph), 2)
Example #11
 def test_use_existing_data_op_in_parent_graph(self):
   with Graph(graph_name, backend) as parent_graph:
     x = Tensor(data_layout=types_pb2.N, tensor_data=np.array([1]))
     y = Tensor(data_layout=types_pb2.N, tensor_data=np.array([1]))
     res = math_ops.mul(x, y)
     with Graph(graph_name + "_subgraph", backend) as child_graph:
       res = math_ops.add(x, y)
   self.assertEqual(get_num_data_nodes(parent_graph), 2)
Example #12
  def step(self, input_tensor, timestep):
    """Invoke this cell for a single timestep.

    Args:
      input_tensor: An input tensor of shape [batch, depth].
      timestep: The current timestep, used for naming the output tensors.

    Returns:
      The output contains two parts:
      1) An output tensor of shape [batch, depth].
      2) The updated cell state of the LSTM.
    """
    x = input_tensor
    name_pfx = self.name + "step%d:" % timestep

    z = nn_ops.mat_mul(x, self.kernel, name=name_pfx + "mm_f")
    z = math_ops.add(
        z, nn_ops.mat_mul(self.h, self.recurrent_kernel, name=name_pfx + "mm_u"),
        name=name_pfx + "add_z")
    # i = input_gate, c = new_input, f = forget_gate, o = output_gate
    zi, zf, zc, zo = array_ops.split(z, num_or_size_splits=4, axis=1)
    i = activation_ops.sigmoid(zi, name=name_pfx + "sigmoid_i")
    f = activation_ops.sigmoid(zf, name=name_pfx + "sigmoid_f")
    c = math_ops.add(
        math_ops.mul(f, self.c, name=name_pfx + "mul_f"),
        math_ops.mul(
            i,
            self.activation(
                zc, **self.activation_params, name=name_pfx + "act0"),
            name=name_pfx + "mul_i"), name=name_pfx + "add_c")
    o = activation_ops.sigmoid(zo, name=name_pfx + "sigmoid_o")
    h = math_ops.mul(
        o, self.activation(c, **self.activation_params, name=name_pfx + "act1"),
        name=name_pfx + "mul_h")
    self.c = c
    self.h = h
    return self.h, self.c
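A hypothetical driver for this cell, to make the timestep argument concrete; cell and inputs are illustrative names, not part of the source:

# Unroll the cell over a sequence of [batch, depth] input tensors.
outputs = []
for t, x in enumerate(inputs):
  h, c = cell.step(x, timestep=t)
  outputs.append(h)
# outputs now holds the hidden state after every step; c is the final
# cell state.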
Example #13
 def test_cond_op_simple_func(self):
   with Graph(name=self.graph_name, backend=self.backend) as graph:
     x0 = Tensor(
         data_layout=types_pb2.N, tensor_data=np.array([2], dtype=self.dtype))
     x1 = Tensor(
         data_layout=types_pb2.N, tensor_data=np.array([5], dtype=self.dtype))
     y = Tensor(
         data_layout=types_pb2.N, tensor_data=np.array([10], dtype=self.dtype))
     z = Tensor(
         data_layout=types_pb2.N, tensor_data=np.array([20], dtype=self.dtype))
     expected_res = Tensor(
         data_layout=types_pb2.N, tensor_data=np.array([30], dtype=self.dtype))
      # res = y + z if x0 < x1 else y * z. Here 2 < 5, so res = 10 + 20 = 30,
      # matching expected_res.
     res = control_flow_ops.cond(
         math_ops.less(x0, x1), lambda: math_ops.add(y, z),
         lambda: math_ops.mul(y, z))
   self.runAndValidate(graph, expected_res.tensor_data)
Example #14
  def compute_score(self, query):
    # The score is computed as tanh(query + keys) * w_alignments.

    name_pfx = self.name + ":score:"
    # Reshape from [batch, depth] to [batch, 1, depth] for broadcasting.
    query = array_ops.expand_dims(query, 1, name=name_pfx + "expand")
    # [batch, time, depth].
    activations = activation_ops.tanh(
        math_ops.add(self.keys, query, name=name_pfx + "add"),
        name=name_pfx + "stack")
    # [batch * time, depth]
    activations = array_ops.reshape(
        activations, [self.batch_size * self.timesteps, self.depth],
        types_pb2.NC, name=name_pfx + "reshape0")
    # [batch * time, 1]
    scores = nn_ops.mat_mul(activations, self.w_alignment, name=name_pfx + "mm")
    # [batch, time]
    scores = array_ops.reshape(
        scores, [self.batch_size, self.timesteps], types_pb2.NC,
        name=name_pfx + "reshape1")
    return scores
Example #15
 def test_auto_unique_names(self):
     with Graph(graph_name, backend) as test_graph:
         res = math_ops.add(x, y)
         res = math_ops.add(res, res)
         res = math_ops.add(res, res)
     self.assertEqual(get_node_names(test_graph), {"add", "add_1", "add_2"})
Example #16
 def test_default_names(self):
     with Graph(graph_name, backend) as test_graph:
         res = math_ops.add(x, y)
         res = math_ops.mul(x, res)
     self.assertEqual(get_node_names(test_graph), {"add", "mul"})
Example #17
 def func(a, b):
   minus_three = Tensor(
       data_layout=types_pb2.N, tensor_data=np.array([-3], dtype=self.dtype))
   return math_ops.add(a, math_ops.mul(b, minus_three))