Example No. 1
 def test_subgraph_merge(self):
   with Graph(parent_graph_name, backend) as parent_graph:
     with Graph(child_graph_name, backend) as child_graph:
       z = math_ops.add(x, y, name="add")
       w = math_ops.add(z, z, name="add_1")
   self.assertGraphContains(parent_graph, {"add", "add_1"})
   self.assertNodesConnected(parent_graph, {"add_1": ["add", "add"]})
Example No. 2
 def test_use_existing_data_op_in_parent_graph(self):
   with Graph(graph_name, backend) as parent_graph:
     x = Tensor(data_layout=types_pb2.N, tensor_data=np.array([1]))
     y = Tensor(data_layout=types_pb2.N, tensor_data=np.array([1]))
     res = math_ops.mul(x, y)
     with Graph(graph_name + "_subgraph", backend) as child_graph:
       res = math_ops.add(x, y)
   self.assertEqual(get_num_data_nodes(parent_graph), 2)
Example No. 3
 def test_nested_subgraphs(self):
   with Graph(parent_graph_name, backend) as parent_graph:
     z = math_ops.add(x, y, name="add")
     with Graph(child_graph_name, backend) as child_graph:
       w = math_ops.add(z, z, name="add_1")
       with Graph(grandchild_graph_name, backend) as grandchild_graph:
         u = math_ops.add(z, w, name="add_2")
   self.assertGraphContains(parent_graph, {"add", "add_1", "add_2"})
   self.assertNodesConnected(
       parent_graph, {
           "add_1": ["add", "add"],
           "add_2": ["add", "add_1"]
       })
Example No. 4
 def test_parent_use_child_outputs(self):
   with Graph(parent_graph_name, backend) as parent_graph:
     with Graph(child_graph_name, backend) as child_graph:
       z = math_ops.add(x, y, name="add")
       w = math_ops.add(z, z, name="add_1")
     u = math_ops.mul(z, z, name="mul")
     out = math_ops.mul(w, u, name="mul_1")
   self.assertGraphContains(parent_graph, {"add", "add_1", "mul", "mul_1"})
   self.assertNodesConnected(
       parent_graph, {
           "add_1": ["add", "add"],
           "mul": ["add", "add"],
           "mul_1": ["add_1", "mul"]
       })
Example No. 5
  def test_multilayered_lstm(self):
    # Build and run an LSTM layer in TF.
    tf.keras.backend.set_floatx(
        global_vars.backend_datatype[self.backend].__name__)
    inputs = tf.random.normal([4, 8, 16],
                              dtype=global_vars.backend_datatype[self.backend])
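     # The input shape is [batch, timesteps, features], matching the NTC data
     # layout used for the SMAUG input tensor below.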

    model = tf.keras.models.Sequential()
    tf_lstm0 = tf.keras.layers.LSTM(
        32, return_sequences=True, use_bias=False, unit_forget_bias=False)
     # Let TF's LSTM return only the last timestep result, because that is what
     # SMAUG's C++ runtime returns.
    tf_lstm1 = tf.keras.layers.LSTM(
        32, return_sequences=False, use_bias=False, unit_forget_bias=False)
    model.add(tf_lstm0)
    model.add(tf_lstm1)
    model.compile()
    tf_output = model.predict(inputs)

    # Build the model in SMAUG using the tensors from the TF model.
    inputs_tensor = Tensor(
        data_layout=types_pb2.NTC, tensor_data=inputs.numpy())
    w0, u0 = createSmaugWeights(tf_lstm0)
    w1, u1 = createSmaugWeights(tf_lstm1)
    with Graph(name=self.graph_name, backend=self.backend) as graph:
      inputs = input_data(inputs_tensor)
      sg_lstm0 = LSTM([w0, u0])
      sg_lstm1 = LSTM([w1, u1])
      sg_outputs, state = sg_lstm0(inputs)
      sg_outputs, state = sg_lstm1(sg_outputs)

    self.runAndValidate(graph, tf_output)
Example No. 6
  def build_test_sequential_graph(self, backend):
    """Create a sequential model."""
    np_dtype = test_backend_dtypes[backend]
    self.expected_dtype = datatypes.np_to_smaug_type[np_dtype]
    with Graph(name="test_sequential_graph", backend=backend) as graph:
      input_tensor = Tensor(
          data_layout=types_pb2.NCHW,
          tensor_data=np.random.rand(1, 3, 28, 28).astype(np_dtype))
      filter_tensor0 = Tensor(
          data_layout=types_pb2.NCHW,
          tensor_data=np.random.rand(64, 3, 3, 3).astype(np_dtype))
      filter_tensor1 = Tensor(
          data_layout=types_pb2.NCHW,
          tensor_data=np.random.rand(64, 64, 3, 3).astype(np_dtype))
      weight_tensor0 = Tensor(
          data_layout=types_pb2.NC,
          tensor_data=np.random.rand(254, 12544).astype(np_dtype))
      weight_tensor1 = Tensor(
          data_layout=types_pb2.NC,
          tensor_data=np.random.rand(10, 254).astype(np_dtype))
      bn_mean_tensor = Tensor(
          data_layout=types_pb2.NC,
          tensor_data=np.random.rand(1, 64).astype(np_dtype))
      bn_var_tensor = Tensor(
          data_layout=types_pb2.NC,
          tensor_data=np.random.rand(1, 64).astype(np_dtype))
      bn_gamma_tensor = Tensor(
          data_layout=types_pb2.NC,
          tensor_data=np.random.rand(1, 64).astype(np_dtype))
      bn_beta_tensor = Tensor(
          data_layout=types_pb2.NC,
          tensor_data=np.random.rand(1, 64).astype(np_dtype))

      out = data_op.input_data(input_tensor, "input")
      out = nn_ops.convolution(
          out, filter_tensor0, stride=[1, 1], padding="same", name="conv0")
      out = activation_ops.relu(out, "conv0_relu")
      out = nn_ops.batch_norm(
          out, bn_mean_tensor, bn_var_tensor, bn_gamma_tensor, bn_beta_tensor,
          name="bn")
      out = nn_ops.convolution(
          out, filter_tensor1, stride=[1, 1], padding="same", name="conv1")
      out = activation_ops.relu(out, "conv1_relu")
      out = nn_ops.max_pool(out, pool_size=[2, 2], stride=[2, 2], name="pool")
      out = array_ops.flatten(out, "flatten")
      out = nn_ops.mat_mul(out, weight_tensor0, name="fc0")
      out = activation_ops.relu(out, "fc0_relu")
      out = nn_ops.mat_mul(out, weight_tensor1, name="fc1")
      out = array_ops.expand_dims(out, 1, "expand_dims")
      out = array_ops.squeeze(out, 1, "squeeze")
      out = array_ops.reshape(out, [2, 5], types_pb2.NC, "reshape")
      out = array_ops.repeat(out, [4, 2], "repeat")
      out = array_ops.stack(out, 4, 1, "stack")
      out0, out1, out2, out3 = array_ops.unstack(out, 1, "unstack")
      out0 = array_ops.reshape(out0, [1, 1, 8, 10], types_pb2.NCHW, "reshape")
      out0 = array_ops.padding(out0, [0, 0, 0, 0, 1, 1, 1, 1], "padding")

    self.test_graph, _ = graph.to_proto()
    self.backend = backend
    self.alignment = global_vars.backend_alignment[backend]
Example No. 7
  def build_test_residual_graph(self, backend):
    """Create a residual model.

     The graph contains a residual connection: the outputs of conv0 and conv2
     are added together near the end."""

    np_dtype = test_backend_dtypes[backend]
    self.expected_dtype = datatypes.np_to_smaug_type[np_dtype]
    with Graph(name="test_residual_graph", backend=backend) as graph:
      input_tensor = Tensor(
          data_layout=types_pb2.NCHW,
          tensor_data=np.random.rand(1, 1, 28, 28).astype(np_dtype))
      filter_tensor0 = Tensor(
          data_layout=types_pb2.NCHW,
          tensor_data=np.random.rand(64, 1, 3, 3).astype(np_dtype))
      filter_tensor1 = Tensor(
          data_layout=types_pb2.NCHW,
          tensor_data=np.random.rand(64, 1, 3, 3).astype(np_dtype))
      filter_tensor2 = Tensor(
          data_layout=types_pb2.NCHW,
          tensor_data=np.random.rand(64, 64, 3, 3).astype(np_dtype))
      bn_mean_tensor = Tensor(
          data_layout=types_pb2.NC,
          tensor_data=np.random.rand(1, 64).astype(np_dtype))
      bn_var_tensor = Tensor(
          data_layout=types_pb2.NC,
          tensor_data=np.random.rand(1, 64).astype(np_dtype))
      bn_gamma_tensor = Tensor(
          data_layout=types_pb2.NC,
          tensor_data=np.random.rand(1, 64).astype(np_dtype))
      bn_beta_tensor = Tensor(
          data_layout=types_pb2.NC,
          tensor_data=np.random.rand(1, 64).astype(np_dtype))

      act = data_op.input_data(input_tensor, "input")
      x = nn_ops.convolution(
          act, filter_tensor0, stride=[1, 1], padding="same", name="conv0")
      out = nn_ops.convolution(
          act, filter_tensor1, stride=[1, 1], padding="same", name="conv1")
      out = nn_ops.batch_norm(
          out, bn_mean_tensor, bn_var_tensor, bn_gamma_tensor, bn_beta_tensor,
          name="bn")
      out = activation_ops.relu(out, "relu")
      out = nn_ops.convolution(
          out, filter_tensor2, stride=[1, 1], padding="same", name="conv2")
      out = math_ops.add(x, out, "add")
      out = math_ops.mul(x, out, "mul")
      # Concatenate the channel dimension of x and out.
      axis = 1 if out.shape.layout == types_pb2.NCHW else 3
      out = array_ops.concat([x, out], axis, "concat")
      # Evenly split the tensor into 4 over the channel dimension.
      out0, out1, out2, out3 = array_ops.split(out, 4, axis, "split")
      out = math_ops.mul(
          math_ops.add(out0, out1, "add1"), math_ops.add(out2, out3, "add2"),
          "mul1")

    self.test_graph, _ = graph.to_proto()
    self.backend = backend
    self.alignment = global_vars.backend_alignment[
        self.test_graph.backend]
Example No. 8
 def test_user_supplied_names2(self):
     with Graph(graph_name, backend) as test_graph:
         res = math_ops.add(x, y, name="add")
         res = math_ops.add(res, res, name="add_1")
         res = math_ops.add(res, res, name="add_1")
     self.assertEqual(get_node_names(test_graph),
                      {"add", "add_1", "add_1_1"})
Example No. 9
  def test_convolution(self):
    batch = 4
    width = 8
    height = 8
    channels = 32
    filter_height = 3
    filter_width = 3
    num_filters = 128
    tf_inputs = tf.Variable(
        initializer(shape=[batch, height, width, channels], dtype=self.dtype))
    tf_filters = tf.Variable(
        initializer(
            shape=[filter_height, filter_width, channels, num_filters],
            dtype=self.dtype))
    tf_results = tf.nn.conv2d(
        tf_inputs, tf_filters, strides=[1, 1], padding="SAME",
        data_format="NHWC", dilations=None)

    inputs = Tensor(data_layout=types_pb2.NHWC, tensor_data=tf_inputs.numpy())
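    # TF's conv2d filters are laid out [H, W, in_channels, out_channels]; the
    # transpose below moves the output-channel dimension to the front to match
    # the NHWC filter layout declared for the SMAUG tensor (N = num_filters).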
    filters = Tensor(
        data_layout=types_pb2.NHWC,
        tensor_data=np.transpose(tf_filters.numpy(), (3, 0, 1, 2)))
    with Graph(name=self.graph_name, backend=self.backend) as graph:
      nn_ops.convolution(inputs, filters, stride=[1, 1], padding="same")
    self.runAndValidate(graph, tf_results, decimal=2)
Example No. 10
 def test_attr_smv_padding(self):
     """Test tensor attributes with SMV backend. Additional padding required."""
     tensor_data = np.array([[1.1, 2.2, 3.3, 4.4], [5.5, 6.6, 7.7, 8.8]],
                            dtype=np.float16)
     with Graph("test_graph", "SMV") as test_graph:
         input_tensor = Tensor(data_layout=types_pb2.NCHW,
                               tensor_data=tensor_data)
         act = input_data(input_tensor, "input")
     graph_proto, tensor_data_array = test_graph.to_proto()
     self.assertEqual(graph_proto.backend, "SMV")
     node = get_node_proto(graph_proto, "input")
     self.assertEqual(node.input_tensors[0].data_type, types_pb2.Float16)
     self.assertEqual(node.input_tensors[0].shape.dims, [2, 4])
     self.assertEqual(node.input_tensors[0].shape.layout, types_pb2.NCHW)
     self.assertEqual(node.input_tensors[0].shape.alignment, 8)
     tensor_data_proto = get_tensor_data(tensor_data_array,
                                         node.input_tensors[0].name)
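      # With an alignment of 8, each 4-element row is zero-padded out to 8 elements.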
     self.assertEqualFP16(
         tensor_data_proto.half_data,
         np.array([
             1.1, 2.2, 3.3, 4.4, 0, 0, 0, 0, 5.5, 6.6, 7.7, 8.8, 0, 0, 0, 0
         ],
                  dtype=np.float16))
     self.assertEqual(len(tensor_data_proto.float_data), 0)
     self.assertEqual(len(tensor_data_proto.double_data), 0)
     self.assertEqual(len(tensor_data_proto.int_data), 0)
     self.assertEqual(len(tensor_data_proto.int64_data), 0)
Example No. 11
 def test_use_existing_data_op(self):
   with Graph(graph_name, backend) as test_graph:
     x = Tensor(data_layout=types_pb2.N, tensor_data=np.array([1]))
     y = Tensor(data_layout=types_pb2.N, tensor_data=np.array([1]))
     x_ = data_op.input_data(x)
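      # x already has a data op from input_data() above, so add() reuses it and
      # only creates a new data op for y, for two data nodes in total.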
      res = math_ops.add(x, y)
   self.assertEqual(get_num_data_nodes(test_graph), 2)
Example No. 12
 def test_shared_data_op(self):
   with Graph(graph_name, backend) as test_graph:
     x = Tensor(data_layout=types_pb2.N, tensor_data=np.array([1]))
     y = Tensor(data_layout=types_pb2.N, tensor_data=np.array([1]))
     res = math_ops.add(x, y)
     res = math_ops.mul(x, res)
   self.assertEqual(get_num_data_nodes(test_graph), 2)
Example No. 13
  def test_mul(self):
    batch = 4
    channels = 32
    tf_a = tf.Variable(initializer(shape=[batch, channels], dtype=self.dtype))
    tf_b = tf.Variable(initializer(shape=[batch, channels], dtype=self.dtype))
    tf_result = tf.math.multiply(tf_a, tf_b)

    a = Tensor(data_layout=types_pb2.NC, tensor_data=tf_a.numpy())
    b = Tensor(data_layout=types_pb2.NC, tensor_data=tf_b.numpy())
    with Graph(name=self.graph_name, backend=self.backend) as graph:
      math_ops.mul(a, b)
    self.runAndValidate(graph, tf_result, decimal=3)
Example No. 14
 def test_activation_functions(self):
     """Test activation function attributes."""
     with Graph("test_graph", "SMV") as test_graph:
         tensor_data = np.random.rand(*self.tensor_shape).astype(np.float16)
         input_tensor = Tensor(data_layout=types_pb2.NHWC,
                               tensor_data=tensor_data)
         act = data_op.input_data(input_tensor, "input")
         act = activation_ops.relu(act, "relu")
         act = activation_ops.lrelu(act, slope=0.5, name="lrelu")
         act = activation_ops.elu(act, alpha=0.2, name="elu")
         act = activation_ops.selu(act,
                                   alpha=0.4,
                                   lambda_param=0.8,
                                   name="selu")
         act = activation_ops.tanh(act, "tanh")
         act = activation_ops.hard_tanh(act,
                                        min=-1.5,
                                        max=1.5,
                                        name="hard_tanh")
         act = activation_ops.sigmoid(act, "sigmoid")
         # Softmax expects NC format, so reorder NHWC to NC.
         act = array_ops.reorder(act,
                                 target_layout=types_pb2.NC,
                                 name="reorder")
         act = activation_ops.softmax(act, "softmax")
     # ReLU
     self.do_basic_test(test_graph, "relu", types_pb2.ReLU)
     # LReLU
     node = self.do_basic_test(test_graph, "lrelu", types_pb2.LReLU)
     self.assertAlmostEqual(node.params.act_params.lrelu_params.slope, 0.5)
     # ELU
     node = self.do_basic_test(test_graph, "elu", types_pb2.ELU)
     self.assertAlmostEqual(node.params.act_params.elu_params.alpha, 0.2)
     # SELU
     node = self.do_basic_test(test_graph, "selu", types_pb2.SELU)
     self.assertAlmostEqual(node.params.act_params.elu_params.alpha, 0.4)
     self.assertAlmostEqual(node.params.act_params.elu_params.lambda_param,
                            0.8)
     # Tanh
     self.do_basic_test(test_graph, "tanh", types_pb2.Tanh)
     # HardTanh
     node = self.do_basic_test(test_graph, "hard_tanh", types_pb2.HardTanh)
     self.assertAlmostEqual(node.params.act_params.hard_tanh_params.min,
                            -1.5)
     self.assertAlmostEqual(node.params.act_params.hard_tanh_params.max,
                            1.5)
     # Sigmoid
     self.do_basic_test(test_graph, "sigmoid", types_pb2.Sigmoid)
     # Softmax
     self.do_basic_test(test_graph,
                        "softmax",
                        types_pb2.Softmax,
                        tensor_shape=[2, 32768])
Example No. 15
 def test_fp16_odd(self):
     """Test float16 packing when tensor's last dimension is of odd size"""
     tensor_data = np.random.rand(4, 3).astype(np.float16)
     with Graph("test_graph", "Reference") as test_graph:
         input_tensor = Tensor(tensor_data=tensor_data)
         act = input_data(input_tensor, "input")
     graph_proto, tensor_data_array = test_graph.to_proto()
     node = get_node_proto(graph_proto, "input")
     self.assertEqual(node.input_tensors[0].data_type, types_pb2.Float16)
     tensor_data_proto = get_tensor_data(tensor_data_array,
                                         node.input_tensors[0].name)
     self.assertEqualFP16(tensor_data_proto.half_data,
                          tensor_data.flatten())
Example No. 16
  def test_mat_mul(self):
    batch = 4
    channels = 32
    units = 128
    tf_a = tf.Variable(initializer(shape=[batch, channels], dtype=self.dtype))
    tf_b = tf.Variable(initializer(shape=[units, channels], dtype=self.dtype))
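     # tf_b is stored as [units, channels], so transpose_b=True yields the
     # [batch, units] result that the NC-layout SMAUG mat_mul below is
     # validated against.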
    tf_result = tf.linalg.matmul(tf_a, tf_b, transpose_b=True)

    a = Tensor(data_layout=types_pb2.NC, tensor_data=tf_a.numpy())
    b = Tensor(data_layout=types_pb2.NC, tensor_data=tf_b.numpy())
    with Graph(name=self.graph_name, backend=self.backend) as graph:
      nn_ops.mat_mul(a, b)
    self.runAndValidate(graph, tf_result, decimal=3)
Example No. 17
    def test_bahdanau_attention(self):
        # Build and run a Bahdanau attention layer in TF.
        batch = 2
        units = 32
        timestep = 8
        # Use the Bahdanau attention mechanism.
        memory = tf.random.normal([batch, timestep, units], dtype=self.dtype)
        attention_mechanism = tfa.seq2seq.BahdanauAttention(units=units,
                                                            memory=memory,
                                                            dtype=self.dtype)
        # Compute attention using the query and state.
        tf_cell = tf.keras.layers.LSTMCell(units,
                                           use_bias=False,
                                           unit_forget_bias=False,
                                           dtype=self.dtype)
        attention_wrapper = tfa.seq2seq.AttentionWrapper(tf_cell,
                                                         attention_mechanism,
                                                         output_attention=True,
                                                         dtype=self.dtype)
        query = tf.random.normal([batch, units], dtype=self.dtype)
        tf_initial_state = attention_wrapper.get_initial_state(
            batch_size=batch, dtype=self.dtype)
        # Perform a step of attention-wrapped RNN.
        tf_attention, _ = attention_wrapper(query, tf_initial_state)

        # Build the attention model in SMAUG using the tensors from the TF model.
        query = Tensor(data_layout=types_pb2.NC, tensor_data=query.numpy())
        w, u = recurrent_test.createSmaugWeights(tf_cell)
        memory = Tensor(data_layout=types_pb2.NTC, tensor_data=memory.numpy())
        weights = attention_mechanism.get_weights()
        w_alignment = Tensor(data_layout=types_pb2.NC,
                             tensor_data=np.expand_dims(weights[0], 0))
        w_decoder = Tensor(data_layout=types_pb2.NC,
                           tensor_data=np.transpose(weights[1]))
        w_encoder = Tensor(data_layout=types_pb2.NC,
                           tensor_data=np.transpose(weights[2]))
        with Graph(name=self.graph_name, backend=self.backend) as graph:
            # Create an LSTM and an attention, and perform one step.
            sg_cell = LSTM([w, u])
            sg_attention = BahdanauAttention(memory, w_encoder, w_decoder,
                                             w_alignment)
            sg_initial_attention = Tensor(data_layout=types_pb2.NC,
                                          tensor_data=np.zeros(
                                              (batch, units),
                                              dtype=self.dtype))
            cell_out, _ = sg_cell.step(concat([query, sg_initial_attention],
                                              axis=1),
                                       timestep=0)
            sg_attention(cell_out)
        self.runAndValidate(graph, tf_attention, decimal=2)
Example No. 18
 def test_cond_op_simple_func(self):
   with Graph(name=self.graph_name, backend=self.backend) as graph:
     x0 = Tensor(
         data_layout=types_pb2.N, tensor_data=np.array([2], dtype=self.dtype))
     x1 = Tensor(
         data_layout=types_pb2.N, tensor_data=np.array([5], dtype=self.dtype))
     y = Tensor(
         data_layout=types_pb2.N, tensor_data=np.array([10], dtype=self.dtype))
     z = Tensor(
         data_layout=types_pb2.N, tensor_data=np.array([20], dtype=self.dtype))
     expected_res = Tensor(
         data_layout=types_pb2.N, tensor_data=np.array([30], dtype=self.dtype))
     # res = y + z if x0 < x1 else y * z
     res = control_flow_ops.cond(
         math_ops.less(x0, x1), lambda: math_ops.add(y, z),
         lambda: math_ops.mul(y, z))
   self.runAndValidate(graph, expected_res.tensor_data)
Example No. 19
  def test_use_nested_op_result(self):
    def func_true(a, b):
      minus_one = Tensor(
          data_layout=types_pb2.N, tensor_data=np.array([-1], dtype=self.dtype))
      res = control_flow_ops.cond(
          math_ops.less(a, b),
          lambda: math_ops.add(a, math_ops.mul(b, minus_one)),
          lambda: math_ops.add(a, b))[0]
      # Use the cond results before returning.
      return math_ops.mul(res, res)

    def func_false(a, b):
      two = Tensor(
          data_layout=types_pb2.N, tensor_data=np.array([2], dtype=self.dtype))
      return control_flow_ops.cond(
          math_ops.greater(a, b), lambda: math_ops.mul(a, two),
          lambda: math_ops.mul(b, two))

    with Graph(name=self.graph_name, backend=self.backend) as graph:
      x0 = Tensor(
          data_layout=types_pb2.N, tensor_data=np.array([2], dtype=self.dtype))
      x1 = Tensor(
          data_layout=types_pb2.N, tensor_data=np.array([5], dtype=self.dtype))
      y = Tensor(
          data_layout=types_pb2.N, tensor_data=np.array([10], dtype=self.dtype))
      z = Tensor(
          data_layout=types_pb2.N, tensor_data=np.array([20], dtype=self.dtype))
      expected_res = Tensor(
          data_layout=types_pb2.N, tensor_data=np.array([100],
                                                        dtype=self.dtype))
      # if x0 < x1:
      #   if y < z:
      #     res = (y - z) ^ 2
      #   else:
      #     res = y + z
      # else:
      #   if y > z:
      #     res = 2y
      #   else:
      #     res = 2z
      res = control_flow_ops.cond(
          math_ops.less(x0, x1), lambda: func_true(y, z),
          lambda: func_false(y, z))
    self.runAndValidate(graph, expected_res.tensor_data)
Example No. 20
  def test_bidirectional_lstm(self):
     # Build and run a Bidirectional LSTM layer in TF.
    tf.keras.backend.set_floatx(
        global_vars.backend_datatype[self.backend].__name__)
    inputs = tf.random.normal([1, 8, 32],
                              dtype=global_vars.backend_datatype[self.backend])
    tf_lstm = tf.keras.layers.LSTM(32, use_bias=False, unit_forget_bias=False)
     tf_bilstm = tf.keras.layers.Bidirectional(layer=tf_lstm)
    tf_output = tf_bilstm(inputs)

    # Build the model in SMAUG using the tensors from the TF model.
    input_tensor = Tensor(data_layout=types_pb2.NTC, tensor_data=inputs.numpy())
    fwd_w, fwd_u, bwd_w, bwd_u = createSmaugWeights(tf_bilstm)
    with Graph(name=self.graph_name, backend=self.backend) as graph:
      inputs = input_data(input_tensor)
      sg_bilstm = BidirectionalLSTM([fwd_w, fwd_u], [bwd_w, bwd_u])
      sg_bilstm(inputs)

    self.runAndValidate(graph, tf_output)
Example No. 21
 def test_attr_reference(self):
     """Test tensor attributes with reference backend."""
     tensor_data = np.random.rand(2, 2, 4, 4).astype(np.float32)
     with Graph("test_graph", "Reference") as test_graph:
         input_tensor = Tensor(data_layout=types_pb2.NHWC,
                               tensor_data=tensor_data)
         act = input_data(input_tensor, "input")
     graph_proto, tensor_data_array = test_graph.to_proto()
     self.assertEqual(graph_proto.backend, "Reference")
     node = get_node_proto(graph_proto, "input")
     self.assertEqual(node.input_tensors[0].data_type, types_pb2.Float32)
     self.assertEqual(node.input_tensors[0].shape.dims, [2, 2, 4, 4])
     self.assertEqual(node.input_tensors[0].shape.layout, types_pb2.NHWC)
     self.assertEqual(node.input_tensors[0].shape.alignment, 0)
     tensor_data_proto = get_tensor_data(tensor_data_array,
                                         node.input_tensors[0].name)
     self.assertEqual(tensor_data_proto.float_data,
                      list(tensor_data.flatten()))
     self.assertEqual(len(tensor_data_proto.half_data), 0)
     self.assertEqual(len(tensor_data_proto.double_data), 0)
     self.assertEqual(len(tensor_data_proto.int_data), 0)
     self.assertEqual(len(tensor_data_proto.int64_data), 0)
Example No. 22
 def test_attr_smv_no_padding(self):
     """Test tensor attributes with SMV backend. No padding is required."""
     tensor_data = np.random.rand(2, 2, 4, 8).astype(np.float16)
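      # The innermost dimension is already 8, matching the SMV alignment of 8,
      # so no zero-padding is added.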
     with Graph("test_graph", "SMV") as test_graph:
         input_tensor = Tensor(data_layout=types_pb2.NCHW,
                               tensor_data=tensor_data)
         act = input_data(input_tensor, "input")
     graph_proto, tensor_data_array = test_graph.to_proto()
     self.assertEqual(graph_proto.backend, "SMV")
     node = get_node_proto(graph_proto, "input")
     self.assertEqual(node.input_tensors[0].data_type, types_pb2.Float16)
     self.assertEqual(node.input_tensors[0].shape.dims, [2, 2, 4, 8])
     self.assertEqual(node.input_tensors[0].shape.layout, types_pb2.NCHW)
     self.assertEqual(node.input_tensors[0].shape.alignment, 8)
     tensor_data_proto = get_tensor_data(tensor_data_array,
                                         node.input_tensors[0].name)
     self.assertEqualFP16(tensor_data_proto.half_data,
                          tensor_data.flatten())
     self.assertEqual(len(tensor_data_proto.float_data), 0)
     self.assertEqual(len(tensor_data_proto.double_data), 0)
     self.assertEqual(len(tensor_data_proto.int_data), 0)
     self.assertEqual(len(tensor_data_proto.int64_data), 0)
Example No. 23
  def test_cond_op_func_call(self):
    def func(a, b):
      minus_three = Tensor(
          data_layout=types_pb2.N, tensor_data=np.array([-3], dtype=self.dtype))
      return math_ops.add(a, math_ops.mul(b, minus_three))

    with Graph(name=self.graph_name, backend=self.backend) as graph:
      x0 = Tensor(
          data_layout=types_pb2.N, tensor_data=np.array([2], dtype=self.dtype))
      x1 = Tensor(
          data_layout=types_pb2.N, tensor_data=np.array([5], dtype=self.dtype))
      y = Tensor(
          data_layout=types_pb2.N, tensor_data=np.array([10], dtype=self.dtype))
      z = Tensor(
          data_layout=types_pb2.N, tensor_data=np.array([20], dtype=self.dtype))
      expected_res = Tensor(
          data_layout=types_pb2.N, tensor_data=np.array([-50],
                                                        dtype=self.dtype))
      # res = y - 3z if x0 < x1 else y * z
      res = control_flow_ops.cond(
          math_ops.less(x0, x1), lambda: func(y, z), lambda: math_ops.mul(y, z))
    self.runAndValidate(graph, expected_res.tensor_data)
Example No. 24
def cond(predication, true_fn, false_fn, name="cond"):
    """A conditional operator.

  This operator provides if-else capability. Depending on the predication
  value, either the true branch or the false branch of the operator is
  executed.

  Args:
    predication: A predication tensor of value 0 or 1, determining which path to
      execute.
    true_fn: The callable to be executed if `predication` is 1.
    false_fn: The callable to be executed if `predication` is 0.
    name: Name of the operator.

  Returns:
    The tensors returned by either true_fn or false_fn.
  """
    def _insert_switch_nodes(predication, branch_result, graph):
        """Insert switch nodes for external tensors in the subgraph.

    An external tensor is a tensor that comes from a node outside this graph;
    this function adds a switch node for every external tensor in `graph`.

    Args:
      predication: The predication tensor used for determining the deadness of
        switch node results.
      branch_result: String value of "true" or "false", representing which
        result of the switch nodes to use.
      graph: A `GraphProto` that represents a branch of the conditional.
    """
        if branch_result not in ["true", "false"]:
            raise ValueError(
                "Use either 'true' or 'false' to indicate the output of the switch "
                "nodes.")
        nodes = [
            node for node in graph.get_nodes() if node.op != types_pb2.Data
        ]
        # This keeps track of all the tensors that come from nodes in the graph.
        internal_tensors = set()
        for node in nodes:
            internal_tensors.update(
                set([tensor.name for tensor in node.outputs]))
        for node in nodes:
            for i, tensor in enumerate(node.inputs):
                # If an input tensor is not produced by any node in this graph, it is
                # external to the graph, so we create a switch node for it.
                # Don't create a switch node for an existing switch node.
                if node.op == types_pb2.Switch:
                    continue
                if tensor.name not in internal_tensors:
                    switch_result = switch(
                        tensor,
                        predication)[switch_op_output_ports[branch_result]]
                    # Update the node's input with the switch node result.
                    node.update_input(switch_result, i)

    cur_graph = global_vars.get_graph()
    backend = cur_graph.backend
    mem_policy = cur_graph.mem_policy
    name = cur_graph.create_unique_name(name)

    # Build the subgraph for the true branch.
    with Graph(name="%s_true_branch" % name,
               backend=backend,
               mem_policy=mem_policy) as subgraph_t:
        res_t = true_fn()
        if not isinstance(res_t, (list, tuple)):
            res_t = [res_t]
        _insert_switch_nodes(predication, "true", subgraph_t)

    # Build the subgraph for the false branch.
    with Graph(name="%s_false_branch" % name,
               backend=backend,
               mem_policy=mem_policy) as subgraph_f:
        res_f = false_fn()
        if not isinstance(res_f, (list, tuple)):
            res_f = [res_f]
        _insert_switch_nodes(predication, "false", subgraph_f)

    # Add the merge nodes for the outputs.
    merges = [merge([t, f]) for (t, f) in zip(res_t, res_f)]
    return merges
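Since `cond` wraps each branch result in a merge node and always returns a list, a caller that expects a single tensor indexes the result, as the nested-cond test above does. A minimal sketch reusing the tensors and ops from the tests in this listing (names assumed from those examples, not verified against the library):

      # res is a one-element list of merge outputs; take the single tensor.
      res = control_flow_ops.cond(
          math_ops.less(x0, x1), lambda: math_ops.add(y, z),
          lambda: math_ops.mul(y, z))[0]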
Example No. 25
 def test_auto_unique_names(self):
     with Graph(graph_name, backend) as test_graph:
         res = math_ops.add(x, y)
         res = math_ops.add(res, res)
         res = math_ops.add(res, res)
     self.assertEqual(get_node_names(test_graph), {"add", "add_1", "add_2"})
Example No. 26
 def test_default_names(self):
     with Graph(graph_name, backend) as test_graph:
         res = math_ops.add(x, y)
         res = math_ops.mul(x, res)
     self.assertEqual(get_node_names(test_graph), {"add", "mul"})
Example No. 27
 def test_fusing_activation_functions(self):
     """Test activation function when they are fused with other operators."""
     with Graph("test_graph", "SMV") as test_graph:
         input_tensor = Tensor(data_layout=types_pb2.NHWC,
                               tensor_data=np.random.rand(
                                   *self.tensor_shape).astype(np.float16))
         filter_tensor = Tensor(data_layout=types_pb2.NHWC,
                                tensor_data=np.random.rand(
                                    32, 3, 3, 32).astype(np.float16))
         weight_tensor = Tensor(data_layout=types_pb2.NC,
                                tensor_data=np.random.rand(
                                    10, 32768).astype(np.float16))
         bn_mean_tensor = Tensor(data_layout=types_pb2.NC,
                                 tensor_data=np.random.rand(1, 64).astype(
                                     np.float16))
         bn_var_tensor = Tensor(data_layout=types_pb2.NC,
                                tensor_data=np.random.rand(1, 64).astype(
                                    np.float16))
         bn_gamma_tensor = Tensor(data_layout=types_pb2.NC,
                                  tensor_data=np.random.rand(1, 64).astype(
                                      np.float16))
         bn_beta_tensor = Tensor(data_layout=types_pb2.NC,
                                 tensor_data=np.random.rand(1, 64).astype(
                                     np.float16))
         act = data_op.input_data(input_tensor, "input")
         act = nn_ops.convolution(act,
                                  filter_tensor,
                                  stride=[1, 1],
                                  padding="same",
                                  activation=None,
                                  name="conv_none")
         act = nn_ops.convolution(act,
                                  filter_tensor,
                                  stride=[1, 1],
                                  padding="same",
                                  activation="relu",
                                  name="conv_relu")
         act = nn_ops.convolution(act,
                                  filter_tensor,
                                  stride=[1, 1],
                                  padding="same",
                                  activation="lrelu",
                                  name="conv_lrelu")
         act = nn_ops.convolution(act,
                                  filter_tensor,
                                  stride=[1, 1],
                                  padding="same",
                                  activation="elu",
                                  name="conv_elu")
         act = nn_ops.convolution(act,
                                  filter_tensor,
                                  stride=[1, 1],
                                  padding="same",
                                  activation="selu",
                                  name="conv_selu")
         act = nn_ops.convolution(act,
                                  filter_tensor,
                                  stride=[1, 1],
                                  padding="same",
                                  activation="tanh",
                                  name="conv_tanh")
         act = nn_ops.convolution(act,
                                  filter_tensor,
                                  stride=[1, 1],
                                  padding="same",
                                  activation="hard_tanh",
                                  name="conv_hard_tanh")
         act = nn_ops.convolution(act,
                                  filter_tensor,
                                  stride=[1, 1],
                                  padding="same",
                                  activation="sigmoid",
                                  name="conv_sigmoid")
         act = nn_ops.convolution(act,
                                  filter_tensor,
                                  stride=[1, 1],
                                  padding="same",
                                  activation="softmax",
                                  name="conv_softmax")
         act = nn_ops.batch_norm(act,
                                 bn_mean_tensor,
                                 bn_var_tensor,
                                 bn_gamma_tensor,
                                 bn_beta_tensor,
                                 activation="relu",
                                 name="bn_relu")
         out = nn_ops.mat_mul(act,
                              weight_tensor,
                              activation="relu",
                              name="fc_relu")
         out = nn_ops.mat_mul(act,
                              weight_tensor,
                              activation="lrelu",
                              activation_params={"slope": 0.1},
                              name="fc_lrelu")
         out = nn_ops.mat_mul(act,
                              weight_tensor,
                              activation="elu",
                              activation_params={"alpha": 0.3},
                              name="fc_elu")
         out = nn_ops.mat_mul(act,
                              weight_tensor,
                              activation="selu",
                              activation_params={
                                  "alpha": 1.0,
                                  "lambda_param": 1.0
                              },
                              name="fc_selu")
         out = nn_ops.mat_mul(act,
                              weight_tensor,
                              activation="hard_tanh",
                              activation_params={
                                  "min": -2.0,
                                  "max": 2.0
                              },
                              name="fc_hard_tanh")
     graph_proto, _ = test_graph.to_proto()
     # None
     node = get_node_proto(graph_proto, "conv_none")
     self.assertEqual(node.params.act_params.activation,
                      types_pb2.UnknownOp)
     # ReLU
     node = get_node_proto(graph_proto, "conv_relu")
     self.assertEqual(node.params.act_params.activation, types_pb2.ReLU)
     # LReLU (default slope = 0.2)
     node = get_node_proto(graph_proto, "conv_lrelu")
     self.assertEqual(node.params.act_params.activation, types_pb2.LReLU)
     self.assertAlmostEqual(node.params.act_params.lrelu_params.slope, 0.2)
     # ELU (default alpha = 0.1)
     node = get_node_proto(graph_proto, "conv_elu")
     self.assertEqual(node.params.act_params.activation, types_pb2.ELU)
     self.assertAlmostEqual(node.params.act_params.elu_params.alpha, 0.1)
     # SELU (default alpha = 1.6733, lambda = 1.0507)
     node = get_node_proto(graph_proto, "conv_selu")
     self.assertEqual(node.params.act_params.activation, types_pb2.SELU)
     self.assertAlmostEqual(node.params.act_params.elu_params.alpha, 1.6733,
                            5)
     self.assertAlmostEqual(node.params.act_params.elu_params.lambda_param,
                            1.0507, 5)
     # Tanh
     node = get_node_proto(graph_proto, "conv_tanh")
     self.assertEqual(node.params.act_params.activation, types_pb2.Tanh)
     # HardTanh (default min = -1, max = 1)
     node = get_node_proto(graph_proto, "conv_hard_tanh")
     self.assertEqual(node.params.act_params.activation, types_pb2.HardTanh)
     self.assertAlmostEqual(node.params.act_params.hard_tanh_params.min, -1)
     self.assertAlmostEqual(node.params.act_params.hard_tanh_params.max, 1)
     # Sigmoid
     node = get_node_proto(graph_proto, "conv_sigmoid")
     self.assertEqual(node.params.act_params.activation, types_pb2.Sigmoid)
     # Softmax
     node = get_node_proto(graph_proto, "conv_softmax")
     self.assertEqual(node.params.act_params.activation, types_pb2.Softmax)
     # Fusion with inner products and batch norms.
     node = get_node_proto(graph_proto, "bn_relu")
     self.assertEqual(node.params.act_params.activation, types_pb2.ReLU)
     node = get_node_proto(graph_proto, "fc_relu")
     self.assertEqual(node.params.act_params.activation, types_pb2.ReLU)
     node = get_node_proto(graph_proto, "fc_lrelu")
     self.assertEqual(node.params.act_params.activation, types_pb2.LReLU)
     self.assertAlmostEqual(node.params.act_params.lrelu_params.slope, 0.1)
     node = get_node_proto(graph_proto, "fc_elu")
     self.assertEqual(node.params.act_params.activation, types_pb2.ELU)
     self.assertAlmostEqual(node.params.act_params.elu_params.alpha, 0.3)
     node = get_node_proto(graph_proto, "fc_selu")
     self.assertEqual(node.params.act_params.activation, types_pb2.SELU)
     self.assertAlmostEqual(node.params.act_params.elu_params.alpha, 1.0)
     self.assertAlmostEqual(node.params.act_params.elu_params.lambda_param,
                            1.0)
     node = get_node_proto(graph_proto, "fc_hard_tanh")
     self.assertEqual(node.params.act_params.activation, types_pb2.HardTanh)
     self.assertAlmostEqual(node.params.act_params.hard_tanh_params.min,
                            -2.0)
     self.assertAlmostEqual(node.params.act_params.hard_tanh_params.max,
                            2.0)