コード例 #1
0
ファイル: ops_test.py プロジェクト: harvard-acc/smaug
  def build_test_sequential_graph(self, backend):
    """Build a purely sequential CNN model and save its proto on this case."""
    dtype = test_backend_dtypes[backend]
    self.expected_dtype = datatypes.np_to_smaug_type[dtype]

    def rand_tensor(layout, shape):
      # All inputs/weights are random; only shapes and layouts matter here.
      return Tensor(
          data_layout=layout, tensor_data=np.random.rand(*shape).astype(dtype))

    with Graph(name="test_sequential_graph", backend=backend) as graph:
      input_tensor = rand_tensor(types_pb2.NCHW, (1, 3, 28, 28))
      filter_tensor0 = rand_tensor(types_pb2.NCHW, (64, 3, 3, 3))
      filter_tensor1 = rand_tensor(types_pb2.NCHW, (64, 64, 3, 3))
      weight_tensor0 = rand_tensor(types_pb2.NC, (254, 12544))
      weight_tensor1 = rand_tensor(types_pb2.NC, (10, 254))
      bn_mean_tensor = rand_tensor(types_pb2.NC, (1, 64))
      bn_var_tensor = rand_tensor(types_pb2.NC, (1, 64))
      bn_gamma_tensor = rand_tensor(types_pb2.NC, (1, 64))
      bn_beta_tensor = rand_tensor(types_pb2.NC, (1, 64))

      act = data_op.input_data(input_tensor, "input")
      act = nn_ops.convolution(
          act, filter_tensor0, stride=[1, 1], padding="same", name="conv0")
      act = activation_ops.relu(act, "conv0_relu")
      act = nn_ops.batch_norm(
          act, bn_mean_tensor, bn_var_tensor, bn_gamma_tensor, bn_beta_tensor,
          name="bn")
      act = nn_ops.convolution(
          act, filter_tensor1, stride=[1, 1], padding="same", name="conv1")
      act = activation_ops.relu(act, "conv1_relu")
      act = nn_ops.max_pool(act, pool_size=[2, 2], stride=[2, 2], name="pool")
      act = array_ops.flatten(act, "flatten")
      act = nn_ops.mat_mul(act, weight_tensor0, name="fc0")
      act = activation_ops.relu(act, "fc0_relu")
      act = nn_ops.mat_mul(act, weight_tensor1, name="fc1")
      act = array_ops.expand_dims(act, 1, "expand_dims")
      act = array_ops.squeeze(act, 1, "squeeze")
      act = array_ops.reshape(act, [2, 5], types_pb2.NC, "reshape")
      act = array_ops.repeat(act, [4, 2], "repeat")
      act = array_ops.stack(act, 4, 1, "stack")
      # Only the first unstacked tensor feeds the rest of the graph.
      branch0, _, _, _ = array_ops.unstack(act, 1, "unstack")
      # NOTE(review): this second reshape reuses the node name "reshape";
      # presumably the graph uniquifies node names -- confirm that is intended.
      branch0 = array_ops.reshape(
          branch0, [1, 1, 8, 10], types_pb2.NCHW, "reshape")
      array_ops.padding(branch0, [0, 0, 0, 0, 1, 1, 1, 1], "padding")

    self.test_graph, _ = graph.to_proto()
    self.backend = backend
    self.alignment = global_vars.backend_alignment[backend]
コード例 #2
0
ファイル: data_op_test.py プロジェクト: yaoyuannnn/smaug
 def test_use_existing_data_op(self):
   """A tensor already wrapped by a data op must not get a second one.

   `x` is explicitly wrapped by input_data(); math_ops.add() should reuse
   that data node for `x` and create one only for `y`, leaving exactly two
   data nodes in the graph.
   """
   with Graph(graph_name, backend) as test_graph:
     x = Tensor(data_layout=types_pb2.N, tensor_data=np.array([1]))
     y = Tensor(data_layout=types_pb2.N, tensor_data=np.array([1]))
     # Return values are irrelevant here; only the graph side effects matter.
     data_op.input_data(x)
     math_ops.add(x, y)
   self.assertEqual(get_num_data_nodes(test_graph), 2)
コード例 #3
0
ファイル: tensor_test.py プロジェクト: yaoyuannnn/smaug
 def test_attr_smv_padding(self):
     """Test tensor attributes with SMV backend. Additional padding required."""
     raw = np.array([[1.1, 2.2, 3.3, 4.4], [5.5, 6.6, 7.7, 8.8]],
                    dtype=np.float16)
     with Graph("test_graph", "SMV") as test_graph:
         act = input_data(
             Tensor(data_layout=types_pb2.NCHW, tensor_data=raw), "input")
     graph_proto, tensor_data_array = test_graph.to_proto()
     self.assertEqual(graph_proto.backend, "SMV")
     tensor_proto = get_node_proto(graph_proto, "input").input_tensors[0]
     self.assertEqual(tensor_proto.data_type, types_pb2.Float16)
     self.assertEqual(tensor_proto.shape.dims, [2, 4])
     self.assertEqual(tensor_proto.shape.layout, types_pb2.NCHW)
     self.assertEqual(tensor_proto.shape.alignment, 8)
     tensor_data_proto = get_tensor_data(tensor_data_array, tensor_proto.name)
     # Each row of four values must be followed by four zeros of padding.
     expected = np.array(
         [1.1, 2.2, 3.3, 4.4, 0, 0, 0, 0, 5.5, 6.6, 7.7, 8.8, 0, 0, 0, 0],
         dtype=np.float16)
     self.assertEqualFP16(tensor_data_proto.half_data, expected)
     # No other data field should be populated.
     for field in ("float_data", "double_data", "int_data", "int64_data"):
         self.assertEqual(len(getattr(tensor_data_proto, field)), 0)
コード例 #4
0
ファイル: ops_test.py プロジェクト: harvard-acc/smaug
  def build_test_residual_graph(self, backend):
    """Create a residual model.

    The graph contains a residual connection, where the output of conv0 and
    conv2 is added at the end.

    Args:
      backend: Name of the backend to build the graph for.

    Side effects: sets self.expected_dtype, self.test_graph, self.backend and
    self.alignment for use by the test assertions.
    """
    np_dtype = test_backend_dtypes[backend]
    self.expected_dtype = datatypes.np_to_smaug_type[np_dtype]
    with Graph(name="test_residual_graph", backend=backend) as graph:
      input_tensor = Tensor(
          data_layout=types_pb2.NCHW,
          tensor_data=np.random.rand(1, 1, 28, 28).astype(np_dtype))
      filter_tensor0 = Tensor(
          data_layout=types_pb2.NCHW,
          tensor_data=np.random.rand(64, 1, 3, 3).astype(np_dtype))
      filter_tensor1 = Tensor(
          data_layout=types_pb2.NCHW,
          tensor_data=np.random.rand(64, 1, 3, 3).astype(np_dtype))
      filter_tensor2 = Tensor(
          data_layout=types_pb2.NCHW,
          tensor_data=np.random.rand(64, 64, 3, 3).astype(np_dtype))
      bn_mean_tensor = Tensor(
          data_layout=types_pb2.NC,
          tensor_data=np.random.rand(1, 64).astype(np_dtype))
      bn_var_tensor = Tensor(
          data_layout=types_pb2.NC,
          tensor_data=np.random.rand(1, 64).astype(np_dtype))
      bn_gamma_tensor = Tensor(
          data_layout=types_pb2.NC,
          tensor_data=np.random.rand(1, 64).astype(np_dtype))
      bn_beta_tensor = Tensor(
          data_layout=types_pb2.NC,
          tensor_data=np.random.rand(1, 64).astype(np_dtype))

      act = data_op.input_data(input_tensor, "input")
      # x is the residual branch: it bypasses the conv1/bn/relu/conv2 path and
      # re-enters at the add/mul/concat ops below.
      x = nn_ops.convolution(
          act, filter_tensor0, stride=[1, 1], padding="same", name="conv0")
      out = nn_ops.convolution(
          act, filter_tensor1, stride=[1, 1], padding="same", name="conv1")
      out = nn_ops.batch_norm(
          out, bn_mean_tensor, bn_var_tensor, bn_gamma_tensor, bn_beta_tensor,
          name="bn")
      out = activation_ops.relu(out, "relu")
      out = nn_ops.convolution(
          out, filter_tensor2, stride=[1, 1], padding="same", name="conv2")
      out = math_ops.add(x, out, "add")
      out = math_ops.mul(x, out, "mul")
      # Concatenate the channel dimension of x and out.
      axis = 1 if out.shape.layout == types_pb2.NCHW else 3
      out = array_ops.concat([x, out], axis, "concat")
      # Evenly split the tensor into 4 over the channel dimension.
      out0, out1, out2, out3 = array_ops.split(out, 4, axis, "split")
      out = math_ops.mul(
          math_ops.add(out0, out1, "add1"), math_ops.add(out2, out3, "add2"),
          "mul1")

    self.test_graph, _ = graph.to_proto()
    self.backend = backend
    # Consistency fix: index with the backend argument directly, matching
    # build_test_sequential_graph; self.test_graph.backend holds the same
    # backend name, so behavior is unchanged.
    self.alignment = global_vars.backend_alignment[backend]
コード例 #5
0
ファイル: recurrent_test.py プロジェクト: yaoyuannnn/smaug
  def test_multilayered_lstm(self):
    """Compare a two-layer SMAUG LSTM stack against the TF reference."""
    # Reference implementation: two stacked Keras LSTM layers.
    tf.keras.backend.set_floatx(
        global_vars.backend_datatype[self.backend].__name__)
    inputs = tf.random.normal([4, 8, 16],
                              dtype=global_vars.backend_datatype[self.backend])
    tf_lstm0 = tf.keras.layers.LSTM(
        32, return_sequences=True, use_bias=False, unit_forget_bias=False)
    # We let TF's LSTM only return the last timestep result, because the
    # SMAUG's C++ runtime returns that.
    tf_lstm1 = tf.keras.layers.LSTM(
        32, return_sequences=False, use_bias=False, unit_forget_bias=False)
    model = tf.keras.models.Sequential([tf_lstm0, tf_lstm1])
    model.compile()
    tf_output = model.predict(inputs)

    # Same stack in SMAUG, reusing the tensors extracted from the TF model.
    inputs_tensor = Tensor(
        data_layout=types_pb2.NTC, tensor_data=inputs.numpy())
    w0, u0 = createSmaugWeights(tf_lstm0)
    w1, u1 = createSmaugWeights(tf_lstm1)
    with Graph(name=self.graph_name, backend=self.backend) as graph:
      sg_inputs = input_data(inputs_tensor)
      sg_outputs, _ = LSTM([w0, u0])(sg_inputs)
      sg_outputs, _ = LSTM([w1, u1])(sg_outputs)

    self.runAndValidate(graph, tf_output)
コード例 #6
0
 def test_activation_functions(self):
     """Test activation function attributes."""
     with Graph("test_graph", "SMV") as test_graph:
         data = np.random.rand(*self.tensor_shape).astype(np.float16)
         x = data_op.input_data(
             Tensor(data_layout=types_pb2.NHWC, tensor_data=data), "input")
         x = activation_ops.relu(x, "relu")
         x = activation_ops.lrelu(x, slope=0.5, name="lrelu")
         x = activation_ops.elu(x, alpha=0.2, name="elu")
         x = activation_ops.selu(x, alpha=0.4, lambda_param=0.8, name="selu")
         x = activation_ops.tanh(x, "tanh")
         x = activation_ops.hard_tanh(x, min=-1.5, max=1.5, name="hard_tanh")
         x = activation_ops.sigmoid(x, "sigmoid")
         # Softmax expects NC format, so reorder NHWC to NC.
         x = array_ops.reorder(x, target_layout=types_pb2.NC, name="reorder")
         activation_ops.softmax(x, "softmax")
     # Parameterless activations only need the basic node check.
     self.do_basic_test(test_graph, "relu", types_pb2.ReLU)
     self.do_basic_test(test_graph, "tanh", types_pb2.Tanh)
     self.do_basic_test(test_graph, "sigmoid", types_pb2.Sigmoid)
     # LReLU
     lrelu_node = self.do_basic_test(test_graph, "lrelu", types_pb2.LReLU)
     self.assertAlmostEqual(
         lrelu_node.params.act_params.lrelu_params.slope, 0.5)
     # ELU
     elu_node = self.do_basic_test(test_graph, "elu", types_pb2.ELU)
     self.assertAlmostEqual(elu_node.params.act_params.elu_params.alpha, 0.2)
     # SELU
     selu_node = self.do_basic_test(test_graph, "selu", types_pb2.SELU)
     selu_params = selu_node.params.act_params.elu_params
     self.assertAlmostEqual(selu_params.alpha, 0.4)
     self.assertAlmostEqual(selu_params.lambda_param, 0.8)
     # HardTanh
     ht_node = self.do_basic_test(test_graph, "hard_tanh", types_pb2.HardTanh)
     self.assertAlmostEqual(ht_node.params.act_params.hard_tanh_params.min,
                            -1.5)
     self.assertAlmostEqual(ht_node.params.act_params.hard_tanh_params.max,
                            1.5)
     # Softmax runs on the reordered NC tensor, hence the different shape.
     self.do_basic_test(
         test_graph, "softmax", types_pb2.Softmax, tensor_shape=[2, 32768])
コード例 #7
0
ファイル: tensor_test.py プロジェクト: yaoyuannnn/smaug
 def test_fp16_odd(self):
     """Test float16 packing when tensor's last dimension is of odd size"""
     raw = np.random.rand(4, 3).astype(np.float16)
     with Graph("test_graph", "Reference") as test_graph:
         input_data(Tensor(tensor_data=raw), "input")
     graph_proto, tensor_data_array = test_graph.to_proto()
     tensor_proto = get_node_proto(graph_proto, "input").input_tensors[0]
     self.assertEqual(tensor_proto.data_type, types_pb2.Float16)
     tensor_data_proto = get_tensor_data(tensor_data_array, tensor_proto.name)
     # The packed data must round-trip to the original values.
     self.assertEqualFP16(tensor_data_proto.half_data, raw.flatten())
コード例 #8
0
ファイル: recurrent_test.py プロジェクト: yaoyuannnn/smaug
  def test_bidirectional_lstm(self):
    """Compare a SMAUG bidirectional LSTM against the TF reference."""
    # Reference implementation: a Keras Bidirectional-wrapped LSTM.
    tf.keras.backend.set_floatx(
        global_vars.backend_datatype[self.backend].__name__)
    inputs = tf.random.normal([1, 8, 32],
                              dtype=global_vars.backend_datatype[self.backend])
    tf_bilstm = tf.keras.layers.Bidirectional(
        layer=tf.keras.layers.LSTM(32, use_bias=False, unit_forget_bias=False))
    tf_output = tf_bilstm(inputs)

    # Mirror the model in SMAUG with the weights extracted from TF.
    input_tensor = Tensor(data_layout=types_pb2.NTC, tensor_data=inputs.numpy())
    fwd_w, fwd_u, bwd_w, bwd_u = createSmaugWeights(tf_bilstm)
    with Graph(name=self.graph_name, backend=self.backend) as graph:
      sg_inputs = input_data(input_tensor)
      BidirectionalLSTM([fwd_w, fwd_u], [bwd_w, bwd_u])(sg_inputs)

    self.runAndValidate(graph, tf_output)
コード例 #9
0
ファイル: tensor_test.py プロジェクト: yaoyuannnn/smaug
 def test_attr_smv_no_padding(self):
     """Test tensor attributes with SMV backend. No padding is required."""
     raw = np.random.rand(2, 2, 4, 8).astype(np.float16)
     with Graph("test_graph", "SMV") as test_graph:
         input_data(Tensor(data_layout=types_pb2.NCHW, tensor_data=raw),
                    "input")
     graph_proto, tensor_data_array = test_graph.to_proto()
     self.assertEqual(graph_proto.backend, "SMV")
     tensor_proto = get_node_proto(graph_proto, "input").input_tensors[0]
     self.assertEqual(tensor_proto.data_type, types_pb2.Float16)
     self.assertEqual(tensor_proto.shape.dims, [2, 2, 4, 8])
     self.assertEqual(tensor_proto.shape.layout, types_pb2.NCHW)
     self.assertEqual(tensor_proto.shape.alignment, 8)
     tensor_data_proto = get_tensor_data(tensor_data_array, tensor_proto.name)
     # Inner dimension already matches the alignment, so no padding appears.
     self.assertEqualFP16(tensor_data_proto.half_data, raw.flatten())
     # No other data field should be populated.
     for field in ("float_data", "double_data", "int_data", "int64_data"):
         self.assertEqual(len(getattr(tensor_data_proto, field)), 0)
コード例 #10
0
ファイル: tensor_test.py プロジェクト: yaoyuannnn/smaug
 def test_attr_reference(self):
     """Test tensor attributes with reference backend."""
     raw = np.random.rand(2, 2, 4, 4).astype(np.float32)
     with Graph("test_graph", "Reference") as test_graph:
         input_data(Tensor(data_layout=types_pb2.NHWC, tensor_data=raw),
                    "input")
     graph_proto, tensor_data_array = test_graph.to_proto()
     self.assertEqual(graph_proto.backend, "Reference")
     tensor_proto = get_node_proto(graph_proto, "input").input_tensors[0]
     self.assertEqual(tensor_proto.data_type, types_pb2.Float32)
     self.assertEqual(tensor_proto.shape.dims, [2, 2, 4, 4])
     self.assertEqual(tensor_proto.shape.layout, types_pb2.NHWC)
     # The reference backend requests no alignment.
     self.assertEqual(tensor_proto.shape.alignment, 0)
     tensor_data_proto = get_tensor_data(tensor_data_array, tensor_proto.name)
     self.assertEqual(tensor_data_proto.float_data, list(raw.flatten()))
     # No other data field should be populated.
     for field in ("half_data", "double_data", "int_data", "int64_data"):
         self.assertEqual(len(getattr(tensor_data_proto, field)), 0)
コード例 #11
0
 def test_fusing_activation_functions(self):
     """Test activation function when they are fused with other operators."""
     def fp16_rand(*shape):
         # All tensor contents are random fp16; only shapes matter here.
         return np.random.rand(*shape).astype(np.float16)

     with Graph("test_graph", "SMV") as test_graph:
         input_tensor = Tensor(data_layout=types_pb2.NHWC,
                               tensor_data=fp16_rand(*self.tensor_shape))
         filter_tensor = Tensor(data_layout=types_pb2.NHWC,
                                tensor_data=fp16_rand(32, 3, 3, 32))
         weight_tensor = Tensor(data_layout=types_pb2.NC,
                                tensor_data=fp16_rand(10, 32768))
         bn_mean_tensor = Tensor(data_layout=types_pb2.NC,
                                 tensor_data=fp16_rand(1, 64))
         bn_var_tensor = Tensor(data_layout=types_pb2.NC,
                                tensor_data=fp16_rand(1, 64))
         bn_gamma_tensor = Tensor(data_layout=types_pb2.NC,
                                  tensor_data=fp16_rand(1, 64))
         bn_beta_tensor = Tensor(data_layout=types_pb2.NC,
                                 tensor_data=fp16_rand(1, 64))
         act = data_op.input_data(input_tensor, "input")
         # One convolution per supported fused activation (plus None).
         for activation in (None, "relu", "lrelu", "elu", "selu", "tanh",
                            "hard_tanh", "sigmoid", "softmax"):
             act = nn_ops.convolution(
                 act, filter_tensor, stride=[1, 1], padding="same",
                 activation=activation,
                 name="conv_%s" % (activation if activation else "none"))
         act = nn_ops.batch_norm(
             act, bn_mean_tensor, bn_var_tensor, bn_gamma_tensor,
             bn_beta_tensor, activation="relu", name="bn_relu")
         # Fused activations on inner products; all consume the bn output.
         nn_ops.mat_mul(act, weight_tensor, activation="relu", name="fc_relu")
         nn_ops.mat_mul(act, weight_tensor, activation="lrelu",
                        activation_params={"slope": 0.1}, name="fc_lrelu")
         nn_ops.mat_mul(act, weight_tensor, activation="elu",
                        activation_params={"alpha": 0.3}, name="fc_elu")
         nn_ops.mat_mul(act, weight_tensor, activation="selu",
                        activation_params={"alpha": 1.0, "lambda_param": 1.0},
                        name="fc_selu")
         nn_ops.mat_mul(act, weight_tensor, activation="hard_tanh",
                        activation_params={"min": -2.0, "max": 2.0},
                        name="fc_hard_tanh")
     graph_proto, _ = test_graph.to_proto()

     def act_params(name):
         # Shorthand for the fused-activation parameters of a node proto.
         return get_node_proto(graph_proto, name).params.act_params

     # None
     self.assertEqual(act_params("conv_none").activation, types_pb2.UnknownOp)
     # ReLU
     self.assertEqual(act_params("conv_relu").activation, types_pb2.ReLU)
     # LReLU (default slope = 0.2)
     lrelu = act_params("conv_lrelu")
     self.assertEqual(lrelu.activation, types_pb2.LReLU)
     self.assertAlmostEqual(lrelu.lrelu_params.slope, 0.2)
     # ELU (default alpha = 0.1)
     elu = act_params("conv_elu")
     self.assertEqual(elu.activation, types_pb2.ELU)
     self.assertAlmostEqual(elu.elu_params.alpha, 0.1)
     # SELU (default alpha = 1.6733, lambda = 1.0507)
     selu = act_params("conv_selu")
     self.assertEqual(selu.activation, types_pb2.SELU)
     self.assertAlmostEqual(selu.elu_params.alpha, 1.6733, 5)
     self.assertAlmostEqual(selu.elu_params.lambda_param, 1.0507, 5)
     # Tanh
     self.assertEqual(act_params("conv_tanh").activation, types_pb2.Tanh)
     # HardTanh (default min = -1, max = 1)
     hard_tanh = act_params("conv_hard_tanh")
     self.assertEqual(hard_tanh.activation, types_pb2.HardTanh)
     self.assertAlmostEqual(hard_tanh.hard_tanh_params.min, -1)
     self.assertAlmostEqual(hard_tanh.hard_tanh_params.max, 1)
     # Sigmoid
     self.assertEqual(act_params("conv_sigmoid").activation, types_pb2.Sigmoid)
     # Softmax
     self.assertEqual(act_params("conv_softmax").activation, types_pb2.Softmax)
     # Fusion with inner products and batch norms.
     self.assertEqual(act_params("bn_relu").activation, types_pb2.ReLU)
     self.assertEqual(act_params("fc_relu").activation, types_pb2.ReLU)
     fc_lrelu = act_params("fc_lrelu")
     self.assertEqual(fc_lrelu.activation, types_pb2.LReLU)
     self.assertAlmostEqual(fc_lrelu.lrelu_params.slope, 0.1)
     fc_elu = act_params("fc_elu")
     self.assertEqual(fc_elu.activation, types_pb2.ELU)
     self.assertAlmostEqual(fc_elu.elu_params.alpha, 0.3)
     fc_selu = act_params("fc_selu")
     self.assertEqual(fc_selu.activation, types_pb2.SELU)
     self.assertAlmostEqual(fc_selu.elu_params.alpha, 1.0)
     self.assertAlmostEqual(fc_selu.elu_params.lambda_param, 1.0)
     fc_hard_tanh = act_params("fc_hard_tanh")
     self.assertEqual(fc_hard_tanh.activation, types_pb2.HardTanh)
     self.assertAlmostEqual(fc_hard_tanh.hard_tanh_params.min, -2.0)
     self.assertAlmostEqual(fc_hard_tanh.hard_tanh_params.max, 2.0)