Example 1
 def GetParams(self):
     """Create a graph containing multiple segment."""
     input_name = "input"
     input_dims = [2, 32, 32, 3]
     output_name = "output"
     g = ops.Graph()
     with g.as_default():
         inp = array_ops.placeholder(dtype=dtypes.float32,
                                     shape=input_dims,
                                     name=input_name)
         with g.device("/GPU:0"):
             c1 = constant_op.constant(1.0, name="c1")
             c2 = constant_op.constant(1.0, name="c2")
             d1 = constant_op.constant(1.0, name="d1")
             d2 = self.trt_incompatible_op(inp, name="d2")
             with g.control_dependencies([d1, d2]):
                 add = math_ops.add(inp, c1, name="add")
             with g.control_dependencies([d1, d2]):
                 mul = math_ops.mul(add, add, name="mul")
             with g.control_dependencies([d1, d2]):
                 add1 = math_ops.add(mul, mul, name="add1")
             edge = self.trt_incompatible_op(add1, name="incompatible")
             with g.control_dependencies([d1, d2, add, mul]):
                 add2 = math_ops.add(edge, c2, name="add2")
             with g.control_dependencies([d1, d2, add1, mul]):
                 mul1 = math_ops.mul(add2, add2, name="mul1")
             with g.control_dependencies([d1, d2, add, add1]):
                 add3 = math_ops.add(mul1, mul1, name="add3")
         array_ops.squeeze(add3, name=output_name)
     return trt_test.TfTrtIntegrationTestParams(
         gdef=g.as_graph_def(),
         input_names=[input_name],
         input_dims=[[input_dims]],
         output_names=[output_name],
         expected_output_dims=[[input_dims]])
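All of the GetParams snippets below rely on the same TensorFlow module aliases (ops, dtypes, constant_op, array_ops, math_ops, nn, nn_ops, gen_array_ops, and so on) plus trt_test for the TF-TRT integration-test harness, but the import block is not part of the excerpts. A set of imports consistent with the names used here would look roughly like the following; exact module paths have moved between TensorFlow releases, so treat this as an assumption rather than the original file header.

import numpy as np

from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import image_ops_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops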
Example 2
 def GetParams(self):
     """Testing that output type of engine using Top-K is set correctly."""
     dtype = dtypes.float32
     input_name = "input"
     input_dims = [100, 100]
     k = 5
     g = ops.Graph()
     with g.as_default():
         x = array_ops.placeholder(dtype=dtype,
                                   shape=input_dims,
                                   name=input_name)
         k_tensor = constant_op.constant(k,
                                         dtype=dtypes.int32,
                                         name="Const")
         values, indices = nn_ops.top_k(x, k_tensor, name="TopK")
         # Reshape will act as a layer between the TopK output and the engine
         # output, requiring the output tensor of reshape to be set explicitly to
         # int32.
         indices = array_ops.reshape(indices, [100, 1, 5], name="Reshape")
         values = array_ops.identity(values, name="output_values")
         indices = array_ops.identity(indices, name="output_indices")
     return trt_test.TfTrtIntegrationTestParams(
         gdef=g.as_graph_def(),
         input_names=[input_name],
         input_dims=[[input_dims]],
         output_names=["output_values", "output_indices"],
         expected_output_dims=[[[100, k], [100, 1, k]]])
Example 3
    def GetParams(self):
        """Testing engine with the same tensor repeated as output via identity."""
        input_name = 'input'
        input_dims = [100, 32]
        g = ops.Graph()
        with g.as_default():
            x = array_ops.placeholder(dtype=dtypes.float32,
                                      shape=input_dims,
                                      name=input_name)

            b = self._ConstOp((32, 4))
            x1 = math_ops.matmul(x, b)
            b = self._ConstOp((1, 4))
            x1 = x1 + b

            out1 = array_ops.identity(x1, name='output1')
            out2 = array_ops.identity(x1, name='output2')
            iden1 = array_ops.identity(x1)
            out3 = array_ops.identity(iden1, name='output3')

        return trt_test.TfTrtIntegrationTestParams(
            gdef=g.as_graph_def(),
            input_names=[input_name],
            input_dims=[[input_dims]],
            output_names=['output1', 'output2', 'output3'],
            expected_output_dims=[[[100, 4]] * 3])
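Example 1 and Example 3 (and many later snippets) also call helpers defined on the test class rather than in the snippet itself: self.trt_incompatible_op inserts an op that has no TF-TRT converter, which keeps the nodes on either side of it from being fused into a single TRT engine, and self._ConstOp builds a random constant of a given shape. Their real definitions live in the test classes; a minimal sketch under those assumptions (the choice of erfc as the incompatible op is illustrative, not necessarily what the harness actually uses) is:

    def trt_incompatible_op(self, x, name=None):
        # Any op without a TF-TRT converter works here; it forces the graph to be
        # split into separate TRT segments around this node.
        return math_ops.erfc(x, name=name)

    def _ConstOp(self, shape, dtype=dtypes.float32):
        # Random constant of the requested shape, cast to the requested dtype.
        return constant_op.constant(
            np.random.randn(*shape).astype(dtype.as_numpy_dtype), dtype=dtype)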
Example 4
 def GetParams(self):
     dtype = dtypes.float32
     input_name = "input"
     input_dims = [[[1, 10, 10, 2]], [[2, 10, 10, 2]], [[4, 10, 10, 2]],
                   [[2, 10, 10, 2]]]
     expected_output_dims = [[[1, 10, 10, 1]], [[2, 10, 10, 1]],
                             [[4, 10, 10, 1]], [[2, 10, 10, 1]]]
     output_name = "output"
     g = ops.Graph()
     with g.as_default():
         x = array_ops.placeholder(dtype=dtype,
                                   shape=[None, 10, 10, 2],
                                   name=input_name)
         conv_filter = constant_op.constant(np.random.randn(3, 3, 2, 1),
                                            dtype=dtypes.float32)
         x = nn.conv2d(input=x,
                       filter=conv_filter,
                       strides=[1, 1, 1, 1],
                       padding="SAME",
                       name="conv")
         bias = constant_op.constant(np.random.randn(1, 10, 10, 1),
                                     dtype=dtypes.float32)
         x = math_ops.add(x, bias)
         x = nn.relu(x)
         x = array_ops.identity(x, name="output")
     return trt_test.TfTrtIntegrationTestParams(
         gdef=g.as_graph_def(),
         input_names=[input_name],
         input_dims=input_dims,
         output_names=[output_name],
         expected_output_dims=expected_output_dims)
Example 5
 def GetParams(self):
     """Testing conversion of BatchMatMul in TF-TRT conversion."""
     dtype = dtypes.float32
     input_name = "input"
     input_dims = [2, 15, 15, 3]
     output_name = "output"
     g = ops.Graph()
     with g.as_default():
         inp = array_ops.placeholder(dtype=dtype,
                                     shape=[None] + input_dims[1:],
                                     name=input_name)
         with g.device("/GPU:0"):
             e1 = constant_op.constant(np.random.randn(1, 1, 3, 5),
                                       name="kernel_1",
                                       dtype=dtype)
             e2 = constant_op.constant(np.random.randn(1, 1, 5, 10),
                                       name="kernel_2",
                                       dtype=dtype)
             conv = nn.conv2d(input=inp,
                              filter=e1,
                              strides=[1, 1, 1, 1],
                              padding="VALID",
                              name="conv")
             out = nn.conv2d(input=conv,
                             filter=e2,
                             strides=[1, 1, 1, 1],
                             padding="VALID",
                             name="conv_2")
         array_ops.squeeze(out, name=output_name)
     return trt_test.TfTrtIntegrationTestParams(
         gdef=g.as_graph_def(),
         input_names=[input_name],
         input_dims=[[input_dims]],
         output_names=[output_name],
         expected_output_dims=[[[2, 15, 15, 10]]])
Example 6
 def GetParams(self):
     """Neighboring node wiring tests in TF-TRT conversion."""
     dtype = dtypes.float32
     input_name = "input"
     input_dims = [2, 3, 7, 5]
     output_name = "output"
     g = ops.Graph()
     with g.as_default():
         x = array_ops.placeholder(dtype=dtype,
                                   shape=input_dims,
                                   name=input_name)
         e = constant_op.constant(np.random.normal(.3, 0.05, [3, 2, 3, 4]),
                                  name="weights",
                                  dtype=dtype)
         conv = nn.conv2d(input=x,
                          filter=e,
                          data_format="NCHW",
                          strides=[1, 1, 1, 1],
                          padding="VALID",
                          name="conv")
         b = constant_op.constant(np.random.normal(1.0, 1.0, [1, 4, 1, 1]),
                                  name="bias",
                                  dtype=dtype)
         t = math_ops.mul(conv, b, name="mul")
         e = self.trt_incompatible_op(conv, name="incompatible")
         t = math_ops.sub(t, e, name="sub")
         array_ops.squeeze(t, name=output_name)
     return trt_test.TfTrtIntegrationTestParams(
         gdef=g.as_graph_def(),
         input_names=[input_name],
         input_dims=[[input_dims]],
         output_names=[output_name],
         expected_output_dims=[[[2, 4, 5, 4]]])
Example 7
 def GetParams(self):
     """Create a graph containing two segment."""
     input_name = "input"
     input_dims = [2, 32, 32, 3]
     output_name = "output"
     g = ops.Graph()
     with g.as_default():
         inp = array_ops.placeholder(dtype=dtypes.float32,
                                     shape=input_dims,
                                     name=input_name)
         with g.device("/GPU:0"):
             n = inp
             for i in range(2):
                 c = constant_op.constant(1.0, name="c%d" % i)
                 n = math_ops.add(n, c, name="add%d" % i)
                 n = math_ops.mul(n, n, name="mul%d" % i)
             edge = self.trt_incompatible_op(n, name="incompatible")
             with g.control_dependencies([edge]):
                 c = constant_op.constant(1.0, name="c2")
                 n = math_ops.add(n, c, name="add2")
             n = math_ops.mul(n, n, name="mul2")
             c = constant_op.constant(1.0, name="c3")
             n = math_ops.add(n, c, name="add3")
             n = math_ops.mul(n, n, name="mul3")
         array_ops.squeeze(n, name=output_name)
     return trt_test.TfTrtIntegrationTestParams(
         gdef=g.as_graph_def(),
         input_names=[input_name],
         input_dims=[[input_dims]],
         output_names=[output_name],
         expected_output_dims=[[input_dims]])
Example 8
 def GetParams(self):
     """Tests for scale & elementwise layers in TF-TRT."""
     input_name = "input"
     input_dims = [10, 24, 24, 20]
     output_name = "output"
     g = ops.Graph()
     with g.as_default():
         x = array_ops.placeholder(dtype=dtypes.float32,
                                   shape=input_dims,
                                   name=input_name)
         for weights_shape in [
             (1, ),  # scale
             (24, 1, 1),  # scale
             (24, 24, 20),  # scale
             (20, ),  # elementwise
             (1, 24, 1, 1),  # elementwise
             (1, 24, 24, 1),  # elementwise
             (1, 24, 24, 20),  # elementwise
             (24, 20),  # elementwise
         ]:
             a = self._ConstOp(weights_shape)
             f = x + a
             x = self.trt_incompatible_op(f)
             a = self._ConstOp(weights_shape)
             f = a + x
             x = self.trt_incompatible_op(f)
         gen_array_ops.reshape(x, [5, -1], name=output_name)
     return trt_test.TfTrtIntegrationTestParams(
         gdef=g.as_graph_def(),
         input_names=[input_name],
         input_dims=[[input_dims]],
         output_names=[output_name],
         expected_output_dims=[[[5, 23040]]])
Example 9
 def GetParams(self):
     """Create a graph containing multiple segment."""
     input_name = "input"
     input_dims = [2, 32, 32, 3]
     output_name = "output"
     g = ops.Graph()
     with g.as_default():
         inp = array_ops.placeholder(dtype=dtypes.float32,
                                     shape=input_dims,
                                     name=input_name)
         with g.device("/GPU:0"):
             n = inp
             c = constant_op.constant(1.0, name="c")
             # Adds control dependency from the constant op to a trt incompatible op,
             # and adds control dependency from the trt incompatible op to all other
             # ops, to make sure the constant op cannot be contracted with any trt
             # segment that depends on it.
             with g.control_dependencies([c]):
                 d = self.trt_incompatible_op(n, name="incompatible")
             with g.control_dependencies([d]):
                 n = math_ops.add(n, c, name="add")
                 n = math_ops.mul(n, n, name="mul")
                 n = math_ops.add(n, n, name="add1")
             n = self.trt_incompatible_op(n, name="incompatible1")
             with g.control_dependencies([d]):
                 n = math_ops.add(n, c, name="add2")
                 n = math_ops.mul(n, n, name="mul1")
                 n = math_ops.add(n, n, name="add3")
         array_ops.squeeze(n, name=output_name)
     return trt_test.TfTrtIntegrationTestParams(
         gdef=g.as_graph_def(),
         input_names=[input_name],
         input_dims=[[input_dims]],
         output_names=[output_name],
         expected_output_dims=[[input_dims]])
Example 10
 def GetParams(self):
   """Testing conversion of conv2d_transpose (AKA Conv2DBackpropInput)"""
   np.random.seed(1234)
   dtype = dtypes.float32
   input_name = "input"
   n, c, h, w = 13, 3, 7, 11
   num_filters = 8
   input_dims = [n, c, h, w]
   output_name = "output"
   g = ops.Graph()
   with g.as_default():
     inp = array_ops.placeholder(
         dtype=dtype, shape=[None] + input_dims[1:], name=input_name)
     with g.device("/GPU:0"):
       weights_shape = [2, 2, num_filters, c]
       weights = constant_op.constant(
           np.random.randn(*weights_shape), dtype=dtype)
       output_shape = constant_op.constant([n, num_filters, h * 2, w * 2],
                                           dtype=dtypes.int32)
       output = nn_ops.conv2d_transpose(
           inp,
           weights,
           output_shape,
           strides=[1, 1, 2, 2],
           padding="SAME",
           data_format="NCHW")
       output = array_ops.identity(output, name=output_name)
   return trt_test.TfTrtIntegrationTestParams(
       gdef=g.as_graph_def(),
       input_names=[input_name],
       input_dims=[[input_dims]],
       output_names=[output_name],
       expected_output_dims=[[[n, num_filters, h * 2, w * 2]]])
Example 11
 def GetParams(self):
     """Create a graph containing multiple segment."""
     input_name = "input"
     input_dims = [2, 32, 32, 3]
     output_name = "output"
     g = ops.Graph()
     with g.as_default():
         inp = array_ops.placeholder(dtype=dtypes.float32,
                                     shape=input_dims,
                                     name=input_name)
         with g.device("/GPU:0"):
             n = inp
             c = constant_op.constant(1.0, name="c")
             n = math_ops.add(n, c, name="add")
             n = math_ops.mul(n, n, name="mul")
             n = math_ops.add(n, n, name="add1")
             n = self.trt_incompatible_op(n, name="incompatible1")
             n = math_ops.add(n, c, name="add2")
             n = math_ops.mul(n, n, name="mul1")
             n = math_ops.add(n, n, name="add3")
         array_ops.squeeze(n, name=output_name)
     return trt_test.TfTrtIntegrationTestParams(
         gdef=g.as_graph_def(),
         input_names=[input_name],
         input_dims=[[input_dims]],
         output_names=[output_name],
         expected_output_dims=[[input_dims]])
Example 12
def _GetParams(add_quantization_nodes, dtype=dtypes.float32):
  input_name = "input"
  input_dims = [8, 8]
  output_name = "output"

  def _Quantize(x, r):
    if add_quantization_nodes:
      x = gen_array_ops.fake_quant_with_min_max_vars(x, -r, r)
    return x

  g = ops.Graph()
  with g.as_default():
    x = array_ops.placeholder(
        dtype=dtype, shape=[None] + input_dims[1:], name=input_name)
    x = _Quantize(x, 10.0)
    x = x + 5
    x = _Quantize(x, 15.0)
    x = x - 5
    x = _Quantize(x, 10.0)
    x = x * 0.1
    x = _Quantize(x, 1.0)
    w = constant_op.constant(np.ones((8, 1)), dtype=dtypes.float32)
    x = math_ops.matmul(x, w)
    x = _Quantize(x, 10.0)
    x = array_ops.identity(x, name=output_name)

  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=[input_name],
      input_dims=[[input_dims]],
      output_names=[output_name],
      expected_output_dims=[[[8, 1]]])
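Unlike the other entries, Example 12 is a module-level helper rather than a test method: the add_quantization_nodes flag controls whether fake-quant ops (and hence explicit INT8 ranges) are inserted into the graph. A test class would typically just forward to it from GetParams; a hypothetical wrapper (the class name is illustrative) could be:

class QuantizationTest(trt_test.TfTrtIntegrationTestBase):

  def GetParams(self):
    # Build the graph with explicit fake-quant ranges so TF-TRT can build an
    # INT8 engine without running calibration.
    return _GetParams(add_quantization_nodes=True)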
Example 13
  def GetParams(self):
    """Test for CombinedNMS op in TF-TRT."""
    # Parameters
    batch_size = 1
    num_boxes = 200
    num_classes = 2
    q = 1
    max_output_size_per_class = 3
    max_total_size = 3
    score_threshold = 0.1
    iou_threshold = 0.5
    # Shapes
    boxes_shape = [batch_size, num_boxes, q, 4]
    scores_shape = [batch_size, num_boxes, num_classes]
    nmsed_boxes_shape = [batch_size, max_total_size, 4]
    nmsed_scores_shape = [batch_size, max_total_size]
    nmsed_classes_shape = [batch_size, max_total_size]
    valid_detections_shape = [batch_size]

    g = ops.Graph()
    with g.as_default():
      boxes = array_ops.placeholder(
          dtype=dtypes.float32, shape=boxes_shape, name='boxes')
      scores = array_ops.placeholder(
          dtype=dtypes.float32, shape=scores_shape, name='scores')
      max_output_size_per_class_tensor = constant_op.constant(
          max_output_size_per_class,
          dtype=dtypes.int32,
          name='max_output_size_per_class')
      max_total_size_tensor = constant_op.constant(
          max_total_size, dtype=dtypes.int32, name='max_total_size')
      iou_threshold_tensor = constant_op.constant(
          iou_threshold, dtype=dtypes.float32, name='iou_threshold')
      score_threshold_tensor = constant_op.constant(
          score_threshold, dtype=dtypes.float32, name='score_threshold')
      nms_output = image_ops_impl.combined_non_max_suppression(
          boxes,
          scores,
          max_output_size_per_class_tensor,
          max_total_size_tensor,
          iou_threshold_tensor,
          score_threshold_tensor,
          name='combined_nms')
      nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections = nms_output
      array_ops.identity(nmsed_boxes, name='nmsed_boxes')
      array_ops.identity(nmsed_scores, name='nmsed_scores')
      array_ops.identity(nmsed_classes, name='nmsed_classes')
      array_ops.identity(valid_detections, name='valid_detections')
    return trt_test.TfTrtIntegrationTestParams(
        gdef=g.as_graph_def(),
        input_names=['boxes', 'scores'],
        input_dims=[[boxes_shape, scores_shape]],
        output_names=[
            'nmsed_boxes', 'nmsed_scores', 'nmsed_classes', 'valid_detections'
        ],
        expected_output_dims=[[
            nmsed_boxes_shape, nmsed_scores_shape, nmsed_classes_shape,
            valid_detections_shape
        ]])
Example 14
 def GetParams(self):
     dtype = dtypes.float32
     input_name = "input"
     input_dims = [100, 24, 24, 2]
     output_name = "output"
     g = ops.Graph()
     with g.as_default():
         inp = array_ops.placeholder(dtype=dtype,
                                     shape=[None] + input_dims[1:],
                                     name=input_name)
         outputs = []
         # Here we test two types of reshapes, one changes the batch dimension and
         # the other does not. Note that we're not able to test reshaping to
         # scalar, since TRT requires input tensor to be of rank at least 2, so a
         # reshape with scalar input will be filtered out of the segment before
         # conversion.
         with g.device("/GPU:0"):
             # These reshapes happen at batch dimension, thus conversion should fail.
             for shape in [[2, 50, 24, 24, 2], [-1, 50, 24, 24, 2],
                           [2, 50, -1, 24, 2]]:
                 incompatible_reshape = array_ops.reshape(inp, shape)
                 reshape_back = array_ops.reshape(incompatible_reshape,
                                                  [-1, 24, 24, 2])
                 outputs.append(self.trt_incompatible_op(reshape_back))
             # Add another block with many reshapes that don't change the batch
             # dimension.
             compatible_reshape = array_ops.reshape(inp, [-1, 24 * 24, 2],
                                                    name="reshape-0")
             compatible_reshape = array_ops.reshape(compatible_reshape,
                                                    [100, 24, -1],
                                                    name="reshape-1")
             compatible_reshape = array_ops.reshape(compatible_reshape,
                                                    [100, 24 * 2, 24],
                                                    name="reshape-2")
             compatible_reshape = array_ops.reshape(compatible_reshape,
                                                    [-1, 24, 24 * 2],
                                                    name="reshape-3")
             compatible_reshape = array_ops.reshape(compatible_reshape,
                                                    [-1, 6, 4, 24, 2],
                                                    name="reshape-4")
             compatible_reshape = array_ops.reshape(compatible_reshape,
                                                    [-1, 6, 4, 6, 4, 2, 1],
                                                    name="reshape-5")
             compatible_reshape = array_ops.reshape(compatible_reshape,
                                                    [-1, 24, 24, 2],
                                                    name="reshape-6")
             outputs.append(self.trt_incompatible_op(compatible_reshape))
         math_ops.add_n(outputs, name=output_name)
     return trt_test.TfTrtIntegrationTestParams(
         gdef=g.as_graph_def(),
         input_names=[input_name],
         input_dims=[[input_dims]],
         output_names=[output_name],
         expected_output_dims=[[input_dims]])
Example 15
    def GetParams(self):
        """Test for multi connection neighboring nodes wiring tests in TF-TRT."""
        dtype = dtypes.float32
        input_name = "input"
        input_dims = [2, 3, 7, 5]
        output_name = "output"
        g = ops.Graph()
        with g.as_default():
            x = array_ops.placeholder(dtype=dtype,
                                      shape=input_dims,
                                      name=input_name)
            e = constant_op.constant(np.random.normal(.05, .005, [3, 2, 3, 4]),
                                     name="weights",
                                     dtype=dtype)
            conv = nn.conv2d(input=x,
                             filter=e,
                             data_format="NCHW",
                             strides=[1, 1, 1, 1],
                             padding="VALID",
                             name="conv")
            b = constant_op.constant(np.random.normal(2.0, 1.0, [1, 4, 1, 1]),
                                     name="bias",
                                     dtype=dtype)
            t = conv + b

            b = constant_op.constant(np.random.normal(5.0, 1.0, [1, 4, 1, 1]),
                                     name="bias",
                                     dtype=dtype)
            q = conv - b
            edge = self.trt_incompatible_op(q)

            b = constant_op.constant(np.random.normal(5.0, 1.0, [1, 4, 1, 1]),
                                     name="bias",
                                     dtype=dtype)
            d = b + conv
            edge3 = self.trt_incompatible_op(d)

            edge1 = self.trt_incompatible_op(conv)
            t = t - edge1
            q = q + edge
            t = t + q
            t = t + d
            t = t - edge3
            array_ops.squeeze(t, name=output_name)
        return trt_test.TfTrtIntegrationTestParams(
            gdef=g.as_graph_def(),
            input_names=[input_name],
            input_dims=[[input_dims]],
            output_names=[output_name],
            expected_output_dims=[[[2, 4, 5, 4]]])
Example 16
    def GetParams(self):
        """Testing conversion of BatchMatMul in TF-TRT conversion."""
        dtype = dtypes.float32
        input_name = "input"
        input_dims = [12, 5, 8, 12]
        output_name = "output"
        w1_name = "matmul_w1"
        w1_dims = [12, 5, 12, 7]
        w2_name = "matmul_w2"
        w2_dims = [12, 12, 7]
        g = ops.Graph()
        with g.as_default():
            inp = array_ops.placeholder(dtype=dtype,
                                        shape=[None] + input_dims[1:],
                                        name=input_name)
            w1 = array_ops.placeholder(dtype=dtype,
                                       shape=w1_dims,
                                       name=w1_name)
            w2 = array_ops.placeholder(dtype=dtype,
                                       shape=w2_dims,
                                       name=w2_name)
            with g.device("/GPU:0"):
                b = constant_op.constant(np.random.randn(12, 5, 12, 7),
                                         dtype=dtype)
                x1 = math_ops.matmul(inp, b)
                c = constant_op.constant(np.random.randn(5, 1, 1), dtype=dtype)
                x1 = x1 + c

                x2 = math_ops.matmul(inp, w1)
                d = constant_op.constant(np.random.randn(5, 1, 1), dtype=dtype)
                x2 = x2 * d

                e = self.trt_incompatible_op(inp)
                e = gen_array_ops.reshape(e, [12, 40, 12])
                x3 = math_ops.matmul(e, w2)
                f = constant_op.constant(np.random.randn(40, 1), dtype=dtype)
                x3 = x3 + f
                x3 = gen_array_ops.reshape(x3, [12, 5, 8, 7])
                x3 = self.trt_incompatible_op(x3)

                out = x1 + x2 + x3
            array_ops.squeeze(out, name=output_name)
        return trt_test.TfTrtIntegrationTestParams(
            gdef=g.as_graph_def(add_shapes=True),
            input_names=[input_name, w1_name, w2_name],
            input_dims=[[input_dims, w1_dims, w2_dims]],
            output_names=[output_name],
            expected_output_dims=[[[12, 5, 8, 7]]])
Example 17
    def GetParams(self):
        # TODO(laigd): we should test the following cases:
        # - batch size is not changed, other dims are changing
        # - batch size is decreasing, other dims are identical
        # - batch size is decreasing, other dims are changing
        # - batch size is increasing, other dims are identical
        # - batch size is increasing, other dims are changing
        input_dims = [[[1, 5, 5, 1]], [[10, 5, 5, 1]], [[3, 5, 5, 1]],
                      [[1, 5, 5, 1]], [[1, 3, 1, 1]], [[2, 9, 9, 1]],
                      [[1, 224, 224, 1]], [[1, 128, 224, 1]]]
        expected_output_dims = input_dims

        g = ops.Graph()
        with g.as_default():
            x = array_ops.placeholder(shape=(None, None, None, 1),
                                      dtype=dtypes.float32,
                                      name="input")
            conv_filter1 = constant_op.constant(np.ones([3, 3, 1, 8]),
                                                name="weights1",
                                                dtype=dtypes.float32)
            bias1 = constant_op.constant(np.random.randn(8),
                                         dtype=dtypes.float32)
            x = nn.conv2d(input=x,
                          filter=conv_filter1,
                          strides=[1, 1, 1, 1],
                          padding="SAME",
                          name="conv")
            x = nn.bias_add(x, bias1)
            x = nn.relu(x)
            conv_filter2 = constant_op.constant(np.ones([3, 3, 8, 1]),
                                                name="weights2",
                                                dtype=dtypes.float32)
            bias2 = constant_op.constant(np.random.randn(1),
                                         dtype=dtypes.float32)
            x = nn.conv2d(input=x,
                          filter=conv_filter2,
                          strides=[1, 1, 1, 1],
                          padding="SAME",
                          name="conv")
            x = nn.bias_add(x, bias2)
            x = array_ops.identity(x, name="output")

        return trt_test.TfTrtIntegrationTestParams(
            gdef=g.as_graph_def(),
            input_names=["input"],
            input_dims=input_dims,
            output_names=["output"],
            expected_output_dims=expected_output_dims)
Example 18
    def GetParams(self):
        """Create a graph containing multiple segment."""
        # TODO(aaroey): test graph with different dtypes.
        dtype = dtypes.float32
        input_name = "input"
        input_dims = [100, 24, 24, 2]
        output_name = "output"
        g = ops.Graph()
        with g.as_default():
            inp = array_ops.placeholder(dtype=dtype,
                                        shape=input_dims,
                                        name=input_name)
            with g.device("/GPU:0"):
                conv_filter = constant_op.constant(
                    [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]
                     ],
                    name="weights",
                    dtype=dtype)
                conv = nn.conv2d(input=inp,
                                 filter=conv_filter,
                                 strides=[1, 2, 2, 1],
                                 padding="SAME",
                                 name="conv")
                c1 = constant_op.constant(np.random.randn(12, 12, 6),
                                          dtype=dtype,
                                          name="c1")
                p = math_ops.mul(conv, c1, name="mul")
                c2 = constant_op.constant(np.random.randn(12, 12, 6),
                                          dtype=dtype,
                                          name="c2")
                q = math_ops.div(conv, c2, name="div")

                edge = self.trt_incompatible_op(q, name="incompatible")
                edge = math_ops.div(edge, edge, name="div1")
                r = math_ops.add(edge, edge, name="add")

                p = math_ops.sub(p, edge, name="sub")
                q = math_ops.mul(q, edge, name="mul1")
                s = math_ops.add(p, q, name="add1")
                s = math_ops.sub(s, r, name="sub1")
            array_ops.squeeze(s, name=output_name)
        return trt_test.TfTrtIntegrationTestParams(
            gdef=g.as_graph_def(),
            input_names=[input_name],
            input_dims=[[input_dims]],
            output_names=[output_name],
            expected_output_dims=[[[100, 12, 12, 6]]])
Example 19
 def GetParams(self):
     """Test for Constant broadcasting in TF-TRT."""
     dtype = dtypes.float32
     input_name = 'input'
     input_dims = [5, 12, 12, 2]
     output_name = 'output'
     g = ops.Graph()
     with g.as_default():
         x = array_ops.placeholder(dtype=dtype,
                                   shape=input_dims,
                                   name=input_name)
         filt1 = constant_op.constant(0.3,
                                      shape=(3, 3, 2, 1),
                                      dtype=dtype,
                                      name='filt1')
         y1 = nn.conv2d(x,
                        filt1,
                        strides=[1, 1, 1, 1],
                        padding='SAME',
                        name='y1')
         z1 = nn.relu(y1, name='z1')
         filt2 = constant_op.constant(np.random.randn(9),
                                      shape=(3, 3, 1, 1),
                                      dtype=dtype,
                                      name='filt2')
         y2 = nn.conv2d(z1,
                        filt2,
                        strides=[1, 1, 1, 1],
                        padding='SAME',
                        name='y2')
         z2 = nn.relu(y2, name='z')
         filt3 = constant_op.constant(np.random.randn(3, 3, 1, 1),
                                      shape=(3, 3, 1, 1),
                                      dtype=dtype,
                                      name='filt3')
         y3 = nn.conv2d(z2,
                        filt3,
                        strides=[1, 1, 1, 1],
                        padding='SAME',
                        name='y3')
         nn.relu(y3, name=output_name)
     return trt_test.TfTrtIntegrationTestParams(
         gdef=g.as_graph_def(),
         input_names=[input_name],
         input_dims=[[input_dims]],
         output_names=[output_name],
         expected_output_dims=[[[5, 12, 12, 1]]])
Example 20
 def GetParams(self):
   dtype = dtypes.float32
   input_dims = [[[1, 10, 10, 2]], [[2, 10, 10, 2]], [[4, 10, 10, 2]],
                 [[2, 10, 10, 2]]]
   expected_output_dims = [[[1, 10, 10, 2]], [[2, 10, 10, 2]], [[4, 10, 10,
                                                                 2]],
                           [[2, 10, 10, 2]]]
   return trt_test.TfTrtIntegrationTestParams(
       graph_fn=self.GraphFn,
       input_specs=[
           tensor_spec.TensorSpec([None, 10, 10, 2], dtypes.float32, "input")
       ],
       output_specs=[
           tensor_spec.TensorSpec([None, 10, 10, 1], dtypes.float32, "output")
       ],
       input_dims=input_dims,
       expected_output_dims=expected_output_dims)
Example 21
 def GetParams(self):
   """Testing conversion of Conv2D (data_format=NCHW) in TF-TRT conversion."""
   np.random.seed(1234)
   input_dims = [13, 3, 7, 11]
   g = build_graph(
       input_dims=input_dims,
       dtype=dtypes.float32,
       num_filters=5,
       data_format="channels_first",
       kernel_sizes=[(3, 3), (3, 2)],
       dilation_rates=[(1, 1), (2, 3)])
   return trt_test.TfTrtIntegrationTestParams(
       gdef=g.as_graph_def(),
       input_names=["input"],
       input_dims=[[input_dims]],
       output_names=["output"],
       expected_output_dims=[[[13, 5, 7, 11]]])
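Example 21 delegates graph construction to a build_graph helper that is not shown in the excerpt. Given the arguments passed in and the expected output shape [13, 5, 7, 11], it presumably creates an NCHW placeholder named "input", applies one SAME-padded convolution with num_filters output channels per (kernel_size, dilation_rate) pair, and names the final tensor "output". A sketch under those assumptions (not the original helper):

def build_graph(input_dims, dtype, num_filters, data_format, kernel_sizes,
                dilation_rates, padding="same"):
  g = ops.Graph()
  with g.as_default():
    x = array_ops.placeholder(dtype=dtype, shape=input_dims, name="input")
    fmt = "NCHW" if data_format == "channels_first" else "NHWC"
    in_channels = input_dims[1] if fmt == "NCHW" else input_dims[3]
    for kernel_size, dilation_rate in zip(kernel_sizes, dilation_rates):
      # Random HWIO kernel for this layer.
      kernel = constant_op.constant(
          np.random.randn(kernel_size[0], kernel_size[1], in_channels, num_filters),
          dtype=dtype)
      if fmt == "NCHW":
        dilations = [1, 1, dilation_rate[0], dilation_rate[1]]
      else:
        dilations = [1, dilation_rate[0], dilation_rate[1], 1]
      x = nn.conv2d(input=x, filter=kernel, strides=[1, 1, 1, 1],
                    dilations=dilations, padding=padding.upper(), data_format=fmt)
      in_channels = num_filters
    array_ops.identity(x, name="output")
  return g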
Example 22
 def GetParams(self):
     """Testing Concatenation in TF-TRT conversion."""
     dtype = dtypes.float32
     input_name = "input"
     input_dims = [2, 3, 3, 1]
     output_name = "output"
     g = ops.Graph()
     with g.as_default():
         x = array_ops.placeholder(dtype=dtype,
                                   shape=input_dims,
                                   name=input_name)
         # scale
         a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype)
         r1 = x / a
         a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype)
         r2 = a / x
         a = constant_op.constant(np.random.randn(1, 3, 1), dtype=dtype)
         r3 = a + x
         a = constant_op.constant(np.random.randn(1, 3, 1), dtype=dtype)
         r4 = x * a
         a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype)
         r5 = x - a
         a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype)
         r6 = a - x
         a = constant_op.constant(np.random.randn(3, 1), dtype=dtype)
         r7 = x - a
         a = constant_op.constant(np.random.randn(3, 1), dtype=dtype)
         r8 = a - x
         a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype)
         r9 = gen_math_ops.maximum(x, a)
         a = constant_op.constant(np.random.randn(3, 1), dtype=dtype)
         r10 = gen_math_ops.minimum(a, x)
         a = constant_op.constant(np.random.randn(3), dtype=dtype)
         r11 = x * a
         a = constant_op.constant(np.random.randn(1), dtype=dtype)
         r12 = a * x
         concat1 = array_ops.concat([r1, r2, r3, r4, r5, r6], axis=-1)
         concat2 = array_ops.concat([r7, r8, r9, r10, r11, r12], axis=3)
         x = array_ops.concat([concat1, concat2], axis=-1)
         gen_array_ops.reshape(x, [2, -1], name=output_name)
     return trt_test.TfTrtIntegrationTestParams(
         gdef=g.as_graph_def(),
         input_names=[input_name],
         input_dims=[[input_dims]],
         output_names=[output_name],
         expected_output_dims=[[[2, 126]]])
Example 23
    def GetParams(self):
        """Create a graph containing single segment."""
        dtype = dtypes.float32
        input_name = "input"
        input_dims = [100, 24, 24, 2]
        output_name = "output"
        g = ops.Graph()
        with g.as_default():
            inp = array_ops.placeholder(dtype=dtype,
                                        shape=[None] + input_dims[1:],
                                        name=input_name)
            with g.device("/GPU:0"):
                # Add a block with compatible transposes.
                compatible_transpose = array_ops.transpose(inp, [0, 3, 1, 2],
                                                           name="transpose-1")
                compatible_transpose = array_ops.transpose(
                    compatible_transpose, [0, 2, 3, 1], name="transposeback")

                # Add an incompatible op so the first block will not be in the same
                # subgraph where the following block belongs.
                bridge = self.trt_incompatible_op(compatible_transpose)

                # Add a block with incompatible transposes.
                #
                # Note: by default Grappler will run the TRT optimizer twice. At the
                # first time it will group the two transpose ops below to same segment
                # then fail the conversion due to the expected batch dimension problem.
                # At the second time, since the input of bridge op is TRTEngineOp_0, it
                # will fail to do shape inference which then cause conversion to fail.
                # TODO(laigd): support shape inference, make TRT optimizer run only
                # once, and fix this.
                incompatible_transpose = array_ops.transpose(
                    bridge, [2, 1, 0, 3], name="transpose-2")
                excluded_transpose = array_ops.transpose(
                    incompatible_transpose, [0, 2, 3, 1], name="transpose-3")
            array_ops.identity(excluded_transpose, name=output_name)
        return trt_test.TfTrtIntegrationTestParams(
            gdef=g.as_graph_def(),
            input_names=[input_name],
            input_dims=[[input_dims]],
            output_names=[output_name],
            expected_output_dims=[[[24, 100, 2, 24]]])
Example 24
  def GetParams(self):
    """Testing conversion of strided Conv2D (data_format=NCHW) in TF-TRT

    conversion.
    """
    np.random.seed(1234)
    dtype = dtypes.float32
    input_name = "input"
    n, c, h, w = 13, 3, 7, 11
    num_filters = 5
    input_dims = [n, c, h, w]
    output_name = "output"
    g = ops.Graph()
    with g.as_default():
      inp = array_ops.placeholder(
          dtype=dtype, shape=[None] + input_dims[1:], name=input_name)
      with g.device("/GPU:0"):
        output = inp
        output = conv2d_layer(
            output,
            num_filters, (3, 2),
            strides=(2, 2),
            padding="same",
            data_format="channels_first")
        h = div_round_up(h, 2)
        w = div_round_up(w, 2)
        output = conv2d_layer(
            output,
            num_filters, (3, 3),
            strides=(2, 2),
            dilation_rate=(2, 3),
            padding="same",
            data_format="channels_first")
        h = div_round_up(h, 2)
        w = div_round_up(w, 2)
        output = array_ops.identity(output, name=output_name)
    return trt_test.TfTrtIntegrationTestParams(
        gdef=g.as_graph_def(),
        input_names=[input_name],
        input_dims=[[input_dims]],
        output_names=[output_name],
        expected_output_dims=[[[n, num_filters, h, w]]])
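Example 24 uses two further helpers defined elsewhere in the test file: div_round_up, which tracks the spatial size produced by a strided SAME-padded convolution, and conv2d_layer, which wraps a convolution with a randomly initialized kernel. A plausible reconstruction in the same low-level style as the other snippets (an assumption, not the original code):

def div_round_up(n, d):
  # Ceiling division: the spatial output size of a SAME-padded conv with stride d.
  return (n + d - 1) // d


def conv2d_layer(inputs, filters, kernel_size, strides=(1, 1), padding="same",
                 data_format="channels_last", dilation_rate=(1, 1)):
  fmt = "NCHW" if data_format == "channels_first" else "NHWC"
  in_channels = int(inputs.shape[1] if fmt == "NCHW" else inputs.shape[3])
  # Random HWIO kernel matching the input's channel count and dtype.
  kernel = constant_op.constant(
      np.random.randn(kernel_size[0], kernel_size[1], in_channels, filters),
      dtype=inputs.dtype)
  if fmt == "NCHW":
    strides = [1, 1, strides[0], strides[1]]
    dilations = [1, 1, dilation_rate[0], dilation_rate[1]]
  else:
    strides = [1, strides[0], strides[1], 1]
    dilations = [1, dilation_rate[0], dilation_rate[1], 1]
  return nn.conv2d(input=inputs, filter=kernel, strides=strides,
                   dilations=dilations, padding=padding.upper(), data_format=fmt)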
Example 25
 def GetParams(self):
     """Test for rank 2 input in TF-TRT."""
     input_names = ["input", "input2"]
     # Two paths: first with rank 2 input, second with rank 4 input.
     input_dims = [[12, 5], [12, 5, 2, 2]]
     output_name = "output"
     g = ops.Graph()
     with g.as_default():
         outputs = []
         for i in range(2):
             x = array_ops.placeholder(dtype=dtypes.float32,
                                       shape=input_dims[i],
                                       name=input_names[i])
             c = constant_op.constant(1.0, name="c%d_1" % i)
             q = math_ops.add(x, c, name="add%d_1" % i)
             q = math_ops.abs(q, name="abs%d_1" % i)
             c = constant_op.constant(2.2, name="c%d_2" % i)
             q = math_ops.add(q, c, name="add%d_2" % i)
             q = math_ops.abs(q, name="abs%d_2" % i)
             c = constant_op.constant(3.0, name="c%d_3" % i)
             q = math_ops.add(q, c, name="add%d_3" % i)
             if i == 0:
                 axis = constant_op.constant(-1,
                                             dtype=dtypes.int32,
                                             name="axis")
                 for j in range(2):
                     q = array_ops.expand_dims(q,
                                               axis,
                                               name="expand%d_%d" % (i, j))
                 q = self.trt_incompatible_op(q)
             q = gen_math_ops.reciprocal(q, name="reciprocal%d" % i)
             outputs.append(q)
         # Combine both paths
         q = math_ops.add(outputs[0], outputs[1], name="add")
         array_ops.squeeze(q, name=output_name)
     return trt_test.TfTrtIntegrationTestParams(
         gdef=g.as_graph_def(),
         input_names=input_names,
         input_dims=[input_dims],
         output_names=[output_name],
         expected_output_dims=[[input_dims[1]]])
Example 26
 def GetParams(self):
   """Single vgg layer in NCHW unit tests in TF-TRT."""
   dtype = dtypes.float32
   input_name = "input"
   input_dims = [5, 2, 8, 8]
   output_name = "output"
   g = ops.Graph()
   with g.as_default():
     x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
     x, _, _ = nn_impl.fused_batch_norm(
         x, [1.0, 1.0], [0.0, 0.0],
         mean=[0.5, 0.5],
         variance=[1.0, 1.0],
         data_format="NCHW",
         is_training=False)
     e = constant_op.constant(
         np.random.randn(1, 1, 2, 6), name="weights", dtype=dtype)
     conv = nn.conv2d(
         input=x,
         filter=e,
         data_format="NCHW",
         strides=[1, 1, 2, 2],
         padding="SAME",
         name="conv")
     b = constant_op.constant(np.random.randn(6), name="bias", dtype=dtype)
     t = nn.bias_add(conv, b, data_format="NCHW", name="biasAdd")
     relu = nn.relu(t, "relu")
     idty = array_ops.identity(relu, "ID")
     v = nn_ops.max_pool(
         idty, [1, 1, 2, 2], [1, 1, 2, 2],
         "VALID",
         data_format="NCHW",
         name="max_pool")
     array_ops.squeeze(v, name=output_name)
   return trt_test.TfTrtIntegrationTestParams(
       gdef=g.as_graph_def(),
       input_names=[input_name],
       input_dims=[[input_dims]],
       output_names=[output_name],
       expected_output_dims=[[[5, 6, 2, 2]]])
Example 27
 def GetParams(self):
     """Test exclusion of ops which are not supported in INT32 mode by TF-TRT"""
     input_name = 'input'
     output_name = 'output'
     input_dims = [100, 4]
     dtype = dtypes.int32
     g = ops.Graph()
     with g.as_default():
         x = array_ops.placeholder(dtype=dtype,
                                   shape=input_dims,
                                   name=input_name)
         b = self._ConstOp((4, 10), dtype)
         x = math_ops.matmul(x, b)
         b = self._ConstOp((10, ), dtype)
         x = nn.bias_add(x, b)
         x = array_ops.identity(x, name=output_name)
     return trt_test.TfTrtIntegrationTestParams(
         gdef=g.as_graph_def(),
         input_names=[input_name],
         input_dims=[[input_dims]],
         output_names=[output_name],
         expected_output_dims=[[[100, 10]]])
Example 28
 def GetParams(self):
     """Create a graph containing single segment."""
     # TODO(aaroey): test graph with different dtypes.
     dtype = dtypes.float32
     input_name = "input"
     input_dims = [100, 24, 24, 2]
     output_name = "output"
     g = ops.Graph()
     with g.as_default():
         inp = array_ops.placeholder(dtype=dtype,
                                     shape=[None] + input_dims[1:],
                                     name=input_name)
         with g.device("/GPU:0"):
             conv_filter = constant_op.constant(
                 [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]
                  ],
                 name="weights",
                 dtype=dtype)
             conv = nn.conv2d(input=inp,
                              filter=conv_filter,
                              strides=[1, 2, 2, 1],
                              padding="SAME",
                              name="conv")
             bias = constant_op.constant([4., 1.5, 2., 3., 5., 7.],
                                         name="bias",
                                         dtype=dtype)
             added = nn.bias_add(conv, bias, name="bias_add")
             relu = nn.relu(added, "relu")
             identity = array_ops.identity(relu, "identity")
             pool = nn_ops.max_pool(identity, [1, 2, 2, 1], [1, 2, 2, 1],
                                    "VALID",
                                    name="max_pool")
         array_ops.squeeze(pool, name=output_name)
     return trt_test.TfTrtIntegrationTestParams(
         gdef=g.as_graph_def(),
         input_names=[input_name],
         input_dims=[[input_dims]],
         output_names=[output_name],
         expected_output_dims=[[[100, 6, 6, 6]]])
Example 29
 def GetParams(self):
     """Testing Top-K in TF-TRT conversion."""
     dtype = dtypes.float32
     input_name = "input"
     input_dims = [100, 100]
     k = 5
     g = ops.Graph()
     with g.as_default():
         x = array_ops.placeholder(dtype=dtype,
                                   shape=input_dims,
                                   name=input_name)
         k_tensor = constant_op.constant(k,
                                         dtype=dtypes.int32,
                                         name="Const")
         values, indices = nn_ops.top_k(x, k_tensor, name="TopK")
         values = array_ops.identity(values, name="output_values")
         indices = array_ops.identity(indices, name="output_indices")
     return trt_test.TfTrtIntegrationTestParams(
         gdef=g.as_graph_def(),
         input_names=[input_name],
         input_dims=[[input_dims]],
         output_names=["output_values", "output_indices"],
         expected_output_dims=[[[100, k], [100, k]]])
Example 30
    def GetParams(self):
        # TODO(laigd): we should test the following cases:
        # - batch size is not changed, other dims are changing
        # - batch size is decreasing, other dims are identical
        # - batch size is decreasing, other dims are changing
        # - batch size is increasing, other dims are identical
        # - batch size is increasing, other dims are changing
        input_dims = [[[1, 5, 5, 1]], [[10, 5, 5, 1]], [[3, 5, 5, 1]],
                      [[1, 5, 5, 1]], [[1, 3, 1, 1]], [[2, 9, 9, 1]],
                      [[1, 224, 224, 1]], [[1, 128, 224, 1]]]
        expected_output_dims = input_dims

        return trt_test.TfTrtIntegrationTestParams(
            graph_fn=self.GraphFn,
            input_specs=[
                tensor_spec.TensorSpec([None, None, None, 1], dtypes.float32,
                                       "input")
            ],
            output_specs=[
                tensor_spec.TensorSpec([None, None, None, 1], dtypes.float32,
                                       "output")
            ],
            input_dims=input_dims,
            expected_output_dims=expected_output_dims)
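Examples 20 and 30 use the newer harness signature, in which TfTrtIntegrationTestParams takes a graph_fn plus input_specs/output_specs instead of a serialized GraphDef. The GraphFn itself is not shown; for Example 30 it is presumably the same two-convolution network that Example 17 builds inline (1 -> 8 -> 1 channels, SAME padding, so the output shape matches the input shape), written as a function of the input tensor. A sketch under that assumption:

  def GraphFn(self, x):
    # Mirrors the inline graph of Example 17: conv (1 -> 8 channels), bias, relu,
    # then conv (8 -> 1 channels), bias, and an identity named "output".
    conv_filter1 = constant_op.constant(
        np.ones([3, 3, 1, 8]), name="weights1", dtype=dtypes.float32)
    bias1 = constant_op.constant(np.random.randn(8), dtype=dtypes.float32)
    x = nn.conv2d(input=x, filter=conv_filter1, strides=[1, 1, 1, 1],
                  padding="SAME", name="conv")
    x = nn.bias_add(x, bias1)
    x = nn.relu(x)
    conv_filter2 = constant_op.constant(
        np.ones([3, 3, 8, 1]), name="weights2", dtype=dtypes.float32)
    bias2 = constant_op.constant(np.random.randn(1), dtype=dtypes.float32)
    x = nn.conv2d(input=x, filter=conv_filter2, strides=[1, 1, 1, 1],
                  padding="SAME", name="conv_1")
    x = nn.bias_add(x, bias2)
    return array_ops.identity(x, name="output")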