Code Example #1
 def GetParams(self):
     """Testing conversion of BatchMatMul in TF-TRT conversion."""
     dtype = dtypes.float32
     input_name = "input"
     input_dims = [2, 15, 15, 3]
     output_name = "output"
     g = ops.Graph()
     with g.as_default():
         inp = array_ops.placeholder(dtype=dtype,
                                     shape=[None] + input_dims[1:],
                                     name=input_name)
         with g.device("/GPU:0"):
             e1 = constant_op.constant(np.random.randn(1, 1, 3, 5),
                                       name="kernel_1",
                                       dtype=dtype)
             e2 = constant_op.constant(np.random.randn(1, 1, 5, 10),
                                       name="kernel_2",
                                       dtype=dtype)
             conv = nn.conv2d(input=inp,
                              filter=e1,
                              strides=[1, 1, 1, 1],
                              padding="VALID",
                              name="conv")
             out = nn.conv2d(input=conv,
                             filter=e2,
                             strides=[1, 1, 1, 1],
                             padding="VALID",
                             name="conv_2")
         array_ops.squeeze(out, name=output_name)
     return trt_test.TfTrtIntegrationTestParams(
         gdef=g.as_graph_def(),
         input_names=[input_name],
         input_dims=[[input_dims]],
         output_names=[output_name],
         expected_output_dims=[[[2, 15, 15, 10]]])
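A params object like the one above is consumed by the TF-TRT integration test harness. A minimal, hedged sketch of the surrounding subclass, assuming the base class and hooks from the tensorflow/python/compiler/tensorrt test framework (the class name and engine name below are illustrative, not from the original):

 from tensorflow.python.platform import test

 class TwoConvTest(trt_test.TfTrtIntegrationTestBase):  # hypothetical name

     def GetParams(self):
         ...  # body as in Code Example #1 above

     def ExpectedEnginesToBuild(self, run_params):
         # Assumption: both 1x1 convolutions fuse into a single engine.
         return ["TRTEngineOp_0"]

 if __name__ == "__main__":
     test.main()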
Code Example #2
 def GetParams(self):
   """Test for Constant broadcasting in TF-TRT."""
   dtype = dtypes.float32
   input_name = 'input'
   input_dims = [5, 12, 12, 2]
   g = ops.Graph()
   with g.as_default():
     x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
     filt1 = constant_op.constant(
         0.3, shape=(3, 3, 2, 1), dtype=dtype, name='filt1')
     y1 = nn.conv2d(x, filt1, strides=[1, 1, 1, 1], padding='SAME', name='y1')
     z1 = nn.relu(y1, name='z1')
     filt2 = constant_op.constant(
         np.random.randn(9), shape=(3, 3, 1, 1), dtype=dtype, name='filt2')
     y2 = nn.conv2d(z1, filt2, strides=[1, 1, 1, 1], padding='SAME', name='y2')
     z2 = nn.relu(y2, name='z')
     filt3 = constant_op.constant(
         np.random.randn(3, 3, 1, 1),
         shape=(3, 3, 1, 1),
         dtype=dtype,
         name='filt3')
     y3 = nn.conv2d(z2, filt3, strides=[1, 1, 1, 1], padding='SAME', name='y3')
     nn.relu(y3, name='output')
   return trt_test.TfTrtIntegrationTestParams(
       gdef=g.as_graph_def(),
       input_names=[input_name],
       input_dims=[input_dims],
       num_expected_engines=1,
       expected_output_dims=(5, 12, 12, 1),
       allclose_atol=1.e-02,
       allclose_rtol=1.e-02)
Code Example #3
 def GraphFn(self, x):
     """Return the expected graph to convert."""
     dtype = x.dtype
     filt1 = constant_op.constant(0.3,
                                  shape=(3, 3, 2, 1),
                                  dtype=dtype,
                                  name='filt1')
     y1 = nn.conv2d(x,
                    filt1,
                    strides=[1, 1, 1, 1],
                    padding='SAME',
                    name='y1')
     z1 = nn.relu(y1, name='z1')
     filt2 = constant_op.constant(0.3,
                                  shape=(3, 3, 1, 1),
                                  dtype=dtype,
                                  name='filt2')
     y2 = nn.conv2d(z1,
                    filt2,
                    strides=[1, 1, 1, 1],
                    padding='SAME',
                    name='y2')
     z2 = nn.relu(y2, name='z')
     filt3 = constant_op.constant(0.3,
                                  shape=(3, 3, 1, 1),
                                  dtype=dtype,
                                  name='filt3')
     y3 = nn.conv2d(z2,
                    filt3,
                    strides=[1, 1, 1, 1],
                    padding='SAME',
                    name='y3')
     return nn.relu(y3, name='output_0')
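In the newer TF-TRT test API, a GraphFn like the one above is typically paired with BuildParams rather than a hand-written GetParams. A minimal sketch, assuming BuildParams from the same test base class (the batch size of 13 is an assumption for illustration):

 def GetParams(self):
     # Input has 2 channels (filt1 is 3x3x2x1); the output has 1 channel.
     return self.BuildParams(self.GraphFn, dtypes.float32,
                             input_shapes=[[13, 12, 12, 2]],
                             output_shapes=[[13, 12, 12, 1]])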
Code Example #4
 def GetParams(self):
   """Testing conversion of BatchMatMul in TF-TRT conversion."""
   dtype = dtypes.float32
   input_name = "input"
   input_dims = [2, 15, 15, 3]
   g = ops.Graph()
   with g.as_default():
     inp = array_ops.placeholder(
         dtype=dtype, shape=[None] + input_dims[1:], name=input_name)
     with g.device("/GPU:0"):
       e1 = constant_op.constant(
           np.random.randn(1, 1, 3, 5), name="kernel_1", dtype=dtype)
       e2 = constant_op.constant(
           np.random.randn(1, 1, 5, 10), name="kernel_2", dtype=dtype)
       conv = nn.conv2d(
           input=inp,
           filter=e1,
           strides=[1, 1, 1, 1],
           padding="VALID",
           name="conv")
       out = nn.conv2d(
           input=conv,
           filter=e2,
           strides=[1, 1, 1, 1],
           padding="VALID",
           name="conv_2")
     array_ops.squeeze(out, name=self.output_name)
   return trt_test.TfTrtIntegrationTestParams(
       gdef=g.as_graph_def(),
       input_names=[input_name],
       input_dims=[input_dims],
       expected_engines=["my_trt_op_0"],
       expected_output_dims=(2, 15, 15, 10),
       allclose_atol=1.e-02,
       allclose_rtol=1.e-02)
Code Example #5
 def GraphFn(self, x):
     dtype = x.dtype
     filt1 = constant_op.constant(0.3,
                                  shape=(3, 3, 2, 1),
                                  dtype=dtype,
                                  name='filt1')
     y1 = nn.conv2d(x,
                    filt1,
                    strides=[1, 1, 1, 1],
                    padding='SAME',
                    name='y1')
     z1 = nn.relu(y1, name='z1')
     filt2 = constant_op.constant(np.random.randn(9),
                                  shape=(3, 3, 1, 1),
                                  dtype=dtype,
                                  name='filt2')
     y2 = nn.conv2d(z1,
                    filt2,
                    strides=[1, 1, 1, 1],
                    padding='SAME',
                    name='y2')
     z2 = nn.relu(y2, name='z')
     filt3 = constant_op.constant(np.random.randn(3, 3, 1, 1),
                                  shape=(3, 3, 1, 1),
                                  dtype=dtype,
                                  name='filt3')
     y3 = nn.conv2d(z2,
                    filt3,
                    strides=[1, 1, 1, 1],
                    padding='SAME',
                    name='y3')
     return nn.relu(y3, name='output_0')
Code Example #6
    def GetParams(self):
        # TODO(laigd): we should test the following cases:
        # - batch size is not changed, other dims are changing
        # - batch size is decreasing, other dims are identical
        # - batch size is decreasing, other dims are changing
        # - batch size is increasing, other dims are identical
        # - batch size is increasing, other dims are changing
        input_dims = [[[1, 5, 5, 1]], [[10, 5, 5, 1]], [[3, 5, 5, 1]],
                      [[1, 5, 5, 1]], [[1, 3, 1, 1]], [[2, 9, 9, 1]],
                      [[1, 224, 224, 1]], [[1, 128, 224, 1]]]
        expected_output_dims = input_dims

        g = ops.Graph()
        with g.as_default():
            x = array_ops.placeholder(shape=(None, None, None, 1),
                                      dtype=dtypes.float32,
                                      name="input")
            conv_filter1 = constant_op.constant(np.ones([3, 3, 1, 8]),
                                                name="weights1",
                                                dtype=dtypes.float32)
            bias1 = constant_op.constant(np.random.randn(8),
                                         dtype=dtypes.float32)
            x = nn.conv2d(input=x,
                          filter=conv_filter1,
                          strides=[1, 1, 1, 1],
                          padding="SAME",
                          name="conv")
            x = nn.bias_add(x, bias1)
            x = nn.relu(x)
            conv_filter2 = constant_op.constant(np.ones([3, 3, 8, 1]),
                                                name="weights2",
                                                dtype=dtypes.float32)
            bias2 = constant_op.constant(np.random.randn(1),
                                         dtype=dtypes.float32)
            x = nn.conv2d(input=x,
                          filter=conv_filter2,
                          strides=[1, 1, 1, 1],
                          padding="SAME",
                          name="conv")
            x = nn.bias_add(x, bias2)
            x = array_ops.identity(x, name="output")

        return trt_test.TfTrtIntegrationTestParams(
            gdef=g.as_graph_def(),
            input_names=["input"],
            input_dims=input_dims,
            output_names=["output"],
            expected_output_dims=expected_output_dims)
Code Example #7
 def GetParams(self):
   """Neighboring node wiring tests in TF-TRT conversion."""
   dtype = dtypes.float32
   input_name = "input"
   input_dims = [2, 3, 7, 5]
   g = ops.Graph()
   with g.as_default():
     x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
     e = constant_op.constant(
         np.random.normal(.3, 0.05, [3, 2, 3, 4]), name="weights", dtype=dtype)
     conv = nn.conv2d(
         input=x,
         filter=e,
         data_format="NCHW",
         strides=[1, 1, 1, 1],
         padding="VALID",
         name="conv")
     b = constant_op.constant(
         np.random.normal(1.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
     t = conv * b
     e = gen_math_ops.tan(conv)
     t = t - e
     array_ops.squeeze(t, name=self.output_name)
   return trt_test.TfTrtIntegrationTestParams(
       gdef=g.as_graph_def(),
       input_names=[input_name],
       input_dims=[input_dims],
       num_expected_engines=2,
       expected_output_dims=(2, 4, 5, 4),
       allclose_atol=1.e-03,
       allclose_rtol=1.e-03)
Code Example #8
 def _annotated_graph(self):
     graph = ops.Graph()
     with graph.as_default():
         random_seed.set_random_seed(2)
         current_activation = variable_scope.get_variable(
             name='start', shape=[1, 2, 2, 5])
         conv_filter = variable_scope.get_variable(name='filter',
                                                   shape=[5, 5, 5, 5])
         for layer_number in range(3):
             with variable_scope.variable_scope(
                     'layer_{}'.format(layer_number)):
                 after_conv = nn.conv2d(current_activation, conv_filter,
                                        [1, 1, 1, 1], 'SAME')
                 current_activation = 2. * after_conv
                 current_activation.op._set_attr(
                     '_recompute_hint',
                     # The value of the attribute does not matter; just that the key
                     # exists in the op's attributes.
                     attr_value_pb2.AttrValue(i=1))
                 current_activation += 5.
                 current_activation.op._set_attr(
                     '_recompute_hint', attr_value_pb2.AttrValue(i=0))
                 current_activation = nn.relu(current_activation)
                 current_activation.op._set_attr(
                     '_recompute_hint', attr_value_pb2.AttrValue(i=1))
         loss = math_ops.reduce_mean(current_activation)
         optimizer = train.AdamOptimizer(0.001)
         train_op = optimizer.minimize(loss)
         init_op = variables.global_variables_initializer()
     return graph, init_op, train_op
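A hedged sketch of how such an annotated graph is typically exercised: Grappler's MANUAL memory optimization reads the '_recompute_hint' attributes set above. The proto and config names below are TF1-era APIs, and the exact recomputation behavior depends on the TensorFlow version:

 from tensorflow.core.protobuf import config_pb2
 from tensorflow.core.protobuf import rewriter_config_pb2
 from tensorflow.python.client import session

 rewrites = rewriter_config_pb2.RewriterConfig(
     memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL)
 config = config_pb2.ConfigProto(
     graph_options=config_pb2.GraphOptions(rewrite_options=rewrites))
 graph, init_op, train_op = self._annotated_graph()
 with session.Session(config=config, graph=graph) as sess:
     sess.run(init_op)   # initialize variables
     sess.run(train_op)  # hinted activations may be recomputed on backprop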
Code Example #9
 def GraphFn(self, x):
   dtype = x.dtype
   x, _, _ = nn_impl.fused_batch_norm(
       x, [1.0, 1.0], [0.0, 0.0],
       mean=[0.5, 0.5],
       variance=[1.0, 1.0],
       data_format="NCHW",
       is_training=False)
   e = constant_op.constant(
       np.random.randn(1, 1, 2, 6), name="weights", dtype=dtype)
   conv = nn.conv2d(
       input=x,
       filter=e,
       data_format="NCHW",
       strides=[1, 1, 2, 2],
       padding="SAME",
       name="conv")
   b = constant_op.constant(np.random.randn(6), name="bias", dtype=dtype)
   t = nn.bias_add(conv, b, data_format="NCHW", name="biasAdd")
   relu = nn.relu(t, "relu")
   idty = array_ops.identity(relu, "ID")
   v = nn_ops.max_pool(
       idty, [1, 1, 2, 2], [1, 1, 2, 2],
       "VALID",
       data_format="NCHW",
       name="max_pool")
   return array_ops.squeeze(v, name="output_0")
Code Example #10
 def GetParams(self):
     """Neighboring node wiring tests in TF-TRT conversion."""
     dtype = dtypes.float32
     input_name = "input"
     input_dims = [2, 3, 7, 5]
     g = ops.Graph()
     with g.as_default():
         x = array_ops.placeholder(dtype=dtype,
                                   shape=input_dims,
                                   name=input_name)
         e = constant_op.constant(np.random.normal(.3, 0.05, [3, 2, 3, 4]),
                                  name="weights",
                                  dtype=dtype)
         conv = nn.conv2d(input=x,
                          filter=e,
                          data_format="NCHW",
                          strides=[1, 1, 1, 1],
                          padding="VALID",
                          name="conv")
         b = constant_op.constant(np.random.normal(1.0, 1.0, [1, 4, 1, 1]),
                                  name="bias",
                                  dtype=dtype)
         t = conv * b
         e = gen_math_ops.tan(conv)
         t = t - e
         array_ops.squeeze(t, name=self.output_name)
      return trt_test.TfTrtIntegrationTestParams(
          gdef=g.as_graph_def(),
          input_names=[input_name],
          input_dims=[input_dims],
          num_expected_engines=2,
          expected_output_dims=(2, 4, 5, 4),
          allclose_atol=1.e-03,
          allclose_rtol=1.e-03)
Code Example #11
 def get_simple_graph_def(self):
   """Create a simple graph and return its graph_def."""
   g = ops.Graph()
   with g.as_default():
     a = aops.placeholder(
         dtype=dtypes.float32, shape=(None, 24, 24, 2), name="input")
     e = cop.constant(
         [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
         name="weights",
         dtype=dtypes.float32)
     conv = nn.conv2d(
         input=a,
         filter=e,
         strides=[1, 2, 2, 1],
         padding="SAME",
         name="conv")
     b = cop.constant(
         [4., 1.5, 2., 3., 5., 7.], name="bias", dtype=dtypes.float32)
     t = nn.bias_add(conv, b, name="biasAdd")
     relu = nn.relu(t, "relu")
     idty = aops.identity(relu, "ID")
     v = nn_ops.max_pool(
         idty, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
     aops.squeeze(v, name="output")
   return g.as_graph_def()
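A hedged sketch of feeding this GraphDef through offline TF-TRT conversion, using the TF1 contrib-era entry point (the function has since moved, and its exact signature varied across releases, so treat the arguments as an assumption):

 from tensorflow.contrib import tensorrt as trt  # contrib-era import

 trt_graph_def = trt.create_inference_graph(
     input_graph_def=self.get_simple_graph_def(),
     outputs=["output"],   # name given to the final squeeze above
     max_batch_size=4,     # assumed batch size for the None dim
     precision_mode="FP32")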
Code Example #12
File: base_test.py Project: zuiailaoda/tensorflow
    def GraphFn(self, inp):
        """Create a graph containing multiple segment."""
        dtype = inp.dtype
        conv_filter = constant_op.constant(
            [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
            name="weights",
            dtype=dtype)
        conv = nn.conv2d(input=inp,
                         filter=conv_filter,
                         strides=[1, 2, 2, 1],
                         padding="SAME",
                         name="conv")
        c1 = constant_op.constant(np.random.randn(12, 12, 6),
                                  dtype=dtype,
                                  name="c1")
        p = math_ops.mul(conv, c1, name="mul")
        c2 = constant_op.constant(np.random.randn(12, 12, 6),
                                  dtype=dtype,
                                  name="c2")
        q = math_ops.div(conv, c2, name="div")

        edge = self.trt_incompatible_op(q, name="incompatible")
        edge = math_ops.div(edge, edge, name="div1")
        r = math_ops.add(edge, edge, name="add")

        p = math_ops.sub(p, edge, name="sub")
        q = math_ops.mul(q, edge, name="mul1")
        s = math_ops.add(p, q, name="add1")
        s = math_ops.sub(s, r, name="sub1")
        return array_ops.squeeze(s, name="output_0")
Code Example #13
 def GetParams(self):
   """Neighboring node wiring tests in TF-TRT conversion."""
   dtype = dtypes.float32
   input_name = "input"
   input_dims = [2, 3, 7, 5]
   output_name = "output"
   g = ops.Graph()
   with g.as_default():
     x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
     e = constant_op.constant(
         np.random.normal(.3, 0.05, [3, 2, 3, 4]), name="weights", dtype=dtype)
     conv = nn.conv2d(
         input=x,
         filter=e,
         data_format="NCHW",
         strides=[1, 1, 1, 1],
         padding="VALID",
         name="conv")
     b = constant_op.constant(
         np.random.normal(1.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
     t = math_ops.mul(conv, b, name="mul")
     e = self.trt_incompatible_op(conv, name="incompatible")
     t = math_ops.sub(t, e, name="sub")
     array_ops.squeeze(t, name=output_name)
   return trt_test.TfTrtIntegrationTestParams(
       gdef=g.as_graph_def(),
       input_names=[input_name],
       input_dims=[input_dims],
       output_names=[output_name],
       expected_output_dims=[(2, 4, 5, 4)])
Code Example #14
 def _annotated_graph(self):
   graph = ops.Graph()
   with graph.as_default():
     random_seed.set_random_seed(2)
     current_activation = variable_scope.get_variable(
         name='start', shape=[1, 2, 2, 5])
     conv_filter = variable_scope.get_variable(
         name='filter', shape=[5, 5, 5, 5])
     for layer_number in range(3):
       with variable_scope.variable_scope('layer_{}'.format(layer_number)):
         after_conv = nn.conv2d(current_activation, conv_filter, [1, 1, 1, 1],
                                'SAME')
         current_activation = 2. * after_conv
         current_activation.op._set_attr(
             '_recompute_hint',
             # The value of the attribute does not matter; just that the key
             # exists in the op's attributes.
             attr_value_pb2.AttrValue(i=1))
         current_activation += 5.
         current_activation.op._set_attr(
             '_recompute_hint', attr_value_pb2.AttrValue(i=0))
         current_activation = nn.relu(current_activation)
         current_activation.op._set_attr(
             '_recompute_hint', attr_value_pb2.AttrValue(i=1))
     loss = math_ops.reduce_mean(current_activation)
     optimizer = train.AdamOptimizer(0.001)
     train_op = optimizer.minimize(loss)
     init_op = variables.global_variables_initializer()
   return graph, init_op, train_op
Code Example #15
def GetSingleEngineGraphDef(dtype=dtypes.float32):
    """Create a graph containing single segment."""
    g = ops.Graph()
    with g.as_default():
        inp = array_ops.placeholder(dtype=dtype,
                                    shape=[None] + INPUT_DIMS[1:],
                                    name=INPUT_NAME)
        with g.device("/GPU:0"):
            conv_filter = constant_op.constant(
                [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
                name="weights",
                dtype=dtype)
            conv = nn.conv2d(input=inp,
                             filter=conv_filter,
                             strides=[1, 2, 2, 1],
                             padding="SAME",
                             name="conv")
            bias = constant_op.constant([4., 1.5, 2., 3., 5., 7.],
                                        name="bias",
                                        dtype=dtype)
            added = nn.bias_add(conv, bias, name="bias_add")
            relu = nn.relu(added, "relu")
            identity = array_ops.identity(relu, "identity")
            pool = nn_ops.max_pool(identity, [1, 2, 2, 1], [1, 2, 2, 1],
                                   "VALID",
                                   name="max_pool")
        array_ops.squeeze(pool, name=OUTPUT_NAME)
    return g.as_graph_def()
Code Example #16
 def GetParams(self):
   dtype = dtypes.float32
   input_name = "input"
   input_dims = [[[1, 10, 10, 2]], [[2, 10, 10, 2]], [[4, 10, 10, 2]],
                 [[2, 10, 10, 2]]]
    expected_output_dims = [[[1, 10, 10, 1]], [[2, 10, 10, 1]],
                            [[4, 10, 10, 1]], [[2, 10, 10, 1]]]
   output_name = "output"
   g = ops.Graph()
   with g.as_default():
     x = array_ops.placeholder(
         dtype=dtype, shape=[None, 10, 10, 2], name=input_name)
     conv_filter = constant_op.constant(
         np.random.randn(3, 3, 2, 1), dtype=dtypes.float32)
     x = nn.conv2d(
         input=x,
         filter=conv_filter,
         strides=[1, 1, 1, 1],
         padding="SAME",
         name="conv")
     bias = constant_op.constant(
         np.random.randn(1, 10, 10, 1), dtype=dtypes.float32)
     x = math_ops.add(x, bias)
     x = nn.relu(x)
     x = array_ops.identity(x, name="output")
   return trt_test.TfTrtIntegrationTestParams(
       gdef=g.as_graph_def(),
       input_names=[input_name],
       input_dims=input_dims,
       output_names=[output_name],
       expected_output_dims=expected_output_dims)
Code Example #17
 def _GetMetaGraph(self,
                   batch_size=14,
                   image_dim=12,
                   optimizer_scope_name=''):
     """A simple layered graph with conv, an intermediate op, and a ReLU."""
     graph = ops.Graph()
     with graph.as_default():
         random_seed.set_random_seed(1)
         current_activation = variable_scope.get_variable(
             name='start', shape=[batch_size, image_dim, image_dim, 5])
         conv_filter = variable_scope.get_variable(name='filter',
                                                   shape=[5, 5, 5, 5])
         for layer_number in range(10):
             with variable_scope.variable_scope(
                     'layer_{}'.format(layer_number)):
                 after_conv = nn.conv2d(current_activation, conv_filter,
                                        [1, 1, 1, 1], 'SAME')
                 current_activation = 2. * after_conv
                 current_activation = nn.relu(current_activation)
         loss = math_ops.reduce_mean(current_activation)
         with ops.name_scope(optimizer_scope_name):
             optimizer = train.AdamOptimizer(0.001)
             train_op = optimizer.minimize(loss)
         init_op = variables.global_variables_initializer()
         metagraph = train.export_meta_graph()
     return (metagraph, init_op.name, train_op.name, loss.name)
Code Example #18
def GetSingleEngineGraphDef(dtype=dtypes.float32):
  """Create a graph containing single segment."""
  g = ops.Graph()
  with g.as_default():
    inp = array_ops.placeholder(
        dtype=dtype, shape=[None] + INPUT_DIMS[1:], name=INPUT_NAME)
    with g.device("/GPU:0"):
      conv_filter = constant_op.constant(
          [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
          name="weights",
          dtype=dtype)
      conv = nn.conv2d(
          input=inp,
          filter=conv_filter,
          strides=[1, 2, 2, 1],
          padding="SAME",
          name="conv")
      bias = constant_op.constant(
          [4., 1.5, 2., 3., 5., 7.], name="bias", dtype=dtype)
      added = nn.bias_add(conv, bias, name="bias_add")
      relu = nn.relu(added, "relu")
      identity = array_ops.identity(relu, "identity")
      pool = nn_ops.max_pool(
          identity, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
    array_ops.squeeze(pool, name=OUTPUT_NAME)
  return g.as_graph_def()
Code Example #19
def GetMultiEngineGraphDef(dtype=dtypes.float32):
  """Create a graph containing multiple segment."""
  g = ops.Graph()
  with g.as_default():
    inp = array_ops.placeholder(
        dtype=dtype, shape=[None] + INPUT_DIMS[1:], name=INPUT_NAME)
    with g.device("/GPU:0"):
      conv_filter = constant_op.constant(
          [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
          name="weights",
          dtype=dtype)
      conv = nn.conv2d(
          input=inp,
          filter=conv_filter,
          strides=[1, 2, 2, 1],
          padding="SAME",
          name="conv")
      c1 = constant_op.constant(
          np.random.randn(INPUT_DIMS[0], 12, 12, 6), dtype=dtype)
      p = conv * c1
      c2 = constant_op.constant(
          np.random.randn(INPUT_DIMS[0], 12, 12, 6), dtype=dtype)
      q = conv / c2

      edge = math_ops.sin(q)
      edge /= edge
      r = edge + edge

      p -= edge
      q *= edge
      s = p + q
      s -= r
    array_ops.squeeze(s, name=OUTPUT_NAME)
  return g.as_graph_def()
Code Example #20
 def GetParams(self):
   """Single vgg layer test in TF-TRT conversion."""
   dtype = dtypes.float32
   input_name = "input"
   input_dims = [5, 8, 8, 2]
   output_name = "output"
   g = ops.Graph()
   with g.as_default():
     x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
     x, _, _ = nn_impl.fused_batch_norm(
         x, [1.0, 1.0], [0.0, 0.0],
         mean=[0.5, 0.5],
         variance=[1.0, 1.0],
         is_training=False)
     e = constant_op.constant(
         np.random.randn(1, 1, 2, 6), name="weights", dtype=dtype)
     conv = nn.conv2d(
         input=x, filter=e, strides=[1, 2, 2, 1], padding="SAME", name="conv")
     b = constant_op.constant(np.random.randn(6), name="bias", dtype=dtype)
     t = nn.bias_add(conv, b, name="biasAdd")
     relu = nn.relu(t, "relu")
     idty = array_ops.identity(relu, "ID")
     v = nn_ops.max_pool(
         idty, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
     array_ops.squeeze(v, name=output_name)
   return trt_test.TfTrtIntegrationTestParams(
       gdef=g.as_graph_def(),
       input_names=[input_name],
       input_dims=[input_dims],
       output_names=[output_name],
       expected_output_dims=[(5, 2, 2, 6)])
Code Example #21
 def GetParams(self):
     dtype = dtypes.float32
     input_name = "input"
     input_dims = [[[1, 10, 10, 2]], [[2, 10, 10, 2]], [[4, 10, 10, 2]],
                   [[2, 10, 10, 2]]]
     expected_output_dims = [[[1, 10, 10, 1]], [[2, 10, 10, 1]],
                             [[4, 10, 10, 1]], [[2, 10, 10, 1]]]
     output_name = "output"
     g = ops.Graph()
     with g.as_default():
         x = array_ops.placeholder(dtype=dtype,
                                   shape=[None, 10, 10, 2],
                                   name=input_name)
         conv_filter = constant_op.constant(np.random.randn(3, 3, 2, 1),
                                            dtype=dtypes.float32)
         x = nn.conv2d(input=x,
                       filter=conv_filter,
                       strides=[1, 1, 1, 1],
                       padding="SAME",
                       name="conv")
         bias = constant_op.constant(np.random.randn(1, 10, 10, 1),
                                     dtype=dtypes.float32)
         x = math_ops.add(x, bias)
         x = nn.relu(x)
         x = array_ops.identity(x, name="output")
     return trt_test.TfTrtIntegrationTestParams(
         gdef=g.as_graph_def(),
         input_names=[input_name],
         input_dims=input_dims,
         output_names=[output_name],
         expected_output_dims=expected_output_dims)
Code Example #22
File: test_tftrt.py Project: namseob/tensorrt_test
def get_simple_graph_def():
    """Create a simple graph and return its graph_def."""
    g = ops.Graph()
    with g.as_default():
        a = aops.placeholder(dtype=dtypes.float32,
                             shape=(None, 24, 24, 2),
                             name="input")
        e = cop.constant(
            [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
            name="weights",
            dtype=dtypes.float32)
        conv = nn.conv2d(input=a,
                         filter=e,
                         strides=[1, 2, 2, 1],
                         padding="SAME",
                         name="conv")
        b = cop.constant([4., 1.5, 2., 3., 5., 7.],
                         name="bias",
                         dtype=dtypes.float32)
        t = nn.bias_add(conv, b, name="biasAdd")
        relu = nn.relu(t, "relu")
        idty = aops.identity(relu, "ID")
        v = nn_ops.max_pool(idty, [1, 2, 2, 1], [1, 2, 2, 1],
                            "VALID",
                            name="max_pool")
        aops.squeeze(v, name="output")
    return g.as_graph_def()
Code Example #23
 def GetParams(self):
     """Neighboring node wiring tests in TF-TRT conversion."""
     dtype = dtypes.float32
     input_name = "input"
     input_dims = [2, 3, 7, 5]
     output_name = "output"
     g = ops.Graph()
     with g.as_default():
         x = array_ops.placeholder(dtype=dtype,
                                   shape=input_dims,
                                   name=input_name)
         e = constant_op.constant(np.random.normal(.3, 0.05, [3, 2, 3, 4]),
                                  name="weights",
                                  dtype=dtype)
         conv = nn.conv2d(input=x,
                          filter=e,
                          data_format="NCHW",
                          strides=[1, 1, 1, 1],
                          padding="VALID",
                          name="conv")
         b = constant_op.constant(np.random.normal(1.0, 1.0, [1, 4, 1, 1]),
                                  name="bias",
                                  dtype=dtype)
         t = math_ops.mul(conv, b, name="mul")
         e = self.trt_incompatible_op(conv, name="incompatible")
         t = math_ops.sub(t, e, name="sub")
         array_ops.squeeze(t, name=output_name)
      return trt_test.TfTrtIntegrationTestParams(
          gdef=g.as_graph_def(),
          input_names=[input_name],
          input_dims=[input_dims],
          output_names=[output_name],
          expected_output_dims=[(2, 4, 5, 4)])
Code Example #24
 def _RunGraphWithConfig(self, config, batch_size=14, image_dim=12):
     """Run a simple layered graph with conv, an intermediate op, and a ReLU."""
     graph = ops.Graph()
     with graph.as_default():
         random_seed.set_random_seed(1)
         current_activation = variable_scope.get_variable(
             name='start', shape=[batch_size, image_dim, image_dim, 5])
         conv_filter = variable_scope.get_variable(name='filter',
                                                   shape=[5, 5, 5, 5])
         for layer_number in range(10):
             with variable_scope.variable_scope(
                     'layer_{}'.format(layer_number)):
                 after_conv = nn.conv2d(current_activation, conv_filter,
                                        [1, 1, 1, 1], 'SAME')
                 current_activation = 2. * after_conv
                 current_activation = nn.relu(current_activation)
         loss = math_ops.reduce_mean(current_activation)
         optimizer = train.AdamOptimizer(0.001)
         train_op = optimizer.minimize(loss)
         init_op = variables.global_variables_initializer()
         with session.Session(config=config, graph=graph) as sess:
             sess.run(init_op)
             sess.run(train_op)
             sess.run(train_op)
             return sess.run(loss)
Code Example #25
  def GraphFn(self, x):
    dtype = x.dtype
    e = constant_op.constant(
        np.random.normal(.05, .005, [3, 2, 3, 4]), name="weights", dtype=dtype)
    conv = nn.conv2d(
        input=x,
        filter=e,
        data_format="NCHW",
        strides=[1, 1, 1, 1],
        padding="VALID",
        name="conv")
    b = constant_op.constant(
        np.random.normal(2.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
    t = conv + b

    b = constant_op.constant(
        np.random.normal(5.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
    q = conv - b
    edge = self.trt_incompatible_op(q)

    b = constant_op.constant(
        np.random.normal(5.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
    d = b + conv
    edge3 = self.trt_incompatible_op(d)

    edge1 = self.trt_incompatible_op(conv)
    t = t - edge1
    q = q + edge
    t = t + q
    t = t + d
    t = t - edge3
    return array_ops.squeeze(t, name="output_0")
Code Example #26
 def loop_fn(i):
     x1 = array_ops.gather(x, i)
     return nn.conv2d(x1,
                      filt,
                      strides=[1, 2, 2, 1],
                      padding="VALID",
                      data_format="NHWC")
Code Example #27
 def model(device):
     with ops.device(device):
         x = array_ops.placeholder(np.float32, shape=[2])
         x_bcast = gen_array_ops.broadcast_to(
             x, shape=[2, 256, 256, 2])
         w_bcast = gen_array_ops.broadcast_to(x, shape=[2, 2, 2, 2])
         y = nn.conv2d(x_bcast,
                       w_bcast,
                       strides=1,
                       padding="SAME",
                       name="a")
         y = nn.conv2d(y,
                       w_bcast,
                       strides=1,
                       padding="SAME",
                       name="b")
         return sess.run(y, {x: np.ones(x.shape)})
Code Example #28
  def GetParams(self):
    # TODO(laigd): we should test the following cases:
    # - batch size is not changed, other dims are changing
    # - batch size is decreasing, other dims are identical
    # - batch size is decreasing, other dims are changing
    # - batch size is increasing, other dims are identical
    # - batch size is increasing, other dims are changing
    input_dims = [[[1, 5, 5, 1]], [[10, 5, 5, 1]], [[3, 5, 5, 1]],
                  [[1, 5, 5, 1]], [[1, 3, 1, 1]], [[2, 9, 9, 1]],
                  [[1, 224, 224, 1]], [[1, 128, 224, 1]]]
    expected_output_dims = input_dims

    g = ops.Graph()
    with g.as_default():
      x = array_ops.placeholder(
          shape=(None, None, None, 1), dtype=dtypes.float32, name="input")
      conv_filter1 = constant_op.constant(
          np.ones([3, 3, 1, 8]), name="weights1", dtype=dtypes.float32)
      bias1 = constant_op.constant(np.random.randn(8), dtype=dtypes.float32)
      x = nn.conv2d(
          input=x,
          filter=conv_filter1,
          strides=[1, 1, 1, 1],
          padding="SAME",
          name="conv")
      x = nn.bias_add(x, bias1)
      x = nn.relu(x)
      conv_filter2 = constant_op.constant(
          np.ones([3, 3, 8, 1]), name="weights2", dtype=dtypes.float32)
      bias2 = constant_op.constant(np.random.randn(1), dtype=dtypes.float32)
      x = nn.conv2d(
          input=x,
          filter=conv_filter2,
          strides=[1, 1, 1, 1],
          padding="SAME",
          name="conv")
      x = nn.bias_add(x, bias2)
      x = array_ops.identity(x, name="output")

    return trt_test.TfTrtIntegrationTestParams(
        gdef=g.as_graph_def(),
        input_names=["input"],
        input_dims=input_dims,
        output_names=["output"],
        expected_output_dims=expected_output_dims)
Code Example #29
 def GraphFn(self, inp):
     dtype = inp.dtype
     e1 = constant_op.constant(np.random.randn(1, 1, 3, 5),
                               name="kernel_1",
                               dtype=dtype)
     e2 = constant_op.constant(np.random.randn(1, 1, 5, 10),
                               name="kernel_2",
                               dtype=dtype)
     conv = nn.conv2d(input=inp,
                      filter=e1,
                      strides=[1, 1, 1, 1],
                      padding="VALID",
                      name="conv")
     out = nn.conv2d(input=conv,
                     filter=e2,
                     strides=[1, 1, 1, 1],
                     padding="VALID",
                     name="conv_2")
     return array_ops.squeeze(out, name="output_0")
Code Example #30
    def GetParams(self):
        """Create a graph containing multiple segment."""
        # TODO(aaroey): test graph with different dtypes.
        dtype = dtypes.float32
        input_name = "input"
        input_dims = [100, 24, 24, 2]
        g = ops.Graph()
        with g.as_default():
            inp = array_ops.placeholder(dtype=dtype,
                                        shape=[None] + input_dims[1:],
                                        name=input_name)
            with g.device("/GPU:0"):
                conv_filter = constant_op.constant(
                    [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
                    name="weights",
                    dtype=dtype)
                conv = nn.conv2d(input=inp,
                                 filter=conv_filter,
                                 strides=[1, 2, 2, 1],
                                 padding="SAME",
                                 name="conv")
                c1 = constant_op.constant(
                    np.random.randn(input_dims[0], 12, 12, 6),
                    dtype=dtype,
                    name="c1")
                p = math_ops.mul(conv, c1, name="mul")
                c2 = constant_op.constant(
                    np.random.randn(input_dims[0], 12, 12, 6),
                    dtype=dtype,
                    name="c2")
                q = math_ops.div(conv, c2, name="div")

                edge = self.trt_incompatible_op(q, name="incompatible")
                edge = math_ops.div(edge, edge, name="div1")
                r = math_ops.add(edge, edge, name="add")

                p = math_ops.sub(p, edge, name="sub")
                q = math_ops.mul(q, edge, name="mul1")
                s = math_ops.add(p, q, name="add1")
                s = math_ops.sub(s, r, name="sub1")
            array_ops.squeeze(s, name=self.output_name)
        return trt_test.TfTrtIntegrationTestParams(
            gdef=g.as_graph_def(),
            input_names=[input_name],
            input_dims=[input_dims],
            # TODO(aaroey): LayoutOptimizer adds additional nodes to the graph which
            # breaks the connection check, fix it.
            # - my_trt_op_0 should have ["mul", "sub", "div1", "mul1", "add1",
            #   "add", "sub1"];
            # - my_trt_op_1 should have ["weights","conv", "div"]
            expected_engines=["my_trt_op_0", "my_trt_op_1"],
            expected_output_dims=(100, 12, 12, 6),
            allclose_atol=1.e-03,
            allclose_rtol=1.e-03)
Code Example #31
 def GraphFn(self, x):
     conv_filter = constant_op.constant(np.random.randn(3, 3, 2, 1),
                                        dtype=dtypes.float32)
     x = nn.conv2d(input=x,
                   filter=conv_filter,
                   strides=[1, 1, 1, 1],
                   padding="SAME",
                   name="conv")
     bias = constant_op.constant(np.random.randn(1, 10, 10, 1),
                                 dtype=dtypes.float32)
     x = math_ops.add(x, bias)
     x = nn.relu(x)
     return array_ops.identity(x, name="output")
Code Example #32
    def GetParams(self):
        """Test for multi connection neighboring nodes wiring tests in TF-TRT."""
        dtype = dtypes.float32
        input_name = "input"
        input_dims = [2, 3, 7, 5]
        output_name = "output"
        g = ops.Graph()
        with g.as_default():
            x = array_ops.placeholder(dtype=dtype,
                                      shape=input_dims,
                                      name=input_name)
            e = constant_op.constant(np.random.normal(.05, .005, [3, 2, 3, 4]),
                                     name="weights",
                                     dtype=dtype)
            conv = nn.conv2d(input=x,
                             filter=e,
                             data_format="NCHW",
                             strides=[1, 1, 1, 1],
                             padding="VALID",
                             name="conv")
            b = constant_op.constant(np.random.normal(2.0, 1.0, [1, 4, 1, 1]),
                                     name="bias",
                                     dtype=dtype)
            t = conv + b

            b = constant_op.constant(np.random.normal(5.0, 1.0, [1, 4, 1, 1]),
                                     name="bias",
                                     dtype=dtype)
            q = conv - b
            edge = self.trt_incompatible_op(q)

            b = constant_op.constant(np.random.normal(5.0, 1.0, [1, 4, 1, 1]),
                                     name="bias",
                                     dtype=dtype)
            d = b + conv
            edge3 = self.trt_incompatible_op(d)

            edge1 = self.trt_incompatible_op(conv)
            t = t - edge1
            q = q + edge
            t = t + q
            t = t + d
            t = t - edge3
            array_ops.squeeze(t, name=output_name)
        return trt_test.TfTrtIntegrationTestParams(
            gdef=g.as_graph_def(),
            input_names=[input_name],
            input_dims=[input_dims],
            output_names=[output_name],
            expected_output_dims=[(2, 4, 5, 4)])
Code Example #33
    def GetParams(self):
        """Create a graph containing multiple segment."""
        # TODO(aaroey): test graph with different dtypes.
        dtype = dtypes.float32
        input_name = "input"
        input_dims = [100, 24, 24, 2]
        output_name = "output"
        g = ops.Graph()
        with g.as_default():
            inp = array_ops.placeholder(dtype=dtype,
                                        shape=[None] + input_dims[1:],
                                        name=input_name)
            with g.device("/GPU:0"):
                conv_filter = constant_op.constant(
                    [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
                    name="weights",
                    dtype=dtype)
                conv = nn.conv2d(input=inp,
                                 filter=conv_filter,
                                 strides=[1, 2, 2, 1],
                                 padding="SAME",
                                 name="conv")
                c1 = constant_op.constant(
                    np.random.randn(input_dims[0], 12, 12, 6),
                    dtype=dtype,
                    name="c1")
                p = math_ops.mul(conv, c1, name="mul")
                c2 = constant_op.constant(
                    np.random.randn(input_dims[0], 12, 12, 6),
                    dtype=dtype,
                    name="c2")
                q = math_ops.div(conv, c2, name="div")

                edge = self.trt_incompatible_op(q, name="incompatible")
                edge = math_ops.div(edge, edge, name="div1")
                r = math_ops.add(edge, edge, name="add")

                p = math_ops.sub(p, edge, name="sub")
                q = math_ops.mul(q, edge, name="mul1")
                s = math_ops.add(p, q, name="add1")
                s = math_ops.sub(s, r, name="sub1")
            array_ops.squeeze(s, name=output_name)
        return trt_test.TfTrtIntegrationTestParams(
            gdef=g.as_graph_def(),
            input_names=[input_name],
            input_dims=[input_dims],
            output_names=[output_name],
            expected_output_dims=[(100, 12, 12, 6)])
Code Example #34
 def GraphFn(self, x):
     conv_filter1 = constant_op.constant(np.ones([3, 3, 1, 8]),
                                         name="weights1",
                                         dtype=dtypes.float32)
     bias1 = constant_op.constant(np.random.randn(8), dtype=dtypes.float32)
     x = nn.conv2d(input=x,
                   filter=conv_filter1,
                   strides=[1, 1, 1, 1],
                   padding="SAME",
                   name="conv")
     x = nn.bias_add(x, bias1)
     x = nn.relu(x)
     conv_filter2 = constant_op.constant(np.ones([3, 3, 8, 1]),
                                         name="weights2",
                                         dtype=dtypes.float32)
     bias2 = constant_op.constant(np.random.randn(1), dtype=dtypes.float32)
     x = nn.conv2d(input=x,
                   filter=conv_filter2,
                   strides=[1, 1, 1, 1],
                   padding="SAME",
                   name="conv")
     x = nn.bias_add(x, bias2)
     return array_ops.identity(x, name="output")
Code Example #35
File: base_test.py Project: ZhangXinNan/tensorflow
  def GetParams(self):
    """Create a graph containing multiple segment."""
    # TODO(aaroey): test graph with different dtypes.
    dtype = dtypes.float32
    input_name = "input"
    input_dims = [100, 24, 24, 2]
    g = ops.Graph()
    with g.as_default():
      inp = array_ops.placeholder(
          dtype=dtype, shape=[None] + input_dims[1:], name=input_name)
      with g.device("/GPU:0"):
        conv_filter = constant_op.constant(
            [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
            name="weights",
            dtype=dtype)
        conv = nn.conv2d(
            input=inp,
            filter=conv_filter,
            strides=[1, 2, 2, 1],
            padding="SAME",
            name="conv")
        c1 = constant_op.constant(
            np.random.randn(input_dims[0], 12, 12, 6), dtype=dtype, name="c1")
        p = math_ops.mul(conv, c1, name="mul")
        c2 = constant_op.constant(
            np.random.randn(input_dims[0], 12, 12, 6), dtype=dtype, name="c2")
        q = math_ops.div(conv, c2, name="div")

        edge = self.trt_incompatible_op(q, name="incompatible")
        edge = math_ops.div(edge, edge, name="div1")
        r = math_ops.add(edge, edge, name="add")

        p = math_ops.sub(p, edge, name="sub")
        q = math_ops.mul(q, edge, name="mul1")
        s = math_ops.add(p, q, name="add1")
        s = math_ops.sub(s, r, name="sub1")
      array_ops.squeeze(s, name=self.output_name)
    return trt_test.TfTrtIntegrationTestParams(
        gdef=g.as_graph_def(),
        input_names=[input_name],
        input_dims=[input_dims],
        # TODO(aaroey): LayoutOptimizer adds additional nodes to the graph which
        # breaks the connection check, fix it.
        # - my_trt_op_0 should have ["mul", "sub", "div1", "mul1", "add1",
        #   "add", "sub1"];
        # - my_trt_op_1 should have ["weights","conv", "div"]
        expected_engines=["my_trt_op_0", "my_trt_op_1"],
        expected_output_dims=(100, 12, 12, 6),
        allclose_atol=1.e-03,
        allclose_rtol=1.e-03)
Code Example #36
File: base_test.py Project: zyy2020/tensorflow
    def GetParams(self):
        """Create a graph containing multiple segment."""
        # TODO(aaroey): test graph with different dtypes.
        dtype = dtypes.float32
        input_name = "input"
        input_dims = [100, 24, 24, 2]
        g = ops.Graph()
        with g.as_default():
            inp = array_ops.placeholder(dtype=dtype,
                                        shape=[None] + input_dims[1:],
                                        name=input_name)
            with g.device("/GPU:0"):
                conv_filter = constant_op.constant(
                    [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
                    name="weights",
                    dtype=dtype)
                conv = nn.conv2d(input=inp,
                                 filter=conv_filter,
                                 strides=[1, 2, 2, 1],
                                 padding="SAME",
                                 name="conv")
                c1 = constant_op.constant(
                    np.random.randn(input_dims[0], 12, 12, 6), dtype=dtype)
                p = conv * c1
                c2 = constant_op.constant(
                    np.random.randn(input_dims[0], 12, 12, 6), dtype=dtype)
                q = conv / c2

                edge = self.trt_incompatible_op(q)
                edge /= edge
                r = edge + edge

                p -= edge
                q *= edge
                s = p + q
                s -= r
            array_ops.squeeze(s, name=self.output_name)
        return trt_test.TfTrtIntegrationTestParams(
            gdef=g.as_graph_def(),
            input_names=[input_name],
            input_dims=[input_dims],
            num_expected_engines=2,
            expected_output_dims=(100, 12, 12, 6),
            allclose_atol=1.e-03,
            allclose_rtol=1.e-03)
Code Example #37
File: conv_ops.py Project: sweaterr/tensorflow-1
def conv2d(tensor_in,
           n_filters,
           filter_shape,
           strides=None,
           padding='SAME',
           bias=True,
           activation=None,
           batch_norm=False):
    """Creates 2D convolutional subgraph with bank of filters.

  Uses tf.nn.conv2d under the hood.
  Creates a filter bank:
    [filter_shape[0], filter_shape[1], tensor_in[3], n_filters]
  and applies it to the input tensor.

  Args:
    tensor_in: input Tensor, 4D shape:
      [batch, in_height, in_width, in_depth].
    n_filters: number of filters in the bank.
    filter_shape: Shape of filters, a list of ints, 1-D of length 2.
    strides: A list of ints, 1-D of length 4. The stride of the sliding
      window for each dimension of input.
    padding: A string: 'SAME' or 'VALID'. The type of padding algorithm to use.
      See the [comment here]
      (https://www.tensorflow.org/api_docs/python/nn.html#convolution)
    bias: Boolean, whether to add a bias term.
    activation: Activation Op, optional. If provided, it is applied to the output.
    batch_norm: Whether to apply batch normalization.

  Returns:
    A Tensor with resulting convolution.
  """
    with vs.variable_scope('convolution'):
        if strides is None:
            strides = [1, 1, 1, 1]
        input_shape = tensor_in.get_shape()
        filter_shape = list(filter_shape) + [input_shape[3], n_filters]
        filters = vs.get_variable('filters', filter_shape, dtypes.float32)
        output = nn.conv2d(tensor_in, filters, strides, padding)
        if bias:
            bias_var = vs.get_variable('bias', [1, 1, 1, n_filters],
                                       dtypes.float32)
            output += bias_var
        if batch_norm:
            output = batch_normalize(output, convnet=True)
        if activation:
            output = activation(output)
        return output
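A hedged usage sketch for the conv2d helper above, assuming the same TF1-style module imports used by conv_ops.py plus array_ops; the placeholder shape and filter count are illustrative:

 images = array_ops.placeholder(dtypes.float32, [None, 28, 28, 3])
 features = conv2d(images, n_filters=16, filter_shape=[3, 3],
                   activation=nn.relu)
 # With the default 'SAME' padding and unit strides, features has
 # shape (?, 28, 28, 16).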
Code Example #38
  def GetParams(self):
    """Test for multi connection neighboring nodes wiring tests in TF-TRT."""
    dtype = dtypes.float32
    input_name = "input"
    input_dims = [2, 3, 7, 5]
    g = ops.Graph()
    with g.as_default():
      x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
      e = constant_op.constant(
          np.random.normal(.05, .005, [3, 2, 3, 4]),
          name="weights",
          dtype=dtype)
      conv = nn.conv2d(
          input=x,
          filter=e,
          data_format="NCHW",
          strides=[1, 1, 1, 1],
          padding="VALID",
          name="conv")
      b = constant_op.constant(
          np.random.normal(2.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
      t = conv + b

      b = constant_op.constant(
          np.random.normal(5.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
      q = conv - b
      edge = math_ops.sigmoid(q)

      b = constant_op.constant(
          np.random.normal(5.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
      d = b + conv
      edge3 = math_ops.sigmoid(d)

      edge1 = gen_math_ops.tan(conv)
      t = t - edge1
      q = q + edge
      t = t + q
      t = t + d
      t = t - edge3
      array_ops.squeeze(t, name=self.output_name)
    return trt_test.TfTrtIntegrationTestParams(
        gdef=g.as_graph_def(),
        input_names=[input_name],
        input_dims=[input_dims],
        expected_engines=["my_trt_op_0", "my_trt_op_1"],
        expected_output_dims=(2, 4, 5, 4),
        allclose_atol=1.e-03,
        allclose_rtol=1.e-03)
Code Example #39
  def GetParams(self):
    """Test for multi connection neighboring nodes wiring tests in TF-TRT."""
    dtype = dtypes.float32
    input_name = "input"
    input_dims = [2, 3, 7, 5]
    output_name = "output"
    g = ops.Graph()
    with g.as_default():
      x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
      e = constant_op.constant(
          np.random.normal(.05, .005, [3, 2, 3, 4]),
          name="weights",
          dtype=dtype)
      conv = nn.conv2d(
          input=x,
          filter=e,
          data_format="NCHW",
          strides=[1, 1, 1, 1],
          padding="VALID",
          name="conv")
      b = constant_op.constant(
          np.random.normal(2.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
      t = conv + b

      b = constant_op.constant(
          np.random.normal(5.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
      q = conv - b
      edge = self.trt_incompatible_op(q)

      b = constant_op.constant(
          np.random.normal(5.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
      d = b + conv
      edge3 = self.trt_incompatible_op(d)

      edge1 = self.trt_incompatible_op(conv)
      t = t - edge1
      q = q + edge
      t = t + q
      t = t + d
      t = t - edge3
      array_ops.squeeze(t, name=output_name)
    return trt_test.TfTrtIntegrationTestParams(
        gdef=g.as_graph_def(),
        input_names=[input_name],
        input_dims=[[input_dims]],
        output_names=[output_name],
        expected_output_dims=[[[2, 4, 5, 4]]])
Code Example #40
File: conv_ops.py Project: 0ruben/tensorflow
def conv2d(tensor_in,
           n_filters,
           filter_shape,
           strides=None,
           padding='SAME',
           bias=True,
           activation=None,
           batch_norm=False):
  """Creates 2D convolutional subgraph with bank of filters.

  Uses tf.nn.conv2d under the hood.
  Creates a filter bank:
    [filter_shape[0], filter_shape[1], tensor_in[3], n_filters]
  and applies it to the input tensor.

  Args:
    tensor_in: input Tensor, 4D shape:
      [batch, in_height, in_width, in_depth].
    n_filters: number of filters in the bank.
    filter_shape: Shape of filters, a list of ints, 1-D of length 2.
    strides: A list of ints, 1-D of length 4. The stride of the sliding
      window for each dimension of input.
    padding: A string: 'SAME' or 'VALID'. The type of padding algorithm to use.
      See the [comment here]
      (https://www.tensorflow.org/api_docs/python/nn.html#convolution)
    bias: Boolean, whether to add a bias term.
    activation: Activation Op, optional. If provided, it is applied to the output.
    batch_norm: Whether to apply batch normalization.

  Returns:
    A Tensor with resulting convolution.
  """
  with vs.variable_scope('convolution'):
    if strides is None:
      strides = [1, 1, 1, 1]
    input_shape = tensor_in.get_shape()
    filter_shape = list(filter_shape) + [input_shape[3], n_filters]
    filters = vs.get_variable('filters', filter_shape, dtypes.float32)
    output = nn.conv2d(tensor_in, filters, strides, padding)
    if bias:
      bias_var = vs.get_variable('bias', [1, 1, 1, n_filters], dtypes.float32)
      output += bias_var
    if batch_norm:
      output = batch_normalize(output, convnet=True)
    if activation:
      output = activation(output)
    return output
Code Example #41
def preact_conv2d(inputs,
                  num_outputs,
                  kernel_size,
                  stride=1,
                  padding='SAME',
                  activation_fn=nn.relu,
                  normalizer_fn=None,
                  normalizer_params=None,
                  weights_initializer=initializers.xavier_initializer(),
                  weights_regularizer=None,
                  reuse=None,
                  variables_collections=None,
                  outputs_collections=None,
                  trainable=True,
                  scope=None):
    """Adds a 2D convolution preceded by batch normalization and activation.
    """
    with variable_scope.variable_scope(scope,
                                       'Conv',
                                       values=[inputs],
                                       reuse=reuse) as sc:
        inputs = ops.convert_to_tensor(inputs)
        dtype = inputs.dtype.base_dtype
        if normalizer_fn:
            normalizer_params = normalizer_params or {}
            inputs = normalizer_fn(inputs,
                                   activation_fn=activation_fn,
                                   **normalizer_params)
        kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
        stride_h, stride_w = utils.two_element_tuple(stride)
        num_filters_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
        weights_shape = [kernel_h, kernel_w, num_filters_in, num_outputs]
        weights_collections = utils.get_variable_collections(
            variables_collections, 'weights')
        weights = variables.model_variable('weights',
                                           shape=weights_shape,
                                           dtype=dtype,
                                           initializer=weights_initializer,
                                           regularizer=weights_regularizer,
                                           collections=weights_collections,
                                           trainable=trainable)
        outputs = nn.conv2d(inputs,
                            weights, [1, stride_h, stride_w, 1],
                            padding=padding)
        return utils.collect_named_outputs(outputs_collections, sc.name,
                                           outputs)
Code example #42
 def _conv_and_pool_1(self, inp):
     dtype = inp.dtype
     conv_filter = constant_op.constant(
         [[[[1., 0.5], [4., 6.], [0.5, 1.]]]], name="weights", dtype=dtype)
     conv = nn.conv2d(input=inp,
                      filter=conv_filter,
                      strides=[1, 2, 2, 1],
                      padding="SAME",
                      name="conv")
     bias = constant_op.constant([4., 1.5], name="bias", dtype=dtype)
     added = nn.bias_add(conv, bias, name="bias_add")
     relu = nn.relu(added, "relu")
     identity = array_ops.identity(relu, "identity")
     pool = nn_ops.max_pool(identity, [1, 2, 2, 1], [1, 2, 2, 1],
                            "VALID",
                            name="max_pool")
     return array_ops.squeeze(pool)
Code example #43
 def GetParams(self):
     """Single vgg layer in NCHW unit tests in TF-TRT."""
     dtype = dtypes.float32
     input_name = "input"
     input_dims = [5, 2, 8, 8]
     g = ops.Graph()
     with g.as_default():
         x = array_ops.placeholder(dtype=dtype,
                                   shape=input_dims,
                                   name=input_name)
         x, _, _ = nn_impl.fused_batch_norm(
             x,
             np.random.randn(2).astype(np.float32),
             np.random.randn(2).astype(np.float32),
             mean=np.random.randn(2).astype(np.float32),
             variance=np.random.randn(2).astype(np.float32),
             data_format="NCHW",
             is_training=False)
         e = constant_op.constant(np.random.randn(1, 1, 2, 6),
                                  name="weights",
                                  dtype=dtype)
         conv = nn.conv2d(input=x,
                          filter=e,
                          data_format="NCHW",
                          strides=[1, 1, 2, 2],
                          padding="SAME",
                          name="conv")
         b = constant_op.constant(np.random.randn(6),
                                  name="bias",
                                  dtype=dtype)
         t = nn.bias_add(conv, b, data_format="NCHW", name="biasAdd")
         relu = nn.relu(t, "relu")
         idty = array_ops.identity(relu, "ID")
         v = nn_ops.max_pool(idty, [1, 1, 2, 2], [1, 1, 2, 2],
                             "VALID",
                             data_format="NCHW",
                             name="max_pool")
         array_ops.squeeze(v, name="output")
     return trt_test.TfTrtIntegrationTestParams(gdef=g.as_graph_def(),
                                                input_names=[input_name],
                                                input_dims=[input_dims],
                                                num_expected_engines=1,
                                                expected_output_dims=(5, 6,
                                                                      2, 2),
                                                allclose_atol=1.e-03,
                                                allclose_rtol=1.e-03)
Code example #44
File: base_test.py Project: StephenOman/tensorflow
  def GetParams(self):
    """Create a graph containing multiple segment."""
    # TODO(aaroey): test graph with different dtypes.
    dtype = dtypes.float32
    input_name = "input"
    input_dims = [100, 24, 24, 2]
    g = ops.Graph()
    with g.as_default():
      inp = array_ops.placeholder(
          dtype=dtype, shape=[None] + input_dims[1:], name=input_name)
      with g.device("/GPU:0"):
        conv_filter = constant_op.constant(
            [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
            name="weights",
            dtype=dtype)
        conv = nn.conv2d(
            input=inp,
            filter=conv_filter,
            strides=[1, 2, 2, 1],
            padding="SAME",
            name="conv")
        c1 = constant_op.constant(
            np.random.randn(input_dims[0], 12, 12, 6), dtype=dtype)
        p = conv * c1
        c2 = constant_op.constant(
            np.random.randn(input_dims[0], 12, 12, 6), dtype=dtype)
        q = conv / c2

        edge = self.trt_incompatible_op(q)
        edge /= edge
        r = edge + edge

        p -= edge
        q *= edge
        s = p + q
        s -= r
      array_ops.squeeze(s, name=self.output_name)
    return trt_test.TfTrtIntegrationTestParams(
        gdef=g.as_graph_def(),
        input_names=[input_name],
        input_dims=[input_dims],
        num_expected_engines=2,
        expected_output_dims=(100, 12, 12, 6),
        allclose_atol=1.e-03,
        allclose_rtol=1.e-03)
Code example #45
File: base_test.py Project: aeverall/tensorflow
  def GetParams(self):
    """Create a graph containing multiple segment."""
    # TODO(aaroey): test graph with different dtypes.
    dtype = dtypes.float32
    input_name = "input"
    input_dims = [100, 24, 24, 2]
    output_name = "output"
    g = ops.Graph()
    with g.as_default():
      inp = array_ops.placeholder(
          dtype=dtype, shape=input_dims, name=input_name)
      with g.device("/GPU:0"):
        conv_filter = constant_op.constant(
            [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
            name="weights",
            dtype=dtype)
        conv = nn.conv2d(
            input=inp,
            filter=conv_filter,
            strides=[1, 2, 2, 1],
            padding="SAME",
            name="conv")
        c1 = constant_op.constant(
            np.random.randn(12, 12, 6), dtype=dtype, name="c1")
        p = math_ops.mul(conv, c1, name="mul")
        c2 = constant_op.constant(
            np.random.randn(12, 12, 6), dtype=dtype, name="c2")
        q = math_ops.div(conv, c2, name="div")

        edge = self.trt_incompatible_op(q, name="incompatible")
        edge = math_ops.div(edge, edge, name="div1")
        r = math_ops.add(edge, edge, name="add")

        p = math_ops.sub(p, edge, name="sub")
        q = math_ops.mul(q, edge, name="mul1")
        s = math_ops.add(p, q, name="add1")
        s = math_ops.sub(s, r, name="sub1")
      array_ops.squeeze(s, name=output_name)
    return trt_test.TfTrtIntegrationTestParams(
        gdef=g.as_graph_def(),
        input_names=[input_name],
        input_dims=[input_dims],
        output_names=[output_name],
        expected_output_dims=[(100, 12, 12, 6)])
Code example #46
 def GetParams(self):
   """Single vgg layer in NCHW unit tests in TF-TRT."""
   dtype = dtypes.float32
   input_name = "input"
   input_dims = [5, 2, 8, 8]
   g = ops.Graph()
   with g.as_default():
     x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
     x, _, _ = nn_impl.fused_batch_norm(
         x,
         np.random.randn(2).astype(np.float32),
         np.random.randn(2).astype(np.float32),
         mean=np.random.randn(2).astype(np.float32),
         variance=np.random.randn(2).astype(np.float32),
         data_format="NCHW",
         is_training=False)
     e = constant_op.constant(
         np.random.randn(1, 1, 2, 6), name="weights", dtype=dtype)
     conv = nn.conv2d(
         input=x,
         filter=e,
         data_format="NCHW",
         strides=[1, 1, 2, 2],
         padding="SAME",
         name="conv")
     b = constant_op.constant(np.random.randn(6), name="bias", dtype=dtype)
     t = nn.bias_add(conv, b, data_format="NCHW", name="biasAdd")
     relu = nn.relu(t, "relu")
     idty = array_ops.identity(relu, "ID")
     v = nn_ops.max_pool(
         idty, [1, 1, 2, 2], [1, 1, 2, 2],
         "VALID",
         data_format="NCHW",
         name="max_pool")
     array_ops.squeeze(v, name="output")
   return trt_test.TfTrtIntegrationTestParams(
       gdef=g.as_graph_def(),
       input_names=[input_name],
       input_dims=[input_dims],
       num_expected_engines=1,
       expected_output_dims=(5, 6, 2, 2),
       allclose_atol=1.e-03,
       allclose_rtol=1.e-03)
Code example #47
File: base_test.py Project: ZhangXinNan/tensorflow
 def GetParams(self):
   """Create a graph containing single segment."""
   # TODO(aaroey): test graph with different dtypes.
   dtype = dtypes.float32
   input_name = "input"
   input_dims = [100, 24, 24, 2]
   g = ops.Graph()
   with g.as_default():
     inp = array_ops.placeholder(
         dtype=dtype, shape=[None] + input_dims[1:], name=input_name)
     with g.device("/GPU:0"):
       conv_filter = constant_op.constant(
           [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
           name="weights",
           dtype=dtype)
       conv = nn.conv2d(
           input=inp,
           filter=conv_filter,
           strides=[1, 2, 2, 1],
           padding="SAME",
           name="conv")
       bias = constant_op.constant(
           [4., 1.5, 2., 3., 5., 7.], name="bias", dtype=dtype)
       added = nn.bias_add(conv, bias, name="bias_add")
       relu = nn.relu(added, "relu")
       identity = array_ops.identity(relu, "identity")
       pool = nn_ops.max_pool(
           identity, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
     array_ops.squeeze(pool, name=self.output_name)
   return trt_test.TfTrtIntegrationTestParams(
       gdef=g.as_graph_def(),
       input_names=[input_name],
       input_dims=[input_dims],
       # TODO(aaroey): LayoutOptimizer adds additional nodes to the graph which
       # breaks the connection check, fix it.
       # - my_trt_op_0 should have ["weights", "conv", "bias", "bias_add",
       #   "relu", "identity", "max_pool"]
       expected_engines=["my_trt_op_0"],
       expected_output_dims=(100, 6, 6, 6),
       allclose_atol=1.e-03,
       allclose_rtol=1.e-03)
Code example #48
 def _GetMetaGraph(self, batch_size=14, image_dim=12, optimizer_scope_name=''):
   """A simple layered graph with conv, an intermediate op, and a ReLU."""
   graph = ops.Graph()
   with graph.as_default():
     random_seed.set_random_seed(1)
     current_activation = variable_scope.get_variable(
         name='start', shape=[batch_size, image_dim, image_dim, 5])
     conv_filter = variable_scope.get_variable(
         name='filter', shape=[5, 5, 5, 5])
     for layer_number in range(10):
       with variable_scope.variable_scope('layer_{}'.format(layer_number)):
         after_conv = nn.conv2d(current_activation, conv_filter, [1, 1, 1, 1],
                                'SAME')
         current_activation = 2. * after_conv
         current_activation = nn.relu(current_activation)
     loss = math_ops.reduce_mean(current_activation)
     with ops.name_scope(optimizer_scope_name):
       optimizer = train.AdamOptimizer(0.001)
       train_op = optimizer.minimize(loss)
     init_op = variables.global_variables_initializer()
     metagraph = train.export_meta_graph()
   return (metagraph, init_op.name, train_op.name, loss.name)
Code example #49
File: test_tftrt.py Project: Eagle732/tensorflow
def get_multi_engine_graph_def(mode="FP32"):
  """Create a simple graph and return its graph_def."""
  dtype = dtypes.float32
  if mode.upper() == "FP16":
    dtype = dtypes.float16

  g = ops.Graph()
  with g.as_default():
    x = aops.placeholder(shape=[None, 3, 7, 5], name="input", dtype=dtype)
    with g.name_scope("Global_scope"):
      with g.name_scope("first_scope"):
        e = cop.constant(
            np.random.randn(3, 2, 3, 4), name="weights", dtype=dtype)
        conv = nn.conv2d(
            input=x,
            filter=e,
            data_format="NCHW",
            strides=[1, 1, 1, 1],
            padding="VALID",
            name="conv")
        b = cop.constant(np.random.randn(1, 4, 1, 1), name="bias1", dtype=dtype)
        t = conv * b

        b = cop.constant(np.random.randn(1, 4, 1, 1), name="bias2", dtype=dtype)
        q = conv / b
      edge = mops.sin(q)
      edge1 = mops.cos(conv)
      with g.name_scope("test_scope"):
        de = edge + edge1
        t -= edge1
        q *= edge
        t += q
        t -= de
    k = aops.squeeze(t, name="output")
  print(k.dtype)
  return g.as_graph_def()
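A hedged sketch of inspecting the returned GraphDef (node names follow from the snippet's name scopes; actually running the NCHW graph generally requires a GPU):

gdef = get_multi_engine_graph_def(mode="FP16")
print(len(gdef.node))  # total node count of the exported graph
# The single Conv2D node sits under the nested name scopes:
print([n.name for n in gdef.node if n.op == "Conv2D"])
# -> ['Global_scope/first_scope/conv']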
Code example #50
File: base_test.py Project: aeverall/tensorflow
 def GetParams(self):
   """Create a graph containing single segment."""
   # TODO(aaroey): test graph with different dtypes.
   dtype = dtypes.float32
   input_name = "input"
   input_dims = [100, 24, 24, 2]
   output_name = "output"
   g = ops.Graph()
   with g.as_default():
     inp = array_ops.placeholder(
         dtype=dtype, shape=[None] + input_dims[1:], name=input_name)
     with g.device("/GPU:0"):
       conv_filter = constant_op.constant(
           [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
           name="weights",
           dtype=dtype)
       conv = nn.conv2d(
           input=inp,
           filter=conv_filter,
           strides=[1, 2, 2, 1],
           padding="SAME",
           name="conv")
       bias = constant_op.constant([4., 1.5, 2., 3., 5., 7.],
                                   name="bias",
                                   dtype=dtype)
       added = nn.bias_add(conv, bias, name="bias_add")
       relu = nn.relu(added, "relu")
       identity = array_ops.identity(relu, "identity")
       pool = nn_ops.max_pool(
           identity, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
     array_ops.squeeze(pool, name=output_name)
   return trt_test.TfTrtIntegrationTestParams(
       gdef=g.as_graph_def(),
       input_names=[input_name],
       input_dims=[input_dims],
       output_names=[output_name],
       expected_output_dims=[(100, 6, 6, 6)])
Code example #51
 def _RunGraphWithConfig(self, config, batch_size=14, image_dim=12):
   """Run a simple layered graph with conv, an intermediate op, and a ReLU."""
   graph = ops.Graph()
   with graph.as_default():
     random_seed.set_random_seed(1)
     current_activation = variable_scope.get_variable(
         name='start', shape=[batch_size, image_dim, image_dim, 5])
     conv_filter = variable_scope.get_variable(
         name='filter', shape=[5, 5, 5, 5])
     for layer_number in range(10):
       with variable_scope.variable_scope('layer_{}'.format(layer_number)):
         after_conv = nn.conv2d(current_activation, conv_filter, [1, 1, 1, 1],
                                'SAME')
         current_activation = 2. * after_conv
         current_activation = nn.relu(current_activation)
     loss = math_ops.reduce_mean(current_activation)
     optimizer = train.AdamOptimizer(0.001)
     train_op = optimizer.minimize(loss)
     init_op = variables.global_variables_initializer()
     with session.Session(config=config, graph=graph) as sess:
       sess.run(init_op)
       sess.run(train_op)
       sess.run(train_op)
       return sess.run(loss)
Code example #52
  def testSinglePartitionedVariable(self):
    """Ensures partitioned variables fail cleanly with freeze graph."""
    checkpoint_prefix = os.path.join(self.get_temp_dir(), "saved_checkpoint")
    checkpoint_state_name = "checkpoint_state"
    input_graph_name = "input_graph.pb"
    output_graph_name = "output_graph.pb"

    # Create a graph with partitioned variables. When weights are partitioned
    # into a single partition, the weights variable is followed by an
    # identity -> identity chain (an additional identity node).
    partitioner = partitioned_variables.fixed_size_partitioner(1)
    with ops.Graph().as_default():
      with variable_scope.variable_scope("part", partitioner=partitioner):
        batch_size, height, width, depth = 5, 128, 128, 3
        input1 = array_ops.zeros(
            (batch_size, height, width, depth), name="input1")
        input2 = array_ops.zeros(
            (batch_size, height, width, depth), name="input2")

        num_nodes = depth
        filter1 = variable_scope.get_variable("filter", [num_nodes, num_nodes])
        filter2 = array_ops.reshape(filter1, [1, 1, num_nodes, num_nodes])
        conv = nn.conv2d(
            input=input1, filter=filter2, strides=[1, 1, 1, 1], padding="SAME")
        node = math_ops.add(conv, input2, name="test/add")
        node = nn.relu6(node, name="test/relu6")

      # Save graph and checkpoints.
      sess = session.Session()
      sess.run(variables.global_variables_initializer())

      saver = saver_lib.Saver()
      checkpoint_path = saver.save(
          sess,
          checkpoint_prefix,
          global_step=0,
          latest_filename=checkpoint_state_name)
      graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)

      # Ensure this graph has partition variables.
      self.assertTrue([
          tensor.name.split(":")[0]
          for op in sess.graph.get_operations()
          for tensor in op.values()
          if re.search(r"/part_\d+/", tensor.name)
      ])

    # Test freezing graph doesn't make it crash.
    output_node_names = "save/restore_all"
    output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)

    return_value = freeze_graph.freeze_graph_with_def_protos(
        input_graph_def=sess.graph_def,
        input_saver_def=None,
        input_checkpoint=checkpoint_path,
        output_node_names=output_node_names,
        restore_op_name="save/restore_all",  # default value
        filename_tensor_name="save/Const:0",  # default value
        output_graph=output_graph_path,
        clear_devices=False,
        initializer_nodes="")
    self.assertEqual(return_value, -1)
Code example #53
File: learn.py Project: CdricGmd/tensorflow
def convolution2d(x,
                  num_output_channels,
                  kernel_size,
                  activation_fn=None,
                  stride=(1, 1),
                  padding='SAME',
                  weight_init=None,
                  bias_init=standard_ops.constant_initializer(0.),
                  num_input_channels=None,
                  name=None,
                  weight_collections=None,
                  bias_collections=None,
                  weight_regularizer=None,
                  create_summaries=True):
  """Adds the parameters for a conv2d layer and returns the output.

  A neural network convolution layer is generally defined as:
  \\\\(y = f(conv2d(w, x) + b)\\\\) where **f** is given by `activation_fn`,
  **conv2d** is `nn.conv2d` and `x` has shape
  `[batch, height, width, channels]`

  This op creates `w` and optionally `b` and adds various summaries that can be
  useful for visualizing learning or diagnosing training problems. Bias can be
  disabled by setting `bias_init` to `None`.

  The variable creation is compatible with `tf.variable_scope` and so can be
  reused with `tf.variable_scope` or `tf.make_template`.

  In almost all cases, the input channels can be inferred from the shape
  of `x`, but if it is unspecified or additional size checks are
  desired, then `num_input_channels` can be specified.

  Most of the details of variable creation can be controlled by specifying the
  initializers (`weight_init` and `bias_init`) and which collections to place
  the created variables in (`weight_collections` and `bias_collections`).

  A per layer regularization can be specified by setting `weight_regularizer`.
  This is only applied to weights and not the bias.

  Args:
    x: The input `Tensor`.
    num_output_channels: The number of output channels (i.e. the size of
      dim[3]).
    kernel_size: A length 2 `list` or `tuple` containing the kernel size.
    activation_fn: A function that requires a single Tensor that is applied as a
      non-linearity.
    stride: A length 2 `list` or `tuple` specifying the stride of the sliding
      window across the image.
    padding: A `string` from: "SAME", "VALID". The type of padding algorithm to
      use.
    weight_init: An optional initialization. If not specified, uses Xavier
      initialization (see `tf.learn.xavier_initializer`).
    bias_init: An initializer for the bias, defaults to 0. Set to `None` in
      order to disable bias.
    num_input_channels: The length of the channel dimension in the input.
    name: The name for this operation is used to name operations and to find
      variables. If specified it must be unique for this scope, otherwise a
      unique name starting with "convolution2d" will be created.  See
      `tf.variable_op_scope` for details.
    weight_collections: List of graph collections for just weights.
    bias_collections: List of graph collections for just bias.
    weight_regularizer: A regularizer like the result of
      `tf.learn.l1_regularizer` or `tf.learn.l2_regularizer`.
    create_summaries: Set to false to disable summaries.

  Returns:
    The result of applying a 2-D convolutional layer.

  Raises:
    ValueError: if `x` is not rank 4; or `x`'s channel dimension is not known
    and `num_input_channels` is not specified.
  """
  with variable_scope.variable_op_scope([x], name, 'convolution2d') as vs:
    # Check rank and if num_input_channels is specified, make sure it matches.
    x.get_shape().assert_is_compatible_with([None, None, None,
                                             num_input_channels])

    if not num_input_channels:
      if x.get_shape().dims is None or x.get_shape().dims[3].value is None:
        raise ValueError(
            'If x has an unknown channels dimension then num_input_channels '
            'must be specified; shape: %s num_input_channels: %s'
            % (x.get_shape(), num_input_channels))
      else:
        num_input_channels = x.get_shape().dims[3].value

    # QQQ: Should we accept a scalar for a square convolution?
    if len(kernel_size) != 2:
      raise ValueError('kernel_size must be length 2: %s' % str(kernel_size))
    if len(stride) != 2:
      raise ValueError('stride must be length 2: %s' % str(stride))

    stride = [1, stride[0], stride[1], 1]
    shape = [kernel_size[0], kernel_size[1], num_input_channels,
             num_output_channels]

    patch_size = kernel_size[0] * kernel_size[1]
    weight_init = weight_init or xavier_initializer(
        num_input_channels * patch_size, num_output_channels * patch_size)

    dtype = x.dtype.base_dtype
    w = variable_scope.get_variable('weights',
                                    shape=shape,
                                    dtype=dtype,
                                    initializer=weight_init,
                                    collections=weight_collections)

    if not vs.reuse and create_summaries:
      _add_histogram_summary(w)

    y = nn.conv2d(x, w, stride, padding)
    # Regularization is only applied to the weights and not bias.
    if weight_regularizer:
      _apply_regularization(w, weight_regularizer)
    if bias_init:
      b = _bias_variable(
          num_output_channels, dtype, bias_init, bias_collections,
          create_summaries)
      y = nn.bias_add(y, b)

    if create_summaries:
      return _apply_activation_with_summaries(y, activation_fn)
    if activation_fn:
      y = activation_fn(y)
    return y
Code example #54
def conv2d(x, w):
  """conv2d returns a 2d convolution layer with full stride."""
  return nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
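A minimal usage sketch for this one-liner (hypothetical shapes; uses the same internal-module import style as the surrounding snippets):

import numpy as np
from tensorflow.python.framework import constant_op, dtypes, ops
from tensorflow.python.ops import array_ops

g = ops.Graph()
with g.as_default():
  x = array_ops.placeholder(dtypes.float32, [None, 28, 28, 1], name="x")
  w = constant_op.constant(
      np.random.randn(5, 5, 1, 32).astype(np.float32), name="w")
  y = conv2d(x, w)  # SAME padding, unit strides: shape [None, 28, 28, 32]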
Code example #55
def convolution2d(inputs,
                  num_outputs,
                  kernel_size,
                  stride=1,
                  padding='SAME',
                  activation_fn=nn.relu,
                  normalizer_fn=None,
                  normalizer_params=None,
                  weights_initializer=initializers.xavier_initializer(),
                  weights_regularizer=None,
                  biases_initializer=init_ops.zeros_initializer,
                  biases_regularizer=None,
                  reuse=None,
                  variables_collections=None,
                  outputs_collections=None,
                  trainable=True,
                  scope=None):
  """Adds a 2D convolution followed by an optional batch_norm layer.
  `convolution2d` creates a variable called `weights`, representing the
  convolutional kernel, that is convolved with the `inputs` to produce a
  `Tensor` of activations. If a `normalizer_fn` is provided (such as
  `batch_norm`), it is then applied. Otherwise, if `normalizer_fn` is
  None and a `biases_initializer` is provided then a `biases` variable is
  created and added to the activations. Finally, if `activation_fn` is not
  `None`, it is applied to the activations as well.
  Args:
    inputs: a 4-D tensor  `[batch_size, height, width, channels]`.
    num_outputs: integer, the number of output filters.
    kernel_size: a list of length 2 `[kernel_height, kernel_width]` of the
      filters. Can be an int if both values are the same.
    stride: a list of length 2 `[stride_height, stride_width]`.
      Can be an int if both strides are the same. Note that presently
      both strides must have the same value.
    padding: one of `VALID` or `SAME`.
    activation_fn: activation function.
    normalizer_fn: normalization function to use instead of `biases`. If
      `normalizer_fn` is provided then `biases_initializer` and
      `biases_regularizer` are ignored and `biases` are not created nor added.
    normalizer_params: normalization function parameters.
    weights_initializer: An initializer for the weights.
    weights_regularizer: Optional regularizer for the weights.
    biases_initializer: An initializer for the biases. If None skip biases.
    biases_regularizer: Optional regularizer for the biases.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
    variables_collections: optional list of collections for all the variables
      or a dictionary containing a different list of collections per variable.
    outputs_collections: collection to add the outputs.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    scope: Optional scope for `variable_op_scope`.
  Returns:
    a tensor representing the output of the operation.
  """
  with variable_scope.variable_op_scope([inputs],
                                        scope, 'Conv', reuse=reuse) as sc:
    dtype = inputs.dtype.base_dtype
    kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
    stride_h, stride_w = utils.two_element_tuple(stride)
    num_filters_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
    weights_shape = [kernel_h, kernel_w,
                     num_filters_in, num_outputs]
    weights_collections = utils.get_variable_collections(
        variables_collections, 'weights')
    weights = variables.model_variable('weights',
                                       shape=weights_shape,
                                       dtype=dtype,
                                       initializer=weights_initializer,
                                       regularizer=weights_regularizer,
                                       collections=weights_collections,
                                       trainable=trainable)
    outputs = nn.conv2d(inputs, weights, [1, stride_h, stride_w, 1],
                        padding=padding)
    if normalizer_fn:
      normalizer_params = normalizer_params or {}
      outputs = normalizer_fn(outputs, **normalizer_params)
    else:
      if biases_initializer is not None:
        biases_collections = utils.get_variable_collections(
            variables_collections, 'biases')
        biases = variables.model_variable('biases',
                                          shape=[num_outputs,],
                                          dtype=dtype,
                                          initializer=biases_initializer,
                                          regularizer=biases_regularizer,
                                          collections=biases_collections,
                                          trainable=trainable)
        outputs = nn.bias_add(outputs, biases)
    if activation_fn:
      outputs = activation_fn(outputs)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
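A minimal construction sketch for convolution2d (hypothetical shapes; assumes the snippet's imports are in scope; with no normalizer_fn, a `biases` variable is created and added):

from tensorflow.python.framework import dtypes, ops
from tensorflow.python.ops import array_ops, nn

g = ops.Graph()
with g.as_default():
  images = array_ops.placeholder(dtypes.float32, [None, 32, 32, 3])
  # 64 filters of 3x3, stride 2, ReLU non-linearity.
  net = convolution2d(images, num_outputs=64, kernel_size=3, stride=2,
                      activation_fn=nn.relu, scope='conv1')
  # SAME padding with stride 2 halves spatial dims: [None, 16, 16, 64].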
Code example #56
File: common_leaders.py Project: bowrian/models
def conv2d_leaders(inputs,
                   num_outputs,
                   kernel_size,
                   rates=[1],
                   stride=1,
                   padding='SAME',
                   activation_fn=nn.relu,
                   normalizer_fn=None,
                   normalizer_params=None,
                   weights_initializer=initializers.xavier_initializer(),
                   weights_regularizer=None,
                   biases_initializer=init_ops.zeros_initializer,
                   biases_regularizer=None,
                   reuse=None,
                   variables_collections=None,
                   outputs_collections=None,
                   trainable=True,
                   scope=None,):
    """Adds a 2D convolution followed by an optional batch_norm layer.
    `convolution2d` creates a variable called `weights`, representing the
    convolutional kernel, that is convolved with the `inputs` to produce a
    `Tensor` of activations. If a `normalizer_fn` is provided (such as
    `batch_norm`), it is then applied. Otherwise, if `normalizer_fn` is
    None and a `biases_initializer` is provided then a `biases` variable is
    created and added to the activations. Finally, if `activation_fn` is not
    `None`, it is applied to the activations as well.
    Performs atrous convolution with input stride equal to the rate when a
    rate is greater than one.
    Args:
        inputs: a 4-D tensor  `[batch_size, height, width, channels]`.
        num_outputs: integer, the number of output filters.
        kernel_size: a list of length 2 `[kernel_height, kernel_width]` of the
          filters. Can be an int if both values are the same.
        stride: a list of length 2 `[stride_height, stride_width]`.
          Can be an int if both strides are the same. Note that presently
          both strides must have the same value.
        padding: one of `VALID` or `SAME`.
        rates: list of integers. Rates less than or equal to 1 use a standard
          convolution; rates greater than 1 use atrous convolution, in which
          case `stride` must be set to 1.
        activation_fn: activation function.
        normalizer_fn: normalization function to use instead of `biases`. If
          `normalizer_fn` is provided then `biases_initializer` and
          `biases_regularizer` are ignored and `biases` are not created nor added.
        normalizer_params: normalization function parameters.
        weights_initializer: An initializer for the weights.
        weights_regularizer: Optional regularizer for the weights.
        biases_initializer: An initializer for the biases. If None skip biases.
        biases_regularizer: Optional regularizer for the biases.
        reuse: whether or not the layer and its variables should be reused. To be
          able to reuse the layer scope must be given.
        variables_collections: optional list of collections for all the variables
          or a dictionary containing a different list of collections per variable.
        outputs_collections: collection to add the outputs.
        trainable: If `True` also add variables to the graph collection
          `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
        scope: Optional scope for `variable_op_scope`.
    Returns:
        a tensor representing the output of the operation.
    Raises:
        ValueError: if both 'rate' and `stride` are larger than one.
    """
    with variable_scope.variable_scope(scope, 'Conv', [inputs],
                                       reuse=reuse) as sc:

        inputs = ops.convert_to_tensor(inputs)
        dtype = inputs.dtype.base_dtype
        # inshape = tf.shape(inputs)

        # Leading kernel size.
        kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
        stride_h, stride_w = utils.two_element_tuple(stride)
        num_filters_in = utils.last_dimension(inputs.get_shape(), min_rank=4)

        # Weights variable.
        weights_shape = [kernel_h, kernel_w,
                         num_filters_in, num_outputs]
        weights_collections = utils.get_variable_collections(
            variables_collections, 'weights')
        weights = variables.model_variable('weights',
                                           shape=weights_shape,
                                           dtype=dtype,
                                           initializer=weights_initializer,
                                           regularizer=weights_regularizer,
                                           collections=weights_collections,
                                           trainable=trainable)
        # # Bias variable.
        # biases = None
        # if biases_initializer is not None:
        #     biases_collections = utils.get_variable_collections(
        #         variables_collections, 'biases')
        #     biases = variables.model_variable('biases',
        #                                       shape=[num_outputs, ],
        #                                       dtype=dtype,
        #                                       initializer=biases_initializer,
        #                                       regularizer=biases_regularizer,
        #                                       collections=biases_collections,
        #                                       trainable=trainable)

        # Convolution at different scales.
        outputs_pool = []
        for rate in rates:
            if rate > 1:
                conv = nn.atrous_conv2d(inputs, weights, rate, padding='SAME')
            else:
                conv = nn.conv2d(inputs, weights, [1, 1, 1, 1], padding='SAME')
            outputs_pool.append(conv)
        # 'Pooling' at different scales. A bit hacky. Use of concat + max_pool?
        outputs = None
        outputs_pool.reverse()
        for node in outputs_pool:
            if outputs is None:
                outputs = node
            else:
                outputs = tf.maximum(outputs, node)
        # # Add bias?
        # if biases is not None:
        #     outputs = tf.nn.bias_add(outputs, biases)

        # Fix padding and stride. A bit hacky too and not so efficient!
        if padding == 'VALID' or stride > 1:
            padfilter = np.zeros(shape=(kernel_h, kernel_w, num_filters_in, 1),
                                 dtype=dtype.as_numpy_dtype)
            x = (kernel_h - 1) // 2
            y = (kernel_w - 1) // 2
            padfilter[x, y, :, 0] = 1.
            outputs = tf.nn.depthwise_conv2d(outputs, padfilter,
                                             [1, stride_h, stride_w, 1],
                                             padding=padding)

        # Batch norm / bias and activation...
        if normalizer_fn is not None:
            normalizer_params = normalizer_params or {}
            outputs = normalizer_fn(outputs, **normalizer_params)
        else:
            if biases_initializer is not None:
                biases_collections = utils.get_variable_collections(
                    variables_collections, 'biases')
                biases = variables.model_variable('biases',
                                                  shape=[num_outputs, ],
                                                  dtype=dtype,
                                                  initializer=biases_initializer,
                                                  regularizer=biases_regularizer,
                                                  collections=biases_collections,
                                                  trainable=trainable)
                outputs = nn.bias_add(outputs, biases)
        if activation_fn is not None:
            outputs = activation_fn(outputs)
        return utils.collect_named_outputs(outputs_collections,
                                           sc.name, outputs)
Code example #57
File: layers.py Project: Baaaaam/tensorflow
def legacy_convolution2d(x,
                         num_output_channels,
                         kernel_size,
                         activation_fn=None,
                         stride=(1, 1),
                         padding='SAME',
                         weight_init=initializers.xavier_initializer_conv2d(),
                         bias_init=standard_ops.zeros_initializer,
                         name=None,
                         weight_collections=(ops.GraphKeys.WEIGHTS,),
                         bias_collections=(ops.GraphKeys.BIASES,),
                         output_collections=(ops.GraphKeys.ACTIVATIONS,),
                         trainable=True,
                         weight_regularizer=None,
                         bias_regularizer=None):
  # pylint: disable=g-docstring-has-escape
  """Adds the parameters for a conv2d layer and returns the output.

  A neural network convolution layer is generally defined as:
  \\\\(y = f(conv2d(w, x) + b)\\\\) where **f** is given by `activation_fn`,
  **conv2d** is `tf.nn.conv2d` and `x` has shape
  `[batch, height, width, channels]`. The output of this op is of shape
  `[batch, out_height, out_width, num_output_channels]`, where `out_width` and
  `out_height` are determined by the `padding` argument. See `conv2d` for
  details.

  This op creates `w` and optionally `b` and adds various summaries that can be
  useful for visualizing learning or diagnosing training problems. Bias can be
  disabled by setting `bias_init` to `None`.

  The variable creation is compatible with `tf.variable_scope` and so can be
  reused with `tf.variable_scope` or `tf.make_template`.

  Most of the details of variable creation can be controlled by specifying the
  initializers (`weight_init` and `bias_init`) and which collections to place
  the created variables in (`weight_collections` and `bias_collections`).

  A per layer regularization can be specified by setting `weight_regularizer`.
  This is only applied to weights and not the bias.

  Args:
    x: A 4-D input `Tensor`.
    num_output_channels: The number of output channels (i.e. the size of the
      last dimension of the output).
    kernel_size: A length 2 `list` or `tuple` containing the kernel size.
    activation_fn: A function that requires a single Tensor that is applied as a
      non-linearity.
    stride: A length 2 `list` or `tuple` specifying the stride of the sliding
      window across the image.
    padding: A `string` from: "SAME", "VALID". The type of padding algorithm to
      use.
    weight_init: An optional initialization. If not specified, uses Xavier
      initialization (see `tf.learn.xavier_initializer`).
    bias_init: An initializer for the bias, defaults to 0. Set to `None` in
      order to disable bias.
    name: The name for this operation is used to name operations and to find
      variables. If specified it must be unique for this scope, otherwise a
      unique name starting with "convolution2d" will be created.  See
      `tf.variable_op_scope` for details.
    weight_collections: List of graph collections to which weights are added.
    bias_collections: List of graph collections to which biases are added.
    output_collections: List of graph collections to which outputs are added.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    weight_regularizer: A regularizer like the result of
      `l1_regularizer` or `l2_regularizer`. Used for weights.
    bias_regularizer: A regularizer like the result of
      `l1_regularizer` or `l2_regularizer`. Used for biases.

  Returns:
    The result of applying a 2-D convolutional layer.

  Raises:
    ValueError: If `kernel_size` or `stride` are not length 2.
  """
  with variable_scope.variable_op_scope([x], name, 'convolution2d'):
    num_input_channels = x.get_shape().dims[3].value

    if len(kernel_size) != 2:
      raise ValueError('kernel_size must be length 2: %s' % str(kernel_size))
    if len(stride) != 2:
      raise ValueError('stride must be length 2: %s' % str(stride))

    stride = [1, stride[0], stride[1], 1]
    shape = [kernel_size[0], kernel_size[1], num_input_channels,
             num_output_channels]
    dtype = x.dtype.base_dtype

    weight_collections = set(list(weight_collections or []) +
                             [ops.GraphKeys.VARIABLES])
    w = variable_scope.get_variable('weights',
                                    shape=shape,
                                    dtype=dtype,
                                    initializer=weight_init,
                                    collections=weight_collections,
                                    regularizer=weight_regularizer,
                                    trainable=trainable)

    y = nn.conv2d(x, w, stride, padding)

    if bias_init is not None:
      bias_collections = set(list(bias_collections or []) +
                             [ops.GraphKeys.VARIABLES])
      b = variable_scope.get_variable('bias',
                                      shape=[num_output_channels],
                                      dtype=dtype,
                                      initializer=bias_init,
                                      collections=bias_collections,
                                      regularizer=bias_regularizer,
                                      trainable=trainable)

      y = nn.bias_add(y, b)

    return _apply_activation(y, activation_fn, output_collections)
Code example #58
 def loop_fn(i):
   x1 = array_ops.gather(x, i)
   return nn.conv2d(
       x1, filt, strides=[1, 2, 2, 1], padding="VALID", data_format="NHWC")
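A hedged sketch of driving a loop body like this with pfor (vectorized map); the enclosing test presumably supplies x and filt, so the tensors below are illustrative stand-ins:

from tensorflow.python.ops import array_ops, nn, random_ops
from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_ops

x = random_ops.random_normal([8, 1, 16, 16, 3])  # 8 iterations of NHWC input
filt = random_ops.random_normal([3, 3, 3, 4])    # 3x3 kernel, 3 -> 4 channels

def loop_fn(i):
  x1 = array_ops.gather(x, i)
  return nn.conv2d(
      x1, filt, strides=[1, 2, 2, 1], padding="VALID", data_format="NHWC")

out = pfor_ops.pfor(loop_fn, 8)  # stacked results, shape [8, 1, 7, 7, 4]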