Example #1
 def stage2(x, label):
     with variable_scope.variable_scope("stage2", use_resource=True):
         x = fc(x, 48)
         x = nn.relu(x)
         x = fc(x, 100)
         x = nn.relu(x)
         return x, label
Example #2
 def stage2(x, label):
     with variable_scope.variable_scope("stage2", use_resource=True):
         a = fc(x, 100)
         a = nn.relu(a)
         b = fc(x, 100)
         b = nn.relu(b)
         return a + b, label
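
Examples #1 and #2 assume an `fc` helper for a fully connected layer that is not shown. A minimal sketch of such a helper follows; it is a hypothetical stand-in (mirroring the ones-initialized weights used in Example #27 below), not the original implementation.

# Hypothetical stand-in for the `fc` helper used above (TF 1.x).
import numpy as np
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope


def fc(x, num_units, name="fc"):
    # default_name uniquifies the scope, so repeated calls do not collide on "w".
    with variable_scope.variable_scope(None, default_name=name, use_resource=True):
        weight = variable_scope.get_variable(
            "w",
            shape=[int(x.shape[1]), num_units],
            dtype=np.float32,
            initializer=init_ops.ones_initializer())
        return math_ops.matmul(x, weight)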
Example #3
    def block(name, first_stride, out_filters, count, x):

      for i in range(count):
        shape_in = x.shape
        stride = first_stride if (i == 0) else 1
        if stride > 1:
          x = fixed_padding(x, 3)
        sc = x

        with variable_scope.variable_scope(name + "/" + str(i) + "/1"):
          x = conv(x, 3, stride, out_filters)
          x = nn.relu(x)

        with variable_scope.variable_scope(name + "/" + str(i) + "/2"):
          x = conv(x, 3, 1, out_filters)

          # shortcut
          if stride != 1:
            sc = array_ops.strided_slice(sc, [0, 0, 0, 0],
                                         sc.shape,
                                         strides=[1, stride, stride, 1])
          pad = int(x.shape[3] - shape_in[3])
          if pad != 0:
            sc = array_ops.pad(sc, paddings=[[0, 0], [0, 0], [0, 0], [0, pad]])

          x = nn.relu(x + sc)

      return x
def create_test_network_8():
  """Aligned network for test, including an intermediate addition.

  The graph is similar to create_test_network_1(), except that it includes a few
  more layers on top. The added layers compose two different branches whose
  receptive fields are different. This makes this test case more challenging; in
  particular, this test fails if a naive DFS-like algorithm is used for RF
  computation.

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
  g = ops.Graph()
  with g.as_default():
    # An input test image with unknown spatial resolution.
    x = array_ops.placeholder(
        dtypes.float32, (None, None, None, 1), name='input_image')
    # Left branch before first addition.
    l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
    # Right branch before first addition.
    l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]])
    l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
    l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='VALID')
    # First addition.
    l4 = nn.relu(l1 + l3)
    # Left branch after first addition.
    l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='VALID')
    # Right branch after first addition.
    l6_pad = array_ops.pad(l4, [[0, 0], [1, 0], [1, 0], [0, 0]])
    l6 = slim.conv2d(l6_pad, 1, [3, 3], stride=2, scope='L6', padding='VALID')
    # Final addition.
    nn.relu(l5 + l6, name='output')

  return g
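
These `create_test_network_*` graphs are meant to exercise receptive-field computation. A minimal usage sketch follows, assuming a TF 1.x build where `tf.contrib.receptive_field` is available (the exact return structure has varied across versions).

# A minimal sketch, assuming tf.contrib.receptive_field is available (TF 1.x).
from tensorflow.contrib import receptive_field

g = create_test_network_8()
# Receptive field parameters from 'input_image' to 'output'.
rf = receptive_field.compute_receptive_field_from_graph_def(
    g.as_graph_def(), 'input_image', 'output')
print(rf)  # receptive field size, effective stride and effective padding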
 def GetParams(self):
   """Test for Constant broadcasting in TF-TRT."""
   dtype = dtypes.float32
   input_name = 'input'
   input_dims = [5, 12, 12, 2]
   g = ops.Graph()
   with g.as_default():
     x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
     filt1 = constant_op.constant(
         0.3, shape=(3, 3, 2, 1), dtype=dtype, name='filt1')
     y1 = nn.conv2d(x, filt1, strides=[1, 1, 1, 1], padding='SAME', name='y1')
     z1 = nn.relu(y1, name='z1')
     filt2 = constant_op.constant(
         np.random.randn(9), shape=(3, 3, 1, 1), dtype=dtype, name='filt2')
     y2 = nn.conv2d(z1, filt2, strides=[1, 1, 1, 1], padding='SAME', name='y2')
     z2 = nn.relu(y2, name='z')
     filt3 = constant_op.constant(
         np.random.randn(3, 3, 1, 1),
         shape=(3, 3, 1, 1),
         dtype=dtype,
         name='filt3')
     y3 = nn.conv2d(z2, filt3, strides=[1, 1, 1, 1], padding='SAME', name='y3')
     nn.relu(y3, name='output')
   return trt_test.TfTrtIntegrationTestParams(
       gdef=g.as_graph_def(),
       input_names=[input_name],
       input_dims=[input_dims],
       num_expected_engines=1,
       expected_output_dims=(5, 12, 12, 1),
       allclose_atol=1.e-02,
       allclose_rtol=1.e-02)
Example #6
def create_test_network_3():
    """Misaligned network for test.

  The graph corresponds to the example from the first figure in
  go/cnn-rf-computation#arbitrary-computation-graphs

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
    g = ops.Graph()
    with g.as_default():
        # An input test image with unknown spatial resolution.
        x = array_ops.placeholder(dtypes.float32, (None, None, None, 1),
                                  name='input_image')
        # Left branch.
        l1_pad = array_ops.pad(x, [[0, 0], [2, 1], [2, 1], [0, 0]])
        l1 = slim.conv2d(l1_pad,
                         1, [5, 5],
                         stride=2,
                         scope='L1',
                         padding='VALID')
        # Right branch.
        l2 = slim.conv2d(x, 1, [3, 3], stride=1, scope='L2', padding='VALID')
        l3 = slim.conv2d(l2, 1, [3, 3], stride=1, scope='L3', padding='VALID')
        # Addition.
        nn.relu(l1 + l3, name='output')
    return g
Example #7
def create_test_network_4():
    """Misaligned network for test.

  The graph corresponds to a variation from the example from the second figure
  in go/cnn-rf-computation#arbitrary-computation-graphs. Layer 2 uses 'SAME'
  padding, which makes its padding dependent on the input image dimensionality.
  In this case, the effective padding will be undetermined, and the utility is
  not able to check the network alignment.

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
    g = ops.Graph()
    with g.as_default():
        # An 8x8 test image.
        x = array_ops.placeholder(dtypes.float32, (1, 8, 8, 1),
                                  name='input_image')
        # Left branch.
        l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
        # Right branch.
        l2 = slim.conv2d(x, 1, [3, 3], stride=2, scope='L2', padding='SAME')
        l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='VALID')
        # Addition.
        nn.relu(l1 + l3, name='output')
    return g
def create_test_network_7():
    """Aligned network for test, with a control dependency.

  The graph is similar to create_test_network_1(), except that it includes an
  assert operation on the left branch.

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
    g = ops.Graph()
    with g.as_default():
        # An 8x8 test image.
        x = array_ops.placeholder(dtypes.float32, (1, 8, 8, 1),
                                  name='input_image')
        # Left branch.
        l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
        l1_shape = array_ops.shape(l1)
        assert_op = control_flow_ops.Assert(gen_math_ops.equal(l1_shape[1], 2),
                                            [l1_shape],
                                            summarize=4)
        # Right branch.
        l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]])
        l2 = slim.conv2d(l2_pad,
                         1, [3, 3],
                         stride=2,
                         scope='L2',
                         padding='VALID')
        l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='VALID')
        # Addition.
        with ops.control_dependencies([assert_op]):
            nn.relu(l1 + l3, name='output')
    return g
def create_test_network_9():
  """Aligned network for test, including an intermediate addition.

  The graph is the same as create_test_network_8(), except that VALID padding is
  changed to SAME.

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
  g = ops.Graph()
  with g.as_default():
    # An input test image with unknown spatial resolution.
    x = array_ops.placeholder(
        dtypes.float32, (None, None, None, 1), name='input_image')
    # Left branch before first addition.
    l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='SAME')
    # Right branch before first addition.
    l2 = slim.conv2d(x, 1, [3, 3], stride=2, scope='L2', padding='SAME')
    l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='SAME')
    # First addition.
    l4 = nn.relu(l1 + l3)
    # Left branch after first addition.
    l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='SAME')
    # Right branch after first addition.
    l6 = slim.conv2d(l4, 1, [3, 3], stride=2, scope='L6', padding='SAME')
    # Final addition.
    nn.relu(l5 + l6, name='output')

  return g
Example #10
def create_test_network_2():
  """Aligned network for test.

  The graph corresponds to a variation to the example from the second figure in
  go/cnn-rf-computation#arbitrary-computation-graphs. Layers 2 and 3 are changed
  to max-pooling operations. Since the functionality is the same as convolution,
  the network is aligned and the receptive field size is the same as from the
  network created using create_test_network_1().

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
  g = ops.Graph()
  with g.as_default():
    # An input test image with unknown spatial resolution.
    x = array_ops.placeholder(
        dtypes.float32, (None, None, None, 1), name='input_image')
    # Left branch.
    l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
    # Right branch.
    l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]])
    l2 = slim.max_pool2d(l2_pad, [3, 3], stride=2, scope='L2', padding='VALID')
    l3 = slim.max_pool2d(l2, [1, 1], stride=2, scope='L3', padding='VALID')
    # Addition.
    nn.relu(l1 + l3, name='output')
  return g
Example #11
def create_test_network_2():
    """Aligned network for test.

  The graph corresponds to a variation to the example from the second figure in
  go/cnn-rf-computation#arbitrary-computation-graphs. Layers 2 and 3 are changed
  to max-pooling operations. Since the functionality is the same as convolution,
  the network is aligned and the receptive field size is the same as from the
  network created using create_test_network_1().

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
    g = ops.Graph()
    with g.as_default():
        # An 8x8 test image.
        x = array_ops.placeholder(dtypes.float32, (1, 8, 8, 1),
                                  name='input_image')
        # Left branch.
        l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
        # Right branch.
        l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]])
        l2 = slim.max_pool2d(l2_pad, [3, 3],
                             stride=2,
                             scope='L2',
                             padding='VALID')
        l3 = slim.max_pool2d(l2, [1, 1], stride=2, scope='L3', padding='VALID')
        # Addition.
        nn.relu(l1 + l3, name='output')
    return g
def create_test_network_6():
    """Aligned network with dropout for test.

  The graph is similar to create_test_network_1(), except that the right branch
  has dropout normalization.

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
    g = ops.Graph()
    with g.as_default():
        # An 8x8 test image.
        x = array_ops.placeholder(dtypes.float32, (1, 8, 8, 1),
                                  name='input_image')
        # Left branch.
        l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
        # Right branch.
        l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]])
        l2 = slim.conv2d(l2_pad,
                         1, [3, 3],
                         stride=2,
                         scope='L2',
                         padding='VALID')
        l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='VALID')
        dropout = slim.dropout(l3)
        # Addition.
        nn.relu(l1 + dropout, name='output')
    return g
Example #13
def create_test_network_1():
    """Aligned network for test.

  The graph corresponds to the example from the second figure in
  go/cnn-rf-computation#arbitrary-computation-graphs

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
    g = ops.Graph()
    with g.as_default():
        # An 8x8 test image.
        x = array_ops.placeholder(dtypes.float32, (1, 8, 8, 1),
                                  name='input_image')
        # Left branch.
        l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
        # Right branch.
        l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]])
        l2 = slim.conv2d(l2_pad,
                         1, [3, 3],
                         stride=2,
                         scope='L2',
                         padding='VALID')
        l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='VALID')
        # Addition.
        nn.relu(l1 + l3, name='output')
    return g
 def GetParams(self):
   """Test for Constant broadcasting in TF-TRT."""
   dtype = dtypes.float32
   input_name = 'input'
   input_dims = [5, 12, 12, 2]
   output_name = 'output'
   g = ops.Graph()
   with g.as_default():
     x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
     filt1 = constant_op.constant(
         0.3, shape=(3, 3, 2, 1), dtype=dtype, name='filt1')
     y1 = nn.conv2d(x, filt1, strides=[1, 1, 1, 1], padding='SAME', name='y1')
     z1 = nn.relu(y1, name='z1')
     filt2 = constant_op.constant(
         np.random.randn(9), shape=(3, 3, 1, 1), dtype=dtype, name='filt2')
     y2 = nn.conv2d(z1, filt2, strides=[1, 1, 1, 1], padding='SAME', name='y2')
     z2 = nn.relu(y2, name='z')
     filt3 = constant_op.constant(
         np.random.randn(3, 3, 1, 1),
         shape=(3, 3, 1, 1),
         dtype=dtype,
         name='filt3')
     y3 = nn.conv2d(z2, filt3, strides=[1, 1, 1, 1], padding='SAME', name='y3')
     nn.relu(y3, name=output_name)
   return trt_test.TfTrtIntegrationTestParams(
       gdef=g.as_graph_def(),
       input_names=[input_name],
       input_dims=[input_dims],
       output_names=[output_name],
       expected_output_dims=[(5, 12, 12, 1)])
Example #15
def create_test_network_9():
    """Aligned network for test, including an intermediate addition.

  The graph is the same as create_test_network_8(), except that VALID padding is
  changed to SAME.

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
    g = ops.Graph()
    with g.as_default():
        # An input test image with unknown spatial resolution.
        x = array_ops.placeholder(dtypes.float32, (None, None, None, 1),
                                  name='input_image')
        # Left branch before first addition.
        l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='SAME')
        # Right branch before first addition.
        l2 = slim.conv2d(x, 1, [3, 3], stride=2, scope='L2', padding='SAME')
        l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='SAME')
        # First addition.
        l4 = nn.relu(l1 + l3)
        # Left branch after first addition.
        l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='SAME')
        # Right branch after first addition.
        l6 = slim.conv2d(l4, 1, [3, 3], stride=2, scope='L6', padding='SAME')
        # Final addition.
        nn.relu(l5 + l6, name='output')

    return g
Example #16
 def GraphFn(self, x):
     dtype = x.dtype
     filt1 = constant_op.constant(0.3,
                                  shape=(3, 3, 2, 1),
                                  dtype=dtype,
                                  name='filt1')
     y1 = nn.conv2d(x,
                    filt1,
                    strides=[1, 1, 1, 1],
                    padding='SAME',
                    name='y1')
     z1 = nn.relu(y1, name='z1')
     filt2 = constant_op.constant(0.3,
                                  shape=(3, 3, 1, 1),
                                  dtype=dtype,
                                  name='filt2')
     y2 = nn.conv2d(z1,
                    filt2,
                    strides=[1, 1, 1, 1],
                    padding='SAME',
                    name='y2')
     z2 = nn.relu(y2, name='z')
     filt3 = constant_op.constant(0.3,
                                  shape=(3, 3, 1, 1),
                                  dtype=dtype,
                                  name='filt3')
     y3 = nn.conv2d(z2,
                    filt3,
                    strides=[1, 1, 1, 1],
                    padding='SAME',
                    name='y3')
     return nn.relu(y3, name='output_0')
Example #17
def create_test_network_4():
  """Misaligned network for test.

  The graph corresponds to a variation from the example from the second figure
  in go/cnn-rf-computation#arbitrary-computation-graphs. Layer 2 uses 'SAME'
  padding, which makes its padding dependent on the input image dimensionality.
  In this case, the effective padding will be undetermined, and the utility is
  not able to check the network alignment.

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
  g = ops.Graph()
  with g.as_default():
    # An input test image with unknown spatial resolution.
    x = array_ops.placeholder(
        dtypes.float32, (None, None, None, 1), name='input_image')
    # Left branch.
    l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
    # Right branch.
    l2 = slim.conv2d(x, 1, [3, 3], stride=2, scope='L2', padding='SAME')
    l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='VALID')
    # Addition.
    nn.relu(l1 + l3, name='output')
  return g
Example #18
def create_test_network_7():
  """Aligned network for test, with a control dependency.

  The graph is similar to create_test_network_1(), except that it includes an
  assert operation on the left branch.

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
  g = ops.Graph()
  with g.as_default():
    # An 8x8 test image.
    x = array_ops.placeholder(dtypes.float32, (1, 8, 8, 1), name='input_image')
    # Left branch.
    l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
    l1_shape = array_ops.shape(l1)
    assert_op = control_flow_ops.Assert(
        gen_math_ops.equal(l1_shape[1], 2), [l1_shape], summarize=4)
    # Right branch.
    l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]])
    l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
    l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='VALID')
    # Addition.
    with ops.control_dependencies([assert_op]):
      nn.relu(l1 + l3, name='output')
  return g
Example #19
def model():
    print("building model ...")
    with tf.variable_scope('train'):
        print("building model ...")
        X_pl = tf.placeholder(tf.float32, [None, num_features])
        X_expand = tf.expand_dims(X_pl, axis=2)
        print("X_pl", X_pl.get_shape())
        t_pl = tf.placeholder(tf.int32, [None,])
        print("t_pl", t_pl.get_shape())
        is_training_pl = tf.placeholder(tf.bool)
        cell_fw = tf.nn.rnn_cell.GRUCell(100)
        cell_bw = tf.nn.rnn_cell.GRUCell(100)
        seq_len = tf.reduce_sum(tf.ones(tf.shape(X_pl), dtype=tf.int32), axis=1)
        _, enc_states = tf.nn.bidirectional_dynamic_rnn(cell_fw=cell_fw,
            cell_bw=cell_bw, inputs=X_expand, sequence_length=seq_len,
            dtype=tf.float32)
        enc_states = tf.concat(enc_states, axis=1)  # TF 1.x argument order: (values, axis)
        enc_states_drop = dropout(enc_states, is_training=is_training_pl) 
        l1 = fully_connected(enc_states_drop, 100, activation_fn=None)
        l1 = batch_norm(l1, is_training=is_training_pl)
        l1_relu = relu(l1)
        l1_dropout = dropout(l1_relu, is_training=is_training_pl)
        l2 = fully_connected(l1_dropout, 100, activation_fn=None)
        l2 = batch_norm(l2, is_training=is_training_pl)
        l2_relu = relu(l2)
        l_out = fully_connected(l2_relu, num_outputs=num_classes, activation_fn=None)
        l_out_softmax = tf.nn.softmax(l_out)
        tf.contrib.layers.summarize_variables()

    with tf.variable_scope('metrics'):
        loss = sparse_softmax_cross_entropy_with_logits(logits=l_out, labels=t_pl)
        print("loss", loss.get_shape())
        loss = tf.reduce_mean(loss)
        print("loss", loss.get_shape())
        tf.summary.scalar('train/loss', loss)
        argmax = tf.to_int32(tf.argmax(l_out, 1))
        print("argmax", argmax.get_shape())
        correct = tf.to_float(tf.equal(argmax, t_pl))
        print("correct,", correct.get_shape())
        accuracy = tf.reduce_mean(correct)
        print("accuracy", accuracy.get_shape())

    with tf.variable_scope('optimizer'):
        print("building optimizer ...")
        global_step = tf.Variable(0, name='global_step', trainable=False)
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        grads_and_vars = optimizer.compute_gradients(loss)
        gradients, variables = zip(*grads_and_vars)
        clipped_gradients, global_norm = (
            tf.clip_by_global_norm(gradients, clip_norm))
        clipped_grads_and_vars = zip(clipped_gradients, variables)

        tf.summary.scalar('train/global_gradient_norm', global_norm)

        train_op = optimizer.apply_gradients(clipped_grads_and_vars, global_step=global_step)

    return X_pl, t_pl, is_training_pl, l_out, l_out_softmax, loss, accuracy, train_op, global_step
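
A minimal sketch of how the handles returned by `model()` might be driven in a training loop follows. It assumes TF 1.x; `batch_generator` is a hypothetical helper yielding (features, labels) NumPy batches and is not part of the example above.

# A minimal sketch, assuming TF 1.x; batch_generator is hypothetical.
import tensorflow as tf

X_pl, t_pl, is_training_pl, l_out, l_out_softmax, loss, accuracy, train_op, global_step = model()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for X_batch, t_batch in batch_generator():
        feed_dict = {X_pl: X_batch, t_pl: t_batch, is_training_pl: True}
        _, batch_loss, batch_acc, step = sess.run(
            [train_op, loss, accuracy, global_step], feed_dict=feed_dict)
        print("step %d: loss %.4f acc %.4f" % (step, batch_loss, batch_acc))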
Example #20
def model():
    print("building model ...")
    with tf.variable_scope('train'):
        print("building model ...")
        X_pl = tf.placeholder(tf.float32, [None, num_features])
        X_expand = tf.expand_dims(X_pl, axis=2)
        print("X_pl", X_pl.get_shape())
        t_pl = tf.placeholder(tf.int32, [None,])
        print("t_pl", t_pl.get_shape())
        is_training_pl = tf.placeholder(tf.bool)
        cell_fw = tf.nn.rnn_cell.GRUCell(205)
        cell_bw = tf.nn.rnn_cell.GRUCell(205)
        seq_len = tf.reduce_sum(tf.ones(tf.shape(X_pl), dtype=tf.int32), axis=1)
        _, enc_states = tf.nn.bidirectional_dynamic_rnn(cell_fw=cell_fw,
            cell_bw=cell_bw, inputs=X_expand, sequence_length=seq_len,
            dtype=tf.float32)
        enc_states = tf.concat(enc_states, axis=1)  # TF 1.x argument order: (values, axis)
        enc_states_drop = dropout(enc_states, is_training=is_training_pl) 
        l1 = fully_connected(enc_states_drop, 200, activation_fn=None)
        l1 = batch_norm(l1, is_training=is_training_pl)
        l1_relu = relu(l1)
        l1_dropout = dropout(l1_relu, is_training=is_training_pl)
        l2 = fully_connected(l1_dropout, 200, activation_fn=None)
        l2 = batch_norm(l2, is_training=is_training_pl)
        l2_relu = relu(l2)
        l_out = fully_connected(l2_relu, num_outputs=num_classes, activation_fn=None)
        l_out_softmax = tf.nn.softmax(l_out)
        tf.contrib.layers.summarize_variables()

    with tf.variable_scope('metrics'):
        loss = sparse_softmax_cross_entropy_with_logits(logits=l_out, labels=t_pl)
        print("loss", loss.get_shape())
        loss = tf.reduce_mean(loss)
        print("loss", loss.get_shape())
        tf.summary.scalar('train/loss', loss)
        argmax = tf.to_int32(tf.argmax(l_out, 1))
        print("argmax", argmax.get_shape())
        correct = tf.to_float(tf.equal(argmax, t_pl))
        print("correct,", correct.get_shape())
        accuracy = tf.reduce_mean(correct)
        print("accuracy", accuracy.get_shape())

    with tf.variable_scope('optimizer'):
        print("building optimizer ...")
        global_step = tf.Variable(0, name='global_step', trainable=False)
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        grads_and_vars = optimizer.compute_gradients(loss)
        gradients, variables = zip(*grads_and_vars)
        clipped_gradients, global_norm = (
            tf.clip_by_global_norm(gradients, clip_norm))
        clipped_grads_and_vars = zip(clipped_gradients, variables)

        tf.summary.scalar('train/global_gradient_norm', global_norm)

        train_op = optimizer.apply_gradients(clipped_grads_and_vars, global_step=global_step)

    return X_pl, t_pl, is_training_pl, l_out, l_out_softmax, loss, accuracy, train_op, global_step
Example #21
def two_layer_model(x):
    x_image = array_ops.reshape(x, [-1, 28, 28, 1])
    w_conv1 = weight([5, 5, 1, 32])
    b_conv1 = bias([32])
    h_conv1 = nn.relu(conv2d(x_image, w_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)
    w_conv2 = weight([5, 5, 32, 64])
    b_conv2 = bias([64])
    h_conv2 = nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)
    return h_pool2
def two_layer_model(x):
  x_image = array_ops.reshape(x, [-1, 28, 28, 1])
  w_conv1 = weight([5, 5, 1, 32])
  b_conv1 = bias([32])
  h_conv1 = nn.relu(conv2d(x_image, w_conv1) + b_conv1)
  h_pool1 = max_pool_2x2(h_conv1)
  w_conv2 = weight([5, 5, 32, 64])
  b_conv2 = bias([64])
  h_conv2 = nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
  h_pool2 = max_pool_2x2(h_conv2)
  return h_pool2
def _conv_pool(x):
  """(Conv -> bias -> relu -> max_pool) x2."""
  x_image = array_ops.reshape(x, [-1, 8, 8, 1])
  w_conv1 = _weight([3, 3, 1, 6])
  b_conv1 = _bias([6])
  h_conv1 = nn.relu(nn.bias_add(_conv2d(x_image, w_conv1), b_conv1))
  h_pool1 = _max_pool_2x2(h_conv1)
  w_conv2 = _weight([3, 3, 6, 4])
  b_conv2 = _bias([4])
  h_conv2 = nn.relu(nn.bias_add(_conv2d(h_pool1, w_conv2), b_conv2))
  h_pool2 = _max_pool_2x2(h_conv2)
  return h_pool2
def two_layer_model():
    random_seed.set_random_seed(0)
    x = random_ops.truncated_normal([1, 784], seed=0)
    x_image = array_ops.reshape(x, [-1, 28, 28, 1])
    w_conv1 = weight([5, 5, 1, 32])
    b_conv1 = bias([32])
    h_conv1 = nn.relu(conv2d(x_image, w_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)
    w_conv2 = weight([5, 5, 32, 64])
    b_conv2 = bias([64])
    h_conv2 = nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)
    return h_pool2
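
The `two_layer_model` variants above call `weight`, `bias`, `conv2d`, and `max_pool_2x2` helpers that are not shown. A minimal sketch of such helpers follows; these are hypothetical stand-ins using the same TF 1.x internal-API imports as the examples, not the original definitions.

# Hypothetical stand-ins for the helpers assumed by two_layer_model (TF 1.x).
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables


def weight(shape):
  # Small random weights.
  return variables.Variable(random_ops.truncated_normal(shape, stddev=0.1))


def bias(shape):
  # Zero-initialized biases.
  return variables.Variable(array_ops.zeros(shape))


def conv2d(x, w):
  return nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')


def max_pool_2x2(x):
  return nn_ops.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')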
Example #26
    def graph(x, label):
      x = conv(x, 3, 1, 16)
      x = nn.relu(x)

      x = conv(x, 3, 1, 100)
      x = nn.relu(x)

      x = math_ops.reduce_mean(x, axis=[1, 2])
      loss = math_ops.reduce_mean(
          nn.sparse_softmax_cross_entropy_with_logits(logits=x, labels=label))

      opt = gradient_descent.GradientDescentOptimizer(0.01).minimize(loss)
      return loss, opt
Example #27
 def stage1(x, label):
   with variable_scope.variable_scope("stage1", use_resource=True):
     weight = variable_scope.get_variable(
         "w0",
         shape=[224, 48],
         dtype=np.float32,
         initializer=init_ops.ones_initializer())
     a = ipu_math_ops.serialized_matmul(
         x, weight, 2, serialization_dimension="a_rows_b_columns")
     a = nn.relu(a)
     b = fc(x, 48)
     b = nn.relu(b)
     return a + b, label
def two_layer_model():
  random_seed.set_random_seed(0)
  x = random_ops.truncated_normal([1, 784], seed=0)
  x_image = array_ops.reshape(x, [-1, 28, 28, 1])
  w_conv1 = weight([5, 5, 1, 32])
  b_conv1 = bias([32])
  h_conv1 = nn.relu(conv2d(x_image, w_conv1) + b_conv1)
  h_pool1 = max_pool_2x2(h_conv1)
  w_conv2 = weight([5, 5, 32, 64])
  b_conv2 = bias([64])
  h_conv2 = nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
  h_pool2 = max_pool_2x2(h_conv2)
  return h_pool2
Example #29
        def dropped_inputs_training():

            with ops.name_scope("drop_activation_training"):
                x = ops.convert_to_tensor(inputs, name="x")
                if not x.dtype.is_floating:
                    raise ValueError(
                        "x has to be a floating point tensor since it's going to"
                        " be scaled. Got a %s tensor instead." % x.dtype)
                if isinstance(self.p, numbers.Real) and not 0 < self.p <= 1:
                    raise ValueError(
                        "p must be a scalar tensor or a float in the "
                        "range (0, 1], got %g" % self.p)

                # Early return if there is nothing to be dropped
                if isinstance(self.p, float) and self.p == 1.:
                    return nn.relu(x)
                if context.executing_eagerly():
                    if isinstance(self.p, ops.EagerTensor):
                        if self.p.numpy() == 1:
                            return nn.relu(x)

                else:
                    p = ops.convert_to_tensor(self.p, dtype=x.dtype, name="p")
                    p.get_shape().assert_is_compatible_with(
                        tensor_shape.scalar())

                    # Do nothing if we know keep_prob == 1
                    if tensor_util.constant_value(p) == 1:
                        return nn.relu(x)

                    noise_shape = array_ops.shape(x)
                    random_tensor = 1 - p
                    random_tensor += random_ops.random_uniform(noise_shape,
                                                               seed=self.seed,
                                                               dtype=x.dtype)
                    # random_tensor ~ uniform distrib [1 - p, 2 - p), ex: [0.05, 1.05)

                    binary_tensor = math_ops.floor(random_tensor)
                    # in the binary tensor, ~5% of entries are set to 1 and ~95% are set to 0

                    # drop 95% of the negative part, keep all in the positive part
                    # old implementation:
                    # ret = - binary_tensor*nn.relu((-x)) + nn.relu(x)
                    # new implementation, only 1 relu operation
                    ret = binary_tensor * x + (1 - binary_tensor) * nn.relu(x)
                    if not context.executing_eagerly():
                        ret.set_shape(x.get_shape())
                    return ret
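
The core of the formula above is `ret = binary_tensor * x + (1 - binary_tensor) * relu(x)`: where the mask is 1 the input passes through unchanged, and where it is 0 the negative part is dropped. A small NumPy illustration of that behavior follows (independent of the TensorFlow code above).

# NumPy illustration of the drop-activation formula (not the original code).
import numpy as np

x = np.array([-2.0, -1.0, 0.5, 3.0])
b = np.array([1.0, 0.0, 0.0, 1.0])  # hypothetical binary mask

ret = b * x + (1.0 - b) * np.maximum(x, 0.0)
print(ret)  # [-2.  0.  0.5  3.]: entries with b == 0 behave like relu(x)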
 def get_simple_graph_def(self):
   """Create a simple graph and return its graph_def."""
   g = ops.Graph()
   with g.as_default():
     a = aops.placeholder(
         dtype=dtypes.float32, shape=(None, 24, 24, 2), name="input")
     e = cop.constant(
         [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
         name="weights",
         dtype=dtypes.float32)
     conv = nn.conv2d(
         input=a,
         filter=e,
         strides=[1, 2, 2, 1],
         padding="SAME",
         name="conv")
     b = cop.constant(
         [4., 1.5, 2., 3., 5., 7.], name="bias", dtype=dtypes.float32)
     t = nn.bias_add(conv, b, name="biasAdd")
     relu = nn.relu(t, "relu")
     idty = aops.identity(relu, "ID")
     v = nn_ops.max_pool(
         idty, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
     aops.squeeze(v, name="output")
   return g.as_graph_def()
def GetSingleEngineGraphDef(dtype=dtypes.float32):
  """Create a graph containing single segment."""
  g = ops.Graph()
  with g.as_default():
    inp = array_ops.placeholder(
        dtype=dtype, shape=[None] + INPUT_DIMS[1:], name=INPUT_NAME)
    with g.device("/GPU:0"):
      conv_filter = constant_op.constant(
          [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
          name="weights",
          dtype=dtype)
      conv = nn.conv2d(
          input=inp,
          filter=conv_filter,
          strides=[1, 2, 2, 1],
          padding="SAME",
          name="conv")
      bias = constant_op.constant(
          [4., 1.5, 2., 3., 5., 7.], name="bias", dtype=dtype)
      added = nn.bias_add(conv, bias, name="bias_add")
      relu = nn.relu(added, "relu")
      identity = array_ops.identity(relu, "identity")
      pool = nn_ops.max_pool(
          identity, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
    array_ops.squeeze(pool, name=OUTPUT_NAME)
  return g.as_graph_def()
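
A frozen graph_def like the one returned above is typically handed to TF-TRT for conversion. A minimal sketch follows, assuming a TF 1.x build with `tensorflow.contrib.tensorrt` (flag names and defaults have changed across versions).

# A minimal sketch, assuming tensorflow.contrib.tensorrt is available (TF 1.x).
import tensorflow.contrib.tensorrt as trt

graph_def = GetSingleEngineGraphDef()
trt_graph_def = trt.create_inference_graph(
    input_graph_def=graph_def,
    outputs=[OUTPUT_NAME],  # OUTPUT_NAME: the module-level constant used above
    max_batch_size=100,
    max_workspace_size_bytes=1 << 25,
    precision_mode="FP32")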
Example #32
def create_test_network():
  """Convolutional neural network for test.

  Returns:
    name_to_node: Dict keyed by node name, each entry containing the node's
      NodeDef.
  """
  g = ops.Graph()
  with g.as_default():
    # An input test image with unknown spatial resolution.
    x = array_ops.placeholder(
        dtypes.float32, (None, None, None, 1), name='input_image')
    # Left branch before first addition.
    l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
    # Right branch before first addition.
    l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]], name='L2_pad')
    l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
    l3 = slim.max_pool2d(l2, [3, 3], stride=2, scope='L3', padding='SAME')
    # First addition.
    l4 = nn.relu(l1 + l3, name='L4_relu')
    # Left branch after first addition.
    l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='SAME')
    # Right branch after first addition.
    l6 = slim.conv2d(l4, 1, [3, 3], stride=2, scope='L6', padding='SAME')
    # Final addition.
    gen_math_ops.add(l5, l6, name='L7_add')

  name_to_node = graph_compute_order.parse_graph_nodes(g.as_graph_def())
  return name_to_node
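
Each value in the returned `name_to_node` mapping is a NodeDef, so op types and inputs can be inspected directly. A short usage sketch:

# A minimal sketch: walk the NodeDef mapping returned above.
name_to_node = create_test_network()
for name, node in sorted(name_to_node.items()):
  print(name, node.op, list(node.input))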
Example #33
 def GetParams(self):
     dtype = dtypes.float32
     input_name = "input"
     input_dims = [[[1, 10, 10, 2]], [[2, 10, 10, 2]], [[4, 10, 10, 2]],
                   [[2, 10, 10, 2]]]
     expected_output_dims = [[[1, 10, 10, 1]], [[2, 10, 10, 1]],
                             [[4, 10, 10, 1]], [[2, 10, 10, 1]]]
     output_name = "output"
     g = ops.Graph()
     with g.as_default():
         x = array_ops.placeholder(dtype=dtype,
                                   shape=[None, 10, 10, 2],
                                   name=input_name)
         conv_filter = constant_op.constant(np.random.randn(3, 3, 2, 1),
                                            dtype=dtypes.float32)
         x = nn.conv2d(input=x,
                       filter=conv_filter,
                       strides=[1, 1, 1, 1],
                       padding="SAME",
                       name="conv")
         bias = constant_op.constant(np.random.randn(1, 10, 10, 1),
                                     dtype=dtypes.float32)
         x = math_ops.add(x, bias)
         x = nn.relu(x)
         x = array_ops.identity(x, name="output")
     return trt_test.TfTrtIntegrationTestParams(
         gdef=g.as_graph_def(),
         input_names=[input_name],
         input_dims=input_dims,
         output_names=[output_name],
         expected_output_dims=expected_output_dims)
def create_test_network():
    """Convolutional neural network for test.

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
    g = ops.Graph()
    with g.as_default():
        # An input test image with unknown spatial resolution.
        x = array_ops.placeholder(dtypes.float32, (None, None, None, 1),
                                  name='input_image')
        # Left branch before first addition.
        l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
        # Right branch before first addition.
        l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]],
                               name='L2_pad')
        l2 = slim.conv2d(l2_pad,
                         1, [3, 3],
                         stride=2,
                         scope='L2',
                         padding='VALID')
        l3 = slim.max_pool2d(l2, [3, 3], stride=2, scope='L3', padding='SAME')
        # First addition.
        l4 = nn.relu(l1 + l3, name='L4_relu')
        # Left branch after first addition.
        l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='SAME')
        # Right branch after first addition.
        l6 = slim.conv2d(l4, 1, [3, 3], stride=2, scope='L6', padding='SAME')
        # Final addition.
        gen_math_ops.add(l5, l6, name='L7_add')

    return g
def _matmul_act(x):
  """Matmul followed by activation."""
  i = array_ops.reshape(x, [8, 8])
  f = _weight([8, 8])
  x = math_ops.matmul(i, f)
  y = nn.relu(x)
  return y
Example #36
  def __init__(self,
               batchnorm_layer=None,
               training=True,
               validate_args=False,
               name="batch_normalization"):
    """Instantiates the `BatchNorm` bijector.

    Args:
      batchnorm_layer: `tf.layers.BatchNormalization` layer object. If `None`,
        defaults to
        `tf.layers.BatchNormalization(gamma_constraint=nn_ops.relu(x) + 1e-6)`.
        This ensures positivity of the scale variable.

      training: If True, updates running-average statistics during call to
        `inverse()`.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str` name given to ops managed by this object.
    Raises:
      ValueError: If batchnorm_layer is not an instance of
        `tf.layers.BatchNormalization`, or if it is specified with `renorm=True`
        or a virtual batch size.
    """
    # Scale must be positive.
    g_constraint = lambda x: nn.relu(x) + 1e-6
    self.batchnorm = batchnorm_layer or normalization.BatchNormalization(
        gamma_constraint=g_constraint)
    self._validate_bn_layer(self.batchnorm)
    self._training = training
    super(BatchNormalization, self).__init__(
        validate_args=validate_args, name=name)
Example #37
 def GetParams(self):
   dtype = dtypes.float32
   input_name = "input"
   input_dims = [[[1, 10, 10, 2]], [[2, 10, 10, 2]], [[4, 10, 10, 2]],
                 [[2, 10, 10, 2]]]
   expected_output_dims = [[[1, 10, 10, 1]], [[2, 10, 10, 1]], [[4, 10, 10,
                                                                 1]],
                           [[2, 10, 10, 1]]]
   output_name = "output"
   g = ops.Graph()
   with g.as_default():
     x = array_ops.placeholder(
         dtype=dtype, shape=[None, 10, 10, 2], name=input_name)
     conv_filter = constant_op.constant(
         np.random.randn(3, 3, 2, 1), dtype=dtypes.float32)
     x = nn.conv2d(
         input=x,
         filter=conv_filter,
         strides=[1, 1, 1, 1],
         padding="SAME",
         name="conv")
     bias = constant_op.constant(
         np.random.randn(1, 10, 10, 1), dtype=dtypes.float32)
     x = math_ops.add(x, bias)
     x = nn.relu(x)
     x = array_ops.identity(x, name="output")
   return trt_test.TfTrtIntegrationTestParams(
       gdef=g.as_graph_def(),
       input_names=[input_name],
       input_dims=input_dims,
       output_names=[output_name],
       expected_output_dims=expected_output_dims)
def GetSingleEngineGraphDef(dtype=dtypes.float32):
    """Create a graph containing single segment."""
    g = ops.Graph()
    with g.as_default():
        inp = array_ops.placeholder(dtype=dtype,
                                    shape=[None] + INPUT_DIMS[1:],
                                    name=INPUT_NAME)
        with g.device("/GPU:0"):
            conv_filter = constant_op.constant(
                [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
                name="weights",
                dtype=dtype)
            conv = nn.conv2d(input=inp,
                             filter=conv_filter,
                             strides=[1, 2, 2, 1],
                             padding="SAME",
                             name="conv")
            bias = constant_op.constant([4., 1.5, 2., 3., 5., 7.],
                                        name="bias",
                                        dtype=dtype)
            added = nn.bias_add(conv, bias, name="bias_add")
            relu = nn.relu(added, "relu")
            identity = array_ops.identity(relu, "identity")
            pool = nn_ops.max_pool(identity, [1, 2, 2, 1], [1, 2, 2, 1],
                                   "VALID",
                                   name="max_pool")
        array_ops.squeeze(pool, name=OUTPUT_NAME)
    return g.as_graph_def()
def create_test_network():
  """Convolutional neural network for test.

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
  g = ops.Graph()
  with g.as_default():
    # An input test image with unknown spatial resolution.
    x = array_ops.placeholder(
        dtypes.float32, (None, None, None, 1), name='input_image')
    # Left branch before first addition.
    l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
    # Right branch before first addition.
    l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]], name='L2_pad')
    l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
    l3 = slim.max_pool2d(l2, [3, 3], stride=2, scope='L3', padding='SAME')
    # First addition.
    l4 = nn.relu(l1 + l3, name='L4_relu')
    # Left branch after first addition.
    l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='SAME')
    # Right branch after first addition.
    l6 = slim.conv2d(l4, 1, [3, 3], stride=2, scope='L6', padding='SAME')
    # Final addition.
    gen_math_ops.add(l5, l6, name='L7_add')

  return g
 def _annotated_graph(self):
   graph = ops.Graph()
   with graph.as_default():
     random_seed.set_random_seed(2)
     current_activation = variable_scope.get_variable(
         name='start', shape=[1, 2, 2, 5])
     conv_filter = variable_scope.get_variable(
         name='filter', shape=[5, 5, 5, 5])
     for layer_number in range(3):
       with variable_scope.variable_scope('layer_{}'.format(layer_number)):
         after_conv = nn.conv2d(current_activation, conv_filter, [1, 1, 1, 1],
                                'SAME')
         current_activation = 2. * after_conv
         current_activation.op._set_attr(
             '_recompute_hint',
             # The value of the attribute does not matter; just that the key
             # exists in the op's attributes.
             attr_value_pb2.AttrValue(i=1))
         current_activation += 5.
         current_activation.op._set_attr(
             '_recompute_hint', attr_value_pb2.AttrValue(i=0))
         current_activation = nn.relu(current_activation)
         current_activation.op._set_attr(
             '_recompute_hint', attr_value_pb2.AttrValue(i=1))
     loss = math_ops.reduce_mean(current_activation)
     optimizer = train.AdamOptimizer(0.001)
     train_op = optimizer.minimize(loss)
     init_op = variables.global_variables_initializer()
   return graph, init_op, train_op
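
The `_recompute_hint` attributes set above only take effect when Grappler's memory optimizer runs with manual recomputation enabled. A minimal sketch of running the annotated graph under such a session config follows (assuming TF 1.x field and enum names from `rewriter_config_pb2`).

# A minimal sketch, assuming TF 1.x: enable the manual memory optimizer so the
# '_recompute_hint' annotations above are acted on.
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session

graph, init_op, train_op = self._annotated_graph()
rewrite_options = rewriter_config_pb2.RewriterConfig(
    memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL)
config = config_pb2.ConfigProto(
    graph_options=config_pb2.GraphOptions(rewrite_options=rewrite_options))
with session.Session(config=config, graph=graph) as sess:
  sess.run(init_op)
  sess.run(train_op)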
Example #41
 def GraphFn(self, x):
   dtype = x.dtype
   x, _, _ = nn_impl.fused_batch_norm(
       x, [1.0, 1.0], [0.0, 0.0],
       mean=[0.5, 0.5],
       variance=[1.0, 1.0],
       data_format="NCHW",
       is_training=False)
   e = constant_op.constant(
       np.random.randn(1, 1, 2, 6), name="weights", dtype=dtype)
   conv = nn.conv2d(
       input=x,
       filter=e,
       data_format="NCHW",
       strides=[1, 1, 2, 2],
       padding="SAME",
       name="conv")
   b = constant_op.constant(np.random.randn(6), name="bias", dtype=dtype)
   t = nn.bias_add(conv, b, data_format="NCHW", name="biasAdd")
   relu = nn.relu(t, "relu")
   idty = array_ops.identity(relu, "ID")
   v = nn_ops.max_pool(
       idty, [1, 1, 2, 2], [1, 1, 2, 2],
       "VALID",
       data_format="NCHW",
       name="max_pool")
   return array_ops.squeeze(v, name="output_0")
Example #42
 def GetParams(self):
   """Single vgg layer test in TF-TRT conversion."""
   dtype = dtypes.float32
   input_name = "input"
   input_dims = [5, 8, 8, 2]
   output_name = "output"
   g = ops.Graph()
   with g.as_default():
     x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
     x, _, _ = nn_impl.fused_batch_norm(
         x, [1.0, 1.0], [0.0, 0.0],
         mean=[0.5, 0.5],
         variance=[1.0, 1.0],
         is_training=False)
     e = constant_op.constant(
         np.random.randn(1, 1, 2, 6), name="weights", dtype=dtype)
     conv = nn.conv2d(
         input=x, filter=e, strides=[1, 2, 2, 1], padding="SAME", name="conv")
     b = constant_op.constant(np.random.randn(6), name="bias", dtype=dtype)
     t = nn.bias_add(conv, b, name="biasAdd")
     relu = nn.relu(t, "relu")
     idty = array_ops.identity(relu, "ID")
     v = nn_ops.max_pool(
         idty, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
     array_ops.squeeze(v, name=output_name)
   return trt_test.TfTrtIntegrationTestParams(
       gdef=g.as_graph_def(),
       input_names=[input_name],
       input_dims=[input_dims],
       output_names=[output_name],
       expected_output_dims=[(5, 2, 2, 6)])
Example #43
 def _GetMetaGraph(self,
                   batch_size=14,
                   image_dim=12,
                   optimizer_scope_name=''):
     """A simple layered graph with conv, an intermediate op, and a ReLU."""
     graph = ops.Graph()
     with graph.as_default():
         random_seed.set_random_seed(1)
         current_activation = variable_scope.get_variable(
             name='start', shape=[batch_size, image_dim, image_dim, 5])
         conv_filter = variable_scope.get_variable(name='filter',
                                                   shape=[5, 5, 5, 5])
         for layer_number in range(10):
             with variable_scope.variable_scope(
                     'layer_{}'.format(layer_number)):
                 after_conv = nn.conv2d(current_activation, conv_filter,
                                        [1, 1, 1, 1], 'SAME')
                 current_activation = 2. * after_conv
                 current_activation = nn.relu(current_activation)
         loss = math_ops.reduce_mean(current_activation)
         with ops.name_scope(optimizer_scope_name):
             optimizer = train.AdamOptimizer(0.001)
             train_op = optimizer.minimize(loss)
         init_op = variables.global_variables_initializer()
         metagraph = train.export_meta_graph()
     return (metagraph, init_op.name, train_op.name, loss.name)
def _matmul_act(x):
  """Matmul followed by activation."""
  i = array_ops.reshape(x, [8, 8])
  f = _weight([8, 8])
  x = math_ops.matmul(i, f)
  y = nn.relu(x)
  return y
Example #45
  def testDepthwiseConvBackpropFilter1x1WithRelu(self):
    with ops.device("/device:IPU:0"):
      pa = array_ops.placeholder(np.float32, [1, 6, 6, 3], name="a")
      pb = constant_op.constant([1, 1, 3, 2], dtype=np.int32)  # filter sizes
      pc = array_ops.placeholder(np.float32, [1, 6, 6, 6], name="c")
      c = nn.depthwise_conv2d_native_backprop_filter(
          pa, pb, pc, strides=[1, 1, 1, 1], padding="SAME")
      c = nn.relu(c)

    with ops.device('cpu'):
      report = gen_ipu_ops.ipu_event_trace()

    tu.configure_ipu_system()

    with tu.ipu_session() as sess:
      sess.run(report)

      fd = {pa: np.zeros([1, 6, 6, 3]), pc: np.zeros([1, 6, 6, 6])}
      result = sess.run(c, fd)
      self.assertAllClose(result, np.zeros([1, 1, 3, 2]))

      result = sess.run(report)

      s = tu.extract_all_strings_from_event_trace(result)
      cs_list = tu.get_compute_sets_from_report(s)

      ok = [
          '__seed*', 'Copy_',
          'DepthwiseConv2dNativeBackpropFilter/fusion*/Conv_6x6',
          'Relu/custom-call*/Nonlinearity'
      ]
      self.assertTrue(tu.check_all_compute_sets_and_list(cs_list, ok))
Example #46
 def _annotated_graph(self):
     graph = ops.Graph()
     with graph.as_default():
         random_seed.set_random_seed(2)
         current_activation = variable_scope.get_variable(
             name='start', shape=[1, 2, 2, 5])
         conv_filter = variable_scope.get_variable(name='filter',
                                                   shape=[5, 5, 5, 5])
         for layer_number in range(3):
             with variable_scope.variable_scope(
                     'layer_{}'.format(layer_number)):
                 after_conv = nn.conv2d(current_activation, conv_filter,
                                        [1, 1, 1, 1], 'SAME')
                 current_activation = 2. * after_conv
                 current_activation.op._set_attr(
                     '_recompute_hint',
                     # The value of the attribute does not matter; just that the key
                     # exists in the op's attributes.
                     attr_value_pb2.AttrValue(i=1))
                 current_activation += 5.
                 current_activation.op._set_attr(
                     '_recompute_hint', attr_value_pb2.AttrValue(i=0))
                 current_activation = nn.relu(current_activation)
                 current_activation.op._set_attr(
                     '_recompute_hint', attr_value_pb2.AttrValue(i=1))
         loss = math_ops.reduce_mean(current_activation)
         optimizer = train.AdamOptimizer(0.001)
         train_op = optimizer.minimize(loss)
         init_op = variables.global_variables_initializer()
     return graph, init_op, train_op
Example #47
def get_simple_graph_def():
    """Create a simple graph and return its graph_def."""
    g = ops.Graph()
    with g.as_default():
        a = aops.placeholder(dtype=dtypes.float32,
                             shape=(None, 24, 24, 2),
                             name="input")
        e = cop.constant(
            [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
            name="weights",
            dtype=dtypes.float32)
        conv = nn.conv2d(input=a,
                         filter=e,
                         strides=[1, 2, 2, 1],
                         padding="SAME",
                         name="conv")
        b = cop.constant([4., 1.5, 2., 3., 5., 7.],
                         name="bias",
                         dtype=dtypes.float32)
        t = nn.bias_add(conv, b, name="biasAdd")
        relu = nn.relu(t, "relu")
        idty = aops.identity(relu, "ID")
        v = nn_ops.max_pool(idty, [1, 2, 2, 1], [1, 2, 2, 1],
                            "VALID",
                            name="max_pool")
        aops.squeeze(v, name="output")
    return g.as_graph_def()
Example #48
  def _get_plugin_graph_def(self):
    """Create a simple graph and return its graph_def."""
    g = ops.Graph()
    with g.as_default():
      a = array_ops.placeholder(
          dtype=dtypes.float32, shape=(None, 24, 24, 2), name="input")
      relu = nn.relu(a, "relu")
      v = nn_ops.max_pool(
          relu, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")

      # insert custom_op in the graph
      v = custom_plugin_examples.inc_op(v, inc=[16.5], name="plugin_test")

      v *= 2.0
      v = nn.relu(v)
      v = nn.relu(v)
      array_ops.squeeze(v, name="output")
    return g.as_graph_def()
def create_test_network_5():
  """Single-path network for testing non-square kernels.

  The graph is similar to the right branch of the graph from
  create_test_network_1(), except that the kernel sizes are changed to be
  non-square.

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
  g = ops.Graph()
  with g.as_default():
    # An 8x8 test image.
    x = array_ops.placeholder(dtypes.float32, (1, 8, 8, 1), name='input_image')
    # Two convolutional layers, where the first one has a non-square kernel.
    l1 = slim.conv2d(x, 1, [3, 5], stride=2, scope='L1', padding='VALID')
    l2 = slim.conv2d(l1, 1, [3, 1], stride=2, scope='L2', padding='VALID')
    # ReLU.
    nn.relu(l2, name='output')
  return g
def create_test_network_3():
  """Misaligned network for test.

  The graph corresponds to the example from the first figure in
  go/cnn-rf-computation#arbitrary-computation-graphs

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
  g = ops.Graph()
  with g.as_default():
    # An 8x8 test image.
    x = array_ops.placeholder(dtypes.float32, (1, 8, 8, 1), name='input_image')
    # Left branch.
    l1_pad = array_ops.pad(x, [[0, 0], [2, 1], [2, 1], [0, 0]])
    l1 = slim.conv2d(l1_pad, 1, [5, 5], stride=2, scope='L1', padding='VALID')
    # Right branch.
    l2 = slim.conv2d(x, 1, [3, 3], stride=1, scope='L2', padding='VALID')
    l3 = slim.conv2d(l2, 1, [3, 3], stride=1, scope='L3', padding='VALID')
    # Addition.
    nn.relu(l1 + l3, name='output')
  return g
def _make_node_with_color(color, input_tensor, name=None):
  """Returns a node representative of the specified list type."""
  color = color.lower()
  if color == 'w':  # White node
    weights = _weight(input_tensor.get_shape().as_list())
    return math_ops.matmul(input_tensor, weights, name=name)
  if color == 'g':  # Gray node
    return math_ops.add(input_tensor, 0.1, name=name)
  if color == 'c':  # Clear node
    return nn.relu(input_tensor, name=name)
  if color == 'b':  # Black node
    return math_ops.sqrt(math_ops.pow(input_tensor, 2.), name=name)
  raise ValueError('Invalid node color: ' + str(color))
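
A short sketch of chaining `_make_node_with_color` to build a mixed white/gray/clear/black test graph follows; `_weight` here is a hypothetical stand-in for the helper the function expects.

# A minimal sketch (TF 1.x); _weight is a hypothetical stand-in.
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables


def _weight(shape):
  return variables.Variable(array_ops.ones(shape, dtype=dtypes.float32))


x = array_ops.ones([4, 4], dtype=dtypes.float32)
node = x
for i, color in enumerate('wgcb'):
  node = _make_node_with_color(color, node, name='node_%d' % i)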
def create_test_network_6():
  """Aligned network with dropout for test.

  The graph is similar to create_test_network_1(), except that the right branch
  has dropout normalization.

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
  g = ops.Graph()
  with g.as_default():
    # An 8x8 test image.
    x = array_ops.placeholder(dtypes.float32, (1, 8, 8, 1), name='input_image')
    # Left branch.
    l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
    # Right branch.
    l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]])
    l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
    l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='VALID')
    dropout = slim.dropout(l3)
    # Addition.
    nn.relu(l1 + dropout, name='output')
  return g
Example #53
def create_test_network_1():
  """Aligned network for test.

  The graph corresponds to the example from the second figure in
  go/cnn-rf-computation#arbitrary-computation-graphs

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
  g = ops.Graph()
  with g.as_default():
    # An input test image with unknown spatial resolution.
    x = array_ops.placeholder(
        dtypes.float32, (None, None, None, 1), name='input_image')
    # Left branch.
    l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
    # Right branch.
    l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]])
    l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
    l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='VALID')
    # Addition.
    nn.relu(l1 + l3, name='output')
  return g
  def GetParams(self):
    # TODO(laigd): we should test the following cases:
    # - batch size is not changed, other dims are changing
    # - batch size is decreasing, other dims are identical
    # - batch size is decreasing, other dims are changing
    # - batch size is increasing, other dims are identical
    # - batch size is increasing, other dims are changing
    input_dims = [[[1, 5, 5, 1]], [[10, 5, 5, 1]], [[3, 5, 5, 1]],
                  [[1, 5, 5, 1]], [[1, 3, 1, 1]], [[2, 9, 9, 1]],
                  [[1, 224, 224, 1]], [[1, 128, 224, 1]]]
    expected_output_dims = input_dims

    g = ops.Graph()
    with g.as_default():
      x = array_ops.placeholder(
          shape=(None, None, None, 1), dtype=dtypes.float32, name="input")
      conv_filter1 = constant_op.constant(
          np.ones([3, 3, 1, 8]), name="weights1", dtype=dtypes.float32)
      bias1 = constant_op.constant(np.random.randn(8), dtype=dtypes.float32)
      x = nn.conv2d(
          input=x,
          filter=conv_filter1,
          strides=[1, 1, 1, 1],
          padding="SAME",
          name="conv")
      x = nn.bias_add(x, bias1)
      x = nn.relu(x)
      conv_filter2 = constant_op.constant(
          np.ones([3, 3, 8, 1]), name="weights2", dtype=dtypes.float32)
      bias2 = constant_op.constant(np.random.randn(1), dtype=dtypes.float32)
      x = nn.conv2d(
          input=x,
          filter=conv_filter2,
          strides=[1, 1, 1, 1],
          padding="SAME",
          name="conv")
      x = nn.bias_add(x, bias2)
      x = array_ops.identity(x, name="output")

    return trt_test.TfTrtIntegrationTestParams(
        gdef=g.as_graph_def(),
        input_names=["input"],
        input_dims=input_dims,
        output_names=["output"],
        expected_output_dims=expected_output_dims)
 def GetParams(self):
   """Single vgg layer in NCHW unit tests in TF-TRT."""
   dtype = dtypes.float32
   input_name = "input"
   input_dims = [5, 2, 8, 8]
   g = ops.Graph()
   with g.as_default():
     x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
     x, _, _ = nn_impl.fused_batch_norm(
         x,
         np.random.randn(2).astype(np.float32),
         np.random.randn(2).astype(np.float32),
         mean=np.random.randn(2).astype(np.float32),
         variance=np.random.randn(2).astype(np.float32),
         data_format="NCHW",
         is_training=False)
     e = constant_op.constant(
         np.random.randn(1, 1, 2, 6), name="weights", dtype=dtype)
     conv = nn.conv2d(
         input=x,
         filter=e,
         data_format="NCHW",
         strides=[1, 1, 2, 2],
         padding="SAME",
         name="conv")
     b = constant_op.constant(np.random.randn(6), name="bias", dtype=dtype)
     t = nn.bias_add(conv, b, data_format="NCHW", name="biasAdd")
     relu = nn.relu(t, "relu")
     idty = array_ops.identity(relu, "ID")
     v = nn_ops.max_pool(
         idty, [1, 1, 2, 2], [1, 1, 2, 2],
         "VALID",
         data_format="NCHW",
         name="max_pool")
     array_ops.squeeze(v, name="output")
   return trt_test.TfTrtIntegrationTestParams(
       gdef=g.as_graph_def(),
       input_names=[input_name],
       input_dims=[input_dims],
       num_expected_engines=1,
       expected_output_dims=(5, 6, 2, 2),
       allclose_atol=1.e-03,
       allclose_rtol=1.e-03)
Example #56
 def GetParams(self):
   """Create a graph containing single segment."""
   # TODO(aaroey): test graph with different dtypes.
   dtype = dtypes.float32
   input_name = "input"
   input_dims = [100, 24, 24, 2]
   g = ops.Graph()
   with g.as_default():
     inp = array_ops.placeholder(
         dtype=dtype, shape=[None] + input_dims[1:], name=input_name)
     with g.device("/GPU:0"):
       conv_filter = constant_op.constant(
           [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
           name="weights",
           dtype=dtype)
       conv = nn.conv2d(
           input=inp,
           filter=conv_filter,
           strides=[1, 2, 2, 1],
           padding="SAME",
           name="conv")
       bias = constant_op.constant(
           [4., 1.5, 2., 3., 5., 7.], name="bias", dtype=dtype)
       added = nn.bias_add(conv, bias, name="bias_add")
       relu = nn.relu(added, "relu")
       identity = array_ops.identity(relu, "identity")
       pool = nn_ops.max_pool(
           identity, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
     array_ops.squeeze(pool, name=self.output_name)
   return trt_test.TfTrtIntegrationTestParams(
       gdef=g.as_graph_def(),
       input_names=[input_name],
       input_dims=[input_dims],
       # TODO(aaroey): LayoutOptimizer adds additional nodes to the graph which
       # breaks the connection check, fix it.
       # - my_trt_op_0 should have ["weights", "conv", "bias", "bias_add",
       #   "relu", "identity", "max_pool"]
       expected_engines=["my_trt_op_0"],
       expected_output_dims=(100, 6, 6, 6),
       allclose_atol=1.e-03,
       allclose_rtol=1.e-03)
Example #57
 def _GetMetaGraph(self, batch_size=14, image_dim=12, optimizer_scope_name=''):
   """A simple layered graph with conv, an intermediate op, and a ReLU."""
   graph = ops.Graph()
   with graph.as_default():
     random_seed.set_random_seed(1)
     current_activation = variable_scope.get_variable(
         name='start', shape=[batch_size, image_dim, image_dim, 5])
     conv_filter = variable_scope.get_variable(
         name='filter', shape=[5, 5, 5, 5])
     for layer_number in range(10):
       with variable_scope.variable_scope('layer_{}'.format(layer_number)):
         after_conv = nn.conv2d(current_activation, conv_filter, [1, 1, 1, 1],
                                'SAME')
         current_activation = 2. * after_conv
         current_activation = nn.relu(current_activation)
     loss = math_ops.reduce_mean(current_activation)
     with ops.name_scope(optimizer_scope_name):
       optimizer = train.AdamOptimizer(0.001)
       train_op = optimizer.minimize(loss)
     init_op = variables.global_variables_initializer()
     metagraph = train.export_meta_graph()
   return (metagraph, init_op.name, train_op.name, loss.name)
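
The metagraph produced above is typically fed to Grappler's memory optimizer. A minimal, illustrative sketch follows; note that `tf_optimizer.OptimizeGraph` has taken slightly different first arguments across TF versions, so treat the call as an assumption rather than a fixed API.

# A minimal sketch, assuming a TF 1.x grappler API where OptimizeGraph accepts
# a RewriterConfig directly.
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.grappler import tf_optimizer

metagraph, init_op_name, train_op_name, loss_name = self._GetMetaGraph()
rewriter_config = rewriter_config_pb2.RewriterConfig(
    disable_model_pruning=True,
    memory_optimization=rewriter_config_pb2.RewriterConfig.HEURISTICS)
optimized_graph_def = tf_optimizer.OptimizeGraph(rewriter_config, metagraph)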
Example #58
 def GetParams(self):
   """Create a graph containing single segment."""
   # TODO(aaroey): test graph with different dtypes.
   dtype = dtypes.float32
   input_name = "input"
   input_dims = [100, 24, 24, 2]
   output_name = "output"
   g = ops.Graph()
   with g.as_default():
     inp = array_ops.placeholder(
         dtype=dtype, shape=[None] + input_dims[1:], name=input_name)
     with g.device("/GPU:0"):
       conv_filter = constant_op.constant(
           [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
           name="weights",
           dtype=dtype)
       conv = nn.conv2d(
           input=inp,
           filter=conv_filter,
           strides=[1, 2, 2, 1],
           padding="SAME",
           name="conv")
       bias = constant_op.constant([4., 1.5, 2., 3., 5., 7.],
                                   name="bias",
                                   dtype=dtype)
       added = nn.bias_add(conv, bias, name="bias_add")
       relu = nn.relu(added, "relu")
       identity = array_ops.identity(relu, "identity")
       pool = nn_ops.max_pool(
           identity, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
     array_ops.squeeze(pool, name=output_name)
   return trt_test.TfTrtIntegrationTestParams(
       gdef=g.as_graph_def(),
       input_names=[input_name],
       input_dims=[input_dims],
       output_names=[output_name],
       expected_output_dims=[(100, 6, 6, 6)])