def testMaxPoolV2(self):
    """MaxPoolV2 with constant ksize and placeholder strides.

    Runs the two-layer model once without and once with the layout
    optimizer, then checks that the optimizer inserted exactly the
    expected layout-conversion nodes and that the outputs agree.
    """
    if not test.is_gpu_available(cuda_only=True):
      return
    random_seed.set_random_seed(0)
    x = random_ops.truncated_normal([1, 784], seed=0)
    conv = _two_layer_model(x)
    ksize = constant_op.constant([1, 2, 3, 1], shape=[4])
    strides = array_ops.placeholder(dtype='int32', shape=[4])
    max_pool = gen_nn_ops._max_pool_v2(conv, ksize, strides, 'VALID')
    output = array_ops.identity(max_pool)

    feed = {strides: [1, 3, 2, 1]}

    # Reference run: default config, no layout optimization.
    with session.Session() as sess:
      output_val_ref = sess.run(output, feed_dict=feed)

    # Optimized run: capture run metadata so the cost graph can be inspected.
    metadata = config_pb2.RunMetadata()
    with session.Session(config=_get_config()) as sess:
      output_val = sess.run(output, run_metadata=metadata, feed_dict=feed)

    nodes = [node.name for node in metadata.cost_graph.node]
    num_transposes = sum(1 for name in nodes if _is_transpose(name))

    # One NHWC->NCHW transpose feeding Conv2D and one back after the pool;
    # the placeholder strides get a vector permute rather than a transpose.
    self.assertEqual(2, num_transposes)
    self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
    self._assert_trans_nchw_to_nhwc('MaxPoolV2-0-0', nodes)
    self._assert_vec_nhwc_to_nchw('MaxPoolV2-2', nodes)
    self.assertIn('MaxPoolV2-1-LayoutOptimizer', nodes)
    self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testMaxPoolV2(self):
    """MaxPoolV2 layout-optimizer test using hard-coded optimizer node names.

    Same graph as the helper-assertion variant: constant ksize, strides
    fed through a placeholder, outputs compared between the plain and the
    layout-optimized session runs.
    """
    if not test.is_gpu_available(cuda_only=True):
      return
    random_seed.set_random_seed(0)
    x = random_ops.truncated_normal([1, 784], seed=0)
    conv = _two_layer_model(x)
    ksize = constant_op.constant([1, 2, 3, 1], shape=[4])
    strides = array_ops.placeholder(dtype='int32', shape=[4])
    max_pool = gen_nn_ops._max_pool_v2(conv, ksize, strides, 'VALID')
    output = array_ops.identity(max_pool)

    feed = {strides: [1, 3, 2, 1]}

    # Baseline: run without the layout optimizer.
    with session.Session() as sess:
      output_val_ref = sess.run(output, feed_dict=feed)

    # Optimized run; the cost graph records the rewritten node names.
    metadata = config_pb2.RunMetadata()
    with session.Session(config=_get_config()) as sess:
      output_val = sess.run(output, run_metadata=metadata, feed_dict=feed)

    nodes = [node.name for node in metadata.cost_graph.node]
    num_transposes = sum(
        1 for name in nodes
        if name.startswith('LayoutOptimizerTranspose'))

    # Exactly one transpose into NCHW before Conv2D and one back to NHWC
    # after the pool; strides are handled by a vector permute node.
    self.assertEqual(2, num_transposes)
    self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
    self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-MaxPoolV2-0-0', nodes)
    self.assertIn('LayoutOptimizerVecPermuteNHWCToNCHW_MaxPoolV2_2', nodes)
    self.assertIn('LayoutOptimizer-MaxPoolV2-Const_2', nodes)
    self.assertAllClose(output_val_ref, output_val, atol=1e-3)
# Example #3 (original snippet-separator marker: "예제 #3")
 def model(data):
     """Forward pass: conv + ReLU + max-pool, then two dense layers.

     NOTE(review): this function references `self`, `layer*_weights/biases`,
     `D`, and `depth` without defining them — presumably it is a closure
     defined inside a method of a model class; confirm against the caller.

     Returns a tuple of (pre-softmax logits, last hidden activations).
     """
     # assumes `data` is a 4-D NHWC image batch — TODO confirm
     conv = tf.nn.conv2d(data,
                         layer1_weights, [1, 1, 1, 1],
                         padding='SAME')
     hidden = tf.nn.relu(conv + layer1_biases)
     # Pool window and stride are both fed from the same placeholder, so
     # the pooling is non-overlapping with a runtime-chosen window size.
     hidden = gen_nn_ops._max_pool_v2(
         hidden,
         ksize=[1, self.max_pool_window_size_ph, 1, 1],
         strides=[1, self.max_pool_window_size_ph, 1, 1],
         padding='SAME')
     # Static batch size; requires the batch dimension to be known.
     N = data.get_shape().as_list()[0]
     # Flatten to [batch, 9 * D * depth]; presumably 9 = 3x3 spatial size
     # after pooling — verify against the input dimensions.
     reshape = tf.reshape(hidden, [N, 9 * D * depth])
     hidden_no_relu = tf.matmul(reshape, layer2_weights) + layer2_biases
     hidden = tf.nn.relu(hidden_no_relu)
     return (tf.matmul(hidden, layer3_weights) + layer3_biases), hidden
    def __init__(self, inputs, sequence_length,embedding_size, num_filters, filter_sizes, dropout_keep_prob,scope):
        """Build a 1-D conv + max-over-time pooling text encoder.

        For each window width in `filter_sizes`, applies a conv1d bank of
        `num_filters` filters followed by ReLU and a max-pool over the full
        sequence, then concatenates the pooled features, flattens them, and
        applies dropout. Exposes `h_pool`, `h_pool_flat`, and `h_drop`.
        """
        self.embedded_chars = inputs
        with tf.name_scope("cnn_subtract" + scope):
            pooled_outputs = []
            for width in filter_sizes:
                with tf.name_scope("conv-maxpool-%s" % width):
                    # One filter bank per window width.
                    kernel = tf.Variable(
                        tf.truncated_normal(
                            [width, embedding_size, num_filters], stddev=0.1),
                        name="W")
                    bias = tf.Variable(
                        tf.constant(0.1, shape=[num_filters]), name="b")
                    conv_out = tf.nn.conv1d(
                        self.embedded_chars,
                        kernel,
                        stride=1,
                        padding="SAME",
                        name="conv")
                    activated = tf.nn.relu(
                        tf.nn.bias_add(conv_out, bias), name="relu")
                    # Insert a trailing unit dimension so the 4-D pooling
                    # op can consume the conv1d output.
                    expanded = tf.expand_dims(activated, -1)
                    # Max over the entire sequence ("max-over-time" pooling).
                    pooled = gen_nn_ops._max_pool_v2(
                        expanded,
                        ksize=[1, sequence_length, 1, 1],
                        strides=[1, 1, 1, 1],
                        padding='VALID',
                        name="pool")
                    pooled_outputs.append(pooled)

            # Concatenate the per-width features and flatten to a vector.
            num_filters_total = num_filters * len(filter_sizes)
            self.h_pool = tf.concat(pooled_outputs, 3)
            self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])

            # Dropout on the flattened feature vector.
            self.h_drop = tf.nn.dropout(self.h_pool_flat, dropout_keep_prob)