def _train(self, checkpoint_path, layout_optimizer=False, restore=False):
    ops.reset_default_graph()
    graph = ops.get_default_graph()
    with session.Session(
        config=get_config(layout_optimizer), graph=graph) as sess:
      batch = 2
      height = 6
      width = 7
      input_channels = 3
      shape = [batch, height, width, input_channels]
      image = array_ops.placeholder(dtype='float32', shape=shape)
      conv1 = conv_layers.conv2d(image, 32, [3, 3])
      conv2 = conv_layers.conv2d(conv1, 32, [3, 3])
      optimizer = gradient_descent.GradientDescentOptimizer(0.01)
      loss = math_ops.reduce_mean(conv2)
      train_op = optimizer.minimize(loss)
      saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)

      if restore:
        saver.restore(sess, checkpoint_path)
      else:
        sess.run(variables.global_variables_initializer())

      np.random.seed(0)
      for _ in range(2):
        image_val = np.random.rand(*shape).astype(np.float32)
        sess.run([loss, train_op], feed_dict={image: image_val})

      if restore:
        all_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
        all_vars_values = [var.eval(session=sess) for var in all_vars]
        return all_vars_values
      else:
        saver.save(sess, checkpoint_path)
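A minimal sketch of how this helper might be driven (the checkpoint path below is hypothetical): save a checkpoint from a run without the layout optimizer, then restore it with the optimizer enabled and check that the variable values still match.

#   self._train('/tmp/layout_ckpt')                               # train + save
#   vars_plain = self._train('/tmp/layout_ckpt', restore=True)
#   vars_nchw = self._train('/tmp/layout_ckpt', layout_optimizer=True,
#                           restore=True)
#   for v0, v1 in zip(vars_plain, vars_nchw):
#       assert np.allclose(v0, v1, atol=1e-5)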
Example #2
    def testGradient(self):
        if not test.is_gpu_available(cuda_only=True):
            self.skipTest('GPU required')

        random_seed.set_random_seed(0)
        x = random_ops.truncated_normal([1, 200, 200, 3], seed=0)
        y = conv_layers.conv2d(x, 32, [3, 3])
        z = conv_layers.conv2d(y, 32, [3, 3])
        optimizer = gradient_descent.GradientDescentOptimizer(1e-4)
        loss = math_ops.reduce_mean(z)
        train_op = optimizer.minimize(loss)
        graph = ops.get_default_graph()
        graph.add_to_collection('train_op', train_op)
        meta_graph = saver.export_meta_graph(graph_def=graph.as_graph_def())

        rewrite_options = rewriter_config_pb2.RewriterConfig(
            optimize_tensor_layout=True)
        optimized_graph = tf_optimizer.OptimizeGraph(rewrite_options,
                                                     meta_graph)

        found = 0
        for node in optimized_graph.node:
            if node.op in [
                    'Conv2D', 'Conv2DBackpropFilter', 'Conv2DBackpropInput'
            ]:
                found += 1
                self.assertEqual(node.attr['data_format'].s, 'NCHW')
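        # Expect 5 layout-sensitive ops: 2 forward Conv2D, 2 Conv2DBackpropFilter,
        # and 1 Conv2DBackpropInput (no input gradient is needed for the first
        # conv, since its input is not a variable).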
        self.assertEqual(found, 5)
 def testFunctionalConv2DNoReuse(self):
   height, width = 7, 9
   images = random_ops.random_uniform((5, height, width, 3), seed=1)
   conv_layers.conv2d(images, 32, [3, 3])
   self.assertEqual(len(variables.trainable_variables()), 2)
   conv_layers.conv2d(images, 32, [3, 3])
   self.assertEqual(len(variables.trainable_variables()), 4)
  def testInvalidStrides(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    with self.assertRaisesRegexp(ValueError, 'strides'):
      conv_layers.conv2d(images, 32, 3, strides=(1, 2, 3))

    with self.assertRaisesRegexp(ValueError, 'strides'):
      conv_layers.conv2d(images, 32, 3, strides=None)
  def testInvalidKernelSize(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    with self.assertRaisesRegexp(ValueError, 'kernel_size'):
      conv_layers.conv2d(images, 32, (1, 2, 3))

    with self.assertRaisesRegexp(ValueError, 'kernel_size'):
      conv_layers.conv2d(images, 32, None)
Example #11
    def testGroupNormalizeInference(self):
        with self.session() as sess:
            with ops.device("/device:IPU:0"):
                x = array_ops.placeholder(np.float32, shape=[1, 4, 4, 2])

                with variable_scope.variable_scope("vs", use_resource=True):
                    y = convolutional.conv2d(
                        x,
                        2,
                        1,
                        use_bias=False,
                        kernel_initializer=init_ops.ones_initializer())
                    gamma = constant_op.constant([0.5, 0.5], np.float32)
                    beta = constant_op.constant([0.5, 0.5], np.float32)
                    mean = constant_op.constant([0.5, 0.5], np.float32)
                    inv_std_dev = constant_op.constant([0.5, 0.5], np.float32)
                    y = gen_popnn_ops.popnn_group_norm_inference(
                        inputs=y,
                        gamma=gamma,
                        beta=beta,
                        mean=mean,
                        inv_std_dev=inv_std_dev,
                        data_format="NHWC",
                        epsilon=0.0015,
                        num_groups=2)
                    y = convolutional.conv2d(
                        y,
                        2,
                        1,
                        use_bias=False,
                        kernel_initializer=init_ops.ones_initializer())
                    y = gen_popnn_ops.popnn_group_norm_inference(
                        inputs=y,
                        gamma=gamma,
                        beta=beta,
                        mean=mean,
                        inv_std_dev=inv_std_dev,
                        data_format="NHWC",
                        epsilon=0.0015,
                        num_groups=2)

            report = ReportJSON(self, sess)

            sess.run(variables.global_variables_initializer())

            report.reset()

            sess.run(y, {x: np.zeros([1, 4, 4, 2])})

            report.parse_log()

            # Would fail if there were two separate group norms in the graph
            ok = [
                '__seed*', 'Copy_',
                'vs/conv2d/Conv2D/convolution.*/Conv_1x1/Convolve',
                'vs/PopnnGroupNormInference/group-norm-inference*/'
            ]
            report.assert_all_compute_sets_and_list(ok)
 def testFunctionalConv2DReuseFromScope(self):
   with variable_scope.variable_scope('scope'):
     height, width = 7, 9
     images = random_ops.random_uniform((5, height, width, 3), seed=1)
     conv_layers.conv2d(images, 32, [3, 3], name='conv1')
     self.assertEqual(len(variables.trainable_variables()), 2)
   with variable_scope.variable_scope('scope', reuse=True):
     conv_layers.conv2d(images, 32, [3, 3], name='conv1')
     self.assertEqual(len(variables.trainable_variables()), 2)
    def testBatchNormAndGroupNormalizeMixedInference(self):
        with ops.device("/device:IPU:0"):
            x = array_ops.placeholder(np.float32, shape=[1, 4, 4, 2])

            with variable_scope.variable_scope("vs", use_resource=True):
                y = convolutional.conv2d(
                    x,
                    2,
                    1,
                    use_bias=False,
                    kernel_initializer=init_ops.ones_initializer())
                gamma = constant_op.constant([0.5, 0.5], np.float32)
                beta = constant_op.constant([0.5, 0.5], np.float32)
                mean = constant_op.constant([0.5, 0.5], np.float32)
                inv_std_dev = constant_op.constant([0.5, 0.5], np.float32)
                y = gen_popnn_ops.popnn_group_norm_inference(
                    inputs=y,
                    gamma=gamma,
                    beta=beta,
                    mean=mean,
                    inv_std_dev=inv_std_dev,
                    data_format="NHWC",
                    epsilon=0.0015,
                    num_groups=2)
                y = convolutional.conv2d(
                    y,
                    2,
                    1,
                    use_bias=False,
                    kernel_initializer=init_ops.ones_initializer())
                y = layers_norm.batch_normalization(y, fused=True)

            with ops.device('cpu'):
                report = gen_ipu_ops.ipu_event_trace()

        tu.configure_ipu_system(True, True, True)

        with tu.ipu_session() as sess:
            sess.run(variables.global_variables_initializer())

            sess.run(report)

            sess.run(y, {x: np.zeros([1, 4, 4, 2])})

            result = sess.run(report)

            s = tu.extract_all_strings_from_event_trace(result)
            cs_list = tu.get_compute_sets_from_report(s)

            # Would fail if there were two batch norms in the graph
            ok = [
                '__seed*', 'host-exchange-local-copy', 'Copy_',
                'vs/conv2d/Conv2D/convolution.*/Conv_1x1/Convolve',
                'vs/PopnnGroupNormInference/custom-call*/',
                'vs/batch_normalization/FusedBatchNorm/batch-norm-inference.*/'
            ]
            self.assertTrue(tu.check_all_compute_sets_and_list(cs_list, ok))
 def testFunctionalConv2DNoReuse(self):
   height, width = 7, 9
   images = random_ops.random_uniform((5, height, width, 3), seed=1)
   conv_layers.conv2d(images, 32, [3, 3])
   self.assertEqual(
       len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 2)
   conv_layers.conv2d(images, 32, [3, 3])
   self.assertEqual(
       len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 4)
Example #16
    def _bottleneck(self, inputs, size=None):
        conv1 = conv2d(inputs, filters=size, kernel_size=1, padding='same')
        ac2 = self._add_common_layers(conv1)
        conv2 = conv2d(ac2, filters=size, kernel_size=3, padding='same')
        ac3 = self._add_common_layers(conv2)
        conv3 = conv2d(ac3, filters=size * 4, kernel_size=1, padding='same')

        # This 1x1 conv is used to match the dimension of x and F(x)
        hack_conv = conv2d(inputs, filters=size * 4, kernel_size=1, padding='same')

        return tf.add(hack_conv, conv3)
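    # For a hypothetical call self._bottleneck(x, size=64), both branches end
    # with size * 4 = 256 channels, so the element-wise add is well-defined.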
Example #17
    def scconv_Qd(self, scc_type, bn_type, use_relu, input_layer=None):
        if input_layer is None:
            input_layer = self.top_layer
        name = 'scconv' + str(self.counts['scconv'])
        self.counts['scconv'] += 1
        strides = [1, 1]
        channel_pos = 'channels_first'

        print('input of scconv: ', input_layer.name, input_layer.get_shape())

        with tf.variable_scope(name + '_h'):
            x = tf.pad(input_layer, [[0, 0], [0, 0], [1, 1], [1, 1]])
            x = conv_layers.conv2d(x, filters=input_layer.get_shape()[1].value // 2, kernel_size=3, strides=strides,
                padding='VALID', data_format=channel_pos,
                kernel_initializer=None,
                use_bias=(bn_type == 'NONE'))
            if bn_type != 'NONE':
                x = tf.contrib.layers.batch_norm(
                    x,
                    decay=self.batch_norm_config.get('decay', 0.9),
                    scale=True,
                    epsilon=self.batch_norm_config.get('epsilon', 0.001),
                    is_training=self.phase_train,
                    fused=True,
                    data_format=bn_type)
            if use_relu:
                x = tf.nn.relu(x)

        print('height compressed: ', x.name, x.get_shape())

        with tf.variable_scope(name + '_w'):
            x = tf.transpose(x, [0, 2, 1, 3])
            x = tf.pad(x, [[0, 0], [0, 0], [1, 1], [1, 1]])
            x = conv_layers.conv2d(x, filters=input_layer.get_shape()[2].value // 2, kernel_size=3, strides=strides,
                padding='VALID', data_format=channel_pos,
                kernel_initializer=None,
                use_bias=(bn_type == 'NONE'))
            if bn_type != 'NONE':
                x = tf.contrib.layers.batch_norm(
                    x,
                    decay=self.batch_norm_config.get('decay', 0.9),
                    scale=True,
                    epsilon=self.batch_norm_config.get('epsilon', 0.001),
                    is_training=self.phase_train,
                    fused=True,
                    data_format=bn_type)
            if use_relu:
                x = tf.nn.relu(x)
            x = tf.transpose(x, [0, 2, 1, 3])

        print('width compressed: ', x.name, x.get_shape())
        self.top_layer = x
        return x
Example #18
 def flow(self, source, reference):
     ds = 1
     activate = tf.nn.relu  # alternative: tf.nn.leaky_relu
     ki = tf.contrib.layers.xavier_initializer()
     n, h, w, c = reference.shape
     with tf.variable_scope('flow', reuse=tf.AUTO_REUSE) as scope:
         x = tf.concat([reference, source], -1)
         x = conv2d(x, 32, 9, strides=ds, padding='same', activation=activate,
                    kernel_initializer=ki, name='conv0')
         x = tf.nn.max_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME', name='pool0')
         x = conv2d(x, 32, 9, strides=ds, padding='same', activation=activate,
                    kernel_initializer=ki, name='conv1')
         x = tf.nn.max_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME', name='pool1')
         x = tf.image.resize_images(x, [h, w], method=0)
         # Two tanh channels in [-1, 1]: the predicted (u, v) flow field.
         uv = conv2d(x, 2, 3, strides=ds, padding='same', activation=tf.nn.tanh,
                     kernel_initializer=ki, name='conv2')
     return uv
Example #19
def cnn_2d(images, is_training):
    """
    Build the model for 2D-CNN.

    Inputs:
    -- images: Images placeholder
    -- is_training: bool placeholder, training or not

    Output:
    -- logits: the output logits of the model

    """
    # Build the CNN model
    l_conv1 = conv2d(images,
                     CONV1_FILTERS,
                     KERNEL_SIZE1,
                     strides=STRIDE_CONV1,
                     activation=relu,
                     name='Conv1')

    l_maxpool1 = max_pooling2d(l_conv1,
                               POOL_SIZE1,
                               POOL_SIZE1,
                               padding='same',
                               name='Maxpool1')

    l_conv2 = conv2d(l_maxpool1,
                     CONV2_FILTERS,
                     KERNEL_SIZE2,
                     strides=STRIDE_CONV2,
                     activation=relu,
                     name='Conv2')

    l_maxpool2 = max_pooling2d(l_conv2,
                               POOL_SIZE2,
                               POOL_SIZE2,
                               padding='same',
                               name='Maxpool2')

    l_flatten = flatten(l_maxpool2, scope='Flatten')

    l_fc1 = dense(l_flatten, FC1, activation=relu, name='Fc1')

    l_drop = dropout(l_fc1, DROP_RATE, training=is_training, name='Dropout')

    l_fc2 = dense(l_drop, FC2, activation=relu, name='Fc2')

    logits = dense(l_fc2, NUM_CLASSES, name='Output')

    return logits
Example #20
File: tf_vgg.py  Project: muzi-8/ReactCNN
 def _conv_with_bn(self, idx, bottom):
     # initializer_dict = {'gamma': init_ops.glorot_normal_initializer()}
     conved = conv2d(
         inputs=bottom,
         filters=self.deps[idx],
         kernel_size=[3, 3],
         strides=[1, 1],
         padding='same',
         activation=None,
         use_bias=True,
         kernel_initializer=tf.contrib.layers.xavier_initializer(),
         name=self.conv_names[idx])
     bn = batch_norm(inputs=conved,
                     decay=0.99,
                     center=True,
                     scale=True,
                     activation_fn=tf.nn.relu,
                     is_training=self.training,
                     scope=self.conv_names[idx])
     # initializer_dict = {'gamma': init_ops.glorot_normal_initializer()}
     # conved = conv2d(inputs=bottom, filters=self.deps[idx], kernel_size=[3,3], strides=[1,1], padding='same',
     #     activation=tf.nn.relu, use_bias=True, kernel_initializer=tf.contrib.layers.xavier_initializer(), name=self.conv_names[idx])
     # bn = batch_norm(inputs=conved, center=True, scale=True, activation_fn=None, is_training=self.training, scope=self.conv_names[idx],
     #                 param_initializers=initializer_dict)
     return bn
Example #21
 def _conv2d_impl(self, input_layer, num_channels_in, filters, kernel_size,
                  strides, padding, kernel_initializer):
     if self.use_tf_layers:
         return conv_layers.conv2d(input_layer,
                                   filters,
                                   kernel_size,
                                   strides,
                                   padding,
                                   self.channel_pos,
                                   kernel_initializer=kernel_initializer,
                                   use_bias=False)
     else:
         weights_shape = [
             kernel_size[0], kernel_size[1], num_channels_in, filters
         ]
         weights = self.get_variable("conv2d/kernel",
                                     weights_shape,
                                     self.variable_dtype,
                                     self.dtype,
                                     initializer=kernel_initializer)
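          # tf.nn.conv2d takes per-dimension strides: [1, h, w, 1] for NHWC
          # and [1, 1, h, w] for NCHW.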
         if self.data_format == "NHWC":
             strides = [1] + strides + [1]
         else:
             strides = [1, 1] + strides
         return tf.nn.conv2d(input_layer,
                             weights,
                             strides,
                             padding,
                             data_format=self.data_format)
Example #22
    def _conv2d_impl(self, input_layer, num_channels_in, filters, kernel_size,
                     strides, padding, kernel_initializer):
        """Construct a custom convolution layer."""
        if self.use_tf_layers:
            assert not (self.params.tanh_weight_transform
                        or self.params.quant_weight)
            return conv_layers.conv2d(input_layer,
                                      filters,
                                      kernel_size,
                                      strides,
                                      padding,
                                      self.channel_pos,
                                      kernel_initializer=kernel_initializer,
                                      use_bias=False)
        else:
            weights_shape = [
                kernel_size[0], kernel_size[1], num_channels_in, filters
            ]
            # We use the name 'conv2d/kernel' so the variable has the same name as its
            # tf.layers equivalent. This way, if a checkpoint is written when
            # self.use_tf_layers == True, it can be loaded when
            # self.use_tf_layers == False, and vice versa.
            weights = self.get_variable('conv2d/kernel',
                                        weights_shape,
                                        self.variable_dtype,
                                        self.dtype,
                                        initializer=kernel_initializer)
            if self.params.tanh_weight_transform:
                if not (self.params.first_weight_name in weights.name
                        or self.params.last_weight_name in weights.name):
                    print('Dorefa quantizing weight %s' % weights.name)
                    weights = self.dorefa_weight_quantize(
                        weights, self.params.quant_weight,
                        self.params.quant_weight_bits,
                        self.params.quant_weight_per_channel,
                        self.params.quant_weight_delay)
            elif self.params.quant_weight:
                if not (self.params.first_weight_name in weights.name
                        or self.params.last_weight_name in weights.name):
                    print('Quantizing weight %s' % weights.name)
                    weights = self.last_value_quantize(
                        weights,
                        per_channel=self.params.quant_weight_per_channel,
                        is_training=self.phase_train,
                        num_bits=self.params.quant_weight_bits,
                        narrow_range=self.params.quant_weight_narrow_range,
                        relative_quantile=self.params.
                        quant_weight_relative_quantile,
                        freeze=self.params.freeze_weight_range,
                        quant_delay=self.params.quant_weight_delay)

            if self.data_format == 'NHWC':
                strides = [1] + strides + [1]
            else:
                strides = [1, 1] + strides
            return tf.nn.conv2d(input_layer,
                                weights,
                                strides,
                                padding,
                                data_format=self.data_format)
 def testConv2DFloat16(self):
     height, width = 7, 9
     images = random_ops.random_uniform((5, height, width, 4),
                                        dtype='float16')
     output = conv_layers.conv2d(images, 32, [3, 3], activation=nn_ops.relu)
     self.assertListEqual(output.get_shape().as_list(),
                          [5, height - 2, width - 2, 32])
Example #24
 def _conv2d_impl(self, input_layer, num_channels_in, filters, kernel_size,
                  strides, padding, kernel_initializer):
     if self.use_tf_layers:
         return conv_layers.conv2d(input_layer,
                                   filters,
                                   kernel_size,
                                   strides,
                                   padding,
                                   self.channel_pos,
                                   kernel_initializer=kernel_initializer,
                                   use_bias=False)
     else:
         weights_shape = [
             kernel_size[0], kernel_size[1], num_channels_in, filters
         ]
         # We use the name 'conv2d/kernel' so the variable has the same name as its
         # tf.layers equivalent. This way, if a checkpoint is written when
         # self.use_tf_layers == True, it can be loaded when
         # self.use_tf_layers == False, and vice versa.
         weights = self.get_variable('conv2d/kernel',
                                     weights_shape,
                                     self.variable_dtype,
                                     self.dtype,
                                     initializer=kernel_initializer)
         if self.data_format == 'NHWC':
             strides = [1] + strides + [1]
         else:
             strides = [1, 1] + strides
         return tf.nn.conv2d(input_layer,
                             weights,
                             strides,
                             padding,
                             data_format=self.data_format)
Example #26
    def testBatchNormalizeInferenceMatchWithSharding(self):
        with ops.device("/device:IPU:0"):
            x = array_ops.placeholder(np.float32, shape=[1, 4, 4, 2])

            with variable_scope.variable_scope("vs", use_resource=True):
                with tu.ipu_shard(0):
                    a = convolutional.conv2d(
                        x,
                        2,
                        1,
                        use_bias=False,
                        kernel_initializer=init_ops.ones_initializer())
                    b = layers_norm.batch_normalization(a, fused=True)

                with tu.ipu_shard(0):
                    c = convolutional.conv2d(
                        b,
                        2,
                        1,
                        use_bias=False,
                        kernel_initializer=init_ops.ones_initializer())
                    d = layers_norm.batch_normalization(c, fused=True)

            with ops.device('cpu'):
                report = gen_ipu_ops.ipu_event_trace()

        tu.configure_ipu_system(True, True, True, sharded=True)

        with tu.ipu_session() as sess:
            sess.run(variables.global_variables_initializer())

            sess.run(report)

            sess.run(d, {x: np.zeros([1, 4, 4, 2])})

            result = sess.run(report)

            s = tu.extract_all_strings_from_event_trace(result)
            cs_list = tu.get_compute_sets_from_report(s)

            # Would fail if there were two batch norms in the graph
            ok = [
                '__seed*', '*OnTileCopy*',
                'vs/conv2d/Conv2D/convolution.*/Conv_1x1/Convolve',
                'vs/batch_normalization/FusedBatchNorm/batch-norm-inference.*/'
            ]
            self.assertTrue(tu.check_all_compute_sets_and_list(cs_list, ok))
    def testBatchNormalizeInferenceDontMatchDifferentTypes(self):
        with ops.device("/device:IPU:0"):
            x = array_ops.placeholder(np.float32, shape=[1, 4, 4, 2])

            with variable_scope.variable_scope("vs", use_resource=True):
                y = convolutional.conv2d(
                    x,
                    2,
                    1,
                    use_bias=False,
                    kernel_initializer=init_ops.ones_initializer())
                y = layers_norm.batch_normalization(y, fused=True)
                y = math_ops.cast(y, np.float16)
                y = convolutional.conv2d(
                    y,
                    2,
                    1,
                    use_bias=False,
                    kernel_initializer=init_ops.ones_initializer())
                y = layers_norm.batch_normalization(y, fused=True)

            with ops.device('cpu'):
                report = gen_ipu_ops.ipu_event_trace()

        tu.configure_ipu_system(True, True, True)

        with tu.ipu_session() as sess:
            sess.run(variables.global_variables_initializer())

            sess.run(report)

            sess.run(y, {x: np.zeros([1, 4, 4, 2])})

            result = sess.run(report)

            s = tu.extract_all_strings_from_event_trace(result)
            cs_list = tu.get_compute_sets_from_report(s)
            # Matches two convolutions
            ok = [
                '__seed*', 'host-exchange-local-copy-', 'Copy_',
                'vs/conv2d/Conv2D/convolution.*/Conv_1x1',
                'vs/batch_normalization/FusedBatchNorm/batch-norm-inference.*/',
                'vs/Cast/convert.*/Cast',
                'vs/conv2d_1/Conv2D/convolution.*/Conv_1x1',
                'vs/batch_normalization_1/FusedBatchNormV2/batch-norm-inference.*/'
            ]
            self.assertTrue(tu.check_all_compute_sets_and_list(cs_list, ok))
Example #28
 def testFunctionalConv2DInitializerFromScope(self):
   with self.test_session() as sess:
     with variable_scope.variable_scope(
         'scope', initializer=init_ops.ones_initializer()):
       height, width = 7, 9
       images = random_ops.random_uniform((5, height, width, 3), seed=1)
       conv_layers.conv2d(images, 32, [3, 3], name='conv1')
       weights = variables.trainable_variables()
       # Check the names of weights in order.
       self.assertTrue('kernel' in weights[0].name)
       self.assertTrue('bias' in weights[1].name)
       sess.run(variables.global_variables_initializer())
       weights = sess.run(weights)
       # Check that the kernel weights got initialized to ones (from scope)
       self.assertAllClose(weights[0], np.ones((3, 3, 3, 32)))
       # Check that the bias still got initialized to zeros.
       self.assertAllClose(weights[1], np.zeros((32)))
Example #29
    def testCloneBatchNorm(self):
        g = ops.Graph()
        with g.as_default():
            np.random.seed(1234)
            x_value = np.random.random([2, 5, 5, 3])
            w_value = np.random.random([3, 3, 3, 2])
            is_training_t = array_ops.placeholder(dtypes.bool,
                                                  name='is_training_t')
            x_t = array_ops.constant(x_value, dtype=dtypes.float32, name='x_t')
            y_t = conv2d(
                x_t,
                2, [3, 3],
                kernel_initializer=init_ops.constant_initializer(w_value))
            y_t = batch_norm(y_t, training=is_training_t)
            optimizer_t = train.AdamOptimizer()
            optimize_t = optimizer_t.minimize(math_ops.reduce_sum(y_t))
            with self.test_session(use_gpu=True) as sess:
                sess.run(variables.global_variables_initializer())
                y_test_1 = sess.run(y_t, feed_dict={is_training_t: False})
                sess.run(optimize_t, feed_dict={is_training_t: True})
                y_test_2 = sess.run(y_t, feed_dict={is_training_t: False})

            is_training = array_ops.placeholder(dtypes.bool,
                                                name='is_training')
            x = array_ops.constant(np.zeros([2, 5, 5, 3]),
                                   dtype=dtypes.float32,
                                   name='x')
            y = conv2d(
                x,
                2, [3, 3],
                kernel_initializer=init_ops.constant_initializer(w_value))
            y = batch_norm(y, training=is_training)
            x_new = array_ops.constant(x_value, dtype=dtypes.float32, name='x')
            y_out = meta_graph.clone(y, "copy", replace={x: x_new})
            optimizer = train.AdamOptimizer()
            optimize = optimizer.minimize(math_ops.reduce_sum(y_out))
            with self.test_session(use_gpu=True) as sess:
                sess.run(variables.global_variables_initializer())
                y_out_1 = sess.run(y_out, feed_dict={is_training: False})
                y_out_2 = sess.run(y_out, feed_dict={is_training: False})
                sess.run(optimize, feed_dict={is_training: True})
                y_out_3 = sess.run(y_out, feed_dict={is_training: False})
            self.assertAllClose(y_out_1, y_out_2)
            self.assertTrue(np.abs(y_out_1 - y_out_3).max() > 1e-6)
            self.assertAllClose(y_test_1, y_out_1)
            self.assertAllClose(y_test_2, y_out_3)
Example #31
 def training_step(inputs, scale):
   outputs = convolutional.conv2d(
       inputs,
       filters=16,
       kernel_size=(3, 3),
       data_format="channels_first",
       kernel_regularizer=make_regularizer(scale))
   loss = math_ops.reduce_mean(math_ops.square(outputs))
   return loss.op
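 # Note: the penalty created by kernel_regularizer is collected in
 # ops.GraphKeys.REGULARIZATION_LOSSES; it is not added to this loss
 # automatically.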
Example #32
File: tf_vgg.py  Project: muzi-8/ReactCNN
 def _conv(self, idx, bottom):
     return conv2d(inputs=bottom,
                   filters=self.deps[idx],
                   kernel_size=[3, 3],
                   strides=[1, 1],
                   padding='same',
                   activation=nn.relu,
                   use_bias=True,
                   name=self.conv_names[idx])
Example #34
 def conv(input, filters, stride, name):
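      # He-style initialization: stddev = sqrt(2 / (3 * 3 * filters)), the
      # fan-out of the 3x3 kernel.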
     return conv2d(input,
                   filters, [3, 3],
                   strides=[stride, stride],
                   name=name,
                   padding='same',
                   activation=None,
                   use_bias=False,
                   kernel_initializer=tf.random_normal_initializer(
                       stddev=np.sqrt(2.0 / 9 / filters)))
 def add_transition(name, input, nb_filters):
     # shape = input.get_shape().as_list()
     # in_channel = shape[3]
     with tf.variable_scope(name) as scope:
         l = self._batch_norm_default(input, name)
         l = tf.nn.relu(l)
         l = conv2d(l, nb_filters, [1,1], strides=[1,1], name='conv1',
             padding='same', activation=None, use_bias=False)
         l = tf.nn.relu(l)
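          # Assuming _avgpool(l, 2) is 2x2 average pooling, this halves the
          # spatial resolution, completing a DenseNet-style transition block.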
         l = self._avgpool(l, 2)
     return l
Example #36
    def testBatchNormalizeInferenceDontMatchDifferentTypes(self):
        with self.session() as sess:
            with ops.device("/device:IPU:0"):
                x = array_ops.placeholder(np.float32, shape=[1, 4, 4, 2])

                with variable_scope.variable_scope("vs", use_resource=True):
                    y = convolutional.conv2d(
                        x,
                        2,
                        1,
                        use_bias=False,
                        kernel_initializer=init_ops.ones_initializer())
                    y = layers_norm.batch_normalization(y, fused=True)
                    y = math_ops.cast(y, np.float16)
                    y = convolutional.conv2d(
                        y,
                        2,
                        1,
                        use_bias=False,
                        kernel_initializer=init_ops.ones_initializer())
                    y = layers_norm.batch_normalization(y, fused=True)

            report = ReportJSON(self, sess)

            sess.run(variables.global_variables_initializer())

            report.reset()

            sess.run(y, {x: np.zeros([1, 4, 4, 2])})

            report.parse_log()

            # Matches two convolutions
            ok = [
                '__seed*', 'Copy_', 'vs/conv2d/Conv2D/convolution.*/Conv_1x1',
                'vs/batch_normalization/FusedBatchNorm*/batch-norm-inference.*/',
                'vs/Cast/convert.*/Cast',
                'vs/conv2d_1/Conv2D/convolution.*/Conv_1x1',
                'vs/batch_normalization_1/FusedBatchNorm*/batch-norm-inference.*/'
            ]
            report.assert_all_compute_sets_and_list(ok)
Example #37
def cnn_test(images, is_training):
    # Build the CNN model
    l_conv1 = conv2d(images,
                     10, [7, 7],
                     strides=[1, 1],
                     activation=relu,
                     name='Conv1')

    l_flatten = flatten(l_conv1, scope='Flatten')

    logits = dense(l_flatten, NUM_CLASSES, name='Output')

    return logits
Example #38
    def testCloneConvolution(self):
        g = ops.Graph()
        with g.as_default():
            x = array_ops.ones([2, 5, 5, 3], name='x')
            y = conv2d(x, 2, [3, 3], padding='same')

            x_new = array_ops.zeros([2, 5, 5, 3], name='x_new')
            y_out = meta_graph.clone(y,
                                     "conv2d/copy1",
                                     from_scope="conv2d",
                                     replace={x: x_new})
            with self.test_session(use_gpu=True) as sess:
                sess.run(variables.global_variables_initializer())
                y_out_ = sess.run(y_out)
            self.assertEqual(y_out.op.inputs[0].op.inputs[1].name,
                             "conv2d/kernel/read:0")
            self.assertAllClose(y_out_, np.zeros((2, 5, 5, 2)))
Example #39
def conv_bn(x,
            feature_channel,
            kernel_size,
            block_name,
            stride=(1, 1),
            padding='same',
            dilation_rate=(1, 1),
            activation=None):
    with tf.name_scope(block_name):
        x = conv2d(x,
                   feature_channel,
                   kernel_size,
                   strides=stride,
                   padding=padding,
                   dilation_rate=dilation_rate,
                   activation=activation,
                   name=block_name + 'conv')
        x = batch_normalization(x, name=block_name + 'bn')
        return x
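A hypothetical call, just to illustrate the signature:

#   y = conv_bn(inputs, 64, (3, 3), 'block1_', stride=(2, 2),
#               activation=tf.nn.relu)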
Example #40
 def _simple_model(self, image, fused, freeze_mode):
   output_channels, kernel_size = 2, 3
   conv = conv_layers.conv2d(
       image,
       output_channels,
       kernel_size,
       use_bias=False,
       kernel_initializer=init_ops.ones_initializer())
   bn_layer = normalization_layers.BatchNormalization(fused=fused)
   bn_layer._bessels_correction_test_only = False
   training = not freeze_mode
   bn = bn_layer.apply(conv, training=training)
   loss = math_ops.reduce_sum(math_ops.abs(bn))
   optimizer = gradient_descent.GradientDescentOptimizer(0.01)
   if not freeze_mode:
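      # Batch norm's moving-mean/variance updates live in UPDATE_OPS; running
      # them as control dependencies of the train op keeps them in step with
      # training.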
     update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
     with ops.control_dependencies(update_ops):
       train_op = optimizer.minimize(loss)
   else:
     train_op = optimizer.minimize(loss)
   saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)
   return loss, train_op, saver
Example #42
 def test_invalid_shape(self):
   inputs = random_ops.random_uniform((10, 100, 100, 3), seed=1)
   graph = conv_layers.conv2d(inputs, 3, 10, strides=(1, 1))
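    # maxout requires the channel count to be divisible by num_units;
    # 3 % 2 != 0, so this must raise.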
   with self.assertRaisesRegexp(ValueError, 'number of features'):
     graph = maxout.maxout(graph, num_units=2)
 def testInvalidDataFormat(self):
   height, width = 7, 9
   images = random_ops.random_uniform((5, height, width, 3), seed=1)
   with self.assertRaisesRegexp(ValueError, 'data_format'):
     conv_layers.conv2d(images, 32, 3, data_format='invalid')