Example #1
    def forward(self, x):
        with tf.variable_scope(self.Gscope):
            # Three downsampling convolutions, each followed by instance
            # norm (shift only, no learned scale) and ReLU.
            h = self.downSampling1(x)
            with tf.variable_scope("downSampling1_norm"):
                h = instance_norm(h, scale=False, epsilon=1e-5)
            h = tf.nn.relu(h)

            h = self.downSampling2(h)
            with tf.variable_scope("downSampling2_norm"):
                h = instance_norm(h, scale=False, epsilon=1e-5)
            h = tf.nn.relu(h)

            h = self.downSampling3(h)
            with tf.variable_scope("downSampling3_norm"):
                h = instance_norm(h, scale=False, epsilon=1e-5)
            h = tf.nn.relu(h)

            # Six residual blocks at the bottleneck resolution.
            h = self.residualBlock1.forward(h)
            h = self.residualBlock2.forward(h)
            h = self.residualBlock3.forward(h)
            h = self.residualBlock4.forward(h)
            h = self.residualBlock5.forward(h)
            h = self.residualBlock6.forward(h)

            # Two upsampling stages; here the instance norm also learns a
            # per-channel scale.
            h = self.Y_upSampling1(h)
            with tf.variable_scope("Y_upSampling1_norm"):
                h = instance_norm(h, scale=True, epsilon=1e-5)
            h = tf.nn.relu(h)

            h = self.Y_upSampling2(h)
            with tf.variable_scope("Y_upSampling2_norm"):
                h = instance_norm(h, scale=True, epsilon=1e-5)
            h = tf.nn.relu(h)

            return self.fakeGeneration(h)
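
The explicit variable_scope blocks above exist only to give each
normalization its own variable names. With the contrib-layers version of
instance_norm exercised in the test examples below, the same effect comes
from passing scope= directly; a minimal sketch, assuming
tf.contrib.layers.instance_norm and a hypothetical conv callable standing
in for the downsampling layers:

import tensorflow as tf

def down_block(h, conv, scope):
    # conv -> instance norm (shift only) -> ReLU, with the norm variables
    # namespaced under scope instead of a hand-managed variable_scope.
    h = conv(h)
    h = tf.contrib.layers.instance_norm(h, scale=False, epsilon=1e-5,
                                        scope=scope)
    return tf.nn.relu(h)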
Example #2
 def testCreateVariables(self):
   height, width = 3, 3
   images = random_ops.random_uniform((5, height, width, 3), seed=1)
   normalization.instance_norm(images, center=True, scale=True)
   beta = contrib_variables.get_variables_by_name('beta')[0]
   gamma = contrib_variables.get_variables_by_name('gamma')[0]
   self.assertEqual('InstanceNorm/beta', beta.op.name)
   self.assertEqual('InstanceNorm/gamma', gamma.op.name)
Example #3
 def testReuseVariables(self):
   height, width = 3, 3
   images = random_ops.random_uniform((5, height, width, 3), seed=1)
   normalization.instance_norm(images, scale=True, scope='IN')
   normalization.instance_norm(images, scale=True, scope='IN', reuse=True)
   beta = contrib_variables.get_variables_by_name('beta')
   gamma = contrib_variables.get_variables_by_name('gamma')
   self.assertEqual(1, len(beta))
   self.assertEqual(1, len(gamma))
Example #4
    def forward(self, x):
        h = self.Y1(x)
        with tf.variable_scope("Generator/ResBlock" + str(self.num) + "1"):
            h = instance_norm(h, scale=False, epsilon=1e-5)
        h = tf.nn.relu(h)
        h = self.Y2(h)
        with tf.variable_scope("Generator/ResBlock" + str(self.num) + "2"):
            h = instance_norm(h, scale=False, epsilon=1e-5)

        # Residual connection; no activation after the second norm.
        return x + h
Example #5
 def testValueCorrectWithReuseVars(self):
   height, width = 3, 3
   image_shape = (10, height, width, 3)
   images = random_ops.random_uniform(image_shape, seed=1)
   output_train = normalization.instance_norm(images, scope='IN')
   output_eval = normalization.instance_norm(images, scope='IN', reuse=True)
   with self.test_session() as sess:
     sess.run(variables.global_variables_initializer())
     # output_train and output_eval should be the same.
     train_np, eval_np = sess.run([output_train, output_eval])
     self.assertAllClose(train_np, eval_np)
Example #6
  def doOutputTest(self, input_shape, data_format, tol=1e-3):
    axis = -1 if data_format == 'NHWC' else 1
    for mu in (0.0, 1e2):
      for sigma in (1.0, 0.1):
        # Shape of the per-instance statistics: one mean and one variance
        # per (sample, channel) pair.
        reduced_shape = (input_shape[0], input_shape[axis])
        expected_mean = np.zeros(reduced_shape)
        expected_var = np.ones(reduced_shape)

        # Determine axes that will be normalized.
        reduced_axes = list(range(len(input_shape)))
        del reduced_axes[axis]
        del reduced_axes[0]
        reduced_axes = tuple(reduced_axes)

        inputs = random_ops.random_uniform(input_shape, seed=0) * sigma + mu
        output_op = normalization.instance_norm(
            inputs, center=False, scale=False, data_format=data_format)
        with self.test_session() as sess:
          sess.run(variables.global_variables_initializer())
          outputs = sess.run(output_op)
          # Make sure that there are no NaNs
          self.assertFalse(np.isnan(outputs).any())
          mean = np.mean(outputs, axis=reduced_axes)
          var = np.var(outputs, axis=reduced_axes)
          # The mean and variance of each example should be close to 0 and 1
          # respectively.
          self.assertAllClose(expected_mean, mean, rtol=tol, atol=tol)
          self.assertAllClose(expected_var, var, rtol=tol, atol=tol)
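
For reference, the zero-mean / unit-variance property these tests assert is
plain per-instance normalization over the spatial axes; a minimal NumPy
sketch, assuming NHWC layout and no learned shift or scale:

import numpy as np

def instance_norm_np(x, eps=1e-5):
    # x: (N, H, W, C). Each sample and channel is normalized over its own
    # (H, W) slice, so mean -> 0 and variance -> 1 per (sample, channel).
    mean = x.mean(axis=(1, 2), keepdims=True)
    var = x.var(axis=(1, 2), keepdims=True)
    return (x - mean) / np.sqrt(var + eps)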
Example #7
 def testCreateOp(self):
   height, width = 3, 3
   images = random_ops.random_uniform((5, height, width, 3), seed=1)
   output = normalization.instance_norm(images)
   self.assertStartsWith(output.op.name, 'InstanceNorm/instancenorm')
   self.assertListEqual([5, height, width, 3], output.shape.as_list())
Example #8
 def testCreateOpFloat64(self):
   height, width = 3, 3
   images = random_ops.random_uniform(
       (5, height, width, 3), dtype=dtypes.float64, seed=1)
   output = normalization.instance_norm(images)
   self.assertStartsWith(
       output.op.name, 'InstanceNorm/instancenorm')
   self.assertListEqual([5, height, width, 3], output.shape.as_list())
Example #9
 def testCreateOpNoScaleCenter(self):
   height, width = 3, 3
   images = random_ops.random_uniform(
       (5, height, width, 3), dtype=dtypes.float64, seed=1)
   output = normalization.instance_norm(images, center=False, scale=False)
   self.assertStartsWith(
       output.op.name, 'InstanceNorm/instancenorm')
   self.assertListEqual([5, height, width, 3], output.shape.as_list())
   self.assertEqual(0, len(contrib_variables.get_variables_by_name('beta')))
   self.assertEqual(0, len(contrib_variables.get_variables_by_name('gamma')))
Example #10
 def testParamsShapeNotFullyDefinedNHWC(self):
   inputs = array_ops.placeholder(dtypes.float32, shape=(3, 4, None))
   with self.assertRaisesRegexp(ValueError, 'undefined channels dimension'):
     normalization.instance_norm(inputs, data_format='NHWC')
Example #11
 def testBadDataFormat(self):
   inputs = array_ops.placeholder(dtypes.float32, shape=(2, 5, 5))
   with self.assertRaisesRegexp(ValueError,
                                'data_format has to be either NCHW or NHWC.'):
     normalization.instance_norm(inputs, data_format='NHCW')
Example #12
 def testUnknownShape(self):
   inputs = array_ops.placeholder(dtypes.float32)
   with self.assertRaisesRegexp(ValueError, 'undefined rank'):
     normalization.instance_norm(inputs)
Example #13
File: ops.py  Project: tasx0823/EasyMesh
def residual_block_with_IN(incoming,
                           nb_blocks,
                           out_channels,
                           downsample=False,
                           downsample_strides=2,
                           activation='relu',
                           batch_norm=True,
                           bias=True,
                           weights_init='variance_scaling',
                           bias_init='zeros',
                           regularizer='L2',
                           weight_decay=0.0001,
                           trainable=True,
                           restore=True,
                           reuse=False,
                           scope=None,
                           name="ResidualBlock",
                           is_training=True):
    """ Residual Block.

	A residual block as described in MSRA's Deep Residual Network paper.
	Full pre-activation architecture is used here.

	Input:
		4-D Tensor [batch, height, width, in_channels].

	Output:
		4-D Tensor [batch, new height, new width, nb_filter].

	Arguments:
		incoming: `Tensor`. Incoming 4-D Layer.
		nb_blocks: `int`. Number of layer blocks.
		out_channels: `int`. The number of convolutional filters of the
			convolution layers.
		downsample: `bool`. If True, apply downsampling using
			'downsample_strides' for strides.
		downsample_strides: `int`. The strides to use when downsampling.
		activation: `str` (name) or `function` (returning a `Tensor`).
			Activation applied to this layer (see tflearn.activations).
			Default: 'linear'.
		batch_norm: `bool`. If True, apply batch normalization.
		bias: `bool`. If True, a bias is used.
		weights_init: `str` (name) or `Tensor`. Weights initialization.
			(see tflearn.initializations) Default: 'uniform_scaling'.
		bias_init: `str` (name) or `tf.Tensor`. Bias initialization.
			(see tflearn.initializations) Default: 'zeros'.
		regularizer: `str` (name) or `Tensor`. Add a regularizer to this
			layer weights (see tflearn.regularizers). Default: None.
		weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
		trainable: `bool`. If True, weights will be trainable.
		restore: `bool`. If True, this layer weights will be restored when
			loading a model.
		reuse: `bool`. If True and 'scope' is provided, this layer variables
			will be reused (shared).
		scope: `str`. Define this layer scope (optional). A scope can be
			used to share variables between layers. Note that scope will
			override name.
		name: A name for this layer (optional). Default: 'ShallowBottleneck'.
		is_training: True for training mode and False for val or test mode.
	References:
		- Deep Residual Learning for Image Recognition. Kaiming He, Xiangyu
			Zhang, Shaoqing Ren, Jian Sun. 2015.
		- Identity Mappings in Deep Residual Networks. Kaiming He, Xiangyu
			Zhang, Shaoqing Ren, Jian Sun. 2015.

	Links:
		- [http://arxiv.org/pdf/1512.03385v1.pdf]
			(http://arxiv.org/pdf/1512.03385v1.pdf)
		- [Identity Mappings in Deep Residual Networks]
			(https://arxiv.org/pdf/1603.05027v2.pdf)

	"""
    resnet = incoming
    in_channels = incoming.get_shape().as_list()[-1]

    with tf.variable_scope(values=[incoming],
                           name_or_scope=scope,
                           default_name=name,
                           reuse=reuse) as scope:
        name = scope.name  # resolved scope name, used to namespace the norms
        for i in range(nb_blocks):

            identity = resnet

            if not downsample:
                downsample_strides = 1

            # Full pre-activation ordering: norm -> activation -> conv.
            # Despite the flag's name, instance normalization is applied.
            if batch_norm:
                resnet = normalization.instance_norm(resnet,
                                                     scope=name + '_bn_' +
                                                     str(i) + '_1')
            resnet = tflearn.activation(resnet, activation)

            resnet = tflearn.conv_2d(resnet, out_channels, 3,
                                     downsample_strides, 'same', 'linear',
                                     bias, weights_init, bias_init,
                                     regularizer, weight_decay, trainable,
                                     restore)

            if batch_norm:
                resnet = normalization.instance_norm(resnet,
                                                     scope=name + '_bn_' +
                                                     str(i) + '_2')
            resnet = tflearn.activation(resnet, activation)

            # Only the first convolution downsamples; the second keeps the
            # spatial size so it matches the pooled identity branch.
            resnet = tflearn.conv_2d(resnet, out_channels, 3,
                                     1, 'same', 'linear',
                                     bias, weights_init, bias_init,
                                     regularizer, weight_decay, trainable,
                                     restore)

            # Downsampling
            if downsample_strides > 1:
                identity = tflearn.avg_pool_2d(identity, 1, downsample_strides)

            # Projection to new dimension
            '''
            if in_channels != out_channels:
                ch = (out_channels - in_channels)//2
                identity = tf.pad(identity,
                                  [[0, 0], [0, 0], [0, 0], [ch, ch]])
                in_channels = out_channels
            '''
            resnet = resnet + identity

    return resnet
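
A minimal usage sketch for the block above; x is a hypothetical NHWC tensor
whose channel count already equals out_channels, since the commented-out
projection means channel changes are not handled:

# Two pre-activation blocks at constant resolution, then two more that
# each downsample by a factor of 2. x: [batch, 64, 64, 64].
net = residual_block_with_IN(x, nb_blocks=2, out_channels=64,
                             scope='res_1')
net = residual_block_with_IN(net, nb_blocks=2, out_channels=64,
                             downsample=True, scope='res_2')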