def add_encoder_layer(self, input, name, training, layer_to_skip_connect, local_inner_layers, num_features,
                       dim_reduce=False, dropout_rate=0.0):
     [b1, h1, w1, d1] = input.get_shape().as_list()
     if layer_to_skip_connect is not None:
         [b0, h0, w0, d0] = layer_to_skip_connect.get_shape().as_list()
         if h0 > h1:
             skip_connect_layer = self.conv_layer(layer_to_skip_connect, int(layer_to_skip_connect.get_shape()[3]),
                                                  [3, 3], strides=(2, 2))
         else:
             skip_connect_layer = layer_to_skip_connect
     else:
         skip_connect_layer = layer_to_skip_connect
     current_layers = [input, skip_connect_layer]
     current_layers.extend(local_inner_layers)
     current_layers = remove_duplicates(current_layers)
     outputs = tf.concat(current_layers, axis=3)
     if dim_reduce:
         outputs = self.conv_layer(outputs, num_features, [3, 3], strides=(2, 2))
         outputs = leaky_relu(features=outputs)
         outputs = layer_norm(inputs=outputs, center=True, scale=True)
         # outputs = tf.nn.relu(outputs)
         # outputs = tf.layers.batch_normalization(outputs, training=training, momentum=0.9)
         outputs = tf.layers.dropout(outputs, rate=dropout_rate, training=training)
     else:
         outputs = self.conv_layer(outputs, num_features, [3, 3], strides=(1, 1))
         outputs = leaky_relu(features=outputs)
         outputs = layer_norm(inputs=outputs, center=True, scale=True)
         # outputs = tf.nn.relu(outputs)
         # outputs = tf.layers.batch_normalization(outputs, training=training, momentum=0.9)
     return outputs
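
The remove_duplicates helper called above (and in the later encoder/decoder snippets) is not defined in these snippets. Judging from how it is used, it presumably drops None entries (the skip connection can be None) and repeated tensors while preserving order; a minimal sketch under that assumption:

def remove_duplicates(input_features):
    """Assumed helper: drop None entries and repeated tensors, keep order."""
    seen = set()
    unique_features = []
    for feature in input_features:
        if feature is None:
            continue
        if id(feature) not in seen:
            seen.add(id(feature))
            unique_features.append(feature)
    return unique_features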
Example #2
 def testName(self):
   np_values = np.array([-2, -1, 0, 1, 2], dtype=np.float64)
   outputs_with_name_set = nn_ops.leaky_relu(
       constant_op.constant(np_values),
       name='test_relu_op')
   self.assertEqual(outputs_with_name_set.name, 'test_relu_op:0')
   outputs_without_name_set = nn_ops.leaky_relu(
       constant_op.constant(np_values))
   self.assertEqual(outputs_without_name_set.name, 'LeakyRelu:0')
Example #3
 def testName(self):
   np_values = np.array([-2, -1, 0, 1, 2], dtype=np.float64)
   outputs_with_name_set = nn_ops.leaky_relu(
       constant_op.constant(np_values),
       name='test_relu_op')
   self.assertEqual(outputs_with_name_set.name, 'test_relu_op:0')
   outputs_without_name_set = nn_ops.leaky_relu(
       constant_op.constant(np_values))
   self.assertEqual(outputs_without_name_set.name, 'LeakyRelu:0')
Example #4
    def add_encoder_layer(self,
                          input,
                          name,
                          training,
                          layer_to_skip_connect,
                          local_inner_layers,
                          num_features,
                          dim_reduce=False,
                          dropout_rate=0.0):
        """
        Adds a resnet encoder layer.
        :param input: The input to the encoder layer
        :param training: Flag for training or validation
        :param dropout_rate: A float or a placeholder for the dropout rate
        :param layer_to_skip_connect: Layer to skip-connect this layer to
        :param local_inner_layers: A list with the inner layers of the current Multi-Layer
        :param num_features: Number of feature maps for the convolutions
        :param dim_reduce: Boolean value indicating if this is a dimensionality reducing layer or not
        :return: The output of the encoder layer
        """
        [b1, h1, w1, d1] = input.get_shape().as_list()
        if layer_to_skip_connect is not None:
            [b0, h0, w0, d0] = layer_to_skip_connect.get_shape().as_list()

            if h0 > h1:
                skip_connect_layer = self.conv_layer(
                    layer_to_skip_connect,
                    int(layer_to_skip_connect.get_shape()[3]), [1, 1],
                    strides=(2, 2))
            else:
                skip_connect_layer = layer_to_skip_connect
        else:
            skip_connect_layer = layer_to_skip_connect
        current_layers = [input, skip_connect_layer]
        current_layers.extend(local_inner_layers)
        current_layers = remove_duplicates(current_layers)
        outputs = tf.concat(current_layers, axis=3)
        if dim_reduce:
            outputs = self.conv_layer(outputs,
                                      num_features, [3, 3],
                                      strides=(2, 2))
            outputs = leaky_relu(features=outputs)
            outputs = layer_norm(inputs=outputs, center=True, scale=True)
            outputs = tf.layers.dropout(outputs,
                                        rate=dropout_rate,
                                        training=training)
        else:
            outputs = self.conv_layer(outputs,
                                      num_features, [3, 3],
                                      strides=(1, 1))
            outputs = leaky_relu(features=outputs)
            outputs = layer_norm(inputs=outputs, center=True, scale=True)

        return outputs
Example #5
 def testUnexpectedAlphaValue(self):
     self.assertAllClose(
         np.array([[-9.0, 0.7, -5.0, 0.3, -0.1],
                   [0.1, -3.0, 0.5, -27.0, 0.9]]),
         nn_ops.leaky_relu(np.array([[-0.9, 0.7, -0.5, 0.3, -0.01],
                                     [0.1, -0.3, 0.5, -2.7, 0.9]]),
                           alpha=10))
     self.assertAllClose(
         np.array([[9.0, 0.7, 5.0, 0.3, 0.1], [0.1, 3.0, 0.5, 27.0, 0.9]]),
         nn_ops.leaky_relu(np.array([[-0.9, 0.7, -0.5, 0.3, -0.01],
                                     [0.1, -0.3, 0.5, -2.7, 0.9]]),
                           alpha=-10))
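
This test pins down the elementwise definition used by nn_ops.leaky_relu: non-negative entries pass through and negative entries are multiplied by alpha, with no restriction of alpha to (0, 1). The expected rows above can be reproduced with plain NumPy:

import numpy as np

x = np.array([-0.9, 0.7, -0.5, 0.3, -0.01])
for alpha in (10, -10):
    # Negative entries are scaled by alpha, non-negative entries are unchanged.
    print(np.where(x < 0, alpha * x, x))
# alpha=10  -> [-9.   0.7  -5.   0.3  -0.1]
# alpha=-10 -> [ 9.   0.7   5.   0.3   0.1]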
Example #6
 def _testLeakyRelu(self, np_features, alpha, use_gpu=False):
   np_leaky_relu = self._npLeakyRelu(np_features, alpha)
   with self.test_session(use_gpu=use_gpu):
     leaky_relu = nn_ops.leaky_relu(np_features, alpha)
     tf_leaky_relu = leaky_relu.eval()
   self.assertAllClose(np_leaky_relu, tf_leaky_relu)
   self.assertShapeEqual(np_leaky_relu, leaky_relu)
Example #7
 def _testLeakyRelu(self, np_features, alpha, use_gpu=False):
     np_leaky_relu = self._npLeakyRelu(np_features, alpha)
     with self.test_session(use_gpu=use_gpu):
         leaky_relu = nn_ops.leaky_relu(np_features, alpha)
         tf_leaky_relu = leaky_relu.eval()
     self.assertAllClose(np_leaky_relu, tf_leaky_relu)
     self.assertShapeEqual(np_leaky_relu, leaky_relu)
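
_npLeakyRelu is the NumPy reference the two tests above compare against; it is defined elsewhere in the test class and not shown here, but given the expected values in the surrounding tests it presumably reduces to:

import numpy as np

def _npLeakyRelu(np_features, alpha=0.2):
    # Identity for non-negative entries, alpha * x for negative entries.
    return np.where(np_features >= 0, np_features, alpha * np_features)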
Example #8
def conv_block(input, phase, convs, do_skip=True):
    x = input
    count = 0

    for conv in convs:
        if count == (len(convs) - 2) and do_skip:
            skip_connection = x
        count += 1

        if conv['stride'] > 1:
            x = ZeroPadding2D(((1, 0), (1, 0)))(x)
        x = Conv2D(conv['filter'],
                   conv['kernel'],
                   strides=conv['stride'],
                   padding='valid' if conv['stride'] > 1 else 'same',
                   name='conv_' + str(conv['layer_idx']),
                   use_bias=False if conv['norm'] else True)(x)
        if conv['norm']:
            x = batch_normalization(inputs=x,
                                    training=phase,
                                    name='norm_' + str(conv['layer_idx']))
        if conv['leaky']:
            x = leaky_relu(x, alpha=0.1)

    return add([skip_connection, x]) if do_skip else x
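
conv_block expects convs to be a list of per-layer dicts; the keys it reads are 'filter', 'kernel', 'stride', 'layer_idx', 'norm' and 'leaky'. A hypothetical configuration (input_tensor and phase are placeholder names, not part of the snippet) could look like this:

convs = [
    {'filter': 64, 'kernel': 3, 'stride': 1, 'layer_idx': 1, 'norm': True, 'leaky': True},
    {'filter': 32, 'kernel': 1, 'stride': 1, 'layer_idx': 2, 'norm': True, 'leaky': True},
    {'filter': 64, 'kernel': 3, 'stride': 1, 'layer_idx': 3, 'norm': True, 'leaky': True},
]
# With do_skip=True the output of the first conv is added back onto the output
# of the last one, so the first and last 'filter' values must match (64 here).
x = conv_block(input_tensor, phase, convs, do_skip=True)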
Example #9
    def __call__(self, image_input, training=False, dropout_rate=0.0):
        """
        Runs the CNN producing the embeddings and the gradients.
        :param image_input: Image input to produce embeddings for. [batch_size, 28, 28, 1]
        :param training: A flag indicating training or evaluation
        :param dropout_rate: A tf placeholder of type tf.float32 indicating the amount of dropout applied
        :return: Embeddings of size [batch_size, 64]
        """
        with tf.variable_scope(self.name, reuse=self.reuse):
            outputs = image_input
            with tf.variable_scope('conv_layers'):
                for idx, num_filters in enumerate(self.layer_sizes):
                    with tf.variable_scope('g_conv_{}'.format(idx)):
                        if idx == len(self.layer_sizes) - 1:
                            outputs = tf.layers.conv2d(outputs, num_filters, [2, 2], strides=(1, 1),
                                                       padding='VALID')
                        else:
                            outputs = tf.layers.conv2d(outputs, num_filters, [3, 3], strides=(1, 1),
                                                               padding='VALID')
                        outputs = leaky_relu(outputs)
                        outputs = tf.contrib.layers.batch_norm(outputs, updates_collections=None,
                                                                       decay=0.99,
                                                                       scale=True, center=True,
                                                                       is_training=training)
                        outputs = max_pool(outputs, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                                                   padding='SAME')
                        outputs = tf.layers.dropout(outputs, rate=dropout_rate, training=training)

            image_embedding = tf.contrib.layers.flatten(outputs)


        self.reuse = tf.AUTO_REUSE
        self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
        return image_embedding
Example #10
 def testValues(self):
   for dtype in [np.int32, np.int64, np.float16, np.float32, np.float64]:
     np_values = np.array([-2, -1, 0, 1, 2], dtype=dtype)
     outputs = nn_ops.leaky_relu(constant_op.constant(np_values))
     with self.test_session() as sess:
       outputs = sess.run(outputs)
     tol = 2e-3 if dtype == np.float16 else 1e-6
     self.assertAllClose(
         outputs, [-0.4, -0.2, 0.0, 1.0, 2.0], rtol=tol, atol=tol)
Example #11
 def testValues(self):
   for dtype in [np.int32, np.int64, np.float16, np.float32, np.float64]:
     np_values = np.array([-2, -1, 0, 1, 2], dtype=dtype)
     outputs = nn_ops.leaky_relu(constant_op.constant(np_values))
     with self.cached_session() as sess:
       outputs = sess.run(outputs)
     tol = 2e-3 if dtype == np.float16 else 1e-6
     self.assertAllClose(
         outputs, [-0.4, -0.2, 0.0, 1.0, 2.0], rtol=tol, atol=tol)
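
Both versions of this test rely on the default alpha of 0.2: negative inputs are scaled by 0.2 and the rest pass through, which is where the expected [-0.4, -0.2, 0.0, 1.0, 2.0] comes from. A quick NumPy check:

import numpy as np

x = np.array([-2, -1, 0, 1, 2], dtype=np.float32)
alpha = 0.2  # default alpha of nn_ops.leaky_relu
print(np.where(x < 0, alpha * x, x))  # [-0.4 -0.2  0.   1.   2. ]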
Example #12
    def __call__(self, image_input, training=False, dropout_rate=0.0):
        """
        Runs the CNN producing the embeddings and the gradients.
        :param image_input: Image input to produce embeddings for. [batch_size, 28, 28, 1]
        :param training: A flag indicating training or evaluation
        :param dropout_rate: A tf placeholder of type tf.float32 indicating the amount of dropout applied
        :return: Embeddings of size [batch_size, 64]
        """
        with tf.variable_scope(self.name, reuse=self.reuse):
            outputs = image_input  #image_input.shape: (32,28,28,1)
            with tf.variable_scope('conv_layers'):
                for idx, num_filters in enumerate(self.layer_sizes):
                    with tf.variable_scope('g_conv_{}'.format(idx)):
                        if idx == len(self.layer_sizes) - 1:
                            outputs = tf.layers.conv2d(outputs,
                                                       num_filters, [2, 2],
                                                       strides=(1, 1),
                                                       padding='VALID')
                        else:
                            outputs = tf.layers.conv2d(outputs,
                                                       num_filters, [3, 3],
                                                       strides=(1, 1),
                                                       padding='VALID')
                        outputs = leaky_relu(outputs)
                        outputs = tf.contrib.layers.batch_norm(
                            outputs,
                            updates_collections=None,
                            decay=0.99,
                            scale=True,
                            center=True,
                            is_training=training)
                        outputs = max_pool(outputs,
                                           ksize=[1, 2, 2, 1],
                                           strides=[1, 2, 2, 1],
                                           padding='SAME')
                        #outputs = tf.layers.dropout(outputs, rate=dropout_rate, training=training)

                        # # Fully-connected layer 1
                        # W_fc1 = weight_variable([64,1024])
                        # b_fc1 = bias_variable([1024])
                        # h_pool2_flat = tf.reshape(outputs, [-1,7*7*64])   #[n_samples,1,1,64]->>[n_samples,1*1*64]
                        # h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
                        # h_fc1_drop = tf.nn.dropout(h_fc1, dropout_rate) # dropout to reduce computation

                        # # Fully-connected layer 2
                        # W_fc2 = weight_variable([1024, num_label_classes])  # num_label_classes: number of all label classes
                        # b_fc2 = bias_variable([num_label_classes])
                        # prediction = tf.matmul(h_fc1_drop, W_fc2) + b_fc2

            image_embedding = tf.contrib.layers.flatten(
                outputs)  #image_embedding: (32,64)

        self.reuse = True
        self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                           scope=self.name)
        return image_embedding
Example #13
 def testGradientScalar(self):
   with self.test_session() as sess:
     x = variables.Variable(-100.)
     y = nn_ops.leaky_relu(x, 0.05)
     loss = y**2
     optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.2)
     train_op = optimizer.minimize(loss)
     sess.run(variables.global_variables_initializer())
     sess.run(train_op)
     self.assertAllClose(x.eval(), -99.9)
Example #14
 def testGradientScalar(self):
   with self.test_session() as sess:
     x = variables.Variable(-100.)
     y = nn_ops.leaky_relu(x, 0.05)
     loss = y**2
     optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.2)
     train_op = optimizer.minimize(loss)
     sess.run(variables.global_variables_initializer())
     sess.run(train_op)
     self.assertAllClose(x.eval(), -99.9)
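
The expected value of -99.9 can be verified by hand: with x = -100 the negative branch of leaky_relu is active, so y = alpha * x and the chain rule gives d(y**2)/dx = 2 * y * alpha. One gradient-descent step then moves x by -learning_rate times that gradient:

x, alpha, lr = -100.0, 0.05, 0.2
y = alpha * x               # -5.0 (negative branch)
grad = 2 * y * alpha        # d(y**2)/dx = -0.5
x_new = x - lr * grad       # -100.0 - 0.2 * (-0.5) = -99.9
print(x_new)                # -99.9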
Example #15
    def __call__(self, image_input, training=False, dropout_rate=0.0):
        """
        Runs the CNN producing the predictions and the gradients.
        :param image_input: Image input to produce embeddings for. e.g. for EMNIST [batch_size, 28, 28, 1]
        :param training: A flag indicating training or evaluation
        :param dropout_rate: A tf placeholder of type tf.float32 indicating the amount of dropout applied
        :return: Embeddings of size [batch_size, self.num_classes]
        """

        with tf.variable_scope(self.name, reuse=self.reuse):
            layer_features = []
            with tf.variable_scope('FCCLayerNet'):
                outputs = image_input
                for i in range(len(self.layer_stage_sizes)):
                    with tf.variable_scope('conv_stage_{}'.format(i)):
                        for j in range(self.inner_layer_depth):
                            with tf.variable_scope('conv_{}_{}'.format(i, j)):
                                outputs = tf.layers.dense(
                                    outputs, units=self.layer_stage_sizes[i])
                                outputs = leaky_relu(
                                    outputs, name="leaky_relu{}".format(i))
                                layer_features.append(outputs)
                                if self.batch_norm_use:
                                    outputs = batch_norm(outputs,
                                                         decay=0.99,
                                                         scale=True,
                                                         center=True,
                                                         is_training=training,
                                                         renorm=False)
                        outputs = tf.layers.dropout(outputs,
                                                    rate=dropout_rate,
                                                    training=training)
                        # apply dropout only at dimensionality
                        # reducing steps, i.e. the last layer in
                        # every group

            c_conv_encoder = outputs
            c_conv_encoder = tf.contrib.layers.flatten(c_conv_encoder)
            c_conv_encoder = tf.layers.dense(c_conv_encoder,
                                             units=self.num_classes)

        self.reuse = True
        self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                           scope=self.name)

        if not self.build_completed:
            self.build_completed = True
            count_parameters(self.variables, "FCCLayerNet")

        return c_conv_encoder, layer_features
Example #16
 def testGradientFloat32(self):
   with self.test_session():
     x = constant_op.constant(
         [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
         shape=[2, 5],
         name="x")
     y = nn_ops.leaky_relu(x, alpha=0.1, name="leaky_relu")
     x_init = np.asarray(
         [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
         dtype=np.float32,
         order="F")
     err = gradient_checker.compute_gradient_error(
         x, [2, 5], y, [2, 5], x_init_value=x_init)
   print("leaky_relu (float32) gradient err = ", err)
   self.assertLess(err, 1e-4)
Example #17
  def testRange(self):
    batch_size = 3
    height, width = 4, 4
    np.random.seed(1)  # Make it reproducible.
    inputs = np.random.uniform(
        size=(batch_size, height, width, 3)).astype(np.float32)
    inputs = constant_op.constant(inputs)

    outputs = nn_ops.leaky_relu(inputs)
    self.assertEquals(inputs.shape, outputs.shape)
    with self.test_session() as sess:
      inputs, outputs = sess.run([inputs, outputs])
    self.assertGreaterEqual(outputs.min(), 0.0)
    self.assertLessEqual(outputs.max(), 1.0)
    self.assertAllClose(inputs, outputs)
Example #18
 def testGradientFloat32(self):
   with self.test_session():
     x = constant_op.constant(
         [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
         shape=[2, 5],
         name="x")
     y = nn_ops.leaky_relu(x, alpha=0.1, name="leaky_relu")
     x_init = np.asarray(
         [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
         dtype=np.float32,
         order="F")
     err = gradient_checker.compute_gradient_error(
         x, [2, 5], y, [2, 5], x_init_value=x_init)
   print("leaky_relu (float32) gradient err = ", err)
   self.assertLess(err, 1e-4)
Example #19
    def __call__(self, image_input, scope, training=False, dropout_rate=0.0):
        outputs = image_input
        encoder_layers = []
        current_layers = [outputs]
        with tf.variable_scope(scope):
            for i, layer_size in enumerate(self.layer_sizes):
                encoder_inner_layers = [outputs]
                with tf.variable_scope('g_conv{}'.format(i)):
                    if i == 0:  
                        outputs = self.conv_layer(outputs, num_filters=64,
                                                  filter_size=(3, 3), strides=(2, 2),
                                                  scope='g_conv{}'.format(i))
                        outputs = leaky_relu(features=outputs)
                        outputs = batch_norm(outputs, decay=0.99, scale=True,
                                             center=True, is_training=training,
                                             renorm=True, scope='bn_1')
                        current_layers.append(outputs)
                        encoder_inner_layers.append(outputs)
                    else:
                        for j in range(self.inner_layers[i]):  # Build the inner Layers of the MultiLayer
                            with tf.variable_scope('g_conv_inner_layer{}'.format(j)):
                                outputs = self.add_encoder_layer(input=outputs,
                                                                 training=training,
                                                                 name="encoder_layer_{}_{}".format(i,
                                                                                                   j),
                                                                 layer_to_skip_connect=current_layers,
                                                                 num_features=self.layer_sizes[i],
                                                                 dim_reduce=False,
                                                                 local_inner_layers=encoder_inner_layers,
                                                                 dropout_rate=dropout_rate,
                                                                 scope="encoder_layer_{}_{}".format(i,
                                                                                                    j))
                                encoder_inner_layers.append(outputs)
                                # current_layers.append(outputs)
                        # add final dim reducing conv layer for this MultiLayer
                        outputs = self.add_encoder_layer(input=outputs,
                                                         name="encoder_layer_{}".format(j),
                                                         training=training,
                                                         layer_to_skip_connect=current_layers,
                                                         local_inner_layers=encoder_inner_layers,
                                                         num_features=self.layer_sizes[i],
                                                         dim_reduce=True, dropout_rate=dropout_rate,
                                                         scope="encoder_layer_{}".format(i))
                        current_layers.append(outputs)
                    encoder_layers.append(outputs)
                    # print('{}_th encoder output', outputs)

        return outputs, encoder_layers
Example #20
 def testGradGradFloat32(self):
     with compat.forward_compatibility_horizon(2018, 11, 2):
         with self.test_session():
             x = constant_op.constant(
                 [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
                 shape=[2, 5],
                 name="x")
             y = nn_ops.leaky_relu(x, alpha=0.1, name="leaky_relu")
             z = gradients_impl.gradients(y, x)
             x_init = np.asarray([[-0.9, -0.7, -0.5, -0.3, -0.1],
                                  [0.1, 0.3, 0.5, 0.7, 0.9]],
                                 dtype=np.float32,
                                 order="F")
             err = gradient_checker.compute_gradient_error(
                 x, [2, 5], z[0], [2, 5], x_init_value=x_init)
         print("leaky_relu (float32) gradient of gradient err = ", err)
         self.assertLess(err, 1e-4)
Example #21
 def testGradGradFloat32(self):
   with compat.forward_compatibility_horizon(2018, 11, 2):
     with self.test_session():
       x = constant_op.constant(
           [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
           shape=[2, 5],
           name="x")
       y = nn_ops.leaky_relu(x, alpha=0.1, name="leaky_relu")
       z = gradients_impl.gradients(y, x)
       x_init = np.asarray(
           [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
           dtype=np.float32,
           order="F")
       err = gradient_checker.compute_gradient_error(
           x, [2, 5], z[0], [2, 5], x_init_value=x_init)
     print("leaky_relu (float32) gradient of gradient err = ", err)
     self.assertLess(err, 1e-4)
Example #22
 def testValues(self):
   np_values = np.array([-1.0, 0.0, 0.5, 1.0, 2.0], dtype=np.float32)
   outputs = nn_ops.leaky_relu(constant_op.constant(np_values))
   with self.test_session() as sess:
     outputs = sess.run(outputs)
   self.assertAllClose(outputs, [-0.2, 0.0, 0.5, 1.0, 2.0])
Example #23
    def __call__(self, text_input, training=False, dropout_rate=0.0):
        """
        Runs the CNN producing the predictions and the gradients.
        :param text_input: Text input to produce embeddings for. e.g. for text data [batch_size, 300]
        :param training: A flag indicating training or evaluation
        :param dropout_rate: A tf placeholder of type tf.float32 indicating the amount of dropout applied
        :return: Embeddings of size [batch_size, self.num_classes]
        """

        with tf.variable_scope(self.name, reuse=self.reuse):
            layer_features = []
            with tf.variable_scope('VGGNet'):
                outputs = text_input
                for i in range(len(self.layer_stage_sizes)):
                    with tf.variable_scope('conv_stage_{}'.format(i)):
                        for j in range(self.inner_layer_depth):
                            with tf.variable_scope('conv_{}_{}'.format(i, j)):
                                if (j == self.inner_layer_depth -
                                        1) and self.strided_dim_reduction:
                                    stride = 2
                                else:
                                    stride = 1
                                outputs = tf.layers.conv2d(
                                    outputs,
                                    self.layer_stage_sizes[i], [3, 3],
                                    strides=(stride, stride),
                                    padding='SAME',
                                    activation=None)
                                outputs = leaky_relu(
                                    outputs, name="leaky_relu{}".format(i))
                                layer_features.append(outputs)
                                if self.batch_norm_use:
                                    outputs = batch_norm(outputs,
                                                         decay=0.99,
                                                         scale=True,
                                                         center=True,
                                                         is_training=training,
                                                         renorm=False)
                        if self.strided_dim_reduction == False:
                            outputs = tf.layers.max_pooling2d(outputs,
                                                              pool_size=(2, 2),
                                                              strides=2)

                        outputs = tf.layers.dropout(outputs,
                                                    rate=dropout_rate,
                                                    training=training)
                        # apply dropout only at dimensionality
                        # reducing steps, i.e. the last layer in
                        # every group

            c_conv_encoder = outputs
            c_conv_encoder = tf.contrib.layers.flatten(c_conv_encoder)
            c_conv_encoder = tf.layers.dense(c_conv_encoder,
                                             units=self.num_classes)

        self.reuse = True
        self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                           scope=self.name)

        if not self.build_completed:
            self.build_completed = True
            count_parameters(self.variables, "VGGNet")

        return c_conv_encoder, layer_features
Example #24
 def _testLeakyRelu(self, np_features, alpha):
   np_leaky_relu = self._npLeakyRelu(np_features, alpha)
   tf_leaky_relu = nn_ops.leaky_relu(np_features, alpha)
   self.assertAllClose(np_leaky_relu, tf_leaky_relu)
   self.assertShapeEqual(np_leaky_relu, tf_leaky_relu)
Example #25
 def f(x):
   assert x.dtype == dtypes.float64
   with backprop.GradientTape() as tape:
     tape.watch(x)
     y = nn_ops.leaky_relu(x)
   return tape.gradient(y, x)
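
Because leaky_relu is piecewise linear, the gradient returned by f is elementwise: 1.0 for positive inputs and the default alpha of 0.2 for negative ones. A hypothetical call in the same test context:

grads = f(constant_op.constant([-3.0, 2.0], dtype=dtypes.float64))
# grads -> [0.2, 1.0] with the default alpha of 0.2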
Example #26
 def loss():
   return nn_ops.leaky_relu(x, 0.05)**2
Example #27
    def __call__(self,
                 conditional_input,
                 generated_input,
                 training=False,
                 dropout_rate=0.0):
        """
        :param conditional_input: A batch of conditional inputs (x_i) of size [batch_size, height, width, channel]
        :param generated_input: A batch of generated inputs (x_g) of size [batch_size, height, width, channel]
        :param training: Placeholder for training or a boolean indicating training or validation
        :param dropout_rate: A float placeholder for dropout rate or a float indicating the dropout rate
        :return: The discriminator logits and the list of intermediate layers
        """
        conditional_input = tf.convert_to_tensor(conditional_input)
        generated_input = tf.convert_to_tensor(generated_input)
        with tf.variable_scope(self.name, reuse=self.reuse):
            concat_images = tf.concat([conditional_input, generated_input],
                                      axis=3)
            outputs = concat_images
            encoder_layers = []
            current_layers = [outputs]
            with tf.variable_scope('conv_layers'):
                for i, layer_size in enumerate(self.layer_sizes):
                    encoder_inner_layers = [outputs]
                    with tf.variable_scope('g_conv{}'.format(i)):
                        if i == 0:
                            outputs = self.conv_layer(outputs,
                                                      num_filters=64,
                                                      filter_size=(3, 3),
                                                      strides=(2, 2))
                            outputs = leaky_relu(features=outputs)
                            outputs = layer_norm(inputs=outputs,
                                                 center=True,
                                                 scale=True)
                            current_layers.append(outputs)
                        else:
                            for j in range(self.inner_layers[i]):
                                outputs = self.add_encoder_layer(
                                    input=outputs,
                                    name="encoder_inner_conv_{}_{}".format(
                                        i, j),
                                    training=training,
                                    layer_to_skip_connect=current_layers[-2],
                                    num_features=self.layer_sizes[i],
                                    dropout_rate=dropout_rate,
                                    dim_reduce=False,
                                    local_inner_layers=encoder_inner_layers)
                                current_layers.append(outputs)
                            outputs = self.add_encoder_layer(
                                input=outputs,
                                name="encoder_outer_conv_{}".format(i),
                                training=training,
                                layer_to_skip_connect=current_layers[-2],
                                local_inner_layers=encoder_inner_layers,
                                num_features=self.layer_sizes[i],
                                dropout_rate=dropout_rate,
                                dim_reduce=True)
                            current_layers.append(outputs)
                        encoder_layers.append(outputs)

            flatten = tf.contrib.layers.flatten(encoder_layers[-1])
            with tf.variable_scope('discriminator_out'):
                outputs = tf.layers.dense(flatten, 1, name='outputs')
        self.reuse = True
        self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                           scope=self.name)
        #view_names_of_variables(self.variables)
        if self.build:
            print("discr layers", self.conv_layer_num)
            count_parameters(self.variables,
                             name="discriminator_parameter_num")
        self.build = False
        return outputs, current_layers
Example #28
    def __call__(self,
                 inputs,
                 input_y,
                 classes,
                 training=False,
                 dropout_rate=0.0):
        inputs = tf.convert_to_tensor(inputs)
        with tf.variable_scope(self.name, reuse=self.reuse):
            outputs = inputs
            encoder_layers = []
            current_layers = [outputs]
            with tf.variable_scope('conv_layers'):
                for i, layer_size in enumerate(self.layer_sizes):
                    encoder_inner_layers = [outputs]
                    with tf.variable_scope('g_conv{}'.format(i)):
                        if i == 0:
                            outputs = self.conv_layer(outputs,
                                                      num_filters=64,
                                                      filter_size=(3, 3),
                                                      strides=(2, 2))
                            outputs = leaky_relu(features=outputs)
                            outputs = layer_norm(inputs=outputs,
                                                 center=True,
                                                 scale=True)
                            # outputs = tf.nn.relu(outputs)
                            # outputs = tf.layers.batch_normalization(outputs, training=training, momentum=0.9)
                            current_layers.append(outputs)
                        else:
                            for j in range(self.inner_layers[i]):
                                outputs = self.add_encoder_layer(
                                    input=outputs,
                                    name="encoder_inner_conv_{}_{}".format(
                                        i, j),
                                    training=training,
                                    layer_to_skip_connect=current_layers[-2],
                                    num_features=self.layer_sizes[i],
                                    dropout_rate=dropout_rate,
                                    dim_reduce=False,
                                    local_inner_layers=encoder_inner_layers)
                                current_layers.append(outputs)
                                encoder_inner_layers.append(outputs)
                            outputs = self.add_encoder_layer(
                                input=outputs,
                                name="encoder_outer_conv_{}".format(i),
                                training=training,
                                layer_to_skip_connect=current_layers[-2],
                                local_inner_layers=encoder_inner_layers,
                                num_features=self.layer_sizes[i],
                                dropout_rate=dropout_rate,
                                dim_reduce=True)
                            current_layers.append(outputs)
                        encoder_layers.append(outputs)

            with tf.variable_scope('classifier_dense_block'):
                if self.use_wide_connections:
                    mean_encoder_layers = []
                    concat_encoder_layers = []
                    for layer in encoder_layers:
                        mean_encoder_layers.append(
                            tf.reduce_mean(layer, axis=[1, 2]))
                        concat_encoder_layers.append(tf.layers.flatten(layer))
                    feature_level_flatten = tf.concat(mean_encoder_layers,
                                                      axis=1)
                    location_level_flatten = tf.concat(concat_encoder_layers,
                                                       axis=1)
                else:
                    feature_level_flatten = tf.reduce_mean(encoder_layers[-1],
                                                           axis=[1, 2])
                    location_level_flatten = tf.layers.flatten(
                        encoder_layers[-1])

                feature_level_dense = tf.layers.dense(feature_level_flatten,
                                                      units=1024,
                                                      activation=leaky_relu)
                # feature_level_dense = tf.layers.dense(feature_level_flatten, units=1024)
                combo_level_flatten = tf.concat(
                    [feature_level_dense, location_level_flatten], axis=1)

            with tf.variable_scope('classifier_out_block'):
                logits = tf.layers.dense(combo_level_flatten, units=classes)
                cost = tf.reduce_mean(
                    tf.nn.softmax_cross_entropy_with_logits(labels=input_y,
                                                            logits=logits))
                correct_prediction = tf.equal(tf.argmax(logits, 1),
                                              tf.argmax(input_y, 1))
                accuracy = tf.reduce_mean(
                    tf.cast(correct_prediction, tf.float32))

                # cost = tf.losses.softmax_cross_entropy(onehot_labels=input_y, logits=logits)
                # correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(input_y, 1))
                # accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

                # cost = tf.losses.softmax_cross_entropy(onehot_labels=input_y, logits=logits)
                # correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(input_y, 1))
                # # correct_prediction=tf.Print(correct_prediction,[correct_prediction],'correct_prediction')
                # accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        self.reuse = True
        self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                           scope=self.name)
        if self.build:
            print("classification layers", self.conv_layer_num)
            count_parameters(self.variables, name="classifier_parameter_num")
        self.build = False
        return cost, accuracy
Example #29
 def f(x):
     assert x.dtype == dtypes.float64
     with backprop.GradientTape() as tape:
         tape.watch(x)
         y = nn_ops.leaky_relu(x)
     return tape.gradient(y, x)
Example #30
 def _testLeakyRelu(self, np_features, alpha):
     np_leaky_relu = self._npLeakyRelu(np_features, alpha)
     tf_leaky_relu = nn_ops.leaky_relu(np_features, alpha)
     self.assertAllClose(np_leaky_relu, tf_leaky_relu)
     self.assertShapeEqual(np_leaky_relu, tf_leaky_relu)
Example #31
    def add_decoder_layer(self,
                          input,
                          name,
                          training,
                          dropout_rate,
                          layer_to_skip_connect,
                          local_inner_layers,
                          num_features,
                          dim_upscale=False,
                          h_size=None,
                          w_size=None):
        """
        Adds a resnet decoder layer.
        :param input: Input features
        :param name: Layer Name
        :param training: Training placeholder or boolean flag
        :param dropout_rate: Float placeholder or float indicating the dropout rate
        :param layer_to_skip_connect: Layer to skip connect to.
        :param local_inner_layers: A list with the inner layers of the current MultiLayer
        :param num_features: Num feature maps for convolution
        :param dim_upscale: Dimensionality upscale
        :param h_size: Height to upscale to
        :param w_size: Width to upscale to
        :return: The output of the decoder layer
        """
        [b1, h1, w1, d1] = input.get_shape().as_list()
        if len(layer_to_skip_connect) >= 2:
            layer_to_skip_connect = layer_to_skip_connect[-2]
        else:
            layer_to_skip_connect = None

        if layer_to_skip_connect is not None:
            [b0, h0, w0, d0] = layer_to_skip_connect.get_shape().as_list()

            if h0 < h1:
                skip_connect_layer = self.conv_layer(
                    layer_to_skip_connect,
                    int(layer_to_skip_connect.get_shape()[3]), [1, 1],
                    strides=(1, 1),
                    transpose=True,
                    h_size=h_size,
                    w_size=w_size)
            else:
                skip_connect_layer = layer_to_skip_connect
            current_layers = [input, skip_connect_layer]
        else:
            current_layers = [input]

        current_layers.extend(local_inner_layers)
        current_layers = remove_duplicates(current_layers)
        outputs = tf.concat(current_layers, axis=3)

        if dim_upscale:
            outputs = self.conv_layer(outputs,
                                      num_features, [3, 3],
                                      strides=(1, 1),
                                      transpose=True,
                                      w_size=w_size,
                                      h_size=h_size)
            outputs = leaky_relu(features=outputs)
            outputs = batch_norm(outputs,
                                 decay=0.99,
                                 scale=True,
                                 center=True,
                                 is_training=training,
                                 renorm=True)
            outputs = tf.layers.dropout(outputs,
                                        rate=dropout_rate,
                                        training=training)
        else:
            outputs = self.conv_layer(outputs,
                                      num_features, [3, 3],
                                      strides=(1, 1),
                                      transpose=False)
            outputs = leaky_relu(features=outputs)
            outputs = batch_norm(outputs,
                                 decay=0.99,
                                 scale=True,
                                 center=True,
                                 is_training=training,
                                 renorm=True)

        return outputs
Example #32
 def loss():
     return nn_ops.leaky_relu(x, 0.05)**2
Example #33
    def __call__(self,
                 z_inputs,
                 conditional_input,
                 training=False,
                 dropout_rate=0.0):
        """
        Apply network on data.
        :param z_inputs: Random noise to inject [batch_size, z_dim]
        :param conditional_input: A batch of images to use as conditionals [batch_size, height, width, channels]
        :param training: Training placeholder or boolean
        :param dropout_rate: Dropout rate placeholder or float
        :return: Returns x_g (generated images), encoder_layers(encoder features), decoder_layers(decoder features)
        """
        conditional_input = tf.convert_to_tensor(conditional_input)
        with tf.variable_scope(self.name, reuse=self.reuse):
            # reshape from inputs
            outputs = conditional_input
            encoder_layers = []
            current_layers = [outputs]
            with tf.variable_scope('conv_layers'):

                for i, layer_size in enumerate(self.layer_sizes):
                    encoder_inner_layers = [outputs]
                    with tf.variable_scope('g_conv{}'.format(i)):
                        if i == 0:  #first layer is a single conv layer instead of MultiLayer for best results
                            outputs = self.conv_layer(outputs,
                                                      num_filters=64,
                                                      filter_size=(3, 3),
                                                      strides=(2, 2))
                            outputs = leaky_relu(features=outputs)
                            outputs = batch_norm(outputs,
                                                 decay=0.99,
                                                 scale=True,
                                                 center=True,
                                                 is_training=training,
                                                 renorm=True)
                            current_layers.append(outputs)
                            encoder_inner_layers.append(outputs)
                        else:
                            for j in range(
                                    self.inner_layers[i]
                            ):  #Build the inner Layers of the MultiLayer
                                outputs = self.add_encoder_layer(
                                    input=outputs,
                                    training=training,
                                    name="encoder_layer_{}_{}".format(i, j),
                                    layer_to_skip_connect=current_layers,
                                    num_features=self.layer_sizes[i],
                                    dim_reduce=False,
                                    local_inner_layers=encoder_inner_layers,
                                    dropout_rate=dropout_rate)
                                encoder_inner_layers.append(outputs)
                                current_layers.append(outputs)
                            #add final dim reducing conv layer for this MultiLayer
                            outputs = self.add_encoder_layer(
                                input=outputs,
                                name="encoder_layer_{}".format(i),
                                training=training,
                                layer_to_skip_connect=current_layers,
                                local_inner_layers=encoder_inner_layers,
                                num_features=self.layer_sizes[i],
                                dim_reduce=True,
                                dropout_rate=dropout_rate)
                            current_layers.append(outputs)
                        encoder_layers.append(outputs)

            g_conv_encoder = outputs

            with tf.variable_scope(
                    "vector_expansion"
            ):  # Used for expanding the z injected noise to match the
                # dimensionality of the various decoder MultiLayers, injecting
                # noise into multiple decoder layers in a skip-connection way
                # improves quality of results. We inject in the first 3 decode
                # multi layers
                num_filters = 8
                concat_shape = tuple(encoder_layers[-1].get_shape())
                concat_shape = [int(i) for i in concat_shape]
                z_dense_0 = tf.layers.dense(z_inputs,
                                            concat_shape[1] * concat_shape[2] *
                                            num_filters,
                                            name='input_dense_0')
                z_reshape_0 = tf.reshape(z_dense_0, [
                    self.batch_size, concat_shape[1], concat_shape[2],
                    num_filters
                ],
                                         name='z_reshape_0')
                concat_shape = tuple(encoder_layers[-2].get_shape())
                concat_shape = [int(i) for i in concat_shape]
                z_dense_1 = tf.layers.dense(z_inputs,
                                            concat_shape[1] * concat_shape[2] *
                                            num_filters,
                                            name='input_dense_1')
                z_reshape_1 = tf.reshape(z_dense_1, [
                    self.batch_size, concat_shape[1], concat_shape[2],
                    num_filters
                ],
                                         name='z_reshape_1')
                num_filters = num_filters // 2
                concat_shape = tuple(encoder_layers[-3].get_shape())
                concat_shape = [int(i) for i in concat_shape]
                z_dense_2 = tf.layers.dense(z_inputs,
                                            concat_shape[1] * concat_shape[2] *
                                            num_filters,
                                            name='input_dense_2')
                z_reshape_2 = tf.reshape(z_dense_2, [
                    self.batch_size, concat_shape[1], concat_shape[2],
                    num_filters
                ],
                                         name='z_reshape_2')
                num_filters = num_filters // 2
                concat_shape = tuple(encoder_layers[-4].get_shape())
                concat_shape = [int(i) for i in concat_shape]
                z_dense_3 = tf.layers.dense(z_inputs,
                                            concat_shape[1] * concat_shape[2] *
                                            num_filters,
                                            name='input_dense_3')
                z_reshape_3 = tf.reshape(z_dense_3, [
                    self.batch_size, concat_shape[1], concat_shape[2],
                    num_filters
                ],
                                         name='z_reshape_3')

            z_layers = [z_reshape_0, z_reshape_1, z_reshape_2, z_reshape_3]
            outputs = g_conv_encoder
            decoder_layers = []
            current_layers = [outputs]
            with tf.variable_scope('g_deconv_layers'):
                for i in range(len(self.layer_sizes) + 1):
                    if i < 3:  #Pass the injected noise to the first 3 decoder layers for sharper results
                        outputs = tf.concat([z_layers[i], outputs], axis=3)
                        current_layers[-1] = outputs
                    idx = len(self.layer_sizes) - 1 - i
                    num_features = self.layer_sizes[idx]
                    inner_layers = self.inner_layers[idx]
                    upscale_shape = encoder_layers[idx].get_shape().as_list()
                    if idx < 0:
                        num_features = self.layer_sizes[0]
                        inner_layers = self.inner_layers[0]
                        outputs = tf.concat([outputs, conditional_input],
                                            axis=3)
                        upscale_shape = conditional_input.get_shape().as_list()

                    with tf.variable_scope('g_deconv{}'.format(i)):
                        decoder_inner_layers = [outputs]
                        for j in range(inner_layers):
                            if i == 0 and j == 0:
                                outputs = self.add_decoder_layer(
                                    input=outputs,
                                    name="decoder_inner_conv_{}_{}".format(
                                        i, j),
                                    training=training,
                                    layer_to_skip_connect=current_layers,
                                    num_features=num_features,
                                    dim_upscale=False,
                                    local_inner_layers=decoder_inner_layers,
                                    dropout_rate=dropout_rate)
                                decoder_inner_layers.append(outputs)
                            else:
                                outputs = self.add_decoder_layer(
                                    input=outputs,
                                    name="decoder_inner_conv_{}_{}".format(
                                        i, j),
                                    training=training,
                                    layer_to_skip_connect=current_layers,
                                    num_features=num_features,
                                    dim_upscale=False,
                                    local_inner_layers=decoder_inner_layers,
                                    w_size=upscale_shape[1],
                                    h_size=upscale_shape[2],
                                    dropout_rate=dropout_rate)
                                decoder_inner_layers.append(outputs)
                        current_layers.append(outputs)
                        decoder_layers.append(outputs)

                        if idx >= 0:
                            upscale_shape = encoder_layers[
                                idx - 1].get_shape().as_list()
                            if idx == 0:
                                upscale_shape = conditional_input.get_shape(
                                ).as_list()
                            outputs = self.add_decoder_layer(
                                input=outputs,
                                name="decoder_outer_conv_{}".format(i),
                                training=training,
                                layer_to_skip_connect=current_layers,
                                num_features=num_features,
                                dim_upscale=True,
                                local_inner_layers=decoder_inner_layers,
                                w_size=upscale_shape[1],
                                h_size=upscale_shape[2],
                                dropout_rate=dropout_rate)
                            current_layers.append(outputs)
                        if (idx - 1) >= 0:
                            outputs = tf.concat(
                                [outputs, encoder_layers[idx - 1]], axis=3)
                            current_layers[-1] = outputs

                high_res_layers = []

                for p in range(2):
                    outputs = self.conv_layer(outputs,
                                              self.layer_sizes[0], [3, 3],
                                              strides=(1, 1),
                                              transpose=False)
                    outputs = leaky_relu(features=outputs)

                    outputs = batch_norm(outputs,
                                         decay=0.99,
                                         scale=True,
                                         center=True,
                                         is_training=training,
                                         renorm=True)
                    high_res_layers.append(outputs)
                outputs = self.conv_layer(outputs,
                                          self.num_channels, [3, 3],
                                          strides=(1, 1),
                                          transpose=False)
            # output images
            with tf.variable_scope('g_tanh'):
                gan_decoder = tf.tanh(outputs, name='outputs')

        self.reuse = True
        self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                           scope='g')

        if self.build:
            print("generator_total_layers", self.conv_layer_num)
            count_parameters(self.variables, name="generator_parameter_num")
        self.build = False
        return gan_decoder, encoder_layers, decoder_layers