def channel_attention(x, outdim, kernel_size, strides, name, reduction=16):
    """Channel Attention (CA) layer.

    Squeezes the input to per-channel statistics via global average pooling,
    runs them through a two-conv bottleneck, and uses the resulting sigmoid
    gate to rescale the original feature map channel-wise.

    :param x: input tensor (NHWC feature map)
    :param outdim: number of output channels of the gating branch
    :param kernel_size: conv2d kernel size for both gating convs
    :param strides: conv2d strides for both gating convs
    :param name: scope name suffix
    :param reduction: bottleneck reduction ratio (default 16, the
                      previously hard-coded value, so callers are unaffected)
    :return: input tensor rescaled by the learned channel attention
    """
    with tf.variable_scope("CA-%s" % name):
        # Keep the un-pooled input; it is what gets rescaled at the end.
        skip_conn = x

        # Squeeze: collapse spatial dimensions to one value per channel.
        x = tfu.adaptive_global_average_pool_2d(x)

        # Excite: reduce channels by `reduction`, then expand back to outdim.
        x = tf.layers.conv2d(x,
                             outdim // reduction,
                             kernel_size=kernel_size,
                             strides=strides,
                             padding='SAME',
                             name="conv_1",
                             activation=None)
        x = tf.nn.leaky_relu(x, alpha=0.2)

        x = tf.layers.conv2d(x,
                             outdim,
                             kernel_size=kernel_size,
                             strides=strides,
                             padding='SAME',
                             name="conv_2",
                             activation=None)
        # Sigmoid gate in (0, 1) per channel.
        x = tf.nn.sigmoid(x)

        # Rescale the original features by the attention gate.
        x = tf.multiply(skip_conn, x)

        return x
# Example 2
    def channel_attention(self, x, f, reduction, name):
        """
        Channel Attention (CA) Layer: squeeze spatial information into
        per-channel statistics, excite them through a bottleneck, and
        rescale the input features with the resulting sigmoid gate.
        :param x: input layer
        :param f: conv2d filter size
        :param reduction: conv2d filter reduction rate
        :param name: scope name
        :return: output layer
        """
        with tf.variable_scope("CA-%s" % name):
            # Preserve the original features for the final rescaling.
            residual = tf.identity(x, name='identity')

            # Squeeze: collapse H and W to a single value per channel.
            pooled = tfu.adaptive_global_average_pool_2d(x)

            # Excite: bottleneck down by `reduction`, then back up to f.
            squeezed = self.act(tfu.conv2d(pooled, f=f // reduction, k=1, name="conv2d-1"))
            gate = tf.nn.sigmoid(tfu.conv2d(squeezed, f=f, k=1, name="conv2d-2"))

            # Channel-wise rescaling of the input by the attention gate.
            return tf.multiply(residual, gate)
# Example 3
            self.opt = tf.train.AdamOptimizer(learning_rate=self.lr,
                                              beta1=self.beta1, beta2=self.beta2, epsilon=self.opt_eps)
        elif self.optimizer == 'sgd':  # sgd + m with nesterov
        elif self.optimizer == 'sgd':  # sgd + m with nestrov
            self.opt = tf.train.MomentumOptimizer(learning_rate=self.lr, momentum=self.momentum, use_nesterov=True)
        else:
            raise NotImplementedError("[-] Not supported optimizer (%s)" % self.optimizer)
            raise NotImplementedError("[-] Not supported optimizer {}".format(self.optimizer))

    def image_processing(self, x, sign, name):
        with tf.variable_scope(name):
# --- diff fragment (partial): def channel_attention(self, x, f, reduction, name) ---
        with tf.variable_scope("CA-%s" % name):
            skip_conn = tf.identity(x, name='identity')

            x = tfutil.adaptive_global_average_pool_2d(x)
            x = tfu.adaptive_global_average_pool_2d(x)

            x = tfutil.conv2d(x, f=f // reduction, k=1, name="conv2d-1")
            x = tfu.conv2d(x, f=f // reduction, k=1, name="conv2d-1")
            x = self.act(x)

            x = tfutil.conv2d(x, f=f, k=1, name="conv2d-2")
            x = tfu.conv2d(x, f=f, k=1, name="conv2d-2")
            x = tf.nn.sigmoid(x)
            return tf.multiply(skip_conn, x)

    def residual_channel_attention_block(self, x, f, kernel_size, reduction, use_bn, name):
        with tf.variable_scope("RCAB-%s" % name):
            skip_conn = tf.identity(x, name='identity')