Example #1
def deconv_relu(x, kernal, scope=None):
    # 2-D transposed convolution followed by ReLU.
    # kernal layout is [height, width, output_channels, input_channels], matching
    # tf.nn.conv2d_transpose; weight_xavier_init, bias_variable and deconv2d are
    # project helpers (not shown here). Assumes TensorFlow 1.x imported as tf.
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[-1], n_outputs=kernal[-2],
                               activefunction='relu', variable_name=scope + 'W')
        B = bias_variable([kernal[-2]], variable_name=scope + 'B')
        deconv = deconv2d(x, W) + B
        deconv = tf.nn.relu(deconv)
        return deconv
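A minimal usage sketch, assuming TensorFlow 1.x in graph mode and that the project's deconv2d helper performs the usual 2x upsampling (the actual stride is defined by that helper, which is not shown here); the placeholder shape and scope name are illustrative only:

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 64, 64, 32], name='feature_map')
up = deconv_relu(x, kernal=(3, 3, 16, 32), scope='up1')  # 32 input channels -> 16 output channels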
Example #2
def conv_bn_relu_drop(x, kernal, phase, drop, scope=None):
    # 2-D convolution -> batch normalization -> ReLU -> dropout.
    # kernal layout is [height, width, input_channels, output_channels]; phase is the
    # batch-norm training flag and drop is the keep probability passed to tf.nn.dropout
    # (TF 1.x signature). conv2d and normalizationlayer are project helpers.
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2], n_outputs=kernal[-1],
                               activefunction='relu', variable_name=scope + 'conv_W')
        B = bias_variable([kernal[-1]], variable_name=scope + 'conv_B')
        conv = conv2d(x, W) + B
        conv = normalizationlayer(conv, is_train=phase, norm_type='batch', scope=scope + 'normalization')
        conv = tf.nn.dropout(tf.nn.relu(conv), drop, name=scope + 'conv_dropout')
        return conv
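A hedged usage sketch for conv_bn_relu_drop, assuming TF 1.x placeholders for the input image, the batch-norm training flag, and the dropout keep probability (shapes and names below are illustrative):

x = tf.placeholder(tf.float32, shape=[None, 128, 128, 1], name='image')
phase = tf.placeholder(tf.bool, name='is_train')      # batch-norm training flag
drop = tf.placeholder(tf.float32, name='keep_prob')   # keep probability for tf.nn.dropout
conv1 = conv_bn_relu_drop(x, kernal=(3, 3, 1, 32), phase=phase, drop=drop, scope='layer1')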
def attention_layer(x, ratio=4, scope=None):
    # Attention block combining three recalibration branches:
    #   recalibrate1 - spatial squeeze (1x1 conv + sigmoid over channels),
    #   recalibrate2 - channel squeeze-and-excitation (global average pool + two FC layers),
    #   recalibrate3 - conv/pool/conv/pool path restored to input resolution by a stride-4 deconv.
    with tf.name_scope(scope):
        _, width, height, channel = x.get_shape().as_list()
        x_shape = x.get_shape().as_list()

        # Spatial squeeze: one sigmoid attention map over the spatial locations.
        recalibrate1 = conv_sigmoid(x,
                                    kernal=(1, 1, channel, 1),
                                    scope=scope + 'spatial_squeeze')

        # Channel squeeze-and-excitation: global average pool, bottleneck FC (ReLU),
        # expansion FC (sigmoid), reshaped for broadcasting over the spatial dims.
        squeeze = tf.reduce_mean(x,
                                 axis=(1, 2),
                                 name=scope + 'channel_squeeze')
        excitation = full_connected_relu(squeeze,
                                         kernal=(channel, channel // ratio),
                                         activefuncation='relu',
                                         scope=scope + '_fully_connected1')
        excitation = full_connected_relu(excitation,
                                         kernal=(channel // ratio, channel),
                                         activefuncation='sigmoid',
                                         scope=scope + '_fully_connected2')
        recalibrate2 = tf.reshape(excitation, [-1, 1, 1, channel])

        # Convolutional branch: two conv+sigmoid layers with 2x2 max pooling,
        # then a stride-4 transposed convolution back to the input resolution.
        recalibrate3 = conv_sigmoid(x,
                                    kernal=(3, 3, channel, channel // 2),
                                    scope=scope + 'conv1')
        recalibrate3 = max_pooling_2x2(recalibrate3)
        recalibrate3 = conv_sigmoid(recalibrate3,
                                    kernal=(3, 3, channel // 2, channel // 4),
                                    scope=scope + 'conv2')
        recalibrate3 = max_pooling_2x2(recalibrate3)

        kernal = [3, 3, channel, channel // 4]
        W = weight_xavier_init(shape=kernal,
                               n_inputs=kernal[0] * kernal[1] * kernal[-1],
                               n_outputs=kernal[-2],
                               activefunction='relu',
                               variable_name=scope + 'W')
        B = bias_variable([kernal[-2]], variable_name=scope + 'B')
        # Use the dynamic batch size instead of a hard-coded value.
        output_shape = tf.stack([tf.shape(x)[0], x_shape[1], x_shape[2], channel])
        deconv = tf.nn.conv2d_transpose(recalibrate3,
                                        W,
                                        output_shape,
                                        strides=[1, 4, 4, 1],
                                        padding="SAME") + B
        recalibrate3 = tf.nn.leaky_relu(deconv)

        # recalibrate3 = tf.image.resize_images(images=recalibrate3, size=[width, height])

        # Combine the branches with learnable scalar weights, then gate the input.
        recalibrate4 = tf.multiply(recalibrate1, recalibrate2)
        alpha_attention = tf.Variable(tf.constant(1.0))
        beta_attention = tf.Variable(tf.constant(1.0))
        attention = tf.multiply(alpha_attention * recalibrate4,
                                beta_attention * recalibrate3)

        out = tf.multiply(attention, x)
        return out
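A usage sketch for attention_layer, under the assumption that the input has static spatial dimensions divisible by 4 (so the two 2x2 poolings followed by the stride-4 transposed convolution return to the input resolution) and a channel count divisible by ratio; the shape and scope name are illustrative:

feat = tf.placeholder(tf.float32, shape=[None, 64, 64, 32], name='features')
attended = attention_layer(feat, ratio=4, scope='attention1')  # same shape as feat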
Example #4
def full_connected_relu(x, kernal, activefuncation='relu', scope=None):
    # Fully connected layer with a selectable activation ('relu', 'softmax' or 'sigmoid').
    # x is a 2-D tensor [batch, features] and kernal is (input_features, output_features).
    with tf.name_scope(scope):
        W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1],
                               n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'W')
        B = bias_variable([kernal[-1]], variable_name=scope + 'B')
        FC = tf.matmul(x, W) + B
        if activefuncation == 'relu':
            FC = tf.nn.relu(FC)
        elif activefuncation == 'softmax':
            FC = tf.nn.softmax(FC)
        elif activefuncation == 'sigmoid':
            FC = tf.nn.sigmoid(FC)
        return FC
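A usage sketch for full_connected_relu, assuming a flattened 2-D input; since kernal is (input_features, output_features), stacking two calls gives a small classification head (the sizes below are illustrative):

vec = tf.placeholder(tf.float32, shape=[None, 256], name='flattened')
hidden = full_connected_relu(vec, kernal=(256, 64), activefuncation='relu', scope='fc1')
probs = full_connected_relu(hidden, kernal=(64, 2), activefuncation='softmax', scope='fc2')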