def attention_layer(x, ratio=4, scope=None):
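    # Attention module: combines a spatial attention map, channel squeeze-and-excitation
    # weights, and a multi-scale conv/deconv branch, then uses the fused map to rescale x.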
    with tf.name_scope(scope):
        x_shape = x.get_shape().as_list()
        _, width, height, channel = x_shape
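        # spatial attention: 1x1 conv + sigmoid -> one weight per spatial location, shape (N, H, W, 1)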
        recalibrate1 = conv_sigmoid(x,
                                    kernal=(1, 1, channel, 1),
                                    scope=scope + 'spatial_squeeze')

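        # channel squeeze-and-excitation: global average pooling, then a two-layer bottleneck MLP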
        squeeze = tf.reduce_mean(x,
                                 axis=(1, 2),
                                 name=scope + 'channel_squeeze')
        excitation = full_connected_relu(squeeze,
                                         kernal=(channel, channel // ratio),
                                         activefuncation='relu',
                                         scope=scope + '_fully_connected1')
        excitation = full_connected_relu(excitation,
                                         kernal=(channel // ratio, channel),
                                         activefuncation='sigmoid',
                                         scope=scope + '_fully_connected2')
        recalibrate2 = tf.reshape(excitation, [-1, 1, 1, channel])

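        # multi-scale branch: two conv + 2x2 max-pool steps (down to 1/4 resolution),
        # then a stride-4 transposed convolution back to the input resolution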
        recalibrate3 = conv_sigmoid(x,
                                    kernal=(3, 3, channel, channel // 2),
                                    scope=scope + 'conv1')
        recalibrate3 = max_pooling_2x2(recalibrate3)
        recalibrate3 = conv_sigmoid(recalibrate3,
                                    kernal=(3, 3, channel // 2, channel // 4),
                                    scope=scope + 'conv2')
        recalibrate3 = max_pooling_2x2(recalibrate3)

        kernal = [3, 3, channel, channel // 4]
        W = weight_xavier_init(shape=kernal,
                               n_inputs=kernal[0] * kernal[1] * kernal[-1],
                               n_outputs=kernal[-2],
                               activefunction='relu',
                               variable_name=scope + 'W')
        B = bias_variable([kernal[-2]], variable_name=scope + 'B')
        # use the dynamic batch size so the deconvolution works for any batch, not a hard-coded one
        output_shape = tf.stack([tf.shape(x)[0], x_shape[1], x_shape[2], channel])
        deconv = tf.nn.conv2d_transpose(recalibrate3,
                                        W,
                                        output_shape,
                                        strides=[1, 4, 4, 1],
                                        padding="SAME") + B
        recalibrate3 = tf.nn.leaky_relu(deconv)

        # recalibrate3 =  tf.image.resize_images(images=recalibrate3, size=[width, height])

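        # fuse the branches: spatial x channel map, weighted against the multi-scale map
        # by learnable scalars, then gate the input feature map x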
        recalibrate4 = tf.multiply(recalibrate1, recalibrate2)
        alpha_attention = tf.Variable(tf.constant(1.0))
        beta_attention = tf.Variable(tf.constant(1.0))
        attention = tf.multiply(alpha_attention * recalibrate4,
                                beta_attention * recalibrate3)

        out = tf.multiply(attention, x)
        return out
Example #2
def channel_squeeze_spatial_excitiation_layer(x, out_dim, scope=None):
    with tf.name_scope(scope):
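        # channel squeeze (1x1 conv to a single map) + sigmoid -> per-pixel gate that rescales x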
        squeeze = conv_sigmoid(x,
                               kernal=(1, 1, out_dim, 1),
                               scope=scope + 'spatial_squeeze')
        scale = x * squeeze
        return scale
Example #3
def subtract_refine(subtract, layer7, layer8, layer9, phase, drop_conv, n_class=1):
    _, y_width, y_height, y_channel = subtract.get_shape().as_list()  # unpack the spatial size and channel count of the input tensor
    seed = tf.reshape(subtract, [-1, y_width, y_height, y_channel])  # reshape the image into the NHWC layout TensorFlow expects


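    # refinement stage 1: compress layer7, concat the 1/4-resolution seed, SE-recalibrate, then upsample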
    refine1 = conv_bn_relu_drop(x=layer7, kernal=[1, 1, 128, 64], phase=phase, drop=drop_conv, scope='refine1_1')
    seed1 = tf.image.resize_images(images=seed, size=[int(y_width / 4), int(y_height / 4)], method=0)
    refine1 = crop_and_concat(refine1, seed1)
    refine1 = conv_bn_relu_drop(x=refine1, kernal=[3, 3, 65, 65], phase=phase, drop=drop_conv, scope='refine1_2')
    refine1 = squeeze_excitation_model(refine1, out_dim=65, scope='sem2_1')
    refine1 = deconv_relu(x=refine1, kernal=[3, 3, 32, 65], scope='refine1_deconv')

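    # refinement stage 2: same at 1/2 resolution, also concatenating the upsampled stage-1 features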
    refine2 = conv_bn_relu_drop(x=layer8, kernal=[1, 1, 64, 32], phase=phase, drop=drop_conv, scope='refine2_1')
    seed2 = tf.image.resize_images(images=seed, size=[int(y_width / 2), int(y_height / 2)], method=0)
    refine2 = crop_and_concat(refine2, seed2)
    refine2 = conv_bn_relu_drop(x=refine2, kernal=[3, 3, 33, 32], phase=phase, drop=drop_conv, scope='refine2_2')
    refine2 = crop_and_concat(refine1, refine2)
    refine2 = conv_bn_relu_drop(x=refine2, kernal=[3, 3, 64, 64], phase=phase, drop=drop_conv, scope='refine1_3')
    refine2 = squeeze_excitation_model(refine2, out_dim=64, scope='sem2_2')
    refine2 = deconv_relu(x=refine2, kernal=[3, 3, 32, 64], scope='refine2_deconv')

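    # refinement stage 3: full resolution, concatenating the upsampled stage-2 features before the final 1x1 output conv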
    refine3 = conv_bn_relu_drop(x=layer9, kernal=[1, 1, 32, 1], phase=phase, drop=drop_conv, scope='refine3_1')
    seed3 = tf.image.resize_images(images=seed, size=[int(y_width), int(y_height)], method=0)
    refine3 = crop_and_concat(refine3, seed3)
    refine3 = conv_bn_relu_drop(x=refine3, kernal=[3, 3, 2, 1], phase=phase, drop=drop_conv, scope='refine3_2')
    refine3 = crop_and_concat(refine2, refine3)
    refine3 = conv_bn_relu_drop(x=refine3, kernal=[3, 3, 33, 33], phase=phase, drop=drop_conv, scope='refine3_3')
    refine3 = squeeze_excitation_model(refine3, out_dim=33, scope='sem2_3')


    out1 = conv_sigmoid(x=refine3, kernal=[1, 1, 33, n_class], scope='out1')

    return out1
Example #4
def attention_layer(x, ratio=4, scope=None):
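    # lighter variant of attention_layer above: spatial and channel squeeze-and-excitation only, no multi-scale branch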
    with tf.name_scope(scope):
        _, width, height, channel = x.get_shape().as_list()
        x_shape = x.get_shape().as_list()
        recalibrate1 = conv_sigmoid(x,
                                    kernal=(1, 1, channel, 1),
                                    scope=scope + 'spatial_squeeze')

        squeeze = tf.reduce_mean(x,
                                 axis=(1, 2),
                                 name=scope + 'channel_squeeze')
        excitation = full_connected_relu(squeeze,
                                         kernal=(channel, channel // ratio),
                                         activefuncation='relu',
                                         scope=scope + '_fully_connected1')
        excitation = full_connected_relu(excitation,
                                         kernal=(channel // ratio, channel),
                                         activefuncation='sigmoid',
                                         scope=scope + '_fully_connected2')
        recalibrate2 = tf.reshape(excitation, [-1, 1, 1, channel])

        recalibrate4 = tf.multiply(recalibrate1, recalibrate2)
        out = tf.multiply(recalibrate4, x)
        return out
Example #5
def _create_conv_net(X, image_width, image_height, image_channel, phase, drop_conv, n_class=1):
    inputX = tf.reshape(X, [-1, image_width, image_height, image_channel])  # shape=(?, 32, 32, 1)
    # U-Net model
    # layer0, layer1->convolution
    layer0 = conv_bn_relu_drop(x=inputX, kernal=[3, 3, image_channel, 32], phase=phase, drop=drop_conv, scope='layer0')
    layer1 = conv_bn_relu_drop(x=layer0, kernal=[3, 3, 32, 32], phase=phase, drop=drop_conv, scope='layer1')
    # print(layer1.get_shape().as_list())

    pool1 = max_pooling_2x2(layer1)
    # layer2->convolution
    layer2 = conv_bn_relu_drop(x=pool1, kernal=[3, 3, 32, 64], phase=phase, drop=drop_conv, scope='layer2_1')
    layer2 = conv_bn_relu_drop(x=layer2, kernal=[3, 3, 64, 64], phase=phase, drop=drop_conv, scope='layer2_2')

    pool2 = max_pooling_2x2(layer2)

    # layer3->convolution
    layer3 = conv_bn_relu_drop(x=pool2, kernal=[3, 3, 64, 128], phase=phase, drop=drop_conv, scope='layer3_1')
    layer3 = conv_bn_relu_drop(x=layer3, kernal=[3, 3, 128, 128], phase=phase, drop=drop_conv, scope='layer3_2')

    pool3 = max_pooling_2x2(layer3)

    # layer4->convolution
    layer4 = conv_bn_relu_drop(x=pool3, kernal=[3, 3, 128, 256], phase=phase, drop=drop_conv, scope='layer4_1')
    layer4 = conv_bn_relu_drop(x=layer4, kernal=[3, 3, 256, 256], phase=phase, drop=drop_conv, scope='layer4_2')

    pool4 = max_pooling_2x2(layer4)

    # layer5->convolution
    layer5 = conv_bn_relu_drop(x=pool4, kernal=[3, 3, 256, 512], phase=phase, drop=drop_conv, scope='layer5_1')
    layer5 = conv_bn_relu_drop(x=layer5, kernal=[3, 3, 512, 512], phase=phase, drop=drop_conv, scope='layer5_2')

    # deconv1->deconvolution

    deconv1 = deconv_relu(x=layer5, kernal=[3, 3, 256, 512], scope='deconv1')

    layer6 = crop_and_concat(layer4, deconv1)


    # layer6->convolution
    layer6 = conv_bn_relu_drop(x=layer6, kernal=[3, 3, 512, 256], phase=phase, drop=drop_conv, scope='layer6_1')
    layer6 = conv_bn_relu_drop(x=layer6, kernal=[3, 3, 256, 256], phase=phase, drop=drop_conv, scope='layer6_2')

    # deconv2->deconvolution
    deconv2 = deconv_relu(layer6, kernal=[3, 3, 128, 256], scope='deconv2')

    layer7 = crop_and_concat(layer3, deconv2)

    # layer7->convolution
    layer7 = conv_bn_relu_drop(x=layer7, kernal=[3, 3, 256, 128], phase=phase, drop=drop_conv, scope='layer7_1')
    layer7 = conv_bn_relu_drop(x=layer7, kernal=[3, 3, 128, 128], phase=phase, drop=drop_conv, scope='layer7_2')


    # deconv3->deconvolution
    deconv3 = deconv_relu(x=layer7, kernal=[3, 3, 64, 128], scope='deconv3')

    layer8 = crop_and_concat(layer2, deconv3)

    # layer8->convolution
    layer8 = conv_bn_relu_drop(x=layer8, kernal=[3, 3, 128, 64], phase=phase, drop=drop_conv, scope='layer8_1')
    layer8 = conv_bn_relu_drop(x=layer8, kernal=[3, 3, 64, 64], phase=phase, drop=drop_conv, scope='layer8_2')

    # deconv4->deconvolution
    deconv4 = deconv_relu(layer8, kernal=[3, 3, 32, 64], scope='deconv4')

    layer9 = crop_and_concat(layer1, deconv4)

    # layer9->convolution
    layer9 = conv_bn_relu_drop(x=layer9, kernal=[3, 3, 64, 32], phase=phase, drop=drop_conv, scope='layer9_1')
    layer9 = conv_bn_relu_drop(x=layer9, kernal=[3, 3, 32, 32], phase=phase, drop=drop_conv, scope='layer9_2')

    # output layer
    out1 = conv_sigmoid(x=layer9, kernal=[1, 1, 32, n_class], scope='out1')
    return out1