Code Example #1
def attention(scale_input, is_training=False):
    # The L2 regularization scale reuses the learning-rate flag.
    l2_reg = FLAGS.learning_rate
    dropout_ratio = 0
    if is_training:
        dropout_ratio = 0.5

    conv1 = Utils.conv(scale_input, filters=512, l2_reg_scale=l2_reg)
    conv1 = Utils.dropout(conv1, dropout_ratio, is_training)
    # 1x1 convolution reduces the features to 3 channels.
    conv2 = Utils.conv(conv1, filters=3, kernel_size=[1, 1],
                       l2_reg_scale=l2_reg)
    return conv2
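These examples all rely on the project's Utils module, which is not shown on this page. As a rough orientation only, below is a minimal TF1-style sketch of what Utils.conv and Utils.dropout could look like; the exact signatures, defaults and batch-norm handling are assumptions, not the repository's code. Note also that Code Example #1 passes a drop ratio to Utils.dropout while Code Example #2 passes a keep probability, so the real helper may interpret its second argument differently.

import tensorflow as tf


def conv(inputs, filters, kernel_size=None, l2_reg_scale=None,
         is_training=False, activation=tf.nn.relu):
    # Hypothetical helper: 3x3 convolution by default, optional L2
    # regularization, followed by batch normalization.
    if kernel_size is None:
        kernel_size = [3, 3]
    regularizer = (tf.contrib.layers.l2_regularizer(scale=l2_reg_scale)
                   if l2_reg_scale is not None else None)
    out = tf.layers.conv2d(inputs, filters, kernel_size, padding="same",
                           activation=activation,
                           kernel_regularizer=regularizer)
    out = tf.layers.batch_normalization(out, training=is_training)
    return out


def dropout(inputs, ratio, is_training=False):
    # tf.layers.dropout takes a drop rate and is only active during training.
    return tf.layers.dropout(inputs, rate=ratio, training=is_training)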
Code Example #2
File: ASUNet.py  Project: minar09/ASU-Net
def attention(scale_input, is_training=False):
    l2_reg = FLAGS.learning_rate
    keep_prob = 1.0
    if is_training:
        keep_prob = 0.5

    with tf.variable_scope("attention"):
        conv1 = Utils.conv(scale_input, filters=512, l2_reg_scale=l2_reg)
        conv1 = Utils.dropout(conv1, keep_prob)
        conv2 = Utils.conv(conv1,
                           filters=3,
                           kernel_size=[1, 1],
                           l2_reg_scale=l2_reg)

        return conv2
Code Example #3
def attention_gate(encoder_input, decoder_input, filters, is_training=False):
    l2_reg = FLAGS.learning_rate
    # Resize the decoder features to the spatial size of the encoder features.
    decoder_input = tf.image.resize_images(decoder_input,
                                           tf.shape(encoder_input)[1:3])
    decoder_input = Utils.conv(decoder_input,
                               filters=filters,
                               l2_reg_scale=l2_reg,
                               is_training=is_training)

    # Build a gating mask: average the two feature maps, apply ReLU,
    # a convolution and a sigmoid, then weight the encoder features with it.
    gated = tf.reduce_mean(tf.stack([encoder_input, decoder_input]), axis=0)
    gated = tf.nn.relu(gated)
    gated = Utils.conv(gated,
                       filters=filters,
                       l2_reg_scale=l2_reg,
                       is_training=is_training)
    gated = tf.nn.sigmoid(gated)
    gated = tf.multiply(gated, encoder_input)

    return gated
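In the attention U-Net variant (Code Example #6 below), this gate is applied to the encoder features of every skip connection before they are concatenated with the upsampled decoder features. A short sketch of that wiring, with illustrative tensor names taken from the later example:

# Gate the encoder skip features using the coarser decoder features,
# then concatenate them with the upsampled decoder output.
gated_skip = attention_gate(conv4_2, conv5_2, filters=512)
upsampled = Utils.conv_transpose(conv5_2, filters=512, l2_reg_scale=l2_reg)
concated1 = tf.concat([upsampled, gated_skip], axis=3)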
Code Example #4
File: ASUNet.py  Project: minar09/ASU-Net
def unetinference(image, keep_prob):
    # Note: keep_prob is not used inside this function.
    net = {}
    l2_reg = FLAGS.learning_rate
    # Global step counter, kept in the graph so training can resume from a checkpoint.
    global_iter_counter = tf.Variable(0, name='global_step', trainable=False)
    net['global_step'] = global_iter_counter
    with tf.variable_scope("inference"):
        inputs = image
        # Placeholder for ground-truth labels; not used inside this function.
        teacher = tf.placeholder(
            tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, NUM_OF_CLASSES])
        # Batch normalization is hard-coded to training mode here.
        is_training = True

        # 1, 1, 3
        conv1_1 = Utils.conv(inputs,
                             filters=64,
                             l2_reg_scale=l2_reg,
                             batchnorm_istraining=is_training)
        conv1_2 = Utils.conv(conv1_1,
                             filters=64,
                             l2_reg_scale=l2_reg,
                             batchnorm_istraining=is_training)
        pool1 = Utils.pool(conv1_2)

        # 1/2, 1/2, 64
        conv2_1 = Utils.conv(pool1,
                             filters=128,
                             l2_reg_scale=l2_reg,
                             batchnorm_istraining=is_training)
        conv2_2 = Utils.conv(conv2_1,
                             filters=128,
                             l2_reg_scale=l2_reg,
                             batchnorm_istraining=is_training)
        pool2 = Utils.pool(conv2_2)

        # 1/4, 1/4, 128
        conv3_1 = Utils.conv(pool2,
                             filters=256,
                             l2_reg_scale=l2_reg,
                             batchnorm_istraining=is_training)
        conv3_2 = Utils.conv(conv3_1,
                             filters=256,
                             l2_reg_scale=l2_reg,
                             batchnorm_istraining=is_training)
        pool3 = Utils.pool(conv3_2)

        # 1/8, 1/8, 256
        conv4_1 = Utils.conv(pool3,
                             filters=512,
                             l2_reg_scale=l2_reg,
                             batchnorm_istraining=is_training)
        conv4_2 = Utils.conv(conv4_1,
                             filters=512,
                             l2_reg_scale=l2_reg,
                             batchnorm_istraining=is_training)
        pool4 = Utils.pool(conv4_2)

        # 1/16, 1/16, 512
        conv5_1 = Utils.conv(pool4, filters=1024, l2_reg_scale=l2_reg)
        conv5_2 = Utils.conv(conv5_1, filters=1024, l2_reg_scale=l2_reg)
        concated1 = tf.concat([
            Utils.conv_transpose(conv5_2, filters=512, l2_reg_scale=l2_reg),
            conv4_2
        ],
                              axis=3)

        conv_up1_1 = Utils.conv(concated1, filters=512, l2_reg_scale=l2_reg)
        conv_up1_2 = Utils.conv(conv_up1_1, filters=512, l2_reg_scale=l2_reg)
        concated2 = tf.concat([
            Utils.conv_transpose(conv_up1_2, filters=256, l2_reg_scale=l2_reg),
            conv3_2
        ],
                              axis=3)

        conv_up2_1 = Utils.conv(concated2, filters=256, l2_reg_scale=l2_reg)
        conv_up2_2 = Utils.conv(conv_up2_1, filters=256, l2_reg_scale=l2_reg)
        concated3 = tf.concat([
            Utils.conv_transpose(conv_up2_2, filters=128, l2_reg_scale=l2_reg),
            conv2_2
        ],
                              axis=3)

        conv_up3_1 = Utils.conv(concated3, filters=128, l2_reg_scale=l2_reg)
        conv_up3_2 = Utils.conv(conv_up3_1, filters=128, l2_reg_scale=l2_reg)
        concated4 = tf.concat([
            Utils.conv_transpose(conv_up3_2, filters=64, l2_reg_scale=l2_reg),
            conv1_2
        ],
                              axis=3)

        conv_up4_1 = Utils.conv(concated4, filters=64, l2_reg_scale=l2_reg)
        conv_up4_2 = Utils.conv(conv_up4_1, filters=64, l2_reg_scale=l2_reg)
        outputs = Utils.conv(conv_up4_2,
                             filters=NUM_OF_CLASSES,
                             kernel_size=[1, 1],
                             activation=None)
        annotation_pred = tf.argmax(outputs, dimension=3, name="prediction")

        return tf.expand_dims(annotation_pred, dim=3), outputs, net, conv5_2
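A minimal sketch of driving this inference graph in a TF1 session, assuming FLAGS, IMAGE_SIZE and NUM_OF_CLASSES are defined as in the repository; the dummy input batch is illustrative only.

import numpy as np
import tensorflow as tf

# Placeholder for a batch of RGB images, then the network on top of it.
image_ph = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, 3])
pred, logits, net, bottleneck = unetinference(image_ph, keep_prob=1.0)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.zeros((1, IMAGE_SIZE, IMAGE_SIZE, 3), dtype=np.float32)
    prediction = sess.run(pred, feed_dict={image_ph: batch})
    # prediction has shape (1, IMAGE_SIZE, IMAGE_SIZE, 1) with class indices.
    print(prediction.shape)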
Code Example #5
def u_net_inference(image, is_training=False):
    net = {}
    l2_reg = FLAGS.learning_rate

    # Global step counter, kept so training can resume from a checkpoint.
    global_iter_counter = tf.Variable(0, name='global_step', trainable=False)
    net['global_step'] = global_iter_counter

    with tf.variable_scope("inference"):
        inputs = image

        # 1, 1, 3 Encoder 1st
        conv1_1 = Utils.conv(inputs,
                             filters=64,
                             l2_reg_scale=l2_reg,
                             is_training=is_training)
        conv1_2 = Utils.conv(conv1_1,
                             filters=64,
                             l2_reg_scale=l2_reg,
                             is_training=is_training)
        pool1 = Utils.pool(conv1_2)

        # 1/2, 1/2, 64 Encoder 2nd
        conv2_1 = Utils.conv(pool1,
                             filters=128,
                             l2_reg_scale=l2_reg,
                             is_training=is_training)
        conv2_2 = Utils.conv(conv2_1,
                             filters=128,
                             l2_reg_scale=l2_reg,
                             is_training=is_training)
        pool2 = Utils.pool(conv2_2)

        # 1/4, 1/4, 128 Encoder 3rd
        conv3_1 = Utils.conv(pool2,
                             filters=256,
                             l2_reg_scale=l2_reg,
                             is_training=is_training)
        conv3_2 = Utils.conv(conv3_1,
                             filters=256,
                             l2_reg_scale=l2_reg,
                             is_training=is_training)
        pool3 = Utils.pool(conv3_2)

        # 1/8, 1/8, 256 Encoder 4th
        conv4_1 = Utils.conv(pool3,
                             filters=512,
                             l2_reg_scale=l2_reg,
                             is_training=is_training)
        conv4_2 = Utils.conv(conv4_1,
                             filters=512,
                             l2_reg_scale=l2_reg,
                             is_training=is_training)
        pool4 = Utils.pool(conv4_2)

        # 1/16, 1/16, 512 Encoder 5th, upsample, skip 1
        conv5_1 = Utils.conv(pool4,
                             filters=1024,
                             l2_reg_scale=l2_reg,
                             is_training=is_training)
        conv5_2 = Utils.conv(conv5_1,
                             filters=1024,
                             l2_reg_scale=l2_reg,
                             is_training=is_training)
        concated1 = tf.concat([
            Utils.conv_transpose(conv5_2,
                                 filters=512,
                                 l2_reg_scale=l2_reg,
                                 is_training=is_training), conv4_2
        ],
                              axis=3)

        # Decoder 1st, skip 2
        conv_up1_1 = Utils.conv(concated1,
                                filters=512,
                                l2_reg_scale=l2_reg,
                                is_training=is_training)
        conv_up1_2 = Utils.conv(conv_up1_1,
                                filters=512,
                                l2_reg_scale=l2_reg,
                                is_training=is_training)
        concated2 = tf.concat([
            Utils.conv_transpose(conv_up1_2,
                                 filters=256,
                                 l2_reg_scale=l2_reg,
                                 is_training=is_training), conv3_2
        ],
                              axis=3)

        # Decoder 2nd, skip 3
        conv_up2_1 = Utils.conv(concated2,
                                filters=256,
                                l2_reg_scale=l2_reg,
                                is_training=is_training)
        conv_up2_2 = Utils.conv(conv_up2_1,
                                filters=256,
                                l2_reg_scale=l2_reg,
                                is_training=is_training)
        concated3 = tf.concat([
            Utils.conv_transpose(conv_up2_2,
                                 filters=128,
                                 l2_reg_scale=l2_reg,
                                 is_training=is_training), conv2_2
        ],
                              axis=3)

        # Decoder 3rd, skip 4
        conv_up3_1 = Utils.conv(concated3,
                                filters=128,
                                l2_reg_scale=l2_reg,
                                is_training=is_training)
        conv_up3_2 = Utils.conv(conv_up3_1,
                                filters=128,
                                l2_reg_scale=l2_reg,
                                is_training=is_training)
        concated4 = tf.concat([
            Utils.conv_transpose(conv_up3_2,
                                 filters=64,
                                 l2_reg_scale=l2_reg,
                                 is_training=is_training), conv1_2
        ],
                              axis=3)

        # Decoder 4th
        conv_up4_1 = Utils.conv(concated4,
                                filters=64,
                                l2_reg_scale=l2_reg,
                                is_training=is_training)
        conv_up4_2 = Utils.conv(conv_up4_1,
                                filters=64,
                                l2_reg_scale=l2_reg,
                                is_training=is_training)
        # Per-class logits (1x1 convolution, no activation).
        logits = Utils.conv(conv_up4_2,
                            filters=NUM_OF_CLASSES,
                            kernel_size=[1, 1],
                            activation=None)
        # Output: predicted class index per pixel.
        annotation_pred = tf.argmax(logits, axis=3, name="prediction")
        outputs = tf.expand_dims(annotation_pred, axis=3)

        return outputs, logits, net, conv5_2
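The returned logits plug directly into a segmentation loss. A rough sketch of the usual TF1 wiring, assuming sparse per-pixel labels and reusing net['global_step'] so the step counter survives checkpoint resumes; the variable names and the Adam optimizer are illustrative choices, not the repository's training code.

import tensorflow as tf

image_ph = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, 3])
# Sparse ground truth: one class index per pixel.
annotation_ph = tf.placeholder(tf.int32, [None, IMAGE_SIZE, IMAGE_SIZE, 1])

outputs, logits, net, _ = u_net_inference(image_ph, is_training=True)

loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=tf.squeeze(annotation_ph, axis=[3]), logits=logits))

# Batch-norm statistics are updated via UPDATE_OPS in TF1, so run them
# together with the optimizer step.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(
        loss, global_step=net['global_step'])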
Code Example #6
def unetinference(image, is_training=False):
    net = {}
    l2_reg = FLAGS.learning_rate
    # Global step counter, kept so training can resume from a checkpoint.
    global_iter_counter = tf.Variable(0, name='global_step', trainable=False)
    net['global_step'] = global_iter_counter
    with tf.variable_scope("inference"):
        inputs = image

        # 1, 1, 3
        conv1_1 = Utils.conv(inputs,
                             filters=64,
                             l2_reg_scale=l2_reg,
                             is_training=is_training)
        conv1_2 = Utils.conv(conv1_1,
                             filters=64,
                             l2_reg_scale=l2_reg,
                             is_training=is_training)
        pool1 = Utils.pool(conv1_2)

        # 1/2, 1/2, 64
        conv2_1 = Utils.conv(pool1,
                             filters=128,
                             l2_reg_scale=l2_reg,
                             is_training=is_training)
        conv2_2 = Utils.conv(conv2_1,
                             filters=128,
                             l2_reg_scale=l2_reg,
                             is_training=is_training)
        pool2 = Utils.pool(conv2_2)

        # 1/4, 1/4, 128
        conv3_1 = Utils.conv(pool2,
                             filters=256,
                             l2_reg_scale=l2_reg,
                             is_training=is_training)
        conv3_2 = Utils.conv(conv3_1,
                             filters=256,
                             l2_reg_scale=l2_reg,
                             is_training=is_training)
        pool3 = Utils.pool(conv3_2)

        # 1/8, 1/8, 256
        conv4_1 = Utils.conv(pool3,
                             filters=512,
                             l2_reg_scale=l2_reg,
                             is_training=is_training)
        conv4_2 = Utils.conv(conv4_1,
                             filters=512,
                             l2_reg_scale=l2_reg,
                             is_training=is_training)
        pool4 = Utils.pool(conv4_2)

        # 1/16, 1/16, 512
        conv5_1 = Utils.conv(pool4,
                             filters=1024,
                             l2_reg_scale=l2_reg,
                             is_training=is_training)
        conv5_2 = Utils.conv(conv5_1,
                             filters=1024,
                             l2_reg_scale=l2_reg,
                             is_training=is_training)
        concated1 = tf.concat([
            Utils.conv_transpose(conv5_2,
                                 filters=512,
                                 l2_reg_scale=l2_reg,
                                 is_training=is_training),
            attention_gate(conv4_2, conv5_2, 512)
        ],
                              axis=3)

        conv_up1_1 = Utils.conv(concated1,
                                filters=512,
                                l2_reg_scale=l2_reg,
                                is_training=is_training)
        conv_up1_2 = Utils.conv(conv_up1_1,
                                filters=512,
                                l2_reg_scale=l2_reg,
                                is_training=is_training)
        concated2 = tf.concat([
            Utils.conv_transpose(conv_up1_2,
                                 filters=256,
                                 l2_reg_scale=l2_reg,
                                 is_training=is_training),
            attention_gate(conv3_2, conv_up1_2, 256)
        ],
                              axis=3)

        conv_up2_1 = Utils.conv(concated2,
                                filters=256,
                                l2_reg_scale=l2_reg,
                                is_training=is_training)
        conv_up2_2 = Utils.conv(conv_up2_1,
                                filters=256,
                                l2_reg_scale=l2_reg,
                                is_training=is_training)
        concated3 = tf.concat([
            Utils.conv_transpose(conv_up2_2,
                                 filters=128,
                                 l2_reg_scale=l2_reg,
                                 is_training=is_training),
            attention_gate(conv2_2, conv_up2_2, 128)
        ],
                              axis=3)

        conv_up3_1 = Utils.conv(concated3,
                                filters=128,
                                l2_reg_scale=l2_reg,
                                is_training=is_training)
        conv_up3_2 = Utils.conv(conv_up3_1,
                                filters=128,
                                l2_reg_scale=l2_reg,
                                is_training=is_training)
        concated4 = tf.concat([
            Utils.conv_transpose(conv_up3_2,
                                 filters=64,
                                 l2_reg_scale=l2_reg,
                                 is_training=is_training),
            attention_gate(conv1_2, conv_up3_2, 64)
        ],
                              axis=3)

        conv_up4_1 = Utils.conv(concated4,
                                filters=64,
                                l2_reg_scale=l2_reg,
                                is_training=is_training)
        conv_up4_2 = Utils.conv(conv_up4_1,
                                filters=64,
                                l2_reg_scale=l2_reg,
                                is_training=is_training)
        logits = Utils.conv(conv_up4_2,
                            filters=NUM_OF_CLASSES,
                            kernel_size=[1, 1],
                            activation=None,
                            is_training=is_training)
        annotation_pred = tf.argmax(logits, dimension=3, name="prediction")

        return tf.expand_dims(annotation_pred, dim=3), logits, net
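All three inference builders store the global-step variable in net['global_step'], so resuming from a checkpoint is mostly a matter of restoring with tf.train.Saver. A minimal sketch; the checkpoint directory and placeholder shape are assumptions for illustration.

import tensorflow as tf

image_ph = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, 3])
pred, logits, net = unetinference(image_ph, is_training=False)
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state("logs/")  # illustrative directory
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
    # Training or evaluation continues from the restored step count.
    print("resuming from step", sess.run(net['global_step']))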