Example #1
def net(image, classes):

    #encoding - convolution/pooling
    conv1 = util.conv(image, [K, K, 3, 128], "c1", pad="SAME")
    pool1 = util.pool(conv1, 1, STRIDE, name="p1")

    conv2 = util.conv(pool1, [K, K, 128, 256], "c2", pad="SAME")
    pool2 = util.pool(conv2, 1, STRIDE, name="p2")

    conv3 = util.conv(pool2, [K, K, 256, 256], "c3", pad="SAME")
    pool3 = util.pool(conv3, 1, STRIDE, name="p2")

    conv4 = util.conv(pool3, [K, K, 256, 512], "c4", pad="SAME")

    #decoding - deconvolution/transposing

    #deconv1 = util.deconv(conv4, tf.shape(conv3), [K,K,256,512], "dc1")
    #deconv2 = util.deconv(deconv1, tf.shape(conv2), [K,K,256,256], "dc2")
    #deconv3 = util.deconv(deconv2, tf.shape(conv1), [K,K,128,256], "dc3")

    deconv1 = pixelDeconv.pixel_dcl(conv4, 256, [K, K], "dc1")
    deconv2 = pixelDeconv.pixel_dcl(deconv1, 256, [K, K], "dc2")
    deconv3 = pixelDeconv.pixel_dcl(deconv2, 128, [K, K], "dc3")

    conv6 = util.conv(deconv3, [1, 1, 128, classes], "c6", pad="SAME")

    softmax = tf.nn.softmax(conv6)

    return conv6, tf.argmax(softmax, axis=3), softmax
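
The util.conv and util.pool helpers called throughout these examples are not shown. The following is a minimal sketch of what they might look like, with the signatures inferred from the call sites (filter shape [k, k, in_channels, out_channels], optional pad and dilation arguments, and a poolIndices flag that also returns argmax indices); the initializers and the ReLU activation are assumptions.

import tensorflow as tf

# NOTE: assumed sketch; the real util module is not included in these listings.
def conv(x, filterShape, name, pad="SAME", dilation=1):
    # filterShape = [k, k, in_channels, out_channels]
    with tf.variable_scope(name):
        w = tf.get_variable("w", filterShape,
                            initializer=tf.truncated_normal_initializer(stddev=0.1))
        b = tf.get_variable("b", [filterShape[3]],
                            initializer=tf.zeros_initializer())
        if dilation > 1:
            out = tf.nn.atrous_conv2d(x, w, rate=dilation, padding=pad)
        else:
            out = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding=pad)
        return tf.nn.relu(tf.nn.bias_add(out, b))

def pool(x, ksize, stride, poolIndices=False, name="pool"):
    # Max pooling; with poolIndices=True also returns the argmax indices
    # needed by the SegNet-style decoder in Example #6.
    if poolIndices:
        return tf.nn.max_pool_with_argmax(x,
                                          ksize=[1, ksize, ksize, 1],
                                          strides=[1, stride, stride, 1],
                                          padding="SAME",
                                          name=name)
    return tf.nn.max_pool(x,
                          ksize=[1, ksize, ksize, 1],
                          strides=[1, stride, stride, 1],
                          padding="SAME",
                          name=name)
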
def refineNet(inputs, downsampleLevel):

    bsize = inputs[0].get_shape()[0].value

    currLevelFeatures = adaptiveConv(inputs[0], downsampleLevel)
    if not downsampleLevel == "32":
        previousFeatures = adaptiveConv(inputs[1],
                                        "prev" + downsampleLevel,
                                        downsample=False)
    else:
        previousFeatures = None

    # Multi resolution Fusion with current resolution and the previous one
    mRF = multiResolutionFusion([currLevelFeatures, previousFeatures],
                                downsampleLevel)

    # Chained residual pooling
    reluRCP = tf.nn.relu(mRF)
    chainedResidual_p1 = util.pool(reluRCP,
                                   5,
                                   STRIDE,
                                   name="convResidual_p1_" + downsampleLevel)
    chainedResidual_conv1 = util.conv(chainedResidual_p1,
                                      [K, K, initDepth * 2, initDepth * 2],
                                      "convResidual_conv1_" + downsampleLevel)

    chainedResidualSum1 = tf.add(reluRCP, chainedResidual_conv1)

    chainedResidual_pool2 = util.pool(chainedResidual_conv1,
                                      5,
                                      STRIDE,
                                      name="chainedResidual_pool2_" +
                                      downsampleLevel)
    chainedResidual_conv2 = util.conv(
        chainedResidual_pool2, [K, K, initDepth * 2, initDepth * 2],
        "chainedResidual_conv2_" + downsampleLevel)

    chainedResidualSum2 = tf.add(chainedResidualSum1, chainedResidual_conv2)

    drop = tf.nn.dropout(chainedResidualSum2, 0.80)

    # Output RCU
    out_conv1_residual1 = util.conv(drop,
                                    [K, K, initDepth * 2, initDepth],
                                    "out_conv1_residual1_" + downsampleLevel)
    out_conv2_residual1 = util.conv(out_conv1_residual1,
                                    [K, K, initDepth, initDepth],
                                    "out_conv2_residual1_" + downsampleLevel)

    return out_conv2_residual1
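
refineNet relies on a multiResolutionFusion helper that is not included in these listings. Below is a possible sketch following the RefineNet fusion block (per-path convolution, upsampling of the coarser path, element-wise sum); the channel depth initDepth * 2 and the bilinear resize are assumptions, and it reuses the same module-level K and util helpers as the surrounding code.

# NOTE: assumed sketch; the real multiResolutionFusion is not shown.
def multiResolutionFusion(inputs, downsampleLevel):
    current, previous = inputs
    fusedCurrent = util.conv(current,
                             [K, K, initDepth * 2, initDepth * 2],
                             "mrf_curr_" + downsampleLevel)
    if previous is None:
        # Coarsest level: nothing to fuse with.
        return fusedCurrent
    fusedPrevious = util.conv(previous,
                              [K, K, initDepth * 2, initDepth * 2],
                              "mrf_prev_" + downsampleLevel)
    # Bring the previous (coarser) path up to the current resolution and sum.
    targetSize = tf.shape(fusedCurrent)[1:3]
    fusedPrevious = tf.image.resize_images(fusedPrevious, targetSize)
    return tf.add(fusedCurrent, fusedPrevious)
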
Example #3
def net(image, classes, MODE):

    # Paper: Context Network
    # conv1 = util.conv(image, [K,K,3,16], "c1", dilation=1, pad="SAME")
    # conv2 = util.conv(conv1, [K,K,16,16], "c2", dilation=1, pad="SAME")
    # conv3 = util.conv(conv2, [K,K,16,32], "c3", dilation=2, pad="SAME")
    # conv4 = util.conv(conv3, [K,K,32,32], "c4", dilation=4, pad="SAME")
    # conv5 = util.conv(conv4, [K,K,32,64], "c5", dilation=8, pad="SAME")
    # conv6 = util.conv(conv5, [K,K,64,64], "c6", dilation=16, pad="SAME")
    # conv7 = util.conv(conv6, [K,K,64,64], "c7", dilation=1, pad="SAME")
    # conv8 = util.conv(conv7, [1,1,64,classes], "c8", dilation=1, pad="SAME")

    # return conv8, tf.argmax(conv8, axis=3)

    # Paper: Front End 

    e1_c1 = util.conv(image, [K, K, 3, 64], "e1_c1")
    e1_c2 = util.conv(e1_c1, [K, K, 64, 64], "e1_c2")
    pool1 = util.pool(e1_c1, 2, STRIDE, name="pool1")

    e2_c1 = util.conv(pool1, [K, K, 64, 128], "e2_c1")
    e2_c2 = util.conv(e2_c1, [K, K, 128, 128], "e2_c2")
    pool2 = util.pool(e2_c2, 2, STRIDE, name="pool2")

    e3_c1 = util.conv(pool2, [K, K, 128, 256], "e3_c1")
    e3_c2 = util.conv(e3_c1, [K, K, 256, 256], "e3_c2")
    e3_c3 = util.conv(e3_c2, [1, 1, 256, 256], "e3_c3")
    pool3 = util.pool(e3_c3, 2, STRIDE, name="pool3")

    e4_c1 = util.conv(pool3, [K, K, 256, 512], "e4_c1", dilation=2)
    e4_c2 = util.conv(e4_c1, [K, K, 512, 512], "e4_c2", dilation=2)
    e4_c3 = util.conv(e4_c2, [1, 1, 512, 512], "e4_c3", dilation=2)
    
    e5_c1 = util.conv(e4_c3, [K, K, 512, 512], "e5_c1", dilation=4)
    e5_c2 = util.conv(e5_c1, [K, K, 512, 512], "e5_c2", dilation=4)
    e5_c3 = util.conv(e5_c2, [1, 1, 512, 512], "e5_c3", dilation=4)

    #de1 = util.deconv(e5_c3, tf.shape(e2_c2), [K,K,512,1024], "de1")
    #de2 = util.deconv(de1, tf.shape(e1_c2), [K,K,128,512], "de2")

    deconv1 = pixelDeconv.pixel_dcl(e5_c3, 512, [K, K], "dc1")
    deconv2 = pixelDeconv.pixel_dcl(deconv1, 256, [K, K], "dc2")
    deconv3 = pixelDeconv.pixel_dcl(deconv2, 128, [K, K], "dc3")

    final = util.conv(deconv3, [K, K, 128, classes], "final")

    softmax = tf.nn.softmax(final)

    return final, tf.argmax(softmax, axis=3), softmax
Example #4
def net(image, classes):
    bsize = image.get_shape()[0].value
    f = 3  # kernel size

    #residual convolution unit (RCU)
    #residual block 1
    conv1_residual1 = util.conv(image, [f, f, 3, 128], "conv1_residual1")
    conv2_residual1 = util.conv(conv1_residual1, [f, f, 128, 128],
                                "conv2_residual1")

    #residual block 2
    conv1_residual2 = util.conv(conv2_residual1, [f, f, 128, 128],
                                "conv1_residual2")
    conv2_residual2 = util.conv(conv1_residual2, [f, f, 128, 128],
                                "conv2_residual2")

    residual1Sum = tf.add(conv2_residual1, conv2_residual2)

    # Multi resolution Fusion - in single cascaded this is just one conv layer, no deconv

    conv1 = util.conv(residual1Sum, [f, f, 128, 256], "conv1")

    # Chained residual pooling

    chainedResidual_p1 = util.pool(conv1, 5, STRIDE, name="convResidual_p1")
    chainedResidual_conv1 = util.conv(chainedResidual_p1, [f, f, 256, 256],
                                      "convResidual_conv1")

    chainedResidualSum1 = tf.add(conv1, chainedResidual_conv1)

    chainedResidual_pool2 = util.pool(chainedResidual_conv1,
                                      5,
                                      STRIDE,
                                      name="chainedResidual_pool2")
    chainedResidual_conv2 = util.conv(chainedResidual_pool2, [f, f, 256, 256],
                                      "chainedResidual_conv2")

    chainedResidualSum2 = tf.add(chainedResidualSum1, chainedResidual_conv2)

    drop = tf.nn.dropout(chainedResidualSum2, 0.80)

    # Output RCU
    out_conv1_residual1 = util.conv(drop, [f, f, 256, 128],
                                    "out_conv1_residual1")
    out_conv2_residual1 = util.conv(out_conv1_residual1, [f, f, 128, classes],
                                    "out_conv2_residual1")

    softmax = tf.nn.softmax(out_conv2_residual1)

    return out_conv2_residual1, tf.argmax(softmax, axis=3), softmax
def adaptiveConv(input, downsampleLevel, downsample=True):

    if downsample:
        #downsample image
        input = util.pool(input,
                          1,
                          int(downsampleLevel),
                          name="RCUPool_" + downsampleLevel)

    #residual convolution unit (RCU) 1
    firstRCU = residualConvUnit(input, downsampleLevel, "1")
    #residual convolution unit (RCU) 2
    secondRCU = residualConvUnit(firstRCU, downsampleLevel, "2")

    return tf.add(firstRCU, secondRCU)
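
adaptiveConv calls a residualConvUnit helper that is likewise not shown. Here is a minimal sketch of the standard RefineNet RCU (ReLU, conv, ReLU, conv, plus an identity shortcut), again assuming a working depth of initDepth * 2 and the same module-level K and util helpers.

# NOTE: assumed sketch; the real residualConvUnit is not shown.
def residualConvUnit(input, downsampleLevel, blockId):
    depth = initDepth * 2
    relu1 = tf.nn.relu(input)
    conv1 = util.conv(relu1, [K, K, depth, depth],
                      "rcu" + blockId + "_c1_" + downsampleLevel)
    relu2 = tf.nn.relu(conv1)
    conv2 = util.conv(relu2, [K, K, depth, depth],
                      "rcu" + blockId + "_c2_" + downsampleLevel)
    # Identity shortcut around the two convolutions.
    return tf.add(input, conv2)
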
Example #6
def net(image, classes):

    initDepth = 512
    bsize = image.get_shape()[0].value
    f = 7  # kernel size

    #encoding - downsampling
    # encoding level 1
    e1_c1 = util.conv(image, [f, f, 3, initDepth], "e1_c1")
    e1_c2 = util.conv(e1_c1, [f, f, initDepth, initDepth], "e1_c2")
    pool1, pool1_indices = util.pool(e1_c2,
                                     2,
                                     2,
                                     poolIndices=True,
                                     name="pool1")

    # encoding level 2
    e2_c1 = util.conv(pool1, [f, f, initDepth, initDepth], "e2_c1")
    e2_c2 = util.conv(e2_c1, [f, f, initDepth, initDepth], "e2_c2")
    pool2, pool2_indices = util.pool(e2_c2,
                                     2,
                                     2,
                                     poolIndices=True,
                                     name="pool2")

    # encoding level 3
    e3_c1 = util.conv(pool2, [f, f, initDepth, initDepth], "e3_c1")
    e3_c2 = util.conv(e3_c1, [f, f, initDepth, initDepth], "e3_c2")
    e3_c3 = util.conv(e3_c2, [f, f, initDepth, initDepth], "e3_c3")
    pool3, pool3_indices = util.pool(e3_c3,
                                     2,
                                     2,
                                     poolIndices=True,
                                     name="pool3")

    # encoding level 4
    e4_c1 = util.conv(pool3, [f, f, initDepth, initDepth], "e4_c1")
    e4_c2 = util.conv(e4_c1, [f, f, initDepth, initDepth], "e4_c2")
    e4_c3 = util.conv(e4_c2, [f, f, initDepth, initDepth], "e4_c3")
    pool4, pool4_indices = util.pool(e4_c3,
                                     2,
                                     2,
                                     poolIndices=True,
                                     name="pool4")

    # encoding level 5
    e5_c1 = util.conv(pool4, [f, f, initDepth, initDepth], "e5_c1")
    e5_c2 = util.conv(e5_c1, [f, f, initDepth, initDepth], "e5_c2")
    e5_c3 = util.conv(e5_c2, [f, f, initDepth, initDepth], "e5_c3")
    pool5, pool5_indices = util.pool(e5_c3,
                                     2,
                                     2,
                                     poolIndices=True,
                                     name="pool5")

    #decoding with pool indices
    # decoding level 1
    # upsample with pooling indices
    upSam5 = unpool_layer2x2(pool5, pool5_indices, pool4.get_shape())
    de1_c1 = util.conv(upSam5, [f, f, initDepth, initDepth], "de1_c1")
    de1_c2 = util.conv(de1_c1, [f, f, initDepth, initDepth], "de1_c2")

    # decoding level 2
    upSam4 = unpool_layer2x2(de1_c2, pool4_indices, pool3.get_shape())
    de2_c1 = util.conv(upSam4, [f, f, initDepth, initDepth], "de2_c1")
    de2_c2 = util.conv(de2_c1, [f, f, initDepth, initDepth], "de2_c2")

    # decoding level 3
    upSam3 = unpool_layer2x2(de2_c2, pool3_indices, pool2.get_shape())
    de3_c1 = util.conv(upSam3, [f, f, initDepth, initDepth], "de3_c1")
    de3_c2 = util.conv(de3_c1, [f, f, initDepth, initDepth], "de3_c2")
    de3_c2 = util.conv(de3_c2, [f, f, initDepth, initDepth], "de3_c3")

    # decoding level 4
    upSam2 = unpool_layer2x2(de3_c3, pool2_indices, pool1.get_shape())
    de4_c1 = util.conv(upSam2, [f, f, initDepth, initDepth], "de4_c1")
    de4_c2 = util.conv(de4_c1, [f, f, initDepth, initDepth], "de4_c2")
    de4_c3 = util.conv(de4_c2, [f, f, initDepth, initDepth], "de4_c3")

    # decoding level 5
    upSam1 = unpool_layer2x2(de4_c3, pool1_indices, e1_c1.get_shape())
    de5_c1 = util.conv(upSam1, [f, f, initDepth, initDepth], "de5_c1")
    de5_c2 = util.conv(de5_c1, [f, f, initDepth, initDepth], "de5_c2")
    de5_c3 = util.conv(de5_c2, [f, f, initDepth, classes], "de5_c3")

    softmax = tf.nn.softmax(de5_c3)

    return de5_c3, tf.argmax(softmax, axis=3), softmax
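
The unpool_layer2x2 helper used by this decoder is not shown. Below is a minimal sketch that scatters each pooled value back to its argmax position, assuming the indices come from tf.nn.max_pool_with_argmax with its default per-image flattening (no batch offset); the details are assumptions.

# NOTE: assumed sketch; the real unpool_layer2x2 is not shown.
def unpool_layer2x2(pool, indices, output_shape):
    # output_shape is the static TensorShape of the tensor before pooling.
    batch = output_shape[0].value
    height = output_shape[1].value
    width = output_shape[2].value
    channels = output_shape[3].value
    flat_output_size = batch * height * width * channels

    # Offset the per-image argmax indices by the image index so they address
    # the flattened output tensor.
    image_size = height * width * channels
    batch_range = tf.reshape(tf.range(batch, dtype=tf.int64), [batch, 1, 1, 1])
    offsets = tf.ones_like(indices) * batch_range * image_size

    flat_indices = tf.reshape(indices + offsets, [-1, 1])
    flat_values = tf.reshape(pool, [-1])

    # Scatter each retained maximum back to the position it came from;
    # every other output location stays zero.
    unpooled = tf.scatter_nd(flat_indices, flat_values,
                             tf.constant([flat_output_size], dtype=tf.int64))
    return tf.reshape(unpooled, [batch, height, width, channels])
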
def transitionDown(input, name):
    depth = input.get_shape()[3].value
    conv = util.conv(input, [1, 1, depth, depth], name + "_c", pad="SAME")
    drop = tf.nn.dropout(conv, 0.2)
    return util.pool(drop, 2, STRIDE, name=name + "p")
Example #8
def net(image, classes):

    #image = tf.image.resize_images(image, [101,101])#, method=ResizeMethod.NEAREST_NEIGHBOR)
    bsize = image.get_shape()[0].value
    f = 3  # kernel size
    #tf.summary.scalar("input", tf.reduce_sum(image))

    #encoding - downsampling
    # encoding level 1
    e1_c1 = util.conv(image, [f, f, 3, 1], "e1_c1", "SAME")
    e1_c2 = util.conv(e1_c1, [f, f, 1, 64], "e1_c2", "SAME")
    e1_c3 = util.conv(e1_c2, [f, f, 64, 64], "e1_c3", "SAME")
    pool1 = util.pool(e1_c3, 2, 2, name="pool1")

    #tf.summary.scalar("e1_c1", tf.reduce_sum(e1_c1))

    # encoding level 2
    e2_c1 = util.conv(pool1, [f, f, 64, 128], "e2_c1", "SAME")
    e2_c2 = util.conv(e2_c1, [f, f, 128, 128], "e2_c2", "SAME")
    e2_c3 = util.conv(e2_c2, [f, f, 128, 128], "e2_c3", "SAME")
    pool2 = util.pool(e2_c3, 2, 2, name="pool2")

    tf.summary.scalar("e2_c1", tf.reduce_sum(e2_c1))

    # encoding level 3
    e3_c1 = util.conv(pool2, [f, f, 128, 256], "e3_c1", "SAME")
    e3_c2 = util.conv(e3_c1, [f, f, 256, 256], "e3_c2", "SAME")
    e3_c3 = util.conv(e3_c2, [f, f, 256, 256], "e3_c3", "SAME")
    pool3 = util.pool(e3_c3, 2, 2, name="pool3")

    #tf.summary.scalar("e3_c1", tf.reduce_sum(e3_c1))

    # encoding level 4
    e4_c1 = util.conv(pool3, [f, f, 256, 512], "e4_c1", "SAME")
    e4_c2 = util.conv(e4_c1, [f, f, 512, 512], "e4_c2", "SAME")
    e4_c3 = util.conv(e4_c2, [f, f, 512, 512], "e4_c3", "SAME")
    pool4 = util.pool(e4_c3, 2, 2, name="pool4")

    #tf.summary.scalar("e4_c1", tf.reduce_sum(e4_c1))

    # encoding level 5
    e5_c1 = util.conv(pool4, [f, f, 512, 1024], "e5_c1", "SAME")
    e5_c2 = util.conv(e5_c1, [f, f, 1024, 1024], "e5_c2", "SAME")
    deOut = [
        bsize,
        e5_c2.get_shape()[1].value * STRIDE,
        e5_c2.get_shape()[2].value * STRIDE, 512
    ]
    de_dc1 = util.deconv(e5_c2, deOut, [f, f, 512, 1024], "de_dc1")
    #de_dc1 = pixelDeconv.pixel_dcl(e5_c2, 512, [f,f], "de_dc1")
    #tf.summary.scalar("e5_c2", tf.reduce_sum(e5_c2))
    #tf.summary.scalar("de_dc1", tf.reduce_sum(de_dc1))

    # decoding - upsampling
    # decoding level 1
    sliced = tf.slice(e4_c3, [0, 0, 0, 0], [-1, deOut[1], deOut[2], -1])
    de1_c1 = util.conv(tf.concat([sliced, de_dc1], 3), [f, f, 1024, 512],
                       "de1_c1", "SAME")
    de1_c2 = util.conv(de1_c1, [f, f, 512, 512], "de1_c2", "SAME")
    deOut = [
        bsize,
        de1_c2.get_shape()[1].value * STRIDE,
        de1_c2.get_shape()[2].value * STRIDE, 256
    ]
    de1_dc1 = util.deconv(de1_c2, deOut, [f, f, 256, 512], "de1_dc1")
    #de1_dc1 = pixelDeconv.pixel_dcl(de1_c2, 256, [f,f], "de1_dc1")

    # decoding level 2
    sliced = tf.slice(e3_c3, [0, 0, 0, 0], [-1, deOut[1], deOut[2], -1])
    de2_c1 = util.conv(tf.concat([sliced, de1_dc1], 3), [f, f, 512, 256],
                       "de2_c1", "SAME")
    de2_c2 = util.conv(de2_c1, [f, f, 256, 256], "de2_c2", "SAME")
    deOut = [
        bsize,
        de2_c2.get_shape()[1].value * STRIDE,
        de2_c2.get_shape()[2].value * STRIDE, 128
    ]
    de2_dc1 = util.deconv(de2_c2, deOut, [f, f, 128, 256], "de2_dc1")
    #de2_dc1 = pixelDeconv.pixel_dcl(de2_c2, 128, [f,f], "de2_dc1")

    # decoding level 3
    sliced = tf.slice(e2_c2, [0, 0, 0, 0], [-1, deOut[1], deOut[2], -1])
    de3_c1 = util.conv(tf.concat([sliced, de2_dc1], 3), [f, f, 256, 128],
                       "de3_c1", "SAME")
    de3_c2 = util.conv(de3_c1, [f, f, 128, 128], "de3_c2", "SAME")
    deOut = [
        bsize,
        de3_c2.get_shape()[1].value * STRIDE,
        de3_c2.get_shape()[2].value * STRIDE, 64
    ]
    de3_dc1 = util.deconv(de3_c2, deOut, [f, f, 64, 128], "de3_dc1")
    #de3_dc1 = pixelDeconv.pixel_dcl(de3_c2, 64, [f,f], "de3_dc1")

    # decoding level 4
    sliced = tf.slice(e1_c2, [0, 0, 0, 0], [-1, deOut[1], deOut[2], -1])
    de4_c1 = util.conv(tf.concat([sliced, de3_dc1], 3), [f, f, 128, 64],
                       "de4_c1", "SAME")
    de4_c2 = util.conv(de4_c1, [f, f, 64, 64], "de4_c2", "SAME")
    de4_c3 = util.conv(de4_c2, [f, f, 64, 64], "de4_c3", "SAME")
    de4_c4 = util.conv(de4_c3, [f, f, 64, 64], "de4_c4", "SAME")

    final = util.conv(de4_c4, [1, 1, 64, classes], "final", "SAME")
    #tf.summary.scalar("final", tf.reduce_sum(final))

    softmax = tf.nn.softmax(final)

    return final, tf.argmax(softmax, axis=3), softmax
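
This example upsamples with util.deconv, passing an explicit output shape and a filter shaped [k, k, out_channels, in_channels]. A minimal sketch of such a helper, assuming it wraps tf.nn.conv2d_transpose with the module-level STRIDE and a ReLU (the initializer and activation are guesses):

# NOTE: assumed sketch; the real util.deconv is not shown.
def deconv(x, outputShape, filterShape, name):
    # filterShape = [k, k, out_channels, in_channels], as expected by
    # tf.nn.conv2d_transpose.
    with tf.variable_scope(name):
        w = tf.get_variable("w", filterShape,
                            initializer=tf.truncated_normal_initializer(stddev=0.1))
        b = tf.get_variable("b", [filterShape[2]],
                            initializer=tf.zeros_initializer())
        out = tf.nn.conv2d_transpose(x, w,
                                     output_shape=outputShape,
                                     strides=[1, STRIDE, STRIDE, 1],
                                     padding="SAME")
        return tf.nn.relu(tf.nn.bias_add(out, b))
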