Example #1
0
        def up_layer(x1, x2, in_size1, in_size2, out_size, i):
            """One decoder ("up") step of the 3-D U-Net.

            Upsamples ``x1`` with a strided transposed convolution to match
            the spatial shape of the skip tensor ``x2``, concatenates the two
            along the channel axis, then applies two 3x3x3 conv + ReLU +
            dropout stages.

            :param x1: tensor from the previous (deeper) layer, ``in_size1`` channels
            :param x2: skip-connection tensor from the encoder, ``in_size2`` channels
            :param in_size1: channel count of ``x1``
            :param in_size2: channel count of ``x2`` (also the deconv output channels)
            :param out_size: channel count of the returned tensor
            :param i: layer index used to build unique variable/op names
            :return: tensor with ``out_size`` channels at ``x2``'s spatial shape
            """
            # Up 1: transposed conv; kernel layout is
            # [d, h, w, out_channels, in_channels] for conv3d_transpose.
            W1 = utils.weight_variable([2, 2, 2, in_size2, in_size1],
                                       name="W_u_" + str(i) + "_1")
            b1 = utils.bias_variable([in_size2], name="b_u_" + str(i) + "_1")
            deco1 = utils.conv3d_transpose_strided(x1,
                                                   W1,
                                                   b1,
                                                   output_shape=tf.shape(x2))
            # BUG FIX: this op was named with the encoder prefix "relu_d_";
            # renamed to "relu_u_" for consistency with the other decoder ops.
            relu1 = tf.nn.relu(deco1, name="relu_u_" + str(i) + "_1")

            # Concat along the channels (last) dimension:
            # result has in_size2 * 2 channels.
            conc1 = tf.concat([relu1, x2],
                              -1)  # concat along the channels dimension

            # Conv1
            W2 = utils.weight_variable([3, 3, 3, in_size2 * 2, out_size],
                                       name="W_u_" + str(i) + "_2")
            b2 = utils.bias_variable([out_size], name="b_u_" + str(i) + "_2")
            conv1 = utils.conv3d_basic(conc1, W2, b2)
            relu2 = tf.nn.relu(conv1, name="relu_u_" + str(i) + "_2")
            relu2 = tf.nn.dropout(relu2, keep_prob=keep_prob)

            # Conv2
            W3 = utils.weight_variable([3, 3, 3, out_size, out_size],
                                       name="W_u_" + str(i) + "_3")
            b3 = utils.bias_variable([out_size], name="b_u_" + str(i) + "_3")
            conv3 = utils.conv3d_basic(relu2, W3, b3)
            relu3 = tf.nn.relu(conv3, name="relu_u_" + str(i) + "_3")
            relu3 = tf.nn.dropout(relu3, keep_prob=keep_prob)

            return relu3
Example #2
0
        def down_layer(x, in_size, out_size, i):
            """One encoder ("down") step: two 3x3x3 conv + ReLU + dropout
            stages followed by 2x2x2 max pooling.

            :param x: input tensor with ``in_size`` channels
            :param in_size: channel count of ``x``
            :param out_size: channel count after the second convolution
            :param i: layer index used to build unique variable/op names
            :return: tuple ``(features, pooled)`` — the pre-pool activations
                (kept for the decoder skip connection) and the pooled tensor
            """
            tag = str(i)

            # First conv runs at half of out_size channels.
            w_a = utils.weight_variable([3, 3, 3, in_size, out_size // 2],
                                        name="W_d_" + tag + "_1")
            b_a = utils.bias_variable([out_size // 2],
                                     name="b_d_" + tag + "_1")
            act_a = tf.nn.relu(utils.conv3d_basic(x, w_a, b_a),
                               name="relu_d_" + tag + "_1")
            act_a = tf.nn.dropout(act_a, keep_prob=keep_prob)

            # Second conv expands to the full out_size.
            w_b = utils.weight_variable([3, 3, 3, out_size // 2, out_size],
                                        name="W_d_" + tag + "_2")
            b_b = utils.bias_variable([out_size], name="b_d_" + tag + "_2")
            act_b = tf.nn.relu(utils.conv3d_basic(act_a, w_b, b_b),
                               name="relu_d_" + tag + "_2")
            act_b = tf.nn.dropout(act_b, keep_prob=keep_prob)

            # Halve the spatial resolution in all three dimensions.
            return act_b, utils.max_pool_2x2x2(act_b)
Example #3
0
def inference(image, keep_prob):
    """
    Semantic segmentation network definition (3-D U-Net style).

    Architecture follows https://arxiv.org/pdf/1704.06382.pdf: four encoder
    ("down") layers, three decoder ("up") layers with skip connections, and a
    final 1x1x1 convolution producing per-class logits.

    :param image: input image. Should have values in range 0-255
    :param keep_prob: dropout keep probability applied after each conv + ReLU
    :return: tuple ``(prediction, logits)`` — the argmax class map with a
        trailing singleton channel axis, and the raw per-class logits tensor
    """

    # using architecture described in https://arxiv.org/pdf/1704.06382.pdf
    # TODO: Look up preprocessing step
    processed_image = image / 100

    with tf.variable_scope("inference"):

        def down_layer(x, in_size, out_size, i):
            """Encoder step: two 3x3x3 conv+ReLU+dropout stages, then 2x2x2
            max pooling. Returns (pre-pool features, pooled tensor)."""
            # Down 1: first conv runs at half of out_size channels.
            W1 = utils.weight_variable([3, 3, 3, in_size, out_size // 2],
                                       name="W_d_" + str(i) + "_1")
            b1 = utils.bias_variable([out_size // 2],
                                     name="b_d_" + str(i) + "_1")
            conv1 = utils.conv3d_basic(x, W1, b1)
            relu1 = tf.nn.relu(conv1, name="relu_d_" + str(i) + "_1")
            relu1 = tf.nn.dropout(relu1, keep_prob=keep_prob)

            # Down 2: expand to the full out_size.
            W2 = utils.weight_variable([3, 3, 3, out_size // 2, out_size],
                                       name="W_d_" + str(i) + "_2")
            b2 = utils.bias_variable([out_size], name="b_d_" + str(i) + "_2")
            conv2 = utils.conv3d_basic(relu1, W2, b2)
            relu2 = tf.nn.relu(conv2, name="relu_d_" + str(i) + "_2")
            relu2 = tf.nn.dropout(relu2, keep_prob=keep_prob)

            # Pool 1: halve spatial resolution in all three dimensions.
            pool = utils.max_pool_2x2x2(relu2)

            # relu2 is kept for the decoder skip connection.
            return relu2, pool

        # Apply 4 down layers of increasing sizes
        d1, p1 = down_layer(processed_image, 1, 64, 1)
        d2, p2 = down_layer(p1, 64, 128, 2)
        d3, p3 = down_layer(p2, 128, 256, 3)
        d4, p4 = down_layer(p3, 256, 512, 4)

        def up_layer(x1, x2, in_size1, in_size2, out_size, i):
            """Decoder step: upsample x1 to x2's spatial shape, concat with
            skip tensor x2 along channels, then two 3x3x3 conv+ReLU+dropout
            stages producing out_size channels."""
            # Up 1: kernel layout is [d, h, w, out_ch, in_ch] for
            # conv3d_transpose.
            W1 = utils.weight_variable([2, 2, 2, in_size2, in_size1],
                                       name="W_u_" + str(i) + "_1")
            b1 = utils.bias_variable([in_size2], name="b_u_" + str(i) + "_1")
            deco1 = utils.conv3d_transpose_strided(x1,
                                                   W1,
                                                   b1,
                                                   output_shape=tf.shape(x2))
            # BUG FIX: op was named with the encoder prefix "relu_d_";
            # renamed to "relu_u_" for consistency with this decoder layer.
            relu1 = tf.nn.relu(deco1, name="relu_u_" + str(i) + "_1")

            # Concat
            conc1 = tf.concat([relu1, x2],
                              -1)  # concat along the channels dimension

            # Conv1: operates on in_size2 * 2 channels after the concat.
            W2 = utils.weight_variable([3, 3, 3, in_size2 * 2, out_size],
                                       name="W_u_" + str(i) + "_2")
            b2 = utils.bias_variable([out_size], name="b_u_" + str(i) + "_2")
            conv1 = utils.conv3d_basic(conc1, W2, b2)
            relu2 = tf.nn.relu(conv1, name="relu_u_" + str(i) + "_2")
            relu2 = tf.nn.dropout(relu2, keep_prob=keep_prob)

            # Conv2
            W3 = utils.weight_variable([3, 3, 3, out_size, out_size],
                                       name="W_u_" + str(i) + "_3")
            b3 = utils.bias_variable([out_size], name="b_u_" + str(i) + "_3")
            conv3 = utils.conv3d_basic(relu2, W3, b3)
            relu3 = tf.nn.relu(conv3, name="relu_u_" + str(i) + "_3")
            relu3 = tf.nn.dropout(relu3, keep_prob=keep_prob)

            return relu3

        # Apply 3 Up layers with skip connections
        u3 = up_layer(d4, d3, 512, 256, 256, 3)
        u2 = up_layer(u3, d2, 256, 128, 128, 2)
        u1 = up_layer(u2, d1, 128, 64, 64, 1)

        # Apply a final 1x1x1 Conv layer to get per-class logits
        W = utils.weight_variable([1, 1, 1, 64, NUM_OF_CLASSES], name="W_o")
        b = utils.bias_variable([NUM_OF_CLASSES], name="b_o")
        conv = utils.conv3d_basic(u1, W, b)

        # FIX: use the current keyword `axis` instead of the deprecated
        # `dimension`/`dim` names (removed in later TF releases). Axis 4 is
        # the channel/class axis of the NDHWC logits.
        annotation_pred = tf.argmax(conv, axis=4, name="prediction")

    return tf.expand_dims(annotation_pred, axis=4), conv