Example #1
    def _generator(self, z, dims, train_phase, activation=tf.nn.relu, scope_name="generator"):
        N = len(dims)
        image_size = self.resized_image_size // (2 ** (N - 1))
        with tf.variable_scope(scope_name) as scope:
            W_z = utils.weight_variable([self.z_dim, dims[0] * image_size * image_size], name="W_z")
            b_z = utils.bias_variable([dims[0] * image_size * image_size], name="b_z")
            h_z = tf.matmul(z, W_z) + b_z
            h_z = tf.reshape(h_z, [-1, image_size, image_size, dims[0]])
            h_bnz = utils.batch_norm(h_z, dims[0], train_phase, scope="gen_bnz")
            h = activation(h_bnz, name='h_z')
            utils.add_activation_summary(h)

            for index in range(N - 2):
                image_size *= 2
                W = utils.weight_variable([5, 5, dims[index + 1], dims[index]], name="W_%d" % index)
                b = utils.bias_variable([dims[index + 1]], name="b_%d" % index)
                deconv_shape = tf.stack([tf.shape(h)[0], image_size, image_size, dims[index + 1]])
                h_conv_t = utils.conv2d_transpose_strided(h, W, b, output_shape=deconv_shape)
                h_bn = utils.batch_norm(h_conv_t, dims[index + 1], train_phase, scope="gen_bn%d" % index)
                h = activation(h_bn, name='h_%d' % index)
                utils.add_activation_summary(h)

            image_size *= 2
            W_pred = utils.weight_variable([5, 5, dims[-1], dims[-2]], name="W_pred")
            b_pred = utils.bias_variable([dims[-1]], name="b_pred")
            deconv_shape = tf.stack([tf.shape(h)[0], image_size, image_size, dims[-1]])
            h_conv_t = utils.conv2d_transpose_strided(h, W_pred, b_pred, output_shape=deconv_shape)
            pred_image = tf.nn.tanh(h_conv_t, name='pred_image')
            utils.add_activation_summary(pred_image)

        return pred_image
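The utils.conv2d_transpose_strided helper that all of these examples rely on is not shown on this page. A minimal sketch consistent with its call sites above (input tensor, a [height, width, out_channels, in_channels] filter, a bias, an optional output_shape, and a stride defaulting to 2) might look like the following; the exact definition in each repository's utils module may differ.

def conv2d_transpose_strided(x, W, b, output_shape=None, stride=2):
    # filter W is [height, width, out_channels, in_channels], as in the
    # examples above; when no output_shape is given, infer it by doubling
    # the spatial dims and reading out_channels from the filter
    if output_shape is None:
        output_shape = x.get_shape().as_list()
        output_shape[1] *= 2
        output_shape[2] *= 2
        output_shape[3] = W.get_shape().as_list()[2]
    conv = tf.nn.conv2d_transpose(x, W, output_shape,
                                  strides=[1, stride, stride, 1],
                                  padding="SAME")
    return tf.nn.bias_add(conv, b)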
Example #2
    def _generator(self,
                   z,
                   dims,
                   train_phase,
                   activation=tf.nn.relu,
                   scope_name="generator"):
        N = len(dims)
        image_size = self.resized_image_size // (2**(N - 1))
        with tf.variable_scope(scope_name) as scope:
            W_z = utils.weight_variable(
                [self.z_dim, dims[0] * image_size * image_size], name="W_z")
            h_z = tf.matmul(z, W_z)
            h_z = tf.reshape(h_z, [-1, image_size, image_size, dims[0]])
            # h_bnz = tf.contrib.layers.batch_norm(inputs=h_z, decay=0.9, epsilon=1e-5, is_training=train_phase,
            #                                      scope="gen_bnz")
            # h_bnz = utils.batch_norm(h_z, dims[0], train_phase, scope="gen_bnz")
            h_bnz = utils.batch_norm('gen_bnz', h_z, True, 'NHWC', train_phase)
            h = activation(h_bnz, name='h_z')
            utils.add_activation_summary(h)

            for index in range(N - 2):
                image_size *= 2
                W = utils.weight_variable([4, 4, dims[index + 1], dims[index]],
                                          name="W_%d" % index)
                b = tf.zeros([dims[index + 1]])
                deconv_shape = tf.stack(
                    [tf.shape(h)[0], image_size, image_size, dims[index + 1]])
                h_conv_t = utils.conv2d_transpose_strided(
                    h, W, b, output_shape=deconv_shape)
                # h_bn = tf.contrib.layers.batch_norm(inputs=h_conv_t, decay=0.9, epsilon=1e-5, is_training=train_phase,
                #                                     scope="gen_bn%d" % index)
                # h_bn = utils.batch_norm(h_conv_t, dims[index + 1], train_phase, scope="gen_bn%d" % index)
                h_bn = utils.batch_norm("gen_bn%d" % index, h_conv_t, True,
                                        'NHWC', train_phase)
                h = activation(h_bn, name='h_%d' % index)
                utils.add_activation_summary(h)

            image_size *= 2
            W_pred = utils.weight_variable([4, 4, dims[-1], dims[-2]],
                                           name="W_pred")
            b = tf.zeros([dims[-1]])
            deconv_shape = tf.stack(
                [tf.shape(h)[0], image_size, image_size, dims[-1]])
            h_conv_t = utils.conv2d_transpose_strided(
                h, W_pred, b, output_shape=deconv_shape)
            pred_image = tf.nn.tanh(h_conv_t, name='pred_image')
            utils.add_activation_summary(pred_image)

        return pred_image
Example #3
def deconv_layer(input,
                 r_field,
                 in_channels,
                 out_channels,
                 out_shape,
                 nr,
                 stride=2):
    W = utils.weight_variable([r_field, r_field, out_channels, in_channels],
                              name="W_t" + nr)
    b = utils.bias_variable([out_channels], name="b_t" + nr)
    conv_t1 = utils.conv2d_transpose_strided(input, W, b, out_shape, stride=stride)
    return conv_t1
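A hypothetical call site for deconv_layer, assuming a 256-channel feature map h being upsampled to the spatial size of an earlier 128-channel layer skip:

# hypothetical usage: upsample h (256 channels) to skip's spatial size
out_shape = tf.shape(skip)
conv_t = deconv_layer(h, r_field=4, in_channels=256, out_channels=128,
                      out_shape=out_shape, nr="1")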
Example #4
def single_frame_inference(image, keep_prob, train=False):
    # set phase
    with tf.variable_scope("inference"):
        print("Single Frame Inference")
        net = compact_base(image, train)

        W7 = weight_variable([8, 8, 32, 512], name="W7")
        b7 = bias_variable([512], name="b7")
        # conv = tf.nn.conv2d(net['layer6_p'], W7, strides=[1, 1, 1, 1], padding="VALID")
        # conv7 = tf.nn.bias_add(conv, b7)
        conv7 = conv2d_basic(net['layer6_p'], W7, b7)
        relu7 = tf.nn.relu(conv7, name="relu7")

        print(relu7.get_shape(), net['layer6_p'])
        W8 = weight_variable([1, 1, 512, 512], name="W8")
        b8 = bias_variable([512], name="b8")
        conv8 = conv2d_basic(relu7, W8, b8)
        relu8 = tf.nn.relu(conv8, name="relu8")

        W9 = weight_variable([1, 1, 512, FLAGS.NUM_OF_CLASSES], name="W9")
        b9 = bias_variable([FLAGS.NUM_OF_CLASSES], name="b9")
        conv9 = conv2d_basic(relu8, W9, b9)

        deconv_shape1 = net['layer4_p'].get_shape()
        W_t1 = weight_variable(
            [4, 4, deconv_shape1[3].value, FLAGS.NUM_OF_CLASSES], name="W_t1")
        b_t1 = bias_variable([deconv_shape1[3].value], name="b_t1")
        conv_t1 = conv2d_transpose_strided(conv9,
                                           W_t1,
                                           b_t1,
                                           output_shape=tf.shape(
                                               net['layer4_p']))
        fuse_1 = tf.add(conv_t1, net['layer4_p'], name="fuse_1")

        deconv_shape2 = net['layer2_p'].get_shape()
        W_t2 = weight_variable(
            [4, 4, deconv_shape2[3].value, deconv_shape1[3].value],
            name="W_t2")
        b_t2 = bias_variable([deconv_shape2[3].value], name="b_t2")
        conv_t2 = conv2d_transpose_strided(fuse_1,
                                           W_t2,
                                           b_t2,
                                           output_shape=tf.shape(
                                               net['layer2_p']))
        fuse_2 = tf.add(conv_t2, net['layer2_p'], name="fuse_2")

        shape = tf.shape(image)
        deconv_shape3 = tf.stack(
            [shape[0], shape[1], shape[2], FLAGS.NUM_OF_CLASSES])
        W_t3 = weight_variable(
            [16, 16, FLAGS.NUM_OF_CLASSES, deconv_shape2[3].value],
            name="W_t3")
        b_t3 = bias_variable([FLAGS.NUM_OF_CLASSES], name="b_t3")
        conv_t3 = conv2d_transpose_strided(fuse_2,
                                           W_t3,
                                           b_t3,
                                           output_shape=deconv_shape3)

        annotation_pred = tf.argmax(conv_t3, axis=3, name="prediction")

    return tf.expand_dims(annotation_pred, axis=3), conv_t3
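The weight_variable and bias_variable helpers used here are likewise repository-specific. A common minimal definition, assumed in this sketch, is a truncated-normal initializer for weights and a constant initializer for biases:

def weight_variable(shape, name=None):
    # small random init; the stddev value is an assumption, repositories vary
    initial = tf.truncated_normal(shape, stddev=0.02)
    return tf.Variable(initial, name=name)


def bias_variable(shape, name=None):
    initial = tf.constant(0.0, shape=shape)
    return tf.Variable(initial, name=name)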
Example #5
def multi_frame_inference(image, frame_depth, train=False):
    frames = tf.split(image, frame_depth, axis=1)
    frame_intermediates = []

    with tf.variable_scope("bottleneck"):
        image = tf.squeeze(frames[0], axis=[1])
        net = compact_base(image, train)
        frame_intermediates.append(net['layer6_p'])
    for i in range(1, frame_depth):
        with tf.variable_scope("bottleneck", reuse=True):
            # print("image", image.get_shape())
            image = tf.squeeze(frames[i], axis=[1])
            net = compact_base(image, train)
            frame_intermediates.append(net['layer6_p'])

    print("why, ", net['layer1'])

    conv_final_layer = tf.concat(frame_intermediates, axis=3)

    with tf.variable_scope("inference"):

        W7 = weight_variable([8, 8, 32 * frame_depth, 32], name="W7")
        b7 = bias_variable([32], name="b7")
        conv7 = conv2d_basic(conv_final_layer, W7, b7)
        relu7 = tf.nn.relu(conv7, name="relu7")

        W8 = weight_variable([1, 1, 32, 32], name="W8")
        b8 = bias_variable([32], name="b8")
        conv8 = conv2d_basic(relu7, W8, b8)
        relu8 = tf.nn.relu(conv8, name="relu8")

        W9 = weight_variable([1, 1, 32, FLAGS.NUM_OF_CLASSES], name="W9")
        b9 = bias_variable([FLAGS.NUM_OF_CLASSES], name="b9")
        conv9 = conv2d_basic(relu8, W9, b9)

        deconv_shape1 = net['layer4_p'].get_shape()
        W_t1 = weight_variable(
            [4, 4, deconv_shape1[3].value, FLAGS.NUM_OF_CLASSES], name="W_t1")
        b_t1 = bias_variable([deconv_shape1[3].value], name="b_t1")
        conv_t1 = conv2d_transpose_strided(conv9,
                                           W_t1,
                                           b_t1,
                                           output_shape=tf.shape(
                                               net['layer4_p']))
        fuse_1 = tf.add(conv_t1, net['layer4_p'], name="fuse_1")

        deconv_shape2 = net['layer2_p'].get_shape()
        W_t2 = weight_variable(
            [4, 4, deconv_shape2[3].value, deconv_shape1[3].value],
            name="W_t2")
        b_t2 = bias_variable([deconv_shape2[3].value], name="b_t2")
        conv_t2 = conv2d_transpose_strided(fuse_1,
                                           W_t2,
                                           b_t2,
                                           output_shape=tf.shape(
                                               net['layer2_p']))
        fuse_2 = tf.add(conv_t2, net['layer2_p'], name="fuse_2")

        shape = tf.shape(image)
        deconv_shape3 = tf.stack(
            [shape[0], shape[1], shape[2], FLAGS.NUM_OF_CLASSES])
        W_t3 = weight_variable(
            [16, 16, FLAGS.NUM_OF_CLASSES, deconv_shape2[3].value],
            name="W_t3")
        b_t3 = bias_variable([FLAGS.NUM_OF_CLASSES], name="b_t3")
        conv_t3 = conv2d_transpose_strided(fuse_2,
                                           W_t3,
                                           b_t3,
                                           output_shape=deconv_shape3)

        annotation_pred = tf.argmax(conv_t3, axis=3, name="prediction")

    return tf.expand_dims(annotation_pred, axis=3), conv_t3
Example #6
def segment(image, keep_prob_conv, input_channels, output_channels, scope):

    with tf.variable_scope(scope):

        ###############
        # downsample  #
        ###############

        W2 = utils.weight_variable([3, 3, input_channels, 64], name="W2")
        b2 = utils.bias_variable([64], name="b2")
        conv2 = utils.conv2d_basic(image, W2, b2, name="conv2")
        relu2 = tf.nn.relu(conv2, name="relu2")
        pool2 = utils.max_pool_2x2(relu2)
        dropout2 = tf.nn.dropout(pool2, keep_prob=keep_prob_conv)

        W3 = utils.weight_variable([3, 3, 64, 128], name="W3")
        b3 = utils.bias_variable([128], name="b3")
        conv3 = utils.conv2d_basic(dropout2, W3, b3, name="conv3")
        relu3 = tf.nn.relu(conv3, name="relu3")
        pool3 = utils.max_pool_2x2(relu3)
        dropout3 = tf.nn.dropout(pool3, keep_prob=keep_prob_conv)

        W4 = utils.weight_variable([3, 3, 128, 256], name="W4")
        b4 = utils.bias_variable([256], name="b4")
        conv4 = utils.conv2d_basic(dropout3, W4, b4, name="conv4")
        relu4 = tf.nn.relu(conv4, name="relu4")
        pool4 = utils.max_pool_2x2(relu4)
        dropout4 = tf.nn.dropout(pool4, keep_prob=keep_prob_conv)

        W5 = utils.weight_variable([3, 3, 256, 512], name="W5")
        b5 = utils.bias_variable([512], name="b5")
        conv5 = utils.conv2d_basic(dropout4, W5, b5, name="conv5")
        relu5 = tf.nn.relu(conv5, name="relu5")
        pool5 = utils.max_pool_2x2(relu5)
        dropout5 = tf.nn.dropout(pool5, keep_prob=keep_prob_conv)

        W6 = utils.weight_variable([3, 3, 512, 512], name="W6")
        b6 = utils.bias_variable([512], name="b6")
        conv6 = utils.conv2d_basic(dropout5, W6, b6, name="conv6")
        relu6 = tf.nn.relu(conv6, name="relu6")
        pool6 = utils.max_pool_2x2(relu6)
        dropout6 = tf.nn.dropout(pool6, keep_prob=keep_prob_conv)

        W7 = utils.weight_variable([3, 3, 512, 4096], name="W7")
        b7 = utils.bias_variable([4096], name="b7")
        conv7 = utils.conv2d_basic(dropout6, W7, b7, name="conv7")

        ############
        # upsample #
        ############

        deconv_shape1 = pool5.get_shape()
        W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, 4096],
                                     name="W_t1")
        b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
        conv_t1 = utils.conv2d_transpose_strided(conv7,
                                                 W_t1,
                                                 b_t1,
                                                 output_shape=tf.shape(pool5))

        stacked_1 = tf.concat([conv_t1, pool5], -1)
        fuse_1_1 = conv_layer(stacked_1, 1, 2 * deconv_shape1[3].value,
                              deconv_shape1[3].value, "fuse_1_1")
        fuse_1_2 = conv_layer(fuse_1_1, 1, deconv_shape1[3].value,
                              deconv_shape1[3].value, "fuse_1_2")

        deconv_shape2 = pool4.get_shape()
        W_t2 = utils.weight_variable(
            [4, 4, deconv_shape2[3].value, deconv_shape1[3].value],
            name="W_t2")
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
        conv_t2 = utils.conv2d_transpose_strided(fuse_1_2,
                                                 W_t2,
                                                 b_t2,
                                                 output_shape=tf.shape(pool4))

        stacked_2 = tf.concat([conv_t2, pool4], -1)
        fuse_2_1 = conv_layer(stacked_2, 1, 2 * deconv_shape2[3].value,
                              deconv_shape2[3].value, "fuse_2_1")
        fuse_2_2 = conv_layer(fuse_2_1, 1, deconv_shape2[3].value,
                              deconv_shape2[3].value, "fuse_2_2")

        deconv_shape3 = pool3.get_shape()
        W_t3 = utils.weight_variable(
            [4, 4, deconv_shape3[3].value, deconv_shape2[3].value],
            name="W_t3")
        b_t3 = utils.bias_variable([deconv_shape3[3].value], name="b_t3")
        conv_t3 = utils.conv2d_transpose_strided(fuse_2_2,
                                                 W_t3,
                                                 b_t3,
                                                 output_shape=tf.shape(pool3))

        stacked_3 = tf.concat([conv_t3, pool3], -1)
        fuse_3_1 = conv_layer(stacked_3, 1, 2 * deconv_shape3[3].value,
                              deconv_shape3[3].value, "fuse_3_1")
        fuse_3_2 = conv_layer(fuse_3_1, 1, deconv_shape3[3].value,
                              deconv_shape3[3].value, "fuse_3_2")

        deconv_shape4 = pool2.get_shape()
        W_t4 = utils.weight_variable(
            [4, 4, deconv_shape4[3].value, deconv_shape3[3].value],
            name="W_t4")
        b_t4 = utils.bias_variable([deconv_shape4[3].value], name="b_t4")
        conv_t4 = utils.conv2d_transpose_strided(fuse_3_2,
                                                 W_t4,
                                                 b_t4,
                                                 output_shape=tf.shape(pool2))

        stacked_4 = tf.concat([conv_t4, pool2], -1)
        fuse_4_1 = conv_layer(stacked_4, 1, 2 * deconv_shape4[3].value,
                              deconv_shape4[3].value, "fuse_4_1")
        fuse_4_2 = conv_layer(fuse_4_1, 1, deconv_shape4[3].value,
                              deconv_shape4[3].value, "fuse_4_2")

        # do the final upscaling
        shape = tf.shape(image)
        deconv_shape5 = tf.stack(
            [shape[0], shape[1], shape[2], output_channels])
        W_t5 = utils.weight_variable(
            [16, 16, output_channels, deconv_shape4[3].value], name="W_t5")
        b_t5 = utils.bias_variable([output_channels], name="b_t5")
        conv_t5 = utils.conv2d_transpose_strided(fuse_4_2,
                                                 W_t5,
                                                 b_t5,
                                                 output_shape=deconv_shape5,
                                                 stride=2)

    annotation_pred = tf.argmax(conv_t5, axis=3, name="prediction")
    return tf.expand_dims(annotation_pred, axis=3), conv_t5
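conv_layer is not defined in this example; judging from its call sites (input, receptive field, input channels, output channels, name), it is presumably a convolution-plus-ReLU wrapper along these lines:

def conv_layer(x, r_field, in_channels, out_channels, name):
    # assumed conv + bias + ReLU wrapper matching the call sites above
    W = utils.weight_variable([r_field, r_field, in_channels, out_channels],
                              name="W_" + name)
    b = utils.bias_variable([out_channels], name="b_" + name)
    return tf.nn.relu(utils.conv2d_basic(x, W, b), name=name)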
Example #7
def inference(image, keep_prob):
    """
    Semantic segmentation network definition
    :param image:
    :param keep_prob:
    :return:
    """
    print('setting up vgg model initialized params')
    model_data = utils.get_model_data("data", MODEL_URL)
    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))
    weights = np.squeeze(model_data['layers'])

    processed_image = utils.process_image(image, mean_pixel)

    with tf.name_scope('inference'):
        image_net = vgg_net(weights, processed_image)
        conv_final_layer = image_net['conv5_3']

        pool5 = utils.max_pool_2x2(conv_final_layer)

        W6 = utils.weights_variable([7, 7, 512, 4096], name="W6")
        b6 = utils.bias_variable([4096], name='b6')
        conv6 = utils.conv2d_basic(pool5, W6, b6)
        relu6 = tf.nn.relu(conv6, name='relu6')

        relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)

        W7 = utils.weights_variable([1, 1, 4096, 4096], name="W7")
        b7 = utils.bias_variable([4096], name="b7")
        conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)
        relu7 = tf.nn.relu(conv7, name="relu7")

        relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)

        W8 = utils.weights_variable([1, 1, 4096, NUM_OF_CLASSESS], name='W8')
        b8 = utils.bias_variable([NUM_OF_CLASSESS], name="b8")
        conv8 = utils.conv2d_basic(relu_dropout7, W8, b8)

        # upsampling to actual image size
        deconv_shape1 = image_net['pool4'].get_shape()
        W_t1 = utils.weights_variable(
            [4, 4, deconv_shape1[3].value, NUM_OF_CLASSESS], name='W_t1')
        b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
        conv_t1 = utils.conv2d_transpose_strided(conv8,
                                                 W_t1,
                                                 b_t1,
                                                 output_shape=tf.shape(
                                                     image_net['pool4']))
        fuse_1 = tf.add(conv_t1, image_net['pool4'], name='fuse_1')

        deconv_shape2 = image_net['pool3'].get_shape()
        W_t2 = utils.weights_variable(
            [4, 4, deconv_shape2[3].value, deconv_shape1[3].value],
            name='W_t2')
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
        conv_t2 = utils.conv2d_transpose_strided(fuse_1,
                                                 W_t2,
                                                 b_t2,
                                                 output_shape=tf.shape(
                                                     image_net['pool3']))
        fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")

        shape = tf.shape(image)
        output_shape = tf.stack(
            [shape[0], shape[1], shape[2], NUM_OF_CLASSESS])
        W_t3 = utils.weights_variable(
            [7, 7, NUM_OF_CLASSESS, deconv_shape2[3].value], name='W_t3')
        b_t3 = utils.bias_variable([NUM_OF_CLASSESS], name="b_t3")
        conv_t3 = utils.conv2d_transpose_strided(fuse_2,
                                                 W_t3,
                                                 b_t3,
                                                 output_shape=output_shape)

        annotation_pred = tf.argmax(conv_t3, axis=3, name='prediction')

        return tf.expand_dims(annotation_pred, axis=3), conv_t3
Example #8
def fine_tune_net(image, keep_prob):
    """
    the network to be fine tuned and used to perform the semantic segmentation
    :param image: input image.
    :param keep_prob: for doupout
    :return: annotation prediction, probability map and 2nd last layer of vgg
    """
    print("setting up vgg initialized conv layers ...")
    model_data = utils.get_model_data(FLAGS.model_dir, MODEL_URL)

    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))

    weights = np.squeeze(model_data['layers'])

    processed_image = image - mean_pixel

    with tf.variable_scope("fine_tune"):
        image_net = vgg_net(weights, processed_image)
        conv_final_layer = image_net["conv5_3"]  # 14x14x512

        pool5 = utils.max_pool_2x2(conv_final_layer)  # 7x7x512

        W6 = utils.weight_variable([7, 7, 512, 4096], name="W6")
        b6 = utils.bias_variable([4096], name="b6")
        conv6 = utils.conv2d_basic(pool5, W6, b6)
        relu6 = tf.nn.relu(conv6, name="relu6")
        relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)  # 7x7x4096

        W7 = utils.weight_variable([1, 1, 4096, 4096], name="W7")
        b7 = utils.bias_variable([4096], name="b7")
        conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)
        relu7 = tf.nn.relu(conv7, name="relu7")
        relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)  # 7x7x4096

        W8 = utils.weight_variable([1, 1, 4096, NUM_OF_CLASSESS], name="W8")
        b8 = utils.bias_variable([NUM_OF_CLASSESS], name="b8")
        conv8 = utils.conv2d_basic(relu_dropout7, W8, b8)
        # annotation_pred1 = tf.argmax(conv8, dimension=3, name="prediction1")

        # upscale
        deconv_shape1 = image_net["pool4"].get_shape()  # 14x14x512
        W_t1 = utils.weight_variable(
            [4, 4, deconv_shape1[3].value, NUM_OF_CLASSESS], name="W_t1")
        b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
        conv_t1 = utils.conv2d_transpose_strided(conv8,
                                                 W_t1,
                                                 b_t1,
                                                 output_shape=tf.shape(
                                                     image_net["pool4"]))
        fuse_1 = tf.add(conv_t1, image_net["pool4"], name="fuse_1")

        deconv_shape2 = image_net["pool3"].get_shape()
        W_t2 = utils.weight_variable(
            [4, 4, deconv_shape2[3].value, deconv_shape1[3].value],
            name="W_t2")
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
        conv_t2 = utils.conv2d_transpose_strided(fuse_1,
                                                 W_t2,
                                                 b_t2,
                                                 output_shape=tf.shape(
                                                     image_net["pool3"]))
        fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")

        shape = tf.shape(image)
        deconv_shape3 = tf.stack(
            [shape[0], shape[1], shape[2], NUM_OF_CLASSESS])
        W_t3 = utils.weight_variable(
            [4, 4, NUM_OF_CLASSESS, deconv_shape2[3].value], name="W_t3")
        b_t3 = utils.bias_variable([NUM_OF_CLASSESS], name="b_t3")
        conv_t3 = utils.conv2d_transpose_strided(fuse_2,
                                                 W_t3,
                                                 b_t3,
                                                 output_shape=deconv_shape3,
                                                 stride=8)
        #conv_t3 = tf.layers.conv2d_transpose(fuse_2,NUM_OF_CLASSESS,16,strides=(8,8),padding='SAME')
        #conv_t3.set_shape([None,IMAGE_SIZE,IMAGE_SIZE,NUM_OF_CLASSESS])

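        # note: unlike the other examples on this page, conv_t3 is converted
        # to per-class probabilities here, so a downstream loss must not
        # apply softmax to it again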
        conv_t3 = tf.nn.softmax(conv_t3, axis=-1)
        annotation_pred = tf.argmax(conv_t3, axis=-1, name="prediction")

    return tf.expand_dims(annotation_pred, axis=-1), conv_t3, conv_final_layer
Example #9
def u_net(image, phase_train, train=True, reuse=False, dtype=t):
    with tf.variable_scope("u_net", reuse=reuse):
        w1_1 = utils.weight_variable([3, 3, int(image.shape[3]), 32], name="w1_1", dtype=dtype)
        b1_1 = utils.bias_variable([32], name="b1_1", dtype=dtype)
        conv1_1 = utils.conv2d_basic(image, w1_1, b1_1, dtype=dtype)
        relu1_1 = tf.nn.relu(conv1_1, name="relu1_1")
        w1_2 = utils.weight_variable([3, 3, 32, 32], name="w1_2", dtype=dtype)
        b1_2 = utils.bias_variable([32], name="b1_2", dtype=dtype)
        conv1_2 = utils.conv2d_basic(relu1_1, w1_2, b1_2, dtype=dtype)
        relu1_2 = tf.nn.relu(conv1_2, name="relu1_2")
        pool1 = utils.max_pool_2x2(relu1_2, dtype=dtype)
        bn1 = utils.batch_norm(pool1, pool1.get_shape()[3], phase_train, scope="bn1", is_train=train, dtype=dtype)

        w2_1 = utils.weight_variable([3, 3, 32, 64], name="w2_1", dtype=dtype)
        b2_1 = utils.bias_variable([64], name="b2_1", dtype=dtype)
        conv2_1 = utils.conv2d_basic(bn1, w2_1, b2_1, dtype=dtype)
        relu2_1 = tf.nn.relu(conv2_1, name="relu2_1")
        w2_2 = utils.weight_variable([3, 3, 64, 64], name="w2_2", dtype=dtype)
        b2_2 = utils.bias_variable([64], name="b2_2", dtype=dtype)
        conv2_2 = utils.conv2d_basic(relu2_1, w2_2, b2_2, dtype=dtype)
        relu2_2 = tf.nn.relu(conv2_2, name="relu2_2")
        pool2 = utils.max_pool_2x2(relu2_2, dtype=dtype)
        bn2 = utils.batch_norm(pool2, pool2.get_shape()[3], phase_train, scope="bn2", is_train=train, dtype=dtype)

        w3_1 = utils.weight_variable([3, 3, 64, 128], name="w3_1", dtype=dtype)
        b3_1 = utils.bias_variable([128], name="b3_1", dtype=dtype)
        conv3_1 = utils.conv2d_basic(bn2, w3_1, b3_1, dtype=dtype)
        relu3_1 = tf.nn.relu(conv3_1, name="relu3_1")
        w3_2 = utils.weight_variable([3, 3, 128, 128], name="w3_2", dtype=dtype)
        b3_2 = utils.bias_variable([128], name="b3_2", dtype=dtype)
        conv3_2 = utils.conv2d_basic(relu3_1, w3_2, b3_2, dtype=dtype)
        relu3_2 = tf.nn.relu(conv3_2, name="relu3_2")
        pool3 = utils.max_pool_2x2(relu3_2, dtype=dtype)
        bn3 = utils.batch_norm(pool3, pool3.get_shape()[3], phase_train, scope="bn3", is_train=train, dtype=dtype)

        w4_1 = utils.weight_variable([3, 3, 128, 256], name="w4_1", dtype=dtype)
        b4_1 = utils.bias_variable([256], name="b4_1", dtype=dtype)
        conv4_1 = utils.conv2d_basic(bn3, w4_1, b4_1, dtype=dtype)
        relu4_1 = tf.nn.relu(conv4_1, name="relu4_1")
        w4_2 = utils.weight_variable([3, 3, 256, 256], name="w4_2", dtype=dtype)
        b4_2 = utils.bias_variable([256], name="b4_2", dtype=dtype)
        conv4_2 = utils.conv2d_basic(relu4_1, w4_2, b4_2, dtype=dtype)
        relu4_2 = tf.nn.relu(conv4_2, name="relu4_2")
        bn4 = utils.batch_norm(relu4_2, relu4_2.get_shape()[3], phase_train, scope="bn4", is_train=train, dtype=dtype)

        W_t1 = utils.weight_variable([2, 2, 128, 256], name="W_t1", dtype=dtype)
        b_t1 = utils.bias_variable([128], name="b_t1", dtype=dtype)
        conv_t1 = utils.conv2d_transpose_strided(bn4, W_t1, b_t1, output_shape=tf.shape(relu3_2), dtype=dtype)
        merge1 = tf.concat([conv_t1, relu3_2], 3)
        w5_1 = utils.weight_variable([3, 3, 256, 128], name="w5_1", dtype=dtype)
        b5_1 = utils.bias_variable([128], name="b5_1", dtype=dtype)
        conv5_1 = utils.conv2d_basic(merge1, w5_1, b5_1, dtype=dtype)
        relu5_1 = tf.nn.relu(conv5_1, name="relu5_1")
        w5_2 = utils.weight_variable([3, 3, 128, 128], name="w5_2", dtype=dtype)
        b5_2 = utils.bias_variable([128], name="b5_2", dtype=dtype)
        conv5_2 = utils.conv2d_basic(relu5_1, w5_2, b5_2, dtype=dtype)
        relu5_2 = tf.nn.relu(conv5_2, name="relu5_2")
        bn5 = utils.batch_norm(relu5_2, relu5_2.get_shape()[3], phase_train, scope="bn5", is_train=train, dtype=dtype)

        W_t2 = utils.weight_variable([2, 2, 64, 128], name="W_t2", dtype=dtype)
        b_t2 = utils.bias_variable([64], name="b_t2", dtype=dtype)
        conv_t2 = utils.conv2d_transpose_strided(bn5, W_t2, b_t2, output_shape=tf.shape(relu2_2), dtype=dtype)
        merge2 = tf.concat([conv_t2, relu2_2], 3)
        w6_1 = utils.weight_variable([3, 3, 128, 64], name="w6_1", dtype=dtype)
        b6_1 = utils.bias_variable([64], name="b6_1", dtype=dtype)
        conv6_1 = utils.conv2d_basic(merge2, w6_1, b6_1, dtype=dtype)
        relu6_1 = tf.nn.relu(conv6_1, name="relu6_1")
        w6_2 = utils.weight_variable([3, 3, 64, 64], name="w6_2", dtype=dtype)
        b6_2 = utils.bias_variable([64], name="b6_2", dtype=dtype)
        conv6_2 = utils.conv2d_basic(relu6_1, w6_2, b6_2, dtype=dtype)
        relu6_2 = tf.nn.relu(conv6_2, name="relu6_2")
        bn6 = utils.batch_norm(relu6_2, relu6_2.get_shape()[3], phase_train, scope="bn6", is_train=train, dtype=dtype)

        W_t3 = utils.weight_variable([2, 2, 32, 64], name="W_t3", dtype=dtype)
        b_t3 = utils.bias_variable([32], name="b_t3", dtype=dtype)
        conv_t3 = utils.conv2d_transpose_strided(bn6, W_t3, b_t3, output_shape=tf.shape(relu1_2), dtype=dtype)
        merge3 = tf.concat([conv_t3, relu1_2], 3)
        w7_1 = utils.weight_variable([3, 3, 64, 32], name="w7_1", dtype=dtype)
        b7_1 = utils.bias_variable([32], name="b7_1", dtype=dtype)
        conv7_1 = utils.conv2d_basic(merge3, w7_1, b7_1, dtype=dtype)
        relu7_1 = tf.nn.relu(conv7_1, name="relu7_1")
        w7_2 = utils.weight_variable([3, 3, 32, 32], name="w7_2", dtype=dtype)
        b7_2 = utils.bias_variable([32], name="b7_2", dtype=dtype)
        conv7_2 = utils.conv2d_basic(relu7_1, w7_2, b7_2, dtype=dtype)
        relu7_2 = tf.nn.relu(conv7_2, name="relu7_2")
        bn7 = utils.batch_norm(relu7_2, relu7_2.get_shape()[3], phase_train, scope="bn7", is_train=train, dtype=dtype)

        w8 = utils.weight_variable([1, 1, 32, 1], name="w8", dtype=dtype)
        b8 = utils.bias_variable([1], name="b8", dtype=dtype)
        conv8 = utils.conv2d_basic(bn7, w8, b8, dtype=dtype)
        return conv8
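A hypothetical driver for u_net, assuming 256x256 RGB inputs, single-channel masks, and a sigmoid cross-entropy loss on the single-channel logits it returns (t is the module-level default dtype, e.g. tf.float32):

# hypothetical usage; shapes and loss choice are assumptions, not from the source
image_ph = tf.placeholder(tf.float32, [None, 256, 256, 3])
mask_ph = tf.placeholder(tf.float32, [None, 256, 256, 1])
phase_train = tf.placeholder(tf.bool)
logits = u_net(image_ph, phase_train)
loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=mask_ph, logits=logits))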
Example #10
def segmentation(image, keep_prob):
    """
    图像语义分割模型定义
    Parameters
    ----------
        image: 输入图像,每个通道的像素值为0到255
        keep_prob: 防止过拟合的dropout参数
    Returns
    -------

    """
    print("setting up vgg initialized conv layers ...")
    model_data = utils.get_model_data(FLAGS.model_dir)
    # weights of the VGG model
    weights = np.squeeze(model_data['layers'])
    # compute the mean pixel value and subtract it from the image
    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))
    processed_image = utils.process_image(image, mean_pixel)
    # shared variable scope: segmentation
    with tf.variable_scope("segmentation"):
        image_net = vgg_net(weights, processed_image)
        conv_final_layer = image_net["conv5_3"]

        pool5 = utils.max_pool_2x2(conv_final_layer)
        # replace the fully connected layers with convolutions
        W6 = utils.weight_variable([7, 7, 512, 4096], name="W6")
        b6 = utils.bias_variable([4096], name="b6")
        conv6 = utils.conv2d_basic(pool5, W6, b6)
        relu6 = tf.nn.relu(conv6, name="relu6")
        if FLAGS.debug:
            utils.add_activation_summary(relu6)
        # randomly drop some units to prevent overfitting
        relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)

        W7 = utils.weight_variable([1, 1, 4096, 4096], name="W7")
        b7 = utils.bias_variable([4096], name="b7")
        conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)
        relu7 = tf.nn.relu(conv7, name="relu7")
        if FLAGS.debug:
            utils.add_activation_summary(relu7)
        relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)

        W8 = utils.weight_variable([1, 1, 4096, NUM_OF_CLASSESS], name="W8")
        b8 = utils.bias_variable([NUM_OF_CLASSESS], name="b8")
        conv8 = utils.conv2d_basic(relu_dropout7, W8, b8)
        # annotation_pred1 = tf.argmax(conv8, dimension=3, name="prediction1")

        # now to upscale to actual image size
        deconv_shape1 = image_net["pool4"].get_shape()
        W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, NUM_OF_CLASSESS], name="W_t1")
        b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
        conv_t1 = utils.conv2d_transpose_strided(conv8, W_t1, b_t1, output_shape=tf.shape(image_net["pool4"]))
        fuse_1 = tf.add(conv_t1, image_net["pool4"], name="fuse_1")

        deconv_shape2 = image_net["pool3"].get_shape()
        W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name="W_t2")
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
        conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, output_shape=tf.shape(image_net["pool3"]))
        fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")

        shape = tf.shape(image)
        deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSESS])
        W_t3 = utils.weight_variable([16, 16, NUM_OF_CLASSESS, deconv_shape2[3].value], name="W_t3")
        b_t3 = utils.bias_variable([NUM_OF_CLASSESS], name="b_t3")
        conv_t3 = utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, output_shape=deconv_shape3, stride=8)
        # prediction layer
        annotation_pred = tf.argmax(conv_t3, axis=3, name="prediction")

    return tf.expand_dims(annotation_pred, axis=3), conv_t3
Example #11
def inference(image, keep_prob, train=False):
    """
    Semantic segmentation network definition
    :param image: input image. Should have values in range 0-255
    :param keep_prob:
    :return:
    """
    print("setting up vgg initialized conv layers ...")
    model_data = utils.get_model_data(FLAGS.model_dir, MODEL_URL)

    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))

    weights = np.squeeze(model_data['layers'])

    # accounts for the mean being subtracted from the image
    processed_image = utils.process_image(image, mean_pixel)

    with tf.variable_scope("inference"):
        image_net = vgg_net(weights, processed_image)
        conv_final_layer = image_net["conv5_3"]

        pool5 = utils.max_pool_2x2(conv_final_layer)

        W6 = utils.weight_variable([7, 7, 512, 4096], name="W6")
        b6 = utils.bias_variable([4096], name="b6")
        conv6 = utils.conv2d_basic(pool5, W6, b6)
        relu6 = tf.nn.relu(conv6, name="relu6")

        if FLAGS.debug:
            utils.add_activation_summary(relu6)
        if train:
            relu6 = tf.nn.dropout(relu6, keep_prob=keep_prob)

        W7 = utils.weight_variable([1, 1, 4096, 4096], name="W7")
        b7 = utils.bias_variable([4096], name="b7")
        conv7 = utils.conv2d_basic(relu6, W7, b7)
        relu7 = tf.nn.relu(conv7, name="relu7")

        if FLAGS.debug:
            utils.add_activation_summary(relu7)
        if train:
            relu7 = tf.nn.dropout(relu7, keep_prob=keep_prob)

        W8 = utils.weight_variable([1, 1, 4096, FLAGS.NUM_OF_CLASSES], name="W8")
        b8 = utils.bias_variable([FLAGS.NUM_OF_CLASSES], name="b8")
        conv8 = utils.conv2d_basic(relu7, W8, b8)
        # annotation_pred1 = tf.argmax(conv8, dimension=3, name="prediction1")

        # now to upscale to actual image size
        deconv_shape1 = image_net["pool4"].get_shape()
        W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, FLAGS.NUM_OF_CLASSES], name="W_t1")
        b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
        conv_t1 = utils.conv2d_transpose_strided(conv8, W_t1, b_t1, output_shape=tf.shape(image_net["pool4"]))
        fuse_1 = tf.add(conv_t1, image_net["pool4"], name="fuse_1")

        deconv_shape2 = image_net["pool3"].get_shape()
        W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name="W_t2")
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
        conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, output_shape=tf.shape(image_net["pool3"]))
        fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")

        shape = tf.shape(image)
        deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], FLAGS.NUM_OF_CLASSES])
        W_t3 = utils.weight_variable([16, 16, FLAGS.NUM_OF_CLASSES, deconv_shape2[3].value], name="W_t3")
        b_t3 = utils.bias_variable([FLAGS.NUM_OF_CLASSES], name="b_t3")
        conv_t3 = utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, output_shape=deconv_shape3, stride=8)

        annotation_pred = tf.argmax(conv_t3, axis=3, name="prediction")

    return tf.expand_dims(annotation_pred, axis=3), conv_t3
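The logits returned as the second value typically feed a sparse softmax cross-entropy loss against the integer annotation map. A minimal sketch, assuming an annotation placeholder of shape [batch, height, width, 1]:

# hypothetical training setup; placeholder names are assumptions
pred, logits = inference(image, keep_prob, train=True)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=tf.squeeze(annotation, axis=[3])))
train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)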
Example #12
    def _generator(self,
                   z,
                   dims,
                   train_phase,
                   activation=tf.nn.relu,
                   scope_name="generator"):
        N = len(dims)
        image_size = self.resized_image_size // (2**(N - 1))

        input_labels = tf.cond(
            train_phase,
            lambda: self.labels,
            lambda: tf.one_hot(
                self.class_num * tf.ones(shape=self.batch_size, dtype=tf.int32),
                self.num_cls))

        with tf.variable_scope(scope_name) as scope:
            W_ebd = utils.weight_variable([self.num_cls, self.z_dim],
                                          name='W_ebd')
            b_ebd = utils.bias_variable([self.z_dim], name='b_ebd')
            h_ebd = tf.matmul(input_labels, W_ebd) + b_ebd
            h_bnebd = utils.batch_norm(h_ebd,
                                       self.z_dim,
                                       train_phase,
                                       scope='gen_bnebd')
            h_ebd = activation(h_bnebd, name='h_bnebd')
            #h_ebd = activation(h_ebd, name='h_ebd')
            utils.add_activation_summary(h_ebd)

            h_zebd = tf.multiply(h_ebd, z)  # for TensorFlow >= 1.0
            # h_zebd = tf.mul(h_ebd, z)  # pre-1.0 name

            W_z = utils.weight_variable(
                [self.z_dim, dims[0] * image_size * image_size], name="W_z")
            b_z = utils.bias_variable([dims[0] * image_size * image_size],
                                      name="b_z")
            h_z = tf.matmul(h_zebd, W_z) + b_z
            h_z = tf.reshape(h_z, [-1, image_size, image_size, dims[0]])
            h_bnz = utils.batch_norm(h_z,
                                     dims[0],
                                     train_phase,
                                     scope="gen_bnz")
            h = activation(h_bnz, name='h_z')
            utils.add_activation_summary(h)

            for index in range(N - 2):
                image_size *= 2
                W = utils.weight_variable([4, 4, dims[index + 1], dims[index]],
                                          name="W_%d" % index)
                b = utils.bias_variable([dims[index + 1]], name="b_%d" % index)
                deconv_shape = tf.stack(
                    [tf.shape(h)[0], image_size, image_size, dims[index + 1]])
                h_conv_t = utils.conv2d_transpose_strided(
                    h, W, b, output_shape=deconv_shape)
                h_bn = utils.batch_norm(h_conv_t,
                                        dims[index + 1],
                                        train_phase,
                                        scope="gen_bn%d" % index)
                h = activation(h_bn, name='h_%d' % index)
                utils.add_activation_summary(h)

            image_size *= 2
            W_pred = utils.weight_variable([4, 4, dims[-1], dims[-2]],
                                           name="W_pred")
            b_pred = utils.bias_variable([dims[-1]], name="b_pred")
            deconv_shape = tf.stack(
                [tf.shape(h)[0], image_size, image_size, dims[-1]])
            h_conv_t = utils.conv2d_transpose_strided(
                h, W_pred, b_pred, output_shape=deconv_shape)
            pred_image = tf.nn.tanh(h_conv_t, name='pred_image')
            utils.add_activation_summary(pred_image)

        return pred_image  #, input_labels
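A hypothetical sampling call for this conditional generator, assuming the surrounding class defines z_dim, batch_size, and the label tensors referenced above:

# hypothetical usage inside the model class; the dims values are assumptions
z = tf.random_uniform([self.batch_size, self.z_dim], -1.0, 1.0)
fake_images = self._generator(z, dims=[512, 256, 128, 64, 3],
                              train_phase=self.train_phase)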