def main(argv=None):
    if argv is None:
        argv = sys.argv  # expects [script, gpu_id, image_path]
    environ["CUDA_VISIBLE_DEVICES"] = argv[1]  # pin the visible GPU
    resnet101_net = utils.get_model_data(
        '../pretrained_models/imagenet-resnet-101-dag.mat')
    weights = np.squeeze(resnet101_net['params'])

    img = imread(argv[2])
    mean = resnet101_net['meta'][0][0][2][0][0][2]
    resized_img = resize(img, (224, 224), preserve_range=True, mode='reflect')
    normalised_img = utils.process_image(resized_img, mean)

    input_tensor, keep_prob, is_training = _input()
    predicted_class, image_net = inference(input_tensor, weights, keep_prob,
                                           is_training)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    score, category = sess.run(
        [tf.reduce_max(image_net['prob'][0][0][0]), predicted_class],
        feed_dict={
            input_tensor:
            normalised_img[np.newaxis, :, :, :].astype(np.float32),
            keep_prob: 1.0,
            is_training: False
        })
    print('Category:', resnet101_net['meta'][0][0][1][0][0][1][0][category][0])
    print('Score:', score)
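
Both main() above and every inference() variant below lean on a small utils module that is not shown. Its mean-subtraction helper is assumed to be the usual one-liner; a minimal sketch (hypothetical reconstruction, consistent with how mean/mean_pixel is assembled in each example):

def process_image(image, mean_pixel):
    # Zero-centre the input by subtracting the per-channel/per-pixel mean.
    return image - mean_pixel
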
def inference(image, keep_prob, is_training):
    """
    Semantic segmentation network definition
    :param image: input image. Should have values in range 0-255
    :param keep_prob:
    :return:
    """
    print(">> Setting up resnet-101 pretrained layers ...")

    resnet101_model = utils.get_model_data(FLAGS.model_dir)
    weights = np.squeeze(resnet101_model['params'])

    mean_pixel_init = resnet101_model['meta'][0][0][2][0][0][2]
    mean_pixel = np.zeros((IMAGE_SIZE, IMAGE_SIZE, 6))
    mean_pixel[:, :, 0] = mean_pixel_init[:, :, 0]
    mean_pixel[:, :, 1] = mean_pixel_init[:, :, 1]
    mean_pixel[:, :, 2] = mean_pixel_init[:, :, 2]
    mean_pixel[:, :, 3] = np.ones((IMAGE_SIZE, IMAGE_SIZE)) * 97.639895122076
    mean_pixel[:, :, 4] = np.ones((IMAGE_SIZE, IMAGE_SIZE)) * 45.548982715963994
    mean_pixel[:, :, 5] = np.ones((IMAGE_SIZE, IMAGE_SIZE)) * 37.69138

    normalised_img = utils.process_image(image, mean_pixel)

    with tf.variable_scope("inference"):
        net = resnet101_net(normalised_img, weights, keep_prob, is_training)
        last_layer = net["res5c_relu"]

        fc_filter = utils.weight_variable([1, 1, 2048, NUM_OF_CLASSES], name="fc_filter")
        fc_bias = utils.bias_variable([NUM_OF_CLASSES], name="fc_bias")
        fc = tf.nn.bias_add(tf.nn.conv2d(last_layer, fc_filter, strides=[1, 1, 1, 1], padding="SAME"), fc_bias, name='fc')

        # now to upscale to actual image size
        deconv_shape1 = net["res4b22_relu"].get_shape()
        W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, NUM_OF_CLASSES], name="W_t1")
        b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
        conv_t1 = utils.conv2d_transpose_strided(fc, W_t1, b_t1, output_shape=tf.shape(net["res4b22_relu"]))
        fuse_1 = tf.add(conv_t1, net["res4b22_relu"], name="fuse_1")

        deconv_shape2 = net["res3b3_relu"].get_shape()
        W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name="W_t2")
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
        conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, output_shape=tf.shape(net["res3b3_relu"]))
        fuse_2 = tf.add(conv_t2, net["res3b3_relu"], name="fuse_2")

        deconv_shape3 = net["res2c_relu"].get_shape()
        W_t3 = utils.weight_variable([4, 4, deconv_shape3[3].value, deconv_shape2[3].value], name="W_t3")
        b_t3 = utils.bias_variable([deconv_shape3[3].value], name="b_t3")
        conv_t3 = utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, output_shape=tf.shape(net["res2c_relu"]))
        fuse_3 = tf.add(conv_t3, net["res2c_relu"], name="fuse_3")

        shape = tf.shape(image)
        deconv_shape4 = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSES])
        W_t4 = utils.weight_variable([8, 8, NUM_OF_CLASSES, deconv_shape3[3].value], name="W_t4")
        b_t4 = utils.bias_variable([NUM_OF_CLASSES], name="b_t4")
        conv_t4 = utils.conv2d_transpose_strided(fuse_3, W_t4, b_t4, output_shape=deconv_shape4, stride=4)

        annotation_pred = tf.argmax(conv_t4, axis=3, name="prediction")

    return tf.expand_dims(annotation_pred, axis=3), conv_t4
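
Every decoder in these examples calls utils.conv2d_transpose_strided for learned upsampling. The helper is not shown; this is a minimal sketch under the assumption that it wraps tf.nn.conv2d_transpose (stride 2 by default, doubling the spatial size when no output_shape is given) followed by a bias add:

def conv2d_transpose_strided(x, W, b, output_shape=None, stride=2):
    if output_shape is None:
        # Default to doubling height/width; W's third dim is the output depth.
        output_shape = x.get_shape().as_list()
        output_shape[1] *= 2
        output_shape[2] *= 2
        output_shape[3] = W.get_shape().as_list()[2]
    conv = tf.nn.conv2d_transpose(x, W, output_shape,
                                  strides=[1, stride, stride, 1],
                                  padding="SAME")
    return tf.nn.bias_add(conv, b)

Note the filter layout: for tf.nn.conv2d_transpose, W is [height, width, out_channels, in_channels], which is why the examples build e.g. W_t4 as [8, 8, NUM_OF_CLASSES, deconv_shape3[3].value].
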
def inference(image, keep_prob):
    print("setting up vgg initialized conv layers ...")
    model_data = utils.get_model_data(FLAGS.model_dir)

    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))
    mean_pixel = np.append(mean_pixel, [97.6398951221, 45.548982716, 31.4374])
    weights = np.squeeze(model_data['layers'])

    processed_image = utils.process_image(image, mean_pixel)

    with tf.variable_scope("inference"):
        image_net = vgg_net(weights, processed_image)
        conv_final_layer = image_net["conv5_3"]

        pool5 = utils.max_pool_2x2(conv_final_layer)

        W6 = utils.weight_variable([7, 7, 512, 4096], name="W6")
        b6 = utils.bias_variable([4096], name="b6")
        conv6 = utils.conv2d_basic(pool5, W6, b6)
        relu6 = tf.nn.relu(conv6, name="relu6")

        relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)

        W7 = utils.weight_variable([1, 1, 4096, 4096], name="W7")
        b7 = utils.bias_variable([4096], name="b7")
        conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)
        relu7 = tf.nn.relu(conv7, name="relu7")

        relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)

        W8 = utils.weight_variable([1, 1, 4096, NUM_OF_CLASSESS], name="W8")
        b8 = utils.bias_variable([NUM_OF_CLASSESS], name="b8")
        conv8 = utils.conv2d_basic(relu_dropout7, W8, b8)

        deconv_shape1 = image_net["pool4"].get_shape()
        W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, NUM_OF_CLASSESS], name="W_t1")
        b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
        conv_t1 = utils.conv2d_transpose_strided(conv8, W_t1, b_t1, output_shape=tf.shape(image_net["pool4"]))
        fuse_1 = tf.add(conv_t1, image_net["pool4"], name="fuse_1")

        deconv_shape2 = image_net["pool3"].get_shape()
        W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name="W_t2")
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
        conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, output_shape=tf.shape(image_net["pool3"]))
        fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")

        shape = tf.shape(image)
        deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSESS])
        W_t3 = utils.weight_variable([16, 16, NUM_OF_CLASSESS, deconv_shape2[3].value], name="W_t3")
        b_t3 = utils.bias_variable([NUM_OF_CLASSESS], name="b_t3")
        conv_t3 = utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, output_shape=deconv_shape3, stride=8)

        annotation_pred = tf.argmax(conv_t3, axis=3, name="prediction")

    return tf.expand_dims(annotation_pred, axis=3), conv_t3
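
utils.weight_variable and utils.bias_variable appear in every example. A plausible sketch (assumed, since the utils module is not shown): truncated-normal weights and zero biases, registered via tf.get_variable when a name is supplied so they live in the surrounding variable scope:

def weight_variable(shape, stddev=0.02, name=None):
    initial = tf.truncated_normal(shape, stddev=stddev)
    if name is None:
        return tf.Variable(initial)
    return tf.get_variable(name, initializer=initial)

def bias_variable(shape, name=None):
    initial = tf.constant(0.0, shape=shape)
    if name is None:
        return tf.Variable(initial)
    return tf.get_variable(name, initializer=initial)
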
Example #4
def inference(image, keep_prob, is_training):
    mean_pixel = np.zeros((IMAGE_SIZE, IMAGE_SIZE, 3))
    mean_pixel[:, :, 0] = np.ones((IMAGE_SIZE, IMAGE_SIZE)) * 135.21788372620313
    mean_pixel[:, :, 1] = np.ones((IMAGE_SIZE, IMAGE_SIZE)) * 145.12055858608417
    mean_pixel[:, :, 2] = np.ones((IMAGE_SIZE, IMAGE_SIZE)) * 135.06357015876557

    normalised_img = utils.process_image(image, mean_pixel)

    with tf.variable_scope("inference"):
        net = encoding_net(normalised_img, keep_prob, is_training)
        last_layer = net["pool3"]

        fc_filter = utils.weight_variable([1, 1, C_3, NUM_OF_CLASSES], name="fc_filter")
        fc_bias = utils.bias_variable([NUM_OF_CLASSES], name="fc_bias")
        fc = tf.nn.bias_add(tf.nn.conv2d(last_layer, fc_filter, strides=[1, 1, 1, 1], padding="SAME"), fc_bias, name='fc')

        # now to upscale to actual image size
        shape = tf.shape(image)
        deconv_shape1 = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSES])
        # NOTE: `init` was undefined in the original snippet; a
        # truncated-normal initializer is assumed here.
        init = tf.truncated_normal_initializer(stddev=0.02)
        W_t1 = tf.get_variable(name='W_t1', initializer=init,
                               shape=(32, 32, NUM_OF_CLASSES, NUM_OF_CLASSES))
        b_t1 = tf.get_variable(name='b_t1', initializer=init,
                               shape=[NUM_OF_CLASSES])
        conv_t1 = utils.conv2d_transpose_strided(fc, W_t1, b_t1, output_shape=deconv_shape1, stride=8)

        """ deconv_shape1 = net["res4b22_relu"].get_shape()
        W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, NUM_OF_CLASSES], name="W_t1")
        b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
        conv_t1 = utils.conv2d_transpose_strided(fc, W_t1, b_t1, output_shape=tf.shape(net["res4b22_relu"]))
        fuse_1 = tf.add(conv_t1, net["res4b22_relu"], name="fuse_1")

        deconv_shape2 = net["res3b3_relu"].get_shape()
        W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name="W_t2")
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
        conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, output_shape=tf.shape(net["res3b3_relu"]))
        fuse_2 = tf.add(conv_t2, net["res3b3_relu"], name="fuse_2")

        shape = tf.shape(image)
        deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSES])
        W_t3 = utils.weight_variable([16, 16, NUM_OF_CLASSES, deconv_shape2[3].value], name="W_t3")
        b_t3 = utils.bias_variable([NUM_OF_CLASSES], name="b_t3")
        conv_t3 = utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, output_shape=deconv_shape3, stride=8) """

        annotation_pred = tf.argmax(conv_t1, axis=3, name="prediction")

    return tf.expand_dims(annotation_pred, axis=3), conv_t1, net
def inference(image, keep_prob):
    """
    Semantic segmentation network definition
    :param image: input image. Should have values in range 0-255
    :param keep_prob:
    :return:
    """
    print("setting up vgg initialized conv layers ...")
    model_data = utils.get_model_data(FLAGS.model_dir)

    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))
    mean_pixel = np.append(mean_pixel, [
        30.6986130799, 284.97018, 106.314329243, 124.171918054, 109.260369903,
        182.615729022, 75.1762766769, 84.3529895303, 100.699252985,
        66.8837693324, 98.6030061849, 133.955897217
    ])
    weights = np.squeeze(model_data['layers'])

    processed_image = utils.process_image(image, mean_pixel)

    with tf.variable_scope("inference"):
        image_net = vgg_net(weights, processed_image)
        conv_final_layer = image_net["conv5_3"]

        pool5 = utils.max_pool_2x2(conv_final_layer)

        W6 = utils.weight_variable([7, 7, 512, 4096], name="W6")
        b6 = utils.bias_variable([4096], name="b6")
        conv6 = utils.conv2d_basic(pool5, W6, b6)
        relu6 = tf.nn.relu(conv6, name="relu6")
        if FLAGS.debug:
            utils.add_activation_summary(relu6)
        relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)

        W7 = utils.weight_variable([1, 1, 4096, 4096], name="W7")
        b7 = utils.bias_variable([4096], name="b7")
        conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)
        relu7 = tf.nn.relu(conv7, name="relu7")
        if FLAGS.debug:
            utils.add_activation_summary(relu7)
        relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)

        W8 = utils.weight_variable([1, 1, 4096, NUM_OF_CLASSESS], name="W8")
        b8 = utils.bias_variable([NUM_OF_CLASSESS], name="b8")
        conv8 = utils.conv2d_basic(relu_dropout7, W8, b8)
        # annotation_pred1 = tf.argmax(conv8, dimension=3, name="prediction1")

        # now to upscale to actual image size
        deconv_shape1 = image_net["pool4"].get_shape()
        W_t1 = utils.weight_variable(
            [4, 4, deconv_shape1[3].value, NUM_OF_CLASSESS], name="W_t1")
        b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
        conv_t1 = utils.conv2d_transpose_strided(conv8,
                                                 W_t1,
                                                 b_t1,
                                                 output_shape=tf.shape(
                                                     image_net["pool4"]))
        fuse_1 = tf.add(conv_t1, image_net["pool4"], name="fuse_1")

        deconv_shape2 = image_net["pool3"].get_shape()
        W_t2 = utils.weight_variable(
            [4, 4, deconv_shape2[3].value, deconv_shape1[3].value],
            name="W_t2")
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
        conv_t2 = utils.conv2d_transpose_strided(fuse_1,
                                                 W_t2,
                                                 b_t2,
                                                 output_shape=tf.shape(
                                                     image_net["pool3"]))
        fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")

        shape = tf.shape(image)
        deconv_shape3 = tf.stack(
            [shape[0], shape[1], shape[2], NUM_OF_CLASSESS])
        W_t3 = utils.weight_variable(
            [16, 16, NUM_OF_CLASSESS, deconv_shape2[3].value], name="W_t3")
        b_t3 = utils.bias_variable([NUM_OF_CLASSESS], name="b_t3")
        conv_t3 = utils.conv2d_transpose_strided(fuse_2,
                                                 W_t3,
                                                 b_t3,
                                                 output_shape=deconv_shape3,
                                                 stride=8)

        annotation_pred = tf.argmax(conv_t3, axis=3, name="prediction")

    return tf.expand_dims(annotation_pred, axis=3), conv_t3
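
The VGG variants also use utils.conv2d_basic and utils.max_pool_2x2. Assuming the conventional definitions (a SAME-padded stride-1 convolution plus bias, and a 2x2/stride-2 max pool), a minimal sketch:

def conv2d_basic(x, W, bias):
    conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME")
    return tf.nn.bias_add(conv, bias)

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding="SAME")
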
def inference(image, keep_prob, is_training):
    """
    Semantic segmentation network definition
    :param image: input image. Should have values in range 0-255
    :param keep_prob:
    :return:
    """
    print(">> Setting up resnet-101 pretrained layers ...")

    resnet101_model = utils.get_model_data(FLAGS.model_dir)
    weights = np.squeeze(resnet101_model['params'])

    mean_pixel_init = resnet101_model['meta'][0][0][2][0][0][2]
    mean_pixel = np.zeros((IMAGE_SIZE, IMAGE_SIZE, 15))
    mean_pixel[:, :, 0] = mean_pixel_init[:, :, 0]
    mean_pixel[:, :, 1] = mean_pixel_init[:, :, 1]
    mean_pixel[:, :, 2] = mean_pixel_init[:, :, 2]
    mean_pixel[:, :, 3] = np.ones(
        (IMAGE_SIZE, IMAGE_SIZE)) * 30.69861307993539  # nDSM
    mean_pixel[:, :, 4] = np.ones((IMAGE_SIZE, IMAGE_SIZE)) * 284.9702  # DSM
    mean_pixel[:, :, 5] = np.ones(
        (IMAGE_SIZE, IMAGE_SIZE)) * 136.495572072  # A
    mean_pixel[:, :, 6] = np.ones((IMAGE_SIZE, IMAGE_SIZE)) * -0.0827414  # azi
    mean_pixel[:, :, 7] = np.ones(
        (IMAGE_SIZE, IMAGE_SIZE)) * 106.472206683  # B
    mean_pixel[:, :, 8] = np.ones((IMAGE_SIZE, IMAGE_SIZE)) * 1.12959  # ele
    mean_pixel[:, :, 9] = np.ones(
        (IMAGE_SIZE, IMAGE_SIZE)) * 1.74663508206  # entpy
    mean_pixel[:, :, 10] = np.ones(
        (IMAGE_SIZE, IMAGE_SIZE)) * 2.01737815343  # entpy2
    mean_pixel[:, :, 11] = np.ones(
        (IMAGE_SIZE, IMAGE_SIZE)) * 91.7477946018  # L
    mean_pixel[:, :, 12] = np.ones(
        (IMAGE_SIZE, IMAGE_SIZE)) * 4.82043402578  # ndvi
    mean_pixel[:, :, 13] = np.ones(
        (IMAGE_SIZE, IMAGE_SIZE)) * 79.7208191052  # sat
    mean_pixel[:, :, 14] = np.ones(
        (IMAGE_SIZE, IMAGE_SIZE)) * 25.6281567428  # texton

    normalised_img = utils.process_image(image, mean_pixel)

    with tf.variable_scope("inference"):
        net = resnet101_net(normalised_img, weights, keep_prob, is_training)
        last_layer = net["res5c_relu"]

        fc_filter = utils.weight_variable([1, 1, 2048, NUM_OF_CLASSES],
                                          name="fc_filter")
        fc_bias = utils.bias_variable([NUM_OF_CLASSES], name="fc_bias")
        fc = tf.nn.bias_add(tf.nn.conv2d(last_layer,
                                         fc_filter,
                                         strides=[1, 1, 1, 1],
                                         padding="SAME"),
                            fc_bias,
                            name='fc')

        # now to upscale to actual image size
        deconv_shape1 = net["res4b22_relu"].get_shape()
        W_t1 = utils.weight_variable(
            [4, 4, deconv_shape1[3].value, NUM_OF_CLASSES], name="W_t1")
        b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
        conv_t1 = utils.conv2d_transpose_strided(fc,
                                                 W_t1,
                                                 b_t1,
                                                 output_shape=tf.shape(
                                                     net["res4b22_relu"]))
        fuse_1 = tf.add(conv_t1, net["res4b22_relu"], name="fuse_1")

        deconv_shape2 = net["res3b3_relu"].get_shape()
        W_t2 = utils.weight_variable(
            [4, 4, deconv_shape2[3].value, deconv_shape1[3].value],
            name="W_t2")
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
        conv_t2 = utils.conv2d_transpose_strided(fuse_1,
                                                 W_t2,
                                                 b_t2,
                                                 output_shape=tf.shape(
                                                     net["res3b3_relu"]))
        fuse_2 = tf.add(conv_t2, net["res3b3_relu"], name="fuse_2")

        deconv_shape3 = net["res2c_relu"].get_shape()
        W_t3 = utils.weight_variable(
            [4, 4, deconv_shape3[3].value, deconv_shape2[3].value],
            name="W_t3")
        b_t3 = utils.bias_variable([deconv_shape3[3].value], name="b_t3")
        conv_t3 = utils.conv2d_transpose_strided(fuse_2,
                                                 W_t3,
                                                 b_t3,
                                                 output_shape=tf.shape(
                                                     net["res2c_relu"]))
        fuse_3 = tf.add(conv_t3, net["res2c_relu"], name="fuse_3")

        shape = tf.shape(image)
        deconv_shape4 = tf.stack(
            [shape[0], shape[1], shape[2], NUM_OF_CLASSES])
        W_t4 = utils.weight_variable(
            [8, 8, NUM_OF_CLASSES, deconv_shape3[3].value], name="W_t4")
        b_t4 = utils.bias_variable([NUM_OF_CLASSES], name="b_t4")
        conv_t4 = utils.conv2d_transpose_strided(fuse_3,
                                                 W_t4,
                                                 b_t4,
                                                 output_shape=deconv_shape4,
                                                 stride=4)

        annotation_pred = tf.argmax(conv_t4, axis=3, name="prediction")

    return tf.expand_dims(annotation_pred, axis=3), conv_t4
def inference(image, keep_prob):
    """
    Semantic segmentation network definition
    :param image: input image. Should have values in range 0-255
    :param keep_prob:
    :return:
    """
    print("setting up vgg pretrained conv layers ...")
    model_data = utils.get_model_data(FLAGS.model_dir)

    mean_pixel_init = model_data['normalization'][0][0][0]
    # mean_pixel_init = np.mean(mean, axis=(0, 1))
    mean_pixel = np.zeros((IMAGE_SIZE, IMAGE_SIZE, 6))
    mean_pixel[:, :, 0] = mean_pixel_init[:, :, 0]
    mean_pixel[:, :, 1] = mean_pixel_init[:, :, 1]
    mean_pixel[:, :, 2] = mean_pixel_init[:, :, 2]
    mean_pixel[:, :, 3] = np.ones((IMAGE_SIZE, IMAGE_SIZE)) * 97.639895122076
    mean_pixel[:, :, 4] = np.ones(
        (IMAGE_SIZE, IMAGE_SIZE)) * 45.548982715963994
    mean_pixel[:, :, 5] = np.ones((IMAGE_SIZE, IMAGE_SIZE)) * 37.69138
    weights = np.squeeze(model_data['layers'])

    processed_image = utils.process_image(image, mean_pixel)

    with tf.variable_scope("inference"):
        image_net = vgg_net(weights, processed_image)
        conv_final_layer = image_net["conv5_3"]

        pool5 = utils.max_pool_2x2(conv_final_layer)

        W6 = utils.weight_variable([7, 7, 512, 4096], name="W6")
        b6 = utils.bias_variable([4096], name="b6")
        conv6 = utils.conv2d_basic(pool5, W6, b6)
        relu6 = tf.nn.relu(conv6, name="relu6")
        if FLAGS.debug:
            utils.add_activation_summary(relu6)
        relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)

        W7 = utils.weight_variable([1, 1, 4096, 4096], name="W7")
        b7 = utils.bias_variable([4096], name="b7")
        conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)
        relu7 = tf.nn.relu(conv7, name="relu7")
        if FLAGS.debug:
            utils.add_activation_summary(relu7)
        relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)

        W8 = utils.weight_variable([1, 1, 4096, NUM_OF_CLASSES], name="W8")
        b8 = utils.bias_variable([NUM_OF_CLASSES], name="b8")
        conv8 = utils.conv2d_basic(relu_dropout7, W8, b8)
        annotation_pred1 = tf.argmax(conv8, axis=3, name="prediction1")

        # now to upscale to actual image size
        deconv_shape1 = image_net["pool4"].get_shape()
        W_t1 = utils.weight_variable(
            [4, 4, deconv_shape1[3].value, NUM_OF_CLASSES], name="W_t1")
        b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
        conv_t1 = utils.conv2d_transpose_strided(conv8,
                                                 W_t1,
                                                 b_t1,
                                                 output_shape=tf.shape(
                                                     image_net["pool4"]))
        fuse_1 = tf.add(conv_t1, image_net["pool4"], name="fuse_1")

        deconv_shape2 = image_net["pool3"].get_shape()
        W_t2 = utils.weight_variable(
            [4, 4, deconv_shape2[3].value, deconv_shape1[3].value],
            name="W_t2")
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
        conv_t2 = utils.conv2d_transpose_strided(fuse_1,
                                                 W_t2,
                                                 b_t2,
                                                 output_shape=tf.shape(
                                                     image_net["pool3"]))
        fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")

        shape = tf.shape(image)
        deconv_shape3 = tf.stack(
            [shape[0], shape[1], shape[2], NUM_OF_CLASSES])
        W_t3 = utils.weight_variable(
            [16, 16, NUM_OF_CLASSES, deconv_shape2[3].value], name="W_t3")
        b_t3 = utils.bias_variable([NUM_OF_CLASSES], name="b_t3")
        conv_t3 = utils.conv2d_transpose_strided(fuse_2,
                                                 W_t3,
                                                 b_t3,
                                                 output_shape=deconv_shape3,
                                                 stride=8)

        annotation_pred = tf.argmax(conv_t3, axis=3, name="prediction")

    return tf.expand_dims(annotation_pred, axis=3), conv_t3
Example #8
def inference(image, keep_prob):
    n_filters_first_conv = 48
    n_pool = 4
    growth_rate = 12
    n_layers_per_block = [5] * 11
    n_classes = 6
    mean_pixel = np.array([
        120.895239985, 81.9300816234, 81.2898876188, 66.8837693324,
        30.6986130799, 284.97018
    ])
    processed_image = utils.process_image(image, mean_pixel)
    print(np.shape(processed_image))
    W_first = utils.weight_variable(
        [3, 3,
         processed_image.get_shape().as_list()[3], n_filters_first_conv],
        name='W_first')
    b_first = utils.bias_variable([n_filters_first_conv], name='b_first')
    conv_first = utils.conv2d_basic(processed_image, W_first, b_first)
    stack = tf.nn.relu(conv_first)
    n_filters = n_filters_first_conv
    print("Before Downsample")
    print(np.shape(stack))
    #####################
    # Downsampling path #
    #####################

    skip_connection_list = []
    for i in range(n_pool):
        # Dense Block
        for j in range(n_layers_per_block[i]):
            l = BN_ReLU_Conv(inputs=stack,
                             n_filters=growth_rate,
                             keep_prob=keep_prob,
                             name="downsample_" + str(i) + "_" + str(j))
            stack = tf.concat([stack, l], axis=3)
            n_filters += growth_rate
        skip_connection_list.append(stack)
        stack = Transition_Down(inputs=stack,
                                n_filters=n_filters,
                                keep_prob=keep_prob,
                                name='downsample_stack_' + str(i))

    skip_connection_list = skip_connection_list[::-1]

    #####################
    #     Bottleneck    #
    #####################
    block_to_upsample = []
    for j in range(n_layers_per_block[n_pool]):
        l = BN_ReLU_Conv(inputs=stack,
                         n_filters=growth_rate,
                         keep_prob=keep_prob,
                         name="bottleneck_" + str(j))
        block_to_upsample.append(l)
        stack = tf.concat([stack, l], axis=3)

    #######################
    #   Upsampling path   #
    #######################

    for i in range(n_pool):
        n_filters_keep = growth_rate * n_layers_per_block[n_pool + i]
        stack = Transition_Up(skip_connection=skip_connection_list[i],
                              block_to_upsample=block_to_upsample,
                              n_filters_keep=n_filters_keep,
                              name="upsample_stack_" + str(i))

        # Dense Block
        block_to_upsample = []
        for j in range(n_layers_per_block[n_pool + i + 1]):
            l = BN_ReLU_Conv(inputs=stack,
                             n_filters=growth_rate,
                             keep_prob=keep_prob,
                             name="upsample_" + str(i) + "_" + str(j))
            block_to_upsample.append(l)
            stack = tf.concat([stack, l], axis=3)

    W_last = utils.weight_variable(
        [1, 1, stack.get_shape().as_list()[3], n_classes], name="W_last")
    b_last = utils.bias_variable([n_classes], name="b_last")
    conv_last = utils.conv2d_basic(stack, W_last, b_last)
    print("Conv_last")
    print(np.shape(conv_last))
    annotation_pred = tf.argmax(conv_last, axis=3, name="prediction")
    return tf.expand_dims(annotation_pred, axis=3), conv_last
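
The Tiramisu (FC-DenseNet) example above depends on three helpers that are not shown. As one illustration, BN_ReLU_Conv is assumed to be the standard FC-DenseNet layer (batch norm, ReLU, 3x3 convolution, dropout); a hypothetical sketch, with the batch-norm training flag omitted for brevity:

def BN_ReLU_Conv(inputs, n_filters, keep_prob, name, filter_size=3):
    # BatchNorm -> ReLU -> conv -> dropout, the basic FC-DenseNet layer.
    with tf.variable_scope(name):
        bn = tf.layers.batch_normalization(inputs, name="bn")
        act = tf.nn.relu(bn)
        W = utils.weight_variable(
            [filter_size, filter_size,
             inputs.get_shape().as_list()[3], n_filters], name="W")
        b = utils.bias_variable([n_filters], name="b")
        conv = utils.conv2d_basic(act, W, b)
        return tf.nn.dropout(conv, keep_prob=keep_prob)

Transition_Down and Transition_Up would follow the same paper: a 1x1 convolution plus pooling on the way down, and a strided transposed convolution of the dense block concatenated with the skip connection on the way up.
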
Example #9
def inference(image, keep_prob):
    """
    Semantic segmentation network definition
    :param image: input image. Should have values in range 0-255
    :param keep_prob:
    :return:
    """
    print("setting up vgg pretrained conv layers ...")
    model_data = utils.get_model_data(FLAGS.model_dir)

    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))
    mean_pixel = np.append(mean_pixel, [30.69861307993539, 284.9702])
    # mean_pixel = np.array([120.8952399852595, 81.93008162338278, 81.28988761879855, 30.69861307993539, 284.9702])
    weights = np.squeeze(model_data['layers'])

    processed_image = utils.process_image(image, mean_pixel)

    with tf.variable_scope("inference"):
        net = vgg_net(weights, processed_image)
        conv_final_layer = net["conv5_3"]

        pool5 = utils.max_pool_2x2(conv_final_layer)

        W6 = utils.weight_variable([7, 7, 512, 4096], name="W6")
        b6 = utils.bias_variable([4096], name="b6")
        conv6 = utils.conv2d_basic(pool5, W6, b6)
        relu6 = tf.nn.relu(conv6, name="relu6")
        if FLAGS.debug:
            utils.add_activation_summary(relu6)
        relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)

        W7 = utils.weight_variable([1, 1, 4096, 4096], name="W7")
        b7 = utils.bias_variable([4096], name="b7")
        conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)
        relu7 = tf.nn.relu(conv7, name="relu7")
        if FLAGS.debug:
            utils.add_activation_summary(relu7)
        relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)

        W8 = utils.weight_variable([1, 1, 4096, NUM_OF_CLASSES], name="W8")
        b8 = utils.bias_variable([NUM_OF_CLASSES], name="b8")
        conv8 = utils.conv2d_basic(relu_dropout7, W8, b8)
        annotation_pred1 = tf.argmax(conv8, axis=3, name="prediction1")

        # now to upscale to actual image size
        deconv_shape1 = net["pool4"].get_shape()
        W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, NUM_OF_CLASSES], name="W_t1")
        b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
        conv_t1 = utils.conv2d_transpose_strided(conv8, W_t1, b_t1, output_shape=tf.shape(net["pool4"]))
        fuse_1 = tf.add(conv_t1, net["pool4"], name="fuse_1")

        deconv_shape2 = net["pool3"].get_shape()
        W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name="W_t2")
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
        conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, output_shape=tf.shape(net["pool3"]))
        fuse_2 = tf.add(conv_t2, net["pool3"], name="fuse_2")

        deconv_shape3 = net["pool2"].get_shape()
        W_t3 = utils.weight_variable([4, 4, deconv_shape3[3].value, deconv_shape2[3].value], name="W_t3")
        b_t3 = utils.bias_variable([deconv_shape3[3].value], name="b_t3")
        conv_t3 = utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, output_shape=tf.shape(net["pool2"]))
        fuse_3 = tf.add(conv_t3, net["pool2"], name="fuse_3")

        shape = tf.shape(image)
        deconv_shape4 = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSES])
        W_t4 = utils.weight_variable([8, 8, NUM_OF_CLASSES, deconv_shape3[3].value], name="W_t4")
        b_t4 = utils.bias_variable([NUM_OF_CLASSES], name="b_t4")
        conv_t4 = utils.conv2d_transpose_strided(fuse_3, W_t4, b_t4, output_shape=deconv_shape4, stride=4)

        annotation_pred = tf.argmax(conv_t4, axis=3, name="prediction")

    return tf.expand_dims(annotation_pred, axis=3), conv_t4