def inference(image, keep_prob):
    """
    Semantic segmentation network definition (ResNet-101 backbone)
    :param image: input image. Should have values in range 0-255
    :param keep_prob: dropout keep probability passed to the backbone
    :return: predicted annotation map (argmax over classes) and the final logits
    """
    print(">> Setting up resnet-101 pretrained layers ...")
    resnet101_model = utils.get_model_data(FLAGS.model_dir)
    weights = np.squeeze(resnet101_model['params'])
    mean_pixel = resnet101_model['meta'][0][0][2][0][0][2]
    normalised_img = utils.process_image(image, mean_pixel)

    with tf.variable_scope("inference"):
        net = resnet101_net(normalised_img, weights, keep_prob)
        last_layer = net["res5c_relu"]

        # 1x1 convolution mapping the 2048 backbone channels to class scores
        fc_filter = utils.weight_variable([1, 1, 2048, NUM_OF_CLASSES],
                                          name="fc_filter")
        fc_bias = utils.bias_variable([NUM_OF_CLASSES], name="fc_bias")
        fc = tf.nn.bias_add(tf.nn.conv2d(last_layer, fc_filter,
                                         strides=[1, 1, 1, 1], padding="SAME"),
                            fc_bias, name='fc')

        # now to upscale to actual image size:
        # 2x upsample and fuse with the stride-16 feature map (res4b22_relu)
        deconv_shape1 = net["res4b22_relu"].get_shape()
        W_t1 = utils.weight_variable(
            [4, 4, deconv_shape1[3].value, NUM_OF_CLASSES], name="W_t1")
        b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
        conv_t1 = utils.conv2d_transpose_strided(
            fc, W_t1, b_t1, output_shape=tf.shape(net["res4b22_relu"]))
        fuse_1 = tf.add(conv_t1, net["res4b22_relu"], name="fuse_1")

        # 2x upsample again and fuse with the stride-8 feature map (res3b3_relu)
        deconv_shape2 = net["res3b3_relu"].get_shape()
        W_t2 = utils.weight_variable(
            [4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name="W_t2")
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
        conv_t2 = utils.conv2d_transpose_strided(
            fuse_1, W_t2, b_t2, output_shape=tf.shape(net["res3b3_relu"]))
        fuse_2 = tf.add(conv_t2, net["res3b3_relu"], name="fuse_2")

        # final 8x upsample back to the input resolution
        shape = tf.shape(image)
        deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSES])
        W_t3 = utils.weight_variable(
            [16, 16, NUM_OF_CLASSES, deconv_shape2[3].value], name="W_t3")
        b_t3 = utils.bias_variable([NUM_OF_CLASSES], name="b_t3")
        conv_t3 = utils.conv2d_transpose_strided(
            fuse_2, W_t3, b_t3, output_shape=deconv_shape3, stride=8)

        annotation_pred = tf.argmax(conv_t3, axis=3, name="prediction")

    return tf.expand_dims(annotation_pred, axis=3), conv_t3
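
# Both the ResNet variant above and the VGG variant below lean on
# utils.conv2d_transpose_strided for learned upsampling. A minimal sketch of
# what that helper is assumed to do (its actual implementation is not shown
# in this section): a transposed convolution, stride 2 by default, followed
# by a bias add.
def conv2d_transpose_strided(x, W, b, output_shape=None, stride=2):
    # Transposed (fractionally strided) convolution: upsamples x by `stride`.
    if output_shape is None:
        # Default assumption: double the spatial dims; output channels come
        # from the filter, whose shape is [h, w, out_channels, in_channels].
        output_shape = x.get_shape().as_list()
        output_shape[1] *= 2
        output_shape[2] *= 2
        output_shape[3] = W.get_shape().as_list()[2]
    conv = tf.nn.conv2d_transpose(x, W, output_shape,
                                  strides=[1, stride, stride, 1],
                                  padding="SAME")
    return tf.nn.bias_add(conv, b)
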
def inference(image, keep_prob):
    """
    Semantic segmentation network definition (VGG backbone)
    :param image: input image. Should have values in range 0-255
    :param keep_prob: dropout keep probability for the fc6/fc7 layers
    :return: predicted annotation map (argmax over classes) and the final logits
    """
    print("setting up vgg pretrained conv layers ...")
    model_data = utils.get_model_data(FLAGS.model_dir)
    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))
    weights = np.squeeze(model_data['layers'])
    processed_image = utils.process_image(image, mean_pixel)

    with tf.variable_scope("inference"):
        image_net = vgg_net(weights, processed_image)
        conv_final_layer = image_net["conv5_3"]

        pool5 = utils.max_pool_2x2(conv_final_layer)

        # fc6: 7x7 convolution standing in for the first fully connected layer
        W6 = utils.weight_variable([7, 7, 512, 4096], name="W6")
        b6 = utils.bias_variable([4096], name="b6")
        conv6 = utils.conv2d_basic(pool5, W6, b6)
        relu6 = tf.nn.relu(conv6, name="relu6")
        # if FLAGS.debug:
        #     utils.add_activation_summary(relu6)
        relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)

        # fc7: 1x1 convolution replacing the second fully connected layer
        W7 = utils.weight_variable([1, 1, 4096, 4096], name="W7")
        b7 = utils.bias_variable([4096], name="b7")
        conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)
        relu7 = tf.nn.relu(conv7, name="relu7")
        # if FLAGS.debug:
        #     utils.add_activation_summary(relu7)
        relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)

        # fc8: 1x1 convolution producing the coarse class-score map
        W8 = utils.weight_variable([1, 1, 4096, NUM_OF_CLASSES], name="W8")
        b8 = utils.bias_variable([NUM_OF_CLASSES], name="b8")
        conv8 = utils.conv2d_basic(relu_dropout7, W8, b8)
        annotation_pred1 = tf.argmax(conv8, axis=3, name="prediction1")

        # now to upscale to actual image size:
        # 2x upsample and fuse with pool4 (stride 16)
        deconv_shape1 = image_net["pool4"].get_shape()
        W_t1 = utils.weight_variable(
            [4, 4, deconv_shape1[3].value, NUM_OF_CLASSES], name="W_t1")
        b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
        conv_t1 = utils.conv2d_transpose_strided(
            conv8, W_t1, b_t1, output_shape=tf.shape(image_net["pool4"]))
        fuse_1 = tf.add(conv_t1, image_net["pool4"], name="fuse_1")

        # 2x upsample again and fuse with pool3 (stride 8)
        deconv_shape2 = image_net["pool3"].get_shape()
        W_t2 = utils.weight_variable(
            [4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name="W_t2")
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
        conv_t2 = utils.conv2d_transpose_strided(
            fuse_1, W_t2, b_t2, output_shape=tf.shape(image_net["pool3"]))
        fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")

        # final 8x upsample back to the input resolution
        shape = tf.shape(image)
        deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSES])
        W_t3 = utils.weight_variable(
            [16, 16, NUM_OF_CLASSES, deconv_shape2[3].value], name="W_t3")
        b_t3 = utils.bias_variable([NUM_OF_CLASSES], name="b_t3")
        conv_t3 = utils.conv2d_transpose_strided(
            fuse_2, W_t3, b_t3, output_shape=deconv_shape3, stride=8)

        annotation_pred = tf.argmax(conv_t3, axis=3, name="prediction")

    return tf.expand_dims(annotation_pred, axis=3), conv_t3
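
# Hedged usage sketch: how inference() would typically be wired into a
# TF1 training graph. IMAGE_SIZE, the placeholder names, and the learning
# rate are illustrative assumptions, not taken from the code above.
image = tf.placeholder(tf.float32,
                       shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3],
                       name="input_image")
annotation = tf.placeholder(tf.int32,
                            shape=[None, IMAGE_SIZE, IMAGE_SIZE, 1],
                            name="annotation")
keep_probability = tf.placeholder(tf.float32, name="keep_probability")

pred_annotation, logits = inference(image, keep_probability)
# Per-pixel cross-entropy between the logits and the integer class labels.
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logits,
    labels=tf.squeeze(annotation, axis=[3]),
    name="entropy"))
train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)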