import time

import numpy as np
import tensorflow as tf

import utils  # repo-local helpers: get_variable, conv2d_basic, avg_pool_2x2, ...

# NOTE: MODEL_URL, NUM_OF_CLASSESS and the parsed `args` namespace are
# module-level globals defined elsewhere in this file.


def vgg_net(weights, image):
    layers = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',

        'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',

        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
        'relu3_3', 'conv3_4', 'relu3_4', 'pool3',

        'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
        'relu4_3', 'conv4_4', 'relu4_4', 'pool4',

        'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
        'relu5_3', 'conv5_4', 'relu5_4'
    )

    net = {}
    current = image
    for i, name in enumerate(layers):
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            kernels = utils.get_variable(
                np.transpose(kernels, (1, 0, 2, 3)), name=name + "_w")
            bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
            current = utils.conv2d_basic(current, kernels, bias)
            print('conv ' + name[4:] + ':', current.shape)
        elif kind == 'relu':
            current = tf.nn.relu(current, name=name)
            if args.debug:
                utils.add_activation_summary(current)
        elif kind == 'pool':
            current = utils.avg_pool_2x2(current)
            print('pool ' + name[4:] + ':', current.shape)
        net[name] = current

    return net
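# The `utils` wrappers above are thin shims over TensorFlow ops. A minimal
# sketch of the conv/pool helpers, assuming stride-1 SAME convolutions and
# 2x2 stride-2 pooling (the utils module holds the authoritative versions):

def conv2d_basic(x, W, bias):
    # stride-1 convolution with SAME padding, followed by a bias add
    conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME")
    return tf.nn.bias_add(conv, bias)


def avg_pool_2x2(x):
    # 2x2 average pooling, stride 2: halves the spatial resolution
    return tf.nn.avg_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding="SAME")


def max_pool_2x2(x):
    # 2x2 max pooling, stride 2 (used for pool5 in inference below)
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding="SAME")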
def inference(image, keep_prob):
    """
    Semantic segmentation network definition
    :param image: input image. Should have values in range 0-255
    :param keep_prob: dropout keep probability for the conv6/conv7 layers
    :return: per-pixel class prediction (as a 4-D tensor) and the raw class logits
    """
    print("> [FCN] Setting up vgg initialized conv layers ...")
    model_data = utils.get_model_data(args.model_dir, MODEL_URL)

    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))

    weights = np.squeeze(model_data['layers'])

    processed_image = utils.process_image(image, mean_pixel)

    with tf.variable_scope("inference"):
        image_net = vgg_net(weights, processed_image)

        conv_final_layer = image_net["conv5_3"]
        print('----------------------------------------------------')
        print('conv 5_3:', conv_final_layer.get_shape())

        pool5 = utils.max_pool_2x2(conv_final_layer)
        print('pool 5  :', pool5.get_shape())

        W6 = utils.weight_variable([3, 3, 512, 4096], name="W6")  # original is [7, 7, 512, 4096]
        b6 = utils.bias_variable([4096], name="b6")
        conv6 = utils.conv2d_basic(pool5, W6, b6)
        print('conv 6  :', conv6.get_shape())
        relu6 = tf.nn.relu(conv6, name="relu6")
        if args.debug:
            utils.add_activation_summary(relu6)
        relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)

        W7 = utils.weight_variable([1, 1, 4096, 4096], name="W7")
        b7 = utils.bias_variable([4096], name="b7")
        conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)
        print('conv 7  :', conv7.get_shape())
        relu7 = tf.nn.relu(conv7, name="relu7")
        if args.debug:
            utils.add_activation_summary(relu7)
        relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)

        W8 = utils.weight_variable([1, 1, 4096, NUM_OF_CLASSESS], name="W8")
        b8 = utils.bias_variable([NUM_OF_CLASSESS], name="b8")
        conv8 = utils.conv2d_basic(relu_dropout7, W8, b8)
        print('conv 8  :', conv8.get_shape())
        # annotation_pred1 = tf.argmax(conv8, axis=3, name="prediction1")

        # Now upscale back to the input image size (FCN-8s): two stride-2
        # transpose convolutions fused with the pool4 and pool3 skip
        # connections, then a final stride-8 transpose convolution.
        deconv_shape1 = image_net["pool4"].get_shape()
        W_t1 = utils.weight_variable(
            [4, 4, deconv_shape1[3].value, NUM_OF_CLASSESS], name="W_t1")
        b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
        conv_t1 = utils.conv2d_transpose_strided(
            conv8, W_t1, b_t1, output_shape=tf.shape(image_net["pool4"]))
        print('conv t1 :', conv_t1.get_shape())
        fuse_1 = tf.add(conv_t1, image_net["pool4"], name="fuse_1")
        print('fuse 1  :', fuse_1.get_shape())

        deconv_shape2 = image_net["pool3"].get_shape()
        W_t2 = utils.weight_variable(
            [4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name="W_t2")
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
        conv_t2 = utils.conv2d_transpose_strided(
            fuse_1, W_t2, b_t2, output_shape=tf.shape(image_net["pool3"]))
        print('conv t2 :', conv_t2.get_shape())
        fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")
        print('fuse 2  :', fuse_2.get_shape())

        shape = tf.shape(image)
        deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSESS])
        W_t3 = utils.weight_variable(
            [16, 16, NUM_OF_CLASSESS, deconv_shape2[3].value], name="W_t3")
        b_t3 = utils.bias_variable([NUM_OF_CLASSESS], name="b_t3")
        conv_t3 = utils.conv2d_transpose_strided(
            fuse_2, W_t3, b_t3, output_shape=deconv_shape3, stride=8)
        print('conv t3 :', conv_t3.get_shape())

        # `dimension=`/`dim=` are the deprecated spellings of `axis=` in old TF 1.x
        annotation_pred = tf.argmax(conv_t3, axis=3, name="prediction")
        print('prediction:', annotation_pred.get_shape())

    return tf.expand_dims(annotation_pred, axis=3), conv_t3
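# `conv2d_transpose_strided` does the learned upsampling above. A minimal
# sketch, assuming SAME padding and a default stride of 2 (again, see the
# utils module for the authoritative version). Note that tf.nn.conv2d_transpose
# expects the filter laid out as [height, width, out_channels, in_channels],
# which is why W_t1 is [4, 4, pool4_channels, NUM_OF_CLASSESS]:

def conv2d_transpose_strided(x, W, b, output_shape=None, stride=2):
    if output_shape is None:
        # default: double the spatial size of x, with W's out_channels
        output_shape = x.get_shape().as_list()
        output_shape[1] *= 2
        output_shape[2] *= 2
        output_shape[3] = W.get_shape().as_list()[2]
    # transpose ("fractionally strided") convolution: upsamples x by `stride`
    conv = tf.nn.conv2d_transpose(x, W, output_shape,
                                  strides=[1, stride, stride, 1], padding="SAME")
    return tf.nn.bias_add(conv, b)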
# Class-based refactor of the same network: the model directory and `vgg_net`
# are instance attributes, the conv6 kernel size is configurable via self.f,
# and the per-layer shape prints are replaced by coarse timing of the two
# setup phases.
def inference(self, image, keep_prob):
    '''
    Semantic segmentation network definition
    :param image: input image. Should have values in range 0-255
    :param keep_prob: dropout keep probability for the conv6/conv7 layers
    :return: per-pixel class prediction (as a 4-D tensor) and the raw class logits
    '''
    model_data = utils.get_model_data(self.model_dir, MODEL_URL)

    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))

    weights = np.squeeze(model_data['layers'])

    processed_image = utils.process_image(image, mean_pixel)

    with tf.variable_scope('inference'):
        print('> [FCN] Setup vgg initialized conv layers... ', end='')
        s = time.time()
        image_net = self.vgg_net(weights, processed_image)
        print('%.4f ms' % ((time.time() - s) * 1000))

        conv_final_layer = image_net['conv5_3']

        print('> [FCN] Setup deconv layers... ', end='')
        s = time.time()
        pool5 = utils.max_pool_2x2(conv_final_layer)

        W6 = utils.weight_variable([self.f, self.f, 512, 4096], name='W6')
        b6 = utils.bias_variable([4096], name='b6')
        conv6 = utils.conv2d_basic(pool5, W6, b6)
        relu6 = tf.nn.relu(conv6, name='relu6')
        relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)

        W7 = utils.weight_variable([1, 1, 4096, 4096], name='W7')
        b7 = utils.bias_variable([4096], name='b7')
        conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)
        relu7 = tf.nn.relu(conv7, name='relu7')
        relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)

        W8 = utils.weight_variable([1, 1, 4096, self.num_classes], name='W8')
        b8 = utils.bias_variable([self.num_classes], name='b8')
        conv8 = utils.conv2d_basic(relu_dropout7, W8, b8)

        # Upscale back to the input image size (FCN-8s skip architecture).
        deconv_shape1 = image_net['pool4'].get_shape()
        W_t1 = utils.weight_variable(
            [4, 4, deconv_shape1[3].value, self.num_classes], name='W_t1')
        b_t1 = utils.bias_variable([deconv_shape1[3].value], name='b_t1')
        conv_t1 = utils.conv2d_transpose_strided(
            conv8, W_t1, b_t1, output_shape=tf.shape(image_net['pool4']))
        fuse_1 = tf.add(conv_t1, image_net['pool4'], name='fuse_1')

        deconv_shape2 = image_net['pool3'].get_shape()
        W_t2 = utils.weight_variable(
            [4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name='W_t2')
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name='b_t2')
        conv_t2 = utils.conv2d_transpose_strided(
            fuse_1, W_t2, b_t2, output_shape=tf.shape(image_net['pool3']))
        fuse_2 = tf.add(conv_t2, image_net['pool3'], name='fuse_2')

        shape = tf.shape(image)
        deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], self.num_classes])
        W_t3 = utils.weight_variable(
            [16, 16, self.num_classes, deconv_shape2[3].value], name='W_t3')
        b_t3 = utils.bias_variable([self.num_classes], name='b_t3')
        conv_t3 = utils.conv2d_transpose_strided(
            fuse_2, W_t3, b_t3, output_shape=deconv_shape3, stride=8)

        annotation_pred = tf.argmax(conv_t3, axis=3, name='prediction')
        print('%.4f ms' % ((time.time() - s) * 1000))

    return tf.expand_dims(annotation_pred, axis=3), conv_t3
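# A minimal sketch of how inference() is typically wired into a training
# graph. The placeholder shapes, the learning rate, and the `model` instance
# are illustrative assumptions, not taken from this file:

keep_probability = tf.placeholder(tf.float32, name='keep_probability')
image = tf.placeholder(tf.float32, shape=[None, 224, 224, 3], name='input_image')
annotation = tf.placeholder(tf.int32, shape=[None, 224, 224, 1], name='annotation')

pred_annotation, logits = model.inference(image, keep_probability)

# per-pixel cross-entropy between the class logits and the integer label map
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logits, labels=tf.squeeze(annotation, axis=[3])))
train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)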