Example #1
def inference(image, keep_prob):
    """
    Semantic segmentation network definition    # 语义分割网络定义
    :param image: input image. Should have values in range 0-255
    :param keep_prob:
    :return:
    """
    # Load the pre-trained VGG network
    print("setting up vgg initialized conv layers ...")
    # model_dir: Model_zoo/
    # MODEL_URL: download URL for VGG-19
    model_data = utils.get_model_data(FLAGS.model_dir,
                                      MODEL_URL)  # returns the contents of the VGG-19 model

    mean = model_data['normalization'][0][0][0]  # image mean stored in the model file
    mean_pixel = np.mean(mean, axis=(0, 1))  # per-channel (RGB) mean

    weights = np.squeeze(model_data['layers'])  # squeeze the VGG parameters: dropping the size-1 dimensions leaves just the weights

    processed_image = utils.process_image(image, mean_pixel)  # subtract the mean from the image

    with tf.variable_scope("inference"):  # variable scope named "inference"
        image_net = vgg_net(weights, processed_image)  # feed the weights and the processed image through VGG; returns every layer's output
        # conv_final_layer = image_net["conv5_3"]                         # original: take the conv5_3 output
        conv_final_layer = image_net["relu4_4"]
        w5_0 = utils.weight_variable([3, 3, 512, 512],
                                     name="W5_0")  # replace the pool4 downsampling with a 3x3/s1 conv
        b5_0 = utils.bias_variable([512], name="b5_0")
        conv5_0 = utils.conv2d_strided(conv_final_layer, w5_0, b5_0)

        w5_1 = utils.weight_variable([3, 3, 512, 512], name="W5_1")
        b5_1 = utils.bias_variable([512], name="b5_1")
        conv5_1 = utils.conv2d_atrous_2(conv5_0, w5_1, b5_1)
        w5_2 = utils.weight_variable([3, 3, 512, 512],
                                     name="W5_2")  # conv5_1/2/3 in block 5 become rate-2 atrous (dilated) convolutions
        b5_2 = utils.bias_variable([512], name="b5_2")
        conv5_2 = utils.conv2d_atrous_2(conv5_1, w5_2, b5_2)
        w5_3 = utils.weight_variable([3, 3, 512, 512], name="W5_3")
        b5_3 = utils.bias_variable([512], name="b5_3")
        conv5_3 = utils.conv2d_atrous_2(conv5_2, w5_3, b5_3)
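
        # By dropping the pooling step and dilating the conv5 filters with
        # rate 2, the feature map keeps its resolution while each 3x3 filter
        # still covers the 5x5 extent it would have seen after a 2x pooling
        # (the DeepLab-style atrous trick).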
        # pool5 = utils.max_pool_2x2(conv_final_layer)                    # original: /32, i.e. 32x downsampled

        # W6 = utils.weight_variable([7, 7, 512, 4096], name="W6")        # original: initialize layer 6's W and b
        # b6 = utils.bias_variable([4096], name="b6")
        # conv6 = utils.conv2d_basic(pool5, W6, b6)
        # relu6 = tf.nn.relu(conv6, name="relu6")
        # if FLAGS.debug:
        #     utils.add_activation_summary(relu6)
        # relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)

        w6_0 = utils.weight_variable([3, 3, 512, 4096],
                                     name="W6_0")  # replace the pool5 downsampling with a 3x3/s1 conv
        b6_0 = utils.bias_variable([4096], name="b6_0")
        conv6_0 = utils.conv2d_strided(conv5_3, w6_0, b6_0)
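        # The dilation rate of 4 used for layers 6-7 presumably compensates
        # for the two removed poolings (pool4 and pool5), each of which would
        # have halved the resolution: 2 x 2 = 4, the usual atrous bookkeeping.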

        w6 = utils.weight_variable([3, 3, 4096, 4096], name="W6")
        b6 = utils.bias_variable([4096], name="b6")  # layer 6 uses a rate-4 atrous convolution
        conv6 = utils.conv2d_atrous_4(conv6_0, w6, b6)
        relu6 = tf.nn.relu(conv6, name="relu6")
        if FLAGS.debug:
            utils.add_activation_summary(relu6)
        relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)
        # W7 = utils.weight_variable([1, 1, 4096, 4096], name="W7")       # original layer-7 convolution
        # b7 = utils.bias_variable([4096], name="b7")
        # conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)
        # relu7 = tf.nn.relu(conv7, name="relu7")
        # if FLAGS.debug:
        #     utils.add_activation_summary(relu7)
        # relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)
        w7 = utils.weight_variable([1, 1, 4096, 4096], name="W7")
        b7 = utils.bias_variable([4096], name="b7")  # layer 7: rate-4 atrous convolution (dilation is a no-op on a 1x1 kernel)
        conv7 = utils.conv2d_atrous_4(relu_dropout6, w7, b7)
        relu7 = tf.nn.relu(conv7, name="relu7")
        if FLAGS.debug:
            utils.add_activation_summary(relu7)
        relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)

        # W8 = utils.weight_variable([1, 1, 4096, NUM_OF_CLASSESS], name="W8")
        # b8 = utils.bias_variable([NUM_OF_CLASSESS], name="b8")
        # conv8 = utils.conv2d_basic(relu_dropout7, W8, b8)               # original layer-8 conv: classify into 151 classes
        # # annotation_pred1 = tf.argmax(conv8, dimension=3, name="prediction1")
        w8 = utils.weight_variable([1, 1, 4096, NUM_OF_CLASSESS], name="W8")
        b8 = utils.bias_variable([NUM_OF_CLASSESS], name="b8")  # layer 8: rate-4 atrous convolution
        conv8 = utils.conv2d_atrous_4(relu_dropout7, w8, b8)
        # conv8 = utils.max_pool_2x2(conv8)
        print(conv8.shape)
        # now to upscale to actual image size

        deconv_shape1 = image_net["pool4"].get_shape()  # pool4 shape (1/16 of the input), used for fusion: [b,h,w,c]
        # deconv W, b have shape [H, W, OUTC, INC]: OUTC = pool4's channel count, INC = conv8's
        # to upsample 2x, use stride = 2 and kernel_size = 4
        W_t1 = utils.weight_variable(
            [4, 4, deconv_shape1[3].value, NUM_OF_CLASSESS], name="W_t1")
        b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
        # take the conv8 feature map, double its spatial size, and match pool4's channel count
        conv_t1 = utils.conv2d_transpose_strided(conv8,
                                                 W_t1,
                                                 b_t1,
                                                 output_shape=tf.shape(
                                                     image_net["pool4"]))
        fuse_1 = tf.add(conv_t1, image_net["pool4"],
                        name="fuse_1")  # fuse by element-wise addition

        # pool3 shape: 1/8 of the input size
        deconv_shape2 = image_net["pool3"].get_shape()
        # OUTC = pool3's channel count, INC = pool4's channel count
        W_t2 = utils.weight_variable(
            [4, 4, deconv_shape2[3].value, deconv_shape1[3].value],
            name="W_t2")
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
        # upsample the fusion result fuse_1 by another 2x, to the same size as pool3
        conv_t2 = utils.conv2d_transpose_strided(fuse_1,
                                                 W_t2,
                                                 b_t2,
                                                 output_shape=tf.shape(
                                                     image_net["pool3"]))
        # fuse: deconv(fuse_1) + pool3
        fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")

        shape = tf.shape(image)  # original image size
        # stack the deconv output shape: [b, input H, input W, number of classes]
        deconv_shape3 = tf.stack(
            [shape[0], shape[1], shape[2], NUM_OF_CLASSESS])
        # deconv weights: kernel_size = 16 for the 8x upsampling, OUTC = number of classes, INC = pool3's channel count
        W_t3 = utils.weight_variable(
            [16, 16, NUM_OF_CLASSESS, deconv_shape2[3].value], name="W_t3")
        b_t3 = utils.bias_variable([NUM_OF_CLASSESS], name="b_t3")
        # deconvolve fuse_2; output shape is [b, input H, input W, number of classes]
        conv_t3 = utils.conv2d_transpose_strided(fuse_2,
                                                 W_t3,
                                                 b_t3,
                                                 output_shape=deconv_shape3,
                                                 stride=8)

        # deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSESS])
        # # deconv weights: kernel_size = 16 for the 8x upsampling, OUTC = number of classes
        # W_t1 = utils.weight_variable([16, 16, NUM_OF_CLASSESS, NUM_OF_CLASSESS], name="W_t1")  ## deconvolve straight back to the input size
        # b_t1 = utils.bias_variable([NUM_OF_CLASSESS], name="b_t1")
        # # deconvolve conv8 directly; output shape is [b, input H, input W, number of classes]
        # conv_t1 = utils.conv2d_transpose_strided(conv8, W_t1, b_t1, output_shape=deconv_shape3, stride=8)

        # conv_t3 now has the same spatial size as the original image, with NUM_OF_CLASSESS channels
        # for every pixel, argmax over axis 3 (the channels) picks the class that pixel belongs to:
        # whichever of the NUM_OF_CLASSESS channel values is largest decides the class
        # the result is a map giving each point its class index, shape = [b, h, w]
        # annotation_pred = tf.argmax(conv_t1, dimension=3, name="prediction")
        annotation_pred = tf.argmax(conv_t3, axis=3, name="prediction")
    # expand along axis 3 to form [b, h, w, c] with c = 1; conv_t3 is the NUM_OF_CLASSESS-channel logit map
    # return tf.expand_dims(annotation_pred, dim=3), conv_t1

    return tf.expand_dims(annotation_pred, axis=3), conv_t3
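

# ---------------------------------------------------------------------------
# The utils helpers called above (conv2d_strided, conv2d_atrous_2,
# conv2d_atrous_4, conv2d_transpose_strided) are not shown in this example.
# Below is a minimal sketch of what they could look like, assuming they wrap
# the TensorFlow 1.x ops tf.nn.atrous_conv2d and tf.nn.conv2d_transpose.
# Names and signatures follow the calls above, but this is an illustrative
# reconstruction, not the original utils module.

import tensorflow as tf


def conv2d_strided(x, W, b, stride=1):
    # Plain convolution; the comments in inference() say it replaces a
    # pooling layer with a 3x3/s1 conv, so stride defaults to 1 here.
    conv = tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding="SAME")
    return tf.nn.bias_add(conv, b)


def _conv2d_atrous(x, W, b, rate):
    # Dilated (atrous) convolution: keeps the spatial resolution while the
    # effective receptive field grows, because (rate - 1) gaps are inserted
    # between the filter taps. W has shape [H, W, INC, OUTC].
    conv = tf.nn.atrous_conv2d(x, W, rate=rate, padding="SAME")
    return tf.nn.bias_add(conv, b)


def conv2d_atrous_2(x, W, b):
    return _conv2d_atrous(x, W, b, rate=2)


def conv2d_atrous_4(x, W, b):
    return _conv2d_atrous(x, W, b, rate=4)


def conv2d_transpose_strided(x, W, b, output_shape=None, stride=2):
    # Transposed convolution ("deconvolution") that upsamples x by `stride`.
    # W has shape [H, W, OUTC, INC], matching the comments in inference().
    if output_shape is None:
        output_shape = x.get_shape().as_list()
        output_shape[1] *= 2
        output_shape[2] *= 2
        output_shape[3] = W.get_shape().as_list()[2]
    conv = tf.nn.conv2d_transpose(x, W, output_shape,
                                  strides=[1, stride, stride, 1],
                                  padding="SAME")
    return tf.nn.bias_add(conv, b)

# With helpers like these in scope (plus vgg_net, FLAGS, MODEL_URL and
# NUM_OF_CLASSESS), inference(image, keep_prob) builds the full graph.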