Example #1
W_t1 = utils.weight_variable([4, 4, 512, NUM_OF_CLASSESS], name="W_t1")
b_t1 = utils.bias_variable([512], name="b_t1")
conv_t1 = utils.conv2d_transpose_strided(conv8, W_t1, b_t1)  # 2x upsample of conv8 -> (batch, 14, 14, 512), matching pool4's size
print("the shape of conv_t1 is %s" % conv_t1.shape)

# Upsample to pool3's size, i.e. 1/8 of the original image
# Output channels: NUM_OF_CLASSESS; input channels: 512 (pool4's depth).
# (The original FCN-8s puts pool3's depth, 256, here and fuses the result with pool3
#  as fuse_2, giving fuse_2 a shape of (?, 28, 28, 256).)
W_t2 = utils.weight_variable([4, 4, NUM_OF_CLASSESS, 512], name="W_t2")
b_t2 = utils.bias_variable([NUM_OF_CLASSESS], name="b_t2")
# Upsample the previous result (fuse_1 in the original code, conv_t1 here) by 2x; the output size matches pool3
conv_t2 = utils.conv2d_transpose_strided(conv_t1, W_t2, b_t2)
# In the original code this is where the fusion deconv(fuse_1) + pool3 happens (skipped here)
print("the shape of conv_t2 is %s" % conv_t2.shape)

# Stack the deconvolution output shape: [batch, original H, original W, number of classes]
# The shape of conv_t3 will be (?, ?, ?, NUM_OF_CLASSESS)
deconv_shape3 = tf.stack([-1, 224, 224, NUM_OF_CLASSESS])  # only needed by the explicit output_shape variant commented out below
# Deconvolution kernel: 8x upsampling uses ks=16; output channels = number of classes;
# input channels = conv_t2's depth (NUM_OF_CLASSESS here; pool3's 256 in the original FCN)
W_t3 = utils.weight_variable([16, 16, NUM_OF_CLASSESS, NUM_OF_CLASSESS],
                             name="W_t3")
b_t3 = utils.bias_variable([NUM_OF_CLASSESS], name="b_t3")
# Deconvolve (fuse_2 in the original code, conv_t2 here) up to [batch, original H, original W, number of classes]
# conv_t3 = utils.conv2d_transpose_strided(conv_t2, W_t3, b_t3, output_shape=deconv_shape3, stride=8)
conv_t3 = utils.conv2d_transpose_strided8(conv_t2, W_t3, b_t3, stride=8)
print("the shape of conv_t3 is %s" % conv_t3.shape)

# Per-pixel prediction: argmax over the channel axis of conv_t3, which holds NUM_OF_CLASSESS
# (e.g. 21) score maps; the result is [b, h, w] and is typically expanded back to [b, h, w, c] with c=1
annotation_pred = tf.argmax(conv_t3, axis=3, name="prediction")
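# The comment above refers to re-adding the channel axis: tf.argmax over axis 3
# yields [b, h, w], so the usual next step (as in standard FCN code; the
# pred_annotation name is illustrative) is to expand it back to [b, h, w, 1]:
pred_annotation = tf.expand_dims(annotation_pred, axis=3, name="pred_annotation")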