Example #1
import numpy as np
import tifffile as tiff  # tiff.imsave below matches the tifffile API
# crf (a dense-CRF wrapper) and open_and_close (morphology) are project-local helpers;
# img17 (a single 2017 image band) is assumed to be loaded elsewhere.

softmax_merge = np.load('/home/lenovo/256Gdisk/tainchi/merge/1120.npy')
merge = np.argmax(softmax_merge, 0).astype(np.uint8)  # class map: argmax over the class axis
#tiff.imsave('/home/lenovo/2Tdisk/Wkyao/_/2017/merge/fs1116.tif', merge)
merge_list = []
im_2017_list = []
# Tile the 15106-pixel width into 25 chunks of 600 columns plus a
# 106-column remainder (25 * 600 + 106 = 15106).
for i in range(25):
    m = softmax_merge[:, :, i * 600:i * 600 + 600]
    merge_list.append(m)
    b = img17[:, i * 600:i * 600 + 600]
    b = np.array([np.array([b for _ in range(3)])])  # replicate the band to 3 channels
    b = b.transpose(0, 2, 3, 1)  # -> [1, H, W, 3]
    im_2017_list.append(b)
merge_list.append(softmax_merge[:, :, 15000:15106])
im_2017_list.append(
    np.array([np.array([img17[:, 15000:15106]
                        for _ in range(3)])]).transpose(0, 2, 3, 1))

allImg_crf = []
allImg_soft = []

for n, im_2017_part in enumerate(im_2017_list):
    # Apply the CRF to each tile:
    soft = merge_list[n]
    im_2017_mean = np.mean(im_2017_list[n], axis=0)
    c = crf.crf(im_2017_mean, soft)
    allImg_crf.append(c)  # collect the CRF result for each tile.
Crf = np.concatenate(tuple(allImg_crf), axis=1)  # stitch the tiles back along the width
#tiff.imsave('/home/lenovo/2Tdisk/Wkyao/_/2017/merge/crf_merge_1108.tif', Crf)
img = open_and_close(Crf)  # morphological opening/closing cleanup
tiff.imsave('/home/lenovo/256Gdisk/tainchi/merge/last.tif', img)
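
The crf.crf helper used above is project-local and not shown here. As a rough orientation only, a dense-CRF refinement over a softmax map is commonly written with pydensecrf along the following lines; the function name crf_refine, the kernel parameters, and the iteration count are illustrative assumptions, not the project's actual implementation.

# Hypothetical sketch of a crf.crf-style helper built on pydensecrf.
# Assumes soft has shape [num_classes, H, W] and img is an [H, W, 3] array.
import numpy as np
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax

def crf_refine(img, soft, iters=5):
    n_classes, h, w = soft.shape
    d = dcrf.DenseCRF2D(w, h, n_classes)
    d.setUnaryEnergy(unary_from_softmax(soft))  # -log(p) unary potentials
    # Smoothness kernel: nearby pixels prefer the same label.
    d.addPairwiseGaussian(sxy=3, compat=3)
    # Appearance kernel: pixels with similar colors prefer the same label.
    d.addPairwiseBilateral(sxy=80, srgb=13,
                           rgbim=np.ascontiguousarray(img, dtype=np.uint8),
                           compat=10)
    q = d.inference(iters)
    return np.argmax(q, axis=0).reshape(h, w).astype(np.uint8)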
Example #2
def main(argv=None):
    # Define the regularization term:
    regularization = tf.Variable(0, dtype=tf.float32)

    keep_probability = tf.placeholder(tf.float32,
                                      name="keep_probability")  # dropout
    image = tf.placeholder(tf.float32,
                           shape=[None, None, None, 1],
                           name="input_image")  # 输入
    annotation = tf.placeholder(tf.int32,
                                shape=[None, None, None, 1],
                                name="annotation")  # label
    mean = tf.placeholder(tf.float32, name='mean')
    pred_annotation, logits, softmax = inference(image, keep_probability,
                                                 mean)  # logits=resize_F8

    print('annotation', annotation.shape)
    print('image', image.shape)

    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits,
            # tf.nn.sparse_softmax_cross_entropy_with_logits applies softmax to the logits (resize_F8) internally.
            labels=tf.squeeze(annotation, squeeze_dims=[3]),
            name="entropy"))
    tf.summary.scalar("entropy", loss)  # train val公用一个loss节点运算.

    trainable_var = tf.trainable_variables(
    )  # trainable Variables are collected in the tf.GraphKeys.TRAINABLE_VARIABLES collection.

    train_op = train(loss, trainable_var)

    print("Setting up summary op...")
    print("Setting up image reader...")
    train_records, valid_records = scene_parsing.read_dataset(
        FLAGS.data_dir)  # convert and read in the dataset records.
    print(len(train_records))
    print(len(valid_records))

    print("Setting up dataset reader")
    image_options = {
        'resize': False,
        'resize_size': IMAGE_RESIZE
    }  # use IMAGE_RESIZE-sized images as one batch

    if FLAGS.mode == 'train':
        train_dataset_reader = dataset.BatchDatset(train_records,
                                                   image_options)

    sess = tf.Session()
    print("Setting up Saver...")
    saver = tf.train.Saver()  # declare a tf.train.Saver() to save the model.
    summary_op = tf.summary.merge_all()  # merge all summaries.
    summary_writer_train = tf.summary.FileWriter(FLAGS.logs_dir + '/train',
                                                 sess.graph)
    summary_writer_val = tf.summary.FileWriter(FLAGS.logs_dir +
                                               '/val')  # no need to add the graph again here.

    sess.run(tf.global_variables_initializer())

    ckpt = tf.train.get_checkpoint_state(
        FLAGS.logs_dir)  # fetch the checkpoint state so training can resume from it next time
    if ckpt and ckpt.model_checkpoint_path:  # tf.train.get_checkpoint_state() uses the checkpoint file (which records all model names) to find the latest model in the directory.
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")

    if FLAGS.mode == "train":
        # mymean = train_dataset_reader._read_images()   #强行运行这个函数,把mean传过来.
        # train_dataset_reader 调用._read_images()  需要在里面修改mean是否运算和return.
        # 生成本次数据集的mean值.
        mymean = [73.9524, 73.9524, 73.9524]
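        # (Illustrative) all three entries are equal because the single input
        # band is replicated to 3 channels; the value plays the role of
        # np.mean(train_images) computed once over the training set.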
        for itr in xrange(MAX_ITERATION):  # adjust itr to resume training after the previously saved model count.
            train_images, train_annotations = train_dataset_reader.next_batch(
                FLAGS.batch_size)
            feed_dict = {
                image: train_images,
                annotation: train_annotations,
                keep_probability: 0.85,
                mean: mymean
            }
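            # keep_probability 0.85 enables dropout during training; the
            # visualize/test branches below feed 1.0 to disable dropout at inference.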
            sess.run(train_op, feed_dict=feed_dict)
            if itr % 10 == 0:
                # Loss and summaries are evaluated only here, every 10 steps.
                train_loss, summary_str = sess.run([loss, summary_op],
                                                   feed_dict=feed_dict)
                summary_writer_train.add_summary(summary_str, itr)
                print("Step: %d, Train_loss:%g" % (itr, train_loss))

            if itr % 100 == 0:  # evaluate the validation set every 100 training steps.
                # valid_images, valid_annotations = validation_dataset_reader.next_batch(FLAGS.batch_size)
                # valid_loss, summary_str = sess.run([loss, summary_op], feed_dict={image: valid_images,
                #                                                                          annotation: valid_annotations,
                #                                                                          keep_probability: 1.0,
                #                                                                          mean: mymean})    # compute the value of the new loss_valid node.
                # summary_writer_val.add_summary(summary_str, itr)
                # print("%s ---> Validation_loss: %g" % (datetime.datetime.now(), valid_loss))

                saver.save(sess, FLAGS.logs_dir + "model.ckpt", itr)

        # summary_writer_val.close()
        summary_writer_train.close()

    elif FLAGS.mode == "visualize":
        # mymean = [42.11049008, 65.75782253, 74.11216841]
        mymean = [73.9524, 73.9524, 73.9524]

        validation_dataset_reader = dataset.BatchDatset(
            valid_records, image_options)
        valid_images, valid_annotations = validation_dataset_reader.get_random_batch(
            FLAGS.batch_size)
        pred = sess.run(pred_annotation,
                        feed_dict={
                            image: valid_images,
                            annotation: valid_annotations,
                            keep_probability: 1.0,
                            mean: mymean
                        })

        valid_annotations = np.squeeze(valid_annotations, axis=3)
        pred = np.squeeze(pred, axis=3)

        for itr in range(FLAGS.batch_size):
            utils.save_image(valid_images[itr].astype(np.uint8),
                             FLAGS.logs_dir,
                             name="inp_" + str(5 + itr))
            utils.save_image(valid_annotations[itr].astype(np.uint8),
                             FLAGS.logs_dir,
                             name="gt_" + str(5 + itr))
            utils.save_image(pred[itr].astype(np.uint8),
                             FLAGS.logs_dir,
                             name="pred_" + str(5 + itr))
            print("Saved image: %d" % itr)

    elif FLAGS.mode == "test":
        im_2017_list = []
        im_2017_list2 = []
        b = []
        global im_2017  # [5106, 15106, 3]
        global new_2017  # [8106, 15106, 3]
        global new_2015
        for i in range(25):
            b = new_2017[0:4000, i * 600:i * 600 + 600,
                         3]  # if training used the fourth channel, testing must use the fourth channel too.
            b = np.array([np.array([b for _ in range(3)])])  # replicate to 3 channels
            b = b.transpose(0, 2, 3, 1)
            im_2017_list.append(b)
            print(b.shape)
            '''
            # multi-channel
            b = im_2017[0:5106, i * 300 : i * 300 + 300, :]
            b = np.array([b])
            # print(b.shape)
            # b = b.transpose(0, 2, 3, 1)
            im_2017_list.append(b)
        # im_2017_list.append(np.array([np.array([im_2017[0:5106, 15000:15106, 3] for i in range(3)])]).transpose(0, 2, 3, 1))
        #im_2017_list.append(np.array([im_2017[0:5106, 15000:15106, :]]))     # .transpose(0, 2, 3, 1))
        im_2017_list.append(np.array([im_2017[0:5106, 15000:15106, :]]))
        '''

        im_2017_list.append(
            np.array([
                np.array([new_2017[0:4000, 15000:15106, 3] for _ in range(3)])
            ]).transpose(0, 2, 3, 1))
        '''
        for i in range(50):
            b = new_2017[5106:8106, i * 300:i * 300 + 300, 3]
            b = np.array([np.array([b for i in range(3)])])
            b = b.transpose(0, 2, 3, 1)
            im_2017_list2.append(b)
        
        im_2017_list2.append(
            np.array([np.array([new_2017[5106:8106, 15000:15106, 3] for i in range(3)])]).transpose(0, 2, 3, 1))
        '''
        allImg = []
        allImg2 = []
        allImg_soft = []
        allImg_crf = []
        mymean = [73.9524, 73.9524, 73.9524]

        for n, im_2017_part in enumerate(im_2017_list):
            # print(im_2017_part.shape)
            feed_dict_test = {
                image: im_2017_part,
                keep_probability: 1.0,
                mean: mymean
            }
            a = sess.run(pred_annotation, feed_dict=feed_dict_test)
            a = np.mean(a, axis=(0, 3))
            allImg.append(a)
            '''
            for n, im_2017_part2 in enumerate(im_2017_list2):
                # print(im_2017_part.shape)
                feed_dict_test = {image: im_2017_part2, keep_probability: 1.0, mean: mymean}
                a = sess.run(pred_annotation, feed_dict=feed_dict_test)
                a = np.mean(a, axis=(0, 3))
                allImg2.append(a)
            '''

            # Apply the CRF:
            soft = sess.run(softmax, feed_dict=feed_dict_test)
            # sess.run(softmax, feed_dict=feed_dict_test) runs the forward pass and normalizes the output into a per-pixel probability distribution.
            soft = np.mean(soft, axis=0).transpose(2, 0, 1)
            # soft = soft.transpose(2, 0, 1)

            im_2017_mean = np.mean(im_2017_list[n], axis=0)

            # print (im_2017_mean.shape)     #(5106, 300, 3)
            c = crf.crf(im_2017_mean, soft)
            # print (c.shape)    #(5106, 300)
            allImg_crf.append(c)  # collect the CRF result for each tile.
            allImg_soft.append(soft)  # collect the softmax for each tile.
        Crf = np.concatenate(tuple(allImg_crf),
                             axis=1)  # axis=1: concatenate along the second (width) dimension.
        softmax_full = np.concatenate(tuple(allImg_soft), axis=2)  # distinct name avoids shadowing the softmax tensor
        # tiff.imsave('/home/lenovo/2Tdisk/Wkyao/_/2017/vgg/crf_vgg_1108.tif', Crf)
        np.save('/home/lenovo/2Tdisk/Wkyao/_/2017/vgg/1116_3.npy',
                softmax_full)  # save the stitched softmax as .npy
        np.save(
            '/home/lenovo/2Tdisk/Wkyao/FirstSetp/ChenJ/finall/1110/1116_3.npy',
            softmax_full)
        res1 = np.concatenate(tuple(allImg), axis=1).astype(np.uint8)
        tiff.imsave('/home/lenovo/2Tdisk/Wkyao/_/2017/vgg/1116_3.tif', res1)
        open_and_close(Crf)  # morphological opening/closing cleanup (result unused here).
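
The open_and_close helper called in both examples is also project-local. Below is a minimal sketch of an opening-then-closing cleanup with OpenCV, assuming a uint8 label mask; the function name open_and_close_sketch and the kernel size are illustrative assumptions.

# Hypothetical open_and_close-style cleanup using OpenCV morphology.
import cv2

def open_and_close_sketch(mask, ksize=5):
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (ksize, ksize))
    opened = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)     # erode then dilate: removes small speckles
    closed = cv2.morphologyEx(opened, cv2.MORPH_CLOSE, kernel)  # dilate then erode: fills small holes
    return closed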