Example #1
 def store_prediction(self, sess, batch_x, batch_y, name):
     """
     calculate stats on verification data
     :param sess:
     :param batch_x: test_x
     :param batch_y: test_y
     :param name: save prediction result as name
     :return:
     """
     prediction = sess.run(self.net.predicter, feed_dict={self.net.x: batch_x, 
                                                          self.net.y: batch_y, 
                                                          self.net.keep_prob: 1.})
     pred_shape = prediction.shape
     loss = sess.run(self.net.cost, feed_dict={self.net.x: batch_x, 
                                               self.net.y: util.crop_to_shape(batch_y, pred_shape),
                                               self.net.keep_prob: 1.})
     
     logging.info("Verification error= {:.1f}%, loss= {:.4f}".format(error_rate(prediction,
                                                                     util.crop_to_shape(batch_y,
                                                                                        prediction.shape)),
                                                                     loss))
           
     img = util.combine_img_prediction(batch_x, batch_y, prediction)
     util.save_image(img, "%s/%s.jpg"%(self.prediction_path, name))
     
     return pred_shape
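
Note: every example on this page ends the same way, handing three NHWC arrays to util.combine_img_prediction and writing the resulting strip with util.save_image. The minimal standalone sketch below assumes tf_unet's conventions (grayscale input batch, two-class one-hot label, softmax-like prediction); the shapes and the output file name are placeholders, not values taken from any of the examples.

import numpy as np
from tf_unet import util

# Placeholder sizes: a 572x572 input and a 388x388 output, as in the original
# U-Net paper; any consistent pair of sizes would work the same way.
batch_x = np.random.rand(2, 572, 572, 1)                        # grayscale input batch (NHWC)
fg = (np.random.rand(2, 572, 572, 1) > 0.5).astype(np.float64)  # random foreground mask
batch_y = np.concatenate((1.0 - fg, fg), axis=3)                # two-class one-hot ground truth
prediction = np.random.rand(2, 388, 388, 2)                     # softmax-like network output

# combine_img_prediction lays input, ground truth and prediction out side by
# side (cropping the larger arrays to the prediction size); save_image writes
# the strip as a JPEG.
img = util.combine_img_prediction(batch_x, batch_y, prediction)
util.save_image(img, "verification.jpg")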
Example #2
    def store_prediction(self, sess, batch_x, batch_y, name):
        prediction = sess.run(self.net.predicter,
                              feed_dict={
                                  self.net.x: batch_x,
                                  self.net.y: batch_y,
                                  self.net.keep_prob: 1.
                              })
        pred_shape = prediction.shape

        loss = sess.run(self.net.cost,
                        feed_dict={
                            self.net.x: batch_x,
                            self.net.y:
                            util.crop_to_shape(batch_y, pred_shape),
                            self.net.keep_prob: 1.
                        })

        logging.info("Verification error= {:.1f}%, loss= {:.4f}".format(
            error_rate(prediction,
                       util.crop_to_shape(batch_y, prediction.shape)), loss))

        img = util.combine_img_prediction(batch_x, batch_y, prediction)
        util.save_image(img, "%s/%s.jpg" % (self.prediction_path, name))

        return pred_shape
Example #3
    def store_prediction(self, sess, batch_x, batch_y, name):
        prediction = sess.run(self.net.predicter,
                              feed_dict={
                                  self.net.x: batch_x,
                                  self.net.y: batch_y,
                                  self.net.keep_prob: 1.
                              })
        pred_shape = prediction.shape

        loss = sess.run(self.net.cost,
                        feed_dict={
                            self.net.x: batch_x,
                            self.net.y:
                            util.crop_to_shape(batch_y, pred_shape),
                            self.net.keep_prob: 1.
                        })

        logging.info("Verification error= {:.1f}%, loss= {:.4f}".format(
            error_rate(prediction,
                       util.crop_to_shape(batch_y, prediction.shape)), loss))

        print("batch_x", batch_x.shape)
        print("batch_y", batch_y.shape)
        print("prediction", prediction.shape)
        if batch_x.shape[-1] != batch_y.shape[-1]:
            logging.warning("image and label have different number of channels")
            return pred_shape
        img = util.combine_img_prediction(batch_x, batch_y, prediction)
        util.save_image(img, "%s/%s.jpg" % (self.prediction_path, name))

        return pred_shape
Example #4
def launch(data_root, output_path, training_iters, epochs, restore, layers, features_root):
    print("Using data from: %s"%data_root)
    data_provider = DataProvider(600, glob.glob(data_root+"/*"))
    
    net = unet.Unet(channels=data_provider.channels, 
                    n_class=data_provider.n_class, 
                    layers=layers, 
                    features_root=features_root,
                    add_regularizers=True,
#                     filter_size=5
                    )
    
    path = output_path if restore else create_training_path(output_path)
    trainer = unet.Trainer(net, optimizer="momentum", opt_kwargs=dict(momentum=0.2))
    path = trainer.train(data_provider, path, 
                         training_iters=training_iters, 
                         epochs=epochs, 
                         dropout=0.5, 
                         display_step=2, 
                         restore=restore)
     
    x_test, y_test = data_provider(1)
    prediction = net.predict(path, x_test)
     
    print("Testing error rate: {:.2f}%".format(unet.error_rate(prediction, util.crop_to_shape(y_test, prediction.shape))))
    
#     import numpy as np
#     np.save("prediction", prediction[0, ..., 1])
    
    img = util.combine_img_prediction(x_test, y_test, prediction)
    util.save_image(img, "prediction.jpg")
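
For context, a hypothetical direct invocation of the launch() function above; the paths and hyper-parameter values are placeholders, not settings from the original project.

if __name__ == "__main__":
    launch(data_root="./data",            # directory containing the training images
           output_path="./unet_trained",  # checkpoints and logs land here
           training_iters=32,
           epochs=10,
           restore=False,
           layers=3,
           features_root=16)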
Example #5
    def store_prediction(self, sess, batch_x, batch_y, name):
        prediction = sess.run(self.net.predicter,
                              feed_dict={
                                  self.net.x: batch_x,
                                  self.net.y: batch_y,
                                  self.net.keep_prob: 1.
                              })
        pred_shape = prediction.shape

        loss = sess.run(self.net.cost,
                        feed_dict={
                            self.net.x: batch_x,
                            self.net.y:
                            util.crop_to_shape(batch_y, pred_shape),
                            self.net.keep_prob: 1.
                        })

        logging.info("Verification error= {:.1f}%, loss= {:.4f}".format(
            error_rate(prediction,
                       util.crop_to_shape(batch_y, prediction.shape)), loss))
        #filename = '/Users/imke/Downloads/tf_unet/epoch'+str(epoch)+'.txt'
        #with open (filename, 'a') as fileobj:
        # fileobj.write("Verification error= {:.1f}%, loss= {:.4f}\n".format(error_rate(prediction,
        #                                                                        util.crop_to_shape(batch_y,
        #                                                                                          prediction.shape))))
        img = util.combine_img_prediction(batch_x, batch_y, prediction)
        util.save_image(img, "%s/%s.jpg" % (self.prediction_path, name))

        return pred_shape
Example #6
def launch(data_root, output_path, training_iters, epochs, restore, layers, features_root):
    generator = Generator(572, data_root)
    
    data, label = generator(1)
    weights = None#(1/3) / (label.sum(axis=2).sum(axis=1).sum(axis=0) / data.size)
    
    net = unet.Unet(channels=generator.channels, 
                    n_class=generator.n_class, 
                    layers=layers, 
                    features_root=features_root,
                    add_regularizers=True,
                    class_weights=weights,
#                     filter_size=5
                    )
    
    path = output_path if restore else create_training_path(output_path)
#     trainer = unet.Trainer(net, optimizer="momentum", opt_kwargs=dict(momentum=0.2))
    trainer = unet.Trainer(net, optimizer="adam", opt_kwargs=dict(beta1=0.91))
    path = trainer.train(generator, path, 
                         training_iters=training_iters, 
                         epochs=epochs, 
                         dropout=0.5, 
                         display_step=2, 
                         restore=restore)
     
    prediction = net.predict(path, data)
     
    print("Testing error rate: {:.2f}%".format(unet.error_rate(prediction, util.crop_to_shape(label, prediction.shape))))
    
#     import numpy as np
#     np.save("prediction", prediction[0, ..., 1])
    
    img = util.combine_img_prediction(data, label, prediction)
    util.save_image(img, "prediction.jpg")
Example #7
    def store_prediction(self, sess, batch_x, batch_y, name):
        prediction = sess.run(self.net.predicter,
                              feed_dict={
                                  self.net.x: batch_x,
                                  self.net.y: batch_y,
                                  self.net.keep_prob: 1.
                              })

        pred_shape = prediction.shape

        loss = sess.run(self.net.cost,
                        feed_dict={
                            self.net.x: batch_x,
                            self.net.y:
                            util.crop_to_shape(batch_y, pred_shape),
                            self.net.keep_prob: 1.
                        })

        logging.info("Verification error= {:.1f}%, loss= {:.4f}".format(
            error_rate(prediction,
                       util.crop_to_shape(batch_y, prediction.shape)), loss))
        #print ('in store_prediction')
        #print ('prediction: ', prediction.shape)
        #print ('batch_x: ', type(batch_x),batch_x.shape)
        #print ('batch_y: ', type(batch_y),batch_y.shape)

        #adjust for multiple class
        #adjust for multiple channels
        img = util.combine_img_prediction(batch_x, batch_y, prediction)
        #print ('in store_pred2: ', img.shape)
        util.save_image(img, "%s/%s.jpg" % (self.prediction_path, name))
        return pred_shape
Example #8
def test_VDSR_with_sess(
    epoch,
    ckpt_path,
    data_path,
    sess,
):
    folder_list = glob.glob(os.path.join(data_path))
    print('folder_list', folder_list)
    saver.restore(sess, ckpt_path)

    psnr_dict = {}
    for folder_path in folder_list:
        psnr_list = []
        img_list = get_img_list(folder_path)
        for i in range(len(img_list)):
            (input_list, gt_list, scale_list) = get_image_batch(img_list, i, 2)

            input_y = input_list
            gt_y = gt_list
            # print input_y[0].shape
            start_t = time.time()
            # img_vdsr_y = sess.run([output_tensor],
            #                       feed_dict={input_tensor: np.resize(input_y,
            #                       (1, input_y.shape[0],
            #                       input_y.shape[1], 1))})
            img_vdsr_y = sess.run([output_tensor],
                                  feed_dict={input_tensor: input_y})
            print(np.asarray(img_vdsr_y).shape)
            # img_vdsr_y = np.resize(img_vdsr_y, (2, input_y.shape[1],
            #                        input_y.shape[2],1))
            end_t = time.time()
            print('end_t', end_t, 'start_t', start_t)
            print('time consumption', end_t - start_t)
            print('image_size', input_y.shape)
            img_vdsr_y = np.asarray(img_vdsr_y[0])
            print('img_vdsr_y', img_vdsr_y.shape)
            print('input_y', input_y.shape)
            print('gt_y', gt_y.shape)
            # misc.toimage(np.resize(img_vdsr_y[0],(input_y.shape[1], input_y.shape[2]) ),
            #  cmin=0.0,cmax=1.0).save('outfile_%d.jpg' % i)
            img = util.combine_img_prediction(input_y, gt_y, img_vdsr_y)
            util.save_image(img, 'outfile%d.jpg' % i)
            scipy.io.savemat('outfile%d' % i, mdict={'img': img_vdsr_y})

            # misc.toimage(img_vdsr_y).save('outfile%d.jpg' %i)

            psnr_bicub = psnr(input_y, gt_y, scale_list)
            psnr_vdsr = psnr(img_vdsr_y, gt_y, scale_list)
            print('PSNR: bicubic %f\tVDSR %f' % (psnr_bicub, psnr_vdsr))
            psnr_list.append([psnr_bicub, psnr_vdsr, scale_list])
        psnr_dict[os.path.basename(folder_path)] = psnr_list
    with open('psnr/%s' % os.path.basename(ckpt_path), 'wb') as f:
        pickle.dump(psnr_dict, f)
Example #9
    def store_prediction(self,
                         sess,
                         eval_iters,
                         eval_data_provider,
                         border_size,
                         patch_size,
                         input_size,
                         name,
                         prediction_path,
                         verification_batch_size,
                         combine=False,
                         hard_prediction=False):
        for i in range(eval_iters):
            patches = eval_data_provider.get_patches(get_coordinates=True)
            if combine:
                image = np.zeros(
                    (verification_batch_size, input_size, input_size, 3))
                label = np.zeros(
                    (verification_batch_size, input_size, input_size, 2))
            prediction = np.zeros((verification_batch_size, input_size,
                                   input_size, self.n_class))
            for patch in patches:
                pred = sess.run(
                    (self.predicter),
                    feed_dict={
                        self.x: patch[0],
                        self.y: patch[1],
                        self.keep_prob: 1.0,
                        self.is_training: False
                    })
                x, y = patch[2]
                prediction[:, x:x + patch_size, y:y + patch_size, ...] = pred

                if combine:
                    offset = border_size
                    image[:, x:x + patch_size, y:y + patch_size,
                          ...] = patch[0][:, offset:-offset, offset:-offset,
                                          ...]
                    label[:, x:x + patch_size, y:y + patch_size,
                          ...] = patch[1]

            pred_shape = prediction.shape
            if hard_prediction:
                argmax = np.argmax(prediction, axis=3)
                prediction = np.stack([1 - argmax, argmax], axis=3)
            if combine:
                img = util.combine_img_prediction(image, label, prediction)
            else:
                img = util.to_rgb(prediction[...,
                                             1].reshape(-1, input_size, 1))
            util.save_image(img, "%s/%s_%s.jpg" % (prediction_path, name, i))

        return pred_shape
Example #10
def main():

    dp = DataProvider(batchSize=BATCH_SIZE, validationSize=VALIDATION_SIZE)
    dp.readData()
    print("DONE READING DATA")
    # calculate num of iterations
    iters = dp.getTrainSize() // BATCH_SIZE
    # unet
    net = unet.Unet(channels = 1, n_class = 2, layers = 3,\
     features_root = 16, cost="cross_entropy", cost_kwargs={})

    # # trainer
    # options = {"momentum":0.2, "learning_rate":0.2,"decay_rate":0.95}

    # trainer = unet.Trainer(net, optimizer="momentum",plotter = plot, opt_kwargs=options )
    # # train model
    # path = trainer.train(dp, OUTPUT_PATH,training_iters = iters,epochs=EPOCHS,\
    # 	dropout=DROPOUT_KEEP_PROB, display_step = DISPLAY_STEP,restore = restore)

    path = os.getcwd() + "/retinaModel/model.cpkt"

    x_test, y_test = dp.getTestData(3, crop=False)
    prediction = net.predict(path, x_test)

    # # sanity check
    # fig, ax = plt.subplots(3, 3)
    # ax[0][0].imshow(x_test[0,:,:,0],cmap=plt.cm.gray)
    # ax[0][1].imshow(y_test[0,:,:,1],cmap=plt.cm.gray)
    # ax[0][2].imshow(np.argmax(prediction[0,...],axis =2),cmap=plt.cm.gray)
    # ax[1][0].imshow(x_test[1,:,:,0],cmap=plt.cm.gray)
    # ax[1][1].imshow(y_test[1,:,:,1],cmap=plt.cm.gray)
    # ax[1][2].imshow(np.argmax(prediction[1,...],axis =2),cmap=plt.cm.gray)
    # ax[2][0].imshow(x_test[2,:,:,0],cmap=plt.cm.gray)
    # ax[2][1].imshow(y_test[2,:,:,1],cmap=plt.cm.gray)
    # ax[2][2].imshow(np.argmax(prediction[2,...],axis =2),cmap=plt.cm.gray)
    # plt.show()

    # save test result as image
    # check for path
    if not os.path.lexists(OUTPUT_PATH):
        os.makedirs(OUTPUT_PATH)

    sampleSize = 3
    img = util.combine_img_prediction(x_test[0:sampleSize,...], y_test[0:sampleSize,...]\
     , prediction[0:sampleSize,...])

    util.save_image(
        img, "%s/%s.jpg" % (os.getcwd() + "/" + "testResults", "testSample"))

    print("Testing error rate: {:.2f}%".format(
        unet.error_rate(prediction, util.crop_to_shape(y_test,
                                                       prediction.shape))))
Example #11
 def _store_prediction(sess, batch_x, batch_y, name, predicter, prediction_path, cost, x, y, keep_prob):
     prediction = sess.run(predicter, 
             feed_dict={x: batch_x, y: batch_y, keep_prob: 1.})
     pred_shape = prediction.shape
     
     loss = sess.run(cost, feed_dict={x: batch_x, 
             y: util.crop_to_shape(batch_y, pred_shape), keep_prob: 1.})
     
     logging.info("Verification error= {:.1f}%, loss= {:.4f}".format(
             Trainer._error_rate(prediction, util.crop_to_shape(batch_y,
             prediction.shape)), loss))
           
     img = util.combine_img_prediction(batch_x, batch_y, prediction)
     util.save_image(img, "%s/%s.jpg"%(prediction_path, name))
     
     return pred_shape
Example #12
    def store_prediction(self, sess, batch_x, batch_y, name):
        prediction = sess.run(self.net.predicter,
                              feed_dict={
                                  self.net.x: batch_x,
                                  self.net.y: batch_y,
                                  self.net.keep_prob: 1.
                              })
        pred_shape = prediction.shape

        if len(batch_x.shape) == 5:
            loss = sess.run(self.net.cost,
                            feed_dict={
                                self.net.x: batch_x,
                                self.net.y: batch_y,
                                self.net.keep_prob: 1.
                            })
            logging.info("Verification error= {:.1f}%, loss= {:.4f}".format(
                error_rate(prediction, batch_y), loss))
            img = np.concatenate(
                (batch_x[..., 0], batch_y[..., 0], prediction[..., 0]), axis=3)
            util.save_image(
                util.to_rgb(img[0, 0, ...]),
                "%s/%s.jpg" % (self.prediction_path, name + '_0_0'))
            # for i in range(0,img.shape[0]):
            #     for j in range(0,img.shape[1]):
            #         util.save_image(util.to_rgb(img[i,0,...]), "%s/%s.jpg" % (self.prediction_path, name+'_'+str(i)+'_'+str(j)))

        else:
            loss = sess.run(self.net.cost,
                            feed_dict={
                                self.net.x: batch_x,
                                self.net.y:
                                util.crop_to_shape(batch_y, pred_shape),
                                self.net.keep_prob: 1.
                            })
            logging.info("Verification error= {:.1f}%, loss= {:.4f}".format(
                error_rate(prediction,
                           util.crop_to_shape(batch_y, prediction.shape)),
                loss))

            img = util.combine_img_prediction(batch_x, batch_y, prediction)
            util.save_image(img, "%s/%s.jpg" % (self.prediction_path, name))

        return pred_shape
Example #13
    def store_prediction(self, sess, batch_x, batch_y, name):
        prediction = sess.run(self.net.predicter, feed_dict={self.net.x: batch_x,
                                                             self.net.y: batch_y,
                                                             self.net.keep_prob: 1.})
        pred_shape = prediction.shape

        loss = sess.run(self.net.cost, feed_dict={self.net.x: batch_x,
                                                  self.net.y: util.crop_to_shape(batch_y, pred_shape),
                                                  self.net.keep_prob: 1.})

        logging.info("Verification error= {:.1f}%, loss= {:.4f}".format(error_rate(prediction,
                                                                                   util.crop_to_shape(batch_y,
                                                                                                      prediction.shape)),
                                                                        loss))

        img = util.combine_img_prediction(batch_x, batch_y, prediction)
        util.save_image(img, "%s/%s.jpg" % (self.prediction_path, name))

        return pred_shape
Example #14
    def store_prediction(self, sess, batch_x, batch_y, name):
        prediction = sess.run(self.net.predicter,
                              feed_dict={
                                  self.net.x: batch_x,
                                  self.net.y: batch_y,
                                  self.net.keep_prob: 1.
                              })
        pred_shape = prediction.shape

        loss = sess.run(self.net.cost,
                        feed_dict={
                            self.net.x: batch_x,
                            self.net.y:
                            util.crop_to_shape(batch_y, pred_shape),
                            self.net.keep_prob: 1.
                        })

        img = util.combine_img_prediction(batch_x, batch_y, prediction)
        util.save_image(img, "%s/%s.jpg" % (self.prediction_path, name))

        return pred_shape
Example #15
 def store_prediction(self, sess, batch_x, batch_y, name):
     prediction = sess.run(self.net.predicter, feed_dict={self.net.x: batch_x, 
                                                          self.net.y: batch_y, 
                                                          self.net.keep_prob: 1.})
     pred_shape = prediction.shape
     # print("pred shape = ", pred_shape)
     # fig, ax = plt.subplots(1, 5)
     # ax[0].imshow(batch_x[0,:,:,0],cmap=plt.cm.gray)
     # ax[1].imshow(prediction[0,:,:,0],cmap=plt.cm.gray)
     # ax[2].imshow(prediction[0,:,:,1],cmap=plt.cm.gray)
     # ax[3].imshow(batch_y[0,:,:,0],cmap=plt.cm.gray)
     # ax[4].imshow(batch_y[0,:,:,1],cmap=plt.cm.gray)
     # plt.show()
     
     loss = sess.run(self.net.cost, feed_dict={self.net.x: batch_x, 
                                                    self.net.y: util.crop_to_shape(batch_y, pred_shape), 
                                                    self.net.keep_prob: 1.})
     
     logging.info("Verification error= {:.1f}%, loss= {:.4f}".format(error_rate(prediction,
                                                                       util.crop_to_shape(batch_y,
                                                                                          prediction.shape)),
                                                                       loss))
     diceScore = dice_score(prediction,util.crop_to_shape(batch_y,prediction.shape))
     logging.info("Dice score= {:.2f}".format(diceScore))
     # add data to plotter
     self.plotter.updateLogger(diceScore,"dice score")
     self.plotter.updateLogger(loss,"validation loss")
     er = error_rate(prediction,util.crop_to_shape(batch_y,prediction.shape))
     self.plotter.updateLogger(er,"validation error")
     # 
     # sample from validation data
     sampleSizeValid = 10
     img = util.combine_img_prediction(batch_x[0:sampleSizeValid,...],\
      batch_y[0:sampleSizeValid,...], prediction[0:sampleSizeValid,...])
     util.save_image(img, "%s/%s.jpg"%(self.prediction_path, name))
     
     return pred_shape
Example #16
 def store_prediction(self, sess, batch_x, batch_y, name):
     prediction = sess.run(self.net.predicter, feed_dict={self.net.x: batch_x, 
                                                          self.net.y: batch_y, 
                                                          self.net.keep_prob: 1.})
     pred_shape = prediction.shape
     
     loss = sess.run(self.net.cost, feed_dict={self.net.x: batch_x, 
                                                    self.net.y: util.crop_to_shape(batch_y, pred_shape), 
                                                    self.net.keep_prob: 1.})
     
     logging.info("Verification loss= {:.4f}".format(loss))
           
     # img = util.combine_img_prediction(batch_x, batch_y, prediction)
     # util.save_image(img, "%s/%s.jpg"%(self.prediction_path, name))
     img_0, img_1, img_2, img_3, img_4, img_5 = util.combine_img_prediction(batch_x, batch_y, prediction)
     util.save_image(img_0, "%s/%s_1.jpg" % (self.prediction_path, name))
     util.save_image(img_1, "%s/%s_2.jpg" % (self.prediction_path, name))
     util.save_image(img_2, "%s/%s_3.jpg" % (self.prediction_path, name))
     util.save_image(img_3, "%s/%s_4.jpg" % (self.prediction_path, name))
     util.save_image(img_4, "%s/%s_5.jpg" % (self.prediction_path, name))
     util.save_image(img_5, "%s/%s_6.jpg" % (self.prediction_path, name))
     
     # return pred_shape
     # By XY
     return pred_shape, prediction
Example #17
# In[9]:

trainer = Trainer(net, optimizer="momentum", opt_kwargs=dict(momentum=0.2))

# In[8]:

data_provider = image_util.ImageDataProvider(
    "C:/Users/orange/Desktop/data/*.tif")

# In[9]:

path = trainer.train(data_provider,
                     "C:/Users/orange/Desktop/out",
                     training_iters=4,
                     epochs=4,
                     display_step=2)

# In[10]:

x_test, y_test = data_provider(13)

# In[11]:

prediction = net.predict("C:/Users/orange/Desktop/out/model.cpkt", x_test)

# In[12]:

error_rate(prediction, util.crop_to_shape(y_test, prediction.shape))
img = util.combine_img_prediction(x_test, y_test, prediction)
util.save_image(img, "C:/Users/orange/Desktop/out/prediction.jpg")
Example #18
    nx = 572  # assumed to mirror ny; the original nx assignment is missing from this snippet
    ny = 572
     
    training_iters = 20
    epochs = 100
    dropout = 0.75 # Dropout, probability to keep units
    display_step = 2
    restore = False
 
    generator = image_gen.get_image_gen_rgb(nx, ny, cnt=20)
    
    net = unet.Unet(channels=generator.channels, n_class=generator.n_class, layers=3, features_root=16)
    
    trainer = unet.Trainer(net, optimizer="momentum", opt_kwargs=dict(momentum=0.2))
    path = trainer.train(generator, "./unet_trained", 
                         training_iters=training_iters, 
                         epochs=epochs, 
                         dropout=dropout, 
                         display_step=display_step, 
                         restore=restore)
     
    x_test, y_test = generator(4)
    prediction = net.predict(path, x_test)
     
    print("Testing error rate: {:.2f}%".format(unet.error_rate(prediction, util.crop_to_shape(y_test, prediction.shape))))
    
    import numpy as np
    np.savetxt("prediction.txt", prediction[..., 1].reshape(-1, prediction.shape[2]))
    
    img = util.combine_img_prediction(x_test, y_test, prediction)
    util.save_image(img, "prediction.jpg")
Example #19
print("Error rate = %f" % unet.error_rate(
    predictions, util.crop_to_shape(y_tests, predictions.shape)))
indexes = range(x_tests.shape[0])
for idx in indexes:
    print("\tProcessing %d-th test image..." % idx)

    x_test = x_tests[idx].reshape(1, x_tests[idx].shape[0],
                                  x_tests[idx].shape[1], x_tests[idx].shape[2])
    y_test = y_tests[idx].reshape(1, y_tests[idx].shape[0],
                                  y_tests[idx].shape[1], y_tests[idx].shape[2])
    prediction = predictions[idx].reshape(1, predictions[idx].shape[0],
                                          predictions[idx].shape[1],
                                          predictions[idx].shape[2])

    img = util.combine_img_prediction(x_test, y_test, prediction)
    util.save_image(img,
                    prediction_dir_path + "/prediction" + str(idx) + ".jpg")

#indexes = range(x_tests.shape[0])
#for idx in indexes:
#	print("Processing %d-th test image..." % idx)
#
#	x_test = x_tests[idx].reshape(1, x_tests[idx].shape[0], x_tests[idx].shape[1], x_tests[idx].shape[2])
#	y_test = y_tests[idx].reshape(1, y_tests[idx].shape[0], y_tests[idx].shape[1], y_tests[idx].shape[2])
#
#	prediction = net.predict(model_filepath, x_test)
#	#Image.fromarray(prediction[0,:,:,0], mode='F').show()  % Error: not correct.
#	#Image.fromarray(prediction[0,:,:,1], mode='F').show()  % Error: not correct.
#
#	print("Error rate = %f" % unet.error_rate(prediction, util.crop_to_shape(y_test, prediction.shape)))
#
#	img = util.combine_img_prediction(x_test, y_test, prediction)
Example #20
                        opt,
                        loss,
                        acc,
                        train_output,
                        learning_rate,
                        global_step,
                    ],
                                 feed_dict=feed_dict)

                    # del input_data, gt_data, cbcr_data

                print(output.shape)
                img = util.combine_img_prediction(input_data, gt_data,
                                                  output + input_data)
                name = 'epoch_%s' % epoch
                util.save_image(img, '%s/%s.jpg' % (prediction_path, name))
                print('[epoch %2.4f] loss %.4f\t acc %.4f\t lr %.7f'
                      % (epoch + float(step) * BATCH_SIZE
                         / len(train_list), np.sum(l), accuracy, lr))
                psnr_bicub = psnr(input_data, gt_data, 0)
                psnr_vdsr = psnr(output + input_data, gt_data, 0)
                print('PSNR: bicubic %f\tU-NET %f' % (psnr_bicub, psnr_vdsr))

                # print "[epoch %2.4f] loss %.4f\t lr %.7f"%(epoch+(float(step)*BATCH_SIZE/len(train_list)), np.sum(l)/BATCH_SIZE, lr)

                # saver.save(sess, "./checkpoints/VDSR_const_clip_0.01_epoch_%03d.ckpt" % epoch ,global_step=global_step)

                saver.save(sess, ckpt_path)

                # test_VDSR(epoch, ckpt_path, TEST_DATA_PATH)
Example #21
def main():

    # input training and test datasets
    train_data = image_util.ImageDataProvider(
        search_path='RoadDetection_Train_Images')
    test_data = image_util.ImageDataProvider(
        search_path='RoadDetection_Test_Images')
    #
    # # train u-net
    # net = unet.Unet(layers=5, n_class=train_data.n_class, channels=train_data.channels, features_root=64, cost='dice_coefficient', cost_kwargs=dict(regularizer=0.01))
    net = unet.Unet(layers=4,
                    n_class=train_data.n_class,
                    channels=train_data.channels,
                    features_root=48,
                    cost='dice_coefficient',
                    cost_kwargs={
                        'regularizer': 0.01,
                        'class_weights': [0.1777, 0.8222]
                    })

    x_test, y_test = test_data(10)

    # save prediction masks in TIFF format
    data_files = [d for d in test_data.data_files if d.split('.')[-1] == 'jpg']
    data_files.sort()
    for i, name in enumerate(data_files):
        file_name = name.split('\\')[1].split('.')[0]
        prediction = net.predict(model_path="./unet_trained/model.ckpt",
                                 x_test=x_test[i].reshape(-1, 600, 400, 3))
        ny = prediction.shape[2]
        img = to_rgb(prediction[..., 1])
        im.fromarray(img[0].round().astype(np.uint8)).save(
            r'RoadDetection_Test_Predictions\\{}_mask.tif'.format(file_name),
            'TIFF',
            dpi=[300, 300],
            quality=90)

    # save prediction masks in JPEG format
    data_files = [d for d in test_data.data_files if d.split('.')[-1] == 'jpg']
    data_files.sort()
    for i, name in enumerate(data_files):
        file_name = name.split('\\')[1].split('.')[0]
        prediction = net.predict(model_path="./unet_trained/model.ckpt",
                                 x_test=x_test[i].reshape(-1, 600, 400, 3))
        ny = prediction.shape[2]
        img = to_rgb(prediction[..., 1])
        im.fromarray(img[0].round().astype(np.uint8)).save(
            r'RoadDetection_Test_Predictions\\{}_mask.jpg'.format(file_name),
            'JPEG',
            dpi=[300, 300],
            quality=90)

    # predict mask from training data for presentation
    x_val, y_val = train_data(1)
    prediction = net.predict(model_path="./unet_trained/model.ckpt",
                             x_test=x_val)

    fig, ax = plt.subplots(1, 3, figsize=(12, 5))
    ax[0].imshow(x_val[0, ..., 0], aspect="auto")
    ax[1].imshow(y_val[0, ..., 1], aspect="auto")
    pred = np.squeeze(prediction[0, ..., 1])
    ax[2].imshow(pred, aspect="auto")
    ax[0].set_title("Input")
    ax[1].set_title("Ground truth")
    ax[2].set_title("Prediction")
    fig.tight_layout()

    # predict mask from test data for presentation
    x_val, y_val = test_data(1)
    prediction = net.predict(model_path="./unet_trained/model.ckpt",
                             x_test=x_val)

    fig, ax = plt.subplots(1, 3, figsize=(12, 5))
    ax[0].imshow(x_val[0, ..., 0], aspect="auto")
    ax[1].imshow(y_val[0, ..., 1], aspect="auto")
    pred = np.squeeze(prediction[0, ..., 1])
    ax[2].imshow(pred, aspect="auto")
    ax[0].set_title("Input")
    ax[1].set_title("Ground truth")
    ax[2].set_title("Prediction")
    fig.tight_layout()

    # presentation method 2
    x_test, y_test = test_data(10)
    img = util.combine_img_prediction(x_test, y_test, prediction)
    util.save_image(img, "test_pred_image.jpg")

    util.plot_prediction(x_test=x_train, y_test=x_train, prediction=validation)
    print('Process completed.')
Example #22
def TestData(net , Test_Path , Train_Path , padSize):

    TestImageNum = 7

    Trained_Model_Path = Train_Path + 'model/model.cpkt'
    TestResults_Path   = Test_Path  + 'results/'

    try:
        os.stat(TestResults_Path)
    except:
        os.makedirs(TestResults_Path)

    AllImage_logical = np.zeros((1924,1924))
    AllImage = np.zeros((1924,1924))

    trainer = unet.Trainer(net)

    TestData = image_util.ImageDataProvider(  Test_Path + '*.tif' , shuffle_data=False)

    L = len(TestData.data_files)
    DiceCoefficient  = np.zeros(L)
    LogLoss  = np.zeros(L)
    # BB_Cord = np.zeros(L,3)
    BB_Cord = np.zeros((L,2))


    aa = TestData.data_files
    for BB_ind in range(L):
    # BB_ind = 1
        bb = aa[BB_ind]
        d = bb.find('/img')
        cc = bb[d:len(bb)-4]
        dd = cc.split('_')
        # imageName = int(dd[0])
        xdim = int(dd[1])
        ydim = int(dd[2])
        # BB_Cord[ BB_ind , : ] = [xdim,ydim,imageName]
        BB_Cord[ BB_ind , : ] = [xdim,ydim]

    Data , Label = TestData(L)


    szD = Data.shape
    szL = Label.shape

    data  = np.zeros((1,szD[1],szD[2],szD[3]))
    label = np.zeros((1,szL[1],szL[2],szL[3]))

    shiftFlag = 0
    for BB_ind in range(L):

        data[0,:,:,:]  = Data[BB_ind,:,:,:].copy()
        label[0,:,:,:] = Label[BB_ind,:,:,:].copy()

        if shiftFlag == 1:
            shiftX = 0
            shiftY = 0
            data = np.roll(data,[0,shiftX,shiftY,0])
            label = np.roll(label,[0,shiftX,shiftY,0])

        prediction = net.predict( Trained_Model_Path, data)
        PredictedSeg = prediction[0,...,1] > 0.2

        # ix, iy, ImgNum = BB_Cord[ BB_ind , : ]
        ix, iy = BB_Cord[ BB_ind , : ]
        ix = int(148*ix)
        iy = int(148*iy)
        # AllImage[148*ix:148*(ix+1) , 148*iy:148*(iy+1) ,ImgNum] = prediction[0,...,1]
        # AllImage_logical[148*ix:148*(ix+1) , 148*iy:148*(iy+1) ,ImgNum] = PredictedSeg

        AllImage[ix:148+ix , iy:148+iy] = prediction[0,...,1]
        AllImage_logical[ix:148+ix , iy:148+iy] = PredictedSeg

        # unet.error_rate(prediction, util.crop_to_shape(label, prediction.shape))

        sz = label.shape

        A = padSize // 2  # integer division so A can be used as a slice index
        imgCombined = util.combine_img_prediction(data, label, prediction)
        DiceCoefficient[BB_ind] = DiceCoefficientCalculator(PredictedSeg,label[0,A:sz[1]-A,A:sz[2]-A,1])  # 20 is for zero padding done for input
        util.save_image(imgCombined, TestResults_Path+"prediction_slice"+ str(BB_Cord[BB_ind]) + ".jpg")


        Loss = unet.error_rate(prediction,label[:,A:sz[1]-A,A:sz[2]-A,:])
        LogLoss[BB_ind] = np.log10(Loss+eps)

    np.savetxt(TestResults_Path+'DiceCoefficient.txt',DiceCoefficient)
    np.savetxt(TestResults_Path+'LogLoss.txt',LogLoss)


    im = Image.fromarray(np.uint8(AllImage))
    msk = Image.fromarray(np.uint8(AllImage_logical))

    im.save( TestResults_Path + 'PredictionSeg_'+str(TestImageNum)+'.tif')
    msk.save(TestResults_Path + 'PredictionSeg_'+str(TestImageNum)+'_Logical.tif')


    return AllImage , AllImage_logical
Example #23
# Basic usage.

from tf_unet import unet, util, image_util

dataset_home_path = "/home/sangwook/my_dataset/life_science/isbi"
train_dataset_path = dataset_home_path + "/train-volume.tif"
test_dataset_path = dataset_home_path + "/test-volume.tif"
model_output_path = dataset_home_path + "/output"

# Prepare data loading.
train_data_provider = image_util.ImageDataProvider(train_dataset_path)

#-- Setup & train.
net = unet.Unet(layers=3, features_root=64, channels=1, n_class=2)
trainer = unet.Trainer(net)

path = trainer.train(train_data_provider, model_output_path, training_iters=32, epochs=100)

#-- Verify.

#-- Test.
#test_data_provider = image_util.ImageDataProvider(test_dataset_path)
#x_test, y_test = test_data_provider(1)
x_test, y_test = train_data_provider(1)
prediction = net.predict(path, x_test)

print("Error rate: {:.2f}%".format(unet.error_rate(prediction, util.crop_to_shape(y_test, prediction.shape))))

img = util.combine_img_prediction(x_test, y_test, prediction)
util.save_image(img, dataset_home_path + "/prediction.jpg")