Example #1
    def store_prediction(self, sess, batch_x, batch_y, name):
        prediction = sess.run(self.net.predicter,
                              feed_dict={
                                  self.net.x: batch_x,
                                  self.net.y: batch_y,
                                  self.net.keep_prob: 1.
                              })
        pred_shape = prediction.shape

        loss = sess.run(self.net.cost,
                        feed_dict={
                            self.net.x: batch_x,
                            self.net.y:
                            util.crop_to_shape(batch_y, pred_shape),
                            self.net.keep_prob: 1.
                        })

        logging.info("Verification error= {:.1f}%, loss= {:.4f}".format(
            error_rate(prediction,
                       util.crop_to_shape(batch_y, prediction.shape)), loss))

        print("batch_x", batch_x.shape)
        print("batch_y", batch_y.shape)
        print("prediction", prediction.shape)
        if batch_x.shape[-1] != batch_y.shape[-1]:
            logging.warning("image and label have different number of channels")
            return pred_shape
        img = util.combine_img_prediction(batch_x, batch_y, prediction)
        util.save_image(img, "%s/%s.jpg" % (self.prediction_path, name))

        return pred_shape
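The error_rate helper used throughout these examples is never shown. A minimal sketch of what it is assumed to compute, given one-hot labels and softmax predictions of shape (batch, height, width, n_class):

import numpy as np

def error_rate(predictions, labels):
    # assumed behaviour: percentage of pixels whose argmax class
    # disagrees between the prediction and the one-hot ground truth
    correct = np.sum(np.argmax(predictions, 3) == np.argmax(labels, 3))
    total = predictions.shape[0] * predictions.shape[1] * predictions.shape[2]
    return 100.0 * (1.0 - correct / total)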
Example #2
    def store_prediction(self, sess, batch_x, batch_y, name):
        prediction = sess.run(self.net.predicter,
                              feed_dict={
                                  self.net.x: batch_x,
                                  self.net.y: batch_y,
                                  self.net.keep_prob: 1.
                              })
        pred_shape = prediction.shape

        loss = sess.run(self.net.cost,
                        feed_dict={
                            self.net.x: batch_x,
                            self.net.y:
                            util.crop_to_shape(batch_y, pred_shape),
                            self.net.keep_prob: 1.
                        })

        logging.info("Verification error= {:.1f}%, loss= {:.4f}".format(
            error_rate(prediction,
                       util.crop_to_shape(batch_y, prediction.shape)), loss))
        #filename = '/Users/imke/Downloads/tf_unet/epoch'+str(epoch)+'.txt'
        #with open (filename, 'a') as fileobj:
        # fileobj.write("Verification error= {:.1f}%, loss= {:.4f}\n".format(error_rate(prediction,
        #                                                                        util.crop_to_shape(batch_y,
        #                                                                                          prediction.shape))))
        img = util.combine_img_prediction(batch_x, batch_y, prediction)
        util.save_image(img, "%s/%s.jpg" % (self.prediction_path, name))

        return pred_shape
Example #3
    def store_prediction(self, sess, batch_x, batch_y, name):
        prediction = sess.run(self.net.predicter,
                              feed_dict={
                                  self.net.x: batch_x,
                                  self.net.y: batch_y,
                                  self.net.keep_prob: 1.
                              })

        pred_shape = prediction.shape

        loss = sess.run(self.net.cost,
                        feed_dict={
                            self.net.x: batch_x,
                            self.net.y:
                            util.crop_to_shape(batch_y, pred_shape),
                            self.net.keep_prob: 1.
                        })

        logging.info("Verification error= {:.1f}%, loss= {:.4f}".format(
            error_rate(prediction,
                       util.crop_to_shape(batch_y, prediction.shape)), loss))
        #print ('in store_prediction')
        #print ('prediction: ', prediction.shape)
        #print ('batch_x: ', type(batch_x),batch_x.shape)
        #print ('batch_y: ', type(batch_y),batch_y.shape)

        #adjust for multiple class
        #adjust for multiple channels
        img = util.combine_img_prediction(batch_x, batch_y, prediction)
        #print ('in store_pred2: ', img.shape)
        util.save_image(img, "%s/%s.jpg" % (self.prediction_path, name))
        return pred_shape
Example #4
    def store_prediction(self, sess, batch_x, batch_y, name):
        prediction = sess.run(self.net.predicter,
                              feed_dict={
                                  self.net.x: batch_x,
                                  self.net.y: batch_y,
                                  self.net.keep_prob: 1.
                              })
        pred_shape = prediction.shape

        loss = sess.run(self.net.cost,
                        feed_dict={
                            self.net.x: batch_x,
                            self.net.y:
                            util.crop_to_shape(batch_y, pred_shape),
                            self.net.keep_prob: 1.
                        })

        logging.info("Verification error= {:.1f}%, loss= {:.4f}".format(
            error_rate(prediction,
                       util.crop_to_shape(batch_y, prediction.shape)), loss))

        img = util.combine_img_prediction(batch_x, batch_y, prediction)
        util.save_image(img, "%s/%s.jpg" % (self.prediction_path, name))

        return pred_shape
Example #5
 def store_prediction(self, sess, batch_x, batch_y, name):
     """
     calculate stats on verification data
     :param sess:
     :param batch_x: test_x
     :param batch_y: test_y
     :param name: save prediction result as name
     :return:
     """
     prediction = sess.run(self.net.predicter, feed_dict={self.net.x: batch_x, 
                                                          self.net.y: batch_y, 
                                                          self.net.keep_prob: 1.})
     pred_shape = prediction.shape
     loss = sess.run(self.net.cost, feed_dict={self.net.x: batch_x, 
                                               self.net.y: util.crop_to_shape(batch_y, pred_shape),
                                               self.net.keep_prob: 1.})
     
     logging.info("Verification error= {:.1f}%, loss= {:.4f}".format(error_rate(prediction,
                                                                     util.crop_to_shape(batch_y,
                                                                                        prediction.shape)),
                                                                     loss))
           
     img = util.combine_img_prediction(batch_x, batch_y, prediction)
     util.save_image(img, "%s/%s.jpg"%(self.prediction_path, name))
     
     return pred_shape
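util.crop_to_shape appears in every variant to shrink the ground truth to the valid-padding output size. A sketch of the centre crop it is assumed to perform:

import numpy as np

def crop_to_shape(data, shape):
    # centre-crop the spatial axes of a (batch, height, width, channels)
    # array down to shape[1] x shape[2]; a sketch, not the library source
    off0 = (data.shape[1] - shape[1]) // 2
    off1 = (data.shape[2] - shape[2]) // 2
    if off0 == 0 and off1 == 0:
        return data
    return data[:, off0:off0 + shape[1], off1:off1 + shape[2]]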
Example #6
def launch(data_root, output_path, training_iters, epochs, restore, layers,
           features_root):
    data_provider = DataProvider(572, data_root)

    data, label = data_provider(1)
    weights = None  #(1/3) / (label.sum(axis=2).sum(axis=1).sum(axis=0) / data.size)

    net = unet.Unet(
        channels=data_provider.channels,
        n_class=data_provider.n_class,
        layers=layers,
        features_root=features_root,
        cost_kwargs=dict(regularizer=0.001, class_weights=weights),
    )

    path = output_path if restore else util.create_training_path(output_path)

    trainer = unet.Trainer(net, optimizer="adam", opt_kwargs=dict(beta1=0.91))
    path = trainer.train(data_provider,
                         path,
                         training_iters=training_iters,
                         epochs=epochs,
                         dropout=0.5,
                         display_step=2,
                         restore=restore)

    prediction = net.predict(path, data)

    print("Testing error rate: {:.2f}%".format(
        unet.error_rate(prediction, util.crop_to_shape(label,
                                                       prediction.shape))))
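These launch functions are typically driven from a small command-line entry point. A hypothetical argparse wrapper (all flag names are illustrative, not part of the original script):

import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Train a tf_unet model")
    parser.add_argument("--data_root", required=True)
    parser.add_argument("--output_path", default="./unet_trained")
    parser.add_argument("--training_iters", type=int, default=32)
    parser.add_argument("--epochs", type=int, default=100)
    parser.add_argument("--restore", action="store_true")
    parser.add_argument("--layers", type=int, default=3)
    parser.add_argument("--features_root", type=int, default=16)
    args = parser.parse_args()
    launch(args.data_root, args.output_path, args.training_iters,
           args.epochs, args.restore, args.layers, args.features_root)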
Example #7
 def _store_prediction(sess, batch_x, batch_y, name, predicter, prediction_path, cost, x, y, keep_prob):
     prediction = sess.run(predicter, 
             feed_dict={x: batch_x, y: batch_y, keep_prob: 1.})
     pred_shape = prediction.shape
     
     loss = sess.run(cost, feed_dict={x: batch_x, 
             y: util.crop_to_shape(batch_y, pred_shape), keep_prob: 1.})
     
     logging.info("Verification error= {:.1f}%, loss= {:.4f}".format(
             Trainer._error_rate(prediction, util.crop_to_shape(batch_y,
             prediction.shape)), loss))
           
     img = util.combine_img_prediction(batch_x, batch_y, prediction)
     util.save_image(img, "%s/%s.jpg"%(prediction_path, name))
     
     return pred_shape
Example #8
def launch(data_root, output_path, training_iters, epochs, restore, layers,
           features_root):
    print("Using data from: %s" % data_root)
    data_provider = ultrasound_util.DataProvider(data_root + "/*.tif",
                                                 a_min=0,
                                                 a_max=210)
    net = unet.Unet(
        channels=data_provider.channels,
        n_class=data_provider.n_class,
        layers=layers,
        features_root=features_root,
        cost="dice_coefficient",
    )

    path = output_path if restore else create_training_path(output_path)
    trainer = unet.Trainer(net, norm_grads=True, optimizer="adam")
    path = trainer.train(data_provider,
                         path,
                         training_iters=training_iters,
                         epochs=epochs,
                         dropout=0.5,
                         display_step=2,
                         restore=restore)

    x_test, y_test = data_provider(1)
    prediction = net.predict(path, x_test)

    print("Testing error rate: {:.2f}%".format(
        unet.error_rate(prediction, util.crop_to_shape(y_test,
                                                       prediction.shape))))
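create_training_path (called as util.create_training_path in some examples) is assumed to allocate a fresh run directory so that repeated trainings do not overwrite each other. A sketch:

import os

def create_training_path(output_path):
    # pick the first run_NNN directory under output_path that does
    # not exist yet; an assumed implementation
    idx = 0
    path = os.path.join(output_path, "run_{:03d}".format(idx))
    while os.path.exists(path):
        idx += 1
        path = os.path.join(output_path, "run_{:03d}".format(idx))
    return path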
Example #9
def launch(data_root, output_path, training_iters, epochs, restore, layers,
           features_root):
    print("Using data from: %s" % data_root)
    data_provider = DataProvider(600, glob.glob(data_root + "/*"))

    net = unet.Unet(
        channels=data_provider.channels,
        n_class=data_provider.n_class,
        layers=layers,
        features_root=features_root,
        cost_kwargs=dict(regularizer=0.001),
    )

    path = output_path if restore else create_training_path(output_path)
    trainer = unet.Trainer(net,
                           optimizer="momentum",
                           opt_kwargs=dict(momentum=0.2))
    path = trainer.train(data_provider,
                         path,
                         training_iters=training_iters,
                         epochs=epochs,
                         dropout=0.5,
                         display_step=2,
                         restore=restore)

    x_test, y_test = data_provider(1)
    prediction = net.predict(path, x_test)

    print("Testing error rate: {:.2f}%".format(
        unet.error_rate(prediction, util.crop_to_shape(y_test,
                                                       prediction.shape))))
Example #10
def launch(data_root, output_path, training_iters, epochs, restore, layers, features_root):
    print("Using data from: %s"%data_root)
    data_provider = ultrasound_util.DataProvider(data_root + "/*.tif", 
                                      a_min=0, 
                                      a_max=210)
    net = unet.Unet(channels=data_provider.channels, 
                    n_class=data_provider.n_class, 
                    layers=layers, 
                    features_root=features_root,
                    cost="dice_coefficient",
                    )
    
    path = output_path if restore else create_training_path(output_path)
    trainer = unet.Trainer(net, norm_grads=True, optimizer="adam")
    path = trainer.train(data_provider, path, 
                         training_iters=training_iters, 
                         epochs=epochs, 
                         dropout=0.5, 
                         display_step=2, 
                         restore=restore)
     
    x_test, y_test = data_provider(1)
    prediction = net.predict(path, x_test)
     
    print("Testing error rate: {:.2f}%".format(unet.error_rate(prediction, util.crop_to_shape(y_test, prediction.shape))))
Example #11
def launch(data_root, output_path, training_iters, epochs, restore, layers, features_root):
    print("Using data from: %s"%data_root)
    data_provider = DataProvider(600, glob.glob(data_root+"/*"))
    
    net = unet.Unet(channels=data_provider.channels, 
                    n_class=data_provider.n_class, 
                    layers=layers, 
                    features_root=features_root,
                    add_regularizers=True,
#                     filter_size=5
                    )
    
    path = output_path if restore else create_training_path(output_path)
    trainer = unet.Trainer(net, optimizer="momentum", opt_kwargs=dict(momentum=0.2))
    path = trainer.train(data_provider, path, 
                         training_iters=training_iters, 
                         epochs=epochs, 
                         dropout=0.5, 
                         display_step=2, 
                         restore=restore)
     
    x_test, y_test = data_provider(1)
    prediction = net.predict(path, x_test)
     
    print("Testing error rate: {:.2f}%".format(unet.error_rate(prediction, util.crop_to_shape(y_test, prediction.shape))))
    
#     import numpy as np
#     np.save("prediction", prediction[0, ..., 1])
    
    img = util.combine_img_prediction(x_test, y_test, prediction)
    util.save_image(img, "prediction.jpg")
Example #12
def launch(data_root, output_path, training_iters, epochs, restore, layers, features_root):
    generator = Generator(572, data_root)
    
    data, label = generator(1)
    weights = None#(1/3) / (label.sum(axis=2).sum(axis=1).sum(axis=0) / data.size)
    
    net = unet.Unet(channels=generator.channels, 
                    n_class=generator.n_class, 
                    layers=layers, 
                    features_root=features_root,
                    add_regularizers=True,
                    class_weights=weights,
#                     filter_size=5
                    )
    
    path = output_path if restore else create_training_path(output_path)
#     trainer = unet.Trainer(net, optimizer="momentum", opt_kwargs=dict(momentum=0.2))
    trainer = unet.Trainer(net, optimizer="adam", opt_kwargs=dict(beta1=0.91))
    path = trainer.train(generator, path, 
                         training_iters=training_iters, 
                         epochs=epochs, 
                         dropout=0.5, 
                         display_step=2, 
                         restore=restore)
     
    prediction = net.predict(path, data)
     
    print("Testing error rate: {:.2f}%".format(unet.error_rate(prediction, util.crop_to_shape(label, prediction.shape))))
    
#     import numpy as np
#     np.save("prediction", prediction[0, ..., 1])
    
    img = util.combine_img_prediction(data, label, prediction)
    util.save_image(img, "prediction.jpg")
Example #13
  def save_prediction_color_code(ground_truth, prediction, save_path,
                                 filename):
    color_code_dict = [
      [0, 0, 0],  # black
      # [0, 0, 0],  # black
      [1, 0, 0],  # red
      [1, 0.4392156863, 0],  # orange
      [1, 1, 1],  # white
      [1, 0, 1],  # magenta
      [0, 0, 1],  # blue
      [0, 1, 0],  # green
      [0, 1, 1]  # cyan
      ]

    # Crop ground truth image
    crop_gt = util.crop_to_shape(ground_truth, prediction.shape)

    # Argmax to remove one-hot encoding
    gt_categorical = np.argmax(crop_gt, axis=3).squeeze()
    pr_categorical = np.argmax(prediction, axis=3).squeeze()

    gt_mat = np.zeros(gt_categorical.shape + (3,))
    pr_mat = np.zeros(pr_categorical.shape + (3,))

    for num in range(len(color_code_dict)):
      gt_mat[gt_categorical == num, :] = color_code_dict[num]
      pr_mat[pr_categorical == num, :] = color_code_dict[num]

    if not os.path.exists(save_path):
      os.makedirs(save_path)

    imsave(os.path.join(save_path, filename + '_gt.png'), gt_mat)
    imsave(os.path.join(save_path, filename + '_pr.png'), pr_mat)
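A hypothetical call with dummy one-hot tensors, to show the shapes save_prediction_color_code expects (the ground truth may be larger than the prediction, as with a valid-padding U-Net; util, imsave and os from the helper's own module are assumed in scope):

import numpy as np

gt = np.eye(2)[np.random.randint(0, 2, size=(1, 68, 68))]  # (1, 68, 68, 2)
pr = np.eye(2)[np.random.randint(0, 2, size=(1, 60, 60))]  # (1, 60, 60, 2)
save_prediction_color_code(gt, pr, "./color_maps", "sample_0")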
Example #14
def launch(data_root, output_path, training_iters, epochs, restore, layers, features_root):
    data_provider = DataProvider(572, data_root)
    
    data, label = data_provider(1)
    weights = None#(1/3) / (label.sum(axis=2).sum(axis=1).sum(axis=0) / data.size)
    
    net = unet.Unet(channels=data_provider.channels, 
                    n_class=data_provider.n_class, 
                    layers=layers, 
                    features_root=features_root,
                    cost_kwargs=dict(regularizer=0.001,
                                     class_weights=weights),
                    )
    
    path = output_path if restore else create_training_path(output_path)
    trainer = unet.Trainer(net, optimizer="adam", opt_kwargs=dict(beta1=0.91))
    path = trainer.train(data_provider, path, 
                         training_iters=training_iters, 
                         epochs=epochs, 
                         dropout=0.5, 
                         display_step=2, 
                         restore=restore)
     
    prediction = net.predict(path, data)
     
    print("Testing error rate: {:.2f}%".format(unet.error_rate(prediction, util.crop_to_shape(label, prediction.shape))))
Example #15
    def store_prediction(self, sess, batch_x, batch_y, name):
        #print('track 1')
        prediction = sess.run(self.net.predicter,
                              feed_dict={
                                  self.net.x: batch_x,
                                  self.net.y: batch_y,
                                  self.net.keep_prob: 1.
                              })
        #print('track 2')
        pred_shape = prediction.shape

        loss = sess.run(self.net.cost,
                        feed_dict={
                            self.net.x: batch_x,
                            self.net.y: batch_y,
                            self.net.keep_prob: 1.
                        })
        #print('track 3')
        logging.info("Verification error= {:.1f}%, loss= {:.4f}".format(
            error_rate(prediction,
                       util.crop_to_shape(batch_y, prediction.shape)), loss))
        # print('track 4')
        #print('this is shape batch x: ' + str(batch_x.shape))
        #print('this is shape batch y: ' + str(batch_y.shape))
        #print('this is shape prediction: ' + str(prediction.shape))
        #img = util.combine_img_prediction(batch_x, batch_y, prediction)
        #print('track 5')
        #util.save_image(img, "%s/%s.jpg"%(self.prediction_path, name))

        return pred_shape
Example #16
 def store_prediction(self, sess, batch_x, batch_y, name):
     prediction = sess.run(self.net.predicter, feed_dict={self.net.x: batch_x, 
                                                          self.net.y: batch_y, 
                                                          self.net.keep_prob: 1.})
     pred_shape = prediction.shape
     
     loss = sess.run(self.net.cost, feed_dict={self.net.x: batch_x, 
                                                    self.net.y: util.crop_to_shape(batch_y, pred_shape), 
                                                    self.net.keep_prob: 1.})
     
     logging.info("Verification loss= {:.4f}".format(loss))
           
     # img = util.combine_img_prediction(batch_x, batch_y, prediction)
     # util.save_image(img, "%s/%s.jpg"%(self.prediction_path, name))
     img_0, img_1, img_2, img_3, img_4, img_5 = util.combine_img_prediction(batch_x, batch_y, prediction)
     util.save_image(img_0, "%s/%s_1.jpg" % (self.prediction_path, name))
     util.save_image(img_1, "%s/%s_2.jpg" % (self.prediction_path, name))
     util.save_image(img_2, "%s/%s_3.jpg" % (self.prediction_path, name))
     util.save_image(img_3, "%s/%s_4.jpg" % (self.prediction_path, name))
     util.save_image(img_4, "%s/%s_5.jpg" % (self.prediction_path, name))
     util.save_image(img_5, "%s/%s_6.jpg" % (self.prediction_path, name))
     
     # return pred_shape
     # By XY
     return pred_shape, prediction
Example #17
 def eval(self, sess, batch_x, batch_y, pred_shape):
     # evaluate the verification loss for one batch
     loss, = sess.run(
         (self.net.cost, ),
         feed_dict={
             self.net.x: batch_x,
             self.net.y: util.crop_to_shape(batch_y, pred_shape),
             self.net.keep_prob: 1.0
         })
     return loss
Example #18
    def store_prediction(self, sess, batch_x, batch_y, name):
        prediction = sess.run(self.net.predicter,
                              feed_dict={
                                  self.net.x: batch_x,
                                  self.net.y: batch_y,
                                  self.net.keep_prob: 1.
                              })
        pred_shape = prediction.shape

        if len(batch_x.shape) == 5:
            loss = sess.run(self.net.cost,
                            feed_dict={
                                self.net.x: batch_x,
                                self.net.y: batch_y,
                                self.net.keep_prob: 1.
                            })
            logging.info("Verification error= {:.1f}%, loss= {:.4f}".format(
                error_rate(prediction, batch_y), loss))
            img = np.concatenate(
                (batch_x[..., 0], batch_y[..., 0], prediction[..., 0]), axis=3)
            util.save_image(
                util.to_rgb(img[0, 0, ...]),
                "%s/%s.jpg" % (self.prediction_path, name + '_0_0'))
            # for i in range(0,img.shape[0]):
            #     for j in range(0,img.shape[1]):
            #         util.save_image(util.to_rgb(img[i,0,...]), "%s/%s.jpg" % (self.prediction_path, name+'_'+str(i)+'_'+str(j)))

        else:
            loss = sess.run(self.net.cost,
                            feed_dict={
                                self.net.x: batch_x,
                                self.net.y:
                                util.crop_to_shape(batch_y, pred_shape),
                                self.net.keep_prob: 1.
                            })
            logging.info("Verification error= {:.1f}%, loss= {:.4f}".format(
                error_rate(prediction,
                           util.crop_to_shape(batch_y, prediction.shape)),
                loss))

            img = util.combine_img_prediction(batch_x, batch_y, prediction)
            util.save_image(img, "%s/%s.jpg" % (self.prediction_path, name))

        return pred_shape
Example #19
    def store_prediction(self, sess, batch_x, batch_y, name):
        prediction = sess.run(self.net.predicter, feed_dict={self.net.x: batch_x,
                                                             self.net.y: batch_y,
                                                             self.net.keep_prob: 1.})
        pred_shape = prediction.shape

        loss = sess.run(self.net.cost, feed_dict={self.net.x: batch_x,
                                                  self.net.y: util.crop_to_shape(batch_y, pred_shape),
                                                  self.net.keep_prob: 1.})

        logging.info("Verification error= {:.1f}%, loss= {:.4f}".format(error_rate(prediction,
                                                                                   util.crop_to_shape(batch_y,
                                                                                                      prediction.shape)),
                                                                        loss))

        img = util.combine_img_prediction(batch_x, batch_y, prediction)
        util.save_image(img, "%s/%s.jpg" % (self.prediction_path, name))

        return pred_shape
Example #20
    def store_prediction(self, sess, batch_x, batch_y, name):
        prediction = sess.run(self.net.predicter,
                              feed_dict={
                                  self.net.x: batch_x,
                                  self.net.y: batch_y,
                                  self.net.keep_prob: 1.,
                                  self.net.mode: False,
                                  self.learning_rate: self.lr
                              })
        pred_shape = prediction.shape
        #print(pred_shape)
        loss = sess.run(self.net.cost,
                        feed_dict={
                            self.net.x: batch_x,
                            self.net.y:
                            util.crop_to_shape(batch_y, pred_shape),
                            self.net.keep_prob: 1.,
                            self.net.mode: False,
                            self.learning_rate: self.lr
                        })
        recall_c0, recall_c1 = sess.run(
            (self.net.recall_c0, self.net.recall_c1),
            feed_dict={
                self.net.x: batch_x,
                self.net.y: util.crop_to_shape(batch_y, pred_shape),
                self.net.keep_prob: 1.,
                self.net.mode: False,
                self.learning_rate: self.lr
            })

        logging.info(
            "Verification error= {:.1f}%, loss= {:.4f}, recall_c0= {:.4f}, recall_c1= {:.4f}"
            .format(
                error_rate(prediction,
                           util.crop_to_shape(batch_y, prediction.shape)),
                loss, recall_c0, recall_c1))

        img = util.combine_img_prediction(batch_x, batch_y, prediction)
        #util.save_image(img, "%s/%s.jpg"%(self.prediction_path, name))

        return pred_shape
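recall_c0 and recall_c1 are graph tensors defined on the net elsewhere; numerically they correspond to per-class recall. A numpy stand-in, assuming argmax masks:

import numpy as np

def per_class_recall(predictions, labels, cls):
    # recall for one class: TP / (TP + FN) on argmax masks
    pred = np.argmax(predictions, axis=3) == cls
    true = np.argmax(labels, axis=3) == cls
    tp = np.logical_and(pred, true).sum()
    return tp / max(true.sum(), 1)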
Example #21
def main():

    dp = DataProvider(batchSize=BATCH_SIZE, validationSize=VALIDATION_SIZE)
    dp.readData()
    print("DONE READING DATA")
    # calculate num of iterations
    iters = dp.getTrainSize() // BATCH_SIZE
    # unet
    net = unet.Unet(channels=1, n_class=2, layers=3,
                    features_root=16, cost="cross_entropy", cost_kwargs={})

    # # trainer
    # options = {"momentum":0.2, "learning_rate":0.2,"decay_rate":0.95}

    # trainer = unet.Trainer(net, optimizer="momentum",plotter = plot, opt_kwargs=options )
    # # train model
    # path = trainer.train(dp, OUTPUT_PATH,training_iters = iters,epochs=EPOCHS,\
    # 	dropout=DROPOUT_KEEP_PROB, display_step = DISPLAY_STEP,restore = restore)

    path = os.getcwd() + "/retinaModel/model.cpkt"

    x_test, y_test = dp.getTestData(3, crop=False)
    prediction = net.predict(path, x_test)

    # # sanity check
    # fig, ax = plt.subplots(3, 3)
    # ax[0][0].imshow(x_test[0,:,:,0],cmap=plt.cm.gray)
    # ax[0][1].imshow(y_test[0,:,:,1],cmap=plt.cm.gray)
    # ax[0][2].imshow(np.argmax(prediction[0,...],axis =2),cmap=plt.cm.gray)
    # ax[1][0].imshow(x_test[1,:,:,0],cmap=plt.cm.gray)
    # ax[1][1].imshow(y_test[1,:,:,1],cmap=plt.cm.gray)
    # ax[1][2].imshow(np.argmax(prediction[1,...],axis =2),cmap=plt.cm.gray)
    # ax[2][0].imshow(x_test[2,:,:,0],cmap=plt.cm.gray)
    # ax[2][1].imshow(y_test[2,:,:,1],cmap=plt.cm.gray)
    # ax[2][2].imshow(np.argmax(prediction[2,...],axis =2),cmap=plt.cm.gray)
    # plt.show()

    # save test result as image
    # check for path
    if not os.path.lexists(OUTPUT_PATH):
        os.makedirs(OUTPUT_PATH)

    sampleSize = 3
    img = util.combine_img_prediction(x_test[0:sampleSize, ...],
                                      y_test[0:sampleSize, ...],
                                      prediction[0:sampleSize, ...])

    util.save_image(
        img, "%s/%s.jpg" % (os.getcwd() + "/" + "testResults", "testSample"))

    print("Testing error rate: {:.2f}%".format(
        unet.error_rate(prediction, util.crop_to_shape(y_test,
                                                       prediction.shape))))
Example #22
 def store_prediction(self, sess, batch_x, batch_y, name):
     prediction = sess.run(self.net.predicter, feed_dict={self.net.x: batch_x, 
                                                          self.net.y: batch_y, 
                                                          self.net.keep_prob: 1.})
     pred_shape = prediction.shape
     # print("pred shape = ", pred_shape)
     # fig, ax = plt.subplots(1, 5)
     # ax[0].imshow(batch_x[0,:,:,0],cmap=plt.cm.gray)
     # ax[1].imshow(prediction[0,:,:,0],cmap=plt.cm.gray)
     # ax[2].imshow(prediction[0,:,:,1],cmap=plt.cm.gray)
     # ax[3].imshow(batch_y[0,:,:,0],cmap=plt.cm.gray)
     # ax[4].imshow(batch_y[0,:,:,1],cmap=plt.cm.gray)
     # plt.show()
     
     loss = sess.run(self.net.cost, feed_dict={self.net.x: batch_x, 
                                                    self.net.y: util.crop_to_shape(batch_y, pred_shape), 
                                                    self.net.keep_prob: 1.})
     
     logging.info("Verification error= {:.1f}%, loss= {:.4f}".format(error_rate(prediction,
                                                                       util.crop_to_shape(batch_y,
                                                                                          prediction.shape)),
                                                                       loss))
     diceScore = dice_score(prediction,util.crop_to_shape(batch_y,prediction.shape))
     logging.info("Dice score= {:.2f}".format(diceScore))
     # add data to plotter
     self.plotter.updateLogger(diceScore,"dice score")
     self.plotter.updateLogger(loss,"validation loss")
     er = error_rate(prediction,util.crop_to_shape(batch_y,prediction.shape))
     self.plotter.updateLogger(er,"validation error")
     # sample from the validation data
     sampleSizeValid = 10
     img = util.combine_img_prediction(batch_x[0:sampleSizeValid, ...],
                                       batch_y[0:sampleSizeValid, ...],
                                       prediction[0:sampleSizeValid, ...])
     util.save_image(img, "%s/%s.jpg"%(self.prediction_path, name))
     
     return pred_shape
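dice_score is not part of stock tf_unet; a plausible sketch computing the Sørensen-Dice coefficient of the foreground class on argmax masks:

import numpy as np

def dice_score(predictions, labels, eps=1e-7):
    # Dice = 2|A ∩ B| / (|A| + |B|); an assumed implementation,
    # not the author's original helper
    pred_mask = np.argmax(predictions, axis=3) == 1
    true_mask = np.argmax(labels, axis=3) == 1
    intersection = np.logical_and(pred_mask, true_mask).sum()
    return 2.0 * intersection / (pred_mask.sum() + true_mask.sum() + eps)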
Example #23
def launch(data_root,
           roidictfile,
           output_path,
           training_iters,
           epochs,
           restore,
           layers,
           features_root,
           val=None):

    with open(roidictfile) as fh:
        roidict = yaml.safe_load(fh)

    # avoid a NameError below when no validation set is given
    val_data_provider = ImageDataProvider(val, roidict) if val else None

    data_provider = ImageDataProvider(data_root, roidict)

    data, label = data_provider(1)
    # make sure the labels are not flat
    assert np.any(
        np.asarray([label[-1, ..., nn].var()
                    for nn in range(label.shape[-1])]) > 0)

    weights = None  #(1/3) / (label.sum(axis=2).sum(axis=1).sum(axis=0) / data.size)

    net = unet.Unet(
        channels=data_provider.channels,
        n_class=data_provider.n_class,
        layers=layers,
        features_root=features_root,
        cost_kwargs=dict(regularizer=0.001, class_weights=weights),
    )

    path = output_path if restore else create_training_path(output_path)
    trainer = unet.Trainer(net, optimizer="adam", opt_kwargs=dict(beta1=0.91))
    path = trainer.train(data_provider,
                         path,
                         training_iters=training_iters,
                         epochs=epochs,
                         dropout=0.5,
                         display_step=2,
                         restore=restore,
                         val_data_provider=val_data_provider)

    prediction = net.predict(path, data)

    print("Testing error rate: {:.2f}%".format(
        unet.error_rate(prediction, util.crop_to_shape(label,
                                                       prediction.shape))))
Example #24
    def store_prediction(self, sess, batch_x, batch_y, name):
        prediction = sess.run(self.net.predicter,
                              feed_dict={
                                  self.net.x: batch_x,
                                  self.net.y: batch_y,
                                  self.net.keep_prob: 1.
                              })
        pred_shape = prediction.shape

        loss = sess.run(self.net.cost,
                        feed_dict={
                            self.net.x: batch_x,
                            self.net.y:
                            util.crop_to_shape(batch_y, pred_shape),
                            self.net.keep_prob: 1.
                        })

        img = util.combine_img_prediction(batch_x, batch_y, prediction)
        util.save_image(img, "%s/%s.jpg" % (self.prediction_path, name))

        return pred_shape
Example #25
def launch(data_root, output_path, training_iters, epochs, restore, layers,
           features_root):
    print("Using data from: %s" % data_root)

    if not os.path.exists(data_root):
        raise IOError("Kaggle Ultrasound Dataset not found")

    data_provider = DataProvider(search_path=data_root + "/*.tif",
                                 mean=100,
                                 std=56)

    net = unet.Unet(
        channels=data_provider.channels,
        n_class=data_provider.n_class,
        layers=layers,
        features_root=features_root,
        #cost="dice_coefficient",
    )

    path = output_path if restore else util.create_training_path(output_path)

    trainer = unet.Trainer(net,
                           batch_size=1,
                           norm_grads=False,
                           optimizer="adam")
    path = trainer.train(data_provider,
                         path,
                         training_iters=training_iters,
                         epochs=epochs,
                         dropout=0.5,
                         display_step=2,
                         restore=restore)

    x_test, y_test = data_provider(1)
    prediction = net.predict(path, x_test)

    print("Testing error rate: {:.2f}%".format(
        unet.error_rate(prediction, util.crop_to_shape(y_test,
                                                       prediction.shape))))
Example #26
def launch(data_root, output_path, training_iters, epochs, restore, layers, features_root):
    print("Using data from: %s"%data_root)
    data_provider = DataProvider(600, glob.glob(data_root+"/*"))
    
    net = unet.Unet(channels=data_provider.channels, 
                    n_class=data_provider.n_class, 
                    layers=layers, 
                    features_root=features_root,
                    cost_kwargs=dict(regularizer=0.001),
                    )
    
    path = output_path if restore else create_training_path(output_path)
    trainer = unet.Trainer(net, optimizer="momentum", opt_kwargs=dict(momentum=0.2))
    path = trainer.train(data_provider, path, 
                         training_iters=training_iters, 
                         epochs=epochs, 
                         dropout=0.5, 
                         display_step=2, 
                         restore=restore)
     
    x_test, y_test = data_provider(1)
    prediction = net.predict(path, x_test)
     
    print("Testing error rate: {:.2f}%".format(unet.error_rate(prediction, util.crop_to_shape(y_test, prediction.shape))))
Example #27
    def train(self,
              data_provider,
              output_path,
              training_iters=10,
              epochs=100,
              dropout=0.75,
              display_step=1,
              restore=False,
              write_graph=False):
        """
        Launches the training process

        :param data_provider: callable returning training and verification data
        :param output_path: path where to store checkpoints
        :param training_iters: number of training mini-batch iterations per epoch
        :param epochs: number of epochs
        :param dropout: dropout probability
        :param display_step: number of steps between stats outputs
        :param restore: flag if a previous model should be restored
        :param write_graph: flag if the computation graph should be written as a protobuf file to the output path
        """
        save_path = os.path.join(output_path, "model.cpkt")
        if epochs == 0:
            return save_path

        init = self._initialize(training_iters, output_path, restore)

        with tf.Session() as sess:
            if write_graph:
                tf.train.write_graph(sess.graph_def, output_path, "graph.pb",
                                     False)

            sess.run(init)

            if restore:
                ckpt = tf.train.get_checkpoint_state(output_path)
                if ckpt and ckpt.model_checkpoint_path:
                    self.net.restore(sess, ckpt.model_checkpoint_path)

            test_x, test_y = data_provider(self.verification_batch_size)
            pred_shape = self.store_prediction(sess, test_x, test_y, "_init")

            summary_writer = tf.summary.FileWriter(output_path,
                                                   graph=sess.graph)
            logging.info("Start optimization")

            avg_gradients = None
            for epoch in range(epochs):
                total_loss = 0
                for step in range((epoch * training_iters),
                                  ((epoch + 1) * training_iters)):
                    batch_x, batch_y = data_provider(self.batch_size)

                    # Run optimization op (backprop)
                    _, loss, lr, gradients = sess.run(
                        (self.optimizer, self.net.cost,
                         self.learning_rate_node, self.net.gradients_node),
                        feed_dict={
                            self.net.x: batch_x,
                            self.net.y:
                            util.crop_to_shape(batch_y, pred_shape),
                            self.net.keep_prob: dropout
                        })

                    if self.net.summaries and self.norm_grads:
                        avg_gradients = _update_avg_gradients(
                            avg_gradients, gradients, step)
                        norm_gradients = [
                            np.linalg.norm(gradient)
                            for gradient in avg_gradients
                        ]
                        self.norm_gradients_node.assign(norm_gradients).eval()

                    if step % display_step == 0:
                        self.output_minibatch_stats(
                            sess, summary_writer, step, batch_x,
                            util.crop_to_shape(batch_y, pred_shape))

                    total_loss += loss

                self.output_epoch_stats(epoch, total_loss, training_iters, lr)
                self.store_prediction(sess, test_x, test_y, "epoch_%s" % epoch)

                save_path = self.net.save(sess, save_path)
            logging.info("Optimization Finished!")

            return save_path
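The _update_avg_gradients helper referenced above is inlined verbatim in Example #38; extracted, it keeps a running mean over the per-variable gradients:

import numpy as np

def _update_avg_gradients(avg_gradients, gradients, step):
    # running average in which step n contributes with weight 1/(n+1);
    # mirrors the inline code in Example #38
    if avg_gradients is None:
        avg_gradients = [np.zeros_like(gradient) for gradient in gradients]
    for i in range(len(gradients)):
        avg_gradients[i] = (avg_gradients[i] * (1.0 - (1.0 / (step + 1)))
                            + (gradients[i] / (step + 1)))
    return avg_gradients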
Example #28
# In[9]:

trainer = Trainer(net, optimizer="momentum", opt_kwargs=dict(momentum=0.2))

# In[8]:

data_provider = image_util.ImageDataProvider(
    "C:/Users/orange/Desktop/data/*.tif")

# In[9]:

path = trainer.train(data_provider,
                     "C:/Users/orange/Desktop/out",
                     training_iters=4,
                     epochs=4,
                     display_step=2)

# In[10]:

x_test, y_test = data_provider(13)

# In[11]:

prediction = net.predict("C:/Users/orange/Desktop/out/model.cpkt", x_test)

# In[12]:

error_rate(prediction, util.crop_to_shape(y_test, prediction.shape))
img = util.combine_img_prediction(x_test, y_test, prediction)
util.save_image(img, "C:/Users/orange/Desktop/out/prediction.jpg")
Example #29
    clrs = ['b', 'r', 'g', 'k']
    trsh = np.linspace(1, 0, 300, endpoint=True)

    for fil in test_files:

        fname = fil.split('/')[-1]
        dp = rfc.DataProvider(a_min=0,
                              a_max=200,
                              files=[fil],
                              label_name='gt_mask',
                              n_class=2)
        data, mask = dp(1)

        pred, dt = net.predict(model_dir + '/model.cpkt', data, time_it=1)
        times.append(dt)
        mask = util.crop_to_shape(mask, pred.shape)

        fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(18, 8))
        ax1.imshow(data[0, :, :, 0], aspect='auto')
        ax2.imshow(mask[0, :, :, 1], aspect='auto')
        ax3.imshow(pred[0, :, :, 1], aspect='auto')

        np.save(pred_dir + fname + '_mask', mask)
        np.save(pred_dir + fname + '_pred', pred)

        plt.subplots_adjust(left=0.04, right=0.99, top=0.99, bottom=0.04)
        plt.savefig(pred_dir + fname + '.jpg', dpi=30)
        plt.close()

        y_true = mask[0, :, :, 1].reshape(-1).astype(int)
        y_score = pred[0, :, :, 1].reshape(-1)
Example #30
    nx = 572
    ny = 572
     
    training_iters = 20
    epochs = 100
    dropout = 0.75 # Dropout, probability to keep units
    display_step = 2
    restore = False
 
    generator = image_gen.get_image_gen_rgb(nx, ny, cnt=20)
    
    net = unet.Unet(channels=generator.channels, n_class=generator.n_class, layers=3, features_root=16)
    
    trainer = unet.Trainer(net, optimizer="momentum", opt_kwargs=dict(momentum=0.2))
    path = trainer.train(generator, "./unet_trained", 
                         training_iters=training_iters, 
                         epochs=epochs, 
                         dropout=dropout, 
                         display_step=display_step, 
                         restore=restore)
     
    x_test, y_test = generator(4)
    prediction = net.predict(path, x_test)
     
    print("Testing error rate: {:.2f}%".format(unet.error_rate(prediction, util.crop_to_shape(y_test, prediction.shape))))
    
    import numpy as np
    np.savetxt("prediction.txt", prediction[..., 1].reshape(-1, prediction.shape[2]))
    
    img = util.combine_img_prediction(x_test, y_test, prediction)
    util.save_image(img, "prediction.jpg")
Example #31
    def train(self, data_provider, output_path, training_iters=10, epochs=100, dropout=0.75, display_step=1,
              restore=False, write_graph=False, prediction_path='prediction'):
        """
        Launches the training process

        :param data_provider: callable returning training and verification data
        :param output_path: path where to store checkpoints
        :param training_iters: number of training mini-batch iterations per epoch
        :param epochs: number of epochs
        :param dropout: dropout probability
        :param display_step: number of steps between stats outputs
        :param restore: flag if a previous model should be restored
        :param write_graph: flag if the computation graph should be written as a protobuf file to the output path
        :param prediction_path: path where to save predictions on each epoch
        """
        save_path = os.path.join(output_path, "model.ckpt")
        if epochs == 0:
            return save_path

        init = self._initialize(training_iters, output_path, restore, prediction_path)

        with tf.Session() as sess:
            if write_graph:
                tf.train.write_graph(sess.graph_def, output_path, "graph.pb", False)

            sess.run(init)

            if restore:
                ckpt = tf.train.get_checkpoint_state(output_path)
                if ckpt and ckpt.model_checkpoint_path:
                    self.net.restore(sess, ckpt.model_checkpoint_path)

            test_x, test_y = data_provider(self.verification_batch_size)
            pred_shape = self.store_prediction(sess, test_x, test_y, "_init")

            summary_writer = tf.summary.FileWriter(output_path, graph=sess.graph)
            logging.info("Start optimization")

            avg_gradients = None
            for epoch in range(epochs):
                total_loss = 0
                for step in range((epoch * training_iters), ((epoch + 1) * training_iters)):
                    batch_x, batch_y = data_provider(self.batch_size)

                    # Run optimization op (backprop)
                    _, loss, lr, gradients = sess.run(
                        (self.optimizer, self.net.cost, self.learning_rate_node, self.net.gradients_node),
                        feed_dict={self.net.x: batch_x,
                                   self.net.y: util.crop_to_shape(batch_y, pred_shape),
                                   self.net.keep_prob: dropout})

                    if self.net.summaries and self.norm_grads:
                        avg_gradients = _update_avg_gradients(avg_gradients, gradients, step)
                        norm_gradients = [np.linalg.norm(gradient) for gradient in avg_gradients]
                        self.norm_gradients_node.assign(norm_gradients).eval()

                    if step % display_step == 0:
                        self.output_minibatch_stats(sess, summary_writer, step, batch_x,
                                                    util.crop_to_shape(batch_y, pred_shape))

                    total_loss += loss

                self.output_epoch_stats(epoch, total_loss, training_iters, lr)
                self.store_prediction(sess, test_x, test_y, "epoch_%s" % epoch)

                save_path = self.net.save(sess, save_path)
            logging.info("Optimization Finished!")

            return save_path
            
Example #32
    for i in range(n_files):
        fname = str(i)
        data = read_h5file(prefix + 'data_' + fname + '.h5')
        rfi = read_h5file(prefix + 'rfi_' + fname + '.h5')

        data = np.absolute(data)
        rfi = np.absolute(rfi)
        rfi = (rfi > dpt.threshold)[:, :, :, 0]
        print(rfi.shape)
        data, mask = dpt._process(data, rfi)
        print(mask.shape)

        pred, dt = net.predict(model_dir + '/model.cpkt', data, time_it=1)
        _, kk, jj, _ = pred.shape
        mask = util.crop_to_shape(mask, pred.shape)[:, :kk, :jj, :]

        fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(18, 8))
        ax1.imshow(data[0, :, :, 0], aspect='auto')
        ax2.imshow(mask[0, :, :, 1], aspect='auto')
        ax3.imshow(pred[0, :, :, 1], aspect='auto')

        np.save(pred_dir + fname + '_mask', mask)
        np.save(pred_dir + fname + '_pred', pred)

        plt.subplots_adjust(left=0.04, right=0.99, top=0.99, bottom=0.04)
        plt.savefig(pred_dir + fname + '.jpg', dpi=30)
        plt.close()

        y_true = mask[0, :, :, 1].reshape(-1).astype(int)
        y_score = pred[0, :, :, 1].reshape(-1)
Example #33
    def train(self, data_provider, output_path, training_iters=10, epochs=100, dropout=0.75, display_step=1,
              restore=False, write_graph=True, prediction_path='prediction', log_file='log_file.txt'):
        """
        Launches the training process

        :param data_provider: callable returning training and verification data
        :param output_path: path where to store checkpoints
        :param training_iters: number of training mini-batch iterations per epoch
        :param epochs: number of epochs
        :param dropout: dropout probability
        :param display_step: number of steps between stats outputs
        :param restore: flag if a previous model should be restored
        :param write_graph: flag if the computation graph should be written as a protobuf file to the output path
        :param prediction_path: path where to save predictions on each epoch
        :param log_file: txt file where epoch and minibatch stats are saved
        """

        log_path = os.path.join(output_path, log_file)

        save_path = os.path.join(output_path, "model.ckpt")
        if epochs == 0:
            return save_path

        init = self._initialize(training_iters, output_path, restore, prediction_path)

        with tf.Session() as sess:
            if write_graph:
                tf.train.write_graph(sess.graph_def, output_path, "graph.pb", False)

            sess.run(init)

            if restore:
                ckpt = tf.train.get_checkpoint_state(output_path)
                if ckpt and ckpt.model_checkpoint_path:
                    self.net.restore(sess, ckpt.model_checkpoint_path)

            # generator = image_util.ImageDataProvider(search_path="orniere_data/test_108p/*.jpg", data_suffix=".jpg",
            #                                          mask_suffix="_mask.jpg", shuffle_data='True')

            # test_x, test_y = generator(4)
            # test_shape = self.store_prediction_val(sess, test_x, test_y, "_init")

            val_x, val_y = data_provider(self.verification_batch_size)

            pred_shape = self.store_prediction_val(sess, val_x, val_y, "_init", log_path)

            summary_writer = tf.summary.FileWriter(output_path, graph=sess.graph)
            logging.info("Start optimization")

            avg_gradients = None
            best_loss = 100
            best_epoch = 0
            # val_diff_full = False
            # queue.Queue(10)
            for epoch in range(epochs):
                total_loss = 0
                for step in range((epoch * training_iters), ((epoch + 1) * training_iters)):
                    batch_x, batch_y = data_provider(self.batch_size)

                    # Run optimization op (backprop)
                    _, loss, lr, gradients = sess.run(
                        (self.optimizer, self.net.cost, self.learning_rate_node, self.net.gradients_node),
                        feed_dict={self.net.x: batch_x,
                                   self.net.y: util.crop_to_shape(batch_y, pred_shape),
                                   self.net.keep_prob: dropout})

                    if self.net.summaries and self.norm_grads:
                        avg_gradients = _update_avg_gradients(avg_gradients, gradients, step)
                        norm_gradients = [np.linalg.norm(gradient) for gradient in avg_gradients]
                        self.norm_gradients_node.assign(norm_gradients).eval()

                    if step % display_step == 0:
                        self.output_minibatch_stats(sess, summary_writer, step, batch_x,
                                                    util.crop_to_shape(batch_y, pred_shape), log_path)

                    total_loss += loss

                self.output_epoch_stats(epoch, total_loss, training_iters, lr, log_path)
                self.store_prediction_val(sess, val_x, val_y, "epoch_%s" % epoch, log_path)
                # self.store_prediction_test(sess, test_x, test_y, "epoch_%s" % epoch, log_path)
                print('total_loss: ', (total_loss/training_iters), 'best_loss: ', best_loss)
                if (total_loss/training_iters) < best_loss:
                    best_epoch = epoch
                    best_loss = (total_loss/training_iters)
                    save_path = self.net.save(sess, save_path)
            logging.info("Optimization Finished!")
            logging.info("Model from epoch %i saved in : %s" % (best_epoch, save_path))

            return save_path
Example #34
'''
cv2.imshow('label',label[0,...,1])
cv2.waitKey(0)
cv2.destroyAllWindows()
'''

prediction = net.predict('3.30/model.cpkt', data)
print(prediction.shape)
cv2.imshow('label', mask)
cv2.waitKey(0)
cv2.destroyAllWindows()
pred1 = prediction[0, :, :, :]
#pred2 = prediction[1,:,:,:]
#pred3 = prediction[2,:,:,:]

print(unet.error_rate(prediction, util.crop_to_shape(label, prediction.shape)))

#img = util.combine_img_prediction(data, label, prediction)
#util.save_image(img, "prediction.jpg")
'''
prediction
'''
data, label = data_provider(1)

prediction = net.predict(path, test_x)
mask = prediction[0, :, :, :]
print(label[0, :, :, :])
print(mask)
cv2.imshow('mask', mask)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #35
    def do(unet_hash, data_provider, **kwargs):
        """
        Launches the training process

        :param data_provider: callable returning training and verification data
        :param output_path: path where to store checkpoints
        :param training_iters: number of training mini-batch iterations per epoch
        :param epochs: number of epochs
        :param dropout: dropout probability
        :param display_step: number of steps between stats outputs
        :param restore: flag if a previous model should be restored
        """
        output_path = kwargs.get("output_path")
        n_class = kwargs.get("n_class")
        batch_size = kwargs.get("batch_size", 1)
        # training_iters = kwargs.get("training_iters", 10)
        training_iters = kwargs.get("training_iters", 1)
        epochs = kwargs.get("epochs", 100)
        dropout = kwargs.get("dropout", 0.75)
        display_step = kwargs.get("display_step", 5)
        is_restore = kwargs.get("is_restore", False)
        cost_options = kwargs.get("cost_options", {})
        optimizer_options = kwargs.get("optimizer_options", {"type": "momentum"})

        logits = unet_hash["logits"]
        weights_biases = unet_hash["weights_biases"]
        x = unet_hash["x"]
        y = unet_hash["y"]
        keep_prob = unet_hash["keep_prob"]

        prediction_path = "prediction"
        verification_batch_size = 4

        Trainer._clear_path(prediction_path, output_path, is_restore)
        cost = Trainer._get_cost(logits,weights_biases,
                y, n_class, cost_options)
        
        gradients_node = tf.gradients(cost, weights_biases)
        # gradients_node.size().p()
        # weights_biases.size().p()

        norm_gradients_node = tf.Variable(tf.constant(0.0, shape=[len(gradients_node)]))
        
        global_step = tf.Variable(0)
        optimizer, learning_rate_node = Trainer._get_optimizer(
                training_iters, global_step, cost, optimizer_options)

        cross_entropy = -tf.reduce_mean(tf.reshape(y, [-1, n_class])*
                tf.log(tf.clip_by_value(tf.reshape(
                Trainer._pixel_wise_softmax_2(logits), [-1, n_class]),
                1e-10,1.0)), name="cross_entropy")
        
        predicter = Trainer._pixel_wise_softmax_2(logits)
        correct_pred = tf.equal(tf.argmax(predicter, 3), tf.argmax(y, 3))
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

        # # if self.net.summaries:
        # #     tf.summary.histogram('norm_grads', self.norm_gradients_node)

        tf.summary.scalar('loss', cost)
        tf.summary.scalar('cross_entropy', cross_entropy)
        tf.summary.scalar('accuracy', accuracy)

        tf.summary.scalar('learning_rate', learning_rate_node)

        summary_op = tf.summary.merge_all()        
        init = tf.global_variables_initializer()
        
        
        save_path = os.path.join(output_path, "model.cpkt")
        with tf.Session() as sess:
            sess.run(init)
            
            if is_restore:
                ckpt = tf.train.get_checkpoint_state(output_path)
                if ckpt and ckpt.model_checkpoint_path:
                    _restore(sess, ckpt.model_checkpoint_path)
            
            test_x, test_y = data_provider(verification_batch_size)
            name = "_init"
            pred_shape = Trainer._store_prediction(sess, test_x, test_y, name, 
                    predicter, prediction_path, cost, x, y, keep_prob)
            
            # summary_writer = tf.summary.FileWriter(output_path, graph=sess.graph)

            logging.info("Start optimization")
            avg_gradients = None

            for epoch in range(epochs):
                total_loss = 0
                for step in range((epoch*training_iters), ((epoch+1)*training_iters)):
                    batch_x, batch_y = data_provider(batch_size)
                     
                    # Run optimization op (backprop)
                    _, loss, lr, gradients = sess.run(
                            (optimizer, cost, learning_rate_node, gradients_node), 
                            feed_dict={x: batch_x, y: util.crop_to_shape(batch_y, pred_shape),
                            keep_prob: dropout})

                    if avg_gradients is None:
                        avg_gradients = [np.zeros_like(gradient) for gradient in gradients]
                    for i in range(len(gradients)):
                        avg_gradients[i] = (avg_gradients[i] * 
                                (1.0 - (1.0 / (step+1)))) + (gradients[i] / (step+1))
                        
                    norm_gradients = [np.linalg.norm(gradient) for gradient in avg_gradients]
                    norm_gradients_node.assign(norm_gradients).eval()
                    
                    if step % display_step == 0:
                        new_batch_y = util.crop_to_shape(batch_y, pred_shape)
                        summary_str, loss, acc, predictions = sess.run(
                                [summary_op, cost, accuracy, predicter], 
                                feed_dict={x: batch_x, y: new_batch_y,
                                keep_prob: 1.})
                        # summary_writer.add_summary(summary_str, step)
                        # summary_writer.flush()
                        logging.info("Iter {:}, Minibatch Loss= {:.4f}, Training Accuracy= {:.4f}, Minibatch error= {:.1f}%".format(
                                step, loss, acc, Trainer._error_rate(predictions, new_batch_y)))

                    total_loss += loss

                Trainer._output_epoch_stats(epoch, total_loss, training_iters, lr)
                Trainer._store_prediction(sess, test_x, test_y, "epoch_%s"%epoch, 
                        predicter, prediction_path, cost, x, y, keep_prob)
                    
                save_path = Trainer._save(sess, save_path)
            logging.info("Optimization Finished!")
            
        return predicter
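Trainer._pixel_wise_softmax_2 is assumed to normalise the logits per pixel across the class axis. A TF1-style sketch:

import tensorflow as tf

def _pixel_wise_softmax_2(output_map):
    # softmax over the last (class) axis of a (batch, h, w, n_class)
    # logit map; a sketch of the assumed helper
    exponential_map = tf.exp(output_map)
    sum_exp = tf.reduce_sum(exponential_map, 3, keepdims=True)
    return exponential_map / sum_exp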
Example #36
from __future__ import print_function, division, absolute_import, unicode_literals
import numpy as np
from tf_unet import image_gen
from tf_unet import unet
from tf_unet import util


if __name__ == '__main__':
    np.random.seed(98765)

    generator = image_gen.GrayScaleDataProvider(nx=572, ny=572, cnt=20, rectangles=False)
    
    net = unet.Unet(channels=generator.channels, 
                    n_class=generator.n_class, 
                    layers=3,
                    features_root=16)
    
    trainer = unet.Trainer(net, optimizer="momentum", opt_kwargs=dict(momentum=0.2))
    path = trainer.train(generator, "./unet_trained",
                         training_iters=32,
                         epochs=5,
                         dropout=0.75,# probability to keep units
                         display_step=2)
     
    x_test, y_test = generator(4)
    prediction = net.predict(path, x_test)
     
    print("Testing error rate: {:.2f}%".format(unet.error_rate(prediction,
                                                               util.crop_to_shape(y_test, prediction.shape))))
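util.combine_img_prediction and util.save_image back almost every store_prediction variant. Sketches of the assumed behaviour (input, ground-truth channel and predicted channel laid side by side, then written as a JPEG); crop_to_shape is the sketch given after Example #5:

import numpy as np
from PIL import Image

def combine_img_prediction(data, gt, pred):
    # lay the cropped input, the ground-truth foreground channel and the
    # predicted foreground channel side by side as one grey-scale strip
    ny = pred.shape[2]
    data_c = crop_to_shape(data, pred.shape)
    gt_c = crop_to_shape(gt, pred.shape)
    img = np.concatenate((data_c[..., 0].reshape(-1, ny),
                          gt_c[..., 1].reshape(-1, ny),
                          pred[..., 1].reshape(-1, ny)), axis=1)
    return 255.0 * (img - img.min()) / (img.max() - img.min() + 1e-12)

def save_image(img, path):
    # write a 2-D float array as an 8-bit JPEG
    Image.fromarray(img.round().astype(np.uint8)).save(path, "JPEG", quality=90)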
Example #37
def stack_imgs(imgs, num_row, num_col):
    # tile a batch of images into a num_row x num_col grid
    imgs_row = []
    for i in range(num_row):
        imgs_row.append(
            np.concatenate(imgs[i * num_col:(i + 1) * num_col], axis=1))
    img_stack = np.concatenate(imgs_row, axis=0)
    return img_stack.astype(np.uint8)


path = tf.train.latest_checkpoint(output_path)

images = data_provider.images_origin
masks = data_provider.masks_origin
prediction = net.predict(path, images)

gts = np.argmax(masks, axis=-1)
images_crop = util.crop_to_shape(images, prediction.shape)
gts_crop = util.crop_to_shape(gts, prediction.shape)

preds_rgb = []
preds_gt = []
for i, pred in enumerate(prediction):
    h, w = pred.shape[0:2]
    label = pred.argmax(axis=-1)
    preds_gt.append(label)
    label = label.reshape([-1])
    label_rgb = np.array(label_cmap_list[label])
    label_rgb = label_rgb.reshape((h, w, 3))
    preds_rgb.append(label_rgb)

img = stack_imgs(images_crop, data_provider.num_row, data_provider.num_col)
img_gt = stack_imgs(gts_crop, data_provider.num_row, data_provider.num_col)
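label_cmap_list is assumed to map class indices to RGB rows, so that fancy indexing with the flat label vector yields an (h*w, 3) colour array as used above. For example:

import numpy as np

# hypothetical colour map: one RGB row per class index
label_cmap_list = np.array([
    [0, 0, 0],      # class 0: black
    [255, 0, 0],    # class 1: red
    [0, 255, 0],    # class 2: green
], dtype=np.uint8)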
Example #38
    def train(self,
              data_provider,
              output_path,
              training_iters=10,
              epochs=100,
              dropout=0.75,
              display_step=1,
              restore=False):

        save_path = os.path.join(output_path, "model.cpkt")
        if epochs == 0:
            return save_path

        init = self._initialize(training_iters, output_path, restore)
        with tf.Session() as sess:
            sess.run(init)

            if restore:
                ckpt = tf.train.get_checkpoint_state(output_path)
                if ckpt and ckpt.model_checkpoint_path:
                    self.net.restore(sess, ckpt.model_checkpoint_path)

            test_x, test_y = data_provider(self.verification_batch_size)
            pred_shape = self.store_prediction(sess, test_x, test_y, "_init")

            summary_writer = tf.summary.FileWriter(output_path,
                                                   graph=sess.graph)
            logging.info("Start optimization")

            avg_gradients = None
            for epoch in range(0, epochs):
                total_loss = 0
                for step in range((epoch * training_iters),
                                  ((epoch + 1) * training_iters)):
                    batch_x, batch_y = data_provider(self.batch_size)

                    # Run optimization op (backprop)
                    _, loss, lr, gradients = sess.run(
                        (self.optimizer, self.net.cost,
                         self.learning_rate_node, self.net.gradients_node),
                        feed_dict={
                            self.net.x: batch_x,
                            self.net.y:
                            util.crop_to_shape(batch_y, pred_shape),
                            self.net.keep_prob: dropout
                        })

                    if avg_gradients is None:
                        avg_gradients = [
                            np.zeros_like(gradient) for gradient in gradients
                        ]
                    for i in range(len(gradients)):
                        avg_gradients[i] = (avg_gradients[i] *
                                            (1.0 -
                                             (1.0 /
                                              (step + 1)))) + (gradients[i] /
                                                               (step + 1))

                    norm_gradients = [
                        np.linalg.norm(gradient) for gradient in avg_gradients
                    ]
                    self.norm_gradients_node.assign(norm_gradients).eval()

                    if step % display_step == 0:
                        self.output_minibatch_stats(
                            sess, summary_writer, step, batch_x,
                            util.crop_to_shape(batch_y, pred_shape))

                    total_loss += loss

                self.output_epoch_stats(epoch, total_loss, training_iters, lr)
                self.store_prediction(sess, test_x, test_y, "epoch_%s" % epoch)

                save_path = self.net.save(sess, save_path)
            logging.info("Optimization Finished!")

            return save_path