    def setUp(self):
        self.train_data, self.validation_data = \
            prepare_data.load_data(self.IMAGE_DIR,
                                   self.SEG_DIR,
                                   n_class=2,
                                   train_val_rate=0.9)
        self.data = prepare_data.generate_data(self.IMAGE_DIR, self.SEG_DIR, 1)
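These snippets call prepare_data.load_data without showing it. The following is a minimal sketch of what load_data is assumed to do, inferred only from the call sites in this listing: everything else (file listing, shuffling, the helper names) is an assumption, and n_class is accepted but ignored here even though the real loader presumably uses it to one-hot encode the masks.

import os
import random

def load_data(image_dir, seg_dir, n_class=2, train_val_rate=0.9):
    # Collect image paths; segmentation paths are optional (seg_dir may be None).
    image_paths = sorted(os.path.join(image_dir, f) for f in os.listdir(image_dir))
    if seg_dir is None:
        seg_paths = []
    else:
        seg_paths = sorted(os.path.join(seg_dir, f) for f in os.listdir(seg_dir))

    # Shuffle image/segmentation pairs together, then split train/validation.
    pairs = list(zip(image_paths, seg_paths)) if seg_paths else [(p, None) for p in image_paths]
    random.shuffle(pairs)
    split = int(len(pairs) * train_val_rate)

    def unzip(chunk):
        images = [img for img, _ in chunk]
        segs = [seg for _, seg in chunk if seg is not None]
        return images, segs

    # Returns ((train_images, train_segs), (val_images, val_segs)).
    return unzip(pairs[:split]), unzip(pairs[split:])
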
Example #2
    def test_generate_test_dataset(self):
        test_data, _ = prepare_data.load_data(self.IMAGE_DIR,
                                              None,
                                              n_class=2,
                                              train_val_rate=0.9)
        test_images = prepare_data.generate_data(*test_data, 1)
        for img, seg in test_images:
            self.assertEqual(len(seg), 0)
            break
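Example #2 only passes if generate_data yields an empty segmentation batch when no segmentation directory was loaded. Below is a minimal generator consistent with that behaviour and with the batch test in Example #4; the image loading, normalisation, and array shapes are assumptions, and the real implementation presumably also one-hot encodes the masks. Note that Example #1's setUp passes directories straight to generate_data, which differs from the other call sites; the sketch follows the majority pattern of unpacking the paths returned by load_data.

import numpy as np
from PIL import Image

def generate_data(image_paths, seg_paths, batch_size):
    # Yield (image_batch, seg_batch) tuples. seg_batch stays empty when the
    # dataset was loaded without a segmentation directory, which is what
    # Example #2 asserts.
    for start in range(0, len(image_paths), batch_size):
        images = [np.asarray(Image.open(p), dtype=np.float32) / 255.0
                  for p in image_paths[start:start + batch_size]]
        segs = [np.asarray(Image.open(p))
                for p in seg_paths[start:start + batch_size]]
        yield np.asarray(images), np.asarray(segs)
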
Example #3
    def validation(self, sess, output):
        val_image = prepare_data.load_data(self.VALIDATION_DIR,
                                           None,
                                           n_class=2,
                                           train_val_rate=1)[0]
        data = prepare_data.generate_data(*val_image, batch_size=1)
        # Run inference on a single validation batch.
        for Input, _ in data:
            result = sess.run(output,
                              feed_dict={
                                  self.X: Input,
                                  self.is_training: False
                              })
            break

        # Turn per-pixel class scores into an RGB overlay: argmax picks the
        # class index, identity-matrix indexing one-hot encodes it, and
        # scaling by 255 maps each class to a pure colour (uint8 avoids the
        # overflow that int8 * 255 would cause).
        result = np.argmax(result[0], axis=2)
        ident = np.identity(3, dtype=np.uint8)
        result = ident[result] * 255

        plt.imshow((Input[0] * 255).astype(np.uint8))
        plt.imshow(result, alpha=0.2)
        plt.show()
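The identity-matrix indexing used in Example #3 is a compact one-hot trick: indexing np.identity(3) with an array of class indices produces an RGB-like image in which class k becomes pure colour k. A small standalone illustration (the 2x2 mask is made up):

import numpy as np

mask = np.array([[0, 1],
                 [1, 0]])          # per-pixel class indices, as from np.argmax
ident = np.identity(3, dtype=np.uint8)
rgb = ident[mask] * 255            # shape (2, 2, 3): class k -> pure colour k
print(rgb[0, 0])                   # [255   0   0] for class 0
print(rgb[0, 1])                   # [  0 255   0] for class 1
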
Example #4
    def test_batch_generation(self):
        batch_data = prepare_data.generate_data(*self.train_data, 5)
        for img, seg in batch_data:
            self.assertEqual(len(img), 5)
            self.assertEqual(len(seg), 5)
            break
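The test methods above reference self.IMAGE_DIR, self.SEG_DIR, and self.train_data without showing the class they belong to. A plausible unittest skeleton tying them together; the class name and directory paths are placeholders, not taken from the original project:

import unittest

import prepare_data

class PrepareDataTest(unittest.TestCase):
    # Placeholder paths; the real test class defines its own directories.
    IMAGE_DIR = "./data/images"
    SEG_DIR = "./data/segmented"

    def setUp(self):
        self.train_data, self.validation_data = prepare_data.load_data(
            self.IMAGE_DIR, self.SEG_DIR, n_class=2, train_val_rate=0.9)

    # test_generate_test_dataset and test_batch_generation from the
    # examples above would sit here.

if __name__ == "__main__":
    unittest.main()
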
Example #5
    def train(self, parser):
        """
    training operation
    argument of this function are given by functions in prepare_data.py

    Parameters
    ----------
      parser: 
        the paser that has some options
    """
        epoch = parser.epoch
        l2 = parser.l2
        batch_size = parser.batch_size
        train_val_rate = parser.train_rate

        output = self.UNet(l2_reg=l2, is_training=self.is_training)
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.y,
                                                       logits=output))
        # Make sure batch-normalisation statistics are updated on every step.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_ops = tf.train.AdamOptimizer(
                parser.learning_rate).minimize(loss)

        init = tf.global_variables_initializer()
        saver = tf.train.Saver(max_to_keep=100)
        all_train, all_val = prepare_data.load_data(
            self.IMAGE_DIR,
            self.SEGMENTED_DIR,
            n_class=2,
            train_val_rate=train_val_rate)
        with tf.Session() as sess:
            init.run()
            for e in range(epoch):
                # Recreate the generators each epoch; the validation generator
                # yields the whole validation set as a single batch.
                data = prepare_data.generate_data(*all_train, batch_size)
                val_data = prepare_data.generate_data(*all_val,
                                                      len(all_val[0]))
                for Input, Teacher in data:
                    sess.run(train_ops,
                             feed_dict={
                                 self.X: Input,
                                 self.y: Teacher,
                                 self.is_training: True
                             })
                    ls = loss.eval(feed_dict={
                        self.X: Input,
                        self.y: Teacher,
                        self.is_training: False
                    })
                    # val_data is exhausted after the first training batch, so
                    # val_loss is only refreshed once per epoch.
                    for val_Input, val_Teacher in val_data:
                        val_loss = loss.eval(
                            feed_dict={
                                self.X: val_Input,
                                self.y: val_Teacher,
                                self.is_training: False
                            })

                print(f'epoch #{e + 1}, loss = {ls}, val loss = {val_loss}')
                if e % 100 == 0:
                    saver.save(sess, f"./params/model_{e + 1}epochs.ckpt")

            self.validation(sess, output)
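Example #5 reads epoch, l2, batch_size, train_rate, and learning_rate off its parser argument. A minimal argparse setup that would supply those attributes; the defaults are illustrative, not from the original project:

import argparse

def get_parser():
    # Despite the name, train() reads attributes off the *parsed* arguments,
    # so it should be handed parser.parse_args(), not the parser itself.
    parser = argparse.ArgumentParser(description="UNet training options")
    parser.add_argument("--epoch", type=int, default=100)
    parser.add_argument("--l2", type=float, default=0.0001)
    parser.add_argument("--batch_size", type=int, default=4)
    parser.add_argument("--train_rate", type=float, default=0.85)
    parser.add_argument("--learning_rate", type=float, default=0.001)
    return parser

# Usage (model is an instance of the class defining train()):
#   model.train(get_parser().parse_args())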