Example #1
    def validate(self):
        print('\n\nValidating epoch: %d' % self.epoch)
        print('-' * 80)
        total = len(self.dataset_val)
        val_generator = self.dataset_val.generator(self.options.batch_size)
        progbar = Progbar(total, width=25)

        for input_rgb in val_generator:
            # split each (rgb, sketch) pair from the generator into the two
            # batches expected by the network inputs
            originItems = np.array([pair[0] for pair in input_rgb])
            sketchItems = np.array([pair[1] for pair in input_rgb])
            feed_dic = {
                self.input_rgb: originItems,
                self.input_gray: sketchItems.reshape(self.options.batch_size, 512, 512, 1),
            }

            self.sess.run([self.dis_loss, self.gen_loss, self.accuracy],
                          feed_dict=feed_dic)

            lossD, lossD_fake, lossD_real, lossG, lossG_l1, lossG_gan, acc, step = self.eval_outputs(
                feed_dic=feed_dic)

            progbar.add(len(input_rgb),
                        values=[("D loss", lossD), ("D fake", lossD_fake),
                                ("D real", lossD_real), ("G loss", lossG),
                                ("G L1", lossG_l1), ("G gan", lossG_gan),
                                ("accuracy", acc)])

        print('\n')
Example #2
    def validate(self):
        print('\n\nValidating epoch: %d' % self.epoch)
        total = len(self.dataset_val)
        val_generator = self.dataset_val.generator(self.options.batch_size)
        progbar = Progbar(total, width=25)

        for input_rgb in val_generator:
            feed_dic = {self.input_rgb: input_rgb}

            self.sess.run([self.dis_loss, self.gen_loss, self.accuracy],
                          feed_dict=feed_dic)

            lossD, lossD_fake, lossD_real, lossG, lossG_l1, lossG_gan, acc, step = self.eval_outputs(
                feed_dic=feed_dic)

            progbar.add(len(input_rgb),
                        values=[("D loss", lossD), ("D fake", lossD_fake),
                                ("D real", lossD_real), ("G loss", lossG),
                                ("G L1", lossG_l1), ("G gan", lossG_gan),
                                ("accuracy", acc)])

        print('\n')
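Both validation examples share the same progress-reporting pattern: create a Progbar with the total number of validation samples, then advance it by the batch size while passing the latest metric values. Below is a minimal, self-contained sketch of that pattern, assuming the class in question is tf.keras.utils.Progbar (the keras.utils variant has the same interface); the totals, batch size, and metric values are dummy numbers invented for illustration.

import numpy as np
from tensorflow.keras.utils import Progbar  # assumed Progbar implementation

total = 1000          # number of validation samples (dummy value)
batch_size = 32       # dummy batch size

progbar = Progbar(total, width=25)
for start in range(0, total, batch_size):
    batch = np.arange(start, min(start + batch_size, total))
    loss = 1.0 / (1 + start)                     # dummy metric for illustration
    # add() advances the bar by len(batch); non-stateful values are averaged
    progbar.add(len(batch), values=[("D loss", loss), ("accuracy", 0.5)])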
Example #3
    def train(self):
        total = len(self.dataset_train)

        for epoch in range(self.options.epochs):
            lr_rate = self.sess.run(self.learning_rate)

            print('Training epoch: %d - learning rate: %s' % (epoch + 1, lr_rate))
            print('-' * 80)
            print(self.dataset_train.load())

            self.epoch = epoch + 1
            self.iteration = 0
            generator = self.dataset_train.generator(self.options.batch_size)
            progbar = Progbar(total,
                              width=25,
                              stateful_metrics=['epoch', 'iter', 'step'])

            for input_rgb in generator:
                print("-" * 80)
                # split each (rgb, sketch) pair from the generator into the two
                # batches expected by the network inputs
                originItems = np.array([pair[0] for pair in input_rgb])
                sketchItems = np.array([pair[1] for pair in input_rgb])
                feed_dic = {
                    self.input_rgb: originItems,
                    self.input_gray: sketchItems.reshape(self.options.batch_size, 512, 512, 1),
                }

                self.iteration = self.iteration + 1
                # one discriminator update followed by two generator updates per batch
                self.sess.run([self.dis_train], feed_dict=feed_dic)
                self.sess.run([self.gen_train, self.accuracy],
                              feed_dict=feed_dic)
                self.sess.run([self.gen_train, self.accuracy],
                              feed_dict=feed_dic)

                lossD, lossD_fake, lossD_real, lossG, lossG_l1, lossG_gan, acc, step = self.eval_outputs(
                    feed_dic=feed_dic)

                progbar.add(len(input_rgb),
                            values=[("epoch", epoch + 1),
                                    ("iter", self.iteration), ("step", step),
                                    ("D loss", lossD), ("D fake", lossD_fake),
                                    ("D real", lossD_real), ("G loss", lossG),
                                    ("G L1", lossG_l1), ("G gan", lossG_gan),
                                    ("accuracy", acc)])

                # log model at checkpoints
                if self.options.log and step % self.options.log_interval == 0:
                    with open(self.train_log_file, 'a') as f:
                        f.write('%d %d %f %f %f %f %f %f %f\n' %
                                (self.epoch, step, lossD, lossD_fake,
                                 lossD_real, lossG, lossG_l1, lossG_gan, acc))

                    if self.options.visualize:
                        visualize(self.train_log_file, self.test_log_file,
                                  self.options.visualize_window, self.name)

                # sample model at checkpoints
                if self.options.sample and step % self.options.sample_interval == 0:
                    self.sample(show=False)

                # validate model at checkpoints
                if self.options.validate and self.options.validate_interval > 0 and step % self.options.validate_interval == 0:
                    self.validate()

                # save model at checkpoints
                if self.options.save and step % self.options.save_interval == 0:
                    self.save()

            if self.options.validate:
                self.validate()
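Example #3 also passes stateful_metrics=['epoch', 'iter', 'step'] to the Progbar. Assuming the keras/tf.keras Progbar, names listed there are displayed as their most recent value instead of being averaged over the epoch, which is the behaviour you want for counters. A small sketch of the difference, with made-up numbers:

from tensorflow.keras.utils import Progbar  # assumed Progbar implementation

progbar = Progbar(100, width=25, stateful_metrics=['step'])
for step in range(1, 11):
    # "step" is shown as-is (last value); "loss" is averaged across add() calls
    progbar.add(10, values=[("step", step), ("loss", 1.0 / step)])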
Example #4
# Imports assumed by this snippet; NB_EPOCH, the data splits (X_train/y_train,
# X_valid/y_valid, X_test/y_test), the Progbar class, the shared `learning_rate`
# variable and the compiled `f_train`/`f_predict` functions are defined
# elsewhere in the original script.
import numpy as np
from sklearn.metrics import accuracy_score, classification_report

BATCH_SIZE = 128
LEARNING_RATE = 0.1

learning_rate.set_value(np.cast['float32'](LEARNING_RATE))
training_history = []
valid_history = []
for epoch in range(NB_EPOCH):
    prog = Progbar(target=X_train.shape[0])
    n = 0
    history = []
    while n < X_train.shape[0]:
        start = n
        end = min(n + BATCH_SIZE, X_train.shape[0])
        c = f_train(X_train[start:end], y_train[start:end])
        prog.title = 'Epoch: %.2d, Cost: %.4f' % (epoch + 1, c)
        prog.add(end - start)
        n += BATCH_SIZE
        history.append(c)
    # end of epoch, start validating
    y = np.argmax(f_predict(X_valid), axis=-1)
    accuracy = accuracy_score(y_valid, y)
    print('Validation accuracy:', accuracy)
    # save history
    training_history.append(np.mean(history))
    valid_history.append(accuracy)

y = np.argmax(f_predict(X_test), axis=-1)
accuracy = accuracy_score(y_test, y)
print('Test accuracy:', accuracy)
print('Classification report:')
print(classification_report(y_test, y))
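The final evaluation block (argmax over the predicted probabilities, then accuracy_score and classification_report) works independently of the Theano-style training loop; note that the prog.title assignment in the loop above appears to rely on a Progbar variant with a settable title, which the stock keras.utils.Progbar does not display. A standalone sketch of the evaluation step, with dummy data standing in for f_predict, X_test, and y_test:

import numpy as np
from sklearn.metrics import accuracy_score, classification_report

rng = np.random.default_rng(0)
y_test = rng.integers(0, 3, size=200)      # dummy ground-truth labels (3 classes)
probs = rng.random((200, 3))               # dummy class probabilities from a model
y = np.argmax(probs, axis=-1)              # most likely class per sample

print('Test accuracy:', accuracy_score(y_test, y))
print('Classification report:')
print(classification_report(y_test, y))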