# Example no. 1 (score: 0)
    def train(self, x1_train_df, x2_train_df, epochs, batch_size=128, sample_interval=50):
        """Run the adversarial training loop, sampling x1 and x2 independently.

        Args:
            x1_train_df: DataFrame of generator inputs; ``.values`` is batched
                and ``.index[0]`` contributes to the output folder name.
            x2_train_df: DataFrame of real samples the discriminator should
                label as valid (may differ in length from x1).
            epochs: number of epochs to train.
            batch_size: samples per training step.
            sample_interval: save the generator and outputs every this many
                epochs.
        """
        timestamp = datetime.now().strftime("%d-%m-%Y_%H.%M.%S")
        fname = timestamp + '_ganautodiambatch_loss0.8_full_upsample' + x1_train_df.index[0].split('.')[0]
        # One run directory per output kind; raises if a run with the same
        # timestamp already exists.
        os.makedirs(os.path.join('figures_' + self.modelname, fname))
        os.makedirs(os.path.join('output_' + self.modelname, fname))
        os.makedirs(os.path.join('models_' + self.modelname, fname))

        training_metrics = {"epoch": [], "d_loss": [], "d_accuracy": [], "g_loss": []}

        x1_train = x1_train_df.values
        x2_train = x2_train_df.values

        # Adversarial ground truths
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))
        d_loss = [0, 0]  # [loss, accuracy]; seeded so the first-step check works

        # Cover the larger of the two datasets each epoch.
        steps_per_epoch = max(x1_train.shape[0], x2_train.shape[0]) // batch_size
        for epoch in range(epochs):

            for step in range(steps_per_epoch):
                # ---------------------
                #  Train Discriminator
                # ---------------------

                # Select independent random batches of x1 and x2
                idx1 = np.random.randint(0, x1_train.shape[0], batch_size)
                x1 = x1_train[idx1]
                idx2 = np.random.randint(0, x2_train.shape[0], batch_size)
                x2 = x2_train[idx2]

                # Generate a batch of new images
                gen_x1 = self.generator.predict(x1)

                # Train the discriminator; if it is already too accurate
                # (> 0.8), only evaluate so the generator can catch up.
                if d_loss[1] > 0.8:
                    d_loss_real = self.discriminator.test_on_batch(x2, valid)
                    d_loss_fake = self.discriminator.test_on_batch(gen_x1, fake)
                else:
                    d_loss_real = self.discriminator.train_on_batch(x2, valid)
                    d_loss_fake = self.discriminator.train_on_batch(gen_x1, fake)
                d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

                # ---------------------
                #  Train Generator
                # ---------------------

                # Train the generator (to have the discriminator label samples as valid)
                g_loss = self.combined.train_on_batch(x1, valid)

            # Plot the progress
            print("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100 * d_loss[1], g_loss),
                  flush=True)

            training_metrics["epoch"].append(epoch)
            training_metrics["d_loss"].append(d_loss[0])
            training_metrics["d_accuracy"].append(d_loss[1])
            training_metrics["g_loss"].append(g_loss)

            # If at save interval => save generated image samples
            if epoch % sample_interval == 0:
                print('generating plots and saving outputs')
                gx1 = self.generator.predict(x1_train_df)
                self.generator.save(os.path.join('models_' + self.modelname, fname, 'generator' + str(epoch)))
                save_info.save_dataframes(epoch, x1_train_df, x2_train_df, gx1, fname, dir_name='output_'+self.modelname)
                save_info.save_scores(epoch, x1_train_df, x2_train_df, gx1, training_metrics, fname, dir_name='output_'+self.modelname)
    def train(self,
              x1_train_df,
              x2_train_df,
              epochs,
              batch_size=128,
              sample_interval=50):
        """Run the adversarial training loop with a reconstruction target.

        The combined model is trained against two targets, ``[x1, valid]`` —
        presumably a reconstruction of its input plus a discriminator score
        (see the 4-element ``g_loss`` unpacked below).

        Args:
            x1_train_df: DataFrame of generator inputs; row ``i`` is paired
                with row ``i`` of ``x2_train_df``.
            x2_train_df: DataFrame of real samples, same length as x1.
            epochs: number of epochs to train.
            batch_size: samples per training step.
            sample_interval: save the generator, outputs and plots every this
                many epochs.

        Returns:
            dict of per-epoch metric lists (losses, accuracies,
            reconstruction error).
        """
        fname = datetime.now().strftime("%d-%m-%Y_%H.%M.%S")
        # One run directory per output kind; raises if a run with the same
        # timestamp already exists.
        os.makedirs(os.path.join('figures', fname))
        os.makedirs(os.path.join('output', fname))
        os.makedirs(os.path.join('models', fname))

        plot_model = {
            "epoch": [],
            "d_loss": [],
            "g_loss": [],
            "d_accuracy": [],
            "g_accuracy": [],
            "g_reconstruction_error": [],
            "g_loss_total": []
        }

        x1_train = x1_train_df.values
        x2_train = x2_train_df.values

        # Adversarial ground truths (per batch, and full-dataset variants for
        # the end-of-epoch evaluation).
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        valid_full = np.ones((len(x1_train), 1))
        fake_full = np.zeros((len(x1_train), 1))
        d_loss = [0, 0]  # [loss, accuracy]; seeded so the first-step check works

        steps_per_epoch = len(x1_train) // batch_size
        for epoch in range(epochs):
            for step in range(steps_per_epoch):
                # ---------------------
                #  Train Discriminator
                # ---------------------

                # Select a random batch of paired x1/x2 rows
                idx = np.random.randint(0, x1_train.shape[0], batch_size)
                x1 = x1_train[idx]
                x2 = x2_train[idx]

                # Generate a batch of new images
                gen_x1 = self.generator.predict(x1)

                # Train the discriminator; gives the generator a break if the
                # discriminator learns too fast (evaluate only, no update).
                if d_loss[1] > 0.8:
                    d_loss_real = self.discriminator.test_on_batch(x2, valid)
                    d_loss_fake = self.discriminator.test_on_batch(
                        gen_x1, fake)
                else:
                    d_loss_real = self.discriminator.train_on_batch(x2, valid)
                    d_loss_fake = self.discriminator.train_on_batch(
                        gen_x1, fake)
                d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

                # ---------------------
                #  Train Generator
                # ---------------------

                # Train the generator to reconstruct x1 AND have the
                # discriminator label its output as valid.
                g_loss = self.combined.train_on_batch(x1, [x1, valid])

            # Evaluate on the full dataset for stable per-epoch metrics
            # (per-batch losses fluctuate too much to plot directly).
            gen_x1 = self.generator.predict(x1_train)
            g_loss = self.combined.test_on_batch(x1_train,
                                                 [x1_train, valid_full])
            d_loss = self.discriminator.test_on_batch(
                np.concatenate((x2_train, gen_x1)),
                np.concatenate((valid_full, fake_full)))

            # Plot the progress
            print(
                "%d [D loss: %f, acc.: %.2f%%] [G loss: %f, mae: %.2f, xentropy: %f, acc.: %.2f%%]"
                % (epoch, d_loss[0], 100 * d_loss[1], g_loss[0], g_loss[1],
                   g_loss[2], g_loss[3] * 100))

            plot_model["epoch"].append(epoch)
            plot_model["d_loss"].append(d_loss[0])
            plot_model["g_loss"].append(g_loss[2])

            plot_model["d_accuracy"].append(d_loss[1])
            plot_model["g_accuracy"].append(g_loss[3])

            plot_model["g_reconstruction_error"].append(g_loss[1])
            plot_model["g_loss_total"].append(g_loss[0])

            # If at save interval => save generated image samples
            if epoch % sample_interval == 0:
                print('generating plots and saving outputs')
                gx1 = self.generator.predict(x1_train_df)
                # NOTE(review): '.csv' is a misleading extension for a model
                # save path — confirm downstream consumers rely on it before
                # renaming.
                self.generator.save(
                    os.path.join('models', fname,
                                 'generator' + str(epoch) + '.csv'))
                save_info.save_dataframes(epoch, x1_train_df, x2_train_df, gx1,
                                          fname)
                save_info.save_scores(epoch, x1_train_df, x2_train_df, gx1,
                                      fname)
                save_plots.plot_progress(epoch,
                                         x1_train_df,
                                         x2_train_df,
                                         gx1,
                                         plot_model,
                                         fname,
                                         umap=False)
        return plot_model