Code example #1
import Cl_load  # project data-loading module providing cmb_profile (import assumed; not shown in the excerpt)

NoEigenComp = 20  # number of eigen-components (not used in this excerpt)

# ----------------------------- i/o ------------------------------------------

DataDir = '../Cl_data/Data/'
PlotsDir = '../Cl_data/Plots/'
ModelDir = '../Cl_data/Model/'

# totalFiles / TestFiles (counts of training / test spectra) are assumed to be defined earlier in the script
train_path = DataDir + 'LatinCl_' + str(totalFiles) + '.npy'
train_target_path = DataDir + 'LatinPara5_' + str(totalFiles) + '.npy'
test_path = DataDir + 'LatinCl_' + str(TestFiles) + '.npy'
test_target_path = DataDir + 'LatinPara5_' + str(TestFiles) + '.npy'

camb_in = Cl_load.cmb_profile(train_path=train_path,
                              train_target_path=train_target_path,
                              test_path=test_path,
                              test_target_path=test_target_path,
                              num_para=5)

(x_train, y_train), (x_test, y_test) = camb_in.load_data()

# Drop the first two entries (the l = 0, 1 multipoles)
x_train = x_train[:, 2:]
x_test = x_test[:, 2:]

# Alternative: additionally keep only every other multipole
# x_train = x_train[:,2::2]
# x_test = x_test[:,2::2]

print(x_train.shape, 'train sequences')
print(x_test.shape, 'test sequences')
print(y_train.shape, 'train targets')
print(y_test.shape, 'test targets')
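Cl_load.cmb_profile is a project helper that is not shown on this page. Below is a minimal sketch of what it is assumed to do, namely load the .npy spectra and parameter arrays and return Keras-style (train, test) tuples; the attribute names and array shapes are guesses, not the actual AstroVAE implementation.

import numpy as np

class cmb_profile:
    """Hypothetical stand-in for Cl_load.cmb_profile (assumed behaviour only)."""

    def __init__(self, train_path, train_target_path,
                 test_path, test_target_path, num_para=5):
        self.train_path = train_path
        self.train_target_path = train_target_path
        self.test_path = test_path
        self.test_target_path = test_target_path
        self.num_para = num_para

    def load_data(self):
        # Spectra assumed to have shape (N, n_ell); parameters shape (N, num_para)
        x_train = np.load(self.train_path)
        y_train = np.load(self.train_target_path)[:, :self.num_para]
        x_test = np.load(self.test_path)
        y_test = np.load(self.test_target_path)[:, :self.num_para]
        return (x_train, y_train), (x_test, y_test)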
Code example #2
File: Cl_aae.py  Project: nesar/AstroVAE
    def train(self, epochs, batch_size=128, save_interval=50):

        camb_in = Cl_load.cmb_profile(train_path=train_path,
                                      train_target_path=train_target_path,
                                      test_path=test_path,
                                      test_target_path=test_target_path,
                                      num_para=5)

        (x_train, y_train), (x_test, y_test) = camb_in.load_data()

        x_train = x_train[:, 2:]
        x_test = x_test[:, 2:]

        print(x_train.shape, 'train sequences')
        print(x_test.shape, 'test sequences')
        print(y_train.shape, 'train targets')
        print(y_test.shape, 'test targets')

        normFactor = np.max([np.max(x_train), np.max(x_test)])
        print('-------normalization factor:', normFactor)

        x_train = x_train.astype('float32') / normFactor  #/ 255.
        x_test = x_test.astype('float32') / normFactor  #/ 255.
        x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
        x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))

        # Load the dataset
        #(X_train, _), (_, _) = mnist.load_data()

        # Rescale -1 to 1
        #X_train = (X_train.astype(np.float32) - 127.5) / 127.5
        # Add a trailing channel axis (the arrays were flattened above, so axis=-1 is used)
        X_train = np.expand_dims(x_train, axis=-1)

        half_batch = int(batch_size / 2)

        for epoch in range(epochs):

            # ---------------------
            #  Train Discriminator
            # ---------------------

            # Select a random half batch of images
            idx = np.random.randint(0, X_train.shape[0], half_batch)
            imgs = X_train[idx]

            # Encode a half batch of real inputs to obtain "fake" latent codes
            latent_fake = self.encoder.predict(imgs)

            # Draw "real" latent samples from the Gaussian prior
            latent_real = np.random.normal(size=(half_batch, self.encoded_dim))

            valid = np.ones((half_batch, 1))
            fake = np.zeros((half_batch, 1))

            # Train the discriminator
            d_loss_real = self.discriminator.train_on_batch(latent_real, valid)
            d_loss_fake = self.discriminator.train_on_batch(latent_fake, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # ---------------------
            #  Train Generator
            # ---------------------

            # Select a random full batch of inputs
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            imgs = X_train[idx]

            # Generator wants the discriminator to label the generated representations as valid
            valid_y = np.ones((batch_size, 1))

            # Train the generator
            g_loss = self.adversarial_autoencoder.train_on_batch(
                imgs, [imgs, valid_y])

            # Plot the progress
            print("%d [D loss: %f, acc: %.2f%%] [G loss: %f, mse: %f]" %
                  (epoch, d_loss[0], 100 * d_loss[1], g_loss[0], g_loss[1]))
            plt.figure(1032)
            plt.plot(epoch, d_loss[0], 'ko')   # discriminator loss
            plt.plot(epoch, g_loss[0], 'ro')   # generator (adversarial) loss
            plt.plot(epoch, g_loss[1], 'bo')   # reconstruction mse

            plt.figure(1031)
            plt.plot(epoch, 100 * d_loss[1], 'go')   # discriminator accuracy [%]

            # If at save interval => save generated image samples
            if epoch % save_interval == 0:
                # Select 25 random inputs to visualise and save
                idx = np.random.randint(0, X_train.shape[0], 25)
                imgs = X_train[idx]
                self.save_imgs(epoch, imgs)
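The loop above only adds one point per epoch to figure 1032 (losses) and figure 1031 (discriminator accuracy); it never labels or saves those figures. A minimal sketch of how they could be finished after train() returns is shown below; PlotsDir comes from the first snippet, while the axis labels and file names are assumptions, not part of Cl_aae.py.

import matplotlib.pyplot as plt

plt.figure(1032)
plt.xlabel('epoch')
plt.ylabel('loss')
plt.savefig(PlotsDir + 'aae_loss_history.png')    # file name is an assumption

plt.figure(1031)
plt.xlabel('epoch')
plt.ylabel('discriminator accuracy [%]')
plt.savefig(PlotsDir + 'aae_disc_accuracy.png')   # file name is an assumption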