Example no. 1
def __init__(self,
             root="../data/mnist.npz",
             train=True,
             transform=None,
             target_transform=None):
    self.root = root
    self.transform = transform
    self.target_transform = target_transform
    self.train = train
    # reader.read_mnist returns ((train_data, train_labels), (test_data, test_labels))
    (self.train_data,
     self.train_labels), (self.test_data,
                          self.test_labels) = reader.read_mnist(root)
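For context, here is a minimal sketch of the accessor methods a Dataset-style class like this usually adds; `__len__` and `__getitem__` below are assumptions, not part of the original snippet:

# Assumed accessor methods for the class above (illustrative only):
def __len__(self):
    return len(self.train_data) if self.train else len(self.test_data)

def __getitem__(self, index):
    if self.train:
        img, target = self.train_data[index], self.train_labels[index]
    else:
        img, target = self.test_data[index], self.test_labels[index]
    if self.transform is not None:
        img = self.transform(img)
    if self.target_transform is not None:
        target = self.target_transform(target)
    return img, target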
Example no. 2
    qcon_optim = Adam(Q_con.parameters(), lr=2e-4)
if c3_len:
    Q_bin = Linear(embedding_len, c3_len).cuda() if GPU_NUMS > 1 else Linear(embedding_len, c3_len)
    qbin_optim = Adam(Q_bin.parameters(), lr=2e-4)

g_optim = Adam(Net_G.parameters(), lr=1e-3)
d_optim = Adam(Net_D.parameters(), lr=2e-4)

# loss functions, moved to the GPU when GPU_NUMS > 1
nll = NLLLoss().cuda() if GPU_NUMS > 1 else NLLLoss()
mse = MSELoss().cuda() if GPU_NUMS > 1 else MSELoss()
bce = BCELoss().cuda() if GPU_NUMS > 1 else BCELoss()

'''
Load the data
'''
(X_train, Y_train), (X_test, Y_test) = read_mnist("/data/mnist.npz")
x_train = np.expand_dims(X_train, 1)  # add a channel axis: (N, 28, 28) -> (N, 1, 28, 28)
# one-hot encode the labels via fancy indexing
y_train = np.zeros((Y_train.shape[0], 10), dtype=np.uint8)
y_train[np.arange(Y_train.shape[0]), Y_train] = 1

x_test = np.expand_dims(X_test, 1)
y_test = np.zeros((Y_test.shape[0], 10), dtype=np.uint8)
y_test[np.arange(Y_test.shape[0]), Y_test] = 1

x_train = x_train.astype(np.uint8)
x_test = x_test.astype(np.uint8)
y_train = y_train.astype(np.uint8)
y_test = y_test.astype(np.uint8)

supervision = 100 # Number of samples to supervise with
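The one-hot encoding above relies on NumPy fancy indexing; a self-contained illustration (standalone, not from the source):

import numpy as np

labels = np.array([3, 0, 7])                     # example class indices
one_hot = np.zeros((labels.shape[0], 10), dtype=np.uint8)
one_hot[np.arange(labels.shape[0]), labels] = 1  # row i gets a 1 in column labels[i]
# one_hot is now:
# [[0 0 0 1 0 0 0 0 0 0]
#  [1 0 0 0 0 0 0 0 0 0]
#  [0 0 0 0 0 0 0 1 0 0]]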
Example no. 3
# NOTE: read_mnist and MNISTConfig are project-local helpers (not shown here)
from keras import layers as KLayers
from keras import models as KModels
from keras import optimizers as KOpts
import numpy as np
from matplotlib import pyplot as plt

plt.switch_backend('agg')

PHRASE = "TRAIN"
GPU_NUMS = 2
batchSize = 100
epochs = 30
cfg = MNISTConfig()

if PHRASE == "TRAIN":
    np.random.seed(1000)
    randomDim = 100
    (X_train, y_train), (X_test, y_test) = read_mnist("../ganData/mnist.npz")
    X_train = (X_train.astype(np.float32) - 127.5) / 127.5  # scale [0, 255] -> [-1, 1]
    X_train = X_train.reshape(60000, 784)

    adam = KOpts.Adam(lr=0.0002, beta_1=0.5)

    def build_generator(randomDim):
        img = KLayers.Input(shape=(randomDim, ))
        network = KLayers.Dense(units=128 * 7 * 7)(img)
        network = KLayers.Activation('relu')(network)
        network = KLayers.Reshape((7, 7, 128))(network)
        network = KLayers.BatchNormalization(momentum=0.8)(network)
        network = KLayers.UpSampling2D()(network)

        network = KLayers.Conv2D(128, kernel_size=3, padding='same')(network)
        network = KLayers.Activation('relu')(network)
        # the source is truncated here; what follows is an assumed minimal
        # completion in the usual DCGAN pattern (7x7 -> 14x14 -> 28x28)
        network = KLayers.BatchNormalization(momentum=0.8)(network)
        network = KLayers.UpSampling2D()(network)
        network = KLayers.Conv2D(64, kernel_size=3, padding='same')(network)
        network = KLayers.Activation('relu')(network)
        network = KLayers.BatchNormalization(momentum=0.8)(network)
        network = KLayers.Conv2D(1, kernel_size=3, padding='same')(network)
        network = KLayers.Activation('tanh')(network)
        return KModels.Model(inputs=img, outputs=network)
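Hypothetical usage of the builder above, assuming the completion sketched in it (none of these lines appear in the source):

generator = build_generator(100)
noise = np.random.normal(0, 1, size=(16, 100))
fake_images = generator.predict(noise)  # -> (16, 28, 28, 1), values in [-1, 1]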
Example no. 4
    def train(self, epochs, batch_size=128, save_interval=50):

        # Load the dataset
        (X_train, _), (_, _) = read_mnist()

        # Rescale -1 to 1
        X_train = (X_train.astype(np.float32) - 127.5) / 127.5
        X_train = np.expand_dims(X_train, axis=3)

        half_batch = int(batch_size / 2)
        proBar = ProgressBar(1, epochs,
                             "d loss:%.3f,d acc:%.3f;g loss:%.3f,g acc:%.3f")

        for epoch in range(epochs):

            # ---------------------
            #  Train Discriminator
            # ---------------------

            # Select a random half batch of images
            idx = np.random.randint(0, X_train.shape[0], half_batch)
            imgs = X_train[idx]

            # Generate a half batch of embedded images
            latent_fake = self.encoder.predict(imgs)

            latent_real = np.random.normal(size=(half_batch, self.encoded_dim))

            valid = np.ones((half_batch, 1))
            fake = np.zeros((half_batch, 1))

            # Train the discriminator
            d_loss_real = self.discriminator.train_on_batch(latent_real, valid)
            d_loss_fake = self.discriminator.train_on_batch(latent_fake, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # ---------------------
            #  Train Generator
            # ---------------------

            # Select a random full batch of images
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            imgs = X_train[idx]

            # Generator wants the discriminator to label the generated representations as valid
            valid_y = np.ones((batch_size, 1))

            # Train the generator
            g_loss = self.adversarial_autoencoder.train_on_batch(
                imgs, [imgs, valid_y])

            # Plot the progress
            # print ("%d [D loss: %f, acc: %.2f%%] [G loss: %f, mse: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss[0], g_loss[1]))
            proBar.show(d_loss[0], d_loss[1], g_loss[0], g_loss[1])

            # If at save interval => save generated image samples
            if epoch % save_interval == 0:
                # Select 25 random images for the sample grid
                idx = np.random.randint(0, X_train.shape[0], 25)
                imgs = X_train[idx]
                self.save_imgs(epoch, imgs)
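For orientation: `train_on_batch(imgs, [imgs, valid_y])` implies the combined model has two outputs, the reconstruction and the discriminator's score on the latent code. Below is a minimal sketch of that wiring, with tiny stand-in sub-models since the real encoder/decoder/discriminator builders are not shown in the source (all names and the loss weighting are assumptions):

from keras.layers import Input, Dense, Flatten, Reshape
from keras.models import Model

encoded_dim = 10  # assumed latent size

# stand-in sub-models; the real builders are not part of this snippet
inp = Input(shape=(28, 28, 1))
encoder = Model(inp, Dense(encoded_dim)(Flatten()(inp)))
z = Input(shape=(encoded_dim,))
decoder = Model(z, Reshape((28, 28, 1))(Dense(28 * 28, activation='tanh')(z)))
d_in = Input(shape=(encoded_dim,))
discriminator = Model(d_in, Dense(1, activation='sigmoid')(d_in))
discriminator.compile(loss='binary_crossentropy', optimizer='adam')

# combined model: image -> [reconstruction, validity of the latent code]
img = Input(shape=(28, 28, 1))
encoded = encoder(img)
discriminator.trainable = False  # freeze D while the autoencoder trains
adversarial_autoencoder = Model(img, [decoder(encoded), discriminator(encoded)])
adversarial_autoencoder.compile(loss=['mse', 'binary_crossentropy'],
                                loss_weights=[0.999, 0.001],  # assumed weighting
                                optimizer='adam')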
Example no. 5
    # the snippet begins mid-function: the enclosing `for i in range(5):` loop
    # and the `fig, axs = plt.subplots(5, 5)` / `cnt = 0` setup are assumed
    # from context, not present in the source
    for i in range(5):
        for j in range(5):
            axs[i, j].imshow(gen_imgs[cnt, :, :, 0], cmap='gray')
            axs[i, j].set_title("Digit: %d" % label[cnt])
            axs[i, j].axis('off')
            cnt += 1
    fig.savefig("output/%d.png" % epoch)
    plt.close()


'''
Start training
'''
'''
1. Load the data, then normalize it to (-1, 1)
'''
(x_train, y_train), (_, _) = read_mnist("../ganData/mnist.npz")
x_train = (x_train.astype(np.float32) - 127.5) / 127.5  # 127.5 = 255 / 2, scales to [-1, 1]
x_train = np.expand_dims(x_train,
                         axis=3)  # shape: (60000, 28, 28) -> (60000, 28, 28, 1)
y_train = np.reshape(y_train, newshape=(-1, 1))
'''
2. Build the networks
'''
Generator = builder_generator()
Discriminator = builder_discriminator()
optimizer = Adam(0.0002, 0.5)
Generator.compile(loss='binary_crossentropy', optimizer=optimizer)
Discriminator.compile(loss='binary_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'])
noise_temp = Input(shape=(NOISE_DIM, ))
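The snippet stops right after creating the noise input; here is a hedged sketch of the combined generator-training model that conventionally follows (the `combined` name and the wiring are assumptions based on the standard Keras GAN pattern, not taken from the source):

from keras.models import Model

Discriminator.trainable = False                # freeze D while training G
validity = Discriminator(Generator(noise_temp))
combined = Model(noise_temp, validity)         # noise -> G -> D -> real/fake score
combined.compile(loss='binary_crossentropy', optimizer=optimizer)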