Example #1
def testModel():
    # Create models
    print("Creating Autoencoder, Encoder and Generator...")
    autoencoder, encoder, decoder = getModels()

    # Load Autoencoder weights
    print("Loading weights...")
    autoencoder.load_weights(modelsPath + modelName)

    # Load dataset to test
    print("Loading dataset...")
    X_train, X_test = loadDataset()

    # Visualization functions
    visualizeReconstructedImages(X_train[:16],
                                 X_test[:16],
                                 autoencoder,
                                 save=True)
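
getModels() is not part of this listing. A minimal sketch of what it might return, assuming a dense 28x28 autoencoder with a small latent space (layer sizes, shapes and latentDim are placeholder assumptions, not the original architecture):

from keras.layers import Input, Dense, Flatten, Reshape
from keras.models import Model

def getModels(imgSize=28, latentDim=10):
    # Encoder: flatten the image and project it down to the latent space
    inputImage = Input(shape=(imgSize, imgSize, 1))
    x = Flatten()(inputImage)
    x = Dense(256, activation="relu")(x)
    latent = Dense(latentDim)(x)
    encoder = Model(inputImage, latent)

    # Decoder: map a latent vector back to image space
    latentInput = Input(shape=(latentDim,))
    x = Dense(256, activation="relu")(latentInput)
    x = Dense(imgSize * imgSize, activation="sigmoid")(x)
    decoder = Model(latentInput, Reshape((imgSize, imgSize, 1))(x))

    # Autoencoder: encoder and decoder chained end to end
    autoencoder = Model(inputImage, decoder(encoder(inputImage)))
    return autoencoder, encoder, decoder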

Example #2
def testModel():
    # Create models
    print("Creating Autoencoder, Encoder and Generator...")
    autoencoder, encoder, decoder = getModels()

    # Load Autoencoder weights
    print("Loading weights...")
    autoencoder.load_weights(modelsPath+modelName)

    # Load dataset to test
    print("Loading dataset...")
    X_train, X_test = loadDataset()

    # Visualization functions
    #visualizeReconstructedImages(X_train[:16],X_test[:16], autoencoder)
    # computeTSNEProjectionOfPixelSpace(X_test[:1000], display=True)
    # computeTSNEProjectionOfLatentSpace(X_test[:1000], encoder, display=True)
    # while True: visualizeInterpolation(X_test[randint(0, X_test.shape[0] - 1)], X_test[randint(0, X_test.shape[0] - 1)], encoder, decoder, save=False, nbSteps=5)
    while True:
        visualizeArithmetics(X_test[randint(0, X_test.shape[0] - 1)],
                             X_test[randint(0, X_test.shape[0] - 1)],
                             X_test[randint(0, X_test.shape[0] - 1)],
                             encoder, decoder)
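
visualizeArithmetics() is not defined on this page either; judging from the call above, it takes three test images plus the encoder and decoder, which suggests the classic latent-space arithmetic demo. A hedged sketch of that idea (the a - b + c combination and the display layout are assumptions):

import cv2
import numpy as np

def visualizeArithmetics(a, b, c, encoder, decoder):
    # Project the three images into latent space
    latentA, latentB, latentC = encoder.predict(np.array([a, b, c]))

    # Latent arithmetic: combine the three codes and decode the result
    decoded = decoder.predict(np.array([latentA - latentB + latentC]))

    # Show the inputs next to the decoded result, rescaled for display
    images = [a, b, c, decoded[0].reshape([28, 28])]
    row = np.hstack([cv2.resize((img * 255).astype(np.uint8), (50, 50)) for img in images])
    cv2.imshow("a - b + c", row)
    cv2.waitKey()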

Example #3
def visualizeInterpolation(start, end, encoder, decoder, save=False, nbSteps=5):
    print("Generating interpolations...")

    # Project the two endpoint images into latent space
    latentStart, latentEnd = encoder.predict(np.array([start, end]))

    # Interpolate in image space and latent space with the same alpha steps
    normalImages = []
    vectors = []
    alphaValues = np.linspace(0, 1, nbSteps)
    for alpha in alphaValues:
        vectors.append(latentStart * (1 - alpha) + latentEnd * alpha)
        normalImages.append(cv2.addWeighted(start, 1 - alpha, end, alpha, 0))

    # Decode the interpolated latent vectors back to image space
    reconstructions = decoder.predict(np.array(vectors))

    # Assemble the side-by-side comparison image
    resultImage = None
    resultLatent = None
    if save:
        hashName = ''.join(random.choice(string.ascii_lowercase) for i in range(3))

    for i in range(len(reconstructions)):
        interpolatedImage = normalImages[i] * 255
        interpolatedImage = cv2.resize(interpolatedImage, (50, 50))
        interpolatedImage = interpolatedImage.astype(np.uint8)
        resultImage = interpolatedImage if resultImage is None else np.hstack([resultImage, interpolatedImage])

        reconstructedImage = reconstructions[i] * 255.
        reconstructedImage = reconstructedImage.reshape([28, 28])
        reconstructedImage = cv2.resize(reconstructedImage, (50, 50))
        reconstructedImage = reconstructedImage.astype(np.uint8)
        resultLatent = reconstructedImage if resultLatent is None else np.hstack([resultLatent, reconstructedImage])

        if save:
            cv2.imwrite(visualsPath + "{}_{}.png".format(hashName, i), np.hstack([interpolatedImage, reconstructedImage]))

    # Image-space interpolations on top, latent-space reconstructions below
    result = np.vstack([resultImage, resultLatent])

    if not save:
        cv2.imshow("Interpolation in Image Space vs Latent Space", result)
        cv2.waitKey()
        cv2.destroyAllWindows()

if __name__ == "__main__":
    # Load dataset to test
    print("Loading dataset...")
    X_train, X_test = loadDataset()
    visualizeDataset(X_test[:100])
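
Every example on this page calls loadDataset(), which is also not shown. A plausible minimal version, assuming MNIST scaled to [0, 1] (the 28x28 reshape in Example #3 points at MNIST-sized images; the trailing channel axis is an assumption):

import numpy as np
from keras.datasets import mnist

def loadDataset():
    # Load MNIST and scale pixel values to [0, 1]
    (X_train, _), (X_test, _) = mnist.load_data()
    X_train = X_train.astype(np.float32) / 255.
    X_test = X_test.astype(np.float32) / 255.
    # Add a channel axis for convolutional models
    return X_train.reshape((-1, 28, 28, 1)), X_test.reshape((-1, 28, 28, 1))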

Example #4
def trainModel(startEpoch=0):
    # Create models
    print("Creating Autoencoder...")
    autoencoder, _, _ = getModels()
    autoencoder.compile(optimizer=RMSprop(lr=0.00025), loss="mse")

    # From which we start
    if startEpoch > 0:
        # Load Autoencoder weights
        print("Loading weights...")
        autoencoder.load_weights(modelsPath + modelName)

    print("Loading dataset...")
    X_train, X_test = loadDataset()

    # Compute number of batches
    nbBatch = int(X_train.shape[0] / batchSize)

    # Train the Autoencoder on dataset
    print(
        "Training Autoencoder for {} epochs with {} batches per epoch and {} samples per batch."
        .format(nbEpoch, nbBatch, batchSize))
    print("Run id: {}".format(runID))

    # Debug utils writer
    writer = tf.summary.FileWriter("/tmp/logs/" + runID)
    batchTimes = [0. for i in range(5)]

    # For each epoch
    for epoch in range(startEpoch, nbEpoch):
        # For each batch
        for batchIndex in range(nbBatch):
            batchStartTime = time.time()
            # Get batch
            X = X_train[batchIndex * batchSize:(batchIndex + 1) * batchSize]

            # Train on batch
            autoencoderLoss = autoencoder.train_on_batch(X, X)
            trainingSummary = tf.Summary.Value(
                tag="Loss", simple_value=float(autoencoderLoss))

            # Compute ETA
            batchTime = time.time() - batchStartTime
            batchTimes = batchTimes[1:] + [batchTime]
            eta = getETA(
                sum(batchTimes) / len(batchTimes), nbBatch, batchIndex,
                nbEpoch, epoch)

            # Save reconstructions on train/test samples
            if batchIndex % 2 == 0:
                visualizeReconstructedImages(X_train[:16],
                                             X_test[:16],
                                             autoencoder,
                                             save=True,
                                             label="{}_{}".format(
                                                 epoch, batchIndex))

            # Validation & Tensorboard Debug
            if batchIndex % 20 == 0:
                validationLoss = autoencoder.evaluate(X_test[:512],
                                                      X_test[:512],
                                                      batch_size=256,
                                                      verbose=0)
                validationSummary = tf.Summary.Value(
                    tag="Validation Loss", simple_value=float(validationLoss))
                summary = tf.Summary(
                    value=[trainingSummary, validationSummary])
                print(
                    "Epoch {}/{} - Batch {}/{} - Loss: {:.3f}/{:.3f} - ETA:".
                    format(epoch + 1, nbEpoch, batchIndex + 1, nbBatch,
                           autoencoderLoss, validationLoss), eta)
            else:
                print(
                    "Epoch {}/{} - Batch {}/{} - Loss: {:.3f} - ETA:".format(
                        epoch + 1, nbEpoch, batchIndex + 1, nbBatch,
                        autoencoderLoss), eta)
                summary = tf.Summary(value=[
                    trainingSummary,
                ])
            writer.add_summary(summary, epoch * nbBatch + batchIndex)

        # Save model every epoch
        print("Saving autoencoder...")
        autoencoder.save_weights(modelsPath + modelName, overwrite=True)
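
getETA() is another helper missing from the listing; its call site passes the mean batch time and the batch/epoch counters and expects something printable back. One hypothetical implementation:

def getETA(batchTime, nbBatch, batchIndex, nbEpoch, epoch):
    # Batches left in this epoch, plus all batches of the remaining epochs
    remainingBatches = (nbBatch - batchIndex) + (nbEpoch - epoch - 1) * nbBatch
    seconds = int(batchTime * remainingBatches)
    # Format the estimate as H:MM:SS
    return "{}:{:02d}:{:02d}".format(seconds // 3600, (seconds % 3600) // 60, seconds % 60)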

Example #5
def train():
    #======== Parameters ========#
    # Gan hacks tricks
    softLabels = True
    flipLabels = False  # max(log(D)) instead of min(log(1-D))
    addNoiseToDiscriminator = True
    useDropout = False

    # Torch heuristic for training
    useTorchHeuristic = True
    lossMargin = 0.25

    # Classic training parameters
    epochNb = 3
    batchSize = 128

    # Display parameters
    displayImageNb = 64

    # Optimizers for generator and discriminator
    d_optim = SGD(lr=0.00025, momentum=0.9, nesterov=True)
    g_optim = Adam(lr=0.00025, beta_1=0.5)
    #============================#

    # Load data
    X_train, X_test = loadDataset()
    
    # Create models
    discriminator = getDiscriminator(useDropout)
    generator = getGenerator(useDropout)
    discriminator_on_generator = getGeneratorContainingDiscriminator(generator, discriminator)
    
    # Compile models with optimizers
    generator.compile(loss='binary_crossentropy', optimizer="SGD")
    discriminator_on_generator.compile(loss='binary_crossentropy', optimizer=g_optim)
    discriminator.trainable = True
    discriminator.compile(loss='binary_crossentropy', optimizer=d_optim)
    
    # Prepare 100D noise matrix for each batch
    # We keep the same noise to follow the generation
    displayNoise = np.random.normal(0., 1., (displayImageNb, 100))
        
    # Torch.ch heuristic for training
    discriminator.trainable = True
    generator.trainable = True

    # Compute number of batches
    batchNb = int(X_train.shape[0]/batchSize)

    print("Starting training for {} epochs with {} batches of size {}".format(epochNb, batchNb, batchSize))

    # For each epoch
    for epoch in range(epochNb):        
        # For each batch
        for batchIndex in range(batchNb):
            # Save generated images to disk every batch
            generated_images = generator.predict(displayNoise, verbose=0)
            saveGeneratedImages(generated_images, "{}_{}".format(epoch, batchIndex))

            #==== TRAINING THE DISCRIMINATOR ====#
            # Get real image batch from data
            real_images = X_train[batchIndex*batchSize:(batchIndex+1)*batchSize]
            # Add noise to make it harder for discriminator
            X = addNoise(real_images) if addNoiseToDiscriminator else real_images
            y = getTrueLabels(batchSize, soft=softLabels) 
            # Train on real images, or just compute loss if not trainable
            d_loss_real = discriminator.train_on_batch(X, y)

            # Generate a batch of fake images
            noise = np.random.normal(0., 1., (batchSize, 100))
            generated_images = generator.predict(noise, verbose=0)
            # Add noise to make it harder for discriminator
            X = addNoise(generated_images) if addNoiseToDiscriminator else generated_images
            y = getFakeLabels(batchSize, soft=softLabels)
            # Train on fake images, or just compute loss if not trainable
            d_loss_fake = discriminator.train_on_batch(X, y)

            #==== TRAINING THE GENERATOR ====#
            for i in range(2):
                # The generator gets two updates per batch, since the
                # discriminator was also updated twice (real + fake)
                shouldDiscriminatorBeTrained = discriminator.trainable
                # Freeze the discriminator so only the generator is updated
                discriminator.trainable = False
                # Train generator, or just compute loss if not trainable
                noise = np.random.normal(0., 1., (batchSize, 100))
                y = getTrueLabels(batchSize, soft=softLabels, flipped=flipLabels)
                g_loss = discriminator_on_generator.train_on_batch(noise, y)
                # Restore true value of trainable
                discriminator.trainable = shouldDiscriminatorBeTrained

            print("Epoch {}/{} - Batch {}/{} - (G: {:.3f}) - (D_true: {:.3f}, D_fake: {:.3f})".format(epoch+1, epochNb, batchIndex+1, batchNb, g_loss, d_loss_real, d_loss_fake))
                        
            if useTorchHeuristic:
                # Assume both should be trained
                discriminator.trainable = True
                generator.trainable = True

                # Discriminator too powerful
                if d_loss_fake < lossMargin or d_loss_real < lossMargin:
                    print "Stopping D for next update"
                    discriminator.trainable = False
                
                # Discriminator too weak
                if d_loss_fake > (1.0-lossMargin) or d_loss_real > (1.0-lossMargin):
                    print "Stopping G for next update"
                    generator.trainable = False
                
                # If both ended up frozen, restart training for both
                if not discriminator.trainable and not generator.trainable:
                    print("Starting G and D")
                    discriminator.trainable = True
                    generator.trainable = True
            
            # Save model weights every 40 batches
            if batchIndex % 40 == 0:
                print("Saving models...")
                generator.save_weights('Models/generator', True)
                discriminator.save_weights('Models/discriminator', True)
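
The GAN-hacks helpers used above (getTrueLabels, getFakeLabels, addNoise) are not shown on this page. A sketch of what they plausibly do, assuming soft labels sampled near 1 (or 0) and light Gaussian pixel noise; the ranges and the noise stddev are guesses:

import numpy as np

def getTrueLabels(batchSize, soft=False, flipped=False):
    # Soft labels: sample "real" targets in [0.9, 1.0] instead of exactly 1
    labels = np.random.uniform(0.9, 1.0, batchSize) if soft else np.ones(batchSize)
    return 1. - labels if flipped else labels

def getFakeLabels(batchSize, soft=False):
    # Soft labels: sample "fake" targets in [0.0, 0.1] instead of exactly 0
    return np.random.uniform(0., 0.1, batchSize) if soft else np.zeros(batchSize)

def addNoise(images, stddev=0.05):
    # Light Gaussian pixel noise keeps the discriminator from memorizing inputs
    noisy = images + np.random.normal(0., stddev, images.shape)
    return np.clip(noisy, 0., 1.)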