Example #1
def main():

    config = get_config(MODEL_CONFIG)
    label_encoder, cdl_mapping = get_label_encoder_and_mapping()

    model_name = get_model_name()
    model, X_mean_train, X_std_train = fit_model(config, label_encoder,
                                                 cdl_mapping)

    print(f"Saving model to {model_name}")
    model.save(model_name)

    # TODO Also save label encoder?
    save_X_mean_and_std_train(X_mean_train, X_std_train, model_name)

    test_scenes = get_annotated_scenes(config["test_scenes"], label_encoder,
                                       cdl_mapping)
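    # Apply the training-set mean/std to the test scenes so preprocessing
    # matches what the model saw during training.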
    normalize_scenes(test_scenes, X_mean_train, X_std_train)

    # Note: use a large batch size so that test set stats have a small standard error
    test_generator = get_generator(test_scenes,
                                   label_encoder,
                                   IMAGE_SHAPE,
                                   batch_size=600)
    test_X, test_y, test_weights = next(test_generator)

    # TODO Show test set loss for each objective
    # Also fit some simple baseline models (null model, regression
    #  tree that only sees average for each band in the image, nearest neighbors...),
    #  compute their test set loss and show on a plot with CNN test loss
    print_classification_reports(test_X, test_y, model, label_encoder)
Example #2
def initialize_modules(self):
    """Create all main modules."""
    self.generator = get_generator(self.instruction_set)
    self.input_gen: InputGenerator = get_input_generator()
    self.executor: Executor = get_executor()
    self.model: Model = get_model(self.executor.read_base_addresses())
    self.analyser: Analyser = get_analyser()
    self.coverage: Coverage = get_coverage(self.instruction_set, self.executor, self.model,
                                           self.analyser)
Example #3
def main():

    config = get_config(MODEL_CONFIG)

    label_encoder, cdl_mapping = get_label_encoder_and_mapping()

    model, X_mean_train, X_std_train = fit_model(config, label_encoder,
                                                 cdl_mapping)

    # TODO Filename
    # TODO Also save X_{mean,std}_train
    # TODO https://github.com/keras-team/keras/issues/5916 custom objects
    model.save("my_model.h5")

    test_scenes = get_annotated_scenes(config["test_scenes"], label_encoder,
                                       cdl_mapping)
    normalize_scenes(test_scenes, X_mean_train, X_std_train)

    test_generator = get_generator(test_scenes,
                                   label_encoder,
                                   IMAGE_SHAPE,
                                   batch_size=600)
    test_X, test_y = next(test_generator)

    print_classification_reports(test_X, test_y, model, label_encoder)

    colormap = get_colormap(label_encoder)
    print(f"Colormap used for predictions: {colormap}")

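    # Predict every pixel of each full test scene, using the colormap above
    # for the rendered output.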
    for test_scene in config["test_scenes"]:

        predict_pixels_entire_scene(
            model,
            test_scene,
            X_mean_train,
            X_std_train,
            IMAGE_SHAPE,
            label_encoder,
            colormap,
        )
Example #4
import os  # for portable checkpoint paths below

gan_opt = Adam(lr=0.00009, beta_1=0.5, beta_2=0.999, epsilon=1e-08)
d_opt = Adam(lr=0.00009, beta_1=0.5, beta_2=0.999, epsilon=1e-08)

if model_checkpoint == 'y':
    print("Loading the saved models:")
    generator = load_model(os.path.join(model_path, "generator.hd5"))
    discriminator = load_model(os.path.join(model_path, "discriminator.hd5"))
    set_trainable(discriminator, True)
    discriminator.compile(loss='binary_crossentropy', optimizer=d_opt)
    gan = load_model(os.path.join(model_path, "gan.hd5"),
                     custom_objects={'PSNR': psnr.PSNR})
else:
    generator = generator(LRDim, num_residual_blocks)
    discriminator = discriminator(HRDim, d_opt)
    generator = generator.get_generator()
    discriminator = discriminator.get_discriminator()
    gan = get_gan_network(generator, discriminator, gan_opt)

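# The discriminator trains standalone on real/fake classification; the
# combined GAN optimizes MSE (content) plus binary cross-entropy
# (adversarial) losses and tracks PSNR as a quality metric.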
discriminator.compile(loss='binary_crossentropy', optimizer=d_opt)
gan.compile(loss=['mse', 'binary_crossentropy'],
            optimizer=gan_opt,
            metrics=[psnr.PSNR, 'accuracy'])

epochs = 50
batch_size = 8

batch_count = len(LRimages) // batch_size

loss = {'d_loss': [], 'g_loss': []}
Example #5
# Assumed imports (the original snippet omitted them); classic standalone
# Keras matches the lr= argument used below.
import numpy as np
from keras.datasets import fashion_mnist
from keras.layers import Input
from keras.models import Model
from keras.optimizers import Adam

from utils import plotLoss, plotGeneratedImages, saveModels

# dimension of latent space
LATENT_SPACE_DIM = 100

# Load Fashion-MNIST data
(X_train, Y_train), (X_test, Y_test) = fashion_mnist.load_data()
X_train = X_train.astype(np.float32) / 255.0
X_train = np.expand_dims(X_train, axis=-1)  # (num, H, W, C)

dLosses = []
gLosses = []

# Combined network
generator = get_generator((LATENT_SPACE_DIM,))
discriminator = get_discriminator((28, 28, 1))  # shape of Fashion-MNIST images

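# Freeze the discriminator inside the combined model so that GAN updates
# only adjust the generator's weights.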
discriminator.trainable = False
ganInput = Input(shape=(LATENT_SPACE_DIM,))
x = generator(ganInput)
ganOutput = discriminator(x)
gan = Model(inputs=ganInput, outputs=ganOutput)
gan.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.0002, beta_1=0.5))


def train(epochs=1, batchSize=128):
    batchCount = X_train.shape[0] // batchSize
    print('[INIT] Epochs:', epochs)
    print('[INIT] Batch size:', batchSize)
    print('[INIT] Batches per epoch:', batchCount)
Example #6
def fit_model(config, label_encoder, cdl_mapping):

    training_scenes = get_annotated_scenes(config["training_scenes"],
                                           label_encoder, cdl_mapping)

    unique_training_images = sum(
        (x[0].shape[0] // IMAGE_SHAPE[0]) * (x[0].shape[1] // IMAGE_SHAPE[1])
        for x in training_scenes
    )
    print(
        f"Done loading {len(training_scenes)} training scenes containing {unique_training_images} unique images of shape {IMAGE_SHAPE}"
    )

    validation_scenes = get_annotated_scenes(config["validation_scenes"],
                                             label_encoder, cdl_mapping)

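    # Normalization statistics are computed on the training scenes only and
    # then applied to both training and validation data to avoid leakage.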
    X_mean_train, X_std_train = get_X_mean_and_std(training_scenes)

    print(f"X_mean_train = {X_mean_train}")
    print(f"X_std_train = {X_std_train}")

    normalize_scenes(training_scenes, X_mean_train, X_std_train)
    normalize_scenes(validation_scenes, X_mean_train, X_std_train)

    model = get_keras_model(IMAGE_SHAPE, label_encoder)

    # plot_model(model, to_file='model.png')

    training_generator = get_generator(training_scenes,
                                       label_encoder,
                                       IMAGE_SHAPE,
                                       batch_size=10)

    sample_batch = next(training_generator)

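    # sample_batch is an (inputs, targets) tuple; targets is a dict with one
    # labeled array per model output.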
    for name, values in sample_batch[1].items():
        print(f"Sample batch of {name}: {Counter(values.flatten().tolist())}")

    print(f"Shape of sample batch X: {sample_batch[0][0].shape}")

    save_sample_images(sample_batch, X_mean_train, X_std_train, label_encoder)

    validation_generator = get_generator(validation_scenes,
                                         label_encoder,
                                         IMAGE_SHAPE,
                                         batch_size=10)

    # TODO Tensorboard
    history = model.fit(
        x=training_generator,
        steps_per_epoch=100,
        epochs=200,
        verbose=True,
        callbacks=[
            callbacks.EarlyStopping(patience=20,
                                    monitor="val_loss",
                                    restore_best_weights=True,
                                    verbose=True)
        ],
        validation_data=validation_generator,
        validation_steps=25,
    )

    return model, X_mean_train, X_std_train
Example #7
def fit_model(config, label_encoder, cdl_mapping):

    training_scenes = get_annotated_scenes(config["training_scenes"],
                                           label_encoder, cdl_mapping)

    unique_training_images = sum(
        (x[0].shape[0] // IMAGE_SHAPE[0]) * (x[0].shape[1] // IMAGE_SHAPE[1])
        for x in training_scenes
    )
    print(
        f"Done loading {len(training_scenes)} training scenes containing {unique_training_images} unique images of shape {IMAGE_SHAPE}"
    )

    validation_scenes = get_annotated_scenes(config["validation_scenes"],
                                             label_encoder, cdl_mapping)

    X_mean_train, X_std_train = get_X_mean_and_std(training_scenes)

    print(f"X_mean_train = {X_mean_train}")
    print(f"X_std_train = {X_std_train}")

    normalize_scenes(training_scenes, X_mean_train, X_std_train)
    normalize_scenes(validation_scenes, X_mean_train, X_std_train)

    model = get_keras_model(IMAGE_SHAPE, label_encoder)

    # plot_model(model, to_file='model.png')

    training_generator = get_generator(training_scenes, label_encoder,
                                       IMAGE_SHAPE)

    sample_batch = next(training_generator)

    for name, values in sample_batch[1].items():
        print(f"Sample batch of {name}: {Counter(values.flatten().tolist())}")

    print(f"Shape of sample batch X: {sample_batch[0][0].shape}")

    save_sample_images(sample_batch, X_mean_train, X_std_train, label_encoder)

    validation_generator = get_generator(validation_scenes, label_encoder,
                                         IMAGE_SHAPE)

    # TODO Also use class_weight when computing test accuracy stats
    # TODO Doesn't work for pixels, see https://github.com/keras-team/keras/issues/3653
    class_weight = get_class_weight(label_encoder)
    print(f"Class weights used in training: {class_weight}")

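    # Note: fit_generator is the legacy Keras API; newer versions accept
    # generators directly via model.fit (as in the previous example).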
    # TODO Tensorboard
    history = model.fit_generator(
        generator=training_generator,
        steps_per_epoch=50,
        epochs=100,
        verbose=True,
        callbacks=[
            callbacks.EarlyStopping(patience=20,
                                    monitor="val_loss",
                                    restore_best_weights=True)
        ],
        class_weight=class_weight,
        validation_data=validation_generator,
        validation_steps=10,
    )

    return model, X_mean_train, X_std_train
Example #8
    # Reconstructed: the original snippet was truncated here; mode='val' is
    # inferred from the parallel train_dataset call below.
    val_dataset = CocoStuffDataSet(mode='val',
                                   supercategories=['animal'],
                                   height=HEIGHT,
                                   width=WIDTH,
                                   do_normalize=False)
    train_dataset = CocoStuffDataSet(mode='train',
                                     supercategories=['animal'],
                                     height=HEIGHT,
                                     width=WIDTH,
                                     do_normalize=False)
    val_loader = DataLoader(val_dataset, args.batch_size, shuffle=False)
    train_loader = DataLoader(train_dataset, args.batch_size, shuffle=True)
    NUM_CLASSES = train_dataset.numClasses
    print("Number of classes: {}".format(NUM_CLASSES))
    image_shape = (3, HEIGHT, WIDTH)
    segmentation_shape = (NUM_CLASSES, HEIGHT, WIDTH)

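    # The segmentation model is always built; an adversarial discriminator is
    # added only when args.train_gan is set.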
    discriminator = None
    generator = get_generator(args.generator_name, NUM_CLASSES, args.use_bn)
    if args.train_gan:
        discriminator = GAN(NUM_CLASSES, segmentation_shape, image_shape)
    trainer = Trainer(generator, discriminator, train_loader, val_loader,
                      gan_reg=args.gan_reg, weight_clip=args.weight_clip,
                      grad_clip=args.grad_clip, noise_scale=args.noise_scale,
                      disc_lr=args.disc_lr, gen_lr=args.gen_lr,
                      train_gan=args.train_gan, experiment_dir=experiment_dir,
                      resume=args.load_model, load_iter=args.load_iter)

    if args.mode == "train":
        trainer.train(num_epochs=args.epochs,
                      print_every=args.print_every,
                      eval_every=args.eval_every)
    elif args.mode == 'eval':
        assert args.load_model, "Need to load model to evaluate it"
        # just do evaluation
        print(trainer.get_confusion_matrix(val_loader))