Example #1
    def train(_run):
        # Build model
        model = build_model(hparams, **dataset.preprocessing.kwargs)

        # Compile model
        model.compile(optimizer=optimizers.make_optimizer(
            hparams.optimizer, hparams.opt_param),
                      loss="categorical_crossentropy",
                      metrics=["categorical_accuracy"])

        # Print a summary of the model
        lq.models.summary(model)

        # If the model already exists, load it and continue training
        initial_epoch = 0
        if os.path.exists(os.path.join(model_dir, "stats.json")):
            with open(os.path.join(model_dir, "stats.json"),
                      "r") as stats_file:
                initial_epoch = json.load(stats_file)["epoch"]
                click.echo(
                    f"Restoring model from {model_path} at epoch = {initial_epoch}"
                )
                model.load_weights(model_path)

        cb = [callbacks.SaveStats(model_dir=model_dir)]

        if tb_graph:
            # If TensorBoard logging is enabled, write the model graph
            cb.extend([
                tf.keras.callbacks.TensorBoard(
                    log_dir=os.path.join(model_dir, "tb"),
                    write_graph=tb_graph,
                    histogram_freq=0,
                    update_freq='epoch',
                    # update_freq=0,
                    profile_batch=0,
                    embeddings_freq=0),
            ])

        # Callback for sending data to Sacred Experiment
        cb.extend([
            tf.keras.callbacks.LambdaCallback(
                on_epoch_end=lambda epoch, logs: [
                    ex.log_scalar(metric, value, epoch + 1)
                    for (metric, value) in logs.items()
                ])
        ])

        # Train the model
        train_log = model.fit(
            dataset.train_data(hparams.batch_size),
            epochs=hparams.epochs,
            steps_per_epoch=dataset.train_examples // hparams.batch_size,
            validation_data=dataset.validation_data(hparams.batch_size),
            validation_steps=dataset.validation_examples // hparams.batch_size,
            initial_epoch=initial_epoch,
            callbacks=cb)
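
The Sacred logging callback above can be factored into a small reusable helper. A minimal sketch, assuming a Sacred Experiment instance `ex` as in the snippet; the helper name `make_sacred_logger` is hypothetical:

import tensorflow as tf

def make_sacred_logger(ex):
    # Forward every metric Keras reports at epoch end to the Sacred experiment;
    # Sacred's Experiment.log_scalar(name, value, step) records one scalar per call.
    return tf.keras.callbacks.LambdaCallback(
        on_epoch_end=lambda epoch, logs: [
            ex.log_scalar(metric, value, epoch + 1)
            for metric, value in (logs or {}).items()
        ])

The returned callback would simply be appended to the `cb` list in place of the inline LambdaCallback.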
Example #2
def test(build_model, dataset, hparams, logdir):
    # Check whether the given directory already contains a trained model
    if os.path.exists(os.path.join(logdir, "stats.json")):
        # then mark this as the directory to load weights from
        model_dir = logdir
    else:
        # Raise Error
        raise RuntimeError(f"No valid model stats file found in {logdir}")
    model_path = os.path.join(model_dir, "weights.h5")

    # Build model
    model = build_model(hparams, **dataset.preprocessing.kwargs)

    # Compile model
    model.compile(optimizer=optimizers.make_optimizer(hparams.optimizer,
                                                      hparams.opt_param),
                  loss="categorical_crossentropy",
                  metrics=["categorical_accuracy"])

    # Print a summary of the model
    lq.models.summary(model)

    # Load model weights from the specified file and print the first few values
    # of each trainable weight before and after loading as a sanity check
    print("Before loading...")
    for layer in model.layers:
        for _w in layer.trainable_weights:
            print("{:40s} {:40s}".format(layer.name, _w.name),
                  tf.keras.backend.get_value(_w).flatten()[:3])
    # Use the custom loader instead of the plain model.load_weights(model_path)
    utils.load_weights(model, model_path)
    print("After loading...")
    for layer in model.layers:
        for _w in layer.trainable_weights:
            print("{:40s} {:40s}".format(layer.name, _w.name),
                  tf.keras.backend.get_value(_w).flatten()[:3])

    # Test this model
    test_log = model.evaluate(dataset.test_data(hparams.batch_size),
                              steps=dataset.test_examples //
                              hparams.batch_size)

    data = [["Metric", "Value"]]
    for (idx, metric) in enumerate(model.metrics_names):
        data.append([metric, test_log[idx]])

    from terminaltables import AsciiTable
    print(AsciiTable(data, title="Test Statistics").table)
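
For reference, a standalone sketch of the terminaltables call used above, with placeholder metric values rather than measured results:

from terminaltables import AsciiTable

data = [["Metric", "Value"],
        ["loss", 0.1234],                 # placeholder values, not actual results
        ["categorical_accuracy", 0.9876]]
print(AsciiTable(data, title="Test Statistics").table)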
Example #3
def test(build_model, dataset, hparams, logdir):
    # Check whether the given directory already contains a trained model
    if os.path.exists(os.path.join(logdir, "stats.json")):
        # then mark this as the directory to load weights from
        model_dir = logdir
    else:
        # Raise Error
        raise RuntimeError(f"No valid model stats file found in {logdir}")
    model_path = os.path.join(model_dir, "weights.h5")

    # Build model
    model = build_model(hparams, **dataset.preprocessing.kwargs)

    # Custom metric
    import math

    def PSNR(y_true, y_pred):
        max_pixel = 1.0
        return 10.0 * (1.0 / math.log(10)) * K.log(
            (max_pixel**2) / (K.mean(K.square(y_pred - y_true))))

    # Compile model
    model.compile(optimizer=optimizers.make_optimizer(hparams.optimizer,
                                                      hparams.opt_param),
                  loss="mse",
                  metrics=[PSNR])

    # Print a summary of the model
    lq.models.summary(model)

    # Load model weights from the specified file
    model.load_weights(model_path)

    # Test this model
    test_log = model.evaluate(dataset.test_data(hparams.batch_size),
                              steps=dataset.test_examples //
                              hparams.batch_size)

    data = [["Metric", "Value"]]
    for (idx, metric) in enumerate(model.metrics_names):
        data.append([metric, test_log[idx]])

    from terminaltables import AsciiTable
    print(AsciiTable(data, title="Test Statistics").table)
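
The PSNR metric above is the usual 10 * log10(max_pixel^2 / MSE), written with natural logarithms because the Keras backend exposes K.log (natural log) rather than log10. A minimal NumPy equivalent for checking values offline, assuming images scaled to [0, 1]:

import numpy as np

def psnr_np(y_true, y_pred, max_pixel=1.0):
    # PSNR = 10 * log10(max_pixel^2 / MSE); matches the Keras metric above
    # because 10 * (1 / ln 10) * ln(x) == 10 * log10(x).
    mse = np.mean((np.asarray(y_pred, dtype=np.float64) -
                   np.asarray(y_true, dtype=np.float64)) ** 2)
    return 10.0 * np.log10(max_pixel ** 2 / mse)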
Example #4
def launch_job(args):
    if args.batch_size == 1000:
        args.log_interval = 6
    elif args.batch_size == 500:
        args.log_interval = 12
    elif args.batch_size == 250:
        args.log_interval = 24
    elif args.batch_size == 125:
        args.log_interval = 48
    # Create the log directory if needed and save the run arguments
    log_dir = build_log_dir(args)
    os.makedirs(log_dir, exist_ok=True)
    with open(os.path.join(log_dir, 'args.pkl'), 'wb') as f:
        pickle.dump(args, f)

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(datasets.SVHN(
        './svhn_data',
        split="train",
        download=True,
        transform=transforms.Compose([
            transforms.Grayscale(),
            transforms.ToTensor(),
            transforms.Normalize((0.4435, ), (0.1970, ))
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               **kwargs)
    test_loader = torch.utils.data.DataLoader(datasets.SVHN(
        './svhn_data',
        split="test",
        download=True,
        transform=transforms.Compose([
            transforms.Grayscale(),
            transforms.ToTensor(),
            transforms.Normalize((0.4435, ), (0.1970, ))
        ])),
                                              batch_size=args.test_batch_size,
                                              shuffle=True,
                                              **kwargs)

    model = Net().to(device)

    import optimizers
    optimizer = optimizers.make_optimizer(args, model)

    accuracies = []
    losses = []
    times = [0.0]

    if args.decay_lr:
        lambda_lr = lambda epoch: 0.9
        scheduler = LambdaLR(optimizer, lr_lambda=[lambda_lr])
    for epoch in range(1, args.epochs + 1):
        if args.decay_lr:
            scheduler.step()
        train(args, model, device, train_loader, test_loader, optimizer, epoch,
              [accuracies, losses, times])

    log_stats(accuracies, losses, times, args, model, device, test_loader,
              epoch, 'inf')
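
Note that torch.optim.lr_scheduler.LambdaLR sets each group's learning rate to base_lr * lr_lambda(epoch), so the constant `lambda epoch: 0.9` above gives a fixed 0.9 * base_lr every epoch rather than a compounding decay. A standalone sketch with a throwaway SGD optimizer:

import torch
from torch.optim.lr_scheduler import LambdaLR

params = [torch.nn.Parameter(torch.zeros(1))]
opt = torch.optim.SGD(params, lr=0.1)
scheduler = LambdaLR(opt, lr_lambda=lambda epoch: 0.9)
for epoch in range(3):
    opt.step()
    scheduler.step()
    print(epoch, opt.param_groups[0]["lr"])  # 0.09 every epoch (0.1 * 0.9)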
Example #5
def launch_job(args):
    if args.batch_size == 1000:
        args.log_interval = 6
    elif args.batch_size == 500:
        args.log_interval = 12
    elif args.batch_size == 250:
        args.log_interval = 12  # 24
    elif args.batch_size == 125:
        args.log_interval = 48
    # Create the log directory if needed and save the run arguments
    log_dir = build_log_dir(args)
    os.makedirs(log_dir, exist_ok=True)
    with open(os.path.join(log_dir, 'args.pkl'), 'wb') as f:
        pickle.dump(args, f)

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(
        datasets.FashionMNIST('./data', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize(mean=(0.5001,), std=(1.1458,))
                       ])),
        batch_size=args.batch_size, shuffle=True, **kwargs)

    test_loader = torch.utils.data.DataLoader(
        datasets.FashionMNIST('./data', train=False, transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize(mean=(0.5001,), std=(1.1458,))
                       ])),
        batch_size=args.test_batch_size, shuffle=True, **kwargs)

    model = Net().to(device)

    import optimizers
    optimizer = optimizers.make_optimizer(args, model)

    accuracies = []
    losses = []
    times = [0.0]

    if args.decay_lr:
        # lambda_lr = lambda epoch: 0.9 #1.0 / np.sqrt(epoch+1)
        lambda_lr = lambda epoch: 1.0 / np.sqrt(epoch+1)
        if args.optim in ['ngd_bd', 'natural_amsgrad_bd', 'natural_adam_bd']:
            scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=[lambda_lr, lambda_lr, lambda_lr, lambda_lr])
        else:
            scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=[lambda_lr])
        # scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=0.1)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, test_loader, optimizer, epoch, [accuracies, losses, times])
        if args.decay_lr:
            if args.verbose:
                print ("Decay factor: ", 1.0 / np.sqrt(epoch+1))
            scheduler.step()

    log_stats(accuracies, losses, times, args, model, device, test_loader, epoch, 'inf')
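
When LambdaLR is given a list of functions it requires exactly one per parameter group, which is presumably why the block-diagonal optimizers ('ngd_bd', 'natural_amsgrad_bd', 'natural_adam_bd') above get a list of four identical lambdas. A minimal sketch with a plain two-group SGD optimizer standing in for them:

import numpy as np
import torch
from torch.optim import lr_scheduler

group_a = [torch.nn.Parameter(torch.zeros(1))]
group_b = [torch.nn.Parameter(torch.zeros(1))]
opt = torch.optim.SGD([{"params": group_a}, {"params": group_b, "lr": 0.01}], lr=0.1)

def decay(epoch):
    return 1.0 / np.sqrt(epoch + 1)

scheduler = lr_scheduler.LambdaLR(opt, lr_lambda=[decay, decay])  # one lambda per group
for epoch in range(3):
    opt.step()
    scheduler.step()
    print([round(g["lr"], 4) for g in opt.param_groups])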
Example #6
def vispics(build_model, dataset, hparams, logdir):
    # Check whether the given directory already contains a trained model
    if os.path.exists(os.path.join(logdir, "stats.json")):
        # then mark this as the directory to load weights from
        model_dir = logdir
    else:
        # Raise Error
        raise RuntimeError(f"No valid model stats file found in {logdir}")
    model_path = os.path.join(model_dir, "weights.h5")

    # Build model
    model = build_model(hparams, **dataset.preprocessing.kwargs)

    # Custom metric
    import math

    def PSNR(y_true, y_pred):
        max_pixel = 1.0
        return 10.0 * (1.0 / math.log(10)) * K.log(
            (max_pixel**2) / (K.mean(K.square(y_pred - y_true))))

    # Compile model
    model.compile(optimizer=optimizers.make_optimizer(hparams.optimizer,
                                                      hparams.opt_param),
                  loss="mse",
                  metrics=[PSNR])

    # # Print Summary of models
    # lq.models.summary(model)

    viz_model = tf.keras.models.Model(inputs=model.input,
                                      outputs=[model.input, model.output])

    # Load model weights from the specified file
    model.load_weights(model_path)

    n_samples = 10000
    recon_imgs = viz_model.predict(dataset.test_data(n_samples), steps=1)

    import matplotlib.pyplot as plt
    import numpy as np

    # Optional: compare the reconstructed images (BAE + BOP) with the actual images

    PSNR = np.zeros(n_samples)  # per-sample PSNR (note: shadows the PSNR metric defined above)
    for idx in range(n_samples):
        print(idx)
        loss, psnr = model.evaluate(
            np.expand_dims(recon_imgs[1][idx, :, :, :], axis=0),
            np.expand_dims(recon_imgs[0][idx, :, :, :], axis=0))
        PSNR[idx] = psnr

    idxsorted = np.argsort(PSNR)

    idx0 = idxsorted[0]
    idx1 = idxsorted[1]
    idx4999 = idxsorted[int(n_samples / 2 - 1)]
    idx9998 = idxsorted[n_samples - 2]
    idx9999 = idxsorted[n_samples - 1]
    print(idx0, idx1, idx4999, idx9998, idx9999)

    fig, ax = plt.subplots(2, 5, figsize=(7, 3.5))
    # Plot the 5 samples
    ax[0, 0].imshow(0.50 * (recon_imgs[0][idx0, :, :, :] + 1))
    ax[0, 0].set_ylabel('Noisy image')
    ax[1, 0].imshow(0.50 * (recon_imgs[1][idx0, :, :, :] + 1))
    ax[1, 0].set_xlabel('PSNR = {0:.2f}'.format(PSNR[idx0]))
    ax[1, 0].set_ylabel('BAE w/ BOP')

    ax[0, 1].imshow(0.50 * (recon_imgs[0][idx1, :, :, :] + 1))
    ax[1, 1].imshow(0.50 * (recon_imgs[1][idx1, :, :, :] + 1))
    ax[1, 1].set_xlabel('PSNR = {0:.2f}'.format(PSNR[idx1]))

    ax[0, 2].imshow(0.50 * (recon_imgs[0][idx4999, :, :, :] + 1))
    ax[1, 2].imshow(0.50 * (recon_imgs[1][idx4999, :, :, :] + 1))
    ax[1, 2].set_xlabel('PSNR = {0:.2f}'.format(PSNR[idx4999]))

    ax[0, 3].imshow(0.50 * (recon_imgs[0][idx9998, :, :, :] + 1))
    ax[1, 3].imshow(0.50 * (recon_imgs[1][idx9998, :, :, :] + 1))
    ax[1, 3].set_xlabel('PSNR = {0:.2f}'.format(PSNR[idx9998]))

    ax[0, 4].imshow(0.50 * (recon_imgs[0][idx9999, :, :, :] + 1))
    ax[1, 4].imshow(0.50 * (recon_imgs[1][idx9999, :, :, :] + 1))
    ax[1, 4].set_xlabel('PSNR = {0:.2f}'.format(PSNR[idx9999]))

    fig.savefig("./../results/BAEBOPfigsbestworst.pdf",
                format='pdf',
                bbox_inches='tight')
    plt.show()

    exit()

    # Reached only if the exit() above is removed: once you have found the five
    # indices, use them to look up the corresponding reconstructions from AE+Adam
    # and BAE+Adam
    idx0 = 2590
    idx1 = 6869
    idx4999 = 4728
    idx9998 = 8264
    idx9999 = 9701
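
The best/median/worst indices used in this example come from sorting the per-sample PSNR array: the two worst, the median, and the two best reconstructions. A standalone sketch of that selection with a placeholder PSNR array:

import numpy as np

psnr = np.random.rand(10000)        # placeholder per-sample PSNR values
order = np.argsort(psnr)            # ascending: worst reconstruction first
worst, second_worst = order[0], order[1]
median = order[len(order) // 2 - 1]
second_best, best = order[-2], order[-1]
print(worst, second_worst, median, second_best, best)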