Exemplo n.º 1
0
Arquivo: test.py Projeto: yongxinw/zsl
    def __init__(self, args):
        """Set up the evaluation harness: restore the autoencoder from
        args.checkpoint and build loaders over the CUB train/test/val
        splits, then prepare the zero-shot predictor."""
        super(Tester, self).__init__()

        self.args = args
        self.result = {}

        # Restore the trained autoencoder and switch it to inference mode.
        self.model = AutoEncoder(args)
        self.model.load_state_dict(torch.load(args.checkpoint))
        self.model.cuda()
        self.model.eval()

        # One dataset/loader pair per split; validation uses a fixed
        # batch of 100 shuffled samples, the others use args.batch_size.
        self.train_dataset = CUBDataset(split='train')
        self.train_loader = DataLoader(dataset=self.train_dataset,
                                       batch_size=args.batch_size)
        self.test_dataset = CUBDataset(split='test')
        self.test_loader = DataLoader(dataset=self.test_dataset,
                                      batch_size=args.batch_size)
        self.val_dataset = CUBDataset(split='val')
        self.val_loader = DataLoader(dataset=self.val_dataset,
                                     batch_size=100,
                                     shuffle=True)

        classes_train = self.train_dataset.get_classes('train')
        classes_test = self.test_dataset.get_classes('test')
        print("Load class")
        print(classes_train)
        print(classes_test)

        self.zsl = ZSLPrediction(classes_train, classes_test)
def init_auto(cfg):
    """Build the autoencoder training setup described by *cfg* and start
    training.

    Initializes and logs the config, constructs the model on cfg.device,
    builds train/test dataloaders over GeneDataset, and hands everything
    to train().
    """
    cfg.init_config()
    show_config(cfg)

    print('creating model')
    model = AutoEncoder(cfg).to(cfg.device)
    print_log(cfg.log_path, model)

    print('creating dataloader')
    dataloaders_dict = {}
    for phase in ['train', 'test']:
        dataloaders_dict[phase] = DataLoader(GeneDataset(phase, cfg),
                                             batch_size=cfg.batch_size,
                                             shuffle=cfg.shuffle,
                                             num_workers=cfg.num_workers)
    print('creating dataloader done')

    # Plain reconstruction loss; Adam with weight decay from the config.
    criterion_dict = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(),
                           lr=cfg.learning_rate,
                           weight_decay=cfg.weight_decay)

    print('start training')
    train(model, dataloaders_dict, criterion_dict, optimizer, cfg)
Exemplo n.º 3
0
    def __init__(self, args):
        """Wire up the adversarial autoencoder trainer: G/D network pair,
        CUB train/val loaders, Adam optimizers with step-decay schedulers,
        and a TensorBoard writer under a timestamped log directory."""
        # Networks: generator (autoencoder) and discriminator, both on GPU.
        self.G = AutoEncoder(args)
        self.G.weight_init()
        self.G.cuda()
        self.D = Discriminator(args)
        self.D.weight_init()
        self.D.cuda()
        self.criterion = nn.MSELoss()

        # Data: CUB train/val splits with matching loaders.
        self.train_dataset = CUBDataset(split='train')
        self.valid_dataset = CUBDataset(split='val')
        self.train_loader = DataLoader(dataset=self.train_dataset,
                                       batch_size=args.batch_size)
        self.valid_loader = DataLoader(dataset=self.valid_dataset,
                                       batch_size=args.batch_size)

        # Optimizers: the discriminator runs at half the configured rate;
        # both learning rates halve every 30 scheduler steps.
        self.G_optim = optim.Adam(self.G.parameters(), lr=args.lr_G)
        self.D_optim = optim.Adam(self.D.parameters(), lr=0.5 * args.lr_D)
        self.G_scheduler = StepLR(self.G_optim, step_size=30, gamma=0.5)
        self.D_scheduler = StepLR(self.D_optim, step_size=30, gamma=0.5)

        # Hyper-parameters copied off the argument namespace.
        self.epochs = args.epochs
        self.batch_size = args.batch_size
        self.z_var = args.z_var
        self.sigma = args.sigma
        self.lambda_1 = args.lambda_1
        self.lambda_2 = args.lambda_2

        # TensorBoard writer keyed by launch time. (Attribute name
        # 'writter' is kept as-is; callers depend on it.)
        run_dir = os.path.join(args.log_dir, datetime.now().strftime("%m_%d_%H_%M_%S"))
        self.writter = SummaryWriter(run_dir)
Exemplo n.º 4
0
    def train_autoencoder(self,
                          distribution,
                          fan_in,
                          fan_out,
                          learning_rate=0.01):
        """Train a Theano AutoEncoder with plain minibatch SGD and plot
        its decoding weights.

        Parameters
        ----------
        distribution : weight-initialization distribution forwarded to AutoEncoder.
        fan_in, fan_out : layer fan sizes forwarded to AutoEncoder.
        learning_rate : float, SGD step size (default 0.01).

        Reads self.n_epochs, self.n_train_batch, self.batch_size and the
        training matrix self.trX.
        """
        # allocate symbolic variables for the data
        X = tt.fmatrix('X')  # the data is presented as rasterized images

        autoEncoder = AutoEncoder(X,
                                  distribution,
                                  fan_in,
                                  fan_out,
                                  n_hidden=500,
                                  activation=tt.nnet.sigmoid,
                                  output=tt.nnet.sigmoid)

        # Symbolic gradient of the cost w.r.t. each model parameter.
        gparams = [
            tt.grad(cost=autoEncoder.cost, wrt=param)
            for param in autoEncoder.params
        ]

        # Vanilla SGD update rule: param <- param - lr * grad.
        updates = [(param, param - learning_rate * gparam)
                   for param, gparam in zip(autoEncoder.params, gparams)]

        train = theano.function(inputs=[X],
                                outputs=autoEncoder.cost,
                                updates=updates,
                                allow_input_downcast=True)

        ############
        # TRAINING #
        ############

        tic = timeit.default_timer()

        # go through training epochs
        train_loss = None  # cost of the last minibatch seen in the epoch
        for epoch in range(self.n_epochs):
            # go through trainng set
            for batch_idx in range(0, self.n_train_batch, self.batch_size):
                train_loss = train(self.trX[batch_idx:batch_idx +
                                            self.batch_size])

            print("Training epoch {}, cost {}. %%".format(
                epoch + 1, train_loss))

        toc = timeit.default_timer()
        training_time = (toc - tic)
        print("Average time per epoch = {}.%%".format(training_time))

        # plot encoding weights
        # BUG FIX: the original filtered with `param.shape >= 2`, which
        # compares a shape tuple against an int (TypeError on Python 3).
        # The intent is to keep only 2-D+ parameters, i.e. weight matrices.
        weight = [
            param for param in autoEncoder.get_params()
            if len(param.shape) >= 2
        ]

        util = Utils(None, None, None)
        # util.plot_first_k_numbers(weight[0], 100)
        # plot decoding weights
        util.plot_first_k_numbers(np.transpose(weight[0]), 100)
Exemplo n.º 5
0
 def __init__(self, hparams):
     """Store hyper-parameters, select the loss via get_loss_fn(), and
     build the wrapped AutoEncoder sized by hparams["latent_dim"]."""
     super().__init__()
     # Per-epoch accumulators: validation metrics keyed by name, and the
     # running list of training losses.
     self.val_dict = {}
     self.train_losses = []
     self.hparams = hparams
     # Explicitly disable TPU cores in the stored config.
     self.hparams["tpu_cores"] = 0
     self.loss = self.get_loss_fn()
     # you can get fancier here of course, we will likely have a separate
     # class for the model
     self.model = AutoEncoder(self.hparams["latent_dim"])
     print(self.model)
     print(self.model.parameters())
Exemplo n.º 6
0
def test_mnist_ae():
    """Smoke-test the AutoEncoder pipeline on MNIST (legacy Python 2 code).

    Depends on module-level globals: batch_size, max_gpu_samples, seed,
    ae_ns, ae_cost, pretrain_epochs. Trains an autoencoder through a GPU
    data provider, saves it, shows the learned filters, then checks that
    feature extraction and reconstruction agree between the raw-memory
    and data-provider code paths.
    """
    dl = MNISTDataLoader('/data/Research/datasets/mnist/mnist.pkl')

    # Training-time provider over the unlabeled MNIST images.
    dp = UnlabeledMemoryDataProvider(data_loader=dl, 
                                         batch_size=batch_size, 
                                         max_gpu_train_samples=max_gpu_samples, 
                                         max_gpu_valid_samples=max_gpu_samples, 
                                         is_test=False, 
                                         epochwise_shuffle=False, 
                                         nvalid_samples=0)
    
    opt = optimizers.SGD_Rms_Optimizer(decay=0.9)
    
    ae = AutoEncoder(batch_size = batch_size, seed=seed,
            network_structure=ae_ns, network_cost=ae_cost)
    
    # dump_freq is sized off 60000/100 batches — presumably one dump
    # every ~10 epochs; TODO confirm against the fit() implementation.
    ae.fit(data_provider=dp, 
            optimizer=opt,
            train_epoch=pretrain_epochs,
            noiseless_validation=True,
            dump_freq=(60000//100+1)*10)
     
    ae.save('testmodel_ae.joblib')
#     ae.load('testmodel_ae.joblib')

    # Visualize first-layer weights as a 10x10 grid of 28x28 tiles.
    Image.fromarray(tile_raster_images(ae.params[2].getParameterValue('W').T, (28,28), (10, 10))).show()   

    f = open('/data/Research/datasets/mnist/mnist.pkl', 'rb')
    _, _, test_set = cPickle.load(f)
    f.close()
    
    # Test-time provider over the same data loader.
    dp = UnlabeledMemoryDataProvider(data_loader=dl, 
                                         batch_size=batch_size, 
                                         max_gpu_train_samples=max_gpu_samples, 
                                         max_gpu_valid_samples=max_gpu_samples, 
                                         is_test=True, 
                                         epochwise_shuffle=False, 
                                         nvalid_samples=0)
    
    # Features from layer 'full2' must match across the two code paths.
    feat1 = ae.extract_feature_from_memory_data(test_set[0], 'full2', 1, True)
    feat2 = ae.extract_feature_from_data_provider(dp, 'full2', None, False, 1, True)
    
    print numpy.all(numpy.equal(feat1, feat2))
    
    # Deterministic reconstructions must also agree on the first 9 digits.
    rec1 = ae.reconstruct_from_memory_data(test_set[0][:9])
    Image.fromarray(tile_raster_images(rec1, (28,28), (3, 3))).show()
    rec2 = ae.reconstruct_from_data_provider(dp)
    Image.fromarray(tile_raster_images(rec2, (28,28), (3, 3))).show()
    print numpy.all(numpy.equal(rec1, rec2[:9]))
    
    # Noisy multi-step reconstruction (10 steps) — visual check only.
    rec1 = ae.reconstruct_from_memory_data(test_set[0][:9], steps=10, noiseless=False)
    Image.fromarray(tile_raster_images(rec1, (28,28), (3, 3))).show()
Exemplo n.º 7
0
def main(args):
    """Train the board AutoEncoder on boards loaded from args.boards_file.

    Holds out the last args.num_test boards for testing, then runs the
    training loop with periodic loss logging, checkpointing, and
    loss-curve plotting.
    """
    print('Loading data')
    idxs = np.load(args.boards_file, allow_pickle=True)['idxs']
    print(f'Number of Boards: {len(idxs)}')

    # Prefer the first GPU when one is both available and requested.
    use_cuda = torch.cuda.is_available() and args.num_gpus > 0
    device = torch.device('cuda:0' if use_cuda else 'cpu')

    if args.shuffle:
        np.random.shuffle(idxs)

    # Last num_test boards are held out; the rest are used for training.
    test_idxs = idxs[-args.num_test:]
    train_idxs = idxs[:-args.num_test]

    train_loader = DataLoader(Boards(train_idxs),
                              batch_size=args.batch_size,
                              shuffle=False)
    test_loader = DataLoader(Boards(test_idxs), batch_size=args.batch_size)

    model = AutoEncoder().to(device)
    if args.model_loadname:
        model.load_state_dict(torch.load(args.model_loadname))

    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    model.train()
    losses = []
    total_iters = 0

    for epoch in range(args.init_epoch, args.epochs):
        print(f'Running epoch {epoch} / {args.epochs}\n')
        progress = tqdm(enumerate(train_loader), total=len(train_loader))
        for batch_idx, board in progress:
            board = board.to(device)

            optimizer.zero_grad()
            loss = model.loss(board)
            loss.backward()
            losses.append(loss.item())
            optimizer.step()

            if total_iters % args.log_interval == 0:
                tqdm.write(f'Loss: {loss.item()}')

            if total_iters % args.save_interval == 0:
                savename = append_to_modelname(args.model_savename,
                                               total_iters)
                torch.save(model.state_dict(), savename)
                plot_losses(losses, 'vis/ae_losses.png')

            total_iters += 1
Exemplo n.º 8
0
    def __init__(self, model_path):
        """Load a BoardValuator (built around a fresh AutoEncoder) from
        the checkpoint at *model_path* and hold it in eval mode on CPU."""
        valuator = BoardValuator(AutoEncoder())

        # map_location keeps CPU-only hosts working with GPU checkpoints.
        state = torch.load(model_path, map_location=torch.device('cpu'))
        valuator.load_state_dict(state)

        valuator.eval()
        self.model = valuator
Exemplo n.º 9
0
def test(args):
    """Evaluate a saved model-based autoencoder across several SNR levels.

    For each SNR in {0.5, 1, 2, 3, 4} the checkpoint is reloaded, run over
    a fresh input stream, and the mean loss and bit-error rate collected;
    results are plotted as BER vs. SNR on a log scale.
    """
    print(args.save_dir)

    model_args, data_args = load_args(Path(args.save_dir))
    assert not model_args.modelfree, "Code only evaluates on model based models"

    saver = ModelSaver(Path(args.save_dir), None)

    power_constraint = PowerConstraint()
    possible_inputs = get_md_set(model_args.md_len)

    # TODO: change to batch size and batch per epoch to 1000
    data_args.batch_size = 5000
    data_args.batches_per_epoch = 1000
    dataset_size = data_args.batch_size * data_args.batches_per_epoch
    loader = InputDataloader(data_args.batch_size, data_args.block_length,
                             dataset_size)
    loader = loader.example_generator()

    SNRs = [.5, 1, 2, 3, 4]
    BER, loss = [], []

    for SNR in SNRs:
        print(f"Testing {SNR} SNR level")
        data_args.SNR = SNR
        accuracy, losses = [], []
        print(data_args.channel)
        print(model_args.modelfree)

        # Rebuild and reload the model for this noise level.
        channel = get_channel(data_args.channel, model_args.modelfree,
                              data_args)
        model = AutoEncoder(model_args, data_args, power_constraint, channel,
                            possible_inputs)
        saver.load(model)

        for step in tqdm(range(data_args.batches_per_epoch)):
            msg = next(loader)
            metrics = model.trainable_encoder.test_on_batch(msg, msg)
            losses.append(metrics[0])
            accuracy.append(metrics[1])

        mean_loss = sum(losses) / len(losses)
        mean_BER = 1 - sum(accuracy) / len(accuracy)
        loss.append(mean_loss)
        BER.append(mean_BER)
        print(f"mean BER: {mean_BER}")
        print(f"mean loss: {mean_loss}")

    # create plots for results
    plt.plot(SNRs, BER)
    plt.ylabel("BER")
    plt.xlabel("SNR")
    plt.yscale('log')
    plt.savefig('figures/AWGN_modelaware.png')
    plt.show()
Exemplo n.º 10
0
def main():
    """Reconstruct a batch of MNIST digits with a checkpointed AutoEncoder
    and save the originals next to their reconstructions."""
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    sample_size = 64

    # Rebuild the generator exactly as it was checkpointed.
    ckpt = torch.load("ckpts/recent.pth")
    model = AutoEncoder(ckpt["nc"], ckpt["ngf"]).to(device)
    model.load_state_dict(ckpt["netG"])

    to_tensor = transforms.Compose([
        transforms.ToTensor(),
    ])

    loader = DataLoader(MNIST('./data/MNIST', transform=to_tensor),
                        batch_size=sample_size)
    imgs, label = next(iter(loader))

    new_imgs = reconstruct(model, imgs, label, device)
    vutils.save_image(imgs, "./inference_img/original.png")
    vutils.save_image(new_imgs, "./inference_img/new_img.png")
Exemplo n.º 11
0
def learn():
    """Fit an AutoEncoder (784 -> 1568 -> 64) on MNIST and persist it."""
    import tflearn.datasets.mnist as mnist
    X, Y, testX, testY = mnist.load_data(one_hot=True)

    encoder = AutoEncoder(784, [784 * 2], 64)
    encoder.learn(X, testX)
    encoder.save()
Exemplo n.º 12
0
def init_model(path_to_checkpoint=PATH_TO_EMBEDDER):
    """Build the embedding AutoEncoder, restore its weights from
    *path_to_checkpoint*, and return ``(device, net)`` with the network
    in eval mode on the selected device.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    net = AutoEncoder(
        latent_dim=512,
        in_channels=1,
        num_hiddens=256,
        num_res_hiddens=64,
        num_res_layers=4,
        out_channels=1,
    ).to(device)

    # BUG FIX: the original passed a bare open(...) handle to torch.load
    # and never closed it; the context manager releases the file.
    # map_location keeps CPU-only hosts working with GPU checkpoints.
    with open(path_to_checkpoint, 'rb') as checkpoint_file:
        net.load_state_dict(torch.load(checkpoint_file, map_location=device))

    print()
    print('='*30, end='\n\n')
    print(net.eval())
    print(end='\n\n')
    print('='*30, end='\n\n')

    return device, net
Exemplo n.º 13
0
def main(args):
    """Train a BoardValuator (on top of a pretrained AutoEncoder) on
    labeled chess boards loaded from args.boards_file.

    Boards without a value label are dropped, the remainder is split into
    train/test, and the loop logs running loss to Comet, checkpoints every
    save_interval iterations, and evaluates every eval_interval iterations.
    """
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    print('Loading data')
    data = np.load(args.boards_file, allow_pickle=True)
    idxs = data['idxs']
    labels = data['values']
    # Element-wise `!= None` on purpose: builds a numpy mask selecting
    # only boards that actually carry a value label.
    mask = labels != None
    idxs = idxs[mask]
    labels = labels[mask]
    n = len(idxs)

    if args.shuffle:
        perm = np.random.permutation(n)
        idxs = idxs[perm]
        labels = labels[perm]

    # Start a fresh Comet experiment or resume an existing one.
    if args.experiment is None:
        experiment = Experiment(project_name="chess-axia")
        experiment.log_parameters(vars(args))
    else:
        experiment = ExistingExperiment(previous_experiment=args.experiment)
    key = experiment.get_key()

    print(f'Number of Boards: {n}')

    if torch.cuda.is_available() and args.num_gpus > 0:
        device = torch.device('cuda:0')
    else:
        device = torch.device('cpu')

    if args.num_train is None:
        args.num_train = n - args.num_test
    if args.num_train + args.num_test > n:
        raise ValueError('num-train and num-test sum to more than dataset size')
    train_idxs = idxs[:args.num_train]
    test_idxs = idxs[-args.num_test:]

    # BUG FIX: labels must be sliced exactly like the indices. The original
    # used labels[:-args.num_test] here, which misaligns train_idxs and
    # train_labels whenever num_train != n - num_test.
    train_labels = labels[:args.num_train]
    test_labels = labels[-args.num_test:]
    #print(f'Win percentage: {sum(train_labels)/ len(train_labels):.1%}')
    print('Train size: ' + str(len(train_labels)))

    train_loader = DataLoader(BoardAndPieces(train_idxs, train_labels),
                              batch_size=args.batch_size, collate_fn=collate_fn,
                              shuffle=True)
    test_loader = DataLoader(BoardAndPieces(test_idxs, test_labels),
                             batch_size=args.batch_size, collate_fn=collate_fn)

    # Pretrained autoencoder feeding the valuator.
    ae = AutoEncoder().to(device)
    ae_file = append_to_modelname(args.ae_model, args.ae_iter)
    ae.load_state_dict(torch.load(ae_file))

    model = BoardValuator(ae).to(device)
    loss_fn = model.loss_fn
    # NOTE(review): the model is wrapped in DataParallel here and may be
    # wrapped again below when num_gpus > 1 — confirm the double wrap is
    # intentional.
    model = DataParallel(model)
    if args.model_loadname:
        model.load_state_dict(torch.load(args.model_loadname))

    if args.ae_freeze:
        print('Freezing AE model')
        for param in ae.parameters():
            param.requires_grad = False

    if torch.cuda.device_count() > 1 and args.num_gpus > 1:
        model = torch.nn.DataParallel(model)

    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    # BUG FIX: these running statistics are used inside the loop but the
    # original left their initialization commented out, raising NameError
    # on the very first batch.
    cum_acc = cum_loss = count = 0
    total_iters = args.init_iter

    for epoch in range(args.init_epoch, args.epochs):
        print(f'Running epoch {epoch} / {args.epochs}\n')
        for batch_idx, (input, mask, label) in enumerate(train_loader):

            model.train()

            input = to(input, device)
            mask = to(mask, device)
            label = to(label, device)

            optimizer.zero_grad()
            output = model(input, mask)
            loss = loss_fn(output, label)
            loss.backward()
            optimizer.step()

            cum_loss += loss.item()
            # cum_acc += acc.item()
            count += 1

            if total_iters % args.log_interval == 0:
                tqdm.write(f'Epoch: {epoch}\t Iter: {total_iters:>6}\t Loss: {loss.item():.5f}')
                # experiment.log_metric('accuracy', cum_acc / count,
                #                       step=total_iters)
                experiment.log_metric('loss', cum_loss / count,
                                      step=total_iters)
                experiment.log_metric('loss_', cum_loss / count,
                                      step=total_iters)
                # BUG FIX: restart the running averages after each report
                # (this reset was also commented out in the original).
                cum_acc = cum_loss = count = 0

            if total_iters % args.save_interval == 0:
                path = get_modelpath(args.model_dirname, key,
                                     args.model_savename, iter=total_iters,
                                     epoch=epoch)
                dirname = os.path.dirname(path)
                if not os.path.exists(dirname):
                    os.makedirs(dirname)
                torch.save(model.state_dict(), path)

            if total_iters % args.eval_interval == 0 and total_iters != 0:
                loss = eval_loss(model, test_loader, device, loss_fn)
                tqdm.write(f'\tTEST: Loss: {loss:.5f}')
                #experiment.log_metric('test accuracy', acc, step=total_iters,
                #                      epoch=epoch)
                experiment.log_metric('test loss', loss, step=total_iters,
                                      epoch=epoch)
            total_iters += 1
Exemplo n.º 14
0
class AutoencoderModel(pl.LightningModule):
    """Lightning module wrapping AutoEncoder for masked reconstruction.

    Inputs are 2-D batches where zero entries mark unobserved positions;
    predictions are gated by torch.sign(x) so only observed positions
    enter the MSE loss. Validation additionally reports the loss with
    some observed entries zeroed out, for n in {1, 5, 10, 20}.
    """
    def __init__(self, hparams):
        """Store hyper-parameters (TPU disabled), choose the loss
        function, and build the underlying AutoEncoder."""
        super().__init__()
        # Per-epoch accumulators for validation metrics and train losses.
        self.val_dict = {}
        self.train_losses = []
        self.hparams = hparams
        # Explicitly disable TPU cores in the stored config.
        self.hparams["tpu_cores"] = 0
        self.loss = self.get_loss_fn()
        # you can get fancier here of course, we will likely have a separate
        # class for the model
        self.model = AutoEncoder(self.hparams["latent_dim"])
        print(self.model)
        print(self.model.parameters())

    def forward(self, inputs):
        """Run the autoencoder; return dict with 'latent' and 'predicted'."""
        output = self.model(inputs)
        return dict(latent=output[0], predicted=output[1])

    def training_step(self, batch, batch_idx):
        """One step: MSE between sign-masked predictions and the input."""
        x = batch
        sign = torch.sign(x)
        _, preds = self.model(x)
        # Zero out predictions at unobserved (zero) input positions.
        preds = preds * sign
        loss = self.loss(preds, x)
        self.train_losses.append(loss.detach().cpu().item())
        self.log(
            "train_loss",
            loss,
            on_epoch=True,
            on_step=True,
            logger=True,
            prog_bar=True,
        )

        return loss

    def training_epoch_end(self, training_result):
        """Log the epoch-mean training loss and reset the buffer."""
        self.log(
            "epoch_train_loss",
            sum(self.train_losses) / len(self.train_losses),
            on_epoch=True,
            logger=True,
        )
        self.train_losses = []

    def validation_step(self, batch, batch_idx):
        """Log the plain validation loss plus losses computed after
        masking observed entries of each row (n in 1, 5, 10, 20)."""
        x = batch
        sign = torch.sign(x)
        _, preds = self.model(x)
        loss = self.loss(preds * sign, x)
        self.log(
            "val_loss",
            loss,
            on_epoch=True,
            on_step=False,
        )
        for n in [1, 5, 10, 20]:
            x_mask = x.clone().detach()
            for i in range(x_mask.shape[0]):
                # Number of observed (non-zero) entries in row i.
                num_revs = x_mask[i, :].bool().sum()
                if n > num_revs:
                    # Fewer than n observations: hide the whole row.
                    x_mask[i, :] = 0
                else:
                    # NOTE(review): torch.where(cond) returns a *tuple* of
                    # index tensors, so [-n:] slices that tuple (the whole
                    # index tensor for a 1-D row), not the last n indices —
                    # confirm this masks what was intended.
                    x_mask[i, :][torch.where(x_mask[i, :] > 0)[-n:]] = 0
            _, preds = self.model(x_mask)
            loss = self.loss(preds * sign, x)
            self.log(
                f"val_last_{n}_loss",
                loss,
                on_epoch=True,
                on_step=False,
            )
            self.val_dict.setdefault(f"val_last_{n}_loss",
                                     []).append(loss.detach().cpu().item())

    def validation_epoch_end(self, validation_result):
        """Log epoch means of the accumulated masked-validation losses."""
        for k, v in self.val_dict.items():
            self.log(f"epoch_{k}", sum(v) / len(v), on_epoch=True, logger=True)
        self.val_dict = {}

    def get_loss_fn(self):
        """Return MSELoss; 'sum' reduction when hparams['reduction'] is
        'sum', otherwise the default 'mean' reduction."""
        if self.hparams['reduction'] == "sum":
            loss = nn.MSELoss(reduction='sum')
        else:
            loss = nn.MSELoss()
        final_loss = loss
        return final_loss

    def configure_optimizers(self):
        """Adam or SGD per hparams['optimizer'], wrapped by util.set_schedule."""
        if self.hparams["optimizer"] == "Adam":
            optim = torch.optim.Adam(self.model.parameters(),
                                     lr=self.hparams["lr"])
        else:
            optim = torch.optim.SGD(
                self.model.parameters(),
                lr=self.hparams["lr"],
                momentum=self.hparams["momentum"],
            )

        # test this
        return util.set_schedule(self, optim)

    def __dataloader(self, split):
        """Build the dataloader for *split* from the shared hparams."""
        return get_dataloader(split, self.hparams)

    def val_dataloader(self):
        return self.__dataloader("valid")

    def train_dataloader(self):
        return self.__dataloader("train")

    def test_dataloader(self):
        return self.__dataloader("test")

    @staticmethod
    def add_model_specific_args(parent_parser):
        """Attach this model's CLI arguments to *parent_parser*."""
        parser = ArgumentParser(parents=[parent_parser], add_help=False)
        parser.add_argument("--latent_dim", type=int, default=256)
        parser.add_argument("--scheduler", type=str, default="none")
        parser.add_argument("--reduction", type=str, default="mean")
        return parser
Exemplo n.º 15
0
# Visualize AutoEncoder reconstructions on shuffled MNIST test images.
# Usage: pass hidden-layer widths on the CLI; the last argument is the
# bottleneck ("neck") size.
import matplotlib.pyplot as plt
import numpy as np
import tflearn  # BUG FIX: `import tflearn.datasets.mnist as mnist` binds only
                # `mnist`, so `tflearn.data_utils` below raised NameError.
import tflearn.datasets.mnist as mnist
import sys

layers = list(map(int, sys.argv[1:]))
neck = layers[-1]
layers = layers[:-1]

X, Y, testX, testY = mnist.load_data(one_hot=True)

# Testing the image reconstruction on new data (test set)
Xes = tflearn.data_utils.shuffle(testX)[0]

d = AutoEncoder(28*28, layers, neck)
d.load()

# Applying encode and decode over test set
encode_decode = d.model.predict(Xes)

# Compare original images with their reconstructions: row 0 originals,
# row 1 reconstructions, row 2 the pixel-wise difference.
f, a = plt.subplots(3, 20, figsize=(20, 3))
for i in range(20):
    a[0][i].imshow(np.reshape(Xes[i], (28, 28)), cmap="hot")
    a[1][i].imshow(np.reshape(encode_decode[i], (28, 28)), cmap="hot")
    a[2][i].imshow(np.reshape(Xes[i]-encode_decode[i], (28, 28)), cmap="hot")
f.show()
plt.draw()
plt.waitforbuttonpress()
Exemplo n.º 16
0
    )

    train_loader = DataLoader(train_dataset,
                              batch_size=opt.batch_size,
                              shuffle=True,
                              num_workers=opt.workers,
                              pin_memory=True)
    val_loader = DataLoader(val_dataset,
                            batch_size=opt.batch_size,
                            shuffle=False,
                            num_workers=opt.workers,
                            pin_memory=True,
                            drop_last=True)

    if opt.model == 'ae':
        net = AutoEncoder(3, n_classes=1, filters=opt.filters)
    elif opt.model == 'unet':
        net = UNet(3, n_classes=1, filters=opt.filters)
    elif opt.model == 'unet3plus':
        net = UNet3Plus(3, n_classes=1, filters=opt.filters)

    if device == torch.device('cuda'):
        net = nn.DataParallel(net, device_ids=[0, 1, 2, 3])
        logger.info(f'use gpu: {net.device_ids}')
    net.to(device=device)

    # optimizer = optim.RMSprop(net.parameters(), lr=opt.lr, weight_decay=1e-8, momentum=0.9)
    optimizer = optim.SGD(net.parameters(), lr=opt.lr, momentum=0.9)
    criterion = nn.BCEWithLogitsLoss()
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                     'max',
Exemplo n.º 17
0
import json  # BUG FIX: json.load is used below but json was never imported
import os

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader

from data_load import Dspites, train_val_split
from models import AutoEncoder

# Module-level setup: read the config, build the autoencoder on the
# configured device, and create train/val loaders over the dSprites data.
with open("config.json") as json_file:
    conf = json.load(json_file)
dataset_path = os.path.join(conf['data']['dataset_path'],
                            conf['data']['dataset_file'])
device = conf['train']['device']

model = AutoEncoder(in_channels=1,
                    dec_channels=1,
                    latent_size=conf['model']['latent_size'])
model = model.to(device)

# Split: train / val, then val is split again 80/20 into val/test pools.
dspites_dataset = Dspites(dataset_path)
train_val = train_val_split(dspites_dataset)
val_test = train_val_split(train_val['val'], val_split=0.2)

data_loader_train = DataLoader(train_val['train'],
                               batch_size=conf['train']['batch_size'],
                               shuffle=True,
                               num_workers=2)
data_loader_val = DataLoader(val_test['val'],
                             batch_size=200,
                             shuffle=False,
                             num_workers=1)
def main():
    """Train the dSprites autoencoder with random per-batch augmentation.

    Relies on module-level globals: `freeze_encoder`, `load_path`,
    `save_path`, `np`, `augment_transform_list1`, and the helpers
    `image_batch_transformation`, `autoencoder_step`,
    `autoencoder_validation`. Saves the best-validation checkpoint to
    `save_path` and reports the final test loss.
    """
    with open("config.json") as json_file:
        conf = json.load(json_file)
    dataset_path = os.path.join(conf['data']['dataset_path'],
                                conf['data']['dataset_file'])
    device = conf['train']['device']

    model = AutoEncoder(in_channels=1,
                        dec_channels=1,
                        latent_size=conf['model']['latent_size'])
    model = model.to(device)
    model.load_state_dict(torch.load(load_path))

    # Split: train / val, then val is split again 80/20 into val/test.
    dspites_dataset = Dspites(dataset_path)
    train_val = train_val_split(dspites_dataset)
    val_test = train_val_split(train_val['val'], val_split=0.2)

    data_loader_train = DataLoader(train_val['train'],
                                   batch_size=conf['train']['batch_size'],
                                   shuffle=True,
                                   num_workers=2)
    data_loader_val = DataLoader(val_test['val'],
                                 batch_size=200,
                                 shuffle=False,
                                 num_workers=1)
    data_loader_test = DataLoader(val_test['train'],
                                  batch_size=200,
                                  shuffle=False,
                                  num_workers=1)

    print('autoencoder training')
    print('frozen encoder: ', freeze_encoder)
    print('train dataset length: ', len(train_val['train']))
    print('val dataset length: ', len(val_test['val']))
    print('test dataset length: ', len(val_test['train']))

    print('latent space size:', conf['model']['latent_size'])
    print('batch size:', conf['train']['batch_size'])

    loss_function = nn.BCELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    model.train()
    if freeze_encoder:
        model.freeze_encoder()

    for epoch in range(25):
        # After epoch 15, decay the learning rate each epoch (floor 1e-5).
        if epoch > 15:
            for param in optimizer.param_groups:
                param['lr'] = max(0.00001,
                                  param['lr'] / conf['train']['lr_decay'])
                print('lr: ', param['lr'])

        loss_list = []
        model.train()

        for batch_i, batch in enumerate(data_loader_train):
            augment_transform = np.random.choice(augment_transform_list1)
            batch1 = image_batch_transformation(batch, augment_transform)
            # BUG FIX: the augmented batch was computed but never used —
            # the original passed the raw `batch` to autoencoder_step.
            loss = autoencoder_step(model, batch1, device, loss_function)
            loss_list.append(loss.item())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        mean_epoch_loss = sum(loss_list) / len(loss_list)
        model.eval()
        validation_loss = autoencoder_validation(data_loader_val, model,
                                                 device, loss_function)
        # Track the best validation loss; checkpoint whenever it improves.
        if epoch == 0:
            min_validation_loss = validation_loss
        else:
            min_validation_loss = min(min_validation_loss, validation_loss)
        print('epoch {0}, loss: {1:2.5f}, validation: {2:2.5f}'.format(
            epoch, mean_epoch_loss, validation_loss))
        if min_validation_loss == validation_loss:
            #pass
            torch.save(model.state_dict(), save_path)

    # Reload the best checkpoint and score it on the held-out test pool.
    model.load_state_dict(torch.load(save_path))
    test_results = autoencoder_validation(data_loader_test, model, device,
                                          loss_function)
    print('test result: ', test_results)
Exemplo n.º 19
0
        model_frozen = copy.deepcopy(model)
        model_frozen.eval()
        model.freeze_encoder()
        loss_function = return_loss_function(model_frozen)
        mean_epoch_loss, validation_loss = \
            decoder_step(model, loss_function, optimizer, data_loader_train, data_loader_val, device)
        print('         autoencoder loss: {0:2.5f}, BCE val: {1:2.5f}'.format(
            mean_epoch_loss, validation_loss))


if __name__ == "__main__":
    device = conf['train']['device']

    model = AutoEncoder(in_channels=1,
                        dec_channels=1,
                        latent_size=conf['model']['latent_size'])
    model = model.to(device)
    model.load_state_dict(torch.load(load_path))

    dataset_path = os.path.join(conf['data']['dataset_path'],
                                conf['data']['dataset_file'])
    dspites_dataset = Dspites(dataset_path)
    train_val = train_val_split(dspites_dataset)
    val_test = train_val_split(train_val['val'], val_split=0.2)

    data_loader_train = DataLoader(train_val['train'],
                                   batch_size=conf['train']['batch_size'],
                                   shuffle=True,
                                   num_workers=2)
    data_loader_val = DataLoader(val_test['val'],
Exemplo n.º 20
0
import tflearn
from models import AutoEncoder
import matplotlib.pyplot as plt
import numpy as np

# Decode a sweep of scalar latent values with a pretrained AutoEncoder
# (784 -> 256 -> 1 bottleneck) to visualize what the single latent
# dimension encodes.
d1 = AutoEncoder(28*28, [256], 1)
d1.load()
decode = d1.decoder()

# Two rows of decoded images over different latent ranges:
# row 0 sweeps x*12-1 (roughly [-1, 10.4]), row 1 sweeps (x-0.5)*20
# (roughly [-10, 9]) for x in {0, 0.05, ..., 0.95}.
f, a = plt.subplots(2, 20, figsize=(20, 2))
for i in range(20):
    x = i/20.0
    a[0][i].imshow(np.reshape(decode([x*12-1]), (28, 28)), cmap="hot")
    a[1][i].imshow(np.reshape(decode([(x-0.5)*20]), (28, 28)), cmap="hot")
f.show()
plt.draw()
plt.waitforbuttonpress()

Exemplo n.º 21
0
def main():
    """Train an autoencoder and a normalizing flow over its latent space.

    Seeds all RNGs, prepares the output/checkpoint directory tree,
    builds the flow and autoencoder models from CLI arguments, then
    alternates AE epochs and flow epochs, logging images to TensorBoard
    and checkpointing every ``args.save_iter`` epochs.

    Raises:
        ValueError: if ``args.flow`` or ``args.dataset`` is not supported.
    """
    args = parse()

    # set random seeds (python / torch / numpy) for reproducibility
    random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)
    np.random.seed(args.manual_seed)

    # prepare output directories: refuse to resume from a missing dir,
    # and ask before clobbering an existing one on a fresh run
    base_dir = Path(args.out_dir)
    model_dir = base_dir.joinpath(args.model_name)
    if (args.resume or args.initialize) and not model_dir.exists():
        raise Exception("Model directory for resume does not exist")
    if not (args.resume or args.initialize) and model_dir.exists():
        c = ""
        while c != "y" and c != "n":
            c = input("Model directory already exists, overwrite?").strip()

        if c == "y":
            shutil.rmtree(model_dir)
        else:
            sys.exit(0)
    model_dir.mkdir(parents=True, exist_ok=True)

    summary_writer_dir = model_dir.joinpath("runs")
    summary_writer_dir.mkdir(exist_ok=True)
    save_path = model_dir.joinpath("checkpoints")
    save_path.mkdir(exist_ok=True)

    # prepare summary writer
    writer = SummaryWriter(summary_writer_dir, comment=args.writer_comment)

    # prepare data
    train_loader, val_loader, test_loader, args = load_dataset(
        args, flatten=args.flatten)

    # prepare flow model
    # FIX: an unknown flow name used to fall through the hasattr() guard
    # and surface later as a confusing NameError; fail fast instead.
    if hasattr(flows, args.flow):
        flow_model_template = getattr(flows, args.flow)
    else:
        raise ValueError(f"Unknown flow type: {args.flow!r}")

    flow_list = [flow_model_template(args.zdim) for _ in range(args.num_flows)]
    prior = torch.distributions.MultivariateNormal(torch.zeros(args.zdim),
                                                   torch.eye(args.zdim))
    flow_model = NormalizingFlowModel(prior, flow_list).to(args.device)

    # prepare autoencoder
    # FIX: same fail-fast treatment for an unsupported dataset, which used
    # to leave ae_model (and ae_loss below) unbound.
    if args.dataset == "mnist":
        ae_model = AutoEncoder(args.xdim, args.zdim, args.units,
                               "binary").to(args.device)
    elif args.dataset == "cifar10":
        ae_model = ConvAutoEncoder().to(args.device)
    else:
        raise ValueError(f"Unsupported dataset: {args.dataset!r}")

    # setup optimizers
    ae_optimizer = optim.Adam(ae_model.parameters(), args.learning_rate)
    flow_optimizer = optim.Adam(flow_model.parameters(), args.learning_rate)

    # setup loss: BCE-with-logits for binary MNIST, summed MSE for CIFAR-10
    if args.dataset == "mnist":
        args.imshape = (1, 28, 28)
        args.zshape = (args.zdim, )
        ae_loss = nn.BCEWithLogitsLoss(reduction="sum").to(args.device)
    elif args.dataset == "cifar10":
        args.imshape = (3, 32, 32)
        args.zshape = (8, 8, 8)
        ae_loss = nn.MSELoss(reduction="sum").to(args.device)

    total_epochs = np.max([args.vae_epochs, args.flow_epochs, args.epochs])

    if args.resume:
        raise NotImplementedError
    if args.initialize:
        raise NotImplementedError

    # training loop: AE and flow phases run for their own epoch budgets and
    # may overlap, depending on vae_epochs / flow_epochs
    for epoch in trange(1, total_epochs + 1):
        if epoch <= args.vae_epochs:
            train_ae(
                epoch,
                train_loader,
                ae_model,
                ae_optimizer,
                writer,
                ae_loss,
                device=args.device,
            )
            log_ae_tensorboard_images(
                ae_model,
                val_loader,
                writer,
                epoch,
                "AE/val/Images",
                xshape=args.imshape,
            )
            evaluate_ae(epoch, test_loader, ae_model, writer, ae_loss)

        if epoch <= args.flow_epochs:
            train_flow(
                epoch,
                train_loader,
                flow_model,
                ae_model,
                flow_optimizer,
                writer,
                device=args.device,
            )

            log_flow_tensorboard_images(
                flow_model,
                ae_model,
                writer,
                epoch,
                "Flow/sampled/Images",
                xshape=args.imshape,
                zshape=args.zshape,
            )

        # periodic checkpoint of both models and both optimizers
        if epoch % args.save_iter == 0:
            checkpoint_dict = {
                "epoch": epoch,
                "ae_optimizer": ae_optimizer.state_dict(),
                "flow_optimizer": flow_optimizer.state_dict(),
                "ae_model": ae_model.state_dict(),
                "flow_model": flow_model.state_dict(),
            }
            fname = f"model_{epoch}.pt"
            save_checkpoint(checkpoint_dict, save_path, fname)

    writer.close()
Exemplo n.º 22
0
def run(batch_size, epochs, val_split, num_workers, print_every,
        trainval_csv_path, test_csv_path, model_type, tasks, lr, weight_decay,
        momentum, dataset_dir):
    """Pretrain an autoencoder built around a torchvision backbone.

    Splits the train/val CSV dataset into train/val/test subsets, wraps
    the requested backbone in ``AutoEncoder``, trains with SGD +
    ReduceLROnPlateau via ``AutoTrainer``, and appends the final test
    loss to the trainer's output log.

    Raises:
        ValueError: if ``model_type`` is not one of the supported backbones.
    """
    train_dataset = CustomDatasetFromImages(trainval_csv_path,
                                            data_dir=dataset_dir)
    # test_dataset = CustomDatasetFromImages(test_csv_path, data_dir = dataset_dir)

    # carve the single CSV dataset into train / val / test splits
    # (test fixed at 15%, val at val_split, train gets the remainder)
    dset_len = len(train_dataset)
    val_size = int(val_split * dset_len)
    test_size = int(0.15 * dset_len)
    train_size = dset_len - val_size - test_size

    train_data, val_dataset, test_dataset = torch.utils.data.random_split(
        train_dataset, [train_size, val_size, test_size])
    train_loader_small = torch.utils.data.DataLoader(dataset=train_data,
                                                     batch_size=batch_size,
                                                     pin_memory=False,
                                                     drop_last=True,
                                                     shuffle=True,
                                                     num_workers=num_workers)

    # NOTE(review): this loader iterates the FULL dataset (incl. val/test
    # rows) at double batch size — confirm that is intended for pretraining.
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=2 * batch_size,
                                               pin_memory=False,
                                               drop_last=True,
                                               shuffle=True,
                                               num_workers=num_workers)
    val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
                                             batch_size=batch_size,
                                             pin_memory=False,
                                             drop_last=True,
                                             shuffle=True,
                                             num_workers=num_workers)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=batch_size,
                                              pin_memory=False,
                                              drop_last=True,
                                              shuffle=True,
                                              num_workers=num_workers)

    # select the backbone architecture
    if model_type == 'densenet121':
        model = models.densenet121(pretrained=False)
    elif model_type == 'resnet101':
        model = models.resnet101(pretrained=False)
    elif model_type == 'resnet50':
        model = models.resnet50(pretrained=False)
    elif model_type == 'resnet34':
        model = models.resnet34(pretrained=False)
    elif model_type == 'vgg19':
        model = models.vgg19(pretrained=False)
    else:
        # FIX: an unrecognized model_type used to leave `model` unbound and
        # crash below with a NameError; raise a clear error instead.
        raise ValueError(f"Unsupported model_type: {model_type!r}")

    model = AutoEncoder(model_type, model=model)
    model = nn.DataParallel(model)

    print(model)

    model = model.to('cuda')

    criterion = nn.MSELoss(reduction='sum')

    # =============================== PRE-TRAIN MODEL ========================
    optimizer = torch.optim.SGD(model.parameters(),
                                weight_decay=weight_decay,
                                momentum=momentum,
                                lr=lr,
                                nesterov=True)
    scheduler = ReduceLROnPlateau(optimizer,
                                  factor=0.5,
                                  patience=3,
                                  min_lr=1e-7,
                                  verbose=True)
    trainset_percent = (1 - val_split - 0.15)
    trainer = AutoTrainer(model,
                          optimizer,
                          scheduler,
                          criterion,
                          epochs,
                          print_every=print_every,
                          trainset_split=trainset_percent)
    trainer.train(train_loader, val_loader)
    # final evaluation on the held-out test split
    test_loss = trainer.validate(test_loader)

    with open(trainer.output_log, 'a+') as out:
        print('Test Loss', test_loss, file=out)
Exemplo n.º 23
0
def test(args):
    """Evaluate saved model-free autoencoder checkpoints across noise levels.

    For each (noise level, checkpoint dir) pair in the model dict, rebuilds
    the channel + autoencoder, restores the weights, measures BER and loss
    over a fixed number of batches, and finally plots BER vs. noise on a
    log scale to ``figures/figure.png``.
    """
    model_dict = load_model_dict(Path(args.model_dict_path))

    BER = []
    loss = []
    noises = []

    for noise, save_dir in model_dict.items():
        model_args, data_args = load_args(Path(save_dir))
        assert model_args.modelfree, "Code only evaluates on model free"

        saver = ModelSaver(Path(save_dir), None)
        power_constraint = PowerConstraint()
        possible_inputs = get_md_set(model_args.md_len)

        # TODO: change to batch size and batch per epoch to 1000
        data_args.batch_size = 100
        data_args.batches_per_epoch = 100
        dataset_size = data_args.batch_size * data_args.batches_per_epoch
        loader = InputDataloader(data_args.batch_size, data_args.block_length,
                                 dataset_size)
        loader = loader.example_generator()

        # sanity-check that the dict's noise key matches the training args
        if data_args.channel == "AWGN":
            assert float(noise) == data_args.SNR
        else:
            assert float(noise) == data_args.epsilon

        print(f"Testing {noise} noise level")

        accuracy = []
        losses = []

        # FIX: the channel/model pair was constructed twice back to back;
        # build it once before restoring the checkpoint.
        channel = get_channel(data_args.channel, model_args.modelfree,
                              data_args)
        model = AutoEncoder(model_args, data_args, power_constraint, channel,
                            possible_inputs)
        saver.load(model)
        # autoencoding task: the message is both input and target
        for step in tqdm(range(data_args.batches_per_epoch)):
            msg = next(loader)
            metrics = model.trainable_encoder.test_on_batch(msg, msg)
            losses.append(metrics[0])
            accuracy.append(metrics[1])
        mean_loss = sum(losses) / len(losses)
        mean_BER = 1 - sum(accuracy) / len(accuracy)
        loss.append(mean_loss)
        BER.append(mean_BER)
        noises.append(noise)
        print(f"mean BER: {mean_BER}")
        print(f"mean loss: {mean_loss}")

    # create plots for results (dashed line + x markers, log-scale BER)
    plt.plot(noises, BER, 'b--')
    plt.plot(noises, BER, 'bx')
    plt.ylabel("BER")
    plt.xlabel("noise")
    plt.yscale('log')
    plt.ylim([1e-6, 1.0])
    plt.savefig("figures/figure.png")
Exemplo n.º 24
0
def main():
    """Train an MNIST autoencoder and log images/graph/embedding to TensorBoard.

    Sets up checkpoint and log directories, loads MNIST, trains the
    autoencoder via ``train_autoencoder``, then writes a latent-space
    embedding of a random test sample to TensorBoard.
    """
    save = "./experiments"
    log_dir = "./experiments"
    input_path = "./data//mnist"
    batch_size = 16
    lr = 1e-3
    latent_size = 12
    n_iter = 10

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    checkpoint_dir = f"{save}/checkpoints/autoencoder"
    os.makedirs(checkpoint_dir, exist_ok=True)

    # separate log dirs so cpu/cuda runs don't mix in TensorBoard
    if device.type == 'cuda':
        log_dir = f"{log_dir}/logs/autoencoder/cuda"
    else:
        log_dir = f"{log_dir}/logs/autoencoder/cpu"
    os.makedirs(log_dir, exist_ok=True)

    # timestamped checkpoint prefix
    checkpoint_path = f'{checkpoint_dir}/checkpoint_' + datetime.now(
    ).strftime('%d_%m_%Y_%H:%M:%S')

    # FIX: the SummaryWriter was instantiated twice in a row; once is enough.
    writer = SummaryWriter(log_dir)

    data = MNIST(transform=True,
                 test_size=0.1,
                 train_batch_size=batch_size,
                 input_path=input_path)
    traindata, valdata, testdata = data.data()
    train, val, test = data.loader()

    # sample n random test images for visualisation and the embedding view
    n = 300
    x, labels = testdata[np.random.randint(0, len(testdata), n)]
    images, labels = torch.from_numpy(x.reshape(
        n, 1, 28, 28)), torch.from_numpy(labels).to(device)
    img_grid = torchvision.utils.make_grid(images)
    # matplotlib_imshow(img_grid, one_channel=True)
    writer.add_image(f'{n}_mnist_images', img_grid)

    images, labels = images.to(device), labels.to(device)

    model = AutoEncoder(28 * 28, latent_size)
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr)
    criterion = torch.nn.MSELoss()

    model = model.to(device)

    # log the model graph using the flattened image batch as example input
    writer.add_graph(model, images.view(len(images), 28 * 28))

    losses = train_autoencoder(train,
                               test,
                               model,
                               criterion,
                               optimizer,
                               device,
                               checkpoint_path,
                               writer,
                               n_iter=n_iter)

    # project the sampled images into latent space for the embedding view
    with torch.no_grad():
        projection = model.encodeur(images.view(len(images), 28 * 28))

    writer.add_embedding(projection, metadata=labels, label_img=images)
Exemplo n.º 25
0
# Build dSprites splits: the val split is re-split 80/20 into val/test.
dspites_dataset = Dspites(dataset_path)
train_val = train_val_split(dspites_dataset)
val_test = train_val_split(train_val['val'], val_split=0.2)

data_loader_train = DataLoader(train_val['train'], batch_size=conf['train']['batch_size'], shuffle=True, num_workers=2)
data_loader_val = DataLoader(val_test['val'], batch_size=200, shuffle=False, num_workers=1)
data_loader_test = DataLoader(val_test['train'], batch_size=200, shuffle=False, num_workers=1)

print('latent space size:', conf['model']['latent_size'])
print('batch size:', conf['train']['batch_size'])

# NOTE(review): the batch size is overridden AFTER the loaders above were
# built, and the train/val loaders are then recreated with the new value —
# the first data_loader_train/val are effectively discarded.
conf['train']['batch_size'] = 128
data_loader_train = DataLoader(train_val['train'], batch_size=conf['train']['batch_size'], shuffle=True, num_workers=2)
data_loader_val = DataLoader(train_val['val'], batch_size=500, shuffle=False, num_workers=1)

model = AutoEncoder(in_channels=1, dec_channels=1, latent_size=conf['model']['latent_size'])
model = model.to(device)
# Restore pretrained autoencoder weights (metric-learning checkpoint).
model.load_state_dict(torch.load('weights/archi_mega_super_long_metric_learn_6.pt'))

#1 - scale (from 0.5 to 1.0), 2,3 - orientation (cos, sin), 4,5 - position (from 0 to 1)
# Regress only the two position factors (dSprites factor indices 4 and 5),
# whose values lie in [min_value, max_value].
latent_range = [4,5]
min_value = 0
max_value = 1

# Small regressor from the AE latent code to the selected factors.
regressor = SimpleNet(latent_size=conf['model']['latent_size'], number_of_classes=len(latent_range))
regressor.to(device)

loss_function = nn.MSELoss()
optimizer = torch.optim.Adam(regressor.parameters(), lr=0.001)
import numpy as np
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import tqdm
from torch.utils.data import DataLoader

from config import Config
from models import AutoEncoder, SiameseNetwork

config = Config()

# Prefer GPU when available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 

autoencoder = AutoEncoder(config)
siamese_network = SiameseNetwork(config)

# Checkpoint filenames; the leading '/' makes them append cleanly to
# config.saved_models_folder below.
autoencoder_file = '/autoencoder_epoch175_loss1.1991.pth'
siamese_file = '/siamese_network_epoch175_loss1.1991.pth'

# Optionally resume both networks from saved checkpoints.
if config.load_model:
    autoencoder.load_state_dict(torch.load(config.saved_models_folder + autoencoder_file))
    siamese_network.load_state_dict(torch.load(config.saved_models_folder + siamese_file))

# Move both networks to the device and leave them in training mode.
autoencoder.to(device)
autoencoder.train()

siamese_network.to(device)
siamese_network.train()
    valid_size = 0.2  # parte para validar
    n = len(DataSet)
    indices = list(range(n))
    np.random.shuffle(indices)  # revolvemos los indices
    split = int(np.floor(valid_size * n))
    train_idx, valid_idx = indices[
        split:], indices[:split]  # seprarmos los indices

    trainDataSet = Subset(
        DataSet, train_idx)  # tomamos un subconjunto de acuerdo a los indices
    valDataSet = Subset(DataSet, valid_idx)

    trainLoader = DataLoader(trainDataSet, shuffle=True, batch_size=16)
    valLoader = DataLoader(valDataSet, batch_size=16, shuffle=True)
    model = AutoEncoder(images=in_channels)

    if torch.cuda.is_available():
        model.encoder.cuda()
        model.decoder.cuda()

    optimizer = optim.Adam(model.parameters(),
                           lr=1e-4,
                           weight_decay=1e-5,
                           betas=[0.9, 0.999])
    n_train = len(trainDataSet)
    n_val = len(valDataSet)

    trainLossList = []
    valLossList = []
    ## Training the Auto-Encoder.
Exemplo n.º 28
0
def main():
    """Train an image-compression autoencoder on CLIC with (MS-)SSIM loss.

    Builds train (CLIC train+valid, augmented) and val (Kodak) loaders,
    optionally restores a checkpoint, then per epoch: trains, saves the
    latest weights, validates on Kodak, and keeps the best-scoring model.
    """
    opts = get_argparser().parse_args()

    # dataset: random 512px crops + flips for training, plain tensors for val
    train_transform = transforms.Compose([
        transforms.RandomCrop(size=512, pad_if_needed=True),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.ToTensor(),
    ])

    val_transform = transforms.Compose([transforms.ToTensor()])

    # train on CLIC train + valid concatenated
    train_loader = data.DataLoader(data.ConcatDataset([
        ImageDataset(root='datasets/data/CLIC/train',
                     transform=train_transform),
        ImageDataset(root='datasets/data/CLIC/valid',
                     transform=train_transform),
    ]),
                                   batch_size=opts.batch_size,
                                   shuffle=True,
                                   num_workers=2,
                                   drop_last=True)

    val_loader = data.DataLoader(ImageDataset(root='datasets/data/kodak',
                                              transform=val_transform),
                                 batch_size=1,
                                 shuffle=False,
                                 num_workers=1)

    os.environ['CUDA_VISIBLE_DEVICES'] = opts.gpu_id
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    print("Train set: %d, Val set: %d" %
          (len(train_loader.dataset), len(val_loader.dataset)))
    model = AutoEncoder(C=128, M=128, in_chan=3, out_chan=3).to(device)

    # optimizer
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=1e-4,
                                 weight_decay=1e-5)

    # checkpoint: restore weights when a valid path is given
    best_score = 0.0
    if opts.ckpt is not None and os.path.isfile(opts.ckpt):
        model.load_state_dict(torch.load(opts.ckpt))
    else:
        print("[!] Retrain")

    # loss: plain SSIM or multi-scale SSIM, per CLI option
    if opts.loss_type == 'ssim':
        criterion = SSIM_Loss(data_range=1.0, size_average=True, channel=3)
    else:
        criterion = MS_SSIM_Loss(data_range=1.0,
                                 size_average=True,
                                 channel=3,
                                 nonnegative_ssim=True)

    #==========   Train Loop   ==========#
    for cur_epoch in range(opts.total_epochs):
        # =====  Train  =====
        model.train()
        for cur_step, images in enumerate(train_loader):
            images = images.to(device, dtype=torch.float32)
            optimizer.zero_grad()
            outputs = model(images)

            loss = criterion(outputs, images)
            loss.backward()

            optimizer.step()

            if (cur_step) % opts.log_interval == 0:
                print("Epoch %d, Batch %d/%d, loss=%.6f" %
                      (cur_epoch, cur_step, len(train_loader), loss.item()))

        # =====  Save Latest Model  =====
        torch.save(model.state_dict(), 'latest_model.pt')

        # =====  Validation  =====
        print("Val on Kodak dataset...")
        # FIX: best_score was reset to 0.0 every epoch, so every epoch
        # overwrote best_model.pt; track the true best across epochs instead.
        cur_score = test(opts, model, val_loader, criterion, device)
        print("%s = %.6f" % (opts.loss_type, cur_score))
        # =====  Save Best Model  =====
        if cur_score > best_score:  # save best model
            best_score = cur_score
            torch.save(model.state_dict(), 'best_model.pt')
            print("Best model saved as best_model.pt")
Exemplo n.º 29
0
    decoded3 = decoded[2][0].squeeze()
    decoded3 = decoded3.cpu().detach().numpy()
    return z, decoded1, decoded2, decoded3


device = 'cuda'

# Load experiment configuration and build the dSprites dataset splits.
with open("config.json") as json_file:
    conf = json.load(json_file)
dataset_path = os.path.join(conf['data']['dataset_path'],
                            conf['data']['dataset_file'])
dspites_dataset = Dspites(dataset_path)
train_val = train_val_split(dspites_dataset)

model = AutoEncoder(in_channels=1,
                    dec_channels=1,
                    latent_size=conf['model']['latent_size'])
model = model.to(device)
load_path = 'weights/my_algorithm_2triplet_6.pt'
# Restore pretrained autoencoder weights from the triplet checkpoint.
model.load_state_dict(torch.load(load_path))

# Save three sample originals for visual comparison with reconstructions.
image1 = train_val['train'].__getitem__(0)['image']
plt.imsave("images/original_image1.png", image1)
image2 = train_val['train'].__getitem__(1)['image']
plt.imsave("images/original_image2.png", image2)
image3 = train_val['train'].__getitem__(3000)['image']
plt.imsave("images/original_image3.png", image3)

# Switch to inference mode before encoding/decoding.
model.eval()
z, decoded1, decoded2, decoded3 = encode_decode_3images(
Exemplo n.º 30
0
def main():
    """Pretrain an AE for dimensionality reduction, then grid-search classifiers.

    Loads a ``.npz`` dataset, pretrains an autoencoder (checkpointing on
    best training loss, with optional early stopping), encodes the data
    into the AE's latent space, and evaluates Linear SVM / RBF SVM /
    Logistic Regression with ``GridSearchCV`` on the low-dimensional
    representation, appending results to the report file.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--filename', required=True, help='Name/path of file')
    parser.add_argument('--savefile',
                        type=str,
                        default='./output.txt',
                        help='Path to file where will be save results')
    parser.add_argument('--class_weight',
                        action='store_true',
                        default=None,
                        help='Use balance weight')
    parser.add_argument('--seed', default=1234, help='Number of seed')

    parser.add_argument('--pretrain_epochs',
                        type=int,
                        default=100,
                        help="Number of epochs to pretrain model AE")
    parser.add_argument('--dims_layers_ae',
                        type=int,
                        nargs='+',
                        default=[500, 100, 10],
                        help="Dimensional of layers in AE")
    parser.add_argument('--batch_size', type=int, default=50)
    parser.add_argument('--lr',
                        type=float,
                        default=0.001,
                        help="Learning rate")
    parser.add_argument('--use_dropout',
                        action='store_true',
                        help="Use dropout")
    parser.add_argument('--no-cuda',
                        action='store_true',
                        help='disables CUDA training')
    parser.add_argument('--earlyStopping',
                        type=int,
                        default=None,
                        help='Number of epochs to early stopping')
    parser.add_argument('--use_scheduler', action='store_true')
    args = parser.parse_args()
    print(args)

    # reproducibility + device selection
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    print(f'Device: {device.type}')

    loaded = np.load(args.filename)
    data = loaded['data']
    labels = loaded['label']
    del loaded  # release the npz handle early

    name_target = PurePosixPath(args.savefile).stem
    save_dir = f'{PurePosixPath(args.savefile).parent}/tensorboard/{name_target}'
    Path(save_dir).mkdir(parents=True, exist_ok=True)

    # prepend the data dimensionality as the AE input layer size
    args.dims_layers_ae = [data.shape[1]] + args.dims_layers_ae
    model_ae = AutoEncoder(args.dims_layers_ae, args.use_dropout).to(device)

    criterion_ae = nn.MSELoss()
    optimizer = torch.optim.Adam(model_ae.parameters(),
                                 lr=args.lr,
                                 weight_decay=1e-5)

    scheduler = None
    if args.use_scheduler:
        # NOTE(review): the lambda returns a constant 0.95, which holds lr
        # at 0.95 * initial rather than decaying it each epoch — confirm
        # this is intended (lambda ep: 0.95 ** ep would decay).
        scheduler = torch.optim.lr_scheduler.LambdaLR(
            optimizer, lr_lambda=lambda ep: 0.95)

    # FIX: np.Inf was removed in NumPy 2.0; np.inf is the supported spelling.
    min_val_loss = np.inf
    epochs_no_improve = 0
    fit_time_ae = 0
    writer = SummaryWriter(save_dir)
    model_path = f'{PurePosixPath(args.savefile).parent}/models_AE/{name_target}.pth'
    Path(PurePosixPath(model_path).parent).mkdir(parents=True, exist_ok=True)
    # pretrain the AE, checkpointing whenever training loss improves
    epoch_tqdm = tqdm(range(args.pretrain_epochs), desc="Epoch loss")
    for epoch in epoch_tqdm:
        loss_train, fit_t = train_step(model_ae, criterion_ae, optimizer,
                                       scheduler, data, labels, device, writer,
                                       epoch, args.batch_size)
        fit_time_ae += fit_t
        if loss_train < min_val_loss:
            torch.save(model_ae.state_dict(), model_path)
            epochs_no_improve = 0
            min_val_loss = loss_train
        else:
            epochs_no_improve += 1
        epoch_tqdm.set_description(
            f"Epoch loss: {loss_train:.5f} (minimal loss: {min_val_loss:.5f}, stop: {epochs_no_improve}|{args.earlyStopping})"
        )
        if args.earlyStopping is not None and epoch > args.earlyStopping and epochs_no_improve == args.earlyStopping:
            print('\033[1;31mEarly stopping in AE model\033[0m')
            break

    print('===================================================')
    print(f'Transforming data to lower dimensional')
    # restore the best checkpoint (map to CPU when CUDA is unavailable)
    if device.type == "cpu":
        model_ae.load_state_dict(
            torch.load(model_path, map_location=lambda storage, loc: storage))
    else:
        model_ae.load_state_dict(torch.load(model_path))
    model_ae.eval()

    # encode the whole dataset batch-by-batch into the latent space
    low_data = np.empty((data.shape[0], args.dims_layers_ae[-1]))
    n_batch, rest = divmod(data.shape[0], args.batch_size)
    n_batch = n_batch + 1 if rest else n_batch
    score_time_ae = 0
    with torch.no_grad():
        test_tqdm = tqdm(range(n_batch), desc="Transform data", leave=False)
        for i in test_tqdm:
            start_time = time.time()
            batch = torch.from_numpy(
                data[i * args.batch_size:(i + 1) *
                     args.batch_size, :]).float().to(device)
            # ===================forward=====================
            z, _ = model_ae(batch)
            low_data[i * args.batch_size:(i + 1) *
                     args.batch_size, :] = z.detach().cpu().numpy()
            end_time = time.time()
            score_time_ae += end_time - start_time
    print('Data shape after transformation: {}'.format(low_data.shape))
    print('===================================================')

    # translate the boolean flag into sklearn's class_weight value
    if args.class_weight:
        args.class_weight = 'balanced'
    else:
        args.class_weight = None

    # Split data
    sss = StratifiedShuffleSplit(n_splits=3,
                                 test_size=0.1,
                                 random_state=args.seed)
    scoring = {
        'acc': make_scorer(accuracy_score),
        'roc_auc': make_scorer(roc_auc_score, needs_proba=True),
        'mcc': make_scorer(matthews_corrcoef),
        'bal': make_scorer(balanced_accuracy_score),
        'recall': make_scorer(recall_score)
    }

    max_iters = 10000
    # write the report header (with AE timing) before the classifier rows
    save_results(args.savefile,
                 'w',
                 'model',
                 None,
                 True,
                 fit_time_ae=fit_time_ae,
                 score_time_ae=score_time_ae)

    with warnings.catch_warnings():
        warnings.simplefilter('ignore', ConvergenceWarning)
        warnings.simplefilter('ignore', RuntimeWarning)
        environ["PYTHONWARNINGS"] = "ignore"

        # Linear SVM
        print("\rLinear SVM         ", end='')
        parameters = {'C': [0.01, 0.1, 1, 10, 100]}
        # svc = svm.LinearSVC(class_weight=args.class_weight, random_state=seed)
        svc = svm.SVC(kernel='linear',
                      class_weight=args.class_weight,
                      random_state=args.seed,
                      probability=True,
                      max_iter=max_iters)
        clf = GridSearchCV(svc,
                           parameters,
                           cv=sss,
                           n_jobs=-1,
                           scoring=scoring,
                           refit='roc_auc',
                           return_train_score=True)
        try:
            clf.fit(low_data, labels)
        except Exception as e:
            if hasattr(e, 'message'):
                print(e.message)
            else:
                print(e)

        save_results(args.savefile,
                     'a',
                     'Linear SVM',
                     clf,
                     False,
                     fit_time_ae=fit_time_ae,
                     score_time_ae=score_time_ae)

        # RBF SVM
        print("\rRBF SVM             ", end='')
        parameters = {
            'kernel': ['rbf'],
            'C': [0.01, 0.1, 1, 10, 100],
            'gamma': ['scale', 'auto', 1e-2, 1e-3, 1e-4]
        }
        svc = svm.SVC(gamma="scale",
                      class_weight=args.class_weight,
                      random_state=args.seed,
                      probability=True,
                      max_iter=max_iters)
        clf = GridSearchCV(svc,
                           parameters,
                           cv=sss,
                           n_jobs=-1,
                           scoring=scoring,
                           refit='roc_auc',
                           return_train_score=True)
        try:
            clf.fit(low_data, labels)
        except Exception as e:
            if hasattr(e, 'message'):
                print(e.message)
            else:
                print(e)
        save_results(args.savefile,
                     'a',
                     'RBF SVM',
                     clf,
                     False,
                     fit_time_ae=fit_time_ae,
                     score_time_ae=score_time_ae)

        # LogisticRegression
        print("\rLogisticRegression  ", end='')
        lreg = LogisticRegression(random_state=args.seed,
                                  solver='lbfgs',
                                  multi_class='ovr',
                                  class_weight=args.class_weight,
                                  n_jobs=-1,
                                  max_iter=max_iters)
        parameters = {'C': [0.01, 0.1, 1, 10, 100]}
        clf = GridSearchCV(lreg,
                           parameters,
                           cv=sss,
                           n_jobs=-1,
                           scoring=scoring,
                           refit='roc_auc',
                           return_train_score=True)
        try:
            clf.fit(low_data, labels)
        except Exception as e:
            if hasattr(e, 'message'):
                print(e.message)
            else:
                print(e)
        save_results(args.savefile,
                     'a',
                     'LogisticRegression',
                     clf,
                     False,
                     fit_time_ae=fit_time_ae,
                     score_time_ae=score_time_ae)
        print()
Exemplo n.º 31
0
def main(args):
    """Train a conditional autoencoder/GAN hybrid on MNIST.

    The generator is the decoder half of an ``AutoEncoder``; each batch
    performs three updates:

      1. the discriminator D on a real batch and on a decoded-noise fake batch,
      2. the decoder G against the just-updated discriminator,
      3. the full autoencoder on reconstruction loss plus a latent-centering
         penalty.

    Side effects: writes sample grids to ``./samples`` and checkpoints to
    ``./ckpts/recent.pth``.

    Args:
        args: parsed command-line namespace; the attributes read below
            (dataroot, workers, batch_size, nc, ngf, ndf, nhd, num_epochs,
            lr, beta1, ngpu, resume, record_pnt, log_pnt, mse) must exist.
    """
    # Set random seed for reproducibility
    manualSeed = 999
    #manualSeed = random.randint(1, 10000) # use if you want new results
    print("Random Seed: ", manualSeed)
    random.seed(manualSeed)
    torch.manual_seed(manualSeed)

    # Unpack hyperparameters from the CLI namespace.
    dataroot = args.dataroot
    workers = args.workers
    batch_size = args.batch_size
    nc = args.nc          # number of image channels
    ngf = args.ngf        # generator feature-map width
    ndf = args.ndf        # discriminator feature-map width
    nhd = args.nhd        # latent (hidden) dimension
    num_epochs = args.num_epochs
    lr = args.lr
    beta1 = args.beta1    # Adam beta1
    ngpu = args.ngpu
    resume = args.resume
    record_pnt = args.record_pnt  # iterations between sample/checkpoint dumps
    log_pnt = args.log_pnt        # iterations between console log lines
    mse = args.mse  # NOTE(review): read but never used below — confirm intent
    '''
    # We can use an image folder dataset the way we have it setup.
    # Create the dataset
    dataset = dset.ImageFolder(root=dataroot,
                               transform=transforms.Compose([
                                   transforms.Resize(image_size),
                                   transforms.CenterCrop(image_size),
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                               ]))
    # Create the dataloader
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                             shuffle=True, num_workers=workers)
    '''
    dataset = dset.MNIST(
        root=dataroot,
        transform=transforms.Compose([
            transforms.ToTensor(),
            # transforms.Normalize(0.5, 0.5),
        ]))

    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             shuffle=True,
                                             num_workers=workers)

    # Decide which device we want to run on
    device = torch.device("cuda:0" if (
        torch.cuda.is_available() and ngpu > 0) else "cpu")

    # Create the generator
    netG = AutoEncoder(nc, ngf, nhd=nhd).to(device)

    # Handle multi-gpu if desired
    if (device.type == 'cuda') and (ngpu > 1):
        netG = nn.DataParallel(netG, list(range(ngpu)))

    # Apply the weights_init function to randomly initialize all weights
    #  to mean=0, stdev=0.2.
    netG.apply(weights_init)

    # Create the Discriminator
    netD = Discriminator(nc, ndf, ngpu).to(device)

    # Handle multi-gpu if desired
    if (device.type == 'cuda') and (ngpu > 1):
        netD = nn.DataParallel(netD, list(range(ngpu)))

    # Apply the weights_init function to randomly initialize all weights
    #  to mean=0, stdev=0.2.
    netD.apply(weights_init)

    #resume training if args.resume is True
    if resume:
        ckpt = torch.load('ckpts/recent.pth')
        netG.load_state_dict(ckpt["netG"])
        netD.load_state_dict(ckpt["netD"])

    # Initialize BCELoss function
    criterion = nn.BCELoss()
    MSE = nn.MSELoss()
    mse_coeff = 1.       # weight of the reconstruction term
    center_coeff = 0.001  # weight of the latent-centering penalty

    # Establish convention for real and fake flags during training.
    # These must be floats: torch.full() infers an integer dtype from an
    # integer fill value (an error on recent PyTorch versions), and
    # nn.BCELoss requires floating-point targets.
    real_flag = 1.
    fake_flag = 0.

    # Setup Adam optimizers for both G and D
    optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999))
    optimizerG = optim.Adam(netG.dec.parameters(), lr=lr, betas=(beta1, 0.999))
    optimizerAE = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.999))

    # Training Loop

    # Lists to keep track of progress
    iters = 0

    # Running sums, reset every log_pnt iterations.
    R_errG = 0
    R_errD = 0
    R_errAE = 0
    R_std = 0   # accumulates mean(hidden**2), i.e. second moment, not a true std
    R_mean = 0

    print("Starting Training Loop...")
    # For each epoch
    for epoch in range(num_epochs):
        # For each batch in the dataloader
        for i, data in enumerate(dataloader, 0):

            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            ## Train with all-real batch
            netD.zero_grad()
            # Format batch: one-hot encode the 10 MNIST class labels for
            # conditioning.
            real_img, label = data
            real_img, label = real_img.to(device), to_one_hot_vector(
                10, label).to(device)

            b_size = real_img.size(0)
            flag = torch.full((b_size, ), real_flag, device=device)
            # Forward pass real batch through D
            output = netD(real_img, label).view(-1)
            # Calculate loss on all-real batch
            errD_real = criterion(output, flag)
            # Calculate gradients for D in backward pass
            errD_real.backward()
            D_x = output.mean().item()

            ## Train with all-fake batch
            # Generate fake image batch with G
            noise = torch.randn(b_size, nhd, 1, 1).to(device)
            fake = netG.dec(noise, label)
            flag.fill_(fake_flag)
            # Classify all fake batch with D (detach: no grads into G here)
            output = netD(fake.detach(), label).view(-1)
            # Calculate D's loss on the all-fake batch
            errD_fake = criterion(output, flag)
            # Calculate the gradients for this batch
            errD_fake.backward()
            D_G_z1 = output.mean().item()
            # Add the gradients from the all-real and all-fake batches
            errD = errD_real + errD_fake
            # Update D
            optimizerD.step()

            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            netG.dec.zero_grad()
            flag.fill_(real_flag)  # fake flags are real for generator cost
            # Since we just updated D, perform another forward pass of all-fake batch through D
            output = netD(fake, label).view(-1)
            # Calculate G's loss based on this output
            errG = criterion(output, flag)
            # Calculate gradients for G
            errG.backward()
            # Update G
            optimizerG.step()

            ############################
            # (3) Update AE network: minimize reconstruction loss
            ###########################
            netG.zero_grad()
            new_img = netG(real_img, label, label)
            hidden = netG.enc(real_img, label)
            # Penalize latent codes that drift away from the origin.
            central_loss = MSE(hidden, torch.zeros(hidden.shape).to(device))
            errAE = mse_coeff* MSE(real_img, new_img) \
                    + center_coeff* central_loss
            errAE.backward()
            optimizerAE.step()

            # Accumulate running statistics for the next log line.
            R_errG += errG.item()
            R_errD += errD.item()
            R_errAE += errAE.item()
            R_std += (hidden**2).mean().item()
            R_mean += hidden.mean().item()
            # Output training stats
            if i % log_pnt == 0:
                print(
                    '[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tLoss_AE: %.4f\t'
                    % (epoch, num_epochs, i, len(dataloader), R_errD / log_pnt,
                       R_errG / log_pnt, R_errAE / log_pnt))
                print('mean: %.4f\tstd: %.4f\tcentral/msecoeff: %4f' %
                      (R_mean / log_pnt, R_std / log_pnt,
                       center_coeff / mse_coeff))
                R_errG = 0.
                R_errD = 0.
                R_errAE = 0.
                R_std = 0.
                R_mean = 0.

            # Check how the generator is doing by saving G's output on fixed_noise
            if (iters % record_pnt == 0) or ((epoch == num_epochs - 1) and
                                             (i == len(dataloader) - 1)):
                vutils.save_image(
                    fake.to("cpu"),
                    './samples/image_{}.png'.format(iters // record_pnt))
                # NOTE(review): nhd is not saved here, but resuming only
                # restores state dicts, so shapes must match anyway.
                torch.save(
                    {
                        "netG": netG.state_dict(),
                        "netD": netD.state_dict(),
                        "nc": nc,
                        "ngf": ngf,
                        "ndf": ndf
                    }, 'ckpts/recent.pth')

            iters += 1
Exemplo n.º 32
0
import matplotlib.pyplot as plt
import numpy as np
import tflearn  # needed: `import tflearn.datasets.mnist as mnist` binds only `mnist`
import tflearn.datasets.mnist as mnist
import sys

from models import AutoEncoder

# Visualize how well a trained autoencoder reconstructs unseen MNIST digits.
# Usage: python script.py <layer_size> [<layer_size> ...] <bottleneck_size>
layers = list(map(int, sys.argv[1:]))
neck = layers[-1]       # last CLI argument is the bottleneck width
layers = layers[:-1]    # the rest are the hidden-layer widths

X, Y, testX, testY = mnist.load_data(one_hot=True)

# Testing the image reconstruction on new data (test set)
Xes = tflearn.data_utils.shuffle(testX)[0]

d = AutoEncoder(28 * 28, layers, neck)
d.load()

# Applying encode and decode over test set
encode_decode = d.model.predict(Xes)

# Compare original images with their reconstructions: row 0 originals,
# row 1 reconstructions, row 2 the pixel-wise residual.
f, a = plt.subplots(3, 20, figsize=(20, 3))
for i in range(20):
    a[0][i].imshow(np.reshape(Xes[i], (28, 28)), cmap="hot")
    a[1][i].imshow(np.reshape(encode_decode[i], (28, 28)), cmap="hot")
    a[2][i].imshow(np.reshape(Xes[i] - encode_decode[i], (28, 28)), cmap="hot")
f.show()
plt.draw()
plt.waitforbuttonpress()
Exemplo n.º 33
0
import tflearn
from models import AutoEncoder
import matplotlib.pyplot as plt
import numpy as np

# Load a trained autoencoder with a single-unit bottleneck and sweep its
# one-dimensional latent code to see what the decoder produces.
autoenc = AutoEncoder(28 * 28, [256], 1)
autoenc.load()
decode = autoenc.decoder()

# Two sweeps over 20 columns: row 0 covers [-1, 11), row 1 covers [-10, 10).
fig, axes = plt.subplots(2, 20, figsize=(20, 2))
for col in range(20):
    t = col / 20.0
    top_img = np.reshape(decode([t * 12 - 1]), (28, 28))
    bot_img = np.reshape(decode([(t - 0.5) * 20]), (28, 28))
    axes[0][col].imshow(top_img, cmap="hot")
    axes[1][col].imshow(bot_img, cmap="hot")
fig.show()
plt.draw()
plt.waitforbuttonpress()