Example No. 1
def main():
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)

    seed = 42
    out_dir = './logs'
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    checkpoints_dir = "./checkpoints"
    if not os.path.exists(checkpoints_dir):
        os.mkdir(checkpoints_dir)

    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)

    model = VAE(z_dim=512)
    # map_location keeps the checkpoint loadable on CPU-only machines as well
    model.load_state_dict(
        torch.load("./checkpoints/500.pth", map_location=device))
    model = model.to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=5e-4)

    test_loader = return_MVTecAD_loader(
        image_dir="./mvtec_anomaly_detection/grid/test/metal_contamination/",
        batch_size=10,
        train=False)
    #eval(model=model,test_loader=test_loader,device=device)
    EBM(model, test_loader, device)
Example No. 2
 def __call__(self, config, seed, device_str):
     # Set random seeds
     set_global_seeds(seed)
     # Create device
     device = torch.device(device_str)
     # Use log dir for current job (run_experiment)
     logdir = Path(config['log.dir']) / str(config['ID']) / str(seed)
     
     # Create dataset for training and testing
     train_dataset = datasets.MNIST('data/', 
                                    train=True, 
                                    download=True, 
                                    transform=transforms.ToTensor())
     test_dataset = datasets.MNIST('data/', 
                                   train=False, 
                                   transform=transforms.ToTensor())
     # Define GPU-dependent keywords for DataLoader
     if config['cuda']:
         kwargs = {'num_workers': 1, 'pin_memory': True}
     else:
         kwargs = {}
     # Create data loader for training and testing
     train_loader = DataLoader(train_dataset, 
                               batch_size=config['train.batch_size'], 
                               shuffle=True, 
                               **kwargs)
     test_loader = DataLoader(test_dataset, 
                              batch_size=config['eval.batch_size'], 
                              shuffle=True, 
                              **kwargs)
     
     # Create the model
     if config['network.type'] == 'VAE':
         model = VAE(config=config)
     elif config['network.type'] == 'ConvVAE':
         model = ConvVAE(config=config)
     else:
         raise ValueError(f"Unsupported network.type: {config['network.type']}")
     model = model.to(device)
     
     # Create optimizer
     optimizer = optim.Adam(model.parameters(), lr=1e-3)
     
     # Create engine
     engine = Engine(agent=model,
                     runner=None,
                     config=config,
                     device=device,
                     optimizer=optimizer, 
                     train_loader=train_loader, 
                     test_loader=test_loader)
     
     # Training and evaluation
     for epoch in range(config['train.num_epoch']):
         train_output = engine.train(n=epoch)
         engine.log_train(train_output, logdir=logdir, epoch=epoch)
         
         eval_output = engine.eval(n=epoch)
         engine.log_eval(eval_output, logdir=logdir, epoch=epoch)
 
     return None
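
The __call__ above only reads a handful of configuration keys. For reference, a minimal config dictionary covering exactly the keys this snippet touches could look like the sketch below; the concrete values are illustrative assumptions, not taken from the original project.

# Hypothetical config for the snippet above; the values are assumptions.
config = {
    'log.dir': 'logs',          # root directory for per-run logs
    'ID': 0,                    # experiment identifier used in the log path
    'cuda': True,               # enables pin_memory and a worker for the DataLoaders
    'train.batch_size': 128,    # batch size of the training loader
    'eval.batch_size': 128,     # batch size of the test loader
    'network.type': 'VAE',      # selects VAE or ConvVAE
    'train.num_epoch': 10,      # number of training/evaluation epochs
}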
Example No. 3
def main():
    train_loader = return_MVTecAD_loader(
        image_dir="./mvtec_anomaly_detection/grid/train/good/",
        batch_size=256,
        train=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)

    seed = 42
    out_dir = './logs'
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    checkpoints_dir = "./checkpoints"
    if not os.path.exists(checkpoints_dir):
        os.mkdir(checkpoints_dir)

    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)

    model = VAE(z_dim=512).to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=5e-4)
    num_epochs = 500
    for epoch in range(num_epochs):
        loss = train(model=model,
                     train_loader=train_loader,
                     device=device,
                     optimizer=optimizer,
                     epoch=epoch)
        print('epoch [{}/{}], train loss: {:.4f}'.format(
            epoch + 1, num_epochs, loss))
        if (epoch + 1) % 10 == 0:
            torch.save(
                model.state_dict(),
                os.path.join(checkpoints_dir, "{}.pth".format(epoch + 1)))
    test_loader = return_MVTecAD_loader(
        image_dir="./mvtec_anomaly_detection/grid/test/metal_contamination/",
        batch_size=10,
        train=False)
    eval(model=model, test_loader=test_loader, device=device)
    EBM(model, test_loader, device)
Example No. 4
File: algo.py  Project: vin136/lagom
 def __call__(self, config, seed, device):
     set_global_seeds(seed)
     logdir = Path(config['log.dir']) / str(config['ID']) / str(seed)
     
     train_loader, test_loader = self.make_dataset(config)
     
     model = VAE(config=config, device=device)
     
     model.train_loader = train_loader
     model.test_loader = test_loader
     model.optimizer = optim.Adam(model.parameters(), lr=1e-3)
     
     engine = Engine(agent=model, runner=None, config=config)
     
     for epoch in range(config['train.num_epoch']):
         train_output = engine.train(n=epoch)
         engine.log_train(train_output, logdir=logdir, epoch=epoch)
         
         eval_output = engine.eval(n=epoch)
         engine.log_eval(eval_output, logdir=logdir, epoch=epoch)
 
     return None
Example No. 5
            total += len(batch_gpu["id"])
            acc += correct
        return acc / total


# Train VAE
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
vae = VAE()

# Use multiple GPUs if available
if torch.cuda.device_count() > 1:
    print("Using ", torch.cuda.device_count(), "GPUs")
    vae = nn.DataParallel(vae)
vae.to(device)

optimizer = torch.optim.Adam(vae.parameters(), lr=0.001)
losses1 = []
valError = []

for epoch in range(50):
    train_iter = iter(train_loader)
    batch = None
    preds = None
    for i in range(len(train_loader)):
        batch = next(train_iter)
        batch_gpu = batch.to(device)
        preds = vae(batch_gpu)
        pred_cpu = preds.to('cpu')

        loss = nn.functional.mse_loss(preds["mask"], batch_gpu["mask"])
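
The loop above is cut off right after the loss is computed. A minimal sketch of the missing optimization step, assuming the standard PyTorch update pattern rather than the original author's exact code:

        # Sketch of the rest of the inner loop (standard PyTorch pattern; an assumption)
        optimizer.zero_grad()        # clear gradients from the previous iteration
        loss.backward()              # backpropagate the reconstruction loss
        optimizer.step()             # update the VAE parameters
    losses1.append(loss.item())      # track the last batch loss of the epoch (assumption)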
Example No. 6
                               tilde_x.view(args.batch_size, 1, 28, 28),
                               global_step=epoch)
        save_image(tilde_x.view(args.batch_size, 1, 28, 28),
                   os.path.join(args.save_dir, 'samples%d.jpg' % (epoch)))


if __name__ == '__main__':
    args = config()
    tensorboard = SummaryWriter(log_dir='logs')
    if os.path.isdir(args.save_dir):
        shutil.rmtree(args.save_dir)
    os.makedirs(args.save_dir)

    # torch.device does not accept spaces in the device string ('cuda:0', not 'cuda: 0')
    device = torch.device('cuda:{}'.format(args.gpu))
    model = VAE(network_type=args.network_type, latent_dim=20).to(device)
    opt = optim.Adam(model.parameters(), lr=1e-3)

    train_items = DataLoader(datasets.MNIST(root='./data',
                                            train=True,
                                            download=True,
                                            transform=transforms.ToTensor()),
                             batch_size=args.batch_size,
                             shuffle=True,
                             num_workers=multiprocessing.cpu_count(),
                             pin_memory=True)

    test_items = DataLoader(datasets.MNIST(root='./data',
                                           train=False,
                                           download=True,
                                           transform=transforms.ToTensor()),
                            batch_size=args.batch_size,
Example No. 7
# dataset
dataset = ImageFolder(
    root=opt.dataroot,
    transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
)

dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
                                         shuffle=True, num_workers=int(opt.workers))

# model
netVAE = VAE()
criterion = CustomLoss(3e-6)
optimizer = optim.Adam(netVAE.parameters(), lr=0.0001, betas=(0.5, 0.999))

if opt.cuda:
    netVAE.cuda()

# train
min_loss = float('inf')
kld_loss_list, mse_loss_list = [], []
for epoch in range(1, opt.epochs + 1):
    mse_loss, kld_loss, total_loss = 0, 0, 0
    # The DataLoader over ImageFolder yields (images, labels) pairs; discard the labels
    for batch_idx, (in_fig, _) in enumerate(dataloader):
        x = Variable(in_fig)
        if opt.cuda:
            x = x.cuda()
        optimizer.zero_grad()
        output, mu, logvar = netVAE(x)
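
The snippet ends at the forward pass, and the signature of CustomLoss is not shown. As a reference only, the standard VAE objective that such a loss typically combines (sum-of-squares reconstruction error plus the closed-form KL divergence to the unit Gaussian) could be written as below; treating 3e-6 as the KL weight is an assumption.

        # Reference sketch, not the snippet's CustomLoss: reconstruction + weighted KL
        recon = torch.nn.functional.mse_loss(output, x, reduction='sum')
        kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
        loss = recon + 3e-6 * kld    # 3e-6 assumed to play the role of the KL weight
        loss.backward()
        optimizer.step()
        mse_loss += recon.item()
        kld_loss += kld.item()
        total_loss += loss.item()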