Example #1
def main():
    train_loader = return_MVTecAD_loader(
        image_dir="./mvtec_anomaly_detection/grid/train/good/",
        batch_size=256,
        train=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)

    seed = 42
    out_dir = './logs'
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    checkpoints_dir = "./checkpoints"
    if not os.path.exists(checkpoints_dir):
        os.mkdir(checkpoints_dir)

    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)

    model = VAE(z_dim=512).to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=5e-4)
    num_epochs = 500
    for epoch in range(num_epochs):
        loss = train(model=model,
                     train_loader=train_loader,
                     device=device,
                     optimizer=optimizer,
                     epoch=epoch)
        print('epoch [{}/{}], train loss: {:.4f}'.format(
            epoch + 1, num_epochs, loss))
        if (epoch + 1) % 10 == 0:
            torch.save(
                model.state_dict(),
                os.path.join(checkpoints_dir, "{}.pth".format(epoch + 1)))
    test_loader = return_MVTecAD_loader(
        image_dir="./mvtec_anomaly_detection/grid/test/metal_contamination/",
        batch_size=10,
        train=False)
    eval(model=model, test_loader=test_loader, device=device)
    EBM(model, test_loader, device)
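
Example #1 relies on project helpers (return_MVTecAD_loader, train, eval, EBM) that are not shown here. As a rough orientation, below is a minimal sketch of what the per-epoch train step could look like, assuming the standard VAE objective (summed reconstruction error plus the analytic KL term) and that model(x) returns (reconstruction, mu, logvar); neither assumption is taken from the original code.

import torch
import torch.nn.functional as F

def train(model, train_loader, device, optimizer, epoch):
    # Sketch of one training epoch; returns the mean loss over the loader.
    # Assumes the loader yields image batches and that model(x) returns
    # (reconstruction, mu, logvar) -- adjust to the actual VAE implementation.
    model.train()
    total_loss = 0.0
    for x in train_loader:
        x = x.to(device)
        recon, mu, logvar = model(x)
        recon_loss = F.mse_loss(recon, x, reduction='sum')
        kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
        loss = recon_loss + kld
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    return total_loss / len(train_loader)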
Example #2
    args = config()
    tensorboard = SummaryWriter(log_dir='logs')
    if os.path.isdir(args.save_dir):
        shutil.rmtree(args.save_dir)
    os.makedirs(args.save_dir)

    device = torch.device('cuda:{}'.format(args.gpu))
    model = VAE(network_type=args.network_type, latent_dim=20).to(device)
    opt = optim.Adam(model.parameters(), lr=1e-3)

    train_items = DataLoader(datasets.MNIST(root='./data',
                                            train=True,
                                            download=True,
                                            transform=transforms.ToTensor()),
                             batch_size=args.batch_size,
                             shuffle=True,
                             num_workers=multiprocessing.cpu_count(),
                             pin_memory=True)

    test_items = DataLoader(datasets.MNIST(root='./data',
                                           train=False,
                                           download=True,
                                           transform=transforms.ToTensor()),
                            batch_size=args.batch_size,
                            shuffle=True)

    for epoch in range(1, args.epoch + 1):
        training(epoch, model, opt, train_items, args, tensorboard)
        test(epoch, test_items, model, args, tensorboard)
        torch.save(model.state_dict(), 'model')  # overwrites the same checkpoint file every epoch
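
The config() helper in Example #2 is not shown. The sketch below is a plausible reconstruction based only on the attributes the snippet reads (save_dir, gpu, network_type, batch_size, epoch); the option names match those accesses, but the defaults are illustrative.

import argparse

def config():
    # Hypothetical parser covering only the options Example #2 actually uses.
    parser = argparse.ArgumentParser(description='VAE on MNIST')
    parser.add_argument('--save_dir', type=str, default='./results')
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--network_type', type=str, default='fc')
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--epoch', type=int, default=20)
    return parser.parse_args()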
Example #3
                        100.0 * batch_idx / len(dataloader),
                    criterion.latestloss()['MSE'].item(),
                    criterion.latestloss()['KLD'].item()
                    ), end='\r')
            sys.stdout.write('\033[K')  # clear the in-place progress line (ANSI erase to end of line)
    
    print("Epoch: {}/{} Loss:{:.6f}".format(epoch, opt.epochs, total_loss / len(dataloader)))
    mse_loss_list.append(mse_loss / len(dataloader))
    kld_loss_list.append(kld_loss / len(dataloader))
        
    # save
    current_loss = total_loss / len(dataloader)
    state = {
        'model': 'VAE',
        'epoch': epoch,
        'state_dict': netVAE.state_dict(),
        'optimizer': optimizer.state_dict(),
        'kld_loss': kld_loss_list,
        'mse_loss': mse_loss_list
    }

    if not os.path.exists(opt.outf):
        os.makedirs(opt.outf)

    filename = os.path.join(opt.outf, "VAE_epoch_{}.pth".format(epoch))
    best_filename = os.path.join(opt.outf, "VAE_best.pth")

    if epoch % opt.save_freq == 0:
        torch.save(state, f=filename)
    if min_loss > current_loss:
        min_loss = current_loss  # track the new best so the "best" checkpoint is only overwritten on improvement
        torch.save(state, f=best_filename)
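
The checkpoint dictionary written in Example #3 bundles the model and optimizer state with the per-epoch loss histories. Below is a short sketch of loading such a file back for resuming training or plotting, assuming the caller has already constructed the same VAE and optimizer objects; the function name and file path here are placeholders, not taken from the original.

import torch

def load_checkpoint(path, netVAE, optimizer):
    # Restore a checkpoint written by Example #3; keys match the state dict saved above.
    checkpoint = torch.load(path, map_location='cpu')
    netVAE.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    start_epoch = checkpoint['epoch'] + 1        # resume from the following epoch
    mse_loss_list = checkpoint['mse_loss']       # per-epoch reconstruction-loss history
    kld_loss_list = checkpoint['kld_loss']       # per-epoch KL-loss history
    return start_epoch, mse_loss_list, kld_loss_list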