Example no. 1
0
def main():
    """Load a pretrained VAE checkpoint and run EBM evaluation on an MVTecAD test split.

    Side effects: creates ./logs and ./checkpoints if missing, seeds the RNGs,
    and runs the EBM evaluation loop (output handling is inside EBM).
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)

    seed = 42
    out_dir = './logs'
    # makedirs(..., exist_ok=True) replaces the racy exists()/mkdir() pair.
    os.makedirs(out_dir, exist_ok=True)
    checkpoints_dir = "./checkpoints"
    # BUG FIX: the original called os.mkdir(out_dir) here, so the
    # checkpoints directory was never created.
    os.makedirs(checkpoints_dir, exist_ok=True)

    # Seed CPU and (when present) CUDA RNGs for reproducibility.
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)

    model = VAE(z_dim=512)
    # map_location lets a GPU-trained checkpoint load on CPU-only machines.
    model.load_state_dict(
        torch.load("./checkpoints/500.pth", map_location=device))
    model = model.to(device)

    # NOTE(review): optimizer is created but unused in this function —
    # presumably kept for parity with the training script; confirm.
    optimizer = torch.optim.Adam(model.parameters(), lr=5e-4)

    test_loader = return_MVTecAD_loader(
        image_dir="./mvtec_anomaly_detection/grid/test/metal_contamination/",
        batch_size=10,
        train=False)
    EBM(model, test_loader, device)
Example no. 2
0
 def __call__(self, config, seed, device_str):
     """Train and evaluate a (Conv)VAE on MNIST for one experiment run.

     Args:
         config: dict-like experiment configuration; keys read here are
             'log.dir', 'ID', 'cuda', 'train.batch_size', 'eval.batch_size',
             'network.type', 'train.num_epoch'.
         seed: int random seed, applied globally and used in the log path.
         device_str: torch device string, e.g. 'cuda:0' or 'cpu'.

     Returns:
         None

     Raises:
         ValueError: if config['network.type'] is not 'VAE' or 'ConvVAE'.
     """
     # Set random seeds for reproducibility
     set_global_seeds(seed)
     # Create device
     device = torch.device(device_str)
     # Log dir for the current job: <log.dir>/<ID>/<seed>
     logdir = Path(config['log.dir']) / str(config['ID']) / str(seed)

     # Create dataset for training and testing
     train_dataset = datasets.MNIST('data/',
                                    train=True,
                                    download=True,
                                    transform=transforms.ToTensor())
     test_dataset = datasets.MNIST('data/',
                                   train=False,
                                   transform=transforms.ToTensor())
     # GPU-dependent keywords for DataLoader: pinned memory speeds up
     # host-to-device copies, so enable it only when CUDA is in use.
     kwargs = {'num_workers': 1, 'pin_memory': True} if config['cuda'] else {}
     # Create data loader for training and testing
     train_loader = DataLoader(train_dataset,
                               batch_size=config['train.batch_size'],
                               shuffle=True,
                               **kwargs)
     test_loader = DataLoader(test_dataset,
                              batch_size=config['eval.batch_size'],
                              shuffle=True,
                              **kwargs)

     # Create the model
     if config['network.type'] == 'VAE':
         model = VAE(config=config)
     elif config['network.type'] == 'ConvVAE':
         model = ConvVAE(config=config)
     else:
         # FIX: an unrecognized type previously fell through and raised a
         # confusing NameError at model.to(device); fail fast instead.
         raise ValueError(
             "Unknown network.type: {!r}".format(config['network.type']))
     model = model.to(device)

     # Create optimizer
     optimizer = optim.Adam(model.parameters(), lr=1e-3)

     # Create engine (runner unused for this supervised-style loop)
     engine = Engine(agent=model,
                     runner=None,
                     config=config,
                     device=device,
                     optimizer=optimizer,
                     train_loader=train_loader,
                     test_loader=test_loader)

     # One training pass and one evaluation pass per epoch, each logged.
     for epoch in range(config['train.num_epoch']):
         train_output = engine.train(n=epoch)
         engine.log_train(train_output, logdir=logdir, epoch=epoch)

         eval_output = engine.eval(n=epoch)
         engine.log_eval(eval_output, logdir=logdir, epoch=epoch)

     return None
Example no. 3
0
            correct = np.count_nonzero(batch["id"] -
                                       preds_cpu["id"].argmax(1) == 0)
            total += len(batch_gpu["id"])
            acc += correct
        return acc / total


# Train VAE
# Select the GPU when CUDA is available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
vae = VAE()

# Use multiple GPUs if available
# DataParallel replicates the model on every visible GPU and splits each
# batch among the replicas.
if torch.cuda.device_count() > 1:
    print("Using ", torch.cuda.device_count(), "GPUs")
    vae = nn.DataParallel(vae)
vae.to(device)

optimizer = torch.optim.Adam(vae.parameters(), lr=0.001)
losses1 = []   # training losses — population not visible in this excerpt
valError = []  # validation errors — population not visible in this excerpt

for epoch in range(50):
    # Manual iteration over the DataLoader; equivalent to
    # `for batch in train_loader`.
    train_iter = iter(train_loader)
    batch = None
    preds = None
    for i in range(len(train_loader)):
        batch = next(train_iter)
        # assumes batch is a tensor (has .to) rather than a (data, target)
        # tuple — TODO confirm against the dataset definition
        batch_gpu = batch.to(device)
        preds = vae(batch_gpu)
        # NOTE(review): the loop body appears truncated here — pred_cpu is
        # assigned but no loss/backward/step is visible in this chunk.
        pred_cpu = preds.to('cpu')