Example #1
import torch
import torch.nn.functional as F
import numpy as np
from torchvision.utils import save_image


def generate_imgs(model_path, save_path, split='val'):
    # BlurDataset is the project's dataset wrapper, defined elsewhere.
    data = BlurDataset.from_single_dataset('../../data/coco')
    dataloader = data.loader(split=split, batch_size=25)

    model = torch.load(model_path)
    model.eval()

    losses = []

    with torch.no_grad():
        for i, (imgs, tgts) in enumerate(dataloader):
            # Send imgs to the GPU; `dtype` is a module-level tensor type
            # (e.g. torch.cuda.FloatTensor).
            imgs = imgs.type(dtype)
            tgts = tgts.type(dtype)

            gen_imgs = model(imgs)

            loss = F.mse_loss(gen_imgs, tgts).item()
            print(loss)
            losses.append(loss)

            # Uncomment to save the first image of each batch to disk.
            # save_image(imgs.data[:1], save_path + "_input.png", nrow=1)
            # save_image(gen_imgs.data[:1], save_path + "_output.png", nrow=1)
            # save_image(tgts.data[:1], save_path + "_target.png", nrow=1)

    print(np.mean(losses))
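
The function relies on a module-level `dtype` and a previously saved checkpoint. A minimal, hypothetical invocation might look like this (the paths are placeholders, not taken from the original):

import torch

# `generate_imgs` expects this global; use GPU tensors when available.
dtype = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor

# Hypothetical checkpoint path and output prefix.
generate_imgs('checkpoints/unet.pt', 'outputs/sample', split='val')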

Example #2
def train_wgan(generator, discriminator, save=True, it=0):
    data_path = '../../data/coco'
    data = BlurDataset.from_single_dataset(data_path)
    train_dataloader = data.loader(batch_size=batch_size)
    val_dataloader = data.loader(split='test', batch_size=batch_size)

    train_losses = []
    val_losses = []
    for epoch in range(num_epochs):
        train_losses += train_wgan_batches(epoch, generator, discriminator, train_dataloader, batch_size, it)

        # Periodically evaluate on held-out data without gradient updates.
        if epoch % validation_rate == 0:
            val_losses += train_wgan_batches(epoch, generator, discriminator, val_dataloader, batch_size, it=0, train=False, save=False)

    if save:
        save_losses(train_losses, file_base + "train_losses.txt")
        save_losses(val_losses, file_base + "val_losses.txt")
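
train_wgan leans on several module-level names defined elsewhere in the project: batch_size, num_epochs, validation_rate, file_base, and the train_wgan_batches helper. A minimal sketch of the assumed configuration (only batch_size = 16 is hinted at in the original; the other values are illustrative):

# Assumed module-level configuration; values are illustrative guesses.
batch_size = 16              # hinted at by a commented-out line in the source
num_epochs = 50
validation_rate = 5          # run a validation pass every 5 epochs
file_base = 'results/wgan_'  # prefix for the loss log files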
            
            
Example #3
import torch

# Model
unet = UNet(img_shape).type(dtype)

# Loss
loss_func = torch.nn.MSELoss().type(dtype)

# Optimizer
optimizer = torch.optim.Adam(unet.parameters(), lr=lr, betas=(b1, b2))
'''
Load Data 
'''

data_path = '../../data/coco'

data = BlurDataset.from_single_dataset(data_path)
train_dataloader = data.loader(batch_size=batch_size)
val_dataloader = data.loader(split='val', batch_size=batch_size)
'''
Train

TODO:
    - Experiment with params
'''


def save_losses(losses, path):
    # Append one loss per line; 'a+' lets repeated runs extend the same file.
    with open(path, 'a+') as f:
        for item in losses:
            f.write("{}\n".format(item))
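
save_losses appends one value per line, so it can be called once per epoch. A minimal sketch of how the pieces above might fit together, assuming num_epochs is defined alongside the other hyperparameters and that the loaders yield (blurred, sharp) image pairs (this loop is an illustration, not part of the original):

# Hypothetical training loop wiring together the model, loss, optimizer,
# and loaders defined above. `num_epochs` is an assumed global.
for epoch in range(num_epochs):
    epoch_losses = []
    for imgs, tgts in train_dataloader:
        imgs, tgts = imgs.type(dtype), tgts.type(dtype)

        optimizer.zero_grad()
        loss = loss_func(unet(imgs), tgts)
        loss.backward()
        optimizer.step()

        epoch_losses.append(loss.item())
    save_losses(epoch_losses, 'train_losses.txt')  # hypothetical log path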