示例#1
0
#     imgs = imgs.type(Tensor) 
    
    
    # NOTE(review): fragment of a larger inference loop — the enclosing function
    # is not visible here; `Tensor`, `net_stage1`, `model`, `x_fft`, `masks`,
    # `lims_list`, `imfile`, etc. are defined outside this view.
    # Stage 1: run the FFT-domain network on the masked spectra, with the mask
    # channels concatenated in front (6 mask + 6 FFT channels per the "~6" tags).
    img_train = torch.from_numpy(x_fft).type(Tensor)                                #changed (x_fft~6)
    imgn_train = torch.from_numpy(x_masked_fft).type(Tensor)                        #changed (x_masked_fft~6)
    img_train, imgn_train = Variable(img_train.cuda()), Variable(imgn_train.cuda()) #added   (img_train~6,imgn_train~6)
        
    mask_fft = torch.from_numpy(mask_fft).type(torch.FloatTensor)                   #added   (mask_fft~6)
    mask_train_in = Variable(mask_fft.cuda())                                       #added   (mask_train_in~6)
        
    # Concatenate along the channel dimension to form the stage-1 network input.
    imgn_train_cat = torch.cat((mask_train_in, imgn_train), axis=1)  #added  (imgn_train_cat~12:mask_train_in~6,imgn_train~6)
        
    # Stage-1 output is clamped to [0, 1] (the FFT feature range used here).
    out_train = torch.clamp(net_stage1(imgn_train_cat), 0., 1.)             #(out_train~12)      
#      out_train = torch.clamp(net_stage1(imgn_train_1), 0., 1.)

    # Invert the FFT representation back to image space for the original,
    # masked, and reconstructed versions (project helper, defined elsewhere).
    img_back = get_color_images_back(img_train.cpu().numpy(), lims_list, idx_list)             #added orig img back~3
    img_back_masked = get_color_images_back(imgn_train.cpu().numpy(), lims_list, idx_list_m)   #added masked img back~3
    img_back_recon = get_color_images_back(out_train.detach().cpu().numpy(), lims_list, idx_list) #recon img back~3
    # Clamp to the [-1, 1] normalized image range before feeding stage 2.
    img_back_recon = torch.clamp(torch.from_numpy(img_back_recon), -1., 1.).type(Tensor)
        
#   sample = torch.cat((masked_imgs, img_back_recon.data, imgs.data), -2)
#   save_image(sample, "test.png", nrow=8, normalize=True)
#   import ipdb; ipdb.set_trace()
        
    # Keep an unmodified copy for display before augmenting the stage-2 input.
    masked_imgs_display = masked_imgs.clone()
    masked_imgs = torch.cat((masked_imgs, img_back_recon), axis=1) #masked_imgs=masked_imgs+ifft of 1st_stage
        
    # Stage 2: run the second model and merge its output with the known pixels.
    # NOTE(review): this merge implies `masks` is 1 on known pixels and 0 in
    # holes — confirm against the mask-generation code.
    i_outputs, i_gen_loss, i_dis_loss, i_logs = model.process(imgs, masked_imgs, masks)
    outputs_merged = (i_outputs * (1 - masks)) + (imgs * masks)
    
    basename = os.path.basename(imfile)
示例#2
0
def _image_transform(img_size):
    """Resize -> tensor -> normalize RGB to [-1, 1] (shared by all datasets)."""
    return transforms.Compose([
        transforms.Resize((img_size, img_size)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])


def _build_datasets(dataset_name, img_size):
    """Return (train_dataset, val_dataset) for the configured dataset name.

    Raises:
        ValueError: for an unsupported `dataset_name` (the original code fell
            through silently and later crashed with a NameError).
    """
    tf = _image_transform(img_size)
    if dataset_name == 'celeba':
        train = MyCelebA('../data', split="train", target_type="bbox",
                         download=False, transform=tf)
        val = MyCelebA('../data', split="valid", target_type="bbox",
                       transform=tf)
    elif dataset_name == 'dtd':
        train = MyDTD('../data', split="train", download=False, transform=tf)
        val = MyDTD('../data', split="valid", transform=tf)
    elif dataset_name == 'paris_streetview':
        train = MyParis_streetview('../data', split="train", download=False,
                                   transform=tf)
        val = MyParis_streetview('../data', split="valid", transform=tf)
    else:
        raise ValueError('unknown dataset: %r' % dataset_name)
    return train, val


def main():
    """Train the stage-1 FFT-domain DnCNN inpainting network.

    Relies on module-level configuration and helpers defined elsewhere in the
    file: `opt`, `prefix`, `datetime_f`, `use_irregular`, `use_half`, `DnCNN`,
    `weights_init_kaiming`, `get_color_fft_images_irregular`,
    `get_color_images_back`, `batch_PSNR`, and the dataset classes.
    CUDA is required (tensors are moved with `.cuda()` unconditionally).
    """
    # Load dataset
    print('Loading dataset ...\n')
    torch.manual_seed(1234)  # reproducible weight init / shuffling

    dataset_train, dataset_val = _build_datasets(opt.dataset, opt.img_size)
    loader_train = torch.utils.data.DataLoader(dataset_train,
                                               batch_size=opt.batchSize,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset_val,
                                              batch_size=opt.batchSize,
                                              shuffle=True)

    print("# of training samples: %d\n" % int(len(dataset_train)))

    # Build model: input is 6 mask channels + 6 FFT channels concatenated.
    net = DnCNN(channels=12, out_ch=6, num_of_layers=opt.num_of_layers) # channels was 6
    net.apply(weights_init_kaiming)
    # `size_average=False` is deprecated; reduction='sum' is the exact
    # equivalent (sum over all elements, no averaging).
    criterion = nn.MSELoss(reduction='sum') # ToDo: Add weighted MSE loss
    # Move to GPU
    model = net.cuda()
    criterion.cuda()
    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)
    # training
    logdir = opt.outf + '/{}{}_{}'.format(prefix, opt.dataset, datetime_f)
    os.makedirs(logdir, exist_ok=True)

    writer = SummaryWriter(opt.outf + '/{}tb_{}_{}'.format(prefix, opt.dataset, datetime_f))
    step = 0
    for epoch in range(opt.epochs):
        # Step LR schedule: drop by 10x after `opt.milestone` epochs.
        current_lr = opt.lr if epoch < opt.milestone else opt.lr / 10.
        for param_group in optimizer.param_groups:
            param_group["lr"] = current_lr
        print('learning rate %f' % current_lr)
        # train
        for i, data in enumerate(loader_train, 0):
            # training step
            model.train()
            model.zero_grad()
            optimizer.zero_grad()
            img_train = data[0]

            if use_irregular:
                (x_masked, x_fft, x_masked_fft, lims_list, idx_list,
                 idx_list_m, all_masks,
                 mask_fft) = get_color_fft_images_irregular(img_train.numpy(), True)
            else:
                # BUG FIX: `get_color_fft_images(..., dx=64, half=use_half)`
                # returns no `mask_fft`, so the code below crashed with a
                # NameError, and the 12-channel network cannot run without the
                # mask input. Fail fast with a clear message instead.
                raise NotImplementedError(
                    'regular masking path does not provide mask_fft required '
                    'by the 12-channel DnCNN input; use use_irregular=True')

            img_train = torch.from_numpy(x_fft).type(torch.FloatTensor)
            imgn_train = torch.from_numpy(x_masked_fft).type(torch.FloatTensor)
            mask_fft = torch.from_numpy(mask_fft).type(torch.FloatTensor)

            img_train, imgn_train = Variable(img_train.cuda()), Variable(imgn_train.cuda())
            mask_train_in = Variable(mask_fft.cuda())
            # Network input: mask channels concatenated in front of masked FFT.
            imgn_train_cat = torch.cat((mask_train_in, imgn_train), axis=1)

            out_train = model(imgn_train_cat)
            # Sum-reduced MSE normalized by (batch_size * 2), as originally.
            loss = criterion(out_train, img_train) / (imgn_train.size()[0] * 2)
            loss.backward()
            optimizer.step()

            # results
            model.eval()
            with torch.no_grad():  # eval forward pass needs no autograd graph
                out_train = torch.clamp(model(imgn_train_cat), 0., 1.)

            img_back = get_color_images_back(img_train.cpu().numpy(), lims_list, idx_list)
            img_back_masked = get_color_images_back(imgn_train.cpu().numpy(), lims_list, idx_list_m)
            img_back_recon = get_color_images_back(out_train.detach().cpu().numpy(), lims_list, idx_list)
            # Map images from [-1, 1] back to [0, 1] for PSNR / display.
            img_back = (torch.from_numpy(img_back) + 1) / 2
            img_back_masked = (torch.from_numpy(img_back_masked) + 1) / 2
            img_back_recon = (torch.from_numpy(img_back_recon) + 1) / 2

            psnr_train = batch_PSNR(img_back, img_back_recon, 1.)
            print("[epoch %d][%d/%d] loss: %.4f PSNR_train: %.4f" %
                (epoch+1, i+1, len(loader_train), loss.item(), psnr_train))
            if step % 10 == 0:
                # Log the scalar values
                writer.add_scalar('loss', loss.item(), step)
                writer.add_scalar('PSNR on training data', psnr_train, step)
            step += 1
            if step % 50 == 0:
                # Periodic image dumps to TensorBoard and to `logdir`.
                print('Saving images...')
                Img = utils.make_grid(img_back.data, nrow=8, normalize=True, scale_each=True)
                Imgn = utils.make_grid(img_back_masked.data, nrow=8, normalize=True, scale_each=True)
                Irecon = utils.make_grid(img_back_recon.data, nrow=8, normalize=True, scale_each=True)
                writer.add_image('clean image', Img, step//50)
                writer.add_image('noisy image', Imgn, step//50)
                writer.add_image('reconstructed image', Irecon, step//50)

                utils.save_image(img_back.data, logdir + '/clean_image_{}.png'.format(step//50))
                utils.save_image(img_back_masked.data, logdir + '/noisy_image_{}.png'.format(step//50))
                utils.save_image(img_back_recon.data, logdir + '/reconstructed_image_{}.png'.format(step//50))

        # save model once per epoch (same path each time, intentionally
        # overwritten so the file always holds the latest weights)
        torch.save(model.state_dict(), os.path.join(opt.outf, '{}{}_net.pth'.format(prefix, opt.dataset)))
示例#3
0
        else:
            # Regular (grid/center) masking path: fixed dx=16 FFT patches,
            # additionally returning the pixel-space masks.
            outputs_center_regular = get_color_fft_images(imgs.numpy(),
                                                          dx=16,
                                                          half=False,
                                                          return_mask=True)
            x_masked, x_fft, x_masked_fft, lims_list, idx_list, idx_list_m, all_masks = outputs_center_regular

        # Move inputs onto the configured tensor type (`Tensor`, defined
        # outside this view — presumably cuda.FloatTensor or FloatTensor).
        masked_imgs = torch.from_numpy(x_masked).type(Tensor)
        masks = all_masks.type(Tensor)
        imgs = imgs.type(Tensor)

        # Stage 1: FFT-domain network on the masked spectra.
        # NOTE(review): unlike the training code, no mask channels are
        # concatenated here — confirm net_stage1 expects this 6-channel input.
        img_train_1 = torch.from_numpy(x_fft).type(Tensor)
        imgn_train_1 = torch.from_numpy(x_masked_fft).type(Tensor)
        out_train = torch.clamp(net_stage1(imgn_train_1), 0., 1.)

        # Invert the FFT representation back to image space and clamp to the
        # [-1, 1] normalized range before feeding stage 2.
        img_back_recon = get_color_images_back(
            out_train.detach().cpu().numpy(), lims_list, idx_list)
        img_back_recon = torch.clamp(torch.from_numpy(img_back_recon), -1.,
                                     1.).type(Tensor)

        # Debug dump: masked input / stage-1 output / ground truth, stacked
        # vertically per sample, written to a fixed path each iteration.
        sample = torch.cat((masked_imgs, img_back_recon.data, imgs.data), -2)
        save_image(sample, "test.png", nrow=8, normalize=True)
        #         import ipdb; ipdb.set_trace()

        # Keep an unmodified copy for display, then build the stage-2 input:
        # masked image concatenated with the stage-1 reconstruction (channels).
        masked_imgs_display = masked_imgs.clone()
        masked_imgs = torch.cat((masked_imgs, img_back_recon), axis=1)

        # Stage 2: run the inpainting model and merge its output with the
        # known pixels (masks==1 keeps the original image content).
        i_outputs, i_gen_loss, i_dis_loss, i_logs = model.process(
            imgs, masked_imgs, masks)
        outputs_merged = (i_outputs * (1 - masks)) + (imgs * masks)

        # metrics