Example #1
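The examples below are assumed to share a common preamble of imports; the following is a hedged reconstruction from the names they use (`dataset`, `test_dataset`, `utils`, and `pytorch_ssim` are project-local modules from the surrounding repository):

import os
import time
import datetime

import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch import autograd
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter  # or: from tensorboardX import SummaryWriter

import dataset
import test_dataset
import utils
import pytorch_ssim
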
def WGAN_tester(opt):
    
    # Load the pretrained generator weights
    def load_model_generator(net, epoch, opt):
        model_name = 'deepfillv2_WGAN_G_epoch%d_batchsize%d.pth' % (epoch, 4)
        model_name = os.path.join('pretrained_model', model_name)
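        # NOTE: torch.load restores tensors to the device they were saved
        # from; pass map_location (e.g. map_location='cpu') when loading on
        # a different device.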
        pretrained_dict = torch.load(model_name)
        generator.load_state_dict(pretrained_dict)

    # ----------------------------------------
    #      Initialize training parameters
    # ----------------------------------------

    # configurations
    results_path = opt.results_path    # assumed: the output folder is carried on opt
    if not os.path.exists(results_path):
        os.makedirs(results_path)

    # Build networks
    generator = utils.create_generator(opt).eval()
    print('-------------------------Loading Pretrained Model-------------------------')
    load_model_generator(generator, opt.epoch, opt)
    print('-------------------------Pretrained Model Loaded-------------------------')

    # To device
    generator = generator.cuda()
    
    # ----------------------------------------
    #       Initialize training dataset
    # ----------------------------------------

    # Define the dataset
    trainset = test_dataset.InpaintDataset(opt)
    print('The overall number of images: %d' % len(trainset))

    # Define the dataloader
    dataloader = DataLoader(trainset, batch_size = opt.batch_size, shuffle = False, num_workers = opt.num_workers, pin_memory = True)
    
    # ----------------------------------------
    #            Testing
    # ----------------------------------------
    # Testing loop
    for batch_idx, (img, mask) in enumerate(dataloader):
        img = img.cuda()
        mask = mask.cuda()

        # Generator output
        with torch.no_grad():
            first_out, second_out = generator(img, mask)

        # Compose the generator outputs with the unmasked regions of the input
        first_out_wholeimg = img * (1 - mask) + first_out * mask        # in range [0, 1]
        second_out_wholeimg = img * (1 - mask) + second_out * mask      # in range [0, 1]

        masked_img = img * (1 - mask) + mask
        mask = torch.cat((mask, mask, mask), 1)
        img_list = [second_out_wholeimg]
        name_list = ['second_out']
        utils.save_sample_png(sample_folder = results_path, sample_name = '%d' % (batch_idx + 1), img_list = img_list, name_list = name_list, pixel_max_cnt = 255)
        print('----------------------batch %d has been finished----------------------' % (batch_idx + 1))
    # Second pass: evaluate on a denoising validation set. Note this rebinds
    # the name `test_dataset` (the module imported above) to a dataset object;
    # the generator here is assumed to take the noisy input twice, KPN-style.
    test_dataset = dataset.DenoisingValDataset(opt)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=opt.test_batch_size,
                                              shuffle=False,
                                              num_workers=opt.num_workers,
                                              pin_memory=True)
    sample_folder = opt.save_name
    utils.check_path(sample_folder)

    # forward
    for i, (true_input, true_target) in enumerate(test_loader):

        # To device
        true_input = true_input.cuda()
        true_target = true_target.cuda()

        # Forward propagation
        with torch.no_grad():
            fake_target = generator(true_input, true_input)
            fake_target = fake_target.unsqueeze(0)

        # Save
        print('The %d-th iteration' % (i))
        img_list = [true_input, fake_target, true_target]
        name_list = ['in', 'pred', 'gt']
        utils.save_sample_png(sample_folder=sample_folder,
                              sample_name='%d' % (i + 1),
                              img_list=img_list,
                              name_list=name_list,
                              pixel_max_cnt=255)
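A hedged invocation sketch for the tester above; the flags mirror the opt attributes it reads directly (utils.create_generator and the datasets will read further, repository-specific options), and all default values are placeholders:

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--epoch', type=int, default=40)            # which checkpoint to load
    parser.add_argument('--batch_size', type=int, default=1)
    parser.add_argument('--num_workers', type=int, default=0)
    parser.add_argument('--results_path', type=str, default='./results')
    parser.add_argument('--save_name', type=str, default='./samples')
    parser.add_argument('--test_batch_size', type=int, default=1)
    opt = parser.parse_args()
    WGAN_tester(opt)
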
def Trainer(opt):
    # ----------------------------------------
    #       Network training parameters
    # ----------------------------------------

    # cudnn benchmark
    cudnn.benchmark = opt.cudnn_benchmark

    # Handle multiple GPUs
    gpu_num = torch.cuda.device_count()
    print("There are %d GPUs used" % gpu_num)
    opt.train_batch_size *= gpu_num
    opt.num_workers *= gpu_num

    # Loss functions
    criterion_L2 = torch.nn.MSELoss().cuda()

    # Initialize SGN
    generator = utils.create_generator(opt)

    # To device
    if opt.multi_gpu:
        generator = nn.DataParallel(generator)
        generator = generator.cuda()
    else:
        generator = generator.cuda()

    # Optimizers
    optimizer_G = torch.optim.Adam(generator.parameters(),
                                   lr=opt.lr,
                                   betas=(opt.b1, opt.b2),
                                   weight_decay=opt.weight_decay)

    # Learning rate decrease
    def adjust_learning_rate(opt, epoch, optimizer):
        # Set the learning rate to the specific value
        if epoch >= opt.epoch_decreased:
            for param_group in optimizer.param_groups:
                param_group['lr'] = opt.lr_decreased

    # Save the model if pre_train == True
    def save_model(opt, epoch, iteration, len_dataset, network):
        """Save the model at "checkpoint_interval" and its multiple"""
        # Judge name
        if not os.path.exists(opt.save_root):
            os.makedirs(opt.save_root)
        # Save model dict
        if opt.multi_gpu == True:
            if opt.save_mode == 'epoch':
                modelname = 'DnCNN_epoch%d_bs%d_mu%d_sigma%d.pth' % (
                    epoch, opt.train_batch_size, opt.mu, opt.sigma)
                modelpath = os.path.join(opt.save_root, modelname)
                if (epoch % opt.save_by_epoch
                        == 0) and (iteration % len_dataset == 0):
                    torch.save(network.module.state_dict(), modelpath)
                    print(
                        'The trained model is successfully saved at epoch %d' %
                        (epoch))
            if opt.save_mode == 'iter':
                modelname = 'DnCNN_iter%d_bs%d_mu%d_sigma%d.pth' % (
                    iteration, opt.train_batch_size, opt.mu, opt.sigma)
                modelpath = os.path.join(opt.save_root, modelname)
                if iteration % opt.save_by_iter == 0:
                    torch.save(network.module.state_dict(), modelpath)
                    print(
                        'The trained model is successfully saved at iteration %d'
                        % (iteration))
        else:
            if opt.save_mode == 'epoch':
                modelname = 'DnCNN_epoch%d_bs%d_mu%d_sigma%d.pth' % (
                    epoch, opt.train_batch_size, opt.mu, opt.sigma)
                modelpath = os.path.join(opt.save_root, modelname)
                if (epoch % opt.save_by_epoch
                        == 0) and (iteration % len_dataset == 0):
                    torch.save(network.state_dict(), modelpath)
                    print(
                        'The trained model is successfully saved at epoch %d' %
                        (epoch))
            if opt.save_mode == 'iter':
                modelname = 'DnCNN_iter%d_bs%d_mu%d_sigma%d.pth' % (
                    iteration, opt.train_batch_size, opt.mu, opt.sigma)
                modelpath = os.path.join(opt.save_root, modelname)
                if iteration % opt.save_by_iter == 0:
                    torch.save(network.state_dict(), modelpath)
                    print(
                        'The trained model is successfully saved at iteration %d'
                        % (iteration))

    # ----------------------------------------
    #             Network dataset
    # ----------------------------------------

    # Define the dataset
    trainset = dataset.DenoisingDataset(opt, opt.train_root)
    valset = dataset.DenoisingDataset(opt, opt.val_root)
    print('The overall number of training images:', len(trainset))
    print('The overall number of validation images:', len(valset))

    # Define the dataloader
    train_loader = DataLoader(trainset,
                              batch_size=opt.train_batch_size,
                              shuffle=True,
                              num_workers=opt.num_workers,
                              pin_memory=True)
    val_loader = DataLoader(valset,
                            batch_size=opt.val_batch_size,
                            shuffle=False,
                            num_workers=opt.num_workers,
                            pin_memory=True)

    # ----------------------------------------
    #                 Training
    # ----------------------------------------

    # Count start time
    prev_time = time.time()

    # Tensorboard
    writer = SummaryWriter()

    # For loop training
    for epoch in range(opt.epochs):

        # Record learning rate
        for param_group in optimizer_G.param_groups:
            writer.add_scalar('data/lr', param_group['lr'], epoch)
            print('learning rate = ', param_group['lr'])

        if epoch == 0:
            iters_done = 0

        ### training
        for i, (noisy_img, img) in enumerate(train_loader):

            # To device
            noisy_img = noisy_img.cuda()
            img = img.cuda()

            # Train Generator
            optimizer_G.zero_grad()

            # Forward propagation
            recon_img = generator(noisy_img)
            loss = criterion_L2(recon_img, img)

            # Record losses
            writer.add_scalar('data/L2Loss', loss.item(), iters_done)

            # Overall Loss and optimize
            loss.backward()
            optimizer_G.step()

            # Determine approximate time left
            iters_done = epoch * len(train_loader) + i
            iters_left = opt.epochs * len(train_loader) - iters_done
            time_left = datetime.timedelta(seconds=iters_left *
                                           (time.time() - prev_time))
            prev_time = time.time()

            # Print log
            print(
                "\r[Epoch %d/%d] [Batch %d/%d] [Recon Loss: %.4f] Time_left: %s"
                % ((epoch + 1), opt.epochs, i, len(train_loader), loss.item(),
                   time_left))

            # Save model at certain epochs or iterations
            save_model(opt, (epoch + 1), (iters_done + 1), len(train_loader),
                       generator)

        # Learning rate decrease at certain epochs
        adjust_learning_rate(opt, (epoch + 1), optimizer_G)

        ### sampling
        utils.save_sample_png(opt,
                              epoch,
                              noisy_img,
                              recon_img,
                              img,
                              addition_str='training')

        ### Validation
        val_PSNR = 0
        num_of_val_image = 0

        for j, (val_noisy_img, val_img) in enumerate(val_loader):

            # To device
            # A is for input image, B is for target image
            val_noisy_img = val_noisy_img.cuda()
            val_img = val_img.cuda()

            # Forward propagation
            val_recon_img = generator(val_noisy_img)

            # Accumulate num of image and val_PSNR
            num_of_val_image += val_noisy_img.shape[0]
            val_PSNR += utils.psnr(val_recon_img, val_img,
                                   1) * val_noisy_img.shape[0]

        val_PSNR = val_PSNR / num_of_val_image

        # Record average PSNR
        writer.add_scalar('data/val_PSNR', val_PSNR, epoch)
        print('PSNR at epoch %d: %.4f' % ((epoch + 1), val_PSNR))

        ### sampling
        utils.save_sample_png(opt,
                              epoch,
                              val_noisy_img,
                              val_recon_img,
                              val_img,
                              addition_str='validation')

    writer.close()
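utils.psnr above is project-local; a minimal sketch of such a helper, assuming prediction and target are tensors scaled to [0, data_range] and that the mean PSNR of the batch is returned:

def psnr(pred, target, data_range=1.0):
    # Peak signal-to-noise ratio in dB, averaged over the whole batch
    mse = torch.mean((pred - target) ** 2)
    if mse == 0:
        return float('inf')
    return (10 * torch.log10(data_range ** 2 / mse)).item()
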
Example #4
def Pre_train(opt):
    # ----------------------------------------
    #       Network training parameters
    # ----------------------------------------

    # cudnn benchmark
    cudnn.benchmark = opt.cudnn_benchmark

    # configurations
    save_folder = opt.save_path
    sample_folder = opt.sample_path
    utils.check_path(save_folder)
    utils.check_path(sample_folder)

    # Loss functions
    criterion_L1 = torch.nn.L1Loss().cuda()

    # Initialize Generator
    generator = utils.create_generator(opt)

    # To device
    if opt.multi_gpu:
        generator = nn.DataParallel(generator)
        generator = generator.cuda()
    else:
        generator = generator.cuda()

    # Optimizers
    optimizer_G = torch.optim.Adam(generator.parameters(),
                                   lr=opt.lr_g,
                                   betas=(opt.b1, opt.b2),
                                   weight_decay=opt.weight_decay)

    # Learning rate decrease
    def adjust_learning_rate(opt, epoch, optimizer):
        target_epoch = opt.epochs - opt.lr_decrease_epoch
        remain_epoch = opt.epochs - epoch
        if epoch >= opt.lr_decrease_epoch:
            lr = opt.lr_g * remain_epoch / target_epoch
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
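    # Worked example (hypothetical numbers): with opt.epochs = 100 and
    # opt.lr_decrease_epoch = 50, target_epoch = 50; at epoch 75 the LR is
    # lr_g * 25 / 50 = 0.5 * lr_g, decaying linearly to 0 at the last epoch.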

    # Save the model if pre_train == True
    def save_model(opt, epoch, iteration, len_dataset, generator):
        """Save the model at "checkpoint_interval" and its multiple"""
        # Define the name of trained model
        if opt.save_mode == 'epoch':
            model_name = 'KPN_single_image_epoch%d_bs%d_mu%d_sigma%d.pth' % (
                epoch, opt.train_batch_size, opt.mu, opt.sigma)
        if opt.save_mode == 'iter':
            model_name = 'KPN_single_image_iter%d_bs%d_mu%d_sigma%d.pth' % (
                iteration, opt.train_batch_size, opt.mu, opt.sigma)
        save_model_path = os.path.join(opt.save_path, model_name)
        if opt.multi_gpu == True:
            if opt.save_mode == 'epoch':
                if (epoch % opt.save_by_epoch
                        == 0) and (iteration % len_dataset == 0):
                    torch.save(generator.module.state_dict(), save_model_path)
                    print(
                        'The trained model is successfully saved at epoch %d' %
                        (epoch))
            if opt.save_mode == 'iter':
                if iteration % opt.save_by_iter == 0:
                    torch.save(generator.module.state_dict(), save_model_path)
                    print(
                        'The trained model is successfully saved at iteration %d'
                        % (iteration))
        else:
            if opt.save_mode == 'epoch':
                if (epoch % opt.save_by_epoch
                        == 0) and (iteration % len_dataset == 0):
                    torch.save(generator.state_dict(), save_model_path)
                    print(
                        'The trained model is successfully saved at epoch %d' %
                        (epoch))
            if opt.save_mode == 'iter':
                if iteration % opt.save_by_iter == 0:
                    torch.save(generator.state_dict(), save_model_path)
                    print(
                        'The trained model is successfully saved at iteration %d'
                        % (iteration))

    # ----------------------------------------
    #             Network dataset
    # ----------------------------------------

    # Handle multiple GPUs
    gpu_num = torch.cuda.device_count()
    print("There are %d GPUs used" % gpu_num)
    opt.train_batch_size *= gpu_num
    #opt.val_batch_size *= gpu_num
    opt.num_workers *= gpu_num

    # Define the dataset
    trainset = dataset.DenoisingDataset(opt)
    print('The overall number of training images:', len(trainset))

    # Define the dataloader
    train_loader = DataLoader(trainset,
                              batch_size=opt.train_batch_size,
                              shuffle=True,
                              num_workers=opt.num_workers,
                              pin_memory=True)

    # ----------------------------------------
    #                 Training
    # ----------------------------------------

    # Count start time
    prev_time = time.time()

    # For loop training
    for epoch in range(opt.epochs):
        for i, (true_input, true_target) in enumerate(train_loader):

            # To device
            true_input = true_input.cuda()
            true_target = true_target.cuda()

            # Train Generator
            optimizer_G.zero_grad()
            fake_target = generator(true_input, true_input)

            # L1 Loss
            Pixellevel_L1_Loss = criterion_L1(fake_target, true_target)

            # Overall Loss and optimize
            loss = Pixellevel_L1_Loss
            loss.backward()
            optimizer_G.step()

            # Determine approximate time left
            iters_done = epoch * len(train_loader) + i
            iters_left = opt.epochs * len(train_loader) - iters_done
            time_left = datetime.timedelta(seconds=iters_left *
                                           (time.time() - prev_time))
            prev_time = time.time()

            # Print log
            print(
                "\r[Epoch %d/%d] [Batch %d/%d] [Pixellevel L1 Loss: %.4f] Time_left: %s"
                % ((epoch + 1), opt.epochs, i, len(train_loader),
                   Pixellevel_L1_Loss.item(), time_left))

            # Save model at certain epochs or iterations
            save_model(opt, (epoch + 1), (iters_done + 1), len(train_loader),
                       generator)

            # Learning rate decrease at certain epochs
            adjust_learning_rate(opt, (epoch + 1), optimizer_G)

        ### Sample data every epoch
        if (epoch + 1) % 1 == 0:
            img_list = [true_input, fake_target, true_target]
            name_list = ['in', 'pred', 'gt']
            utils.save_sample_png(sample_folder=sample_folder,
                                  sample_name='train_epoch%d' % (epoch + 1),
                                  img_list=img_list,
                                  name_list=name_list,
                                  pixel_max_cnt=255)
Example #5
def Pre_train(opt):
    # ----------------------------------------
    #       Network training parameters
    # ----------------------------------------

    #torch.cuda.set_device(1)

    # cudnn benchmark
    cudnn.benchmark = opt.cudnn_benchmark

    # configurations
    save_folder = opt.save_path
    sample_folder = opt.sample_path
    utils.check_path(save_folder)
    utils.check_path(sample_folder)

    # Loss functions
    if opt.no_gpu == False:
        criterion_L1 = torch.nn.L1Loss().cuda()
        criterion_L2 = torch.nn.MSELoss().cuda()
        #criterion_rainypred = torch.nn.L1Loss().cuda()
        criterion_ssim = pytorch_ssim.SSIM().cuda()
    else:
        criterion_L1 = torch.nn.L1Loss()
        criterion_L2 = torch.nn.MSELoss()
        #criterion_rainypred = torch.nn.L1Loss().cuda()
        criterion_ssim = pytorch_ssim.SSIM()

    # Initialize Generator
    generator = utils.create_generator(opt)

    # To device
    if opt.no_gpu == False:
        if opt.multi_gpu:
            generator = nn.DataParallel(generator)
            generator = generator.cuda()
        else:
            generator = generator.cuda()

    # Optimizers
    optimizer_G = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                          generator.parameters()),
                                   lr=opt.lr_g,
                                   betas=(opt.b1, opt.b2),
                                   weight_decay=opt.weight_decay)
    #optimizer_G = torch.optim.Adam(generator.parameters(), lr = opt.lr_g, betas = (opt.b1, opt.b2), weight_decay = opt.weight_decay)

    # pretrained model
    #encnet = encoding.models.get_model('Encnet_ResNet50s_PContext', pretrained=True).cuda()
    #encnet.eval()
    #resnet = (torch.nn.Sequential(*list(encnet.children())[:1]))[0]
    #resnet.eval()
    #encnet_feat = torch.nn.Sequential(*list(resnet.children())[:1])
    #encnet_feat.eval()

    #for param in encnet.parameters():
    #    param.requires_grad = False
    print("pretrained models loaded")

    # Learning rate decrease
    def adjust_learning_rate(opt, epoch, optimizer):
        target_epoch = opt.epochs - opt.lr_decrease_epoch
        remain_epoch = opt.epochs - epoch
        if epoch >= opt.lr_decrease_epoch:
            lr = opt.lr_g * remain_epoch / target_epoch
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr

    # Save the model if pre_train == True
    def save_model(opt, epoch, iteration, len_dataset, generator):
        """Save the model at "checkpoint_interval" and its multiple"""
        # Define the name of trained model
        """
        if opt.save_mode == 'epoch':
            model_name = 'KPN_single_image_epoch%d_bs%d_mu%d_sigma%d.pth' % (epoch, opt.train_batch_size, opt.mu, opt.sigma)
        if opt.save_mode == 'iter':
            model_name = 'KPN_single_image_iter%d_bs%d_mu%d_sigma%d.pth' % (iteration, opt.train_batch_size, opt.mu, opt.sigma)
        """
        if opt.save_mode == 'epoch':
            model_name = 'KPN_rainy_image_epoch%d_bs%d.pth' % (
                epoch, opt.train_batch_size)
        if opt.save_mode == 'iter':
            model_name = 'KPN_rainy_image_iter%d_bs%d.pth' % (
                iteration, opt.train_batch_size)
        save_model_path = os.path.join(opt.save_path, model_name)
        if opt.multi_gpu == True:
            if opt.save_mode == 'epoch':
                if (epoch % opt.save_by_epoch
                        == 0) and (iteration % len_dataset == 0):
                    torch.save(generator.module.state_dict(), save_model_path)
                    print(
                        'The trained model is successfully saved at epoch %d' %
                        (epoch))
            if opt.save_mode == 'iter':
                if iteration % opt.save_by_iter == 0:
                    torch.save(generator.module.state_dict(), save_model_path)
                    print(
                        'The trained model is successfully saved at iteration %d'
                        % (iteration))
        else:
            if opt.save_mode == 'epoch':
                if (epoch % opt.save_by_epoch
                        == 0) and (iteration % len_dataset == 0):
                    torch.save(generator.state_dict(), save_model_path)
                    print(
                        'The trained model is successfully saved at epoch %d' %
                        (epoch))
            if opt.save_mode == 'iter':
                if iteration % opt.save_by_iter == 0:
                    torch.save(generator.state_dict(), save_model_path)
                    print(
                        'The trained model is successfully saved at iteration %d'
                        % (iteration))

    # ----------------------------------------
    #             Network dataset
    # ----------------------------------------

    # Handle multiple GPUs
    #os.environ["CUDA_VISIBLE_DEVICES"] = ""
    gpu_num = torch.cuda.device_count()
    print("There are %d GPUs used" % gpu_num)
    #if opt.no_gpu == False:
    #opt.train_batch_size *= gpu_num
    #opt.val_batch_size *= gpu_num
    #opt.num_workers *= gpu_num

    #print(opt.multi_gpu)
    '''
    print(opt.no_gpu == False)
    print(opt.no_gpu)
    print(gpu_num)
    print(opt.train_batch_size)
    '''

    # Define the dataset
    trainset = dataset.DenoisingDataset(opt)
    print('The overall number of training images:', len(trainset))

    # Define the dataloader
    train_loader = DataLoader(trainset,
                              batch_size=opt.train_batch_size,
                              shuffle=True,
                              num_workers=opt.num_workers,
                              pin_memory=True)

    # ----------------------------------------
    #                 Training
    # ----------------------------------------

    # Count start time
    prev_time = time.time()

    # For loop training
    for epoch in range(opt.epochs):
        for i, (true_input, true_target) in enumerate(train_loader):

            #print("in epoch %d" % i)

            if opt.no_gpu == False:
                # To device
                true_input = true_input.cuda()
                true_target = true_target.cuda()

            # Train Generator
            optimizer_G.zero_grad()
            fake_target = generator(true_input, true_input)

            ssim_loss = -criterion_ssim(true_target, fake_target)
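            # SSIM measures similarity (higher is better), so its negation is
            # minimized; it enters the total loss below with weight 0.2.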
            '''
            #trans for enc_net
            enc_trans = transforms.Compose([transforms.Normalize([.485, .456, .406], [.229, .224, .225])])
            fake_target_norm = torch.from_numpy(np.zeros(fake_target.size())).cuda()
            true_target_norm = torch.from_numpy(np.zeros(true_target.size())).cuda()
            for j in range(fake_target.size()[0]):
                fake_target_norm[j] = enc_trans(fake_target[j])
                true_target_norm[j] = enc_trans(true_target[j])
            '''

            #print(fake_target_norm.size())
            #enc_pred = encnet.evaluate(fake_target_norm.type(torch.FloatTensor).cuda())
            #enc_pred = encnet(fake_target_norm.type(torch.FloatTensor).cuda())[0]
            #enc_gt = encnet(true_target_norm.type(torch.FloatTensor).cuda())[0]
            '''
            enc_feat_pred = encnet_feat(fake_target_norm.type(torch.FloatTensor).cuda())[0]
            enc_feat_gt = encnet_feat(true_target_norm.type(torch.FloatTensor).cuda())[0]
            '''

            #rain_layer_gt = true_input - true_target
            #rain_layer_pred = true_input - fake_target
            #rainy_pred = true_input - (fake_target * rain_layer_pred)
            #print(type(true_input))
            #print(type(fake_target))

            # L1 Loss
            Pixellevel_L1_Loss = criterion_L1(fake_target, true_target)
            #enc_loss = criterion_L1(enc_pred, enc_gt)
            #enc_feat_loss = criterion_L1(enc_feat_pred, enc_feat_gt)
            #Pixellevel_L2_Loss = criterion_L2(fake_target, true_target)
            #Pixellevel_L2_Loss = criterion_L2(rain_layer_pred, rain_layer_gt)
            #Loss_rainypred = criterion_rainypred(rainy_pred, true_input)

            # Overall Loss and optimize
            loss = Pixellevel_L1_Loss + 0.2 * ssim_loss
            #loss = Pixellevel_L1_Loss
            #loss = Pixellevel_L1_Loss + Pixellevel_L2_Loss + Loss_rainypred
            loss.backward()
            optimizer_G.step()

            #check
            '''
            for j in encnet.named_parameters():
                print(j)
                break
            '''

            # Determine approximate time left
            iters_done = epoch * len(train_loader) + i
            iters_left = opt.epochs * len(train_loader) - iters_done
            time_left = datetime.timedelta(seconds=iters_left *
                                           (time.time() - prev_time))
            prev_time = time.time()

            # Print log
            print(
                "\r[Epoch %d/%d] [Batch %d/%d] [Loss: %.4f %.4f] Time_left: %s"
                % ((epoch + 1), opt.epochs, i, len(train_loader),
                   Pixellevel_L1_Loss.item(), ssim_loss.item(), time_left))

            # Save model at certain epochs or iterations
            save_model(opt, (epoch + 1), (iters_done + 1), len(train_loader),
                       generator)

            # Learning rate decrease at certain epochs
            adjust_learning_rate(opt, (epoch + 1), optimizer_G)

        ### Sample data every epoch
        if (epoch + 1) % 1 == 0:
            img_list = [true_input, fake_target, true_target]
            name_list = ['in', 'pred', 'gt']
            utils.save_sample_png(sample_folder=sample_folder,
                                  sample_name='train_epoch%d' % (epoch + 1),
                                  img_list=img_list,
                                  name_list=name_list,
                                  pixel_max_cnt=255)
Example #6
def Continue_train_WGAN(opt):
    # ----------------------------------------
    #       Network training parameters
    # ----------------------------------------

    # cudnn benchmark
    cudnn.benchmark = opt.cudnn_benchmark

    # configurations
    save_folder = os.path.join(opt.save_path, opt.task_name)
    sample_folder = os.path.join(opt.sample_path, opt.task_name)
    utils.check_path(save_folder)
    utils.check_path(sample_folder)

    # Loss functions
    criterion_L1 = torch.nn.L1Loss().cuda()

    # Initialize Generator
    generator = utils.create_generator(opt)
    discriminator = utils.create_discriminator(opt)

    # To device
    if opt.multi_gpu:
        generator = nn.DataParallel(generator)
        generator = generator.cuda()
        discriminator = nn.DataParallel(discriminator)
        discriminator = discriminator.cuda()
    else:
        generator = generator.cuda()
        discriminator = discriminator.cuda()

    # Optimizers
    optimizer_G = torch.optim.Adam(generator.parameters(),
                                   lr=opt.lr_g,
                                   betas=(opt.b1, opt.b2),
                                   weight_decay=opt.weight_decay)
    optimizer_D = torch.optim.Adam(discriminator.parameters(),
                                   lr=opt.lr_d,
                                   betas=(opt.b1, opt.b2))

    # Learning rate decrease
    def adjust_learning_rate(opt, epoch, optimizer):
        target_epoch = opt.epochs - opt.lr_decrease_epoch
        remain_epoch = opt.epochs - epoch
        if epoch >= opt.lr_decrease_epoch:
            lr = opt.lr_g * remain_epoch / target_epoch
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr

    # Save the model if pre_train == True
    def save_model(opt, epoch, iteration, len_dataset, generator):
        """Save the model at "checkpoint_interval" and its multiple"""
        if opt.multi_gpu == True:
            if opt.save_mode == 'epoch':
                if (epoch % opt.save_by_epoch
                        == 0) and (iteration % len_dataset == 0):
                    torch.save(
                        generator.module.state_dict(),
                        'DeblurGANv1_wgan_epoch%d_bs%d.pth' %
                        (epoch, opt.train_batch_size))
                    print(
                        'The trained model is successfully saved at epoch %d' %
                        (epoch))
            if opt.save_mode == 'iter':
                if iteration % opt.save_by_iter == 0:
                    torch.save(
                        generator.module.state_dict(),
                        'DeblurGANv1_wgan_iter%d_bs%d.pth' %
                        (iteration, opt.train_batch_size))
                    print(
                        'The trained model is successfully saved at iteration %d'
                        % (iteration))
        else:
            if opt.save_mode == 'epoch':
                if (epoch % opt.save_by_epoch
                        == 0) and (iteration % len_dataset == 0):
                    torch.save(
                        generator.state_dict(),
                        'DeblurGANv1_wgan_epoch%d_bs%d.pth' %
                        (epoch, opt.train_batch_size))
                    print(
                        'The trained model is successfully saved at epoch %d' %
                        (epoch))
            if opt.save_mode == 'iter':
                if iteration % opt.save_by_iter == 0:
                    torch.save(
                        generator.state_dict(),
                        'DeblurGANv1_wgan_iter%d_bs%d.pth' %
                        (iteration, opt.train_batch_size))
                    print(
                        'The trained model is successfully saved at iteration %d'
                        % (iteration))

    # ----------------------------------------
    #             Network dataset
    # ----------------------------------------

    # Handle multiple GPUs
    gpu_num = torch.cuda.device_count()
    print("There are %d GPUs used" % gpu_num)
    opt.train_batch_size *= gpu_num
    #opt.val_batch_size *= gpu_num
    opt.num_workers *= gpu_num

    # Define the dataset
    trainset = dataset.DeblurDataset(opt, 'train')
    valset = dataset.DeblurDataset(opt, 'val')
    print('The overall number of training images:', len(trainset))
    print('The overall number of validation images:', len(valset))

    # Define the dataloader
    train_loader = DataLoader(trainset,
                              batch_size=opt.train_batch_size,
                              shuffle=True,
                              num_workers=opt.num_workers,
                              pin_memory=True)
    val_loader = DataLoader(valset,
                            batch_size=opt.val_batch_size,
                            shuffle=True,
                            num_workers=opt.num_workers,
                            pin_memory=True)

    # ----------------------------------------
    #                 Training
    # ----------------------------------------

    # Count start time
    prev_time = time.time()

    # For loop training
    for epoch in range(opt.epochs):
        for i, (true_input, true_target) in enumerate(train_loader):

            # To device
            true_input = true_input.cuda()
            true_target = true_target.cuda()

            # Train Discriminator
            for j in range(opt.additional_training_d):

                optimizer_D.zero_grad()

                # Generator output
                fake_target = generator(true_input)

                # Fake samples
                fake_scalar_d = discriminator(true_input, fake_target.detach())
                true_scalar_d = discriminator(true_input, true_target)
                # Overall Loss and optimize
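                # WGAN critic objective: maximize E[D(real)] - E[D(fake)],
                # i.e. minimize the negation below. Note this plain WGAN form
                # has no Lipschitz constraint (no weight clipping or gradient
                # penalty), which the original WGAN formulation requires.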
                loss_D = -torch.mean(true_scalar_d) + torch.mean(fake_scalar_d)
                loss_D.backward()
                optimizer_D.step()

            # Train Generator
            optimizer_G.zero_grad()
            fake_target = generator(true_input)

            # L1 Loss
            Pixellevel_L1_Loss = criterion_L1(fake_target, true_target)

            # GAN Loss
            fake_scalar = discriminator(true_input, fake_target)
            GAN_Loss = -torch.mean(fake_scalar)

            # Overall Loss and optimize
            loss = opt.lambda_l1 * Pixellevel_L1_Loss + GAN_Loss
            loss.backward()
            optimizer_G.step()

            # Determine approximate time left
            iters_done = epoch * len(train_loader) + i
            iters_left = opt.epochs * len(train_loader) - iters_done
            time_left = datetime.timedelta(seconds=iters_left *
                                           (time.time() - prev_time))
            prev_time = time.time()

            # Print log
            print(
                "\r[Epoch %d/%d] [Batch %d/%d] [Pixellevel L1 Loss: %.4f] [GAN Loss: %.4f] [D Loss: %.4f] Time_left: %s"
                % ((epoch + 1), opt.epochs, i, len(train_loader),
                   Pixellevel_L1_Loss.item(), GAN_Loss.item(), loss_D.item(),
                   time_left))

            # Save model at certain epochs or iterations
            save_model(opt, (epoch + 1), (iters_done + 1), len(train_loader),
                       generator)

            # Learning rate decrease at certain epochs
            adjust_learning_rate(opt, (epoch + 1), optimizer_G)
            adjust_learning_rate(opt, (epoch + 1), optimizer_D)

        ### Sample data every epoch
        if (epoch + 1) % 1 == 0:
            img_list = [fake_target, true_target]
            name_list = ['pred', 'gt']
            utils.save_sample_png(sample_folder=sample_folder,
                                  sample_name='train_epoch%d' % (epoch + 1),
                                  img_list=img_list,
                                  name_list=name_list,
                                  pixel_max_cnt=255)

        ### Validation
        val_PSNR = 0
        num_of_val_image = 0

        for j, (true_input, true_target) in enumerate(val_loader):

            # To device
            # A is for input image, B is for target image
            true_input = true_input.cuda()
            true_target = true_target.cuda()

            # Forward propagation
            with torch.no_grad():
                fake_target = generator(true_input)

            # Accumulate num of image and val_PSNR
            num_of_val_image += true_input.shape[0]
            val_PSNR += utils.psnr(fake_target, true_target,
                                   1) * true_input.shape[0]
        val_PSNR = val_PSNR / num_of_val_image

        ### Sample data every epoch
        if (epoch + 1) % 1 == 0:
            img_list = [fake_target, true_target]
            name_list = ['pred', 'gt']
            utils.save_sample_png(sample_folder=sample_folder,
                                  sample_name='val_epoch%d' % (epoch + 1),
                                  img_list=img_list,
                                  name_list=name_list,
                                  pixel_max_cnt=255)

        # Record average PSNR
        print('PSNR at epoch %d: %.4f' % ((epoch + 1), val_PSNR))
def trainer_LSGAN(opt):

    # cudnn benchmark
    cudnn.benchmark = opt.cudnn_benchmark

    # configurations
    if not os.path.exists(opt.save_path):
        os.makedirs(opt.save_path)

    # Handle multiple GPUs
    gpu_num = torch.cuda.device_count()
    print("There are %d GPUs used" % gpu_num)
    opt.batch_size *= gpu_num
    opt.num_workers *= gpu_num

    # Loss functions
    criterion_L1 = torch.nn.L1Loss().cuda()
    criterion_MSE = torch.nn.MSELoss().cuda()

    # Initialize Generator
    generator = utils.create_generator(opt)
    discriminator = utils.create_discriminator(opt)
    perceptualnet = utils.create_perceptualnet(opt)

    # To device
    if opt.multi_gpu:
        generator = nn.DataParallel(generator)
        generator = generator.cuda()
        discriminator = nn.DataParallel(discriminator)
        discriminator = discriminator.cuda()
        perceptualnet = nn.DataParallel(perceptualnet)
        perceptualnet = perceptualnet.cuda()
    else:
        generator = generator.cuda()
        discriminator = discriminator.cuda()
        perceptualnet = perceptualnet.cuda()

    # Optimizers
    optimizer_G = torch.optim.Adam(generator.parameters(),
                                   lr=opt.lr_g,
                                   betas=(opt.b1, opt.b2))
    optimizer_D = torch.optim.Adam(discriminator.parameters(),
                                   lr=opt.lr_d,
                                   betas=(opt.b1, opt.b2))

    # Learning rate decrease
    def adjust_learning_rate(opt, epoch, iteration, optimizer):
        # Set the learning rate to the initial LR decayed by "lr_decrease_factor" every "lr_decrease_epoch" epochs
        if opt.lr_decrease_mode == 'epoch':
            lr = opt.lr_g * (opt.lr_decrease_factor
                             **(epoch // opt.lr_decrease_epoch))
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
        if opt.lr_decrease_mode == 'iter':
            lr = opt.lr_g * (opt.lr_decrease_factor
                             **(iteration // opt.lr_decrease_iter))
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
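    # Worked example (hypothetical numbers): with lr_decrease_factor = 0.5
    # and lr_decrease_epoch = 10, the LR halves every 10 epochs:
    # lr = lr_g * 0.5 ** (epoch // 10).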

    # Save the model
    def save_model(opt, epoch, iteration, len_dataset, generator):
        """Save the model at "checkpoint_interval" and its multiple"""
        if opt.save_mode == 'epoch':
            model_name = 'SCGAN_%s_epoch%d_bs%d.pth' % (opt.gan_mode, epoch,
                                                        opt.batch_size)
        if opt.save_mode == 'iter':
            model_name = 'SCGAN_%s_iter%d_bs%d.pth' % (opt.gan_mode, iteration,
                                                       opt.batch_size)
        save_name = os.path.join(opt.save_path, model_name)
        if opt.multi_gpu == True:
            if opt.save_mode == 'epoch':
                if (epoch % opt.save_by_epoch
                        == 0) and (iteration % len_dataset == 0):
                    torch.save(generator.module.state_dict(), save_name)
                    print('The trained model is saved as %s' % (model_name))
            if opt.save_mode == 'iter':
                if iteration % opt.save_by_iter == 0:
                    torch.save(generator.module.state_dict(), save_name)
                    print('The trained model is saved as %s' % (model_name))
        else:
            if opt.save_mode == 'epoch':
                if (epoch % opt.save_by_epoch
                        == 0) and (iteration % len_dataset == 0):
                    torch.save(generator.state_dict(), save_name)
                    print('The trained model is saved as %s' % (model_name))
            if opt.save_mode == 'iter':
                if iteration % opt.save_by_iter == 0:
                    torch.save(generator.state_dict(), save_name)
                    print('The trained model is saved as %s' % (model_name))

    # ----------------------------------------
    #             Network dataset
    # ----------------------------------------

    # Define the dataset
    trainset = dataset.ColorizationDataset(opt)
    print('The overall number of images:', len(trainset))

    # Define the dataloader
    dataloader = DataLoader(trainset,
                            batch_size=opt.batch_size,
                            shuffle=True,
                            num_workers=opt.num_workers,
                            pin_memory=True)

    # ----------------------------------------
    #                 Training
    # ----------------------------------------

    # Count start time
    prev_time = time.time()

    # Tensor type
    Tensor = torch.cuda.FloatTensor

    # For loop training
    for epoch in range(opt.epochs):
        for i, (true_L, true_RGB, true_sal) in enumerate(dataloader):

            # To device
            true_L = true_L.cuda()
            true_RGB = true_RGB.cuda()
            true_sal = true_sal.cuda()
            true_sal = torch.cat((true_sal, true_sal, true_sal), 1)
            true_attn = true_RGB.mul(true_sal)

            # Adversarial ground truth
            valid = Tensor(np.ones((true_L.shape[0], 1, 30, 30)))
            fake = Tensor(np.zeros((true_L.shape[0], 1, 30, 30)))
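            # The discriminator is a PatchGAN that scores a 30x30 grid of
            # patches; these ones/zeros serve as the real/fake regression
            # targets for the least-squares (MSE) GAN loss below.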

            ### Train Discriminator
            optimizer_D.zero_grad()

            # Generator output
            fake_RGB, fake_sal = generator(true_L)

            # Fake colorizations
            fake_scalar_d = discriminator(true_L, fake_RGB.detach())
            loss_fake = criterion_MSE(fake_scalar_d, fake)

            # True colorizations
            true_scalar_d = discriminator(true_L, true_RGB)
            loss_true = criterion_MSE(true_scalar_d, valid)

            # Overall Loss and optimize
            loss_D = 0.5 * (loss_fake + loss_true)
            loss_D.backward()
            optimizer_D.step()

            ### Train Generator
            optimizer_G.zero_grad()

            fake_RGB, fake_sal = generator(true_L)

            # Pixel-level L1 Loss
            loss_L1 = criterion_L1(fake_RGB, true_RGB)

            # Attention Loss
            fake_sal = torch.cat((fake_sal, fake_sal, fake_sal), 1)
            fake_attn = fake_RGB.mul(fake_sal)
            loss_attn = criterion_L1(fake_attn, true_attn)

            # Perceptual Loss
            feature_fake_RGB = perceptualnet(fake_RGB)
            feature_true_RGB = perceptualnet(true_RGB)
            loss_percep = criterion_L1(feature_fake_RGB, feature_true_RGB)

            # GAN Loss
            fake_scalar = discriminator(true_L, fake_RGB)
            loss_GAN = criterion_MSE(fake_scalar, valid)

            # Overall Loss and optimize
            loss_G = opt.lambda_l1 * loss_L1 + opt.lambda_gan * loss_GAN + opt.lambda_percep * loss_percep + opt.lambda_attn * loss_attn
            loss_G.backward()
            optimizer_G.step()

            # Determine approximate time left
            iters_done = epoch * len(dataloader) + i
            iters_left = opt.epochs * len(dataloader) - iters_done
            time_left = datetime.timedelta(seconds=iters_left *
                                           (time.time() - prev_time))
            prev_time = time.time()

            # Print log
            print(
                "\r[Epoch %d/%d] [Batch %d/%d] [Pixel-level Loss: %.4f] [Attention Loss: %.4f] [Perceptual Loss: %.4f] [D Loss: %.4f] [G Loss: %.4f] Time_left: %s"
                % ((epoch + 1), opt.epochs, i, len(dataloader), loss_L1.item(),
                   loss_attn.item(), loss_percep.item(), loss_D.item(),
                   loss_GAN.item(), time_left))

            # Save model at certain epochs or iterations
            save_model(opt, (epoch + 1), (iters_done + 1), len(dataloader),
                       generator)

            # Learning rate decrease at certain epochs
            adjust_learning_rate(opt, (epoch + 1), (iters_done + 1),
                                 optimizer_G)
            adjust_learning_rate(opt, (epoch + 1), (iters_done + 1),
                                 optimizer_D)

        ### Sample data every epoch
        if (epoch + 1) % 1 == 0:
            img_list = [fake_RGB, true_RGB]
            name_list = ['pred', 'gt']
            utils.save_sample_png(sample_folder=opt.sample_path,
                                  sample_name='epoch%d' % (epoch + 1),
                                  img_list=img_list,
                                  name_list=name_list)
def trainer_WGANGP(opt):

    # cudnn benchmark
    cudnn.benchmark = opt.cudnn_benchmark

    # configurations
    if not os.path.exists(opt.save_path):
        os.makedirs(opt.save_path)

    # Handle multiple GPUs
    gpu_num = torch.cuda.device_count()
    print("There are %d GPUs used" % gpu_num)
    opt.batch_size *= gpu_num
    opt.num_workers *= gpu_num

    # Loss functions
    criterion_L1 = torch.nn.L1Loss().cuda()

    # Initialize Generator
    generator = utils.create_generator(opt)
    discriminator = utils.create_discriminator(opt)
    perceptualnet = utils.create_perceptualnet(opt)

    # To device
    if opt.multi_gpu:
        generator = nn.DataParallel(generator)
        generator = generator.cuda()
        discriminator = nn.DataParallel(discriminator)
        discriminator = discriminator.cuda()
        perceptualnet = nn.DataParallel(perceptualnet)
        perceptualnet = perceptualnet.cuda()
    else:
        generator = generator.cuda()
        discriminator = discriminator.cuda()
        perceptualnet = perceptualnet.cuda()

    # Optimizers
    optimizer_G = torch.optim.Adam(generator.parameters(),
                                   lr=opt.lr_g,
                                   betas=(opt.b1, opt.b2))
    optimizer_D = torch.optim.Adam(discriminator.parameters(),
                                   lr=opt.lr_d,
                                   betas=(opt.b1, opt.b2))

    # Learning rate decrease
    def adjust_learning_rate(opt, epoch, iteration, optimizer):
        # Set the learning rate to the initial LR decayed by "lr_decrease_factor" every "lr_decrease_epoch" epochs
        if opt.lr_decrease_mode == 'epoch':
            lr = opt.lr_g * (opt.lr_decrease_factor
                             **(epoch // opt.lr_decrease_epoch))
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
        if opt.lr_decrease_mode == 'iter':
            lr = opt.lr_g * (opt.lr_decrease_factor
                             **(iteration // opt.lr_decrease_iter))
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr

    # Save the model
    def save_model(opt, epoch, iteration, len_dataset, generator):
        """Save the model at "checkpoint_interval" and its multiple"""
        if opt.save_mode == 'epoch':
            model_name = 'SCGAN_%s_epoch%d_bs%d.pth' % (opt.gan_mode, epoch,
                                                        opt.batch_size)
        if opt.save_mode == 'iter':
            model_name = 'SCGAN_%s_iter%d_bs%d.pth' % (opt.gan_mode, iteration,
                                                       opt.batch_size)
        save_name = os.path.join(opt.save_path, model_name)
        if opt.multi_gpu == True:
            if opt.save_mode == 'epoch':
                if (epoch % opt.save_by_epoch
                        == 0) and (iteration % len_dataset == 0):
                    torch.save(generator.module.state_dict(), save_name)
                    print('The trained model is saved as %s' % (model_name))
            if opt.save_mode == 'iter':
                if iteration % opt.save_by_iter == 0:
                    torch.save(generator.module.state_dict(), save_name)
                    print('The trained model is saved as %s' % (model_name))
        else:
            if opt.save_mode == 'epoch':
                if (epoch % opt.save_by_epoch
                        == 0) and (iteration % len_dataset == 0):
                    torch.save(generator.state_dict(), save_name)
                    print('The trained model is saved as %s' % (model_name))
            if opt.save_mode == 'iter':
                if iteration % opt.save_by_iter == 0:
                    torch.save(generator.state_dict(), save_name)
                    print('The trained model is saved as %s' % (model_name))

    # ----------------------------------------
    #             Network dataset
    # ----------------------------------------

    # Define the dataset
    trainset = dataset.ColorizationDataset(opt)
    print('The overall number of images:', len(trainset))

    # Define the dataloader
    dataloader = DataLoader(trainset,
                            batch_size=opt.batch_size,
                            shuffle=True,
                            num_workers=opt.num_workers,
                            pin_memory=True)

    # ----------------------------------------
    #                 Training
    # ----------------------------------------

    # Count start time
    prev_time = time.time()

    # Tensor type
    Tensor = torch.cuda.FloatTensor

    # Calculate the gradient penalty loss for WGAN-GP
    def compute_gradient_penalty(D, input_samples, real_samples, fake_samples):
        # Random weight term for interpolation between real and fake samples
        alpha = Tensor(np.random.random((real_samples.size(0), 1, 1, 1)))
        # Get random interpolation between real and fake samples
        interpolates = (alpha * real_samples +
                        ((1 - alpha) * fake_samples)).requires_grad_(True)
        d_interpolates = D(input_samples, interpolates)
        # For PatchGAN
        fake = Variable(Tensor(real_samples.shape[0], 1, 30, 30).fill_(1.0),
                        requires_grad=False)
        # Get gradient w.r.t. interpolates
        gradients = autograd.grad(
            outputs=d_interpolates,
            inputs=interpolates,
            grad_outputs=fake,
            create_graph=True,
            retain_graph=True,
            only_inputs=True,
        )[0]
        gradients = gradients.view(gradients.size(0), -1)
        gradient_penalty = ((gradients.norm(2, dim=1) - 1)**2).mean()
        return gradient_penalty
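    # The penalty drives the critic's gradient norm toward 1 along random
    # interpolations between real and fake samples, a soft version of the
    # 1-Lipschitz constraint from the WGAN-GP paper (Gulrajani et al., 2017).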

    # For loop training
    for epoch in range(opt.epochs):
        for i, (true_L, true_RGB, true_sal) in enumerate(dataloader):

            # To device
            true_L = true_L.cuda()
            true_RGB = true_RGB.cuda()
            true_sal = true_sal.cuda()
            true_sal = torch.cat((true_sal, true_sal, true_sal), 1)
            true_attn = true_RGB.mul(true_sal)

            ### Train Discriminator
            optimizer_D.zero_grad()

            # Generator output
            fake_RGB, fake_sal = generator(true_L)

            # Fake colorizations
            fake_scalar_d = discriminator(true_L, fake_RGB.detach())

            # True colorizations
            true_scalar_d = discriminator(true_L, true_RGB)

            # Gradient penalty
            gradient_penalty = compute_gradient_penalty(
                discriminator, true_L.data, true_RGB.data, fake_RGB.data)

            # Overall Loss and optimize
            loss_D = -torch.mean(true_scalar_d) + torch.mean(
                fake_scalar_d) + opt.lambda_gp * gradient_penalty
            loss_D.backward()
            optimizer_D.step()

            ### Train Generator
            optimizer_G.zero_grad()

            fake_RGB, fake_sal = generator(true_L)

            # Pixel-level L1 Loss
            loss_L1 = criterion_L1(fake_RGB, true_RGB)

            # Attention Loss
            fake_sal = torch.cat((fake_sal, fake_sal, fake_sal), 1)
            fake_attn = fake_RGB.mul(fake_sal)
            loss_attn = criterion_L1(fake_attn, true_attn)

            # Perceptual Loss
            feature_fake_RGB = perceptualnet(fake_RGB)
            feature_true_RGB = perceptualnet(true_RGB)
            loss_percep = criterion_L1(feature_fake_RGB, feature_true_RGB)

            # GAN Loss
            fake_scalar = discriminator(true_L, fake_RGB)
            loss_GAN = -torch.mean(fake_scalar)

            # Overall Loss and optimize
            loss_G = opt.lambda_l1 * loss_L1 + opt.lambda_gan * loss_GAN + opt.lambda_percep * loss_percep + opt.lambda_attn * loss_attn
            loss_G.backward()
            optimizer_G.step()

            # Determine approximate time left
            iters_done = epoch * len(dataloader) + i
            iters_left = opt.epochs * len(dataloader) - iters_done
            time_left = datetime.timedelta(seconds=iters_left *
                                           (time.time() - prev_time))
            prev_time = time.time()

            # Print log
            print(
                "\r[Epoch %d/%d] [Batch %d/%d] [Pixel-level Loss: %.4f] [Attention Loss: %.4f] [Perceptual Loss: %.4f] [D Loss: %.4f] [G Loss: %.4f] Time_left: %s"
                % ((epoch + 1), opt.epochs, i, len(dataloader), loss_L1.item(),
                   loss_attn.item(), loss_percep.item(), loss_D.item(),
                   loss_GAN.item(), time_left))

            # Save model at certain epochs or iterations
            save_model(opt, (epoch + 1), (iters_done + 1), len(dataloader),
                       generator)

            # Learning rate decrease at certain epochs
            adjust_learning_rate(opt, (epoch + 1), (iters_done + 1),
                                 optimizer_G)
            adjust_learning_rate(opt, (epoch + 1), (iters_done + 1),
                                 optimizer_D)

        ### Sample data every epoch
        if (epoch + 1) % 1 == 0:
            img_list = [fake_RGB, true_RGB]
            name_list = ['pred', 'gt']
            utils.save_sample_png(sample_folder=opt.sample_path,
                                  sample_name='epoch%d' % (epoch + 1),
                                  img_list=img_list,
                                  name_list=name_list)
    # Final evaluation over a held-out test set; test_loader and test_dataset
    # are assumed to be defined analogously to the loaders above.
    val_PSNR = 0
    val_SSIM = 0
    for i, (in_img, RGBout_img) in enumerate(test_loader):
        # To device
        # A is for input image, B is for target image
        in_img = in_img.cuda()
        RGBout_img = RGBout_img.cuda()

        # Forward propagation
        with torch.no_grad():
            out, sal = generator(in_img)

        # Sample data every iter
        img_list = [out, RGBout_img]
        name_list = ['pred', 'gt']
        utils.save_sample_png(sample_folder=opt.savepath,
                              sample_name='%d' % (i),
                              img_list=img_list,
                              name_list=name_list)

        # PSNR
        val_PSNR_this = utils.psnr(out, RGBout_img, 1) * in_img.shape[0]
        print('The %d-th image PSNR %.4f' % (i, val_PSNR_this))
        val_PSNR = val_PSNR + val_PSNR_this
        # SSIM
        val_SSIM_this = utils.ssim(out, RGBout_img) * in_img.shape[0]
        print('The %d-th image SSIM %.4f' % (i, val_SSIM_this))
        val_SSIM = val_SSIM + val_SSIM_this

    val_PSNR = val_PSNR / len(test_dataset)
    print('The average PSNR is', val_PSNR)
    val_SSIM = val_SSIM / len(test_dataset)
    print('The average SSIM is', val_SSIM)
Example #10
def LSGAN_trainer(opt):
    # ----------------------------------------
    #      Initialize training parameters
    # ----------------------------------------

    # cudnn benchmark accelerates the network
    cudnn.benchmark = opt.cudnn_benchmark

    # configurations
    save_folder = opt.save_path
    sample_folder = opt.sample_path
    if not os.path.exists(save_folder):
        os.makedirs(save_folder)
    if not os.path.exists(sample_folder):
        os.makedirs(sample_folder)

    # Build networks
    generator = utils.create_generator(opt)
    discriminator = utils.create_discriminator(opt)
    perceptualnet = utils.create_perceptualnet()

    # To device
    if opt.multi_gpu:
        generator = nn.DataParallel(generator)
        discriminator = nn.DataParallel(discriminator)
        perceptualnet = nn.DataParallel(perceptualnet)
        generator = generator.cuda()
        discriminator = discriminator.cuda()
        perceptualnet = perceptualnet.cuda()
    else:
        generator = generator.cuda()
        discriminator = discriminator.cuda()
        perceptualnet = perceptualnet.cuda()

    # Loss functions
    L1Loss = nn.L1Loss()
    MSELoss = nn.MSELoss()

    # Optimizers
    optimizer_g = torch.optim.Adam(generator.parameters(),
                                   lr=opt.lr_g,
                                   betas=(opt.b1, opt.b2),
                                   weight_decay=opt.weight_decay)
    optimizer_d = torch.optim.Adam(discriminator.parameters(),
                                   lr=opt.lr_d,
                                   betas=(opt.b1, opt.b2),
                                   weight_decay=opt.weight_decay)

    # Learning rate decrease
    def adjust_learning_rate(lr_in, optimizer, epoch, opt):
        """Set the learning rate to the initial LR decayed by "lr_decrease_factor" every "lr_decrease_epoch" epochs"""
        lr = lr_in * (opt.lr_decrease_factor**(epoch // opt.lr_decrease_epoch))
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
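    # Worked example (values assumed for illustration): with lr_in = 2e-4,
    # lr_decrease_factor = 0.5 and lr_decrease_epoch = 10, the rate is 2e-4
    # for epochs 1-9, 1e-4 for epochs 10-19, 5e-5 for epochs 20-29, and so
    # on, since epoch // lr_decrease_epoch increments every 10 epochs.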

    # Save the model at multiples of "checkpoint_interval"
    def save_model(net, epoch, opt):
        """Save the model at "checkpoint_interval" and its multiples"""
        model_name = 'deepfillv2_WGAN_epoch%d_batchsize%d.pth' % (
            epoch, opt.batch_size)
        model_name = os.path.join(save_folder, model_name)
        if opt.multi_gpu:
            if epoch % opt.checkpoint_interval == 0:
                torch.save(net.module.state_dict(), model_name)
                print('The trained model is successfully saved at epoch %d' %
                      (epoch))
        else:
            if epoch % opt.checkpoint_interval == 0:
                torch.save(net.state_dict(), model_name)
                print('The trained model is successfully saved at epoch %d' %
                      (epoch))

    # ----------------------------------------
    #       Initialize training dataset
    # ----------------------------------------

    # Define the dataset
    trainset = dataset.InpaintDataset(opt)
    print('The overall number of images: %d' % len(trainset))

    # Define the dataloader
    dataloader = DataLoader(trainset,
                            batch_size=opt.batch_size,
                            shuffle=True,
                            num_workers=opt.num_workers,
                            pin_memory=True)

    # ----------------------------------------
    #            Training and Testing
    # ----------------------------------------

    # Initialize start time
    prev_time = time.time()

    # Tensor type
    Tensor = torch.cuda.FloatTensor
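    # Note: torch.cuda.FloatTensor(ndarray) copies the array straight onto the
    # GPU; in current PyTorch the preferred spelling is
    # torch.from_numpy(arr).float().cuda(), or torch.ones/zeros(..., device='cuda').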

    # Training loop
    for epoch in range(opt.epochs):
        for batch_idx, (img, mask) in enumerate(dataloader):

            # Load img (shape: [B, 3, H, W]) and mask (shape: [B, 1, H, W]), then move them to CUDA
            img = img.cuda()
            mask = mask.cuda()

            # LSGAN vectors
            valid = Tensor(np.ones((img.shape[0], 1, 8, 8)))
            fake = Tensor(np.zeros((img.shape[0], 1, 8, 8)))
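            # The discriminator is assumed to emit one score per spatial patch
            # (a [B, 1, 8, 8] map here), so the least-squares targets are maps
            # of ones (real) and zeros (fake) rather than single scalars.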

            ### Train Discriminator
            optimizer_d.zero_grad()

            # Generator output
            first_out, second_out = generator(img, mask)

            # forward propagation
            first_out_wholeimg = img * (
                1 - mask) + first_out * mask  # in range [0, 1]
            second_out_wholeimg = img * (
                1 - mask) + second_out * mask  # in range [0, 1]

            # Fake samples
            fake_scalar = discriminator(second_out_wholeimg.detach(), mask)
            # True samples
            true_scalar = discriminator(img, mask)

            # Discriminator loss: least-squares (LSGAN) objective,
            # L_D = 0.5 * [ (D(x) - 1)^2 + D(G(z))^2 ]
            loss_fake = MSELoss(fake_scalar, fake)
            loss_true = MSELoss(true_scalar, valid)
            loss_D = 0.5 * (loss_fake + loss_true)
            loss_D.backward()
            optimizer_d.step()

            ### Train Generator
            optimizer_g.zero_grad()

            # Mask L1 Loss
            first_MaskL1Loss = L1Loss(first_out_wholeimg, img)
            second_MaskL1Loss = L1Loss(second_out_wholeimg, img)

            # GAN Loss
            fake_scalar = discriminator(second_out_wholeimg, mask)
            GAN_Loss = MSELoss(fake_scalar, valid)

            # Get the deep semantic feature maps, and compute Perceptual Loss
            img_featuremaps = perceptualnet(img)  # feature maps
            second_out_wholeimg_featuremaps = perceptualnet(
                second_out_wholeimg)
            second_PerceptualLoss = L1Loss(second_out_wholeimg_featuremaps,
                                           img_featuremaps)

            # Compute losses
            loss = opt.lambda_l1 * first_MaskL1Loss + opt.lambda_l1 * second_MaskL1Loss + \
                opt.lambda_perceptual * second_PerceptualLoss + opt.lambda_gan * GAN_Loss
            loss.backward()
            optimizer_g.step()

            # Determine approximate time left
            batches_done = epoch * len(dataloader) + batch_idx
            batches_left = opt.epochs * len(dataloader) - batches_done
            time_left = datetime.timedelta(seconds=batches_left *
                                           (time.time() - prev_time))
            prev_time = time.time()

            # Print log
            print(
                "\r[Epoch %d/%d] [Batch %d/%d] [first Mask L1 Loss: %.5f] [second Mask L1 Loss: %.5f]"
                % ((epoch + 1), opt.epochs, batch_idx, len(dataloader),
                   first_MaskL1Loss.item(), second_MaskL1Loss.item()))
            print(
                "\r[D Loss: %.5f] [G Loss: %.5f] [Perceptual Loss: %.5f] time_left: %s"
                % (loss_D.item(), GAN_Loss.item(),
                   second_PerceptualLoss.item(), time_left))

        # Learning rate decrease
        adjust_learning_rate(opt.lr_g, optimizer_g, (epoch + 1), opt)
        adjust_learning_rate(opt.lr_d, optimizer_d, (epoch + 1), opt)

        # Save the model
        save_model(generator, (epoch + 1), opt)

        ### Sample data every epoch
        masked_img = img * (1 - mask) + mask
        mask = torch.cat((mask, mask, mask), 1)
        if (epoch + 1) % 1 == 0:
            img_list = [img, mask, masked_img, first_out, second_out]
            name_list = ['gt', 'mask', 'masked_img', 'first_out', 'second_out']
            utils.save_sample_png(sample_folder=sample_folder,
                                  sample_name='epoch%d' % (epoch + 1),
                                  img_list=img_list,
                                  name_list=name_list,
                                  pixel_max_cnt=255)
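The compositing step above keeps ground-truth pixels outside the hole and generator pixels inside it. A toy sanity check on dummy tensors (shapes and the mask convention, 1 inside the hole, are assumptions for illustration):

import torch

img = torch.rand(2, 3, 64, 64)           # ground truth in [0, 1]
out = torch.rand(2, 3, 64, 64)           # generator output
mask = torch.zeros(2, 1, 64, 64)
mask[:, :, 16:48, 16:48] = 1.0           # 1 inside the hole, 0 elsewhere

# Composite: the single-channel mask broadcasts over the channel dimension
whole = img * (1 - mask) + out * mask
# Pixels outside the hole are untouched
assert torch.allclose(whole * (1 - mask), img * (1 - mask))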
Example #11
0
def ESRGAN_Trainer(opt):
    # ----------------------------------------
    #       Network training parameters
    # ----------------------------------------

    # cudnn benchmark
    cudnn.benchmark = opt.cudnn_benchmark

    # configurations
    save_folder = os.path.join(opt.save_path, opt.task_name)
    sample_folder = os.path.join(opt.sample_path, opt.task_name)
    if not os.path.exists(save_folder):
        os.makedirs(save_folder)
    if not os.path.exists(sample_folder):
        os.makedirs(sample_folder)

    # Loss functions
    criterion_L1 = torch.nn.L1Loss().cuda()
    criterion_MSE = torch.nn.MSELoss().cuda()

    # Initialize networks
    generator = utils.create_ESRGAN_generator(opt)
    discriminator = utils.create_ESRGAN_discriminator(opt)
    perceptualnet = utils.create_perceptualnet(opt)

    # To device
    if opt.multi_gpu:
        generator = nn.DataParallel(generator)
        generator = generator.cuda()
        discriminator = nn.DataParallel(discriminator)
        discriminator = discriminator.cuda()
        perceptualnet = nn.DataParallel(perceptualnet)
        perceptualnet = perceptualnet.cuda()

    else:
        generator = generator.cuda()
        discriminator = discriminator.cuda()
        perceptualnet = perceptualnet.cuda()

    # Optimizers
    optimizer_G = torch.optim.Adam(generator.parameters(),
                                   lr=opt.lr_g,
                                   betas=(opt.b1, opt.b2))
    optimizer_D = torch.optim.Adam(discriminator.parameters(),
                                   lr=opt.lr_d,
                                   betas=(opt.b1, opt.b2))

    # Learning rate decrease
    def adjust_learning_rate(opt, epoch, iteration, optimizer):
        # Set the learning rate to the initial LR decayed by "lr_decrease_factor" every "lr_decrease_epoch" epochs
        # NOTE: both optimizers passed in are rescheduled from opt.lr_g; if the
        # discriminator uses a different base rate (opt.lr_d), pass it in
        # separately, as Trainer_GAN does below with its lr_gd argument.
        if opt.lr_decrease_mode == 'epoch':
            lr = opt.lr_g * (opt.lr_decrease_factor
                             **(epoch // opt.lr_decrease_epoch))
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
        if opt.lr_decrease_mode == 'iter':
            lr = opt.lr_g * (opt.lr_decrease_factor
                             **(iteration // opt.lr_decrease_iter))
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr

    # Save the model at multiples of the checkpoint interval
    def save_model(opt, epoch, iteration, len_dataset, generator):
        """Save the model at "save_by_epoch"/"save_by_iter" and their multiples"""
        # Define the name of trained model
        if opt.save_mode == 'epoch':
            model_name = 'Wavelet_epoch%d_bs%d.pth' % (epoch, opt.batch_size)
        if opt.save_mode == 'iter':
            model_name = 'Wavelet_iter%d_bs%d.pth' % (iteration,
                                                      opt.batch_size)
        save_model_path = os.path.join(opt.save_path, opt.task_name,
                                       model_name)
        if opt.multi_gpu:
            if opt.save_mode == 'epoch':
                if (epoch % opt.save_by_epoch
                        == 0) and (iteration % len_dataset == 0):
                    torch.save(generator.module.state_dict(), save_model_path)
                    print(
                        'The trained model is successfully saved at epoch %d' %
                        (epoch))
            if opt.save_mode == 'iter':
                if iteration % opt.save_by_iter == 0:
                    torch.save(generator.module.state_dict(), save_model_path)
                    print(
                        'The trained model is successfully saved at iteration %d'
                        % (iteration))
        else:
            if opt.save_mode == 'epoch':
                if (epoch % opt.save_by_epoch
                        == 0) and (iteration % len_dataset == 0):
                    torch.save(generator.state_dict(), save_model_path)
                    print(
                        'The trained model is successfully saved at epoch %d' %
                        (epoch))
            if opt.save_mode == 'iter':
                if iteration % opt.save_by_iter == 0:
                    torch.save(generator.state_dict(), save_model_path)
                    print(
                        'The trained model is successfully saved at iteration %d'
                        % (iteration))

    # ----------------------------------------
    #             Network dataset
    # ----------------------------------------

    # Define the dataset
    trainset = dataset.LRHRDataset(opt)
    print('The overall number of images:', len(trainset))

    # Define the dataloader
    dataloader = DataLoader(trainset,
                            batch_size=opt.batch_size,
                            shuffle=True,
                            num_workers=opt.num_workers,
                            pin_memory=True)

    # ----------------------------------------
    #                 Training
    # ----------------------------------------

    # Tensor type
    Tensor = torch.cuda.FloatTensor

    # Count start time
    prev_time = time.time()

    # Tensorboard
    writer = SummaryWriter()

    # For loop training
    for epoch in range(opt.epochs):

        # Record learning rate
        for param_group in optimizer_G.param_groups:
            writer.add_scalar('data/lr', param_group['lr'], epoch)
            print('learning rate = ', param_group['lr'])

        if epoch == 0:
            iters_done = 0

        for i, (img_LR, img_HR) in enumerate(dataloader):

            # To device
            # img_LR is the low-resolution input, img_HR is the high-resolution target
            # assert img_LR.shape == img_HR.shape
            img_LR = img_LR.cuda()
            img_HR = img_HR.cuda()

            # Adversarial ground truth
            valid = Tensor(
                np.ones((img_LR.shape[0], 1, img_LR.shape[2] // 8,
                         img_LR.shape[3] // 8)))
            fake = Tensor(
                np.zeros((img_LR.shape[0], 1, img_LR.shape[2] // 8,
                          img_LR.shape[3] // 8)))
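            # The discriminator here is assumed to downsample by a factor of 8
            # overall, so its per-patch scores (and these targets) have spatial
            # size (H // 8, W // 8).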
            # Use the actual batch size so the final (possibly smaller) batch works
            z = np.random.randn(img_LR.shape[0], 1, 128, 128).astype(np.float32)
            z = np.repeat(z, 64, axis=1)
            z = Variable(torch.from_numpy(z)).cuda()
            ### Train Generator
            # Forward
            pred = generator(img_LR, z)

            # L1 loss
            loss_L1 = criterion_L1(pred, img_HR)

            # gan part
            fake_scalar = discriminator(pred)
            loss_gan = criterion_MSE(fake_scalar, valid)

            # Perceptual loss part
            fea_true = perceptualnet(img_HR)
            fea_pred = perceptualnet(pred)
            loss_percep = criterion_MSE(fea_true, fea_pred)

            # Overall Loss and optimize
            optimizer_G.zero_grad()
            loss = opt.lambda_l1 * loss_L1 + opt.lambda_gan * loss_gan + opt.lambda_percep * loss_percep
            loss.backward()
            optimizer_G.step()

            ### Train Discriminator
            # Forward
            pred = generator(img_LR, z)

            # GAN loss
            fake_scalar = discriminator(pred.detach())
            loss_fake = criterion_MSE(fake_scalar, fake)
            true_scalar = discriminator(img_HR)
            loss_true = criterion_MSE(true_scalar, valid)

            # Overall Loss and optimize
            optimizer_D.zero_grad()
            loss_D = 0.5 * (loss_fake + loss_true)
            loss_D.backward()
            optimizer_D.step()

            # Record losses
            writer.add_scalar('data/loss_L1', loss_L1.item(), iters_done)
            writer.add_scalar('data/loss_percep', loss_percep.item(),
                              iters_done)
            writer.add_scalar('data/loss_G', loss.item(), iters_done)
            writer.add_scalar('data/loss_D', loss_D.item(), iters_done)

            # Determine approximate time left
            iters_done = epoch * len(dataloader) + i
            iters_left = opt.epochs * len(dataloader) - iters_done
            time_left = datetime.timedelta(seconds=iters_left *
                                           (time.time() - prev_time))
            prev_time = time.time()

            # Print log
            print(
                "\r[Epoch %d/%d] [Batch %d/%d] [L1 Loss: %.4f] [G Loss: %.4f] [G percep Loss: %.4f] [D Loss: %.4f] Time_left: %s"
                % ((epoch + 1), opt.epochs, i, len(dataloader), loss_L1.item(),
                   loss_gan.item(), loss_percep.item(), loss_D.item(),
                   time_left))

            # Save model at certain epochs or iterations
            save_model(opt, (epoch + 1), (iters_done + 1), len(dataloader),
                       generator)

            # Learning rate decrease at certain epochs
            adjust_learning_rate(opt, (epoch + 1), (iters_done + 1),
                                 optimizer_G)
            adjust_learning_rate(opt, (epoch + 1), (iters_done + 1),
                                 optimizer_D)

        ### Sample data every epoch
        if (epoch + 1) % 1 == 0:
            img_list = [pred, img_HR]
            name_list = ['pred', 'gt']
            utils.save_sample_png(sample_folder=sample_folder,
                                  sample_name='epoch%d' % (epoch + 1),
                                  img_list=img_list,
                                  name_list=name_list,
                                  pixel_max_cnt=255)

    writer.close()
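The SummaryWriter above logs to TensorBoard's default ./runs directory, so the scalar curves recorded with add_scalar can be inspected with:

tensorboard --logdir runs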
Example #12
0
def Trainer_GAN(opt):
    # ----------------------------------------
    #              Initialization
    # ----------------------------------------

    # cudnn benchmark
    cudnn.benchmark = opt.cudnn_benchmark

    # configurations
    save_folder = os.path.join(opt.save_path, opt.task_name)
    sample_folder = os.path.join(opt.sample_path, opt.task_name)
    if not os.path.exists(save_folder):
        os.makedirs(save_folder)
    if not os.path.exists(sample_folder):
        os.makedirs(sample_folder)

    # Initialize networks
    generator = utils.create_generator(opt)
    discriminator = utils.create_discriminator(opt)
    perceptualnet = utils.create_perceptualnet()

    # To device
    if opt.multi_gpu:
        generator = nn.DataParallel(generator)
        generator = generator.cuda()
        discriminator = nn.DataParallel(discriminator)
        discriminator = discriminator.cuda()
        perceptualnet = nn.DataParallel(perceptualnet)
        perceptualnet = perceptualnet.cuda()
    else:
        generator = generator.cuda()
        discriminator = discriminator.cuda()
        perceptualnet = perceptualnet.cuda()

    # ----------------------------------------
    #             Network dataset
    # ----------------------------------------

    # Handle multiple GPUs
    gpu_num = torch.cuda.device_count()
    print("There are %d GPUs used" % gpu_num)
    opt.train_batch_size *= gpu_num
    #opt.val_batch_size *= gpu_num
    opt.num_workers *= gpu_num

    # Define the dataset
    train_imglist = utils.get_jpgs(os.path.join(opt.in_path_train))
    val_imglist = utils.get_jpgs(os.path.join(opt.in_path_val))
    train_dataset = dataset.Qbayer2RGB_dataset(opt, 'train', train_imglist)
    val_dataset = dataset.Qbayer2RGB_dataset(opt, 'val', val_imglist)
    print('The overall number of training images:', len(train_imglist))
    print('The overall number of validation images:', len(val_imglist))

    # Define the dataloader
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=opt.train_batch_size,
                                               shuffle=True,
                                               num_workers=opt.num_workers,
                                               pin_memory=True)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=opt.val_batch_size,
                                             shuffle=False,
                                             num_workers=opt.num_workers,
                                             pin_memory=True)

    # ----------------------------------------
    #       Network training parameters
    # ----------------------------------------

    # Loss functions
    criterion_L1 = torch.nn.L1Loss().cuda()

    class ColorLoss(nn.Module):
        def __init__(self):
            super(ColorLoss, self).__init__()
            self.L1loss = nn.L1Loss()

        def RGB2YUV(self, RGB):
            YUV = RGB.clone()
            YUV[:, 0, :, :] = 0.299 * RGB[:, 0, :, :] + 0.587 * RGB[:, 1, :, :] + 0.114 * RGB[:, 2, :, :]
            YUV[:, 1, :, :] = -0.14713 * RGB[:, 0, :, :] - 0.28886 * RGB[:, 1, :, :] + 0.436 * RGB[:, 2, :, :]
            YUV[:, 2, :, :] = 0.615 * RGB[:, 0, :, :] - 0.51499 * RGB[:, 1, :, :] - 0.10001 * RGB[:, 2, :, :]
            return YUV

        def forward(self, x, y):
            yuv_x = self.RGB2YUV(x)
            yuv_y = self.RGB2YUV(y)
            return self.L1loss(yuv_x, yuv_y)

    yuv_loss = ColorLoss()
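    # A more compact, equivalent RGB2YUV multiplies the channel axis by a
    # fixed 3x3 matrix (same coefficients as above); left here as a hedged
    # sketch only, not wired into the training loop:
    #
    #   _RGB2YUV = torch.tensor([[ 0.299,    0.587,    0.114  ],
    #                            [-0.14713, -0.28886,  0.436  ],
    #                            [ 0.615,   -0.51499, -0.10001]])
    #   def rgb2yuv(rgb):  # rgb: [B, 3, H, W]
    #       return torch.einsum('oc,bchw->bohw', _RGB2YUV.to(rgb.device), rgb)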

    # Optimizers
    optimizer_G = torch.optim.Adam(generator.parameters(),
                                   lr=opt.lr_g,
                                   betas=(opt.b1, opt.b2),
                                   weight_decay=opt.weight_decay)
    optimizer_D = torch.optim.Adam(discriminator.parameters(),
                                   lr=opt.lr_d,
                                   betas=(opt.b1, opt.b2),
                                   weight_decay=opt.weight_decay)

    # Learning rate decrease
    def adjust_learning_rate(opt, epoch, iteration, optimizer, lr_gd):
        # Set the learning rate to the initial LR decayed by "lr_decrease_factor" every "lr_decrease_epoch" epochs
        if opt.lr_decrease_mode == 'epoch':
            lr = lr_gd * (opt.lr_decrease_factor
                          **(epoch // opt.lr_decrease_epoch))
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
        if opt.lr_decrease_mode == 'iter':
            lr = lr_gd * (opt.lr_decrease_factor
                          **(iteration // opt.lr_decrease_iter))
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr

    # Save the model at multiples of the checkpoint interval
    def save_model(opt, epoch, iteration, len_dataset, generator):
        # Define the name of trained model
        if opt.save_mode == 'epoch':
            model_name = '%s_gan_noise%.3f_epoch%d_bs%d.pth' % (
                opt.net_mode, opt.noise_level, epoch, opt.train_batch_size)
        if opt.save_mode == 'iter':
            model_name = '%s_gan_noise%.3f_iter%d_bs%d.pth' % (
                opt.net_mode, opt.noise_level, iteration, opt.train_batch_size)
        save_model_path = os.path.join(opt.save_path, opt.task_name,
                                       model_name)
        # Save model
        if opt.multi_gpu:
            if opt.save_mode == 'epoch':
                if (epoch % opt.save_by_epoch
                        == 0) and (iteration % len_dataset == 0):
                    torch.save(generator.module.state_dict(), save_model_path)
                    print(
                        'The trained model is successfully saved at epoch %d' %
                        (epoch))
            if opt.save_mode == 'iter':
                if iteration % opt.save_by_iter == 0:
                    torch.save(generator.module.state_dict(), save_model_path)
                    print(
                        'The trained model is successfully saved at iteration %d'
                        % (iteration))
        else:
            if opt.save_mode == 'epoch':
                if (epoch % opt.save_by_epoch
                        == 0) and (iteration % len_dataset == 0):
                    torch.save(generator.state_dict(), save_model_path)
                    print(
                        'The trained model is successfully saved at epoch %d' %
                        (epoch))
            if opt.save_mode == 'iter':
                if iteration % opt.save_by_iter == 0:
                    torch.save(generator.state_dict(), save_model_path)
                    print(
                        'The trained model is successfully saved at iteration %d'
                        % (iteration))

    # ----------------------------------------
    #                 Training
    # ----------------------------------------

    # Count start time
    prev_time = time.time()

    # Tensorboard
    writer = SummaryWriter()

    # For loop training
    for epoch in range(opt.epochs):

        # Record learning rate
        for param_group in optimizer_G.param_groups:
            writer.add_scalar('data/lr', param_group['lr'], epoch)
            print('learning rate = ', param_group['lr'])

        if epoch == 0:
            iters_done = 0

        ### Training
        for i, (in_img, RGBout_img) in enumerate(train_loader):

            # To device
            # in_img is the input, RGBout_img is the RGB target
            in_img = in_img.cuda()
            RGBout_img = RGBout_img.cuda()

            ## Train Discriminator
            # Forward propagation
            out = generator(in_img)

            optimizer_D.zero_grad()
            # Fake samples
            fake_scalar_d = discriminator(in_img, out.detach())
            true_scalar_d = discriminator(in_img, RGBout_img)
            # Overall Loss and optimize
            loss_D = -torch.mean(true_scalar_d) + torch.mean(fake_scalar_d)
            loss_D.backward()
            #torch.nn.utils.clip_grad_norm(discriminator.parameters(), opt.grad_clip_norm)
            optimizer_D.step()
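            # This is the WGAN critic objective (maximize D(real) - D(fake));
            # the commented-out gradient clipping above would be one way to
            # keep the critic approximately Lipschitz (weight clipping or a
            # gradient penalty are the usual alternatives).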

            ## Train Generator
            # Forward propagation
            out = generator(in_img)

            # GAN loss
            fake_scalar = discriminator(in_img, out)
            L_gan = -torch.mean(fake_scalar) * opt.lambda_gan

            # Perceptual loss features
            fake_B_fea = perceptualnet(utils.normalize_ImageNet_stats(out))
            true_B_fea = perceptualnet(
                utils.normalize_ImageNet_stats(RGBout_img))
            L_percep = opt.lambda_percep * criterion_L1(fake_B_fea, true_B_fea)

            # Pixel loss
            L_pixel = opt.lambda_pixel * criterion_L1(out, RGBout_img)

            # Color loss
            L_color = opt.lambda_color * yuv_loss(out, RGBout_img)

            # Sum up to total loss
            loss = L_pixel + L_percep + L_gan + L_color

            # Record losses
            writer.add_scalar('data/L_pixel', L_pixel.item(), iters_done)
            writer.add_scalar('data/L_percep', L_percep.item(), iters_done)
            writer.add_scalar('data/L_color', L_color.item(), iters_done)
            writer.add_scalar('data/L_gan', L_gan.item(), iters_done)
            writer.add_scalar('data/L_total', loss.item(), iters_done)
            writer.add_scalar('data/loss_D', loss_D.item(), iters_done)

            # Backpropagate gradients
            optimizer_G.zero_grad()
            loss.backward()
            #torch.nn.utils.clip_grad_norm(generator.parameters(), opt.grad_clip_norm)
            optimizer_G.step()

            # Determine approximate time left
            iters_done = epoch * len(train_loader) + i + 1
            iters_left = opt.epochs * len(train_loader) - iters_done
            time_left = datetime.timedelta(seconds=iters_left *
                                           (time.time() - prev_time))
            prev_time = time.time()

            # Print log
            print(
                "\r[Epoch %d/%d] [Batch %d/%d] [Total Loss: %.4f] [L_pixel: %.4f]"
                % ((epoch + 1), opt.epochs, i, len(train_loader), loss.item(),
                   L_pixel.item()))
            print(
                "\r[L_percep: %.4f] [L_color: %.4f] [L_gan: %.4f] [loss_D: %.4f] Time_left: %s"
                % (L_percep.item(), L_color.item(), L_gan.item(),
                   loss_D.item(), time_left))

            # Save model at certain epochs or iterations
            save_model(opt, (epoch + 1), iters_done, len(train_loader),
                       generator)

            # Learning rate decrease at certain epochs
            adjust_learning_rate(opt, (epoch + 1), iters_done, optimizer_G,
                                 opt.lr_g)
            adjust_learning_rate(opt, (epoch + 1), iters_done, optimizer_D,
                                 opt.lr_d)

        ### Sample data every epoch
        if (epoch + 1) % 1 == 0:
            img_list = [out, RGBout_img]
            name_list = ['pred', 'gt']
            utils.save_sample_png(sample_folder=sample_folder,
                                  sample_name='train_epoch%d' % (epoch + 1),
                                  img_list=img_list,
                                  name_list=name_list,
                                  pixel_max_cnt=255)

        ### Validation
        val_PSNR = 0
        num_of_val_image = 0

        for j, (in_img, RGBout_img) in enumerate(val_loader):

            # To device
            # in_img is the input, RGBout_img is the RGB target
            in_img = in_img.cuda()
            RGBout_img = RGBout_img.cuda()

            # Forward propagation
            with torch.no_grad():
                out = generator(in_img)

            # Accumulate num of image and val_PSNR
            num_of_val_image += in_img.shape[0]
            val_PSNR += utils.psnr(out, RGBout_img, 1) * in_img.shape[0]
        val_PSNR = val_PSNR / num_of_val_image

        ### Sample data every epoch
        if (epoch + 1) % 1 == 0:
            img_list = [out, RGBout_img]
            name_list = ['pred', 'gt']
            utils.save_sample_png(sample_folder=sample_folder,
                                  sample_name='val_epoch%d' % (epoch + 1),
                                  img_list=img_list,
                                  name_list=name_list,
                                  pixel_max_cnt=255)

        # Record average PSNR
        writer.add_scalar('data/val_PSNR', val_PSNR, epoch)
        print('PSNR at epoch %d: %.4f' % ((epoch + 1), val_PSNR))

    writer.close()
        # Forward propagation
        with torch.no_grad():
            fake_target = generator(true_input, true_input)

        # Save
        print('The %d-th iteration' % (i))
        img_list = [true_input, fake_target, true_target]
        name_list = ['in', 'pred', 'gt']
        sample_name = '%d' % (i + 1)
        utils.save_sample_png(sample_folder=sample_folder,
                              sample_name='%d' % (i + 1),
                              img_list=img_list,
                              name_list=name_list,
                              pixel_max_cnt=255,
                              height=height_origin,
                              width=width_origin)

        # Evaluation: recover both images to the original resolution, then
        # accumulate PSNR and SSIM (compare_ssim is assumed to be skimage's
        # SSIM over HxWxC arrays, hence multichannel=True)
        img_pred_recover = utils.recover_process(fake_target,
                                                 height=height_origin,
                                                 width=width_origin)
        img_gt_recover = utils.recover_process(true_target,
                                               height=height_origin,
                                               width=width_origin)
        psnr_sum = psnr_sum + utils.psnr(img_pred_recover, img_gt_recover)
        ssim_sum = ssim_sum + compare_ssim(img_gt_recover,
                                           img_pred_recover,
                                           multichannel=True)
Example #14
0
    parser.add_argument('--crop_size', type = int, default = 256, help = 'single patch size')
    opt = parser.parse_args()
    print(opt)
    utils.check_path(opt.val_path)
    
    # Define the network
    generator = utils.create_generator_val(opt)

    # Define the dataset
    trainset = dataset.ColorizationDataset_Val(opt)
    print('The overall number of images:', len(trainset))

    # Define the dataloader
    dataloader = DataLoader(trainset, batch_size = opt.batch_size, shuffle = True, num_workers = opt.num_workers, pin_memory = True)
    
    # Validation loop
    for i, (true_L, true_RGB) in enumerate(dataloader):

        # To device
        true_L = true_L.cuda()
        true_RGB = true_RGB.cuda()

        # Forward (no gradients are needed at validation time)
        with torch.no_grad():
            fake_RGB = generator(true_L)

        # Save validation images
        img_list = [fake_RGB, true_RGB]
        name_list = ['pred', 'gt']
        utils.save_sample_png(sample_folder = opt.val_path, sample_name = str(i), img_list = img_list, name_list = name_list)

Example #15
0
def MyDNN(opt):
    # ----------------------------------------
    #       Network training parameters
    # ----------------------------------------

    # cudnn benchmark
    cudnn.benchmark = opt.cudnn_benchmark

    # configurations
    save_folder = os.path.join(opt.save_path, opt.task)
    sample_folder = os.path.join(opt.sample_path, opt.task)
    if not os.path.exists(save_folder):
        os.makedirs(save_folder)
    if not os.path.exists(sample_folder):
        os.makedirs(sample_folder)

    # Loss functions
    criterion_L1 = torch.nn.L1Loss().cuda()
    criterion_L2 = torch.nn.MSELoss().cuda()
    mse_loss = nn.MSELoss().cuda()
    ms_ssim_module = MS_SSIM(data_range=2,
                             size_average=True,
                             channel=3,
                             nonnegative_ssim=True)
    # Pretrained VGG
    # vgg = MINCFeatureExtractor(opt).cuda()
    # Initialize Generator
    generator = utils.create_MyDNN(opt)
    use_checkpoint = False
    if use_checkpoint:
        checkpoint_path = './MyDNN1_denoise_epoch175_bs1'
        # Load a pre-trained network
        pretrained_net = torch.load(checkpoint_path + '.pth')
        load_dict(generator, pretrained_net)
        print('Generator is loaded!')
    # To device
    if opt.multi_gpu:
        generator = nn.DataParallel(generator)
        generator = generator.cuda()
    else:
        generator = generator.cuda()

    # Optimizers
    optimizer_G = torch.optim.Adam(generator.parameters(),
                                   lr=opt.lr_g,
                                   betas=(opt.b1, opt.b2),
                                   weight_decay=opt.weight_decay)

    # Learning rate decrease
    def adjust_learning_rate(opt, epoch, iteration, optimizer):
        # Set the learning rate to the initial LR decayed by "lr_decrease_factor" every "lr_decrease_epoch" epochs
        if opt.lr_decrease_mode == 'epoch':
            lr = opt.lr_g * (opt.lr_decrease_factor
                             **(epoch // opt.lr_decrease_epoch))
            # Hard-coded piecewise schedule; it overrides the formula above
            if epoch < 200:
                lr = 0.0001
            if epoch >= 200:
                lr = 0.00005
            if epoch >= 300:
                lr = 0.00001
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
        if opt.lr_decrease_mode == 'iter':
            lr = opt.lr_g * (opt.lr_decrease_factor
                             **(iteration // opt.lr_decrease_iter))
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
        return lr

    # Save the model at multiples of the checkpoint interval
    def save_model(opt, epoch, iteration, len_dataset, generator, val_PSNR,
                   best_PSNR):
        """Save the model at "save_by_epoch"/"save_by_iter" and their multiples"""
        if opt.save_best_model and best_PSNR == val_PSNR:
            torch.save(generator,
                       'final_%s_epoch%d_best.pth' % (opt.task, epoch))
            print('The best model is successfully saved at epoch %d' % (epoch))
        if opt.multi_gpu:
            if opt.save_mode == 'epoch':
                if (epoch % opt.save_by_epoch
                        == 0) and (iteration % len_dataset == 0):
                    if opt.save_name_mode:
                        torch.save(
                            generator.module, 'MyDNN1_%s_epoch%d_bs%d.pth' %
                            (opt.task, epoch, opt.batch_size))
                        print(
                            'The trained model is successfully saved at epoch %d'
                            % (epoch))
            if opt.save_mode == 'iter':
                if iteration % opt.save_by_iter == 0:
                    if opt.save_name_mode:
                        torch.save(
                            generator.module, 'MyDNN1_%s_iter%d_bs%d.pth' %
                            (opt.task, iteration, opt.batch_size))
                        print(
                            'The trained model is successfully saved at iteration %d'
                            % (iteration))
        else:
            if opt.save_mode == 'epoch':
                if (epoch % opt.save_by_epoch
                        == 0) and (iteration % len_dataset == 0):
                    if opt.save_name_mode:
                        torch.save(
                            generator, 'final_%s_epoch%d_bs%d.pth' %
                            (opt.task, epoch, opt.batch_size))
                        print(
                            'The trained model is successfully saved at epoch %d'
                            % (epoch))

            if opt.save_mode == 'iter':
                if iteration % opt.save_by_iter == 0:
                    if opt.save_name_mode:
                        torch.save(
                            generator, 'final_%s_iter%d_bs%d.pth' %
                            (opt.task, iteration, opt.batch_size))
                        print(
                            'The trained model is successfully saved at iteration %d'
                            % (iteration))

    # ----------------------------------------
    #             Network dataset
    # ----------------------------------------

    # Define the dataloader
    # trainset = dataset.TestDataset(opt)
    trainset = dataset.Noise2CleanDataset(opt)
    print('The overall number of training images:', len(trainset))
    testset = dataset.TestDataset(opt)
    valset = dataset.ValDataset(opt)
    print('The overall number of val images:', len(valset))
    # Define the dataloader
    dataloader = DataLoader(trainset,
                            batch_size=opt.batch_size,
                            shuffle=True,
                            num_workers=opt.num_workers,
                            pin_memory=True)
    val_loader = DataLoader(valset,
                            batch_size=opt.batch_size,
                            shuffle=True,
                            num_workers=opt.num_workers,
                            pin_memory=True)
    test_loader = DataLoader(testset,
                             batch_size=opt.batch_size,
                             shuffle=True,
                             num_workers=opt.num_workers,
                             pin_memory=True)
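    # Note: shuffle=True on the val/test loaders only changes the visiting
    # order; the dataset-level averages computed below are unaffected by it.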
    # ----------------------------------------
    #                 Training
    # ----------------------------------------

    # Count start time
    prev_time = time.time()
    best_PSNR = 0
    # For loop training
    for epoch in range(opt.epochs):
        total_loss = 0
        total_ploss = 0
        total_sobel = 0
        total_Lap = 0
        for i, (true_input, simulated_input, true_target,
                noise_level_map) in enumerate(dataloader):

            # To device
            true_input = true_input.cuda()
            true_target = true_target.cuda()
            simulated_input = simulated_input.cuda()
            noise_level_map = noise_level_map.cuda()
            # Train Generator
            optimizer_G.zero_grad()
            pre_clean = generator(true_input)

            # Parse through VGGMINC layers
            # features_y = vgg(pre_clean)
            # features_x = vgg(true_input)
            # content_loss = criterion_L2(features_y, features_x)

            # Edge losses are computed on the first image of the batch, on CPU
            pre = pre_clean[0, :, :, :].data.permute(1, 2, 0).cpu().numpy()
            pre = rgb2gray(pre)
            true = true_input[0, :, :, :].data.permute(1, 2, 0).cpu().numpy()
            true = rgb2gray(true)
            laplacian_pre = cv2.Laplacian(pre, cv2.CV_32F)  # CV_32F is the output image depth
            laplacian_gt = cv2.Laplacian(true, cv2.CV_32F)
            sobel_pre = 0.5 * (cv2.Sobel(pre, cv2.CV_32F, 1, 0, ksize=5) +
                               cv2.Sobel(pre, cv2.CV_32F, 0, 1, ksize=5)
                               )  # (1, 0): first derivative in x; (0, 1): in y
            sobel_gt = 0.5 * (cv2.Sobel(true, cv2.CV_32F, 1, 0, ksize=5) +
                              cv2.Sobel(true, cv2.CV_32F, 0, 1, ksize=5))
            # NOTE: these losses are computed on detached numpy arrays, so they
            # add a constant to the objective and contribute no gradient
            sobel_loss = mean_squared_error(sobel_pre, sobel_gt)
            laplacian_loss = mean_squared_error(laplacian_pre, laplacian_gt)
            # L1 Loss
            Pixellevel_L1_Loss = criterion_L1(pre_clean, true_target)

            # MS-SSIM loss
            ms_ssim_loss = 1 - ms_ssim_module(pre_clean + 1, true_target + 1)
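            # The +1 shift assumes pre_clean/true_target live in [-1, 1];
            # mapping them to [0, 2] matches the data_range=2 given to MS_SSIM
            # above. Note ms_ssim_loss is computed but not added to `loss`.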

            # Overall Loss and optimize
            loss = Pixellevel_L1_Loss + 0.5 * laplacian_loss
            # loss =  Pixellevel_L1_Loss
            loss.backward()
            optimizer_G.step()

            # Determine approximate time left
            iters_done = epoch * len(dataloader) + i
            iters_left = opt.epochs * len(dataloader) - iters_done
            time_left = datetime.timedelta(seconds=iters_left *
                                           (time.time() - prev_time))
            prev_time = time.time()
            total_loss = Pixellevel_L1_Loss.item() + total_loss
            # total_ploss = content_loss.item() + total_ploss
            total_sobel = sobel_loss + total_sobel
            total_Lap = laplacian_loss + total_Lap

            # Print log
            print(
                "\r[Epoch %d/%d] [Batch %d/%d] [Pixellevel L1 Loss: %.4f] [Laplacian Loss: %.4f] [Sobel Loss: %.4f] Time_left: %s"
                % ((epoch + 1), opt.epochs, i, len(dataloader),
                   Pixellevel_L1_Loss.item(), laplacian_loss.item(),
                   sobel_loss.item(), time_left))
            img_list = [pre_clean, true_target, true_input]
            name_list = ['pred', 'gt', 'noise']
            utils.save_sample_png(sample_folder=sample_folder,
                                  sample_name='MyDNN_MS_epoch%d' % (epoch + 1),
                                  img_list=img_list,
                                  name_list=name_list,
                                  pixel_max_cnt=255)

            # Learning rate decrease at certain epochs
            lr = adjust_learning_rate(opt, (epoch + 1), (iters_done + 1),
                                      optimizer_G)
        # Per-epoch averages over all batches
        print(
            "\r[Epoch %d/%d] [Avg Pixellevel L1 Loss: %.4f] [Avg Laplacian Loss: %.4f] [Avg Sobel Loss: %.4f] Time_left: %s"
            % ((epoch + 1), opt.epochs, total_loss / len(dataloader),
               total_Lap / len(dataloader), total_sobel / len(dataloader),
               time_left))
        ### Validation
        val_PSNR = 0
        be_PSNR = 0
        num_of_val_image = 0

        for j, (true_input, simulated_input, true_target,
                noise_level_map) in enumerate(val_loader):

            # To device
            # true_input is the noisy input, true_target is the clean target
            true_input = true_input.cuda()
            true_target = true_target.cuda()

            # Forward propagation
            with torch.no_grad():
                pre_clean = generator(true_input)

            # Accumulate num of image and val_PSNR
            num_of_val_image += true_input.shape[0]
            val_PSNR += utils.psnr(pre_clean, true_target,
                                   255) * true_input.shape[0]
            be_PSNR += utils.psnr(true_input, true_target,
                                  255) * true_input.shape[0]
        val_PSNR = val_PSNR / num_of_val_image
        be_PSNR = be_PSNR / num_of_val_image

        # Record average PSNR
        print('PSNR at epoch %d: %.4f' % ((epoch + 1), val_PSNR))
        print('PSNR before denoising %d: %.4f' % ((epoch + 1), be_PSNR))
        best_PSNR = max(val_PSNR, best_PSNR)
        # Save model at certain epochs or iterations
        save_model(opt, (epoch + 1), (iters_done + 1), len(dataloader),
                   generator, val_PSNR, best_PSNR)