示例#1
0
# Command-line options for super-resolution training; defaults match the
# original SRGAN-style setup (88px crops, 4x upscaling, 100 epochs).
parser = argparse.ArgumentParser(description='Train Super Resolution Models')
parser.add_argument('--crop_size', default=88, type=int, help='training images crop size')
parser.add_argument('--upscale_factor', default=4, type=int, choices=[2, 4, 8],
                    help='super resolution upscale factor')
parser.add_argument('--num_epochs', default=100, type=int, help='train epoch number')
# BUG FIX: original had `type=string`, which raises NameError at import time
# (there is no builtin `string` callable); argparse expects the `str` converter.
parser.add_argument('--loss_net', default="vgg16", type=str, help='choose network for loss')

if __name__ == '__main__':
    # Resolve the CLI options once and expose them as module-level constants.
    opt = parser.parse_args()

    CROP_SIZE = opt.crop_size
    UPSCALE_FACTOR = opt.upscale_factor
    NUM_EPOCHS = opt.num_epochs

    # Datasets and loaders: shuffled 64-image mini-batches for training,
    # one unshuffled image at a time for validation.
    train_set = TrainDatasetFromFolder('data/DIV2K_train_HR',
                                       crop_size=CROP_SIZE,
                                       upscale_factor=UPSCALE_FACTOR)
    val_set = ValDatasetFromFolder('data/DIV2K_valid_HR',
                                   upscale_factor=UPSCALE_FACTOR)
    train_loader = DataLoader(dataset=train_set, num_workers=4,
                              batch_size=64, shuffle=True)
    val_loader = DataLoader(dataset=val_set, num_workers=4,
                            batch_size=1, shuffle=False)

    # Build the GAN pair and report each network's parameter count.
    netG = Generator(UPSCALE_FACTOR)
    print('# generator parameters:', sum(p.numel() for p in netG.parameters()))
    netD = Discriminator()
    print('# discriminator parameters:', sum(p.numel() for p in netD.parameters()))

    generator_criterion = GeneratorLoss(opt.loss_net)

    # Move all three modules onto the GPU when one is available.
    if torch.cuda.is_available():
        for module in (netG, netD, generator_criterion):
            module.cuda()
    
                    help='super resolution upscale factor')
# Final CLI option, then parse and unpack into module-level constants.
parser.add_argument('--num_epochs', default=100, type=int,
                    help='train epoch number')

opt = parser.parse_args()

CROP_SIZE = opt.crop_size
UPSCALE_FACTOR = opt.upscale_factor
NUM_EPOCHS = opt.num_epochs

# Training pairs come from cropped images under data/VOC2012/celeba;
# validation reads a separate folder at the same upscale factor.
train_set = TrainDatasetFromFolder('data/VOC2012/celeba',
                                   crop_size=CROP_SIZE,
                                   upscale_factor=UPSCALE_FACTOR)
val_set = ValDatasetFromFolder('data/VOC2012/val',
                               upscale_factor=UPSCALE_FACTOR)

# Large shuffled batches for training; single unshuffled images for validation.
train_loader = DataLoader(dataset=train_set, num_workers=4,
                          batch_size=400, shuffle=True)
val_loader = DataLoader(dataset=val_set, num_workers=4,
                        batch_size=1, shuffle=False)

# Instantiate both networks and report how many parameters each holds.
netG = Generator(UPSCALE_FACTOR)
print('# generator parameters:',
      sum(p.numel() for p in netG.parameters()))
netD = Discriminator()
print('# discriminator parameters:',
      sum(p.numel() for p in netD.parameters()))
示例#3
0
def main():
    """Train the SRGAN generator/discriminator pair and log metrics.

    Reads module-level CROP_SIZE, UPSCALE_FACTOR and NUM_EPOCHS, and relies
    on project helpers (TrainDatasetFromFolder, ValDatasetFromFolder,
    Generator, Discriminator, GeneratorLoss, train, test). Every 10 epochs
    the accumulated losses/scores/PSNR/SSIM are written to
    ``statistics/srf_<UPSCALE_FACTOR>_train_results.csv``.
    """
    os.environ["CUDA_VISIBLE_DEVICES"] = '0, 1, 2, 3'

    train_set = TrainDatasetFromFolder('data/DIV2K_train_HR',
                                       crop_size=CROP_SIZE,
                                       upscale_factor=UPSCALE_FACTOR)
    val_set = ValDatasetFromFolder('data/DIV2K_valid_HR',
                                   upscale_factor=UPSCALE_FACTOR)
    train_loader = DataLoader(dataset=train_set,
                              num_workers=4,
                              batch_size=64,
                              shuffle=True)
    val_loader = DataLoader(dataset=val_set,
                            num_workers=4,
                            batch_size=1,
                            shuffle=False)

    netG = Generator(UPSCALE_FACTOR)
    print('# generator parameters:',
          sum(param.numel()
              for param in netG.parameters()))  # total Generator parameters
    netD = Discriminator()
    print('# discriminator parameters:',
          sum(param.numel()
              for param in netD.parameters()))  # total Discriminator parameters

    generator_criterion = GeneratorLoss()  # loss function
    # Wrap both nets for multi-GPU data parallelism before moving to CUDA.
    netG = nn.DataParallel(netG).cuda()
    netD = nn.DataParallel(netD).cuda()
    generator_criterion.cuda()

    optimizerG = optim.Adam(netG.parameters())  # optimizer: Adam (defaults)
    optimizerD = optim.Adam(netD.parameters())  # optimizer: Adam (defaults)

    # Per-epoch metric history, dumped to CSV every 10 epochs.
    results = {
        'd_loss': [],
        'g_loss': [],
        'd_score': [],
        'g_score': [],
        'psnr': [],
        'ssim': []
    }

    for epoch in range(1, NUM_EPOCHS + 1):
        # One full training pass.
        d_loss, g_loss, d_score, g_score = train(netG, netD,
                                                 generator_criterion,
                                                 optimizerG, optimizerD,
                                                 train_loader, epoch)

        # Validation metrics for this epoch.
        psnr, ssim = test(netG, netD, val_loader, epoch)

        # Record losses, scores, PSNR and SSIM.
        results['d_loss'].append(d_loss)
        results['g_loss'].append(g_loss)
        results['d_score'].append(d_score)
        results['g_score'].append(g_score)
        results['psnr'].append(psnr)
        results['ssim'].append(ssim)

        # Checkpoint the metric history every 10 epochs.
        # BUG FIX: dropped the dead `and epoch != 0` guard — epoch starts at 1.
        if epoch % 10 == 0:
            out_path = 'statistics/'
            # Robustness: to_csv fails with FileNotFoundError if the
            # output directory does not exist yet.
            os.makedirs(out_path, exist_ok=True)
            data_frame = pd.DataFrame(data={
                'Loss_D': results['d_loss'],
                'Loss_G': results['g_loss'],
                'Score_D': results['d_score'],
                'Score_G': results['g_score'],
                'PSNR': results['psnr'],
                'SSIM': results['ssim']
            },
                                      index=range(1, epoch + 1))
            data_frame.to_csv(out_path + 'srf_' + str(UPSCALE_FACTOR) +
                              '_train_results.csv',
                              index_label='Epoch')

        print()
示例#4
0
                    type=int,
                    help='train epoch number')

# Parse the previously added arguments and return the resulting namespace.
opt = parser.parse_args()

# Extract the parsed options into module-level constants.
CROP_SIZE = opt.crop_size
UPSCALE_FACTOR = opt.upscale_factor
NUM_EPOCHS = opt.num_epochs

# Build the training set from the given path with the configured
# crop size and upscale factor.
train_set = TrainDatasetFromFolder('./data/train',
                                   crop_size=CROP_SIZE,
                                   upscale_factor=UPSCALE_FACTOR)
val_set = ValDatasetFromFolder('./data/valLR', upscale_factor=UPSCALE_FACTOR)
# DataLoaders feed one batch of files at a time from the dataset.
train_loader = DataLoader(dataset=train_set,
                          num_workers=8,
                          batch_size=64,
                          shuffle=True)
val_loader = DataLoader(dataset=val_set,
                        num_workers=8,
                        batch_size=1,
                        shuffle=False)

# Create the generator instance netG and print its parameter count.
netG = Generator(UPSCALE_FACTOR)
netG = nn.DataParallel(netG)  # e.g. device_ids=[0, 1, 2] for multi-GPU
print('# generator parameters:',
      sum(param.numel() for param in netG.parameters()))
示例#5
0
                    default=100,
                    type=int,
                    help='train epoch number')

if __name__ == '__main__':
    # Parse command-line options into a namespace.
    opt = parser.parse_args()

    # Unpack the parsed options as constants used throughout the script.
    CROP_SIZE = opt.crop_size
    UPSCALE_FACTOR = opt.upscale_factor
    NUM_EPOCHS = opt.num_epochs

    # DIV2K training set with the configured crop size and upscale factor.
    train_set = TrainDatasetFromFolder('data/DIV2K_train_HR',
                                       crop_size=CROP_SIZE,
                                       upscale_factor=UPSCALE_FACTOR)
    # NOTE(review): the validation crop is twice the training crop —
    # confirm this asymmetry is intended.
    val_set = ValDatasetFromFolder('data/DIV2K_valid_HR',
                                   crop_size=CROP_SIZE * 2,
                                   upscale_factor=UPSCALE_FACTOR)
    # Small shuffled batches for training; single unshuffled images for
    # validation.
    train_loader = DataLoader(dataset=train_set,
                              num_workers=4,
                              batch_size=4,
                              shuffle=True)
    val_loader = DataLoader(dataset=val_set,
                            num_workers=4,
                            batch_size=1,
                            shuffle=False)

    # Build the generator and report its parameter count.
    netG = Generator(UPSCALE_FACTOR)
    print('# generator parameters:',
          sum(param.numel() for param in netG.parameters()))
    netD = Discriminator()
    print('# discriminator parameters:',