Example #1
def getDataset(opt, transform):
    """ 
    Return the dataloader object 

    Parameters
    ----------
    opt : namespace

    transform : torchvision.transform

    Return
    ------
    train_loader, val_loader : torch.utils.data.DataLoader
    """
    train_dataset = DatasetFromFolder(opt.train, transform=transform)
    val_dataset   = DatasetFromFolder(opt.val, transform=transform)

    train_loader = DataLoader(
        dataset=train_dataset, 
        num_workers=opt.threads, 
        batch_size=opt.batchsize, 
        pin_memory=True, 
        shuffle=True
    )

    val_loader = DataLoader(
        dataset=val_dataset,
        num_workers=opt.threads,
        batch_size=opt.batchsize,
        pin_memory=True,
        shuffle=True  # shuffling the validation set is unnecessary but harmless
    )

    return train_loader, val_loader
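
A minimal usage sketch for getDataset. The flag names below simply mirror the attributes the function reads (opt.train, opt.val, opt.threads, opt.batchsize) and are assumptions, not part of the original script:

import argparse
from torchvision.transforms import Compose, ToTensor

parser = argparse.ArgumentParser()
parser.add_argument('--train', default='data/train')
parser.add_argument('--val', default='data/val')
parser.add_argument('--threads', type=int, default=4)
parser.add_argument('--batchsize', type=int, default=16)
opt = parser.parse_args()

# Build both loaders with a standard tensor conversion transform.
train_loader, val_loader = getDataset(opt, Compose([ToTensor()]))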
Example #2
def main():
    global opt, logger, model, criterion
    opt = parser.parse_args()
    print(opt)

    logger = SummaryWriter(opt.name)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    seed = 1334
    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed(seed)

    cudnn.benchmark = True

    print("==========> Loading datasets")

    train_dataset = DatasetFromFolder(opt.train,
                                      transform=Compose([ToTensor()]))

    indoor_test_dataset = DatasetFromFolder(opt.test,
                                            transform=Compose([ToTensor()]))

    training_data_loader = DataLoader(dataset=train_dataset,
                                      num_workers=opt.threads,
                                      batch_size=opt.batchSize,
                                      pin_memory=True,
                                      shuffle=True)
    indoor_test_loader = DataLoader(dataset=indoor_test_dataset,
                                    num_workers=opt.threads,
                                    batch_size=1,
                                    pin_memory=True,
                                    shuffle=True)

    print("==========> Building model")
    model = Net()
    criterion = nn.MSELoss(reduction='mean')  # size_average=True is deprecated

    print(model)

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] // 2 + 1
            model.load_state_dict(checkpoint["state_dict"])
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            # the checkpoint stores a full module under 'state_dict',
            # hence the extra .state_dict() call
            model.load_state_dict(weights['state_dict'].state_dict())
        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("==========> Setting GPU")
    if cuda:
        model = nn.DataParallel(model,
                                device_ids=[i
                                            for i in range(opt.gpus)]).cuda()
        criterion = criterion.cuda()
    else:
        model = model.cpu()
        criterion = criterion.cpu()

    print("==========> Setting Optimizer")
    optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                  model.parameters()),
                           lr=opt.lr)

    print("==========> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, indoor_test_loader, optimizer, epoch)
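
The resume branch above reads the keys "epoch" and "state_dict" from the checkpoint file, which implies a particular layout. A minimal sketch of a matching save_checkpoint (the real helper is not shown in the excerpt, so the folder and file name here are assumptions):

import os
import torch

def save_checkpoint(model, epoch, folder="checkpoints"):
    # Keys match what the resume branch reads: "epoch" and "state_dict".
    os.makedirs(folder, exist_ok=True)
    state = {"epoch": epoch, "state_dict": model.state_dict()}
    torch.save(state, os.path.join(folder, "model_epoch_{}.pth".format(epoch)))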
Example #3
def main():
    global opt, name, logger, model, criterion, criterion2
    opt = parser.parse_args()
    print(opt)

    # Tag_ResidualBlocks_BatchSize
    name = "%s_%d_%d" % (opt.tag, opt.rb, opt.batchSize)

    with open('logs/' + name + 'log.csv', 'a') as file:
        file.write(opt.loss + ', \n')
        file.write('Epochs, Test Loss, Test PSNR, Test SSIM, Test score\n')

    logger = SummaryWriter("runs/" + name)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    seed = 1334
    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed(seed)

    cudnn.benchmark = True

    print("==========> Loading datasets")

    train_dataset = DatasetFromFolder(opt.train,
                                      transform=Compose([ToTensor()]),
                                      phase='train')

    indoor_test_dataset = DatasetFromFolder(opt.test,
                                            transform=Compose([ToTensor()]),
                                            phase='test')

    training_data_loader = DataLoader(dataset=train_dataset,
                                      num_workers=opt.threads,
                                      batch_size=opt.batchSize,
                                      pin_memory=True,
                                      shuffle=True)
    indoor_test_loader = DataLoader(dataset=indoor_test_dataset,
                                    num_workers=opt.threads,
                                    batch_size=1,
                                    pin_memory=True,
                                    shuffle=True)

    print("==========> Building model")
    model = Net(opt.rb)
    #model.half()
    criterion = nn.MSELoss(reduction='mean')  # size_average=True is deprecated
    if opt.loss == 'ssim':
        criterion2 = pytorch_ssim.SSIM()
    elif opt.loss == 'vggloss':
        criterion2 = customLoss()
    elif opt.loss != 'MSE':
        raise RuntimeError('unknown loss: ' + opt.loss)

    print(model)

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] // 2 + 1
            model.load_state_dict(checkpoint["state_dict"])
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            # the checkpoint stores a full module under 'state_dict',
            # hence the extra .state_dict() call
            model.load_state_dict(weights['state_dict'].state_dict())
        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("==========> Setting GPU")
    if cuda:
        model = nn.DataParallel(model,
                                device_ids=[i
                                            for i in range(opt.gpus)]).cuda()
        criterion = criterion.cuda()
        if opt.loss != 'MSE':  # criterion2 only exists for ssim/vggloss
            criterion2 = criterion2.cuda()
    else:
        model = model.cpu()
        criterion = criterion.cpu()
        if opt.loss != 'MSE':
            criterion2 = criterion2.cpu()

    print("==========> Setting Optimizer")
    optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                  model.parameters()),
                           lr=opt.lr,
                           eps=1e-4)

    best_score = 0
    print("==========> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, indoor_test_loader, optimizer, epoch)
        score = test(indoor_test_loader, epoch)
        #score = testLargeImage(opt.test)
        if score > best_score:  # keep only the best-scoring checkpoint
            best_score = score
            try:
                del_checkpoint(name)
            except Exception:
                print('Nothing is deleted!')
            save_checkpoint(model, epoch, name)
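
When opt.loss == 'ssim', criterion2 is the SSIM module from the pytorch_ssim package. SSIM is a similarity score in [0, 1] (higher is better), so a training loop would normally turn it into a loss. A quick standalone sketch (tensor shapes are illustrative):

import torch
import pytorch_ssim

ssim = pytorch_ssim.SSIM()      # differentiable SSIM, window_size=11 by default
pred = torch.rand(1, 3, 64, 64, requires_grad=True)
target = torch.rand(1, 3, 64, 64)
loss = 1 - ssim(pred, target)   # minimize 1 - SSIM to maximize similarity
loss.backward()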
Example #4
    default='/export/liuzhe/data/dataset/DIV2K3/DIV2K_val_LR_bicubic',
    help='lr_dataset directory')

args = parser.parse_args()

device = torch.device("cuda:0" if (
    torch.cuda.is_available() and args.cuda) else "cpu")
torch.manual_seed(0)
if torch.cuda.is_available():
    torch.cuda.manual_seed(0)

# Parameters
BATCH_SIZE = 4
NUM_WORKERS = 0  # on Windows, set this variable to 0

trainset = DatasetFromFolder(split='train',
                             hr_path=args.hr_path,
                             lr_path=args.lr_path,
                             zoom_factor=args.zoom_factor)
testset = DatasetFromFolder(split='test',
                            hr_path=args.hrval_path,
                            lr_path=args.lrval_path,
                            zoom_factor=args.zoom_factor)

trainloader = DataLoader(dataset=trainset,
                         batch_size=BATCH_SIZE,
                         shuffle=False,  # see the shuffled variant commented out below
                         num_workers=NUM_WORKERS)
testloader = DataLoader(dataset=testset,
                        batch_size=BATCH_SIZE,
                        shuffle=False,
                        num_workers=NUM_WORKERS)
# trainloader = DataLoader(dataset=trainset, batch_size=1, shuffle=True, num_workers=NUM_WORKERS)
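
A minimal sketch of how the device and loaders above are typically consumed, assuming the dataset yields (low-res, high-res) tensor pairs; the model and loss steps are placeholders, not part of the excerpt:

# Illustrative iteration over the loaders defined above.
for lr_batch, hr_batch in trainloader:
    lr_batch, hr_batch = lr_batch.to(device), hr_batch.to(device)
    # ... forward pass, loss computation, backward, optimizer step ...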
Example #5
def main():
    global opt, name, logger, netG, netD, vgg, curriculum_ssim, loss_mse, rgb2yuv, instance_ssim, loss_bce

    opt = parser.parse_args()
    name = "ShadowRemoval"
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    seed = 1334
    torch.manual_seed(seed)

    if 'WORLD_SIZE' in os.environ:
        opt.distributed = int(os.environ['WORLD_SIZE']) > 1

    if opt.parallel:
        opt.gpu = opt.local_rank
        torch.cuda.set_device(opt.gpu)
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')
        opt.world_size = torch.distributed.get_world_size()

    if cuda:
        torch.cuda.manual_seed(seed)

    cudnn.benchmark = True

    print("==========> Loading datasets")
    test_dataset = DatasetFromFolder(opt.test,
                                     transform=Compose([ToTensor()]),
                                     training=False,
                                     experiments="ShadowRemoval")

    data_loader = DataLoader(dataset=test_dataset,
                             num_workers=4,
                             batch_size=opt.batchSize,
                             pin_memory=True,
                             shuffle=False)

    print("==========> Building model")
    netG = ShadowRemoval(channels=64)
    print("=========> Building criterion")
    loss_smooth_l1 = nn.SmoothL1Loss()
    loss_l1 = nn.L1Loss()
    loss_mse = torch.nn.MSELoss()
    loss_bce = torch.nn.BCELoss()
    loss_perceptual = perceptual()

    instance_ssim = SSIM(reduction='mean', window_size=7)
    rgb2yuv = rgb2yuv()  # instantiate the module (rebinds the class name)
    curriculum_ssim_mask = CLBase(lossfunc=nn.BCELoss(reduction='none'))
    curriculum_ssim_clean = CLBase()

    # optionally copy weights from a checkpoint
    if opt.pretrained and opt.continue_training:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            netG.load_state_dict(weights['state_dict'])
        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("==========> Setting Optimizer")
    optimizerG = optim.Adam(filter(lambda p: p.requires_grad,
                                   netG.parameters()),
                            lr=opt.lr_g,
                            betas=(0.9, 0.999))

    print("==========> Setting GPU")
    if cuda:
        netG = netG.cuda()
        instance_ssim = instance_ssim.cuda()
        loss_smooth_l1 = loss_smooth_l1.cuda()
        loss_mse = loss_mse.cuda()
        loss_l1 = loss_l1.cuda()
        loss_bce = loss_bce.cuda()
        curriculum_ssim_mask = curriculum_ssim_mask.cuda()
        curriculum_ssim_clean = curriculum_ssim_clean.cuda()
        loss_perceptual = loss_perceptual.cuda()
        rgb2yuv = rgb2yuv.cuda()

        if opt.acceleration:
            print("FP16 training")
            amp.register_float_function(torch, 'sigmoid')
            netG, optimizerG = amp.initialize(netG,
                                              optimizerG,
                                              opt_level=opt.opt_level)

    else:
        netG = netG.cpu()

        instance_ssim = instance_ssim.cpu()
        loss_smooth_l1 = loss_smooth_l1.cpu()
        loss_mse = loss_mse.cpu()
        loss_l1 = loss_l1.cpu()
        loss_bce = loss_bce.cpu()
        curriculum_ssim_mask = curriculum_ssim_mask.cpu()
        curriculum_ssim_clean = curriculum_ssim_clean.cpu()
        loss_perceptual = loss_perceptual.cpu()
        rgb2yuv = rgb2yuv.cpu()

    test(data_loader, netG)
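
amp here is NVIDIA's apex mixed-precision API. amp.initialize patches the model and optimizer for FP16, and any backward pass afterwards is expected to go through the loss scaler. A hedged sketch of that step (loss and tensor names are illustrative; this inference script itself never calls backward):

# Typical apex.amp backward step after amp.initialize.
optimizerG.zero_grad()
loss = loss_mse(netG(inputs), targets)
with amp.scale_loss(loss, optimizerG) as scaled_loss:
    scaled_loss.backward()  # scales gradients to avoid FP16 underflow
optimizerG.step()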
Example #6
def main():
    global opt, name, logger, netG, netD, vgg, curriculum_ssim_mask, curriculum_ssim_clean, loss_mse, rgb2yuv, instance_ssim, loss_bce

    opt = parser.parse_args()
    name = "ShadowRemoval"
    print(opt)

    logger = SummaryWriter("./runs_sr/" +
                           time.strftime("/%Y-%m-%d-%H/", time.localtime()))

    cuda = opt.cuda

    if 'WORLD_SIZE' in os.environ:
        opt.distributed = int(os.environ['WORLD_SIZE']) > 1

    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    if opt.distributed:
        opt.gpu = opt.local_rank
        torch.cuda.set_device(opt.gpu)
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')
        opt.world_size = torch.distributed.get_world_size()

    seed = 1334
    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed(seed)

    cudnn.benchmark = True

    print("==========> Loading datasets")
    train_dataset = DatasetFromFolder(
        opt.train,
        transform=Compose([ToTensor()]),
        training=True,
        experiments="ShadowRemoval",
    )
    test_dataset = DatasetFromFolder(opt.test,
                                     transform=Compose([ToTensor()]),
                                     training=False,
                                     experiments="ShadowRemoval")

    train_data_loader = DataLoader(dataset=train_dataset,
                                   num_workers=4,
                                   batch_size=opt.batchSize,
                                   pin_memory=True,
                                   shuffle=True)
    test_data_loader = DataLoader(dataset=test_dataset,
                                  num_workers=4,
                                  batch_size=opt.batchSize,
                                  pin_memory=True,
                                  shuffle=False)

    print("==========> Building model")
    netG = ShadowRemoval(channels=64)
    netD = Discrimator(in_channels=6, channels=64, depth=3)

    print("=========> Building criterion")
    loss_smooth_l1 = nn.SmoothL1Loss()
    loss_l1 = nn.L1Loss()
    loss_mse = torch.nn.MSELoss()
    loss_bce = torch.nn.BCELoss()
    loss_perceptual = perceptual()

    instance_ssim = SSIM(reduction='mean', window_size=7)
    rgb2yuv = rgb2yuv()
    curriculum_ssim_mask = CLBase(lossfunc=nn.BCELoss(reduction='none'))
    curriculum_ssim_clean = CLBase()

    # optionally copy weights from a checkpoint
    if opt.pretrained and opt.continue_training:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            netG.load_state_dict(weights['state_dict'])

        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("==========> Setting Optimizer")
    optimizerG = optim.Adam(filter(lambda p: p.requires_grad,
                                   netG.parameters()),
                            lr=opt.lr_g,
                            betas=(0.9, 0.999))
    #optimizerD = optim.Adam(filter(lambda p: p.requires_grad, netD.module.parameters() if opt.parallel else netD.parameters()), lr=opt.lr_d, betas = (0.5, 0.999))
    optimizerD = optim.SGD(filter(lambda p: p.requires_grad,
                                  netD.parameters()),
                           lr=opt.lr_d)

    print("==========> Setting GPU")
    if cuda:
        netG = netG.cuda()
        netD = netD.cuda()

        instance_ssim = instance_ssim.cuda()
        loss_smooth_l1 = loss_smooth_l1.cuda()
        loss_mse = loss_mse.cuda()
        loss_l1 = loss_l1.cuda()
        loss_bce = loss_bce.cuda()
        curriculum_ssim_mask = curriculum_ssim_mask.cuda()
        curriculum_ssim_clean = curriculum_ssim_clean.cuda()
        loss_perceptual = loss_perceptual.cuda()
        rgb2yuv = rgb2yuv.cuda()

        if opt.acceleration:
            print("FP16 training")
            amp.register_float_function(torch, 'sigmoid')
            [netD, netG], [optimizerD, optimizerG] = amp.initialize(
                [netD, netG], [optimizerD, optimizerG],
                opt_level=opt.opt_level)

        if opt.parallel:
            print("Parallel Training")
            netG = nn.DataParallel(netG)
            netD = nn.DataParallel(netD)
        elif opt.distributed:
            netG = DDP(netG, delay_allreduce=True)
            netD = DDP(netD, delay_allreduce=True)

    else:
        netG = netG.cpu()
        netD = netD.cpu()

        instance_ssim = instance_ssim.cpu()
        loss_smooth_l1 = loss_smooth_l1.cpu()
        loss_mse = loss_mse.cpu()
        loss_l1 = loss_l1.cpu()
        loss_bce = loss_bce.cpu()
        curriculum_ssim_mask = curriculum_ssim_mask.cpu()
        curriculum_ssim_clean = curriculum_ssim_clean.cpu()
        loss_perceptual = loss_perceptual.cpu()
        rgb2yuv = rgb2yuv.cpu()

    lr_schedulerG = optim.lr_scheduler.CosineAnnealingLR(optimizerG,
                                                         opt.epoch,
                                                         eta_min=1e-7)
    lr_schedulerD = optim.lr_scheduler.CosineAnnealingLR(optimizerD,
                                                         opt.epoch,
                                                         eta_min=1e-7)

    print("==========> Training")
    for epoch in range(opt.epoch + 1):

        train(train_data_loader,
              netG,
              netD,
              optimizerG,
              optimizerD,
              epoch,
              logger=logger)
        #test(test_data_loader, netG)

        if epoch % opt.save_model_freq == 0:
            save_checkpoint(netG, epoch, name, opt)

        lr_schedulerG.step()
        lr_schedulerD.step()

    logger.close()
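
The train() function is not shown in the excerpt, but the modules built above (a generator, a 6-channel-input discriminator, BCE and MSE losses) suggest a standard conditional-GAN update. A hedged sketch of one step, relying on the loss_bce/loss_mse globals above; batch handling, the adversarial weight (1e-3 here), and amp scaling are assumptions or omitted:

def train_step(shadow, clean, netG, netD, optimizerG, optimizerD):
    # shadow/clean are 3-channel images; netD takes their 6-channel concat.
    fake = netG(shadow)

    # Discriminator step: real pairs -> 1, fake pairs -> 0.
    optimizerD.zero_grad()
    d_real = netD(torch.cat([shadow, clean], dim=1))
    d_fake = netD(torch.cat([shadow, fake.detach()], dim=1))
    d_loss = (loss_bce(d_real, torch.ones_like(d_real)) +
              loss_bce(d_fake, torch.zeros_like(d_fake)))
    d_loss.backward()
    optimizerD.step()

    # Generator step: reconstruct the target and fool the discriminator.
    optimizerG.zero_grad()
    d_out = netD(torch.cat([shadow, fake], dim=1))
    g_loss = loss_mse(fake, clean) + 1e-3 * loss_bce(d_out, torch.ones_like(d_out))
    g_loss.backward()
    optimizerG.step()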
Example #7
def main():
    global opt, name, logger, netG, netD, vgg, curriculum_ssim, loss_mse, rgb2yuv, instance_ssim

    opt = parser.parse_args()
    name = "ShadowSyns"
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    seed = 1334
    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed(seed)

    cudnn.benchmark = True

    print("==========> Loading datasets")
    train_dataset = DatasetFromFolder(opt.train,
                                      transform=Compose([ToTensor()]),
                                      training=True)
    test_dataset = DatasetFromFolder(opt.test,
                                     transform=Compose([ToTensor()]),
                                     training=False)

    train_data_loader = DataLoader(dataset=train_dataset,
                                   num_workers=4,
                                   batch_size=opt.batchSize,
                                   pin_memory=True,
                                   shuffle=True)
    test_data_loader = DataLoader(dataset=test_dataset,
                                  num_workers=4,
                                  batch_size=opt.batchSize,
                                  pin_memory=True,
                                  shuffle=True)


    print("==========> Building model")
    netG = ShadowMattingNet(channels =64, depth = 9)
    netD = Discrimator(in_channels = 7, channels = 64, depth = 5)

    
    print("=========> Building criterion")
    loss_smooth_l1 = nn.SmoothL1Loss()
    loss_l1 = nn.L1Loss()
    loss_mse = torch.nn.MSELoss()
    instance_ssim = SSIM(reduction = 'mean', window_size = 7)
    curriculum_ssim  = CLBase()
    loss_perceptual = perceptual() 
    rgb2yuv = rgb2yuv()
    
    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            netG.load_state_dict(weights['state_dict_g'])

        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("==========> Setting GPU")
    if cuda:
        if opt.parallel:
          netG = nn.DataParallel(netG, [0, 1, 2, 3]).cuda()
          netD = nn.DataParallel(netD, [0, 1, 2, 3]).cuda()
        
          instance_ssim = nn.DataParallel(instance_ssim, [0, 1, 2, 3]).cuda()
          loss_smooth_l1 = nn.DataParallel(loss_smooth_l1, [0, 1, 2, 3]).cuda()        
          loss_mse = nn.DataParallel(loss_mse, [0, 1, 2, 3]).cuda()
          loss_l1 = nn.DataParallel(loss_l1, [0, 1, 2, 3]).cuda()
          curriculum_ssim = nn.DataParallel(curriculum_ssim, [0, 1, 2, 3]).cuda()
          rgb2yuv = nn.DataParallel(rgb2yuv, [0, 1, 2, 3]).cuda()
        else:
          netG = netG.cuda()
          netD = netD.cuda()
        
          instance_ssim = instance_ssim.cuda()
          loss_smooth_l1 = loss_smooth_l1.cuda()        
          loss_mse = loss_mse.cuda()
          loss_l1 = loss_l1.cuda()
          curriculum_ssim = curriculum_ssim.cuda()
          loss_perceptual = loss_perceptual.cuda()
          rgb2yuv = rgb2yuv.cuda()
    else:
        netG = netG.cpu()
        netD = netD.cpu()
        
        instance_ssim = instance_ssim.cpu()
        loss_smooth_l1 = loss_smooth_l1.cpu()        
        loss_mse = loss_mse.cpu()
        loss_l1 = loss_l1.cpu()
        curriculum_ssim = curriculum_ssim.cpu()
        loss_perceptual = loss_perceptual.cpu()
        rgb2yuv = rgb2yuv.cpu()
    
    print("==========> Setting Optimizer")
    
    optimizerG = optim.Adam(filter(lambda p: p.requires_grad, netG.module.parameters() if opt.parallel else netG.parameters()), lr=opt.lr_g, betas = (0.5, 0.99))
    #optimizerD = optim.Adam(filter(lambda p: p.requires_grad, netD.module.parameters() if opt.parallel else netD.parameters()), lr = opt.lr_d, betas = (0.5, 0.999))
    optimizerD = optim.SGD(filter(lambda p: p.requires_grad, netD.module.parameters() if opt.parallel else netD.parameters()), lr = opt.lr_d)

    
    lr_schedulerG = optim.lr_scheduler.CosineAnnealingLR(optimizerG, opt.epoch, eta_min = 1e-7)
    lr_schedulerD = optim.lr_scheduler.CosineAnnealingLR(optimizerD, opt.epoch, eta_min = 1e-7)

   
    print("==========> Training")
    for epoch in range(opt.epoch + 1):

        train(train_data_loader, netG, netD, optimizerG, optimizerD, epoch)
        test(test_data_loader, netG)
        
        if epoch % opt.save_model_freq == 0:
          save_checkpoint(netG, epoch, name)

        lr_schedulerG.step()
        lr_schedulerD.step()     
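
For reference, CosineAnnealingLR with T_max = opt.epoch and eta_min = 1e-7 moves each learning rate along a half cosine from its base value down to eta_min. A quick standalone check of the schedule (the base lr of 1e-4 is just an example value):

import math

base_lr, eta_min, t_max = 1e-4, 1e-7, 100
for t in range(0, t_max + 1, 25):
    lr = eta_min + (base_lr - eta_min) * (1 + math.cos(math.pi * t / t_max)) / 2
    print(t, lr)  # decays smoothly from base_lr at t=0 to eta_min at t=t_max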
Example #8
parser = argparse.ArgumentParser(description='SRCNN training parameters')
parser.add_argument('--zoom_factor', type=int, required=True)
parser.add_argument('--nb_epochs', type=int, default=200)
parser.add_argument('--cuda', action='store_true')
args = parser.parse_args()

device = torch.device("cuda:0" if (
    torch.cuda.is_available() and args.cuda) else "cpu")
torch.manual_seed(0)
if torch.cuda.is_available():
    torch.cuda.manual_seed(0)

# Parameters
BATCH_SIZE = 4
NUM_WORKERS = 0  # on Windows, set this variable to 0

trainset = DatasetFromFolder("data/train", zoom_factor=args.zoom_factor)
testset = DatasetFromFolder("data/test", zoom_factor=args.zoom_factor)

trainloader = DataLoader(dataset=trainset,
                         batch_size=BATCH_SIZE,
                         shuffle=True,
                         num_workers=NUM_WORKERS)
testloader = DataLoader(dataset=testset,
                        batch_size=BATCH_SIZE,
                        shuffle=False,
                        num_workers=NUM_WORKERS)

model = SRCNN().to(device)
criterion = nn.MSELoss()
optimizer = optim.Adam(  # Adam instead of the paper's SGD, since it converges faster
    [