Example #1
class loss_function(nn.Module):
  def __init__(self, smoothl1=True, l1=False, mse=False, instance_ssim=True, perceptual_loss=True):
    super(loss_function, self).__init__()
    print("=========> Building criterion")
    self.loss_function = nn.ModuleDict()
    self.weight = dict()

    # Register only the requested criteria; disabled entries stay None.
    self.loss_function['loss_smooth_l1'] = nn.SmoothL1Loss() if smoothl1 else None
    self.loss_function['loss_l1'] = nn.L1Loss() if l1 else None
    self.loss_function['loss_mse'] = torch.nn.MSELoss() if mse else None
    self.loss_function['instance_ssim'] = SSIM(reduction='mean', window_size=7, asloss=True) if instance_ssim else None
    self.loss_function['loss_perceptual'] = perceptual() if perceptual_loss else None

    # All active losses contribute with equal weight.
    self.weight['loss_smooth_l1'] = 1
    self.weight['loss_l1'] = 1
    self.weight['loss_mse'] = 1
    self.weight['instance_ssim'] = 1
    self.weight['loss_perceptual'] = 1

    # Move the active criteria to the requested device.
    if opt.cuda:
      if opt.parallel:
        for key in self.loss_function.keys():
          if self.loss_function[key] is not None:
            self.loss_function[key] = nn.DataParallel(self.loss_function[key], [0, 1, 2, 3]).cuda()
      else:
        for key in self.loss_function.keys():
          if self.loss_function[key] is not None:
            self.loss_function[key] = self.loss_function[key].cuda()
    else:
      for key in self.loss_function.keys():
        if self.loss_function[key] is not None:
          self.loss_function[key] = self.loss_function[key].cpu()
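
The snippet ends with the constructor, so how the registered criteria are combined is not shown. A minimal `forward` sketch, assuming each active criterion maps a (prediction, target) pair to a loss tensor and the equal weights above apply (an illustration, not the repository's actual code):

  def forward(self, prediction, target):
    # Hypothetical combination step: sum every active criterion,
    # scaled by its weight; .mean() collapses any per-GPU gathering
    # that a DataParallel-wrapped loss would return.
    total = 0
    for key, criterion in self.loss_function.items():
      if criterion is not None:
        total = total + self.weight[key] * criterion(prediction, target).mean()
    return total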
Example #2
def main():
    global opt, name, logger, netG, netD, vgg, curriculum_ssim_mask, curriculum_ssim_clean, loss_mse, rgb2yuv, instance_ssim, loss_bce

    opt = parser.parse_args()
    name = "ShadowRemoval"
    print(opt)

    # Tag_ResidualBlocks_BatchSize
    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    seed = 1334
    torch.manual_seed(seed)

    if 'WORLD_SIZE' in os.environ:
        opt.distributed = int(os.environ['WORLD_SIZE']) > 1

    if opt.distributed:
        opt.gpu = opt.local_rank
        torch.cuda.set_device(opt.gpu)
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')
        opt.world_size = torch.distributed.get_world_size()

    if cuda:
        torch.cuda.manual_seed(seed)

    cudnn.benchmark = True

    print("==========> Loading datasets")
    test_dataset = DatasetFromFolder(opt.test,
                                     transform=Compose([ToTensor()]),
                                     training=False,
                                     experiments="ShadowRemoval")

    data_loader = DataLoader(dataset=test_dataset,
                             num_workers=4,
                             batch_size=opt.batchSize,
                             pin_memory=True,
                             shuffle=False)

    print("==========> Building model")
    netG = ShadowRemoval(channels=64)
    print("=========> Building criterion")
    loss_smooth_l1 = nn.SmoothL1Loss()
    loss_l1 = nn.L1Loss()
    loss_mse = torch.nn.MSELoss()
    loss_bce = torch.nn.BCELoss()
    loss_perceptual = perceptual()

    instance_ssim = SSIM(reduction='mean', window_size=7)
    rgb2yuv = rgb2yuv()
    curriculum_ssim_mask = CLBase(lossfunc=nn.BCELoss(reduction='none'))
    curriculum_ssim_clean = CLBase()

    # optionally copy weights from a checkpoint
    if opt.pretrained and opt.continue_training:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            netG.load_state_dict(weights['state_dict'])
        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("==========> Setting Optimizer")
    optimizerG = optim.Adam(filter(lambda p: p.requires_grad,
                                   netG.parameters()),
                            lr=opt.lr_g,
                            betas=(0.9, 0.999))

    print("==========> Setting GPU")
    if cuda:
        netG = netG.cuda()
        instance_ssim = instance_ssim.cuda()
        loss_smooth_l1 = loss_smooth_l1.cuda()
        loss_mse = loss_mse.cuda()
        loss_l1 = loss_l1.cuda()
        loss_bce = loss_bce.cuda()
        curriculum_ssim_mask = curriculum_ssim_mask.cuda()
        curriculum_ssim_clean = curriculum_ssim_clean.cuda()
        loss_perceptual = loss_perceptual.cuda()
        rgb2yuv = rgb2yuv.cuda()

        if opt.acceleration:
            print("FP 16 Trianing")
            amp.register_float_function(torch, 'sigmoid')
            netG, optimizerG = amp.initialize(netG,
                                              optimizerG,
                                              opt_level=opt.opt_level)

    else:
        netG = netG.cpu()

        instance_ssim = instance_ssim.cpu()
        loss_smooth_l1 = loss_smooth_l1.cpu()
        loss_mse = loss_mse.cpu()
        loss_l1 = loss_l1.cpu()
        loss_bce = loss_bce.cpu()
        curriculum_ssim_mask = curriculum_ssim_mask.cpu()
        curriculum_ssim_clean = curriculum_ssim_clean.cpu()
        loss_perceptual = loss_perceptual.cpu()
        rgb2yuv = rgb2yuv.cpu()

    test(data_loader, netG)
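
`test` is defined elsewhere in the repository and is not part of this example. A minimal sketch of such an evaluation loop, assuming the loader yields (shadow, clean, ...) batches and that a single forward pass plus an MSE report is enough; all names and the batch layout here are illustrative:

def test(data_loader, netG):
    # Illustrative evaluation loop, not the repository's actual test().
    netG.eval()
    total_mse, batches = 0.0, 0
    with torch.no_grad():
        for batch in data_loader:
            shadow, clean = batch[0], batch[1]  # assumed batch layout
            if opt.cuda:
                shadow, clean = shadow.cuda(), clean.cuda()
            prediction = netG(shadow)
            total_mse += nn.functional.mse_loss(prediction, clean).item()
            batches += 1
    print("average MSE: {:.6f}".format(total_mse / max(batches, 1)))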
Example #3
def main():
    global opt, name, logger, netG, netD, vgg, curriculum_ssim_mask, curriculum_ssim_clean, loss_mse, rgb2yuv, instance_ssim, loss_bce

    opt = parser.parse_args()
    name = "ShadowRemoval"
    print(opt)

    # Tag_ResidualBlocks_BatchSize
    logger = SummaryWriter("./runs_sr/" +
                           time.strftime("/%Y-%m-%d-%H/", time.localtime()))

    cuda = opt.cuda
    if 'WORLD_SIZE' in os.environ:
        opt.distributed = int(os.environ['WORLD_SIZE']) > 1

    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    if opt.distributed:
        opt.gpu = opt.local_rank
        torch.cuda.set_device(opt.gpu)
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')
        opt.world_size = torch.distributed.get_world_size()

    seed = 1334
    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed(seed)

    cudnn.benchmark = True

    print("==========> Loading datasets")
    train_dataset = DatasetFromFolder(
        opt.train,
        transform=Compose([ToTensor()]),
        training=True,
        experiments="ShadowRemoval",
    )
    test_dataset = DatasetFromFolder(opt.test,
                                     transform=Compose([ToTensor()]),
                                     training=False,
                                     experiments="ShadowRemoval")

    train_data_loader = DataLoader(dataset=train_dataset,
                                   num_workers=4,
                                   batch_size=opt.batchSize,
                                   pin_memory=True,
                                   shuffle=True)
    test_data_loader = DataLoader(dataset=test_dataset,
                                  num_workers=4,
                                  batch_size=opt.batchSize,
                                  pin_memory=True,
                                  shuffle=False)

    print("==========> Building model")
    netG = ShadowRemoval(channels=64)
    netD = Discrimator(in_channels=6, channels=64, depth=3)

    print("=========> Building criterion")
    loss_smooth_l1 = nn.SmoothL1Loss()
    loss_l1 = nn.L1Loss()
    loss_mse = torch.nn.MSELoss()
    loss_bce = torch.nn.BCELoss()
    loss_perceptual = perceptual()

    instance_ssim = SSIM(reduction='mean', window_size=7)
    rgb2yuv = rgb2yuv()
    curriculum_ssim_mask = CLBase(lossfunc=nn.BCELoss(reduction='none'))
    curriculum_ssim_clean = CLBase()

    # optionally copy weights from a checkpoint
    if opt.pretrained and opt.continue_training:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            netG.load_state_dict(weights['state_dict'])

        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("==========> Setting Optimizer")
    optimizerG = optim.Adam(filter(lambda p: p.requires_grad,
                                   netG.parameters()),
                            lr=opt.lr_g,
                            betas=(0.9, 0.999))
    #optimizerD = optim.Adam(filter(lambda p: p.requires_grad, netD.module.parameters() if opt.parallel else netD.parameters()), lr=opt.lr_d, betas = (0.5, 0.999))
    optimizerD = optim.SGD(filter(lambda p: p.requires_grad,
                                  netD.parameters()),
                           lr=opt.lr_d)

    print("==========> Setting GPU")
    if cuda:
        netG = netG.cuda()
        netD = netD.cuda()

        instance_ssim = instance_ssim.cuda()
        loss_smooth_l1 = loss_smooth_l1.cuda()
        loss_mse = loss_mse.cuda()
        loss_l1 = loss_l1.cuda()
        loss_bce = loss_bce.cuda()
        curriculum_ssim_mask = curriculum_ssim_mask.cuda()
        curriculum_ssim_clean = curriculum_ssim_clean.cuda()
        loss_perceptual = loss_perceptual.cuda()
        rgb2yuv = rgb2yuv.cuda()

        if opt.acceleration:
            print("FP16 Training")
            amp.register_float_function(torch, 'sigmoid')
            [netD, netG], [optimizerD, optimizerG] = amp.initialize(
                [netD, netG], [optimizerD, optimizerG],
                opt_level=opt.opt_level)

        if opt.parallel:
            print("Parallel Training")
            netG = nn.DataParallel(netG)
            netD = nn.DataParallel(netD)
        elif opt.distributed:
            netG = DDP(netG, delay_allreduce=True)
            netD = DDP(netD, delay_allreduce=True)

    else:
        netG = netG.cpu()
        netD = netD.cpu()

        instance_ssim = instance_ssim.cpu()
        loss_smooth_l1 = loss_smooth_l1.cpu()
        loss_mse = loss_mse.cpu()
        loss_l1 = loss_l1.cpu()
        loss_bce = loss_bce.cpu()
        curriculum_ssim_mask = curriculum_ssim_mask.cpu()
        curriculum_ssim_clean = curriculum_ssim_clean.cpu()
        loss_perceptual = loss_perceptual.cpu()
        rgb2yuv = rgb2yuv.cpu()

    lr_schedulerG = optim.lr_scheduler.CosineAnnealingLR(optimizerG,
                                                         opt.epoch,
                                                         eta_min=1e-7)
    lr_schedulerD = optim.lr_scheduler.CosineAnnealingLR(optimizerD,
                                                         opt.epoch,
                                                         eta_min=1e-7)

    print("==========> Training")
    for epoch in range(opt.epoch + 1):

        train(train_data_loader,
              netG,
              netD,
              optimizerG,
              optimizerD,
              epoch,
              logger=logger)
        #test(test_data_loader, netG)

        if epoch % opt.save_model_freq == 0:
            save_checkpoint(netG, epoch, name, opt)

        lr_schedulerG.step()
        lr_schedulerD.step()

    logger.close()
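
All of these scripts read their configuration from a module-level `parser` that is defined elsewhere. A sketch covering the options the examples actually access; the option names match the attribute reads above, but the defaults and help texts are assumptions:

import argparse

# Illustrative parser: names mirror the opt.* attributes the scripts read;
# defaults and help strings are guesses, not the repository's values.
parser = argparse.ArgumentParser(description="ShadowRemoval")
parser.add_argument("--train", type=str, help="path to the training set")
parser.add_argument("--test", type=str, help="path to the test set")
parser.add_argument("--batchSize", type=int, default=1)
parser.add_argument("--epoch", type=int, default=100)
parser.add_argument("--lr_g", type=float, default=1e-4)
parser.add_argument("--lr_d", type=float, default=1e-4)
parser.add_argument("--cuda", action="store_true")
parser.add_argument("--parallel", action="store_true")
parser.add_argument("--distributed", action="store_true")
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument("--acceleration", action="store_true")
parser.add_argument("--opt_level", type=str, default="O1")
parser.add_argument("--pretrained", type=str, default="")
parser.add_argument("--continue_training", action="store_true")
parser.add_argument("--save_model_freq", type=int, default=5)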
Example #4
def main():
    global opt, name, logger, netG, netD, vgg, curriculum_ssim, loss_mse, rgb2yuv, instance_ssim

    opt = parser.parse_args()
    name = "ShadowSyns"
    print(opt)

    # Tag_ResidualBlocks_BatchSize
    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    seed = 1334
    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed(seed)

    cudnn.benchmark = True

    print("==========> Loading datasets")
    train_dataset = DatasetFromFolder(
        opt.train, 
        transform=Compose([ToTensor()]),
        training = True,        
        )

    test_dataset = DatasetFromFolder(
        opt.test, 
        transform=Compose([ToTensor()]),
        training = False,
        )

    train_data_loader = DataLoader(dataset=train_dataset, num_workers=4, batch_size=opt.batchSize,
                                      pin_memory=True, shuffle=True)
    test_data_loader = DataLoader(dataset=test_dataset, num_workers=4, batch_size=opt.batchSize,
                                      pin_memory=True, shuffle=True)


    print("==========> Building model")
    netG = ShadowMattingNet(channels =64, depth = 9)
    netD = Discrimator(in_channels = 7, channels = 64, depth = 5)

    
    print("=========> Building criterion")
    loss_smooth_l1 = nn.SmoothL1Loss()
    loss_l1 = nn.L1Loss()
    loss_mse = torch.nn.MSELoss()
    instance_ssim = SSIM(reduction = 'mean', window_size = 7)
    curriculum_ssim  = CLBase()
    loss_perceptual = perceptual() 
    rgb2yuv = rgb2yuv()
    
    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            netG.load_state_dict(weights['state_dict_g'])

        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("==========> Setting GPU")
    if cuda:
        if opt.parallel:
          netG = nn.DataParallel(netG, [0, 1, 2, 3]).cuda()
          netD = nn.DataParallel(netD, [0, 1, 2, 3]).cuda()
        
          instance_ssim = nn.DataParallel(instance_ssim, [0, 1, 2, 3]).cuda()
          loss_smooth_l1 = nn.DataParallel(loss_smooth_l1, [0, 1, 2, 3]).cuda()        
          loss_mse = nn.DataParallel(loss_mse, [0, 1, 2, 3]).cuda()
          loss_l1 = nn.DataParallel(loss_l1, [0, 1, 2, 3]).cuda()
          curriculum_ssim = nn.DataParallel(curriculum_ssim, [0, 1, 2, 3]).cuda()
          rgb2yuv = nn.DataParallel(rgb2yuv, [0, 1, 2, 3]).cuda()
        else:
          netG = netG.cuda()
          netD = netD.cuda()
        
          instance_ssim = instance_ssim.cuda()
          loss_smooth_l1 = loss_smooth_l1.cuda()        
          loss_mse = loss_mse.cuda()
          loss_l1 = loss_l1.cuda()
          curriculum_ssim = curriculum_ssim.cuda()
          loss_perceptual = loss_perceptual.cuda()
          rgb2yuv = rgb2yuv.cuda()
    else:
        netG = netG.cpu()
        netD = netD.cpu()
        
        instance_ssim = instance_ssim.cpu()
        loss_smooth_l1 = loss_smooth_l1.cpu()        
        loss_mse = loss_mse.cpu()
        loss_l1 = loss_l1.cpu()
        curriculum_ssim = curriculum_ssim.cpu()
        loss_perceptual = loss_perceptual.cpu()
        rgb2yuv = rgb2yuv.cpu()
    
    print("==========> Setting Optimizer")
    
    optimizerG = optim.Adam(filter(lambda p: p.requires_grad, netG.module.parameters() if opt.parallel else netG.parameters()), lr=opt.lr_g, betas = (0.5, 0.99))
    #optimizerD = optim.Adam(filter(lambda p: p.requires_grad, netD.module.parameters() if opt.parallel else netD.parameters()), lr = opt.lr_d, betas = (0.5, 0.999))
    optimizerD = optim.SGD(filter(lambda p: p.requires_grad, netD.module.parameters() if opt.parallel else netD.parameters()), lr = opt.lr_d)

    
    lr_schedulerG = optim.lr_scheduler.CosineAnnealingLR(optimizerG, opt.epoch, eta_min = 1e-7)
    lr_schedulerD = optim.lr_scheduler.CosineAnnealingLR(optimizerD, opt.epoch, eta_min = 1e-7)

   
    print("==========> Training")
    for epoch in range(opt.epoch + 1):

        train(train_data_loader, netG, netD, optimizerG, optimizerD, epoch)
        test(test_data_loader, netG)
        
        if epoch % opt.save_model_freq == 0:
            save_checkpoint(netG, epoch, name)

        lr_schedulerG.step()
        lr_schedulerD.step()
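
`save_checkpoint` is likewise defined outside these examples. A minimal sketch consistent with how the checkpoints are loaded above (Example #4 reads a 'state_dict_g' key); the directory layout is an assumption:

import os
import torch

def save_checkpoint(netG, epoch, name):
    # Illustrative checkpoint writer: the 'state_dict_g' key mirrors what
    # Example #4 loads; the folder and filename scheme are assumptions.
    model_folder = os.path.join("checkpoints", name)
    os.makedirs(model_folder, exist_ok=True)
    # Unwrap DataParallel/DDP so the saved keys have no 'module.' prefix.
    state = netG.module.state_dict() if hasattr(netG, "module") else netG.state_dict()
    path = os.path.join(model_folder, "model_epoch_{}.pth".format(epoch))
    torch.save({"state_dict_g": state, "epoch": epoch}, path)
    print("Checkpoint saved to {}".format(path))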