Example 1
    def __init__(self, opt):
        """Initialize the pix2pix class.

        Parameters:
            opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseModel.__init__(self, opt)

        # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
        self.result_names = [
            'real_A', 'mask_M', 'fake_B', 'final_B', 'real_B', 'refer_R',
            'fake_B_gradient', 'real_B_gradient'
        ]
        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
        self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']
        self.gradient = GetGradient()

        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
        if self.isTrain:
            self.model_names = ['G', 'D']
        else:  # during test time, only load G
            self.model_names = ['G']

        # define networks (both generator and discriminator)
        self.netG = networks.generator(opt.input_nc, opt.output_nc,
                                       self.gpu_ids)

        if self.isTrain:
            # define a discriminator; conditional GANs take both the input and output images,
            # so D's input here has input_nc + output_nc + 1 channels (the extra channel is presumably the mask)
            self.netD = networks.discriminator(
                opt.input_nc + opt.output_nc + 1, opt.ndf, opt.n_layers_D,
                opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)

            # define loss functions
            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
            self.criterionL1 = InpaintingLoss(
                networks.VGG16FeatureExtractor()).to(self.device)
            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
            self.optimizer_G = torch.optim.Adam(
                filter(lambda p: p.requires_grad, self.netG.parameters()),
                lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(
                self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)
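
For context, this is how a pix2pix-style model usually consumes the losses and optimizers set up above. The sketch follows the upstream pix2pix BaseModel pattern; backward_D, backward_G and set_requires_grad are assumptions about this repository, not code taken from it.

    def optimize_parameters(self):
        """Hypothetical update step: alternate D and G updates (a sketch, not this repo's code)."""
        self.forward()                             # compute fake_B from real_A
        # update D
        self.set_requires_grad(self.netD, True)    # enable backprop for D
        self.optimizer_D.zero_grad()
        self.backward_D()                          # D_real and D_fake GAN losses
        self.optimizer_D.step()
        # update G
        self.set_requires_grad(self.netD, False)   # D needs no gradients while optimizing G
        self.optimizer_G.zero_grad()
        self.backward_G()                          # G_GAN plus the weighted G_L1 term
        self.optimizer_G.step()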
Example 2
print("Loading the Validation Dataset...")
dataset_val = InitDataset(config.data_root,
                          img_tf,
                          mask_tf,
                          data="val")


print("Loading the Training Dataset...")
dataset_train = InitDataset(config.data_root,
                            img_tf,
                            mask_tf,
                            data="train")

# Loss function
criterion = InpaintingLoss(VGG16FeatureExtractor(),
                           tv_loss=config.tv_loss).to(device)
# Optimizer
lr = config.finetune_lr if config.finetune else config.initial_lr
if config.optim == "Adam":
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                        model.parameters()),
                                    lr=lr,
                                    weight_decay=config.weight_decay)
elif config.optim == "SGD":
    optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad,
                                        model.parameters()),
                                lr=lr,
                                momentum=config.momentum,
                                weight_decay=config.weight_decay)

start_iter = 0
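
With this setup, a training step typically reduces the dictionary of terms returned by InpaintingLoss to one scalar. The sketch below assumes the common partial-convolution formulation, where the criterion is called as criterion(input, mask, output, gt) and returns per-term losses; loader_train and the weights in LAMBDA are assumptions (the usual defaults from that line of work), not values from this code.

# Hypothetical training step; see the assumptions noted above
LAMBDA = {'valid': 1.0, 'hole': 6.0, 'prc': 0.05, 'style': 120.0, 'tv': 0.1}

for image, mask, gt in loader_train:
    image, mask, gt = image.to(device), mask.to(device), gt.to(device)
    output, _ = model(image, mask)                  # prediction and updated mask
    loss_dict = criterion(image, mask, output, gt)  # per-term inpainting losses
    loss = sum(LAMBDA[k] * loss_dict[k] for k in LAMBDA)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()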
Example 3
iterator_train = iter(
    data.DataLoader(dataset_train,
                    batch_size=args.batch_size,
                    sampler=InfiniteSampler(len(dataset_train)),
                    num_workers=args.n_threads))
print(len(dataset_train))
model = PConvUNet(input_guides=1 if use_depth else 0).to(device)

if args.finetune:
    lr = args.lr_finetune
    model.freeze_enc_bn = True
else:
    lr = args.lr

start_iter = 0
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                    model.parameters()),
                             lr=lr)
criterion = InpaintingLoss(VGG16FeatureExtractor()).to(device)

if args.resume:
    start_iter = load_ckpt(args.resume, [('model', model)],
                           [('optimizer', optimizer)])
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    print('Starting from iter ', start_iter)

for i in tqdm(range(start_iter, args.max_iter)):
    model.train()

    image, mask, gt = [x.to(device) for x in next(iterator_train)]
    if args.mask_root is not None:
        # the loader packs an extra guide channel after RGB; split it off
        guide = image[:, 3:4, :, :]
        image = image[:, 0:3, :, :]
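
Both this example and the next draw from iter(data.DataLoader(...)) without ever exhausting it, which is what InfiniteSampler provides. Its implementation is not shown in these snippets; a minimal version consistent with how it is constructed here (from the dataset length) could look like this:

import numpy as np
from torch.utils import data

class InfiniteSampler(data.sampler.Sampler):
    """Yield shuffled indices forever, reshuffling after each full pass (a sketch)."""

    def __init__(self, num_samples):
        self.num_samples = num_samples

    def __iter__(self):
        while True:
            for idx in np.random.permutation(self.num_samples):
                yield idx

    def __len__(self):
        return 2 ** 31  # effectively unbounded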
Example 4
dataset = DS(args.root, img_tf)

iterator_train = iter(
    data.DataLoader(dataset,
                    batch_size=args.batch_size,
                    sampler=InfiniteSampler(len(dataset)),
                    num_workers=args.n_threads))
print(len(dataset))
model = DFNet().to(device)

lr = args.lr

start_iter = 0
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)
criterion = InpaintingLoss().to(device)

if args.resume:
    checkpoint = torch.load(args.resume, map_location=device)
    model.load_state_dict(checkpoint)

for i in tqdm(range(start_iter, args.max_iter)):
    model.train()

    img, mask = [x.to(device) for x in next(iterator_train)]

    # inpainting: mask out the hole regions (mask is assumed to be 1 for valid pixels)
    masked = img * mask

    # mosaic
    """
Example 5
loader_val = data.DataLoader(dataset_val,
                             batch_size=mini_batch,
                             sampler=RandomSampler(data_source=dataset_val),
                             num_workers=4)

loaders = {"train": loader_train, "valid": loader_val}

print('model')

# model, criterion, optimizer, scheduler
# alternative backbones tried here: vgg13, resnet18(pretrained=False), inception_v3
model = resnext101_32x8d().cuda()
criterion = InpaintingLoss(VGG16FeatureExtractor()).cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=0.0)
scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimizer,
    milestones=[3, 6, 9, 12, 18, 24, 30, 40, 50, 60, 70, 80, 90],
    gamma=0.5)

print('training')

# model training
runner = dl.SupervisedRunner()
logdir = './logdir'
runner.train(model=model,
             criterion=criterion,
             optimizer=optimizer,
             scheduler=scheduler,
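
The call is truncated at this point. With Catalyst's SupervisedRunner, the remaining arguments would normally include the loaders and logdir prepared above; a hypothetical completion (num_epochs is assumed, not from this code):

# Hypothetical completion of the truncated call above
runner.train(model=model,
             criterion=criterion,
             optimizer=optimizer,
             scheduler=scheduler,
             loaders=loaders,  # {"train": loader_train, "valid": loader_val}
             logdir=logdir,
             num_epochs=100,   # assumed
             verbose=True)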