    def initialize(self, opt, net):
        BaseModel.initialize(self, opt)
        self.net = net.to(self.device)
        self.edge_map = EdgeMap(scale=1).to(self.device)

        if self.isTrain:
            # define loss functions
            self.vgg = losses.Vgg19(requires_grad=False).to(self.device)
            self.loss_dic = losses.init_loss(opt, self.Tensor)
            vggloss = losses.ContentLoss()
            vggloss.initialize(losses.VGGLoss(self.vgg))
            self.loss_dic['t_vgg'] = vggloss

            cxloss = losses.ContentLoss()
            if opt.unaligned_loss == 'vgg':
                cxloss.initialize(
                    losses.VGGLoss(self.vgg, weights=[0.1], indices=[31]))
            elif opt.unaligned_loss == 'ctx':
                cxloss.initialize(
                    losses.CXLoss(self.vgg,
                                  weights=[0.1, 0.1, 0.1],
                                  indices=[8, 13, 22]))
            elif opt.unaligned_loss == 'mse':
                cxloss.initialize(nn.MSELoss())
            elif opt.unaligned_loss == 'ctx_vgg':
                cxloss.initialize(
                    losses.CXLoss(self.vgg,
                                  weights=[0.1, 0.1, 0.1, 0.1],
                                  indices=[8, 13, 22, 31],
                                  criterions=[losses.CX_loss] * 3 +
                                  [nn.L1Loss()]))
            else:
                raise NotImplementedError

            self.loss_dic['t_cx'] = cxloss

            # initialize optimizers
            self.optimizer_G = torch.optim.Adam(self.net.parameters(),
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999),
                                                weight_decay=opt.wd)

            self._init_optimizer([self.optimizer_G])

            # define discriminator
            # if self.opt.lambda_gan > 0:
            self.netD = networks.define_D(opt, 3)
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999))
            self._init_optimizer([self.optimizer_D])

        if not opt.no_verbose:
            self.print_network()
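
# A hypothetical usage sketch (not from the source): how the loss wrappers
# registered above might drive one generator update. `model` is assumed to be
# an initialized instance of this class, and the pix2pix-style ContentLoss
# wrapper is assumed to expose get_loss(fake, real).
output = model.net(input_batch)  # `input_batch`, `target`, `unaligned_target` assumed
loss_vgg = model.loss_dic['t_vgg'].get_loss(output, target)
loss_cx = model.loss_dic['t_cx'].get_loss(output, unaligned_target)
loss = loss_vgg + loss_cx
model.optimizer_G.zero_grad()
loss.backward()
model.optimizer_G.step()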
Example #2
# ### Cycle consistency loss

# In[9]:


import torch
import itertools
import util.util as util
import numpy as np
from models.losses import init_loss

"""Quote from the paper about the loss function: For all the experiments, we set λ = 10 in Equation 3.
We use the Adam solver [24] with a batch size of 1"""

cycle_consistency_criterion = torch.nn.L1Loss()
_, contentLoss = init_loss(opt, torch.cuda.FloatTensor)
#criterion= forward_cycle_consistency_criterion+backward_cycle_consistency_criterion()

# lambda_cycle is unused for now, since only the cycle-consistency loss is applied

optimizer = torch.optim.Adam(
    itertools.chain(
        filter(lambda p: p.requires_grad, netG_frozen_deblur.parameters()),
        filter(lambda p: p.requires_grad, netG_frozen_blur.parameters())),
    lr=learning_rate)
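
# A minimal sketch (not from the source) of one optimization step built on the
# criterion above. `real_blur` and `real_sharp` batches are assumed, as is the
# direction of each generator (netG_frozen_deblur: blur -> sharp,
# netG_frozen_blur: sharp -> blur); lambda = 10 follows the quoted paper.
lambda_cycle = 10

fake_sharp = netG_frozen_deblur(real_blur)
rec_blur = netG_frozen_blur(fake_sharp)    # forward cycle: blur -> sharp -> blur
loss_fwd = cycle_consistency_criterion(rec_blur, real_blur)

fake_blur = netG_frozen_blur(real_sharp)
rec_sharp = netG_frozen_deblur(fake_blur)  # backward cycle: sharp -> blur -> sharp
loss_bwd = cycle_consistency_criterion(rec_sharp, real_sharp)

loss = lambda_cycle * (loss_fwd + loss_bwd)
optimizer.zero_grad()
loss.backward()
optimizer.step()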


# ### Training

# In[10]:

def model_type_gpu(blur_net, deblur_net):
    num_gpus = torch.cuda.device_count()

Example #3

    def initialize(self, opt):
        if len(opt.gpu_ids) > 0:
            self.device = torch.device(
                "cuda:0" if torch.cuda.is_available() else "cpu")
        else:
            self.device = torch.device("cpu")
        BaseModel.initialize(self, opt)

        in_channels = 3
        self.vgg = None

        if opt.hyper:
            self.vgg = losses.Vgg19(requires_grad=False).to(self.device)
            # in_channels += 1472  # original hypercolumn channel count
            # siju mod: reduced to 1280
            in_channels += 1280

        self.net_i = arch.__dict__[self.opt.inet](in_channels,
                                                  3).to(self.device)
        networks.init_weights(
            self.net_i,
            init_type=opt.init_type)  # same default initialization as EDSR
        self.edge_map = EdgeMap(scale=1).to(self.device)

        if self.isTrain:
            # define loss functions
            self.loss_dic = losses.init_loss(opt, self.Tensor)
            vggloss = losses.ContentLoss()
            vggloss.initialize(losses.VGGLoss(self.vgg))
            self.loss_dic['t_vgg'] = vggloss

            cxloss = losses.ContentLoss()
            if opt.unaligned_loss == 'vgg':
                cxloss.initialize(
                    losses.VGGLoss(self.vgg,
                                   weights=[0.1],
                                   indices=[opt.vgg_layer]))
            elif opt.unaligned_loss == 'ctx':
                cxloss.initialize(
                    losses.CXLoss(self.vgg,
                                  weights=[0.1, 0.1, 0.1],
                                  indices=[8, 13, 22]))
            elif opt.unaligned_loss == 'mse':
                cxloss.initialize(nn.MSELoss())
            elif opt.unaligned_loss == 'ctx_vgg':
                cxloss.initialize(
                    losses.CXLoss(self.vgg,
                                  weights=[0.1, 0.1, 0.1, 0.1],
                                  indices=[8, 13, 22, 31],
                                  criterions=[losses.CX_loss] * 3 +
                                  [nn.L1Loss()]))
            else:
                raise NotImplementedError

            self.loss_dic['t_cx'] = cxloss

            # Define discriminator
            # if self.opt.lambda_gan > 0:
            self.netD = networks.define_D(opt, 3)
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                                lr=opt.lr,
                                                betas=(0.9, 0.999))
            self._init_optimizer([self.optimizer_D])

            # initialize optimizers
            self.optimizer_G = torch.optim.Adam(self.net_i.parameters(),
                                                lr=opt.lr,
                                                betas=(0.9, 0.999),
                                                weight_decay=opt.wd)

            self._init_optimizer([self.optimizer_G])

        if opt.resume:
            self.load(self, opt.resume_epoch)

        if not opt.no_verbose:
            self.print_network()
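
# A hypothetical sketch (not from the source) of the hypercolumn input that
# motivates `in_channels += 1280` above: intermediate VGG-19 feature maps are
# upsampled to the input resolution and concatenated with the RGB input. The
# assumption that `vgg` returns a list of feature maps, and the layer choice
# behind the channel count, are illustrative.
import torch
import torch.nn.functional as F

def build_hypercolumn(x, vgg):
    feats = vgg(x)  # assumed: list of intermediate feature maps
    h, w = x.shape[2], x.shape[3]
    # bring every feature map to the input resolution
    ups = [F.interpolate(f, size=(h, w), mode='bilinear', align_corners=False)
           for f in feats]
    # channels: 3 (RGB) + sum of the selected feature-map channels
    return torch.cat([x] + ups, dim=1)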
Example #4
lrd = 1e-6
transforms = None  # placeholder for data augmentation; only the transforms defined above are used for now

# ### Cycle consistency loss

# In[9]:

import torch
import itertools
import util.util as util
import numpy as np
from models.losses import init_loss
"""Quote from the paper about the loss function: For all the experiments, we set λ = 10 in Equation 3.
We use the Adam solver [24] with a batch size of 1"""

cycle_consistency_criterion = torch.nn.L1Loss()
disLoss, _ = init_loss(opt, torch.cuda.FloatTensor)
#criterion= forward_cycle_consistency_criterion+backward_cycle_consistency_criterion()

# ### Training

# In[10]:


def model_type_gpu(blur_net, deblur_net):
    num_gpus = torch.cuda.device_count()

    if num_gpus > 1:
        print("more than 1 GPU detected...")
        netDeblur = torch.nn.DataParallel(deblur_net)
        netBlur = torch.nn.DataParallel(blur_net)
    else:
        # single-GPU/CPU fallback; the original snippet is truncated here,
        # so this branch and the return are assumed completions
        netDeblur = deblur_net
        netBlur = blur_net
    return netBlur, netDeblur
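
# Hypothetical usage, assuming the generators created earlier in the notebook:
netBlur, netDeblur = model_type_gpu(netG_frozen_blur, netG_frozen_deblur)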