Example #1
    def initialize(self, opt):
        BaseModel.initialize(self, opt)
        self.isTrain = opt.isTrain
        self.loss_names = ['loss']
        self.model_names = ['fc1', 'fc2']
        self.fc1 = init_net(MyLinear(opt.max_idx, 4096), init_type='normal', gpu=self.opt.gpu)
        self.fc2 = init_net(MyLinear(4096, 1), init_type='normal', gpu=self.opt.gpu)

        if opt.isTrain:
            if opt.propensity == 'no':
                self.criterion = nn.BCELoss(reduction='mean')  # size_average=True is deprecated; 'mean' is the equivalent reduction
                if opt.gpu >= 0:
                    self.criterion.cuda(opt.gpu)
            else:
                self.criterion = None

            self.schedulers = []
            self.optimizers = []
            self.optimizer = torch.optim.Adam(self.parameters(), lr=opt.lr, weight_decay=1e-5)
            # self.optimizer = torch.optim.SparseAdam(self.parameters(), lr=opt.lr)
            self.optimizers.append(self.optimizer)
            for optimizer in self.optimizers:
                self.schedulers.append(networks.get_scheduler(optimizer, opt))

        if not self.isTrain or opt.continue_train:
            self.load_networks(opt.which_epoch)

        self.print_networks()
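
The helper init_net is referenced here but never defined in these snippets. A minimal sketch of what it plausibly does, judging from the call sites above: apply a normal-distributed weight init and move the network to a CUDA device. The init_gain parameter is an assumption.

import torch.nn as nn

def init_net(net, init_type='normal', gpu=-1, init_gain=0.02):
    # Hypothetical reconstruction based on the call sites above.
    def init_func(m):
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and ('Linear' in classname or 'Conv' in classname):
            if init_type == 'normal':
                nn.init.normal_(m.weight.data, 0.0, init_gain)
            if hasattr(m, 'bias') and m.bias is not None:
                nn.init.constant_(m.bias.data, 0.0)
    if gpu >= 0:
        net.cuda(gpu)  # move parameters to the GPU before initializing
    net.apply(init_func)
    return net
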
Example #2
    def setup(self):
        if self.isTrain:
            self.schedulers = [
                networks.get_scheduler(optimizer)
                for optimizer in self.optimizers
            ]

        if not self.isTrain or cfg.continue_train:
            self.load_networks(cfg.which_epoch)
        # self.print_networks(opt.verbose)
        self.print_named_networks(cfg.print_net_in_detail)
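
Whichever variant is used, setup is intended to be called once, right after the model object is constructed. A hedged usage sketch; the model class name is made up for illustration:

model = SomeModel()    # hypothetical subclass of the BaseModel above
model.initialize(opt)  # build networks, losses and optimizers
model.setup()          # create schedulers, optionally resume, print networks
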
Example #3
    def setup(self, opt):
        """Load and print networks; create schedulers

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        if self.isTrain:
            self.schedulers = [
                networks.get_scheduler(optimizer, opt)
                for optimizer in self.optimizers
            ]
        if not self.isTrain or opt.continue_train:
            load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch
            self.load_networks(load_suffix)
        self.print_networks(opt.verbose)
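
networks.get_scheduler appears in every variant but is never shown. A minimal sketch, assuming the pix2pix-style convention in which opt.lr_policy selects the schedule; the option names lr_policy, epoch_count, n_epochs, n_epochs_decay and lr_decay_iters are assumptions:

from torch.optim import lr_scheduler

def get_scheduler(optimizer, opt):
    # Hypothetical reconstruction of the scheduler factory used above.
    if opt.lr_policy == 'linear':
        # keep the LR flat for n_epochs, then decay linearly to zero
        def lambda_rule(epoch):
            return 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
        return lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        return lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        return lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, patience=5)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
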
Example #4
    def _init_optimizers(self):
        if self.opt.stage == 0:
            param_groups = [{'params': self.net_E.module.base_model.parameters(), 'lr_mult': 0.1},
                            {'params': self.net_E.module.classifier.parameters(), 'lr_mult': 1.0}]
            self.optimizer_E = torch.optim.SGD(param_groups, lr=self.opt.lr, momentum=0.9, weight_decay=5e-4)
            #self.optimizer_E = torch.optim.Adam(param_groups, lr=self.opt.lr, betas=(0.9, 0.999), weight_decay=5e-4)
            self.optimizer_G = torch.optim.Adam(self.net_G.parameters(),
                                                lr=1e-5, betas=(0.5, 0.999))
            self.optimizer_Di = torch.optim.SGD(self.net_Di.parameters(),
                                                lr=4e-5, momentum=0.9, weight_decay=1e-4)
            self.optimizer_Dp = torch.optim.SGD(self.net_Dp.parameters(),
                                                lr=4e-5, momentum=0.9, weight_decay=1e-4)
        elif self.opt.stage == 1:
            param_groups = [{'params': self.net_E.module.base_model.parameters(), 'lr_mult': 0.1},
                            {'params': self.net_E.module.classifier.parameters(), 'lr_mult': 0.1}]

            self.optimizer_E = torch.optim.SGD(param_groups, lr=self.opt.lr, momentum=0.9, weight_decay=5e-4)

            self.optimizer_G = torch.optim.Adam(self.net_G.parameters(),
                                                lr=self.opt.lr*0.1, betas=(0.5, 0.999))
            self.optimizer_Di = torch.optim.SGD(self.net_Di.parameters(),
                                                lr=self.opt.lr, momentum=0.9, weight_decay=1e-4)
            self.optimizer_Dp = torch.optim.SGD(self.net_Dp.parameters(),
                                                lr=self.opt.lr, momentum=0.9, weight_decay=1e-4)
        elif self.opt.stage == 2:
            param_groups = [{'params': self.net_E.module.base_model.parameters(), 'lr_mult': 0.01},
                            {'params': self.net_E.module.classifier.parameters(), 'lr_mult': 0.1}]

            self.optimizer_E = torch.optim.SGD(param_groups, lr=self.opt.lr, momentum=0.9, weight_decay=5e-4)

            self.optimizer_G = torch.optim.Adam(self.net_G.parameters(),
                                                lr=1e-6, betas=(0.5, 0.999))
            self.optimizer_Di = torch.optim.SGD(self.net_Di.parameters(),
                                                lr=1e-5, momentum=0.9, weight_decay=1e-4)
            self.optimizer_Dp = torch.optim.SGD(self.net_Dp.parameters(),
                                                lr=1e-5, momentum=0.9, weight_decay=1e-4)
        else:
            # without this guard, an unknown stage would leave the optimizers
            # undefined and crash below with a confusing AttributeError
            raise ValueError('unexpected stage: {}'.format(self.opt.stage))


        self.schedulers = []
        self.optimizers = []
        self.optimizers.append(self.optimizer_E)
        self.optimizers.append(self.optimizer_G)
        self.optimizers.append(self.optimizer_Di)
        self.optimizers.append(self.optimizer_Dp)
        for optimizer in self.optimizers:
            self.schedulers.append(get_scheduler(optimizer, self.opt))
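
Note that lr_mult is not a key torch.optim.SGD interprets on its own; extra entries are simply stored on each param group, so separate code has to read them when adjusting the learning rate. A sketch of such an adjuster; the step-decay schedule itself is an assumption:

def adjust_lr(optimizer, base_lr, epoch, step_size=40):
    # Hypothetical helper: decay the base LR every step_size epochs, then
    # scale each param group by its custom 'lr_mult' entry.
    lr = base_lr * (0.1 ** (epoch // step_size))
    for group in optimizer.param_groups:
        group['lr'] = lr * group.get('lr_mult', 1.0)
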
Example #5
    def initialize(self, opt):
        BaseModel.initialize(self, opt)
        self.isTrain = opt.isTrain
        self.loss_names = ['loss']
        self.info_names = []
        self.model_names = ['fc1', 'fc2']
        self.fc1 = init_net(nn.Linear(opt.max_idx, 4096),
                            init_type='normal',
                            gpu=self.opt.gpu)
        self.fc2 = init_net(nn.Linear(4096, 1),
                            init_type='normal',
                            gpu=self.opt.gpu)

        class POEMLoss(nn.Module):
            def __init__(self):
                super(POEMLoss, self).__init__()

            def forward(self, weight, theta):
                return torch.sum(theta * weight) / weight.shape[0]

        if opt.isTrain:
            self.criterion = POEMLoss()

            self.schedulers = []
            self.optimizers = []
            self.optimizer = torch.optim.Adam(self.parameters(),
                                              lr=opt.lr,
                                              weight_decay=1e-3)
            self.optimizers.append(self.optimizer)
            for optimizer in self.optimizers:
                self.schedulers.append(networks.get_scheduler(optimizer, opt))

        if not self.isTrain or opt.continue_train:
            self.load_networks(opt.which_epoch)

        self.print_networks()
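
POEMLoss just averages the elementwise product theta * weight over the batch. A small usage sketch; reading the two tensors as per-example scores and inverse-propensity weights is an assumption:

criterion = POEMLoss()
weight = torch.tensor([0.5, 2.0, 1.0])  # e.g. inverse-propensity weights (assumed meaning)
theta = torch.tensor([0.2, 0.7, 0.1])   # e.g. per-example scores (assumed meaning)
loss = criterion(weight, theta)         # sum(theta * weight) / batch size
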
Example #6
if opt.loss_method != 'WGAN-GP':
    use_sigmoid = True
    norm = 'batch'
else:
    use_sigmoid = False
    norm = 'instance'

net_d = define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD, norm=norm, gpu_id=device, use_sigmoid=use_sigmoid)

criterionGAN = GANLoss(opt.loss_method).to(device)
criterionL1 = nn.L1Loss().to(device)
criterionMSE = nn.MSELoss().to(device)
criterionSSIM = ssim
optimizer_g = optim.Adam(net_g.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizer_d = optim.Adam(net_d.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
net_g_scheduler = get_scheduler(optimizer_g, opt)
net_d_scheduler = get_scheduler(optimizer_d, opt)

if opt.resume_netG_path:
    # resume training
    if os.path.isfile(opt.resume_netG_path):
        print("====>loading checkpoint for netG {}".format(opt.resume_netG_path))
        checkpoint = torch.load(opt.resume_netG_path)
        opt.start_epoch = checkpoint['epoch']
        opt.epoch_count = opt.start_epoch
        net_g.load_state_dict(checkpoint['netG_state_dict'])
        optimizer_g.load_state_dict(checkpoint['optimizer_state_dict'])
        net_g_scheduler.load_state_dict(checkpoint['lr_learning_rate'])
    if os.path.isfile(opt.resume_netD_path):
        print("===>loading checkpoint for netD {}".format(opt.resume_netD_path))
        checkpoint = torch.load(opt.resume_netD_path)
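
The keys read during resume imply how the checkpoint was written. A hedged sketch of the matching save side; the output path and the epoch variable are assumptions, and the key name lr_learning_rate is kept from the load side even though it stores scheduler state:

torch.save({
    'epoch': epoch,                                    # assumed training-loop variable
    'netG_state_dict': net_g.state_dict(),
    'optimizer_state_dict': optimizer_g.state_dict(),
    'lr_learning_rate': net_g_scheduler.state_dict(),
}, 'checkpoint/netG_epoch_{}.pth'.format(epoch))       # path is an assumption
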