Example #1
    def init_lambda_network(self):
        # The lambda network itself must be built before this is called.
        assert hasattr(self, 'lambda_network')
        if self.use_cuda:
            use_cuda(True, self._opt['device_id'])
            self.lambda_network.cuda()
        self.optimizer = use_optimizer(self.lambda_network,
                                       self.lambda_network_opt)
        self.scheduler = ExponentialLR(
            self.optimizer, gamma=self.lambda_network_opt['lr_exp_decay'])
Example #2
    def init_episode(self):
        opt = self.opt
        self.model = MF(opt)
        self._train_step_idx = 0
        if self.use_cuda:
            use_cuda(True, opt['device_id'])
            self.model.cuda()
        self.optimizer = use_optimizer(self.model, opt)
        self.scheduler = ExponentialLR(self.optimizer,
                                       gamma=opt['lr_exp_decay'])
        # Detached snapshot of the initial weights; see the sketch below.
        self.param = [p.data.clone() for p in self.model.parameters()]
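The final line of init_episode snapshots the freshly initialized weights: p.data.clone() allocates new storage, so later in-place optimizer updates leave the snapshot untouched. A standalone illustration with a toy linear model (not the project's MF class):

import torch

model = torch.nn.Linear(4, 1)
snapshot = [p.data.clone() for p in model.parameters()]

# Training mutates the live parameters in place...
opt = torch.optim.SGD(model.parameters(), lr=0.1)
loss = model(torch.randn(8, 4)).pow(2).mean()
loss.backward()
opt.step()

# ...but the snapshot is unaffected and can be restored later.
with torch.no_grad():
    for p, saved in zip(model.parameters(), snapshot):
        p.copy_(saved)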
Example #3
    def __init__(self, opt):
        super(MFBPRFactorizer, self).__init__(opt)
        self.model = MF(opt)
        self.opt = opt
        if self.use_cuda:
            use_cuda(True, opt['device_id'])
            self.model.cuda()

        self.optimizer = use_optimizer(self.model, opt)
        self.scheduler = ExponentialLR(self.optimizer,
                                       gamma=opt['lr_exp_decay'])
Example #4
    def copy(self, new_factorizer):
        """Return a new copy of factorizer

        # Note: directly using deepcopy wont copy factorizer.scheduler correctly
                the gradient of self.model is not copied!
        """
        self.train_step_idx = new_factorizer.train_step_idx
        self.param = new_factorizer.param
        self.model.load_state_dict(new_factorizer.model.state_dict())
        self.optimizer = use_optimizer(self.model, self.opt)
        self.optimizer.load_state_dict(new_factorizer.optimizer.state_dict())

        self.scheduler = ExponentialLR(self.optimizer,
                                       gamma=self.opt['lr_exp_decay'],
                                       last_epoch=self.scheduler.last_epoch)
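As the docstring warns, deepcopying the factorizer would leave the copied scheduler tied to the old optimizer, so copy rebuilds it by hand and re-syncs last_epoch. In recent PyTorch the same effect can be achieved with the scheduler's own state_dict/load_state_dict pair; a standalone sketch with a toy model (not the project's classes):

import torch
from torch.optim.lr_scheduler import ExponentialLR

def make_trio(gamma=0.9):
    model = torch.nn.Linear(4, 1)
    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    sched = ExponentialLR(opt, gamma=gamma)
    return model, opt, sched

src_model, src_opt, src_sched = make_trio()
src_opt.step()
src_sched.step()  # source has advanced one epoch: lr = 0.1 * 0.9

dst_model, dst_opt, dst_sched = make_trio()
dst_model.load_state_dict(src_model.state_dict())  # weights (not grads!)
dst_opt.load_state_dict(src_opt.state_dict())      # lr, momentum buffers
dst_sched.load_state_dict(src_sched.state_dict())  # schedule progress

assert dst_sched.last_epoch == src_sched.last_epoch
assert dst_opt.param_groups[0]['lr'] == src_opt.param_groups[0]['lr']

As in the original copy method, load_state_dict transfers weights but not .grad buffers, which is exactly the caveat raised in the docstring.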
Example #5
    def init_episode(self):
        opt = self.opt
        if opt['model'] == 'linear':
            self.model = LR(opt)
        elif opt['model'] == 'fm':
            self.model = FM(opt)
        elif opt['model'] == 'deepfm':
            self.model = DeepFM(opt)
        elif opt['model'] == 'autoint':
            self.model = AutoInt(opt)
        else:
            raise ValueError("Invalid FM model type: {}".format(opt['model']))

        self._train_step_idx = 0
        if self.use_cuda:
            use_cuda(True, opt['device_id'])
            self.model.cuda()
        self.optimizer = use_optimizer(self.model, opt)
        self.scheduler = ExponentialLR(self.optimizer,
                                       gamma=opt['lr_exp_decay'])
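The if/elif dispatch above (repeated in Example #6 below) can also be written as a lookup table, which keeps the model names and the error message in one place. A sketch assuming the same four model classes are importable (the module path here is hypothetical):

# `fm_models` is a hypothetical module path; LR, FM, DeepFM, and AutoInt
# are the classes referenced in Examples #5 and #6.
from fm_models import LR, FM, DeepFM, AutoInt

MODEL_REGISTRY = {
    'linear': LR,
    'fm': FM,
    'deepfm': DeepFM,
    'autoint': AutoInt,
}

def build_model(opt):
    try:
        model_cls = MODEL_REGISTRY[opt['model']]
    except KeyError:
        raise ValueError("Invalid FM model type: {}".format(opt['model']))
    return model_cls(opt)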
Example #6
    def __init__(self, opt):
        super(FMFactorizer, self).__init__(opt)
        self.opt = opt
        if opt['model'] == 'linear':
            self.model = LR(opt)
        elif opt['model'] == 'fm':
            self.model = FM(opt)
        elif opt['model'] == 'deepfm':
            self.model = DeepFM(opt)
        elif opt['model'] == 'autoint':
            self.model = AutoInt(opt)
        else:
            raise ValueError("Invalid FM model type: {}".format(opt['model']))

        if self.use_cuda:
            use_cuda(True, opt['device_id'])
            self.model.cuda()

        self.optimizer = use_optimizer(self.model, opt)
        self.scheduler = ExponentialLR(self.optimizer,
                                       gamma=opt['lr_exp_decay'])