def load_pretrain_weights(self): """Loading weights from trained MLP model & GMF model""" config = self.config config['latent_dim'] = config['latent_dim_mlp'] mlp_model = MLP(config) if config['use_cuda'] is True: mlp_model.cuda() resume_checkpoint(mlp_model, model_dir=config['pretrain_mlp'], device_id=config['device_id']) self.embedding_user_mlp.weight.data = mlp_model.embedding_user.weight.data self.embedding_item_mlp.weight.data = mlp_model.embedding_item.weight.data for idx in range(len(self.fc_layers)): self.fc_layers[idx].weight.data = mlp_model.fc_layers[ idx].weight.data config['latent_dim'] = config['latent_dim_mf'] gmf_model = GMF(config) if config['use_cuda'] is True: gmf_model.cuda() resume_checkpoint(gmf_model, model_dir=config['pretrain_mf'], device_id=config['device_id']) self.embedding_user_mf.weight.data = gmf_model.embedding_user.weight.data self.embedding_item_mf.weight.data = gmf_model.embedding_item.weight.data self.affine_output.weight.data = 0.5 * torch.cat([ mlp_model.affine_output.weight.data, gmf_model.affine_output.weight.data ], dim=-1) self.affine_output.bias.data = 0.5 * ( mlp_model.affine_output.bias.data + gmf_model.affine_output.bias.data)
def load_pretrain_weights(self): """Loading weights from trained MLP model & GMF model""" config = self.config mlp_model = MLP(config) device_id = -1 if config['use_cuda'] is True: mlp_model.cuda() device_id = config['device_id'] resume_checkpoint(mlp_model, model_dir=config['pretrain_mlp'], device_id=device_id) self.embedding_account_mlp.weight.data = mlp_model.embedding_account.weight.data self.embedding_location_mlp.weight.data = mlp_model.embedding_location.weight.data for idx in range(len(self.fc_layers)): self.fc_layers[idx].weight.data = mlp_model.fc_layers[ idx].weight.data config['latent_dim'] = config['latent_dim_mf'] gmf_model = GMF(config) if config['use_cuda'] is True: gmf_model.cuda() resume_checkpoint(gmf_model, model_dir=config['pretrain_mf'], device_id=device_id) self.embedding_account_mf.weight.data = gmf_model.embedding_account.weight.data self.embedding_location_mf.weight.data = gmf_model.embedding_location.weight.data self.embedding_account_mlp.require = False self.embedding_location_mlp.require = False self.embedding_account_mf.require = False self.embedding_location_mf.require = False
def load_pretrain_weights(self): """Loading weights from trained GMF model""" config = self.config gmf_model = GMF(config) if config['use_cuda'] is True: gmf_model.cuda() resume_checkpoint(gmf_model, model_dir=config['pretrain_mf'], device_id=config['device_id']) self.embedding_user.weight.data = gmf_model.embedding_user.weight.data self.embedding_item.weight.data = gmf_model.embedding_item.weight.data
def load_pretrain_weights(self): """Loading weights from trained GMF model""" config = self.config gmf_model = GMF(config) if config['use_cuda'] is True: gmf_model.cuda() resume_checkpoint(gmf_model, model_dir=config['pretrain_mf'], device_id=config['device_id']) self.embedding_user.weight.data = gmf_model.embedding_user.weight.data self.embedding_item.weight.data = gmf_model.embedding_item.weight.data # class MLPEngine(Engine): # """Engine for training & evaluating GMF model""" # def __init__(self, config): # self.model = MLP(config) # if config['use_cuda'] is True: # use_cuda(True, config['device_id']) # self.model.cuda() # super(MLPEngine, self).__init__(config) # print(self.model) # if config['pretrain']: # self.model.load_pretrain_weights()