def forward(self, user, pos, neg):
    # BPR pairwise ranking loss over (user, positive item, negative item) triples.
    u_e = self.user_embedding[user]
    pos_e = self.item_embedding[pos]
    neg_e = self.item_embedding[neg]
    loss = bpr_loss(u_e, pos_e, neg_e)
    regularizer = l2_loss(u_e, pos_e, neg_e)
    reg_loss = self.regs * regularizer
    return loss, reg_loss
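# The helpers used above are not shown in this listing. Below is a minimal
# sketch of what `bpr_loss` and `l2_loss` plausibly look like, assuming
# bpr_loss is the standard log-sigmoid pairwise loss and l2_loss sums squared
# norms over any number of tensors; the signatures are assumptions, not the
# repo's actual code.

import torch
import torch.nn.functional as F

def bpr_loss(u_e, pos_e, neg_e):
    # -log sigmoid(score_pos - score_neg), averaged over the batch.
    pos_score = torch.sum(u_e * pos_e, dim=1)
    neg_score = torch.sum(u_e * neg_e, dim=1)
    return -torch.mean(F.logsigmoid(pos_score - neg_score))

def l2_loss(*tensors):
    # Sum of squared L2 norms, halved, as is conventional for weight decay.
    return 0.5 * sum(torch.sum(t ** 2) for t in tensors)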
def get_cls_loss(self, user, item, label):
    # Pointwise classification loss: dot-product score plus a per-item bias,
    # trained with binary cross-entropy on the logits.
    u_e = self.user_embedding[user]
    i_e = self.item_embedding[item]
    b = self.bias[item]
    logits = torch.sum(u_e * i_e, dim=1) + b
    cls_loss = F.binary_cross_entropy_with_logits(logits, label)
    reg_loss = self.regs * l2_loss(u_e, i_e, b)
    return cls_loss, reg_loss
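# Hypothetical usage of get_cls_loss: `model` and the id tensors below are
# made-up stand-ins for illustration. The one real constraint is that
# binary_cross_entropy_with_logits requires float targets, hence `.float()`.
#
#     user = torch.tensor([0, 1, 2])
#     item = torch.tensor([5, 9, 3])
#     label = torch.tensor([1, 0, 1]).float()
#     cls_loss, reg_loss = model.get_cls_loss(user, item, label)
#     (cls_loss + reg_loss).backward()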
def forward(self, user, pos, neg, **kwargs):
    # BPR loss written out inline rather than via the bpr_loss helper.
    u_e = self.user_embedding[user]
    pos_e = self.item_embedding[pos]
    neg_e = self.item_embedding[neg]
    reg_loss = self.regs * l2_loss(u_e, pos_e, neg_e)
    pos_score = torch.sum(pos_e * u_e, dim=1)
    neg_score = torch.sum(neg_e * u_e, dim=1)
    # F.logsigmoid is the numerically stable form of log(sigmoid(x)).
    maxi = F.logsigmoid(pos_score - neg_score)
    bpr_loss = -torch.mean(maxi)
    return bpr_loss, reg_loss
def forward(self, user, items, reward):
    # Generator loss over a list of candidate items: each candidate's
    # log-probability is weighted by its softmax probability times its reward.
    u_e, i_e, b = self._get_embedding(user, items)
    u_e = u_e.unsqueeze(dim=1)
    logits = torch.sum(u_e * i_e, dim=2) + b
    probs = F.softmax(logits, dim=1)
    reward = reward * probs  # weight each candidate's reward by its selection probability
    log_probs = F.log_softmax(logits, dim=1)
    gan_loss = -torch.mean(log_probs * reward)
    regularizer = l2_loss(u_e, i_e, b)
    reg_loss = self.regs * regularizer
    return gan_loss, reg_loss
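# Note on the two generator variants: the forward above weights every
# candidate's log p_i by p_i * R_i, i.e. a REINFORCE-style surrogate
# sum_i p_i * R_i * log p_i taken over the whole candidate list, while the
# sampled variants below estimate the same quantity from a single draw per
# user. This reading is an interpretation of the code, not a claim from
# the original source.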
def forward(self, user, items, reward):
    # Sampled (REINFORCE-style) generator loss: draw one item per user from
    # the softmax policy and reinforce its log-probability by its reward.
    u_e = self.user_embedding[user]
    i_e = self.item_embedding[items]
    u_e = u_e.unsqueeze(dim=1)
    logits = torch.sum(u_e * i_e, dim=2)
    probs = F.softmax(logits, dim=1)
    log_probs = F.log_softmax(logits, dim=1)
    sampled_id = torch.multinomial(probs, num_samples=1)
    row_idx = get_row_index(u_e)
    sampled_probs = log_probs[row_idx, sampled_id]
    sampled_reward = reward[row_idx, sampled_id]
    gan_loss = -torch.mean(sampled_probs * sampled_reward)
    reg_loss = self.regs * l2_loss(u_e, i_e)
    return gan_loss, reg_loss
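# `get_row_index` is not shown in this listing. For the advanced indexing
# above to pair each row with its sampled column, it plausibly returns a
# (batch, 1) column of row indices on the right device; a sketch assuming
# exactly that:

import torch

def get_row_index(x):
    # x: (batch, ...) -> [[0], [1], ..., [batch-1]] for row-wise gathering.
    return torch.arange(x.size(0), device=x.device).unsqueeze(dim=1)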
def forward(self, user, items, reward):
    # Distance-based generator: MLP-projected embeddings, a softmax policy
    # over negated Euclidean distances, then the same sampled policy gradient.
    u_e = self.umlp(self.user_embedding[user])
    i_e = self.imlp(self.item_embedding[items])
    u_e = u_e.unsqueeze(dim=1)
    distance = euclidean_distance(u_e, i_e)
    probs = F.softmax(-distance, dim=1)
    sampled_id = torch.multinomial(probs, num_samples=1)
    row_ids = get_row_index(u_e)
    log_probs = F.log_softmax(-distance, dim=-1)
    sampled_probs = log_probs[row_ids, sampled_id]
    sampled_reward = reward[row_ids, sampled_id]
    gan_loss = -torch.mean(sampled_probs * sampled_reward)
    reg_loss = self.regs * l2_loss(u_e, i_e)
    return gan_loss, reg_loss
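# `euclidean_distance` is also external. A minimal sketch, assuming it
# reduces over the last (embedding) dimension so that broadcasting a
# (batch, 1, d) user against (batch, n, d) items yields (batch, n) distances,
# and (batch, d) against (batch, d) yields (batch,):

import torch

def euclidean_distance(a, b, eps=1e-12):
    # eps keeps the sqrt differentiable when a == b.
    return torch.sqrt(torch.sum((a - b) ** 2, dim=-1) + eps)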
def forward(self, user, pos, negs):
    # Dynamic negative sampling: score a pool of candidate negatives and keep
    # the highest-ranked (hardest) one. Candidate selection runs without
    # gradients; the chosen negative is re-embedded outside no_grad so the
    # BPR loss can backpropagate into it.
    u_e = self.user_embedding[user]
    pos_e = self.item_embedding[pos]
    negs_e = self.item_embedding[negs]
    with torch.no_grad():
        ranking = self.rank(u_e, negs_e)
        indices = torch.argmax(ranking, dim=1).unsqueeze(dim=1)
        row_id = get_row_index(u_e)
        good_neg = negs[row_id, indices].squeeze()
    neg_e = self.item_embedding[good_neg]
    loss = bpr_loss(u_e, pos_e, neg_e)
    regularizer = l2_loss(u_e, pos_e, neg_e)
    reg_loss = self.regs * regularizer
    return loss, reg_loss
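# `self.rank` is defined elsewhere; it scores each candidate negative for a
# user. A plausible inner-product version (an assumption, not the repo's
# actual code):

def rank(self, u_e, negs_e):
    # u_e: (batch, d), negs_e: (batch, n, d) -> scores: (batch, n)
    return torch.sum(u_e.unsqueeze(dim=1) * negs_e, dim=2)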
def forward(self, user, pos, neg, **kwargs):
    # CML-style hinge loss with a rank-aware weight: the fraction of
    # margin-violating impostors among the sampled negatives, scaled by a
    # constant, approximates the positive item's rank, and log(rank + 1)
    # weights each pair's hinge term.
    u_e = self.user_embedding[user]
    pos_e = self.item_embedding[pos]
    neg_e = self.item_embedding[neg]
    negs_e = self.item_embedding[kwargs["negs"]]
    reg_loss = self.regs * l2_loss(u_e, pos_e, neg_e, negs_e)
    pos_d = euclidean_distance(u_e, pos_e)
    neg_d = euclidean_distance(u_e, neg_e)
    negs_d = euclidean_distance(u_e.unsqueeze(dim=1), negs_e)
    # A candidate is an impostor if it violates the margin w.r.t. the positive.
    impostor = (pos_d.unsqueeze(dim=1) - negs_d + self.margin > 0).float()
    rank = torch.mean(impostor, dim=1) * self.n_user
    hinge_loss = torch.sum(
        torch.log(rank + 1)
        * torch.clamp(self.margin + pos_d - neg_d, min=0))
    return hinge_loss, reg_loss
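# How these heads are typically consumed: every forward returns a task loss
# plus its scaled L2 term, and the caller optimizes their sum. A minimal
# training step for the BPR variant, where `model`, `optimizer`, and the
# sampled id tensors are all hypothetical stand-ins:

def train_step(model, optimizer, user, pos, neg):
    optimizer.zero_grad()
    loss, reg_loss = model(user, pos, neg)
    total = loss + reg_loss  # reg term is already scaled by self.regs
    total.backward()
    optimizer.step()
    return total.item()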