def forward(self, pos_g, neg_g, gpu_id=-1):
    """Compute the training loss over a positive graph and its negative graph.

    Parameters
    ----------
    pos_g : DGLGraph
        Graph holding the positive (true) edges.
    neg_g : DGLGraph
        Graph holding the negative (corrupted) edges.
    gpu_id : int
        Device to compute on; -1 means CPU.

    Returns
    -------
    tensor
        Scalar loss value.
    dict
        Breakdown of the loss components (for logging).
    """
    # Attach traced entity/relation embeddings to the positive graph.
    pos_g.ndata['emb'] = self.entity_emb(pos_g.ndata['id'], gpu_id, True)
    pos_g.edata['emb'] = self.relation_emb(pos_g.edata['id'], gpu_id, True)
    self.score_func.prepare(pos_g, gpu_id, True)

    pos_score = logsigmoid(self.predict_score(pos_g))

    # Score the negatives, shipping tensors to the GPU when one is selected.
    if gpu_id >= 0:
        neg_score = self.predict_neg_score(pos_g, neg_g, to_device=cuda,
                                           gpu_id=gpu_id, trace=True)
    else:
        neg_score = self.predict_neg_score(pos_g, neg_g, trace=True)
    neg_score = reshape(neg_score, -1, neg_g.neg_sample_size)

    # Self-adversarial weighting of negatives (softmax weights are detached
    # so they act as constants), otherwise a plain mean over the negatives.
    if self.args.neg_adversarial_sampling:
        adv_weight = F.softmax(neg_score * self.args.adversarial_temperature,
                               dim=1).detach()
        neg_score = F.sum(adv_weight * logsigmoid(-neg_score), dim=1)
    else:
        neg_score = F.mean(logsigmoid(-neg_score), dim=1)

    # subsampling weight
    # TODO: add subsampling to new sampler
    if self.args.non_uni_weight:
        subsampling_weight = pos_g.edata['weight']
        pos_score = (pos_score * subsampling_weight).sum() / subsampling_weight.sum()
        neg_score = (neg_score * subsampling_weight).sum() / subsampling_weight.sum()
    else:
        pos_score = pos_score.mean()
        neg_score = neg_score.mean()

    # Average of positive and negative log-likelihood terms, negated.
    loss = -(pos_score + neg_score) / 2

    log = {
        'pos_loss': -get_scalar(pos_score),
        'neg_loss': -get_scalar(neg_score),
        'loss': get_scalar(loss),
    }

    # regularization: TODO(zihao)
    # TODO: only reg ent&rel embeddings. other params to be added.
    if self.args.regularization_coef > 0.0 and self.args.regularization_norm > 0:
        coef, nm = self.args.regularization_coef, self.args.regularization_norm
        reg = coef * (norm(self.entity_emb.curr_emb(), nm) +
                      norm(self.relation_emb.curr_emb(), nm))
        log['regularization'] = get_scalar(reg)
        loss = loss + reg

    return loss, log
def forward(self, pos_g, neg_g, gpu_id=-1):
    """Do the forward.

    Parameters
    ----------
    pos_g : DGLGraph
        Graph holding positive edges.
    neg_g : DGLGraph
        Graph holding negative edges.
    gpu_id : int
        Which gpu to accelerate the calculation. if -1 is provided, cpu is used.

    Returns
    -------
    tensor
        loss value
    dict
        loss info
    """
    # Attach traced entity/relation embeddings to the positive graph.
    pos_g.ndata['emb'] = self.entity_emb(pos_g.ndata['id'], gpu_id, True)
    pos_g.edata['emb'] = self.relation_emb(pos_g.edata['id'], gpu_id, True)
    self.score_func.prepare(pos_g, gpu_id, True)

    pos_score = logsigmoid(self.predict_score(pos_g))

    # Score the negatives; degree-based negative sampling is forwarded
    # from the training arguments in both the GPU and CPU paths.
    if gpu_id >= 0:
        neg_score = self.predict_neg_score(
            pos_g, neg_g, to_device=cuda, gpu_id=gpu_id, trace=True,
            neg_deg_sample=self.args.neg_deg_sample)
    else:
        neg_score = self.predict_neg_score(
            pos_g, neg_g, trace=True,
            neg_deg_sample=self.args.neg_deg_sample)
    neg_score = reshape(neg_score, -1, neg_g.neg_sample_size)

    # Self-adversarial weighting of negatives (softmax weights are detached
    # so they act as constants), otherwise a plain mean over the negatives.
    if self.args.neg_adversarial_sampling:
        adv_weight = F.softmax(neg_score * self.args.adversarial_temperature,
                               dim=1).detach()
        neg_score = F.sum(adv_weight * logsigmoid(-neg_score), dim=1)
    else:
        neg_score = F.mean(logsigmoid(-neg_score), dim=1)

    # subsampling weight
    # TODO: add subsampling to new sampler; the non-uniform-weight branch
    # (weighting scores by pos_g.edata['weight']) is disabled for now and
    # both terms are reduced by an unweighted mean.
    pos_score = pos_score.mean()
    neg_score = neg_score.mean()

    # Average of positive and negative log-likelihood terms, negated.
    loss = -(pos_score + neg_score) / 2

    log = {
        'pos_loss': -get_scalar(pos_score),
        'neg_loss': -get_scalar(neg_score),
        'loss': get_scalar(loss),
    }

    # regularization: TODO(zihao)
    # TODO: only reg ent&rel embeddings. other params to be added.
    if self.args.regularization_coef > 0.0 and self.args.regularization_norm > 0:
        coef, nm = self.args.regularization_coef, self.args.regularization_norm
        reg = coef * (norm(self.entity_emb.curr_emb(), nm) +
                      norm(self.relation_emb.curr_emb(), nm))
        log['regularization'] = get_scalar(reg)
        loss = loss + reg

    return loss, log