Example #1
 def evaluate(self, batch):
     # Sample negatives for both prediction directions (source and target).
     s_negs = self.ns.batch_sample(batch, False)
     t_negs = self.ns.batch_sample(batch, True)
     s_scores = self.model.predict(batch, s_negs, False).data.cpu().numpy()
     t_scores = self.model.predict(batch, t_negs, True).data.cpu().numpy()
     # Rank each positive against its negatives; higher score = better rank.
     s_rank = util.ranks(s_scores, ascending=False)
     t_rank = util.ranks(t_scores, ascending=False)
     # Mean reciprocal rank, averaged over both directions.
     s_mrr = np.mean([1. / r for r in s_rank])
     t_mrr = np.mean([1. / r for r in t_rank])
     return (s_mrr + t_mrr) / 2.
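Every example on this page calls `util.ranks`, whose source is not shown. As a point of reference, here is a minimal sketch of the semantics the snippets appear to assume: 1-based ranks, with `ascending=False` meaning the highest score gets rank 1. The body below is an illustration of that contract, not the actual `util` implementation.

 import numpy as np

 def ranks(scores, ascending=True):
     # 1-based rank of each score under the given sort order;
     # ties are broken arbitrarily by argsort order.
     scores = np.asarray(scores)
     order = np.argsort(scores if ascending else -scores)
     out = np.empty(len(scores), dtype=int)
     out[order] = np.arange(1, len(scores) + 1)
     return out

Under this reading, `ranks(scores, ascending=False)[-1]` is the rank of a positive example that was appended last to `scores`, which is exactly how several of the snippets below use it.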
Example #2
 def calc_scores(batches):
     # Nested helper: `self` and `pos` (the positive example's score)
     # are captured from the enclosing scope.
     scores = []
     for b in batches:
         scores.append(self.model.predict(self.params, b))
     scores = np.array(scores).ravel()
     # Append the positive score last so its rank is the final entry.
     scores = np.append(scores, pos)
     assert pos == scores[-1]
     ranks = util.ranks(scores, ascending=False)
     return ranks[-1]
Example #3
    def predict(self, maximizer, ex):
        samples = self.neg_generator(ex, 't')
        samples.insert(0, ex.t)  # insert the positive target at the front

        scores = maximizer.objective.predict(maximizer.params,
                                             PathQuery(ex.s, ex.r, samples)).ravel()
        assert len(scores.shape) == 1

        # ranks[0] is the rank of the positive among all sampled candidates.
        ranks = util.ranks(scores, ascending=False)
        return samples, scores, ranks
Example #4
 def metrics(self, batch, is_target):
     negs, num_negs = self.ns.batch_sample(batch, is_target)
     scores = self.model.predict(batch, negs, is_target, is_pad=True).data.cpu().numpy()
     # Strip padding so each example is ranked only against its own negatives.
     scores = self.unpad_scores(scores, num_negs)
     ranks = util.ranks(scores, ascending=False)
     self.all_ranks.extend(ranks)
     # self.write_ranks()
     # Hits@10: fraction of positives ranked within the top 10.
     hits_10 = len([x for x in ranks if x <= 10]) / float(len(ranks))
     rr = 1. / np.asarray(ranks)
     return np.mean(rr), hits_10
Example #5
    def evaluate(self, params, batch, num_negs=constants.num_dev_negs):
        pos_scores = self.model.predict(params, batch).flatten()
        pos_ranks = []
        for p, ex in zip(pos_scores, batch):
            negs = self.neg_sampler.sample(ex, num_negs, True)
            neg_batch = [Path(ex.s, ex.r, n) for n in negs]
            scores = self.model.predict(params, neg_batch).flatten()
            # Append the positive score last so its rank is the final entry.
            scores = np.append(scores, p)
            ranks = util.ranks(scores, ascending=False)
            pos_ranks.append(ranks[-1])

        # Mean rank of the positive across the batch.
        return np.nanmean(pos_ranks)
Example #6
    def evaluate(self, params, batch):
        '''
        Computes the mean rank of the positive example in each query.
        :param params: model parameters
        :param batch: data batch
        :return: mean rank
        '''
        pos_scores = self.model.predict(params, batch).flatten()
        pos_ranks = []
        for p, ex in zip(pos_scores, batch):
            negs = self.neg_sampler.sample(ex, self.num_negs, True)
            neg_ex = [Path(ex.s, ex.r, n) for n in negs]
            scores = self.model.predict(params, neg_ex).flatten()
            # Insert the positive at a fixed index so its rank can be
            # read back from that same index.
            scores = np.insert(scores, constants.pos_position, p)
            ranks = util.ranks(scores, ascending=False)
            pos_ranks.append(ranks[constants.pos_position])

        return np.nanmean(pos_ranks)
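All of the examples share one pattern: score the positive alongside sampled negatives, rank the pool, then read off the positive's rank to derive mean rank, MRR, or Hits@k. A toy walk-through using the hypothetical `ranks` sketch above (all scores invented for illustration):

 import numpy as np

 # Four hypothetical negative scores plus one positive appended last.
 neg_scores = np.array([0.2, 0.7, 0.1, 0.4])
 pos_score = 0.6
 scores = np.append(neg_scores, pos_score)

 rank_list = ranks(scores, ascending=False)  # positive is rank_list[-1]
 pos_rank = rank_list[-1]                    # 2: only 0.7 scores higher

 print(1.0 / pos_rank)   # reciprocal rank contribution to MRR: 0.5
 print(pos_rank <= 10)   # counts toward Hits@10: True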