Code example #1
def api(label_tags, test_y, y_scores, all_ids):

    eval_samples = []
    for sample in range(test_y.shape[0]):
        if (test_y[sample, :] == np.ones(test_y.shape[1])).any():
            eval_samples.append(sample)

    test_y, y_scores = test_y[eval_samples, :], y_scores[eval_samples, :]

    ev = Evaluation(y_scores, None, test_y)

    all_rankedat10_tags = []
    query_ids = []

    for sample_id, sample_output in zip(eval_samples, y_scores):
        q_id = all_ids[sample_id]
        query_ids.append(q_id)
        cols = np.argsort(sample_output)[-10:]
        rankedat10_tags = []
        for col in cols[::-1]:
            label_name = label_tags[col]
            rankedat10_tags.append(label_name)
        all_rankedat10_tags.append(rankedat10_tags)

    all_Pat5, all_Pat10, all_Rat5, all_Rat10 = \
        ev.Precision(5, True), ev.Precision(10, True), ev.Recall(5, True), ev.Recall(10, True)
    upper_bounds_pat5 = ev.upper_bound(5, True)
    upper_bounds_pat10 = ev.upper_bound(10, True)
    all_MAP = ev.MeanAveragePrecision(True)
    assert len(all_Pat5) == len(all_rankedat10_tags)

    R = (query_ids, all_rankedat10_tags, list(all_Pat5), list(all_Pat10),
         list(all_Rat5), list(all_Rat10), upper_bounds_pat5,
         upper_bounds_pat10, all_MAP)

    raw_corpus = myio.read_corpus(args.corpus_w_tags, with_tags=True)

    with open(args.results_file, 'w') as f:
        for i in range(len(R[0])):
            query_id, rankedat10_tags, Pat5, Pat10, Rat5, Rat10, UB5, UB10, MAP = \
                R[0][i], R[1][i], R[2][i], R[3][i], R[4][i], R[5][i], R[6][i], R[7][i], R[8][i]

            real_tags = raw_corpus[str(query_id)][2]
            real_tags = list(set(real_tags) & set(label_tags))
            real_tags = " ".join([str(x) for x in real_tags])

            rankedat10_tags = " ".join([str(x) for x in rankedat10_tags])

            f.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(
                query_id, real_tags, rankedat10_tags, Pat5, Pat10, Rat5, Rat10,
                UB5, UB10, MAP))
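The filtering loop at the top of this example keeps only the rows of test_y that contain at least one positive label before scoring. Below is a minimal vectorized sketch of the same check, assuming test_y is a binary 0/1 NumPy array; the names and data are illustrative, not part of the project.

import numpy as np

# Keep only samples that have at least one positive label, mirroring the
# per-sample loop above (assumes test_y holds only 0s and 1s).
test_y = np.array([[0, 1, 0],
                   [0, 0, 0],
                   [1, 0, 1]])
y_scores = np.random.rand(3, 3)

eval_mask = test_y.any(axis=1)              # True where the row has a positive label
test_y, y_scores = test_y[eval_mask], y_scores[eval_mask]
print(test_y.shape)                         # (2, 3): the all-zero row is dropped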
Code example #2
 def evaluate(self, args, data, cnn):
     res = []
     for idts, idbs, labels in data:
         # Embed the flattened title/body token ids, then restore the original
         # 2-D id layout with an embedding dimension appended.
         xt = self.embedding.forward(idts.ravel())
         xt = xt.reshape((idts.shape[0], idts.shape[1], self.embedding.n_d))
         xb = self.embedding.forward(idbs.ravel())
         xb = xb.reshape((idbs.shape[0], idbs.shape[1], self.embedding.n_d))
         titles = Variable(torch.from_numpy(xt)).float()
         bodies = Variable(torch.from_numpy(xb)).float()
         if args.cuda:
             titles = titles.cuda()
             bodies = bodies.cuda()
         # Encode the whole batch; outputs[0] is the query question,
         # outputs[1:] are the candidates.
         outputs = cnn(titles, bodies)
         pos = outputs[0].view(1, outputs[0].size(0))
         # Dot-product similarity between the query encoding and each candidate.
         scores = torch.mm(pos, outputs[1:].transpose(1, 0)).squeeze()
         if args.cuda:
             scores = scores.data.cpu().numpy()
         else:
             scores = scores.data.numpy()
         assert len(scores) == len(labels)
         # Rank candidates by descending score and reorder their gold labels.
         ranks = (-scores).argsort()
         ranked_labels = labels[ranks]
         res.append(ranked_labels)
     e = Evaluation(res)
     MAP = e.MAP() * 100
     MRR = e.MRR() * 100
     P1 = e.Precision(1) * 100
     P5 = e.Precision(5) * 100
     return MAP, MRR, P1, P5
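Most of these examples build Evaluation(res) from a list of per-query label arrays that have already been sorted by descending model score, then read MAP, MRR, P@1 and P@5 from it. The sketch below illustrates those ranking metrics over binary relevance lists; it is a generic illustration, not the project's Evaluation class, and projects differ in details such as how queries without any relevant candidate are counted.

import numpy as np

def mean_average_precision(ranked_label_lists):
    # Each element is a 0/1 array ordered by descending model score.
    aps = []
    for labels in ranked_label_lists:
        hits = np.where(np.asarray(labels) == 1)[0]
        if len(hits) == 0:
            aps.append(0.0)      # one convention for queries with no relevant item
            continue
        # Precision at each rank where a relevant item appears, averaged.
        aps.append(np.mean([(i + 1.0) / (rank + 1.0) for i, rank in enumerate(hits)]))
    return np.mean(aps)

def mean_reciprocal_rank(ranked_label_lists):
    rrs = []
    for labels in ranked_label_lists:
        hits = np.where(np.asarray(labels) == 1)[0]
        rrs.append(1.0 / (hits[0] + 1) if len(hits) else 0.0)
    return np.mean(rrs)

def precision_at_k(ranked_label_lists, k):
    # Fraction of the top-k candidates that are relevant, averaged over queries.
    return np.mean([np.mean(np.asarray(labels)[:k]) for labels in ranked_label_lists])

ranked = [np.array([1, 0, 1, 0, 0]), np.array([0, 0, 0, 1, 0])]
print(mean_average_precision(ranked), mean_reciprocal_rank(ranked),
      precision_at_k(ranked, 1), precision_at_k(ranked, 5))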
Code example #3
    def evaluate(self, data, session):
        # return for each query the labels, ranked results, and scores
        eval_func = self.score_func
        all_ranked_labels = []
        all_ranked_ids = []
        all_ranked_scores = []
        query_ids = []
        all_MAP, all_MRR, all_Pat1, all_Pat5 = [], [], [], []
        for idts, idbs, labels, pid, qids in data:
            scores = eval_func(idts, idbs, session)
            assert len(scores) == len(labels)
            ranks = (-scores).argsort()
            ranked_scores = np.array(scores)[ranks]
            ranked_labels = labels[ranks]
            ranked_ids = np.array(qids)[ranks]
            query_ids.append(pid)
            all_ranked_labels.append(ranked_labels)
            all_ranked_ids.append(ranked_ids)
            all_ranked_scores.append(ranked_scores)
            this_ev = Evaluation([ranked_labels])
            all_MAP.append(this_ev.MAP())
            all_MRR.append(this_ev.MRR())
            all_Pat1.append(this_ev.Precision(1))
            all_Pat5.append(this_ev.Precision(5))

        print 'average all ... ', sum(all_MAP) / len(all_MAP), sum(
            all_MRR) / len(all_MRR), sum(all_Pat1) / len(all_Pat1), sum(
                all_Pat5) / len(all_Pat5)
        return all_MAP, all_MRR, all_Pat1, all_Pat5, all_ranked_labels, all_ranked_ids, query_ids, all_ranked_scores
Code example #4
    def evaluate(self, data, sess):
        res = []
        all_labels = []
        all_scores = []

        sample = 0
        for idts, idbs, id_labels in data:
            sample += 1
            cur_scores = self.eval_batch(idts, idbs, sess)
            assert len(id_labels) == len(cur_scores)  # equal to 20

            all_labels.append(id_labels)
            all_scores.append(cur_scores)
            ranks = (-cur_scores).argsort()
            ranked_labels = id_labels[ranks]
            res.append(ranked_labels)

        e = Evaluation(res)
        MAP = e.MAP()
        MRR = e.MRR()
        P1 = e.Precision(1)
        P5 = e.Precision(5)
        if 'mlp_dim' in self.args and self.args.mlp_dim != 0:
            loss1 = dev_entropy_loss(all_labels, all_scores)
        else:
            loss1 = devloss1(all_labels, all_scores)
        loss0 = devloss0(all_labels, all_scores)
        loss2 = devloss2(all_labels, all_scores)
        return MAP, MRR, P1, P5, loss0, loss1, loss2
Code example #5
File: models.py Project: jimmyz42/question-retrieval
def evaluate(all_ranked_labels):
    evaluator = Evaluation(all_ranked_labels)
    MAP = evaluator.MAP()*100
    MRR = evaluator.MRR()*100
    P1 = evaluator.Precision(1)*100
    P5 = evaluator.Precision(5)*100
    return MAP, MRR, P1, P5
Code example #6
def evaluate(data, labels, model):
    res = [ ]
    model.eval()
    res = compute_scores(data, labels, model)
    evaluation = Evaluation(res)
    MAP = evaluation.MAP()*100
    MRR = evaluation.MRR()*100
    P1 = evaluation.Precision(1)*100
    P5 = evaluation.Precision(5)*100
    print MAP, MRR, P1, P5
    return MAP, MRR, P1, P5
Code example #7
def evaluate(test_y, y_scores, verbose=0, tag_names=None):
    """------------------------------------------remove ill evaluation-------------------------------------------"""
    # eval_labels = []
    # for label in range(test_y.shape[1]):
    #     if (test_y[:, label] == np.ones(test_y.shape[0])).any():
    #         eval_labels.append(label)
    eval_samples = []
    for sample in range(test_y.shape[0]):
        if (test_y[sample, :] == np.ones(test_y.shape[1])).any():
            eval_samples.append(sample)

    test_y, y_scores = test_y[eval_samples, :], y_scores[eval_samples, :]
    # test_y, y_scores = test_y[:, eval_labels], y_scores[:, eval_labels]

    ev = Evaluation(y_scores, None, test_y)

    EVAL_LABELS = set()
    for sample_id, sample_scores in zip(eval_samples, y_scores):
        cols = np.argsort(sample_scores)[-10:]
        for col in cols[::-1]:
            label_name = tag_names[col]
            EVAL_LABELS.add(label_name)
    mat = ev.ConfusionMatrix(5)
    eval_labels = list(EVAL_LABELS & set(TOP50LABELS))
    print_matrix(
        mat,
        tag_names,
        'Confusion:True Tag on x-axis, False Tag on y-axis',
        some_labels=eval_labels,
    )

    if verbose:
        print 'P@1: {}\tP@3: {}\tP@5: {}\tP@10: {}\tR@1: {}\tR@3: {}\tR@5: {}\tR@10: {}\tUBP@5: {}\tUBP@10: {}\tMAP: {}\n'.format(
            ev.Precision(1), ev.Precision(3), ev.Precision(5),
            ev.Precision(10), ev.Recall(1), ev.Recall(3), ev.Recall(5),
            ev.Recall(10), ev.upper_bound(5), ev.upper_bound(10),
            ev.MeanAveragePrecision())
    return ev.Recall(10)
Code example #8
 def evaluate(self, data, eval_func):
     res = []
     for idts, idbs, labels in data:
         scores = eval_func(idts, idbs)
         assert len(scores) == len(labels)
         ranks = (-scores).argsort()
         ranked_labels = labels[ranks]
         res.append(ranked_labels)
     e = Evaluation(res)
     MAP = e.MAP() * 100
     MRR = e.MRR() * 100
     P1 = e.Precision(1) * 100
     P5 = e.Precision(5) * 100
     return MAP, MRR, P1, P5
Code example #9
def evaluate(test_x, test_y, model):
    """"""
    """------------------------------------------remove ill evaluation-------------------------------------------"""
    eval_samples = []
    for sample in range(test_y.shape[0]):
        if (test_y[sample, :] == np.ones(test_y.shape[1])).any():
            eval_samples.append(sample)
    print '\n{} samples out of {} will be evaluated (zero-labeled-samples removed).'.format(len(eval_samples), test_y.shape[0])
    print type(test_y), test_y.shape
    test_x = test_x[eval_samples, :]
    test_y = test_y[eval_samples, :]
    # test_y = test_y[:, eval_labels]
    print test_x.shape, test_x.dtype, type(test_x), test_y.shape, test_y.dtype, type(test_y)
    """------------------------------------------remove ill evaluation-------------------------------------------"""

    y_scores = model.predict_proba(test_x)  # probability for each class
    predictions = model.predict(test_x)  # 1 or 0 for each class

    ev = Evaluation(y_scores, predictions, test_y)
    print 'P@1: {}\tP@3: {}\tP@5: {}\tP@10: {}\tR@1: {}\tR@3: {}\tR@5: {}\tR@10: {}\tUBP@5: {}\tUBP@10: {}\tMAP: {}\n'.format(
        ev.Precision(1), ev.Precision(3), ev.Precision(5), ev.Precision(10),
        ev.Recall(1), ev.Recall(3), ev.Recall(5), ev.Recall(10), ev.upper_bound(5), ev.upper_bound(10),
        ev.MeanAveragePrecision()
    )

    """------------------------------------------remove ill evaluation-------------------------------------------"""
    print 'outputs before ', y_scores.shape
    eval_labels = []
    for label in range(test_y.shape[1]):
        if (test_y[:, label] == np.ones(test_y.shape[0])).any():
            eval_labels.append(label)
    print '\n{} labels out of {} will be evaluated (zero-sampled-labels removed).'.format(len(eval_labels), test_y.shape[1])
    y_scores, predictions, targets = y_scores[:, eval_labels], predictions[:, eval_labels], test_y[:, eval_labels]
    print 'outputs after ', y_scores.shape
    ev = Evaluation(y_scores, predictions, targets)
    print 'precision recall f1 macro: {}'.format(ev.precision_recall_fscore('macro'))
    print 'precision recall f1 micro: {}'.format(ev.precision_recall_fscore('micro'))
Code example #10
    def evaluate(self, data, eval_func):
        res = []
        for idts, idbs, labels, weights in data:
            qq_query_weights = weights[0]

            individual_scores = []
            individual_scores_weights = []

            # for every (original and generated) query question
            for i, qq_query_weight in enumerate(qq_query_weights):
                idts_t = idts.transpose()
                # score all other question titles and generated questions
                idts_individual = np.array(
                    [idts_t[i]] + idts_t[len(qq_query_weights):].tolist(),
                    dtype=np.int32).transpose()
                # now we will add all scores
                scores_for_qq = eval_func(idts_individual)

                for j, cq_weights in enumerate(weights[1:]):
                    if len(individual_scores) == j:
                        individual_scores.append([])
                        individual_scores_weights.append([])
                    individual_scores[j] += scores_for_qq[:len(cq_weights
                                                               )].tolist()
                    individual_scores_weights[j] += [
                        cq_weight * qq_query_weight for cq_weight in cq_weights
                    ]
                    scores_for_qq = scores_for_qq[len(cq_weights):]

            # now we determine the weights
            scores = []
            for individual_scores_item, individual_scores_weights_item in zip(
                    individual_scores, individual_scores_weights):
                scores.append(
                    np.average(individual_scores_item,
                               weights=individual_scores_weights_item))

            assert len(scores) == len(labels)
            scores = np.array(scores)

            ranks = (-scores).argsort()
            ranked_labels = labels[ranks]
            res.append(ranked_labels)
        e = Evaluation(res)
        MAP = e.MAP() * 100
        MRR = e.MRR() * 100
        P1 = e.Precision(1) * 100
        P5 = e.Precision(5) * 100
        return MAP, MRR, P1, P5
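After the loops above, each candidate question ends up with a list of scores (one per original or generated query variant) and a matching list of weights, and the two are reduced to a single score with a weighted average. A minimal sketch of that reduction with np.average, using made-up numbers:

import numpy as np

# Hypothetical scores for one candidate from three query variants, plus the
# corresponding (candidate weight * query-variant weight) products.
individual_scores_item = [0.8, 0.6, 0.4]
individual_scores_weights_item = [1.0, 0.5, 0.25]

# np.average computes sum(score_i * weight_i) / sum(weight_i).
score = np.average(individual_scores_item, weights=individual_scores_weights_item)
print(score)  # ~0.6857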
Code example #11
 def evaluate(self, data, eval_func):
     res = []
     for t, b, labels in data:
         idts, idbs = myio.create_one_batch(t, b, self.padding_id)
         scores = eval_func(idts)
         #assert len(scores) == len(labels)
         ranks = (-scores).argsort()
         ranked_labels = labels[ranks]
         res.append(ranked_labels)
     e = Evaluation(res)
     MAP = e.MAP() * 100
     MRR = e.MRR() * 100
     P1 = e.Precision(1) * 100
     P5 = e.Precision(5) * 100
     return MAP, MRR, P1, P5
Code example #12
File: basemodel.py Project: mdhuggins/6.884-project
 def on_test_epoch_end(self):
     print("Calculating test accuracy...")
     vacc = self.testaccuracy.compute()
     e = Evaluation(self.eval_res)
     MAP = e.MAP() * 100
     MRR = e.MRR() * 100
     P1 = e.Precision(1) * 100
     P5 = e.Precision(5) * 100
     # print(e)
     print("Test accuracy:", vacc),
     print(MAP, MRR, P1, P5)
     self.log('test_acc_epoch', vacc)
     self.log('t_MAP', MAP)
     self.log('t_Mrr', MRR)
     self.log('t_p1', P1)
     self.log('t_p5', P5)
     return vacc, MAP, MRR, P1, P5
Code example #13
    def evaluate(self, data, tag_names, folder, session):

        all_ids = []
        eval_func = self.predict_func
        outputs, targets = [], []
        for ids, idts, idbs, tags in data:
            all_ids += ids
            output = eval_func(idts, idbs, session)
            outputs.append(output)
            targets.append(tags)

        outputs = np.vstack(outputs)
        targets = np.vstack(targets).astype(np.int32)  # it was dtype object
        """------------------------------------------remove ill evaluation-------------------------------------------"""
        eval_samples = []
        for sample in range(targets.shape[0]):
            if (targets[sample, :] == np.ones(targets.shape[1])).any():
                eval_samples.append(sample)
        print '\n{} samples out of {} will be evaluated (zero-labeled-samples removed).'.format(
            len(eval_samples), outputs.shape[0])
        outputs, targets = outputs[eval_samples, :], targets[eval_samples, :]
        """------------------------------------------remove ill evaluation-------------------------------------------"""

        ev = Evaluation(outputs, None, targets)

        all_rankedat10_tags = []
        query_ids = []

        # EVAL_LABELS = set()
        for sample_id, sample_output in zip(eval_samples, outputs):
            q_id = all_ids[sample_id]
            query_ids.append(q_id)
            cols = np.argsort(sample_output)[-10:]
            rankedat10_tags = []
            for col in cols[::-1]:
                # label_id = eval_labels[col]
                # label_name = tag_names[label_id]
                label_name = tag_names[col]
                # EVAL_LABELS.add(label_name)
                rankedat10_tags.append(label_name)
            all_rankedat10_tags.append(rankedat10_tags)
        # eval_labels = list(EVAL_LABELS & set(TOP50LABELS))

        all_Pat5, all_Pat10, all_Rat5, all_Rat10 = \
            ev.Precision(5, True), ev.Precision(10, True), ev.Recall(5, True), ev.Recall(10, True)
        upper_bounds_pat5 = ev.upper_bound(5, True)
        upper_bounds_pat10 = ev.upper_bound(10, True)
        all_MAP = ev.MeanAveragePrecision(True)
        assert len(all_Pat5) == len(all_rankedat10_tags)

        # mat = ev.ConfusionMatrix(5)
        # print_matrix(
        #     mat,
        #     tag_names,
        #     'Confusion:True Tag on x-axis, False Tag on y-axis',
        #     folder,
        #     some_labels=eval_labels,
        # )
        # mat = ev.CorrelationMatrix()
        # print_matrix(
        #     mat,
        #     tag_names,
        #     'Correlation: True Tag on both axis',
        #     folder,
        #     some_labels=eval_labels
        # )
        print 'average: P@5: {} P@10: {} R@5: {} R@10: {} UBP@5: {} UBP@10: {} MAP: {}'.format(
            ev.Precision(5), ev.Precision(10), ev.Recall(5), ev.Recall(10),
            ev.upper_bound(5), ev.upper_bound(10), ev.MeanAveragePrecision())
        """------------------------------------------remove ill evaluation-------------------------------------------"""
        print 'outputs before ', outputs.shape
        eval_labels = []
        for label in range(targets.shape[1]):
            if (targets[:, label] == np.ones(targets.shape[0])).any():
                eval_labels.append(label)
        print '\n{} labels out of {} will be evaluated (zero-sampled-labels removed).'.format(
            len(eval_labels), targets.shape[1])
        outputs, targets = outputs[:, eval_labels], targets[:, eval_labels]
        print 'outputs after ', outputs.shape
        predictions = np.where(outputs > 0.5, np.ones_like(outputs),
                               np.zeros_like(outputs))
        ev = Evaluation(outputs, predictions, targets)
        print 'precision recall f1 macro: {}'.format(
            ev.precision_recall_fscore('macro'))
        print 'precision recall f1 micro: {}'.format(
            ev.precision_recall_fscore('micro'))

        return query_ids, all_rankedat10_tags, list(all_Pat5), list(all_Pat10), list(all_Rat5), list(all_Rat10), \
               upper_bounds_pat5, upper_bounds_pat10, all_MAP
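The precision_recall_fscore('macro') and ('micro') calls at the end aggregate per-label precision, recall and F1 in two ways: macro averages the per-label scores, while micro pools every (sample, label) decision before computing the scores. Below is a minimal sketch of that distinction using scikit-learn's precision_recall_fscore_support, assuming binary indicator matrices shaped like targets and predictions above; the data is made up.

import numpy as np
from sklearn.metrics import precision_recall_fscore_support

# Hypothetical multi-label ground truth and thresholded predictions (samples x labels).
targets = np.array([[1, 0, 1],
                    [0, 1, 0],
                    [1, 1, 0]])
predictions = np.array([[1, 0, 1],
                        [0, 1, 0],
                        [1, 0, 0]])

# macro: unweighted mean of the per-label precision/recall/F1 scores
print(precision_recall_fscore_support(targets, predictions, average='macro'))
# micro: precision/recall/F1 over all label decisions pooled together
print(precision_recall_fscore_support(targets, predictions, average='micro'))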
Code example #14
    def evaluate_z(self,
                   data,
                   data_raw,
                   ids_corpus,
                   zeval_func,
                   dump_path=None):
        args = self.args
        padding_id = self.padding_id
        tot_p1 = 0.0
        portion_title = 0.0
        tot_selected = 0.0
        res = []
        output_data = []
        for i in range(len(data)):
            idts, labels = data[i]
            pid, qids, _ = data_raw[i]
            scores, p1, z = zeval_func(idts)
            assert len(scores) == len(labels)
            ranks = (-scores).argsort()
            ranked_labels = labels[ranks]
            res.append(ranked_labels)
            tot_p1 += p1

            for wids_i, z_i, question_id in zip(idts.T, z.T, [pid] + qids):
                z2_i = [
                    zv for wid, zv in zip(wids_i, z_i) if wid != padding_id
                ]
                title, body = ids_corpus[question_id]
                #portion_title += sum(z2_i[:len(title)])
                if args.merge == 1 or question_id % 2 == 0:
                    portion_title += sum(z2_i[:len(title)])
                else:
                    portion_title += sum(z2_i[-len(title):])
                tot_selected += sum(z2_i)

            if dump_path is not None:
                output_data.append(("Query: ", idts[:, 0], z[:, 0], pid))
                for id in ranks[:3]:
                    output_data.append(("Retrieved: {}  label={}".format(
                        scores[id],
                        labels[id]), idts[:, id + 1], z[:, id + 1], qids[id]))
        if dump_path is not None:
            embedding_layer = self.embedding_layer
            padding = "<padding>"
            filter_func = lambda w: w != padding
            with open(dump_path, "w") as fout:
                for heading, wordids, z, question_id in output_data:
                    words = embedding_layer.map_to_words(wordids)
                    fout.write(heading + "\tID: {}\n".format(question_id))
                    fout.write("    " + " ".join(filter(filter_func, words)) +
                               "\n")
                    fout.write("------------\n")
                    fout.write("Rationale:\n")
                    fout.write("    " + " ".join(w if zv == 1 else "__"
                                                 for w, zv in zip(words, z)
                                                 if w != padding) + "\n")
                    fout.write("\n\n")

        e = Evaluation(res)
        MAP = e.MAP() * 100
        MRR = e.MRR() * 100
        P1 = e.Precision(1) * 100
        P5 = e.Precision(5) * 100
        return MAP, MRR, P1, P5, tot_p1 / len(data), portion_title / (
            tot_selected + 1e-8)