Example #1
def print_summary_of_one_iteration(summary):
    print('----------------------------------------------------')
    print('Iteration:', summary['iteration'])
    print('Seen classes:', summary['seen_classes'])
    print('Unseen classes:', summary['unseen_classes'])
    print('Classifier statistics:')
    for idx, stat in enumerate(summary['stat_list']):
        print('- Class', summary['seen_classes'][idx],
              utils.dict_to_string_4_print(stat))
    print('Average classifier statistics:')
    print(utils.dict_to_string_4_print(summary['avg_classifier_stat']))
    print('Rejection power (TP = correctly accept):')
    print(utils.dict_to_string_4_print(summary['accepted_stats']))
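For context, a minimal sketch of the `summary` dictionary this function expects, together with a hypothetical stand-in for `utils.dict_to_string_4_print` (both the dict shape and the helper are assumptions inferred from the calls above, not part of the original code):

# Hypothetical stand-in for utils.dict_to_string_4_print: join "key: value" pairs into one line.
def dict_to_string_4_print(d):
    return ', '.join('%s: %.4f' % (k, v) for k, v in d.items())

# Assumed shape of one iteration summary (illustrative values only).
summary = {
    'iteration': 0,
    'seen_classes': ['sports', 'politics'],
    'unseen_classes': ['science'],
    'stat_list': [
        {'precision': 0.91, 'recall': 0.88, 'f1': 0.89},   # stats for 'sports'
        {'precision': 0.84, 'recall': 0.80, 'f1': 0.82},   # stats for 'politics'
    ],
    'avg_classifier_stat': {'precision': 0.875, 'recall': 0.84, 'f1': 0.855},
    'accepted_stats': {'precision': 0.78, 'recall': 0.90, 'f1': 0.835},
}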
Example #2
def print_summary_of_all_iterations(iteration_statistics):
    avg_rejection_performance = dict()
    for key in iteration_statistics[0]['accepted_stats']:
        avg_rejection_performance[key] = sum([
            summary['accepted_stats'][key] for summary in iteration_statistics
        ]) / len(iteration_statistics)
    print('====================================================')
    print('The results of all iterations')
    for summary in iteration_statistics:
        print_summary_of_one_iteration(summary)
    print('====================================================')
    print('Rejection power: averaging over', len(iteration_statistics),
          'iterations (TP = correctly accept)')
    print(utils.dict_to_string_4_print(avg_rejection_performance))
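To illustrate the averaging step above, a self-contained sketch with two fabricated iteration summaries (field names follow the function; the numbers are invented):

iteration_statistics = [
    {'accepted_stats': {'precision': 0.80, 'recall': 0.90}},
    {'accepted_stats': {'precision': 0.70, 'recall': 0.80}},
]

# Same averaging as in print_summary_of_all_iterations, written as a dict comprehension.
avg_rejection_performance = {
    key: sum(s['accepted_stats'][key] for s in iteration_statistics) / len(iteration_statistics)
    for key in iteration_statistics[0]['accepted_stats']
}
print(avg_rejection_performance)  # {'precision': 0.75, 'recall': 0.85} up to float rounding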
Example #3
    def __test__(self, epoch, text_seqs, class_list):

        assert len(text_seqs) == len(class_list)

        start_time = time.time()
        step_time = time.time()

        test_steps = len(text_seqs) // config.batch_size

        topk_list = list()
        pred_class_list = list()

        all_loss = np.zeros(1)

        for cstep in range(test_steps):

            text_seqs_mini = text_seqs[cstep * config.batch_size : (cstep + 1) * config.batch_size]
            class_idx_mini = [self.seen_class_map2index[_] for _ in class_list[cstep * config.batch_size : (cstep + 1) * config.batch_size]]

            encode_seqs_id_mini, encode_seqs_mat_mini = self.prepro_encode(text_seqs_mini, False)

            pred_mat = np.zeros([config.batch_size, len(self.class_dict)])

            test_loss, out = self.sess.run([
                self.model.test_loss,
                self.model.test_net.outputs,
            ], feed_dict={
                self.model.encode_seqs: encode_seqs_mat_mini,
                self.model.category_target_index: class_idx_mini
            })

            all_loss[0] += test_loss

            # Row-wise softmax over the seen-class logits.
            pred = np.array([_ / np.sum(_) for _ in np.exp(out)])

            # Scatter seen-class probabilities into the full class space; unseen-class columns stay zero.
            for i in range(len(self.seen_class)):
                pred_mat[:, self.full_class_map2index[self.seen_class[i]]] = pred[:, i]

            topk = self.get_pred_class_topk(pred_mat, k=1)
            topk_list.append(topk)
            pred_class_list.append(pred_mat)

            if cstep % config.cstep_print == 0 and cstep > 0:
                tmp_topk = np.concatenate(topk_list, axis=0)
                tmp_topk = self.get_one_hot_results(np.array(tmp_topk[(cstep + 1 - config.cstep_print) * config.batch_size : (cstep + 1) * config.batch_size]))
                tmp_gt = self.get_one_hot_results(np.reshape(np.array(class_list[(cstep + 1 - config.cstep_print) * config.batch_size : (cstep + 1) * config.batch_size]), newshape=(-1, 1)))
                tmp_stats = utils.get_statistics(tmp_topk, tmp_gt, single_label_pred=True)

                print(
                    "[Test] Epoch: [%3d][%4d/%4d] time: %.4f, loss: %s \n %s" %
                    (epoch, cstep, test_steps, time.time() - step_time, all_loss / (cstep + 1), utils.dict_to_string_4_print(tmp_stats))
                )
                step_time = time.time()


        prediction_topk = np.concatenate(topk_list, axis=0)

        # np.set_printoptions(threshold=np.nan, linewidth=100000)
        # print(class_list[: 200])
        # print(np.squeeze(prediction_topk[: 200]))
        # print(class_list[-200: ])
        # print(np.squeeze(prediction_topk[-200: ]))

        prediction_topk = self.get_one_hot_results(np.array(prediction_topk[: test_steps * config.batch_size]))
        ground_truth = self.get_one_hot_results(np.reshape(np.array(class_list[: test_steps * config.batch_size]), newshape=(-1, 1)))

        stats = utils.get_statistics(prediction_topk, ground_truth, single_label_pred=True)

        print(
            "[Test Sum] Epoch: [%3d] time: %.4f, loss: %s \n %s" %
            (epoch, time.time() - start_time, all_loss / test_steps , utils.dict_to_string_4_print(stats))
        )

        return stats, prediction_topk, ground_truth, np.array([0]), np.array([0])
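The non-obvious step in the loop above is the softmax over seen-class logits followed by scattering those probabilities into the columns of the full class space. A self-contained NumPy sketch of that step, with invented class names and index maps standing in for `self.seen_class` and `self.full_class_map2index`:

import numpy as np

seen_class = ['sports', 'politics']                                 # classes the model was trained on
full_class_map2index = {'sports': 0, 'politics': 1, 'science': 2}   # seen + unseen classes

out = np.array([[2.0, 0.5],      # raw logits for a batch of two texts
                [0.1, 1.5]])

# Row-wise softmax, equivalent to: pred = np.array([_ / np.sum(_) for _ in np.exp(out)])
pred = np.exp(out) / np.exp(out).sum(axis=1, keepdims=True)

# Scatter seen-class probabilities into the full class matrix; unseen columns stay zero.
pred_mat = np.zeros([out.shape[0], len(full_class_map2index)])
for i, cls in enumerate(seen_class):
    pred_mat[:, full_class_map2index[cls]] = pred[:, i]

print(pred_mat)  # the 'science' column is all zeros, so an unseen class is never predicted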
Example #4
    def __test__(self, epoch, text_seqs, class_list):

        assert len(text_seqs) == len(class_list)

        start_time = time.time()
        step_time = time.time()

        test_steps = len(text_seqs) // config.batch_size
        # Round up so the final partial batch is also evaluated.
        if test_steps * config.batch_size < len(text_seqs):
            test_steps += 1

        # topk_list = list()
        pred_all = np.array([])
        out_confidence_all = np.array([])

        all_loss = np.zeros(1)

        for cstep in range(test_steps):

            text_seqs_mini = text_seqs[cstep * config.batch_size:min(
                (cstep + 1) * config.batch_size, len(text_seqs))]
            class_idx_mini = class_list[cstep * config.batch_size:min(
                (cstep + 1) * config.batch_size, len(text_seqs))]

            encode_seqs_id_mini, encode_seqs_mat_mini = self.prepro_encode(
                text_seqs_mini)

            # pred_mat = np.zeros([config.batch_size, len(self.class_dict)])

            test_loss, out = self.sess.run(
                [
                    self.model.test_loss,
                    self.model.test_net.outputs,
                ],
                feed_dict={
                    self.model.encode_seqs:
                    encode_seqs_mat_mini,
                    self.model.label_logits:
                    np.array(class_idx_mini).reshape(-1, 1)
                })

            all_loss[0] += test_loss

            # Threshold the single output score into an accept (1.0) / reject (0.0) decision,
            # keeping the raw confidence for later analysis.
            pred = np.array(
                [1.0 if x[0] >= self.model.threshold else 0.0 for x in out])
            out_confidence = np.array([x[0] for x in out])

            # pred = np.array([_ / np.sum(_) for _ in np.exp(out)])

            # for i in range(len(self.seen_class)):
            #     pred_mat[:, self.full_class_map2index[self.seen_class[i]]] = pred[:, i]

            # topk = self.get_pred_class_topk(pred_mat, k=1)
            # topk_list.append(topk)
            pred_all = np.concatenate((pred_all, pred), axis=0)
            out_confidence_all = np.concatenate(
                (out_confidence_all, out_confidence), axis=0)

            if cstep % config.cstep_print == 0 and cstep > 0:
                # tmp_topk = np.concatenate(topk_list, axis=0)
                # tmp_topk = self.get_one_hot_results(np.array(tmp_topk[: (cstep + 1) * config.batch_size]))
                # tmp_gt = self.get_one_hot_results(np.reshape(np.array(class_list[ : (cstep + 1) * config.batch_size]), newshape=(-1, 1)))
                # tmp_stats = utils.get_statistics(tmp_topk, tmp_gt, single_label_pred=True)
                tmp_stats = utils.get_precision_recall_f1(
                    pred_all,
                    np.array(class_list[:len(pred_all)]),
                    with_confusion_matrix=True)

                print(
                    "[Test] Epoch: [%3d][%4d/%4d] time: %.4f, loss: %s, threshold: %.4f \n %s"
                    % (epoch, cstep, test_steps, time.time() - step_time,
                       all_loss / (cstep + 1), self.model.threshold,
                       utils.dict_to_string_4_print(tmp_stats)))
                step_time = time.time()

        # prediction_topk = np.concatenate(topk_list, axis=0)
        # prediction_topk = self.get_one_hot_results(np.array(prediction_topk[: test_steps * config.batch_size]))
        # ground_truth = self.get_one_hot_results(np.reshape(np.array(class_list[: test_steps * config.batch_size]), newshape=(-1, 1)))

        stats = utils.get_precision_recall_f1(pred_all,
                                              np.array(class_list),
                                              with_confusion_matrix=True)

        print(
            "[Test Sum] Epoch: [%3d] time: %.4f, loss: %s, threshold: %.4f \n %s"
            % (epoch, time.time() - start_time, all_loss / test_steps,
               self.model.threshold, utils.dict_to_string_4_print(stats)))

        return stats, pred_all, class_list, out_confidence_all
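The binary decision in this variant comes down to thresholding a single confidence score per text. A minimal sketch of that step, assuming `out` has shape `(batch, 1)` and using a fixed threshold of 0.5 in place of `self.model.threshold` (whose value is not shown in the snippet):

import numpy as np

threshold = 0.5                            # assumed; the real value comes from self.model.threshold
out = np.array([[0.91], [0.32], [0.57]])   # fabricated network outputs, one score per text

pred = np.array([1.0 if x[0] >= threshold else 0.0 for x in out])   # accept / reject decisions
out_confidence = np.array([x[0] for x in out])                      # raw scores kept for analysis

print(pred)            # [1. 0. 1.]
print(out_confidence)  # [0.91 0.32 0.57]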