Example #1
0
def write_voc_results_file(all_boxes, test_imgid_list, det_save_dir):
    '''
  Write one VOC-style detection file per foreground class into det_save_dir.

  :param all_boxes: a list with one item per image. Each item is an array
  of shape [-1, 7]: [category, score, x, y, w, h, theta].
  Note that: if there are no detections in an image, the item is [].
  :param test_imgid_list: image ids, parallel to all_boxes.
  :param det_save_dir: output directory; files are named "det_<cls>.txt".
  :return: None
  '''
    # The output directory is the same for every class — create it once,
    # not on every loop iteration.
    tools.mkdir(det_save_dir)
    for cls, cls_id in NAME_LABEL_MAP.items():
        if cls == 'back_ground':
            continue
        print("Writing {} VOC results file".format(cls))

        det_save_path = os.path.join(det_save_dir, "det_" + cls + ".txt")
        with open(det_save_path, 'wt') as f:
            for index, img_name in enumerate(test_imgid_list):
                this_img_detections = all_boxes[index]
                # Guard the documented empty case: [] cannot be sliced
                # with [:, 0] and would raise TypeError.
                if len(this_img_detections) == 0:
                    continue

                # Keep only the rows whose category id matches this class.
                this_cls_detections = this_img_detections[
                    this_img_detections[:, 0] == cls_id]
                if this_cls_detections.shape[0] == 0:
                    continue  # this cls has no detections in this img
                for a_det in this_cls_detections:
                    f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
                            format(img_name, a_det[1], a_det[2], a_det[3],
                                   a_det[4], a_det[5], a_det[6])
                            )  # that is [img_name, score, x, y, w, h, theta]
Example #2
0
def do_python_eval(test_imgid_list, test_annotation_path):
    """Evaluate every foreground class, plot the PR curves and print mAP.

    :param test_imgid_list: image ids to evaluate.
    :param test_annotation_path: directory of ground-truth annotations.
    :return: None (plots a PR figure, saves it, and prints the mAP).
    """
    AP_list = []

    for cls, index in NAME_LABEL_MAP.items():
        if cls == 'back_ground':
            continue
        recall, precision, AP = voc_eval(detpath=os.path.join(
            cfgs.EVALUATE_DIR, cfgs.VERSION),
                                         test_imgid_list=test_imgid_list,
                                         cls_name=cls,
                                         annopath=test_annotation_path)
        AP_list += [AP]
        pl.plot(recall,
                precision,
                lw=2,
                label='{} (AP = {:.4f})'
                ''.format(cls, AP))
        print(10 * "__")
    pl.xlabel('Recall')
    pl.ylabel('Precision')
    pl.grid(True)
    pl.ylim([0.0, 1.05])
    pl.xlim([0.0, 1.0])
    pl.title('Precision-Recall')
    pl.legend(loc="lower left")
    # Save BEFORE show(): show() blocks and destroys the current figure,
    # so calling savefig afterwards writes out an empty image.
    pl.savefig(cfgs.VERSION + '_eval.jpg')
    pl.show()
    print("mAP is : {}".format(np.mean(AP_list)))
Example #3
0
def do_python_eval(test_imgid_list, test_annotation_path):
    """Evaluate rotated boxes per class, plot PR curves and print mAP.

    :param test_imgid_list: image ids to evaluate.
    :param test_annotation_path: directory of ground-truth annotations.
    :return: None (saves ./PR_R.png and prints per-class stats and mAP).
    """
    import matplotlib.colors as colors
    import matplotlib.pyplot as plt

    # Palette of 'dark*' named colors, indexed by class id. Loop-invariant,
    # so build it once instead of re-filtering cnames for every class.
    c_dark = [name for name in colors.cnames if name.startswith('dark')]

    AP_list = []
    for cls, index in NAME_LABEL_MAP.items():
        if cls == 'back_ground':
            continue
        recall, precision, AP = voc_eval(detpath=cfgs.EVALUATE_R_DIR,
                                         test_imgid_list=test_imgid_list,
                                         cls_name=cls,
                                         annopath=test_annotation_path)
        AP_list += [AP]
        print("cls : {}|| Recall: {} || Precison: {}|| AP: {}".format(
            cls, recall[-1], precision[-1], AP))

        plt.axis([0, 1.2, 0, 1])
        plt.plot(recall, precision, color=c_dark[index], label=cls)

    plt.legend(loc='upper right')
    plt.xlabel('R')
    plt.ylabel('P')
    plt.savefig('./PR_R.png')

    print("mAP is : {}".format(np.mean(AP_list)))
Example #4
0
def do_python_eval(test_imgid_list, test_annotation_path):
    """Run VOC evaluation for every foreground class and print the mAP.

    :param test_imgid_list: image ids to evaluate.
    :param test_annotation_path: directory of ground-truth annotations.
    :return: None (prints per-class recall/precision/AP and the final mAP).
    """
    ap_values = []

    for cls_name, _cls_id in NAME_LABEL_MAP.items():
        # Background is not a real detection class — skip it.
        if cls_name == 'back_ground':
            continue
        detection_dir = os.path.join(cfgs.EVALUATE_DIR, cfgs.VERSION)
        recall, precision, ap = voc_eval(detpath=detection_dir,
                                         test_imgid_list=test_imgid_list,
                                         cls_name=cls_name,
                                         annopath=test_annotation_path,
                                         ovthresh=cfgs.EVAL_THRESHOLD,
                                         use_07_metric=cfgs.USE_07_METRIC)
        ap_values.append(ap)
        print("cls : {}|| Recall: {} || Precison: {}|| AP: {}".format(
            cls_name, recall[-1], precision[-1], ap))
        print("__" * 10)
    print("mAP is : {}".format(np.mean(ap_values)))
Example #5
0
def do_python_eval(test_imgid_list, test_annotation_path, iou_thresh=0.5):
    """Evaluate all classes at the given IoU threshold and print summary stats.

    :param test_imgid_list: image ids to evaluate.
    :param test_annotation_path: directory of ground-truth annotations.
    :param iou_thresh: IoU overlap threshold passed to voc_eval.
    :return: None (saves ./PR_R.png; prints avg recall/precision, false
        alarm rate, and mAP).
    """
    import matplotlib.colors as colors
    import matplotlib.pyplot as plt

    # Palette of 'dark*' named colors, indexed by class id. Loop-invariant,
    # so build it once instead of re-filtering cnames for every class.
    c_dark = [name for name in colors.cnames if name.startswith('dark')]

    AP_list = []
    recall_all = 0
    precision_all = 0
    cls_to_avg = 0      # number of classes actually counted in the averages
    gt_cls_num_all = 0
    tp_all = 0
    fp_all = 0
    for cls, index in NAME_LABEL_MAP.items():
        if cls == 'back_ground':
            continue
        recall, precision, AP, gt_cls_num, tp, fp = voc_eval(
            detpath=cfgs.INFERENCE_SAVE_PATH,
            test_imgid_list=test_imgid_list,
            cls_name=cls,
            annopath=test_annotation_path,
            ovthresh=iou_thresh)
        if np.isnan(AP):
            continue
        if AP == 0:
            # AP == 0 with no ground truth means the class is absent from
            # this test set — skip it. With ground truth present it means
            # nothing was detected, so count the class with all-zero stats.
            if gt_cls_num == 0:
                continue
            else:
                recall, precision, tp, fp = [0], [0], [0], [0]
        AP_list += [AP]
        recall_all += recall[-1]
        precision_all += precision[-1]
        cls_to_avg += 1
        gt_cls_num_all += gt_cls_num
        tp_all += tp[-1]
        fp_all += fp[-1]
        print(
            "cls : {}|| num : {}|| Recall: {} || Precison: {}|| AP: {}".format(
                cls, gt_cls_num, recall[-1], precision[-1], AP))

        plt.axis([0, 1.2, 0.5, 1])
        # Subsample every 500th PR point to keep the plot lightweight.
        plt.plot(recall[::500],
                 precision[::500],
                 color=c_dark[index],
                 label=cls)

    plt.legend(loc='upper right')
    plt.xlabel('R')
    plt.ylabel('P')
    plt.savefig('./PR_R.png')

    # Guard the summary divisions: with no evaluated classes (or no
    # detections at all) the original code raised ZeroDivisionError.
    if cls_to_avg > 0:
        print("avg recall is {}".format(recall_all / cls_to_avg))
        print("avg precision is {}".format(precision_all / cls_to_avg))
    if tp_all + fp_all > 0:
        print("avg false alarm is {}".format(fp_all / (tp_all + fp_all)))
    print("mAP is : {}".format(np.mean(AP_list)))
def _write_voc_results_file(all_boxes, test_imgid_list, det_save_path):
    """Write detections to a VOC-format results file, one line per box.

    :param all_boxes: nested structure indexed as all_boxes[cls_ind][im_ind];
        each entry is an array of boxes with the score in the last column,
        or an empty entry when the image has no detections for the class.
    :param test_imgid_list: image ids, parallel to the im_ind axis.
    :param det_save_path: path of the output text file.
    :return: None
    """
    # NOTE(review): the file is re-opened with 'wt' for every class, so each
    # class truncates the previous one's output — only the last class
    # survives. Confirm intent with callers before changing the mode.
    for cls, cls_ind in NAME_LABEL_MAP.items():
        if cls == 'back_ground':
            continue
        print('Writing {} VOC results file'.format(cls))

        with open(det_save_path, 'wt') as f:
            for im_ind, index in enumerate(test_imgid_list):
                dets = all_boxes[cls_ind][im_ind]
                # `dets == []` is elementwise (and ambiguous) for numpy
                # arrays — test emptiness by length instead.
                if len(dets) == 0:
                    continue
                # the VOCdevkit expects 1-based indices
                for k in range(dets.shape[0]):
                    f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.format(
                        index, dets[k, -1], dets[k, 0] + 1, dets[k, 1] + 1,
                        dets[k, 2] + 1, dets[k, 3] + 1))
def do_python_eval(test_imgid_list, test_annotation_path):
    """Evaluate horizontal boxes per class and return summary metrics.

    :param test_imgid_list: image ids to evaluate.
    :param test_annotation_path: directory of ground-truth annotations.
    :return: (total_mAP, total_mRecall, total_mPrecision,
        mAP_dict, mRecall_dict, mPrecision_dict) — overall means plus
        per-class dictionaries. Also saves ./PR_H.png.
    """
    import matplotlib.colors as colors
    import matplotlib.pyplot as plt

    # Palette of 'dark*' named colors, indexed by class id. Loop-invariant,
    # so build it once instead of re-filtering cnames for every class.
    c_dark = [name for name in colors.cnames if name.startswith('dark')]

    mAP_dict = {}
    mPrecision_dict = {}
    mRecall_dict = {}
    for cls, index in NAME_LABEL_MAP.items():
        print(cls)
        if cls == 'back_ground':
            continue
        recall, precision, AP = voc_eval(detpath=cfgs.EVALUATE_H_DIR,
                                         test_imgid_list=test_imgid_list,
                                         cls_name=cls,
                                         annopath=test_annotation_path)

        Precision_cls = np.mean(precision)
        Recall_cls = np.mean(recall)
        print("{}_AP: {}".format(cls, AP))

        print("{}_mRecall: {}".format(cls, Recall_cls))
        print("{}_mPrecision: {}".format(cls, Precision_cls))

        mAP_dict[cls] = AP
        mPrecision_dict[cls] = Precision_cls
        mRecall_dict[cls] = Recall_cls

        plt.axis([0, 1.2, 0, 1])
        plt.plot(recall, precision, color=c_dark[index], label=cls)

    plt.legend(loc='upper right')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.savefig('./PR_H.png')

    print(mAP_dict, mRecall_dict, mPrecision_dict)
    total_mAP = np.mean(get_values_from_dict(mAP_dict))
    total_mRecall = np.mean(get_values_from_dict(mRecall_dict))
    total_mPrecision = np.mean(get_values_from_dict(mPrecision_dict))

    print("mAP_H is : {}".format(total_mAP))
    print("mRecall_H is : {}".format(total_mRecall))
    print("mPrecision_H is : {}".format(total_mPrecision))
    return total_mAP, total_mRecall, total_mPrecision, mAP_dict, mRecall_dict, mPrecision_dict
Example #8
0
def do_python_eval(test_imgid_list, test_annotation_path):
    """Evaluate rotated boxes per class, print best-F1 stats and mAP.

    :param test_imgid_list: image ids to evaluate.
    :param test_annotation_path: directory of ground-truth annotations.
    :return: None (prints per-class recall/precision/AP, the best-F1 point
        on each PR curve, and the final mAP).
    """
    AP_list = []
    for cls, index in NAME_LABEL_MAP.items():
        if cls == 'back_ground':
            continue
        recall, precision, AP = voc_eval(detpath=cfgs.EVALUATE_R_DIR,
                                         test_imgid_list=test_imgid_list,
                                         cls_name=cls,
                                         annopath=test_annotation_path,
                                         use_07_metric=cfgs.USE_07_METRIC,
                                         ovthresh=cfgs.EVAL_THRESHOLD)
        AP_list += [AP]
        print("cls : {}|| Recall: {} || Precison: {}|| AP: {}".format(
            cls, recall[-1], precision[-1], AP))

        # Find the PR point with the best F1 score. Where r + p == 0 the
        # original expression divided by zero (nan + RuntimeWarning), which
        # then poisoned argmax — define F1 = 0 at those points instead.
        r = np.array(recall)
        p = np.array(precision)
        denom = r + p
        F1 = np.where(denom > 0, 2 * r * p / np.where(denom > 0, denom, 1), 0.0)
        max_ind = np.argmax(F1)
        print('F1:{} P:{} R:{}'.format(F1[max_ind], p[max_ind], r[max_ind]))

    print("mAP is : {}".format(np.mean(AP_list)))