Example #1
def write_voc_results_file(all_boxes, test_imgid_list, det_save_dir):
    '''
  :param all_boxes: a list with one item per test image. Each item is an array
  of shape [-1, 7] whose rows are [category, score, x, y, w, h, theta].
  Note: if an image has no detections, its item is an empty list [].

  :param test_imgid_list: list of test image ids, aligned with all_boxes
  :param det_save_dir: directory in which the per-class detection files are written
  :return:
  '''
    for cls, cls_id in NAME_LABEL_MAP.items():
        if cls == 'back_ground':
            continue
        print("Writing {} VOC resutls file".format(cls))

        tools.mkdir(det_save_dir)
        det_save_path = os.path.join(det_save_dir, "det_" + cls + ".txt")
        with open(det_save_path, 'wt') as f:
            for index, img_name in enumerate(test_imgid_list):
                this_img_detections = all_boxes[index]

                if len(this_img_detections) == 0:
                    continue  # no detections at all in this image
                this_cls_detections = this_img_detections[
                    this_img_detections[:, 0] == cls_id]
                if this_cls_detections.shape[0] == 0:
                    continue  # this class has no detections in this image
                for a_det in this_cls_detections:
                    f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
                            format(img_name, a_det[1], a_det[2], a_det[3],
                                   a_det[4], a_det[5], a_det[6])
                            )  # that is [img_name, score, x, y, w, h, theta]
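
A minimal usage sketch of the all_boxes layout described in the docstring above (one [-1, 7] array per test image, or an empty list when an image has no detections). The image ids and save directory here are hypothetical:

import numpy as np

# Hypothetical inputs: two test images, the second one without detections.
test_imgid_list = ['P0001', 'P0002']
all_boxes = [
    np.array([[1, 0.97, 120.0, 80.0, 40.0, 20.0, -30.0],   # [category, score, x, y, w, h, theta]
              [2, 0.55, 300.0, 150.0, 60.0, 35.0, 10.0]]),
    [],                                                     # no detections in this image
]

write_voc_results_file(all_boxes, test_imgid_list, det_save_dir='./det_results')
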
Example #2
def do_python_eval(test_imgid_list, test_annotation_path):
    AP_list = []
    #import matplotlib.pyplot as plt
    #import matplotlib.colors as colors
    #color_list = colors.cnames.keys()[::6]

    for cls, index in NAME_LABEL_MAP.items():
        if cls == 'back_ground':
            continue
        recall, precision, AP = voc_eval(detpath=os.path.join(
            cfgs.EVALUATE_DIR, cfgs.VERSION),
                                         test_imgid_list=test_imgid_list,
                                         cls_name=cls,
                                         annopath=test_annotation_path)
        AP_list += [AP]
        pl.plot(recall,
                precision,
                lw=2,
                label='{} (AP = {:.4f})'
                ''.format(cls, AP))
        print(10 * "__")
    pl.xlabel('Recall')
    pl.ylabel('Precision')
    pl.grid(True)
    pl.ylim([0.0, 1.05])
    pl.xlim([0.0, 1.0])
    pl.title('Precision-Recall')
    pl.legend(loc="lower left")
    pl.savefig(cfgs.VERSION + '_eval.jpg')  # save before show(); saving afterwards writes a blank figure
    pl.show()
    print("mAP is : {}".format(np.mean(AP_list)))
Example #3
def do_python_eval(test_imgid_list, test_annotation_path):
    import matplotlib.colors as colors
    import matplotlib.pyplot as plt

    AP_list = []
    for cls, index in NAME_LABEL_MAP.items():
        if cls == 'back_ground':
            continue
        recall, precision, AP = voc_eval(detpath=cfgs.EVALUATE_R_DIR,
                                         test_imgid_list=test_imgid_list,
                                         cls_name=cls,
                                         annopath=test_annotation_path)
        AP_list += [AP]
        print("cls : {}|| Recall: {} || Precison: {}|| AP: {}".format(
            cls, recall[-1], precision[-1], AP))
        # print("{}_ap: {}".format(cls, AP))
        # print("{}_recall: {}".format(cls, recall[-1]))
        # print("{}_precision: {}".format(cls, precision[-1]))

        c = colors.cnames.keys()
        c_dark = list(filter(lambda x: x.startswith('dark'), c))
        c = ['red', 'orange']
        plt.axis([0, 1.2, 0, 1])
        plt.plot(recall, precision, color=c_dark[index], label=cls)

    plt.legend(loc='upper right')
    plt.xlabel('R')
    plt.ylabel('P')
    plt.savefig('./PR_R.png')

    print("mAP is : {}".format(np.mean(AP_list)))
Example #4
def do_python_eval(test_imgid_list, test_annotation_path):
    AP_list = []
    # import matplotlib.pyplot as plt
    # import matplotlib.colors as colors
    # color_list = colors.cnames.keys()[::6]

    for cls, index in NAME_LABEL_MAP.items():
        if cls == 'back_ground':
            continue
        recall, precision, AP = voc_eval(detpath=os.path.join(
            cfgs.EVALUATE_DIR, cfgs.VERSION),
                                         test_imgid_list=test_imgid_list,
                                         cls_name=cls,
                                         annopath=test_annotation_path,
                                         ovthresh=cfgs.EVAL_THRESHOLD,
                                         use_07_metric=cfgs.USE_07_METRIC)
        AP_list += [AP]
        print("cls : {}|| Recall: {} || Precison: {}|| AP: {}".format(
            cls, recall[-1], precision[-1], AP))
        # plt.plot(recall, precision, label=cls, color=color_list[index])
        # plt.legend(loc='upper right')
        print(10 * "__")
    # plt.show()
    # plt.savefig(cfgs.VERSION+'.jpg')
    print("mAP is : {}".format(np.mean(AP_list)))
Example #5
def do_python_eval(test_imgid_list, test_annotation_path, iou_thresh=0.5):
    import matplotlib.colors as colors
    import matplotlib.pyplot as plt

    AP_list = []
    recall_all = 0
    precision_all = 0
    cls_to_avg = 0
    gt_cls_num_all = 0
    tp_all = 0
    fp_all = 0
    for cls, index in NAME_LABEL_MAP.items():
        if cls == 'back_ground':
            continue
        recall, precision, AP, gt_cls_num, tp, fp = voc_eval(
            detpath=cfgs.INFERENCE_SAVE_PATH,
            test_imgid_list=test_imgid_list,
            cls_name=cls,
            annopath=test_annotation_path,
            ovthresh=iou_thresh)
        # print('cls:', cls, 'gt num:', gt_cls_num)
        if np.isnan(AP):
            continue
        if AP == 0:  #recall.size == 0 or precision.size == 0:
            if gt_cls_num == 0:
                continue
            else:
                recall, precision, tp, fp = [0], [0], [0], [0]
        AP_list += [AP]
        recall_all += recall[-1]
        precision_all += precision[-1]
        cls_to_avg += 1
        gt_cls_num_all += gt_cls_num
        tp_all += tp[-1]
        fp_all += fp[-1]
        print(
            "cls : {} || num : {} || Recall: {} || Precision: {} || AP: {}".format(
                cls, gt_cls_num, recall[-1], precision[-1], AP))

        c = colors.cnames.keys()
        c_dark = list(filter(lambda x: x.startswith('dark'), c))
        c = ['red', 'orange']
        plt.axis([0, 1.2, 0.5, 1])
        plt.plot(recall[::500],
                 precision[::500],
                 color=c_dark[index],
                 label=cls)
        # print(recall, precision)

    plt.legend(loc='upper right')
    plt.xlabel('R')
    plt.ylabel('P')
    plt.savefig('./PR_R.png')

    print("avg recall is {}".format(recall_all / cls_to_avg))
    print("avg precision is {}".format(precision_all / cls_to_avg))
    print("avg false alarm is {}".format(fp_all / (tp_all + fp_all)))
    print("mAP is : {}".format(np.mean(AP_list)))
Example #6
def _write_voc_results_file(all_boxes, test_imgid_list, det_save_path):
    for cls, cls_ind in NAME_LABEL_MAP.items():
        if cls == 'back_ground':
            continue
        print('Writing {} VOC results file'.format(cls))

        with open(det_save_path, 'wt') as f:
            for im_ind, index in enumerate(test_imgid_list):
                dets = all_boxes[cls_ind][im_ind]
                if len(dets) == 0:
                    continue
                # the VOCdevkit expects 1-based indices
                for k in range(dets.shape[0]):
                    f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.format(
                        index, dets[k, -1], dets[k, 0] + 1, dets[k, 1] + 1,
                        dets[k, 2] + 1, dets[k, 3] + 1))
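
Unlike Example #1, this variant indexes all_boxes first by class id and then by image index (all_boxes[cls_ind][im_ind]), with axis-aligned boxes whose last column is the score. A hypothetical sketch of that layout, assuming NAME_LABEL_MAP reserves index 0 for 'back_ground':

import numpy as np

test_imgid_list = ['000001', '000002']     # hypothetical image ids
num_classes = len(NAME_LABEL_MAP)          # includes 'back_ground' at index 0

# all_boxes[cls_ind][im_ind] -> array of [x1, y1, x2, y2, score] rows (or [] if empty)
all_boxes = [[[] for _ in test_imgid_list] for _ in range(num_classes)]
all_boxes[1][0] = np.array([[48.0, 62.0, 130.0, 190.0, 0.91]])
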
Example #7
def do_python_eval(test_imgid_list, test_annotation_path):
    import matplotlib.colors as colors
    import matplotlib.pyplot as plt
    mAP_dict = {}
    mPrecision_dict = {}
    mRecall_dict = {}
    for cls, index in NAME_LABEL_MAP.items():
        print(cls)
        if cls == 'back_ground':
            continue
        recall, precision, AP = voc_eval(detpath=cfgs.EVALUATE_H_DIR,
                                         test_imgid_list=test_imgid_list,
                                         cls_name=cls,
                                         annopath=test_annotation_path)

        Precision_cls = np.mean(precision)
        Recall_cls = np.mean(recall)
        print("{}_AP: {}".format(cls, AP))

        print("{}_mRecall: {}".format(cls, Recall_cls))
        print("{}_mPrecision: {}".format(cls, Precision_cls))

        mAP_dict[cls] = AP
        mPrecision_dict[cls] = Precision_cls
        mRecall_dict[cls] = Recall_cls

        c = colors.cnames.keys()
        c_dark = list(filter(lambda x: x.startswith('dark'), c))
        c = ['blue', 'green']
        plt.axis([0, 1.2, 0, 1])
        plt.plot(recall, precision, color=c_dark[index], label=cls)

    plt.legend(loc='upper right')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.savefig('./PR_H.png')

    print(mAP_dict, mRecall_dict, mPrecision_dict)
    total_mAP = np.mean(get_values_from_dict(mAP_dict))
    total_mRecall = np.mean(get_values_from_dict(mRecall_dict))
    total_mPrecision = np.mean(get_values_from_dict(mPrecision_dict))

    print("mAP_H is : {}".format(total_mAP))
    print("mRecall_H is : {}".format(total_mRecall))
    print("mPrecision_H is : {}".format(total_mPrecision))
    # print(mAP, recall, precision)
    return total_mAP, total_mRecall, total_mPrecision, mAP_dict, mRecall_dict, mPrecision_dict
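
get_values_from_dict is assumed to be a module-level helper that simply collects the per-class metric values before averaging; a minimal sketch under that assumption:

def get_values_from_dict(metric_dict):
    # Collect the per-class values ('back_ground' is never inserted, so no filtering is needed).
    return list(metric_dict.values())
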
Example #8
def do_python_eval(test_imgid_list, test_annotation_path):
    # import matplotlib.colors as colors
    # import matplotlib.pyplot as plt

    AP_list = []
    for cls, index in NAME_LABEL_MAP.items():
        if cls == 'back_ground':
            continue
        recall, precision, AP = voc_eval(detpath=cfgs.EVALUATE_R_DIR,
                                         test_imgid_list=test_imgid_list,
                                         cls_name=cls,
                                         annopath=test_annotation_path,
                                         use_07_metric=cfgs.USE_07_METRIC,
                                         ovthresh=cfgs.EVAL_THRESHOLD)
        AP_list += [AP]
        print("cls : {}|| Recall: {} || Precison: {}|| AP: {}".format(
            cls, recall[-1], precision[-1], AP))
        # print("{}_ap: {}".format(cls, AP))
        # print("{}_recall: {}".format(cls, recall[-1]))
        # print("{}_precision: {}".format(cls, precision[-1]))
        r = np.array(recall)
        p = np.array(precision)
        F1 = 2 * r * p / (r + p)
        max_ind = np.argmax(F1)
        print('F1:{} P:{} R:{}'.format(F1[max_ind], p[max_ind], r[max_ind]))

        # c = colors.cnames.keys()
        # c_dark = list(filter(lambda x: x.startswith('dark'), c))
        # c = ['red', 'orange']
        # plt.axis([0, 1.2, 0, 1])
        # plt.plot(recall, precision, color=c_dark[index], label=cls)

    # plt.legend(loc='upper right')
    # plt.xlabel('R')
    # plt.ylabel('P')
    # plt.savefig('./PR_R.png')

    print("mAP is : {}".format(np.mean(AP_list)))
Example #9
def inference(det_net,
              file_paths,
              des_folder,
              h_len,
              w_len,
              h_overlap,
              w_overlap,
              save_res=False):

    if save_res:
        assert cfgs.SHOW_SCORE_THRSHOLD >= 0.5, \
            'please set score threshold (example: SHOW_SCORE_THRSHOLD = 0.5) in cfgs.py'

    else:
        assert cfgs.SHOW_SCORE_THRSHOLD < 0.005, \
            'please set score threshold (example: SHOW_SCORE_THRSHOLD = 0.00) in cfgs.py'

    tmp_file = './tmp_%s.txt' % cfgs.VERSION

    # 1. preprocess img
    img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3])
    img_batch = tf.cast(img_plac, tf.float32)
    if cfgs.NET_NAME in ['resnet101_v1d', 'resnet50_v1d']:
        img_batch = (img_batch / 255 - tf.constant(
            cfgs.PIXEL_MEAN_)) / tf.constant(cfgs.PIXEL_STD)
    else:
        img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)

    img_batch = tf.expand_dims(img_batch, axis=0)

    img_batch = short_side_resize_for_inference_data(
        img_tensor=img_batch,
        target_shortside_len=cfgs.IMG_SHORT_SIDE_LEN[0],
        is_resize=False)

    det_boxes_h, det_scores_h, det_category_h = det_net.build_whole_detection_network(
        input_img_batch=img_batch, gtboxes_batch=None)

    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())

    restorer, restore_ckpt = det_net.get_restorer()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        sess.run(init_op)
        if restorer is not None:
            restorer.restore(sess, restore_ckpt)
            print('restore model')

        if not os.path.exists(tmp_file):
            fw = open(tmp_file, 'w')
            fw.close()

        fr = open(tmp_file, 'r')
        pass_img = fr.readlines()
        fr.close()

        for count, img_path in enumerate(file_paths):
            fw = open(tmp_file, 'a+')
            if img_path + '\n' in pass_img:
                continue
            start = timer()
            img = cv2.imread(img_path)

            box_res = []
            label_res = []
            score_res = []

            imgH = img.shape[0]
            imgW = img.shape[1]
            ori_H = imgH
            ori_W = imgW
            # print("  ori_h, ori_w: ", imgH, imgW)
            if imgH < h_len:
                temp = np.zeros([h_len, imgW, 3], np.float32)
                temp[0:imgH, :, :] = img
                img = temp
                imgH = h_len

            if imgW < w_len:
                temp = np.zeros([imgH, w_len, 3], np.float32)
                temp[:, 0:imgW, :] = img
                img = temp
                imgW = w_len

            for hh in range(0, imgH, h_len - h_overlap):
                if imgH - hh - 1 < h_len:
                    hh_ = imgH - h_len
                else:
                    hh_ = hh
                for ww in range(0, imgW, w_len - w_overlap):
                    if imgW - ww - 1 < w_len:
                        ww_ = imgW - w_len
                    else:
                        ww_ = ww
                    src_img = img[hh_:(hh_ + h_len), ww_:(ww_ + w_len), :]

                    for short_size in cfgs.IMG_SHORT_SIDE_LEN:
                        max_len = 1200

                        if h_len < w_len:
                            new_h, new_w = short_size, min(
                                int(short_size * float(w_len) / h_len),
                                max_len)
                        else:
                            new_h, new_w = min(
                                int(short_size * float(h_len) / w_len),
                                max_len), short_size

                        img_resize = cv2.resize(src_img, (new_w, new_h))  # cv2.resize expects (width, height)

                        det_boxes_h_, det_scores_h_, det_category_h_ = \
                            sess.run(
                                [det_boxes_h, det_scores_h, det_category_h],
                                feed_dict={img_plac: img_resize[:, :, ::-1]}
                            )

                        valid = det_scores_h_ > 1e-4
                        det_boxes_h_ = det_boxes_h_[valid]
                        det_scores_h_ = det_scores_h_[valid]
                        det_category_h_ = det_category_h_[valid]
                        det_boxes_h_[:, 0] = det_boxes_h_[:, 0] * w_len / new_w
                        det_boxes_h_[:, 1] = det_boxes_h_[:, 1] * h_len / new_h
                        det_boxes_h_[:, 2] = det_boxes_h_[:, 2] * w_len / new_w
                        det_boxes_h_[:, 3] = det_boxes_h_[:, 3] * h_len / new_h

                        if len(det_boxes_h_) > 0:
                            for ii in range(len(det_boxes_h_)):
                                box = det_boxes_h_[ii]
                                box[0] = box[0] + ww_
                                box[1] = box[1] + hh_
                                box[2] = box[2] + ww_
                                box[3] = box[3] + hh_
                                box_res.append(box)
                                label_res.append(det_category_h_[ii])
                                score_res.append(det_scores_h_[ii])

            box_res = np.array(box_res)
            label_res = np.array(label_res)
            score_res = np.array(score_res)

            box_res_, label_res_, score_res_ = [], [], []

            # h_threshold = {'roundabout': 0.35, 'tennis-court': 0.35, 'swimming-pool': 0.4, 'storage-tank': 0.3,
            #                'soccer-ball-field': 0.3, 'small-vehicle': 0.4, 'ship': 0.35, 'plane': 0.35,
            #                'large-vehicle': 0.4, 'helicopter': 0.4, 'harbor': 0.3, 'ground-track-field': 0.4,
            #                'bridge': 0.3, 'basketball-court': 0.4, 'baseball-diamond': 0.3}
            h_threshold = {
                'turntable': 0.5,
                'tennis-court': 0.5,
                'swimming-pool': 0.5,
                'storage-tank': 0.5,
                'soccer-ball-field': 0.5,
                'small-vehicle': 0.5,
                'ship': 0.5,
                'plane': 0.5,
                'large-vehicle': 0.5,
                'helicopter': 0.5,
                'harbor': 0.5,
                'ground-track-field': 0.5,
                'bridge': 0.5,
                'basketball-court': 0.5,
                'baseball-diamond': 0.5,
                'container-crane': 0.5
            }

            for sub_class in range(1, cfgs.CLASS_NUM + 1):
                index = np.where(label_res == sub_class)[0]
                if len(index) == 0:
                    continue
                tmp_boxes_h = box_res[index]
                tmp_label_h = label_res[index]
                tmp_score_h = score_res[index]

                tmp_boxes_h = np.array(tmp_boxes_h)
                tmp = np.zeros(
                    [tmp_boxes_h.shape[0], tmp_boxes_h.shape[1] + 1])
                tmp[:, 0:-1] = tmp_boxes_h
                tmp[:, -1] = np.array(tmp_score_h)

                # inx = nms.py_cpu_nms(dets=np.array(tmp, np.float32),
                #                      thresh=h_threshold[LABEL_NAME_MAP[sub_class]],
                #                      max_output_size=500)

                inx = nms(np.array(tmp, np.float32),
                          h_threshold[LABEl_NAME_MAP[sub_class]])

                inx = inx[:500]  # keep at most 500 outputs per class

                box_res_.extend(np.array(tmp_boxes_h)[inx])
                score_res_.extend(np.array(tmp_score_h)[inx])
                label_res_.extend(np.array(tmp_label_h)[inx])

            time_elapsed = timer() - start

            if save_res:
                scores = np.array(score_res_)
                labels = np.array(label_res_)
                boxes = np.array(box_res_)
                valid_show = scores > cfgs.SHOW_SCORE_THRSHOLD
                scores = scores[valid_show]
                boxes = boxes[valid_show]
                labels = labels[valid_show]
                det_detections_h = draw_box_in_img.draw_boxes_with_label_and_scores(
                    np.array(img, np.float32),
                    boxes=boxes,
                    labels=labels,
                    scores=scores,
                    in_graph=False)
                det_detections_h = det_detections_h[:ori_H, :ori_W]
                save_dir = os.path.join(des_folder, cfgs.VERSION)
                tools.mkdir(save_dir)
                cv2.imwrite(
                    save_dir + '/' + img_path.split('/')[-1].split('.')[0] +
                    '_h_s%d_t%f.jpg' %
                    (h_len, cfgs.FAST_RCNN_NMS_IOU_THRESHOLD),
                    det_detections_h)

                view_bar(
                    '{} cost {}s'.format(
                        img_path.split('/')[-1].split('.')[0], time_elapsed),
                    count + 1, len(file_paths))

            else:
                # eval txt
                CLASS_DOTA = NAME_LABEL_MAP.keys()

                # Task2
                write_handle_h = {}
                txt_dir_h = os.path.join('txt_output', cfgs.VERSION + '_h')
                tools.mkdir(txt_dir_h)
                for sub_class in CLASS_DOTA:
                    if sub_class == 'back_ground':
                        continue
                    write_handle_h[sub_class] = open(
                        os.path.join(txt_dir_h, 'Task2_%s.txt' % sub_class),
                        'a+')

                for i, hbox in enumerate(box_res_):
                    command = '%s %.3f %.1f %.1f %.1f %.1f\n' % (
                        img_path.split('/')[-1].split('.')[0], score_res_[i],
                        hbox[0], hbox[1], hbox[2], hbox[3])
                    write_handle_h[LABEl_NAME_MAP[label_res_[i]]].write(
                        command)

                for sub_class in CLASS_DOTA:
                    if sub_class == 'back_ground':
                        continue
                    write_handle_h[sub_class].close()

            view_bar(
                '{} cost {}s'.format(
                    img_path.split('/')[-1].split('.')[0], time_elapsed),
                count + 1, len(file_paths))
            fw.write('{}\n'.format(img_path))
            fw.close()
        os.remove(tmp_file)
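
The sliding-window crop in inference above steps by len - overlap and clamps the last tile so it ends exactly at the image border. A small standalone sketch of the same offset logic (sizes are hypothetical):

def tile_offsets(img_size, tile_len, overlap):
    # Clamp each start offset so every tile is tile_len long and the last one touches the border.
    return [min(start, img_size - tile_len)
            for start in range(0, img_size, tile_len - overlap)]

# tile_offsets(1000, 600, 150) -> [0, 400, 400]; duplicates near the border mirror the loop above.
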
def eval_with_plac(img_dir, det_net, num_imgs, image_ext, draw_imgs,
                   test_annotation_path):

    # 1. preprocess img
    img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None,
                                                     3])  # is RGB, not BGR
    img_batch = tf.cast(img_plac, tf.float32)
    img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)
    img_batch = short_side_resize_for_inference_data(
        img_tensor=img_batch,
        target_shortside_len=cfgs.IMG_SHORT_SIDE_LEN,
        is_resize=False)

    det_boxes_h, det_scores_h, det_category_h, \
    det_boxes_r, det_scores_r, det_category_r = det_net.build_whole_detection_network(
        input_img_batch=img_batch,
        gtboxes_h_batch=None, gtboxes_r_batch=None)

    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())

    global_step_tensor = slim.get_or_create_global_step()

    eval_result = []
    last_checkpoint_name = None

    while True:

        restorer, restore_ckpt = det_net.get_restorer()
        #saver = tf.train.Saver(max_to_keep=10)
        start_time = time.time()

        model_path = os.path.splitext(os.path.basename(restore_ckpt))[0]
        if model_path is None:
            print("Wait for available checkpoint")
        elif last_checkpoint_name == model_path:
            print(
                "Already evaluated checkpoint {}, we will try evaluation in {} seconds"
                .format(model_path, EVAL_INTERVAL))
            #continue
        else:
            print('Last ckpt was {}, new ckpt is {}'.format(
                last_checkpoint_name, model_path))
            last_checkpoint_name = model_path

            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True

            with tf.Session(config=config) as sess:
                sess.run(init_op)
                sess.run(global_step_tensor.initializer)
                if restorer is not None:
                    restorer.restore(sess, restore_ckpt)
                    print('restore model', restore_ckpt)

                global_stepnp = tf.train.global_step(sess, global_step_tensor)
                print('#########################', global_stepnp)

                all_boxes_h = []
                all_boxes_r = []
                imgs = os.listdir(img_dir)
                imgs_len = len(imgs)
                none_detected_image = []
                for i, a_img_name in enumerate(imgs[:]):
                    a_img_name = a_img_name.split(image_ext)[0]
                    image_name = a_img_name + image_ext
                    print('\n', a_img_name)

                    raw_img = cv2.imread(
                        os.path.join(img_dir, a_img_name + image_ext))
                    raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]

                    start = time.time()
                    resized_img, det_boxes_h_, det_scores_h_, det_category_h_, \
                    det_boxes_r_, det_scores_r_, det_category_r_ = \
                        sess.run(
                            [img_batch, det_boxes_h, det_scores_h, det_category_h,
                             det_boxes_r, det_scores_r, det_category_r],
                            feed_dict={img_plac: raw_img}
                        )
                    end = time.time()
                    print("det category H : ", det_category_h_)
                    print("det category R : ", det_category_r_)
                    # print("{} cost time : {} ".format(img_name, (end - start)))
                    if draw_imgs:
                        det_detections_h = draw_box_in_img.draw_box_cv(
                            np.squeeze(resized_img, 0),
                            boxes=det_boxes_h_,
                            labels=det_category_h_,
                            scores=det_scores_h_)
                        det_detections_r = draw_box_in_img.draw_rotate_box_cv(
                            np.squeeze(resized_img, 0),
                            boxes=det_boxes_r_,
                            labels=det_category_r_,
                            scores=det_scores_r_)
                        save_dir = os.path.join(cfgs.TEST_SAVE_PATH,
                                                cfgs.VERSION)
                        tools.mkdir(save_dir)
                        cv2.imwrite(save_dir + '/' + a_img_name + '_h.jpg',
                                    det_detections_h[:, :, ::-1])
                        cv2.imwrite(save_dir + '/' + a_img_name + '_r.jpg',
                                    det_detections_r[:, :, ::-1])

                    xmin, ymin, xmax, ymax = det_boxes_h_[:, 0], det_boxes_h_[:, 1], \
                                     det_boxes_h_[:, 2], det_boxes_h_[:, 3]

                    if det_boxes_r_.shape[0] != 0:
                        #print('### Has box ###')
                        resized_h, resized_w = resized_img.shape[
                            1], resized_img.shape[2]
                        det_boxes_r_ = forward_convert(det_boxes_r_, False)
                        det_boxes_r_[:, 0::2] *= (raw_w / resized_w)
                        det_boxes_r_[:, 1::2] *= (raw_h / resized_h)
                        det_boxes_r_ = back_forward_convert(
                            det_boxes_r_, False)

                        x_c, y_c, w, h, theta = det_boxes_r_[:, 0], det_boxes_r_[:, 1], det_boxes_r_[:, 2], \
                                                det_boxes_r_[:, 3], det_boxes_r_[:, 4]

                        xmin = xmin * raw_w / resized_w
                        xmax = xmax * raw_w / resized_w
                        ymin = ymin * raw_h / resized_h
                        ymax = ymax * raw_h / resized_h

                        boxes_h = np.transpose(
                            np.stack([xmin, ymin, xmax, ymax]))
                        boxes_r = np.transpose(
                            np.stack([x_c, y_c, w, h, theta]))
                        dets_h = np.hstack((det_category_h_.reshape(-1, 1),
                                            det_scores_h_.reshape(-1,
                                                                  1), boxes_h))
                        dets_r = np.hstack((det_category_r_.reshape(-1, 1),
                                            det_scores_r_.reshape(-1,
                                                                  1), boxes_r))
                        all_boxes_h.append(dets_h)
                        all_boxes_r.append(dets_r)
                    else:
                        imgs.remove(image_name)
                        none_detected_image.append(image_name)
                        print('No detections')

                    tools.view_bar(
                        '{} image cost {}s'.format(a_img_name, (end - start)),
                        i + 1, imgs_len)

                fw1 = open(cfgs.VERSION + '_detections_h.pkl', 'wb')
                fw2 = open(cfgs.VERSION + '_detections_r.pkl', 'wb')
                pickle.dump(all_boxes_h, fw1)
                pickle.dump(all_boxes_r, fw2)
                fw1.close()
                fw2.close()

                # with open(cfgs.VERSION + '_detections_h.pkl', 'rb') as f1:
                #     all_boxes_h = pickle.load(f1, encoding='unicode')

                # print(10 * "###")
                # print(len(all_boxes_h))
                #
                # with open(cfgs.VERSION + '_detections_r.pkl', 'rb') as f2:
                #     all_boxes_r = pickle.load(f2, encoding='unicode')
                #
                #     print(len(all_boxes_r))

                # imgs = os.listdir(img_dir)
                real_test_imgname_list = [i.split(image_ext)[0] for i in imgs]

                print(10 * "**")
                print('horizon eval:')
                # print(len(all_boxes_h), len(all_boxes_r))
                # print(len(real_test_imgname_list))
                mAP_h, recall_h, precision_h, total_mAP_h, total_recall_h, total_precision_h = voc_eval.voc_evaluate_detections(
                    all_boxes=all_boxes_h,
                    test_imgid_list=real_test_imgname_list,
                    test_annotation_path=test_annotation_path)
                print('mAP_h: ', mAP_h)
                print('mRecall_h:', recall_h)
                print('mPrecision_h:', precision_h)
                print('total_mAP_h: ', total_mAP_h)
                print('total_recall_h_list:', total_recall_h)
                print('total_precision_h_list:', total_precision_h)

                print(10 * "**")
                print('rotation eval:')
                mAP_r, recall_r, precision_r, total_mAP_r, total_recall_r, total_precision_r = voc_eval_r.voc_evaluate_detections(
                    all_boxes=all_boxes_r,
                    test_imgid_list=real_test_imgname_list,
                    test_annotation_path=test_annotation_path)

                f1score_h_check = (1 + 1**2) * precision_h * recall_h / (
                    1**2 * precision_h + recall_h)
                f1score_h = calc_fscore(precision_h, recall_h, 1)

                f1score_r_check = (1 + 1**2) * precision_r * recall_r / (
                    1**2 * precision_r + recall_r)
                f1score_r = calc_fscore(precision_r, recall_r, 1)

                print(10 * '##')
                print('mAP_r:', mAP_r)
                print('mRecall_r:', recall_r)
                print('mPrecision_r:', precision_r)
                print('total_mAP_r_list: ', total_mAP_r)
                print('total_recall_r_list:', total_recall_r)
                print('total_precision_r_list:', total_precision_r)
                print('f1score_r:', f1score_r)

                summary_path = os.path.join(cfgs.SUMMARY_PATH,
                                            cfgs.VERSION + '/eval_0')
                tools.mkdir(summary_path)

                summary_writer = tf.summary.FileWriter(summary_path,
                                                       graph=sess.graph)

                mAP_h_summ = tf.Summary()
                mAP_h_summ.value.add(tag='EVAL_Global/mAP_h',
                                     simple_value=mAP_h)
                summary_writer.add_summary(mAP_h_summ, global_stepnp)

                mAP_r_summ = tf.Summary()
                mAP_r_summ.value.add(tag='EVAL_Global/mAP_r',
                                     simple_value=mAP_r)
                summary_writer.add_summary(mAP_r_summ, global_stepnp)

                mRecall_h_summ = tf.Summary()
                mRecall_h_summ.value.add(tag='EVAL_Global/Recall_h',
                                         simple_value=recall_h)
                summary_writer.add_summary(mRecall_h_summ, global_stepnp)

                mRecall_r_summ = tf.Summary()
                mRecall_r_summ.value.add(tag='EVAL_Global/Recall_r',
                                         simple_value=recall_r)
                summary_writer.add_summary(mRecall_r_summ, global_stepnp)

                mPrecision_h_summ = tf.Summary()
                mPrecision_h_summ.value.add(tag='EVAL_Global/Precision_h',
                                            simple_value=precision_h)
                summary_writer.add_summary(mPrecision_h_summ, global_stepnp)

                mPrecision_r_summ = tf.Summary()
                mPrecision_r_summ.value.add(tag='EVAL_Global/Precision_r',
                                            simple_value=precision_r)
                summary_writer.add_summary(mPrecision_r_summ, global_stepnp)

                mF1Score_h_summ = tf.Summary()
                mF1Score_h_summ.value.add(tag='EVAL_Global/F1Score_h',
                                          simple_value=f1score_h)
                summary_writer.add_summary(mF1Score_h_summ, global_stepnp)

                mF1Score_r_summ = tf.Summary()
                mF1Score_r_summ.value.add(tag='EVAL_Global/F1Score_r',
                                          simple_value=f1score_r)
                summary_writer.add_summary(mF1Score_r_summ, global_stepnp)

                mAP_h_class_dict = {}
                mAP_r_class_dict = {}
                recall_h_class_dict = {}
                recall_r_class_dict = {}
                precision_h_class_dict = {}
                precision_r_class_dict = {}
                f1score_h_class_dict = {}
                f1score_r_class_dict = {}

                label_list = list(NAME_LABEL_MAP.keys())
                label_list.remove('back_ground')

                for cls in label_list:
                    mAP_h_class_dict["cls_%s_mAP_h_summ" % cls] = tf.Summary()
                    mAP_r_class_dict["cls_%s_mAP_r_summ" % cls] = tf.Summary()
                    recall_h_class_dict["cls_%s_recall_h_summ" %
                                        cls] = tf.Summary()
                    recall_r_class_dict["cls_%s_recall_r_summ" %
                                        cls] = tf.Summary()
                    precision_h_class_dict["cls_%s_precision_h_summ" %
                                           cls] = tf.Summary()
                    precision_r_class_dict["cls_%s_precision_r_summ" %
                                           cls] = tf.Summary()
                    f1score_h_class_dict["cls_%s_f1score_h_summ" %
                                         cls] = tf.Summary()
                    f1score_r_class_dict["cls_%s_f1score_r_summ" %
                                         cls] = tf.Summary()

                for cls in label_list:
                    mAP_h_class_dict["cls_%s_mAP_h_summ" % cls].value.add(
                        tag='EVAL_Class_mAP/{}_mAP_h'.format(cls),
                        simple_value=total_mAP_h[cls])
                    mAP_r_class_dict["cls_%s_mAP_r_summ" % cls].value.add(
                        tag='EVAL_Class_mAP/{}_mAP_r'.format(cls),
                        simple_value=total_mAP_r[cls])
                    recall_h_class_dict[
                        "cls_%s_recall_h_summ" % cls].value.add(
                            tag='EVAL_Class_recall/{}_recall_h'.format(cls),
                            simple_value=total_recall_h[cls])
                    recall_r_class_dict[
                        "cls_%s_recall_r_summ" % cls].value.add(
                            tag='EVAL_Class_recall/{}_recall_r'.format(cls),
                            simple_value=total_recall_r[cls])
                    precision_h_class_dict[
                        "cls_%s_precision_h_summ" % cls].value.add(
                            tag='EVAL_Class_precision/{}_precision_h'.format(
                                cls),
                            simple_value=total_precision_h[cls])
                    precision_r_class_dict[
                        "cls_%s_precision_r_summ" % cls].value.add(
                            tag='EVAL_Class_precision/{}_precision_r'.format(
                                cls),
                            simple_value=total_precision_r[cls])

                    f1score_h_cls = calc_fscore(total_precision_h[cls],
                                                total_recall_h[cls], 1)
                    f1score_r_cls = calc_fscore(total_precision_r[cls],
                                                total_recall_r[cls], 1)
                    f1score_h_class_dict[
                        "cls_%s_f1score_h_summ" % cls].value.add(
                            tag='EVAL_Class_f1score/{}_f1score_h'.format(cls),
                            simple_value=f1score_h_cls)
                    f1score_r_class_dict[
                        "cls_%s_f1score_r_summ" % cls].value.add(
                            tag='EVAL_Class_f1score/{}_f1score_r'.format(cls),
                            simple_value=f1score_r_cls)

                for cls in label_list:
                    summary_writer.add_summary(
                        mAP_h_class_dict["cls_%s_mAP_h_summ" % cls],
                        global_stepnp)
                    summary_writer.add_summary(
                        mAP_r_class_dict["cls_%s_mAP_r_summ" % cls],
                        global_stepnp)
                    summary_writer.add_summary(
                        recall_h_class_dict["cls_%s_recall_h_summ" % cls],
                        global_stepnp)
                    summary_writer.add_summary(
                        recall_r_class_dict["cls_%s_recall_r_summ" % cls],
                        global_stepnp)
                    summary_writer.add_summary(
                        precision_h_class_dict["cls_%s_precision_h_summ" %
                                               cls], global_stepnp)
                    summary_writer.add_summary(
                        precision_r_class_dict["cls_%s_precision_r_summ" %
                                               cls], global_stepnp)
                    summary_writer.add_summary(
                        f1score_h_class_dict["cls_%s_f1score_h_summ" % cls],
                        global_stepnp)
                    summary_writer.add_summary(
                        f1score_r_class_dict["cls_%s_f1score_r_summ" % cls],
                        global_stepnp)

                summary_writer.flush()

        if not os.path.exists(save_dir):
            os.makedirs(save_dir)

        save_ckpt = os.path.join(save_dir,
                                 'voc_' + str(global_stepnp) + 'model.ckpt')
        #saver.save(sess, save_ckpt)
        print('weights have been saved')

        time_to_next_eval = start_time + EVAL_INTERVAL - time.time()
        if time_to_next_eval > 0:
            time.sleep(time_to_next_eval)
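
calc_fscore, used for the F1 summaries above, is assumed to be the standard F-beta score computed from the aggregated precision and recall; a minimal sketch under that assumption:

def calc_fscore(precision, recall, beta=1):
    # F-beta score; returns 0 when precision and recall are both 0 to avoid division by zero.
    denom = beta ** 2 * precision + recall
    return (1 + beta ** 2) * precision * recall / denom if denom > 0 else 0.0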