def find_head_edge(box, head):
    head_dict = {0: '11', 1: '10', 2: '00', 3: '01'}
    flag = head_dict[int(head)]
    box_eight = forward_convert(np.array([box]), False)[0]
    box_eight = np.reshape(box_eight, [4, 2])
    four_edges = [[box_eight[0], box_eight[1]], [box_eight[1], box_eight[2]],
                  [box_eight[2], box_eight[3]], [box_eight[3], box_eight[0]]]
    for i in range(4):
        center_x = (four_edges[i][0][0] + four_edges[i][1][0]) / 2.
        center_y = (four_edges[i][0][1] + four_edges[i][1][1]) / 2.
        if (center_x - box[0]) >= 0 and (center_y - box[1]) >= 0:
            res = '11'
        elif (center_x - box[0]) >= 0 and (center_y - box[1]) <= 0:
            res = '10'
        elif (center_x - box[0]) <= 0 and (center_y - box[1]) <= 0:
            res = '00'
        else:
            res = '01'
        if res == flag:
            return four_edges[i]
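
# Usage sketch for find_head_edge (hedged: boxes follow the repo's
# 5-parameter [x_c, y_c, w, h, theta] convention, and forward_convert must be
# importable from its coordinate utilities; the values below are hypothetical):
#
#   box = np.array([50., 50., 40., 20., -30.])  # toy rotated box
#   edge = find_head_edge(box, head=1)          # head=1 -> quadrant flag '10'
#   if edge is not None:
#       (x1, y1), (x2, y2) = edge  # endpoints of the matching box edge
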
def iou_rotate(boxes1, boxes2):
    boxes1_convert = forward_convert(boxes1, False)
    # boxes2_convert = forward_convert(boxes2, False)

    boxes1_h = get_horizen_minAreaRectangle(boxes1_convert)
    # boxes2_h = get_horizen_minAreaRectangle(boxes2_convert)

    iou_h = bbox_overlaps(np.ascontiguousarray(boxes1_h, dtype=np.float64),
                          np.ascontiguousarray(boxes2, dtype=np.float64))

    # argmax_overlaps_inds = np.argmax(iou_h, axis=1)
    # target_boxes = boxes2[argmax_overlaps_inds]
    #
    # delta_theta = np.abs(boxes1[:, -1] - target_boxes[:, -1])
    # iou_h[delta_theta > 10] = 0
    #
    # argmax_overlaps_inds = np.argmax(iou_h, axis=1)
    # max_overlaps = iou_h[np.arange(iou_h.shape[0]), argmax_overlaps_inds]
    # indices = max_overlaps < 0.7
    # iou_h[indices] = 0

    # boxes1 = boxes1[indices]
    #
    # overlaps = get_iou_matrix(np.ascontiguousarray(boxes1, dtype=np.float32),
    #                           np.ascontiguousarray(boxes2, dtype=np.float32))
    #
    # iou_r = np.zeros_like(iou_h)
    # iou_r[indices] = overlaps

    return iou_h
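
# For reference, a minimal NumPy stand-in for the pairwise horizontal IoU
# that bbox_overlaps computes above (a sketch, not the repo's Cython
# implementation; boxes are [x_min, y_min, x_max, y_max] rows):
def horizontal_iou(boxes1, boxes2):
    # Pairwise intersection corners via broadcasting: (N, 1) against (1, M).
    x1 = np.maximum(boxes1[:, None, 0], boxes2[None, :, 0])
    y1 = np.maximum(boxes1[:, None, 1], boxes2[None, :, 1])
    x2 = np.minimum(boxes1[:, None, 2], boxes2[None, :, 2])
    y2 = np.minimum(boxes1[:, None, 3], boxes2[None, :, 3])
    inter = np.clip(x2 - x1, 0, None) * np.clip(y2 - y1, 0, None)
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    # Guard against degenerate zero-area boxes.
    return inter / np.maximum(area1[:, None] + area2[None, :] - inter, 1e-10)
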
def filter_small_gt(gtboxes):
    # Drop gt boxes whose width or height is under 5 px; if nothing survives,
    # keep the first box with w and h clamped to 5 so the set is never empty.
    gtboxes_5 = backward_convert(gtboxes)
    gtboxes_5_ = gtboxes_5[gtboxes_5[:, 2] >= 5, :]
    if gtboxes_5_.shape[0] != 0:
        gtboxes_5_ = gtboxes_5_[gtboxes_5_[:, 3] >= 5, :]
    if gtboxes_5_.shape[0] == 0:
        gtboxes_5_ = np.reshape(gtboxes_5[0, :], [-1, 6])
        gtboxes_5_[:, 2] = 5.
        gtboxes_5_[:, 3] = 5.
    gtboxes_8 = forward_convert(gtboxes_5_)
    return gtboxes_8
Example #4
def write_pixel_results(boxes, labels, scores, img_name, det_save_dir):
    '''
    Write coordinates in pixels to img_name.txt, one detection per line:
    x1,y1,...,x4,y4,category,probability
    '''
    if len(labels) == 0:
        return None

    boxes = coordinate_convert.forward_convert(boxes, with_label=False)

    num, _ = boxes.shape
    with open(os.path.join(cfgs.INFERENCE_SAVE_PATH, img_name + '.txt'),
              'a') as txt:
        for i in range(num):
            box = boxes[i].astype(np.int32).tolist()
            box.append(LABEl_NAME_MAP[labels[i]])
            box.append(scores[i])
            txt.write(','.join([str(x) for x in box]) + '\n')
def inference(det_net, data_dir, res_dir, save_res):

    # 1. preprocess img
    img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3])
    img_batch = tf.cast(img_plac, tf.float32)
    img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)
    img_batch = short_side_resize_for_inference_data(
        img_tensor=img_batch, target_shortside_len=cfgs.IMG_SHORT_SIDE_LEN[0])

    det_boxes_h, det_scores_h, det_category_h, \
    det_boxes_r, det_scores_r, det_category_r = det_net.build_whole_detection_network(input_img_batch=img_batch,
                                                                                      gtboxes_h_batch=None,
                                                                                      gtboxes_r_batch=None,
                                                                                      mask_batch=None)

    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())

    restorer, restore_ckpt = det_net.get_restorer()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        sess.run(init_op)
        if restorer is not None:
            restorer.restore(sess, restore_ckpt)
            print('restore model')

        imgs = os.listdir(data_dir)
        for i, a_img_name in enumerate(imgs):

            # f = open('./res_icdar_r/res_{}.txt'.format(a_img_name.split('.jpg')[0]), 'w')

            raw_img = cv2.imread(os.path.join(data_dir, a_img_name))
            # raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]

            start = time.time()
            resized_img, det_boxes_h_, det_scores_h_, det_category_h_, \
            det_boxes_r_, det_scores_r_, det_category_r_ = \
                sess.run(
                    [img_batch, det_boxes_h, det_scores_h, det_category_h,
                     det_boxes_r, det_scores_r, det_category_r],
                    feed_dict={img_plac: raw_img}
                )
            end = time.time()

            # res_r = coordinate_convert.forward_convert(det_boxes_r_, False)
            # res_r = np.array(res_r, np.int32)
            # for r in res_r:
            #     f.write('{},{},{},{},{},{},{},{}\n'.format(r[0], r[1], r[2], r[3],
            #                                                r[4], r[5], r[6], r[7]))
            # f.close()

            det_detections_h = draw_box_in_img.draw_box_cv(
                np.squeeze(resized_img, 0),
                boxes=det_boxes_h_,
                labels=det_category_h_,
                scores=det_scores_h_)
            det_detections_r = draw_box_in_img.draw_rotate_box_cv(
                np.squeeze(resized_img, 0),
                boxes=det_boxes_r_,
                labels=det_category_r_,
                scores=det_scores_r_)

            if save_res:
                save_dir = os.path.join(cfgs.INFERENCE_SAVE_PATH, cfgs.VERSION)
                tools.mkdir(save_dir)
                cv2.imwrite(save_dir + '/' + a_img_name + '_h.jpg',
                            det_detections_h)
                cv2.imwrite(save_dir + '/' + a_img_name + '_r.jpg',
                            det_detections_r)

            tools.mkdir(os.path.join(res_dir, cfgs.VERSION))
            txt_dir = os.path.join(
                res_dir, cfgs.VERSION,
                'res_{}.txt'.format(a_img_name.split('.jpg')[0]))

            fw = open(txt_dir, 'w')
            for b in coordinate_convert.forward_convert(det_boxes_r_, False):
                fw.write('%d,%d,%d,%d,%d,%d,%d,%d\n' %
                         (b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]))

            fw.close()
            view_bar('{} cost {}s'.format(a_img_name, (end - start)), i + 1,
                     len(imgs))
def worker(gpu_id, images, det_net, args, result_queue):
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

    img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None,
                                                     3])  # is RGB. not BGR
    img_batch = tf.cast(img_plac, tf.float32)

    img_batch = short_side_resize_for_inference_data(
        img_tensor=img_batch,
        target_shortside_len=cfgs.IMG_SHORT_SIDE_LEN,
        length_limitation=cfgs.IMG_MAX_LENGTH)
    if cfgs.NET_NAME in ['resnet152_v1d', 'resnet101_v1d', 'resnet50_v1d']:
        img_batch = (img_batch / 255 - tf.constant(
            cfgs.PIXEL_MEAN_)) / tf.constant(cfgs.PIXEL_STD)
    else:
        img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)

    img_batch = tf.expand_dims(img_batch, axis=0)

    detection_boxes, detection_scores, detection_category = det_net.build_whole_detection_network(
        input_img_batch=img_batch, gtboxes_batch_h=None, gtboxes_batch_r=None)

    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())

    restorer, restore_ckpt = det_net.get_restorer()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        sess.run(init_op)
        if restorer is not None:
            restorer.restore(sess, restore_ckpt)
            print('restore model %d ...' % gpu_id)

        for img_path in images:

            # if 'P0016' not in img_path:
            #     continue

            img = cv2.imread(img_path)

            box_res_rotate = []
            label_res_rotate = []
            score_res_rotate = []

            imgH = img.shape[0]
            imgW = img.shape[1]

            if imgH < args.h_len:
                temp = np.zeros([args.h_len, imgW, 3], np.float32)
                temp[0:imgH, :, :] = img
                img = temp
                imgH = args.h_len

            if imgW < args.w_len:
                temp = np.zeros([imgH, args.w_len, 3], np.float32)
                temp[:, 0:imgW, :] = img
                img = temp
                imgW = args.w_len

            for hh in range(0, imgH, args.h_len - args.h_overlap):
                if imgH - hh - 1 < args.h_len:
                    hh_ = imgH - args.h_len
                else:
                    hh_ = hh
                for ww in range(0, imgW, args.w_len - args.w_overlap):
                    if imgW - ww - 1 < args.w_len:
                        ww_ = imgW - args.w_len
                    else:
                        ww_ = ww
                    src_img = img[hh_:(hh_ + args.h_len),
                                  ww_:(ww_ + args.w_len), :]

                    resized_img, det_boxes_r_, det_scores_r_, det_category_r_ = \
                        sess.run(
                            [img_batch, detection_boxes, detection_scores, detection_category],
                            feed_dict={img_plac: src_img[:, :, ::-1]}
                        )

                    resized_h, resized_w = resized_img.shape[
                        1], resized_img.shape[2]
                    src_h, src_w = src_img.shape[0], src_img.shape[1]

                    if len(det_boxes_r_) > 0:
                        det_boxes_r_ = forward_convert(det_boxes_r_, False)
                        det_boxes_r_[:, 0::2] *= (src_w / resized_w)
                        det_boxes_r_[:, 1::2] *= (src_h / resized_h)
                        det_boxes_r_ = backward_convert(det_boxes_r_, False)

                        for ii in range(len(det_boxes_r_)):
                            box_rotate = det_boxes_r_[ii]
                            box_rotate[0] = box_rotate[0] + ww_
                            box_rotate[1] = box_rotate[1] + hh_
                            box_res_rotate.append(box_rotate)
                            label_res_rotate.append(det_category_r_[ii])
                            score_res_rotate.append(det_scores_r_[ii])

            box_res_rotate = np.array(box_res_rotate)
            label_res_rotate = np.array(label_res_rotate)
            score_res_rotate = np.array(score_res_rotate)

            box_res_rotate_ = []
            label_res_rotate_ = []
            score_res_rotate_ = []
            threshold = {
                'roundabout': 0.1,
                'tennis-court': 0.3,
                'swimming-pool': 0.1,
                'storage-tank': 0.2,
                'soccer-ball-field': 0.3,
                'small-vehicle': 0.2,
                'ship': 0.05,
                'plane': 0.3,
                'large-vehicle': 0.1,
                'helicopter': 0.2,
                'harbor': 0.0001,
                'ground-track-field': 0.3,
                'bridge': 0.0001,
                'basketball-court': 0.3,
                'baseball-diamond': 0.3
            }

            for sub_class in range(1, cfgs.CLASS_NUM + 1):
                index = np.where(label_res_rotate == sub_class)[0]
                if len(index) == 0:
                    continue
                tmp_boxes_r = box_res_rotate[index]
                tmp_label_r = label_res_rotate[index]
                tmp_score_r = score_res_rotate[index]

                tmp_boxes_r = np.array(tmp_boxes_r)
                tmp = np.zeros(
                    [tmp_boxes_r.shape[0], tmp_boxes_r.shape[1] + 1])
                tmp[:, 0:-1] = tmp_boxes_r
                tmp[:, -1] = np.array(tmp_score_r)

                try:
                    inx = nms_rotate.nms_rotate_cpu(
                        boxes=np.array(tmp_boxes_r),
                        scores=np.array(tmp_score_r),
                        iou_threshold=threshold[LABEL_NAME_MAP[sub_class]],
                        max_output_size=500)
                except Exception:
                    # rotate_gpu_nms computes the IoU of two identical
                    # rectangles as 0, so jitter x_c slightly so duplicates
                    # cannot slip through NMS unsuppressed.
                    jitter = np.zeros(
                        [tmp_boxes_r.shape[0], tmp_boxes_r.shape[1] + 1])
                    jitter[:, 0] += np.random.rand(
                        tmp_boxes_r.shape[0], ) / 1000
                    inx = rotate_gpu_nms(
                        np.array(tmp, np.float32) +
                        np.array(jitter, np.float32),
                        float(threshold[LABEL_NAME_MAP[sub_class]]), 0)

                box_res_rotate_.extend(np.array(tmp_boxes_r)[inx])
                score_res_rotate_.extend(np.array(tmp_score_r)[inx])
                label_res_rotate_.extend(np.array(tmp_label_r)[inx])

            result_dict = {
                'boxes': np.array(box_res_rotate_),
                'scores': np.array(score_res_rotate_),
                'labels': np.array(label_res_rotate_),
                'image_id': img_path
            }
            result_queue.put_nowait(result_dict)
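
# The crop loop in worker clamps the last window so it never runs past the
# image border. A standalone sketch of that offset logic (tile_offsets is a
# hypothetical helper, equivalent to the hh_/ww_ computation above; assumes
# win_len > overlap):
def tile_offsets(img_len, win_len, overlap):
    # Window start positions along one axis; stride = win_len - overlap.
    # Clamping the last start to img_len - win_len can repeat an offset near
    # the border, exactly as the original loop does.
    return [min(start, img_len - win_len)
            for start in range(0, img_len, win_len - overlap)]

# e.g. tile_offsets(1000, 600, 200) -> [0, 400, 400]
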
def test_dota(det_net, real_test_img_list, args, txt_name):

    save_path = os.path.join('./test_dota', cfgs.VERSION)

    nr_records = len(real_test_img_list)
    pbar = tqdm(total=nr_records)
    gpu_num = len(args.gpus.strip().split(','))

    nr_image = math.ceil(nr_records / gpu_num)
    result_queue = Queue(500)
    procs = []

    for i, gpu_id in enumerate(args.gpus.strip().split(',')):
        start = i * nr_image
        end = min(start + nr_image, nr_records)
        split_records = real_test_img_list[start:end]
        proc = Process(target=worker,
                       args=(int(gpu_id), split_records, det_net, args,
                             result_queue))
        print('process:%d, start:%d, end:%d' % (i, start, end))
        proc.start()
        procs.append(proc)

    for i in range(nr_records):
        res = result_queue.get()

        if args.show_box:

            nake_name = res['image_id'].split('/')[-1]
            tools.mkdir(os.path.join(save_path, 'dota_img_vis'))
            draw_path = os.path.join(save_path, 'dota_img_vis', nake_name)

            draw_img = np.array(cv2.imread(res['image_id']), np.float32)

            detected_indices = res['scores'] >= cfgs.VIS_SCORE
            detected_scores = res['scores'][detected_indices]
            detected_boxes = res['boxes'][detected_indices]
            detected_categories = res['labels'][detected_indices]

            final_detections = draw_box_in_img.draw_boxes_with_label_and_scores(
                draw_img,
                boxes=detected_boxes,
                labels=detected_categories,
                scores=detected_scores,
                method=1,
                in_graph=False)
            cv2.imwrite(draw_path, final_detections)

        else:
            CLASS_DOTA = NAME_LABEL_MAP.keys()
            write_handle = {}

            tools.mkdir(os.path.join(save_path, 'dota_res'))
            for sub_class in CLASS_DOTA:
                if sub_class == 'back_ground':
                    continue
                write_handle[sub_class] = open(
                    os.path.join(save_path, 'dota_res',
                                 'Task1_%s.txt' % sub_class), 'a+')

            rboxes = forward_convert(res['boxes'], with_label=False)

            for j, rbox in enumerate(rboxes):
                command = '%s %.3f %.1f %.1f %.1f %.1f %.1f %.1f %.1f %.1f\n' % (
                    res['image_id'].split('/')[-1].split('.')[0],
                    res['scores'][j],
                    rbox[0],
                    rbox[1],
                    rbox[2],
                    rbox[3],
                    rbox[4],
                    rbox[5],
                    rbox[6],
                    rbox[7],
                )
                write_handle[LABEL_NAME_MAP[res['labels'][j]]].write(command)

            for sub_class in CLASS_DOTA:
                if sub_class == 'back_ground':
                    continue
                write_handle[sub_class].close()

            fw = open(txt_name, 'a+')
            fw.write('{}\n'.format(res['image_id'].split('/')[-1]))
            fw.close()

        pbar.set_description("Test image %s" % res['image_id'].split('/')[-1])

        pbar.update(1)

    for p in procs:
        p.join()
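
# test_dota scatters work by slicing the image list into contiguous chunks of
# ceil(N / gpu_num) images, one per GPU process. The same chunking as a
# hypothetical standalone helper (assumes `math` is imported, as test_dota
# already requires):
def chunk_records(records, num_workers):
    per_worker = int(math.ceil(len(records) / num_workers))
    return [records[i * per_worker:(i + 1) * per_worker]
            for i in range(num_workers)]
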
Example #8
def inference(det_net,
              file_paths,
              des_folder,
              h_len,
              w_len,
              h_overlap,
              w_overlap,
              save_res=False):

    if save_res:
        assert cfgs.SHOW_SCORE_THRSHOLD >= 0.5, \
            'please set score threshold (example: SHOW_SCORE_THRSHOLD = 0.5) in cfgs.py'

    else:
        assert cfgs.SHOW_SCORE_THRSHOLD < 0.005, \
            'please set score threshold (example: SHOW_SCORE_THRSHOLD = 0.00) in cfgs.py'

    # 1. preprocess img
    img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3])
    img_batch = tf.cast(img_plac, tf.float32)
    img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)
    img_batch = short_side_resize_for_inference_data(
        img_tensor=img_batch,
        target_shortside_len=cfgs.IMG_SHORT_SIDE_LEN,
        is_resize=False)

    det_boxes_h, det_scores_h, det_category_h, \
    det_boxes_r, det_scores_r, det_category_r = det_net.build_whole_detection_network(input_img_batch=img_batch,
                                                                                      gtboxes_h_batch=None,
                                                                                      gtboxes_r_batch=None)

    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())

    restorer, restore_ckpt = det_net.get_restorer()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        sess.run(init_op)
        if restorer is not None:
            restorer.restore(sess, restore_ckpt)
            print('restore model')

        for count, img_path in enumerate(file_paths):
            start = timer()
            img = cv2.imread(img_path)

            box_res = []
            label_res = []
            score_res = []
            box_res_rotate = []
            label_res_rotate = []

            score_res_rotate = []

            imgH = img.shape[0]
            imgW = img.shape[1]

            if imgH < h_len:
                temp = np.zeros([h_len, imgW, 3], np.float32)
                temp[0:imgH, :, :] = img
                img = temp
                imgH = h_len

            if imgW < w_len:
                temp = np.zeros([imgH, w_len, 3], np.float32)
                temp[:, 0:imgW, :] = img
                img = temp
                imgW = w_len

            for hh in range(0, imgH, h_len - h_overlap):
                if imgH - hh - 1 < h_len:
                    hh_ = imgH - h_len
                else:
                    hh_ = hh
                for ww in range(0, imgW, w_len - w_overlap):
                    if imgW - ww - 1 < w_len:
                        ww_ = imgW - w_len
                    else:
                        ww_ = ww
                    src_img = img[hh_:(hh_ + h_len), ww_:(ww_ + w_len), :]

                    det_boxes_h_, det_scores_h_, det_category_h_, \
                    det_boxes_r_, det_scores_r_, det_category_r_ = \
                        sess.run(
                            [det_boxes_h, det_scores_h, det_category_h,
                             det_boxes_r, det_scores_r, det_category_r],
                            feed_dict={img_plac: src_img[:, :, ::-1]}
                        )

                    if len(det_boxes_h_) > 0:
                        for ii in range(len(det_boxes_h_)):
                            box = det_boxes_h_[ii]
                            box[0] = box[0] + ww_
                            box[1] = box[1] + hh_
                            box[2] = box[2] + ww_
                            box[3] = box[3] + hh_
                            box_res.append(box)
                            label_res.append(det_category_h_[ii])
                            score_res.append(det_scores_h_[ii])
                    if len(det_boxes_r_) > 0:
                        for ii in range(len(det_boxes_r_)):
                            box_rotate = det_boxes_r_[ii]
                            box_rotate[0] = box_rotate[0] + ww_
                            box_rotate[1] = box_rotate[1] + hh_
                            box_res_rotate.append(box_rotate)
                            label_res_rotate.append(det_category_r_[ii])
                            score_res_rotate.append(det_scores_r_[ii])

            box_res = np.array(box_res)
            label_res = np.array(label_res)
            score_res = np.array(score_res)

            box_res_rotate = np.array(box_res_rotate)
            label_res_rotate = np.array(label_res_rotate)
            score_res_rotate = np.array(score_res_rotate)

            box_res_rotate_, label_res_rotate_, score_res_rotate_ = [], [], []
            box_res_, label_res_, score_res_ = [], [], []

            r_threshold = {
                'roundabout': 0.1,
                'tennis-court': 0.3,
                'swimming-pool': 0.1,
                'storage-tank': 0.2,
                'soccer-ball-field': 0.3,
                'small-vehicle': 0.2,
                'ship': 0.05,
                'plane': 0.3,
                'large-vehicle': 0.1,
                'helicopter': 0.2,
                'harbor': 0.0001,
                'ground-track-field': 0.3,
                'bridge': 0.0001,
                'basketball-court': 0.3,
                'baseball-diamond': 0.3
            }

            h_threshold = {
                'roundabout': 0.35,
                'tennis-court': 0.35,
                'swimming-pool': 0.4,
                'storage-tank': 0.3,
                'soccer-ball-field': 0.3,
                'small-vehicle': 0.4,
                'ship': 0.35,
                'plane': 0.35,
                'large-vehicle': 0.4,
                'helicopter': 0.4,
                'harbor': 0.3,
                'ground-track-field': 0.4,
                'bridge': 0.3,
                'basketball-court': 0.4,
                'baseball-diamond': 0.3
            }

            for sub_class in range(1, cfgs.CLASS_NUM + 1):
                index = np.where(label_res_rotate == sub_class)[0]
                if len(index) == 0:
                    continue
                tmp_boxes_r = box_res_rotate[index]
                tmp_label_r = label_res_rotate[index]
                tmp_score_r = score_res_rotate[index]

                tmp_boxes_r = np.array(tmp_boxes_r)
                tmp = np.zeros(
                    [tmp_boxes_r.shape[0], tmp_boxes_r.shape[1] + 1])
                tmp[:, 0:-1] = tmp_boxes_r
                tmp[:, -1] = np.array(tmp_score_r)

                try:
                    inx = nms_rotate.nms_rotate_cpu(
                        boxes=np.array(tmp_boxes_r),
                        scores=np.array(tmp_score_r),
                        iou_threshold=r_threshold[LABEl_NAME_MAP[sub_class]],
                        max_output_size=500)
                except Exception:
                    # rotate_gpu_nms computes the IoU of two identical
                    # rectangles as 0, so jitter x_c slightly so duplicates
                    # cannot slip through NMS unsuppressed.
                    jitter = np.zeros(
                        [tmp_boxes_r.shape[0], tmp_boxes_r.shape[1] + 1])
                    jitter[:, 0] += np.random.rand(
                        tmp_boxes_r.shape[0], ) / 1000
                    inx = rotate_gpu_nms(
                        np.array(tmp, np.float32) +
                        np.array(jitter, np.float32),
                        float(r_threshold[LABEl_NAME_MAP[sub_class]]), 0)

                box_res_rotate_.extend(np.array(tmp_boxes_r)[inx])
                score_res_rotate_.extend(np.array(tmp_score_r)[inx])
                label_res_rotate_.extend(np.array(tmp_label_r)[inx])

            for sub_class in range(1, cfgs.CLASS_NUM + 1):
                index = np.where(label_res == sub_class)[0]
                if len(index) == 0:
                    continue
                tmp_boxes_h = box_res[index]
                tmp_label_h = label_res[index]
                tmp_score_h = score_res[index]

                tmp_boxes_h = np.array(tmp_boxes_h)
                tmp = np.zeros(
                    [tmp_boxes_h.shape[0], tmp_boxes_h.shape[1] + 1])
                tmp[:, 0:-1] = tmp_boxes_h
                tmp[:, -1] = np.array(tmp_score_h)

                inx = nms.py_cpu_nms(
                    dets=np.array(tmp, np.float32),
                    thresh=h_threshold[LABEl_NAME_MAP[sub_class]],
                    max_output_size=500)

                box_res_.extend(np.array(tmp_boxes_h)[inx])
                score_res_.extend(np.array(tmp_score_h)[inx])
                label_res_.extend(np.array(tmp_label_h)[inx])

            time_elapsed = timer() - start

            if save_res:
                det_detections_h = draw_box_in_img.draw_box_cv(
                    np.array(img, np.float32) - np.array(cfgs.PIXEL_MEAN),
                    boxes=np.array(box_res_),
                    labels=np.array(label_res_),
                    scores=np.array(score_res_))
                det_detections_r = draw_box_in_img.draw_rotate_box_cv(
                    np.array(img, np.float32) - np.array(cfgs.PIXEL_MEAN),
                    boxes=np.array(box_res_rotate_),
                    labels=np.array(label_res_rotate_),
                    scores=np.array(score_res_rotate_))
                save_dir = os.path.join(des_folder, cfgs.VERSION)
                tools.mkdir(save_dir)
                cv2.imwrite(
                    save_dir + '/' + img_path.split('/')[-1].split('.')[0] +
                    '_h.jpg', det_detections_h)
                cv2.imwrite(
                    save_dir + '/' + img_path.split('/')[-1].split('.')[0] +
                    '_r.jpg', det_detections_r)

                view_bar(
                    '{} cost {}s'.format(
                        img_path.split('/')[-1].split('.')[0], time_elapsed),
                    count + 1, len(file_paths))

            else:
                # eval txt
                CLASS_DOTA = NAME_LABEL_MAP.keys()
                # Task1
                write_handle_r = {}
                write_handle_h_ = {}
                txt_dir_r = os.path.join('txt_output', cfgs.VERSION + '_r')
                txt_dir_h_minAreaRect = os.path.join(
                    'txt_output', cfgs.VERSION + '_h_minAreaRect')
                tools.mkdir(txt_dir_r)
                tools.mkdir(txt_dir_h_minAreaRect)
                for sub_class in CLASS_DOTA:
                    if sub_class == 'back_ground':
                        continue
                    write_handle_r[sub_class] = open(
                        os.path.join(txt_dir_r, 'Task1_%s.txt' % sub_class),
                        'a+')
                    write_handle_h_[sub_class] = open(
                        os.path.join(txt_dir_h_minAreaRect,
                                     'Task2_%s.txt' % sub_class), 'a+')

                rboxes = coordinate_convert.forward_convert(box_res_rotate_,
                                                            with_label=False)

                for i, rbox in enumerate(rboxes):
                    command = '%s %.3f %.1f %.1f %.1f %.1f %.1f %.1f %.1f %.1f\n' % (
                        img_path.split('/')[-1].split('.')[0],
                        score_res_rotate_[i],
                        rbox[0],
                        rbox[1],
                        rbox[2],
                        rbox[3],
                        rbox[4],
                        rbox[5],
                        rbox[6],
                        rbox[7],
                    )
                    command_ = '%s %.3f %.1f %.1f %.1f %.1f\n' % (
                        img_path.split('/')[-1].split('.')[0],
                        score_res_rotate_[i], min(rbox[::2]), min(
                            rbox[1::2]), max(rbox[::2]), max(rbox[1::2]))
                    write_handle_r[LABEl_NAME_MAP[label_res_rotate_[i]]].write(
                        command)
                    write_handle_h_[LABEl_NAME_MAP[
                        label_res_rotate_[i]]].write(command_)

                for sub_class in CLASS_DOTA:
                    if sub_class == 'back_ground':
                        continue
                    write_handle_r[sub_class].close()
                    write_handle_h_[sub_class].close()

                # Task2
                write_handle_h = {}
                txt_dir_h = os.path.join('txt_output', cfgs.VERSION + '_h')
                tools.mkdir(txt_dir_h)
                for sub_class in CLASS_DOTA:
                    if sub_class == 'back_ground':
                        continue
                    write_handle_h[sub_class] = open(
                        os.path.join(txt_dir_h, 'Task2_%s.txt' % sub_class),
                        'a+')

                for i, hbox in enumerate(box_res_):
                    command = '%s %.3f %.1f %.1f %.1f %.1f\n' % (
                        img_path.split('/')[-1].split('.')[0], score_res_[i],
                        hbox[0], hbox[1], hbox[2], hbox[3])
                    write_handle_h[LABEl_NAME_MAP[label_res_[i]]].write(
                        command)

                for sub_class in CLASS_DOTA:
                    if sub_class == 'back_ground':
                        continue
                    write_handle_h[sub_class].close()

                view_bar(
                    '{} cost {}s'.format(
                        img_path.split('/')[-1].split('.')[0], time_elapsed),
                    count + 1, len(file_paths))
    # init_op = tf.group(
    #     tf.global_variables_initializer(),
    #     tf.local_variables_initializer()
    # )
    #
    # config = tf.ConfigProto()
    # config.gpu_options.allow_growth = True
    #
    # with tf.Session(config=config) as sess:
    #     sess.run(init_op)
    #
    #     coord = tf.train.Coordinator()
    #     threads = tf.train.start_queue_runners(sess, coord)
    #
    #     img_name_batch_, img_batch_, gtboxes_and_label_batch_, num_objects_batch_, img_h_batch_, img_w_batch_ \
    #         = sess.run([img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch, img_h_batch, img_w_batch])
    #
    #     print(img_name_batch_.shape)
    #     print(img_batch_.shape)
    #     print(gtboxes_and_label_batch_.shape)
    #     print(num_objects_batch_.shape)
    #     print(img_h_batch_.shape)
    #     print('debug')
    #
    #     coord.request_stop()
    #     coord.join(threads)
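    # Leftover sanity check: run two toy rotated boxes (one under 5 px wide)
    # through filter_small_gt and print the clamped result.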
    tmp = np.array([[50, 50, 40, 50, -30, 1], [50, 50, 4, 5, -30, 1]])
    tmp = forward_convert(tmp)
    print(filter_small_gt(tmp))
    print(backward_convert(filter_small_gt(tmp)))
Example #10
File: eval.py  Project: zhangiguang/ship
def eval_with_plac(img_dir, det_net, image_ext, draw_imgs=False):

    # 1. preprocess img
    img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None,
                                                     3])  # is RGB. not BGR
    img_batch = tf.cast(img_plac, tf.float32)
    img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)
    img_batch = short_side_resize_for_inference_data(
        img_tensor=img_batch,
        target_shortside_len=cfgs.IMG_SHORT_SIDE_LEN,
        is_resize=False)

    det_boxes_r, det_scores_r, det_category_r = det_net.build_whole_detection_network(
        input_img_batch=img_batch, gtboxes_h_batch=None, gtboxes_r_batch=None)

    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())

    restorer, restore_ckpt = det_net.get_restorer()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        sess.run(init_op)
        if restorer is not None:
            restorer.restore(sess, restore_ckpt)
            print('restore model')

        all_boxes_h = []
        all_boxes_r = []
        imgs = os.listdir(img_dir)
        for i, a_img_name in enumerate(imgs):

            a_img_name = a_img_name.split(image_ext)[0]
            recs = {}
            recs[a_img_name] = parse_rec(
                os.path.join(test_annotation_path, a_img_name + '.xml'))
            #R = [obj for obj in recs[a_img_name]]
            bbox = np.squeeze(np.array([x['bbox'] for x in recs[a_img_name]]))
            #labels = bbox[:, -1]
            if len(bbox.shape) == 1:
                bbox = np.expand_dims(bbox, axis=0)
            labels = bbox[:, -1]

            raw_img = cv2.imread(os.path.join(img_dir, a_img_name + image_ext))
            raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]

            start = time.time()
            resized_img,  \
            det_boxes_r_, det_scores_r_, det_category_r_ = \
                sess.run(
                    [img_batch,
                     det_boxes_r, det_scores_r, det_category_r],
                    feed_dict={img_plac: raw_img}
                )
            end = time.time()
            det_boxes_r_ = det_boxes_r_[det_scores_r_ >= 0.4]
            det_category_r_ = det_category_r_[det_scores_r_ >= 0.4]
            det_scores_r_ = det_scores_r_[det_scores_r_ >= 0.4]

            keep = nms_rotate.nms_rotate_cpu(det_boxes_r_, det_scores_r_, 0.3,
                                             20)
            det_boxes_r_ = det_boxes_r_[keep]
            det_scores_r_ = det_scores_r_[keep]
            det_category_r_ = det_category_r_[keep]
            # keep only boxes whose aspect ratio is > 2 or <= 1/2
            index = (det_boxes_r_[:, 2] / det_boxes_r_[:, 3] >
                     2) | (det_boxes_r_[:, 2] / det_boxes_r_[:, 3] <= 0.5)
            det_boxes_r_ = det_boxes_r_[index]
            det_scores_r_ = det_scores_r_[index]
            det_category_r_ = det_category_r_[index]

            # print("{} cost time : {} ".format(img_name, (end - start)))
            if draw_imgs:
                det_detections_h = draw_box_in_img.draw_rotate_box_cv1(
                    np.squeeze(resized_img, 0),
                    boxes=bbox,
                    labels=labels,
                    scores=np.ones(bbox.shape[0]))
                det_detections_r = draw_box_in_img.draw_rotate_box_cv(
                    np.squeeze(resized_img, 0),
                    boxes=det_boxes_r_,
                    labels=det_category_r_,
                    scores=det_scores_r_)
                save_dir = os.path.join(cfgs.TEST_SAVE_PATH, cfgs.VERSION)
                tools.mkdir(save_dir)
                cv2.imwrite(save_dir + '/' + a_img_name + '_h.jpg',
                            det_detections_h[:, :, ::-1])
                cv2.imwrite(save_dir + '/' + a_img_name + '_r.jpg',
                            det_detections_r[:, :, ::-1])

            # xmin, ymin, xmax, ymax = det_boxes_h_[:, 0], det_boxes_h_[:, 1], \
            #                          det_boxes_h_[:, 2], det_boxes_h_[:, 3]

            if det_boxes_r_.shape[0] != 0:
                resized_h, resized_w = resized_img.shape[1], resized_img.shape[
                    2]
                det_boxes_r_ = forward_convert(det_boxes_r_, False)
                det_boxes_r_[:, 0::2] *= (raw_w / resized_w)
                det_boxes_r_[:, 1::2] *= (raw_h / resized_h)
                det_boxes_r_ = back_forward_convert(det_boxes_r_, False)

            x_c, y_c, w, h, theta = det_boxes_r_[:, 0], det_boxes_r_[:, 1], det_boxes_r_[:, 2], \
                                    det_boxes_r_[:, 3], det_boxes_r_[:, 4]

            # xmin = xmin * raw_w / resized_w
            # xmax = xmax * raw_w / resized_w
            # ymin = ymin * raw_h / resized_h
            # ymax = ymax * raw_h / resized_h

            # boxes_h = np.transpose(np.stack([xmin, ymin, xmax, ymax]))
            boxes_r = np.transpose(np.stack([x_c, y_c, w, h, theta]))
            # dets_h = np.hstack((det_category_h_.reshape(-1, 1),
            #                     det_scores_h_.reshape(-1, 1),
            #                     boxes_h))
            dets_r = np.hstack((det_category_r_.reshape(-1, 1),
                                det_scores_r_.reshape(-1, 1), boxes_r))
            # all_boxes_h.append(dets_h)
            all_boxes_r.append(dets_r)

            tools.view_bar(
                '{} image cost {}s'.format(a_img_name, (end - start)), i + 1,
                len(imgs))

        fw1 = open(cfgs.VERSION + '_detections_h.pkl', 'wb')
        fw2 = open(cfgs.VERSION + '_detections_r.pkl', 'wb')
        pickle.dump(all_boxes_h, fw1)
        pickle.dump(all_boxes_r, fw2)
        fw1.close()
        fw2.close()
Example #11
def eval_with_plac(det_net, args):

    # 1. preprocess img
    img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None,
                                                     3])  # is RGB. not BGR
    img_batch = tf.cast(img_plac, tf.float32)

    if cfgs.NET_NAME in ['resnet152_v1d', 'resnet101_v1d', 'resnet50_v1d']:
        img_batch = (img_batch / 255 - tf.constant(
            cfgs.PIXEL_MEAN_)) / tf.constant(cfgs.PIXEL_STD)
    else:
        img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)

    img_batch = tf.expand_dims(img_batch, axis=0)

    detection_boxes, detection_scores, detection_category, detection_boxes_angle = det_net.build_whole_detection_network(
        input_img_batch=img_batch,
        gtboxes_batch_h=None,
        gtboxes_batch_r=None,
        gt_smooth_label=None)

    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())

    restorer, restore_ckpt = det_net.get_restorer()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        sess.run(init_op)
        if restorer is not None:
            restorer.restore(sess, restore_ckpt)
            print('restore model')

        all_boxes_r = []
        img_short_side_len_list = cfgs.IMG_SHORT_SIDE_LEN if isinstance(
            cfgs.IMG_SHORT_SIDE_LEN, list) else [cfgs.IMG_SHORT_SIDE_LEN]
        img_short_side_len_list = [
            img_short_side_len_list[0]
        ] if not args.multi_scale else img_short_side_len_list
        imgs = os.listdir(args.img_dir)
        pbar = tqdm(imgs)
        for a_img_name in pbar:
            a_img_name = a_img_name.split(args.image_ext)[0]

            raw_img = cv2.imread(
                os.path.join(args.img_dir, a_img_name + args.image_ext))
            raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]

            box_res_rotate = []
            label_res_rotate = []
            score_res_rotate = []

            for short_size in img_short_side_len_list:
                max_len = cfgs.IMG_MAX_LENGTH
                if raw_h < raw_w:
                    new_h, new_w = short_size, min(
                        int(short_size * float(raw_w) / raw_h), max_len)
                else:
                    new_h, new_w = min(int(short_size * float(raw_h) / raw_w),
                                       max_len), short_size
                img_resize = cv2.resize(raw_img, (new_w, new_h))

                resized_img, det_boxes_r_, det_scores_r_, det_category_r_ = \
                    sess.run(
                        [img_batch, detection_boxes_angle, detection_scores, detection_category],
                        feed_dict={img_plac: img_resize[:, :, ::-1]}
                    )
                resized_h, resized_w = resized_img.shape[1], resized_img.shape[
                    2]

                if len(det_boxes_r_) > 0:
                    det_boxes_r_ = forward_convert(det_boxes_r_, False)
                    det_boxes_r_[:, 0::2] *= (raw_w / resized_w)
                    det_boxes_r_[:, 1::2] *= (raw_h / resized_h)

                    for ii in range(len(det_boxes_r_)):
                        box_rotate = det_boxes_r_[ii]
                        box_res_rotate.append(box_rotate)
                        label_res_rotate.append(det_category_r_[ii])
                        score_res_rotate.append(det_scores_r_[ii])
            box_res_rotate = np.array(box_res_rotate)
            label_res_rotate = np.array(label_res_rotate)
            score_res_rotate = np.array(score_res_rotate)

            box_res_rotate_ = []
            label_res_rotate_ = []
            score_res_rotate_ = []
            threshold = {'car': 0.2, 'plane': 0.3}

            for sub_class in range(1, cfgs.CLASS_NUM + 1):
                index = np.where(label_res_rotate == sub_class)[0]
                if len(index) == 0:
                    continue
                tmp_boxes_r = box_res_rotate[index]
                tmp_label_r = label_res_rotate[index]
                tmp_score_r = score_res_rotate[index]

                tmp_boxes_r_ = backward_convert(tmp_boxes_r, False)

                try:
                    inx = nms_rotate.nms_rotate_cpu(
                        boxes=np.array(tmp_boxes_r_),
                        scores=np.array(tmp_score_r),
                        iou_threshold=threshold[LABEL_NAME_MAP[sub_class]],
                        max_output_size=150)
                except Exception:
                    tmp_boxes_r_ = np.array(tmp_boxes_r_)
                    tmp = np.zeros(
                        [tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
                    tmp[:, 0:-1] = tmp_boxes_r_
                    tmp[:, -1] = np.array(tmp_score_r)
                    # rotate_gpu_nms computes the IoU of two identical
                    # rectangles as 0, so jitter x_c slightly so duplicates
                    # cannot slip through NMS unsuppressed.
                    jitter = np.zeros(
                        [tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
                    jitter[:, 0] += np.random.rand(
                        tmp_boxes_r_.shape[0], ) / 1000
                    inx = rotate_gpu_nms(
                        np.array(tmp, np.float32) +
                        np.array(jitter, np.float32),
                        float(threshold[LABEL_NAME_MAP[sub_class]]), 0)

                box_res_rotate_.extend(np.array(tmp_boxes_r)[inx])
                score_res_rotate_.extend(np.array(tmp_score_r)[inx])
                label_res_rotate_.extend(np.array(tmp_label_r)[inx])

            box_res_rotate_ = np.array(box_res_rotate_)
            score_res_rotate_ = np.array(score_res_rotate_)
            label_res_rotate_ = np.array(label_res_rotate_)

            if args.draw_imgs:
                detected_indices = score_res_rotate_ >= cfgs.VIS_SCORE
                detected_scores = score_res_rotate_[detected_indices]
                detected_boxes = box_res_rotate_[detected_indices]
                detected_boxes = backward_convert(detected_boxes,
                                                  with_label=False)
                detected_categories = label_res_rotate_[detected_indices]

                det_detections_r = draw_box_in_img.draw_boxes_with_label_and_scores(
                    np.array(raw_img, np.float32),
                    boxes=detected_boxes,
                    labels=detected_categories,
                    scores=detected_scores,
                    method=1,
                    in_graph=False,
                    is_csl=True)

                save_dir = os.path.join('test_ucas_aod', cfgs.VERSION,
                                        'ucas_aod_img_vis')
                tools.mkdir(save_dir)

                cv2.imwrite(save_dir + '/{}.jpg'.format(a_img_name),
                            det_detections_r[:, :, ::-1])

            if box_res_rotate_.shape[0] != 0:
                box_res_rotate_ = backward_convert(box_res_rotate_, False)

            x_c, y_c, w, h, theta = box_res_rotate_[:, 0], box_res_rotate_[:, 1], box_res_rotate_[:, 2], \
                                    box_res_rotate_[:, 3], box_res_rotate_[:, 4]

            boxes_r = np.transpose(np.stack([x_c, y_c, w, h, theta]))
            dets_r = np.hstack((label_res_rotate_.reshape(-1, 1),
                                score_res_rotate_.reshape(-1, 1), boxes_r))
            all_boxes_r.append(dets_r)

            pbar.set_description("Eval image %s" % a_img_name)

        # fw1 = open(cfgs.VERSION + '_detections_r.pkl', 'wb')
        # pickle.dump(all_boxes_r, fw1)
        return all_boxes_r
Example #12
def eval_with_plac(img_dir, det_net, num_imgs, image_ext, draw_imgs=False):

    # 1. preprocess img
    img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3])  # is RGB. not BGR
    img_batch = tf.cast(img_plac, tf.float32)
    img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)
    img_batch = short_side_resize_for_inference_data(img_tensor=img_batch,
                                                     target_shortside_len=cfgs.IMG_SHORT_SIDE_LEN,
                                                     is_resize=False)

    det_boxes_h, det_scores_h, det_category_h, \
    det_boxes_r, det_scores_r, det_category_r = det_net.build_whole_detection_network(
        input_img_batch=img_batch,
        gtboxes_h_batch=None, gtboxes_r_batch=None)

    init_op = tf.group(
        tf.global_variables_initializer(),
        tf.local_variables_initializer()
    )

    restorer, restore_ckpt = det_net.get_restorer()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        sess.run(init_op)
        if restorer is not None:
            restorer.restore(sess, restore_ckpt)
            print('restore model')

        all_boxes_r = []
        imgs = os.listdir(img_dir)
        for i, a_img_name in enumerate(imgs):
            a_img_name = a_img_name.split(image_ext)[0]

            raw_img = cv2.imread(os.path.join(img_dir,
                                              a_img_name + image_ext))
            raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]

            start = time.time()
            resized_img, det_boxes_r_, det_scores_r_, det_category_r_ = \
                sess.run(
                    [img_batch, det_boxes_r, det_scores_r, det_category_r],
                    feed_dict={img_plac: raw_img}
                )
            end = time.time()
            # print("{} cost time : {} ".format(img_name, (end - start)))
            if draw_imgs:

                det_detections_r = draw_box_in_img.draw_rotate_box_cv(np.squeeze(resized_img, 0),
                                                                      boxes=det_boxes_r_,
                                                                      labels=det_category_r_,
                                                                      scores=det_scores_r_)
                save_dir = os.path.join(cfgs.TEST_SAVE_PATH, cfgs.VERSION)
                tools.mkdir(save_dir)

                cv2.imwrite(save_dir + '/' + a_img_name + '_r.jpg',
                            det_detections_r[:, :, ::-1])

            resized_h, resized_w = resized_img.shape[1], resized_img.shape[2]
            det_boxes_r_ = forward_convert(det_boxes_r_, False)
            det_boxes_r_[:, 0::2] *= (raw_w / resized_w)
            det_boxes_r_[:, 1::2] *= (raw_h / resized_h)
            det_boxes_r_ = back_forward_convert(det_boxes_r_, False)

            x_c, y_c, w, h, theta = det_boxes_r_[:, 0], det_boxes_r_[:, 1], det_boxes_r_[:, 2], \
                                    det_boxes_r_[:, 3], det_boxes_r_[:, 4]

            boxes_r = np.transpose(np.stack([x_c, y_c, w, h, theta]))

            dets_r = np.hstack((det_category_r_.reshape(-1, 1),
                                det_scores_r_.reshape(-1, 1),
                                boxes_r))
            all_boxes_r.append(dets_r)

            tools.view_bar('{} image cost {}s'.format(a_img_name, (end - start)), i + 1, len(imgs))

        fw2 = open(cfgs.VERSION + '_detections_r.pkl', 'wb')
        pickle.dump(all_boxes_r, fw2)
        fw2.close()
def eval_with_plac(img_dir, det_net, num_imgs, image_ext, draw_imgs=False):

    # 1. preprocess img
    img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None,
                                                     3])  # is RGB. not BGR
    img_batch = tf.cast(img_plac, tf.float32)

    img_batch = short_side_resize_for_inference_data(
        img_tensor=img_batch,
        target_shortside_len=cfgs.IMG_SHORT_SIDE_LEN,
        length_limitation=cfgs.IMG_MAX_LENGTH)
    if cfgs.NET_NAME in ['resnet152_v1d', 'resnet101_v1d', 'resnet50_v1d']:
        img_batch = (img_batch / 255 - tf.constant(
            cfgs.PIXEL_MEAN_)) / tf.constant(cfgs.PIXEL_STD)
    else:
        img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)

    img_batch = tf.expand_dims(img_batch, axis=0)

    detection_boxes, detection_scores, detection_category = det_net.build_whole_detection_network(
        input_img_batch=img_batch, gtboxes_batch_h=None, gtboxes_batch_r=None)

    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())

    restorer, restore_ckpt = det_net.get_restorer()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        sess.run(init_op)
        if restorer is not None:
            restorer.restore(sess, restore_ckpt)
            print('restore model')

        all_boxes_r = []
        imgs = os.listdir(img_dir)
        pbar = tqdm(imgs)
        for a_img_name in pbar:
            a_img_name = a_img_name.split(image_ext)[0]

            raw_img = cv2.imread(os.path.join(img_dir, a_img_name + image_ext))
            raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]

            resized_img, det_boxes_r_, det_scores_r_, det_category_r_ = \
                sess.run(
                    [img_batch, detection_boxes, detection_scores, detection_category],
                    feed_dict={img_plac: raw_img[:, :, ::-1]}
                )

            if draw_imgs:
                detected_indices = det_scores_r_ >= cfgs.VIS_SCORE
                detected_scores = det_scores_r_[detected_indices]
                detected_boxes = det_boxes_r_[detected_indices]
                detected_categories = det_category_r_[detected_indices]

                det_detections_r = draw_box_in_img.draw_boxes_with_label_and_scores(
                    np.squeeze(resized_img, 0),
                    boxes=detected_boxes,
                    labels=detected_categories,
                    scores=detected_scores,
                    method=1,
                    in_graph=True)

                save_dir = os.path.join('test_hrsc', cfgs.VERSION,
                                        'hrsc2016_img_vis')
                tools.mkdir(save_dir)

                cv2.imwrite(save_dir + '/{}.jpg'.format(a_img_name),
                            det_detections_r[:, :, ::-1])

            if det_boxes_r_.shape[0] != 0:
                resized_h, resized_w = resized_img.shape[1], resized_img.shape[
                    2]
                det_boxes_r_ = forward_convert(det_boxes_r_, False)
                det_boxes_r_[:, 0::2] *= (raw_w / resized_w)
                det_boxes_r_[:, 1::2] *= (raw_h / resized_h)
                det_boxes_r_ = backward_convert(det_boxes_r_, False)

            x_c, y_c, w, h, theta = det_boxes_r_[:, 0], det_boxes_r_[:, 1], det_boxes_r_[:, 2], \
                                    det_boxes_r_[:, 3], det_boxes_r_[:, 4]

            boxes_r = np.transpose(np.stack([x_c, y_c, w, h, theta]))
            dets_r = np.hstack((det_category_r_.reshape(-1, 1),
                                det_scores_r_.reshape(-1, 1), boxes_r))
            all_boxes_r.append(dets_r)

            pbar.set_description("Eval image %s" % a_img_name)

        # fw1 = open(cfgs.VERSION + '_detections_r.pkl', 'wb')
        # pickle.dump(all_boxes_r, fw1)
        return all_boxes_r
def worker(gpu_id, images, det_net, result_queue):
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    # 1. preprocess img
    img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None,
                                                     3])  # is RGB. not BGR
    img_batch = tf.cast(img_plac, tf.float32)

    if cfgs.NET_NAME in ['resnet152_v1d', 'resnet101_v1d', 'resnet50_v1d']:
        img_batch = (img_batch / 255 - tf.constant(
            cfgs.PIXEL_MEAN_)) / tf.constant(cfgs.PIXEL_STD)
    else:
        img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)

    img_batch = tf.expand_dims(img_batch, axis=0)

    detection_boxes, detection_scores, detection_category, detection_boxes_angle = det_net.build_whole_detection_network(
        input_img_batch=img_batch,
        gtboxes_batch_h=None,
        gtboxes_batch_r=None,
        gt_smooth_label=None,
        gpu_id=0)

    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())

    restorer, restore_ckpt = det_net.get_restorer()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        sess.run(init_op)
        if restorer is not None:
            restorer.restore(sess, restore_ckpt)
            print('restore model %d ...' % gpu_id)
        for a_img in images:
            raw_img = cv2.imread(a_img)
            raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]

            det_boxes_r_all, det_scores_r_all, det_category_r_all = [], [], []

            img_short_side_len_list = cfgs.IMG_SHORT_SIDE_LEN if isinstance(
                cfgs.IMG_SHORT_SIDE_LEN, list) else [cfgs.IMG_SHORT_SIDE_LEN]
            img_short_side_len_list = [
                img_short_side_len_list[0]
            ] if not args.multi_scale else img_short_side_len_list

            for short_size in img_short_side_len_list:
                max_len = cfgs.IMG_MAX_LENGTH
                if raw_h < raw_w:
                    new_h, new_w = short_size, min(
                        int(short_size * float(raw_w) / raw_h), max_len)
                else:
                    new_h, new_w = min(int(short_size * float(raw_h) / raw_w),
                                       max_len), short_size
                img_resize = cv2.resize(raw_img, (new_w, new_h))

                resized_img, detected_boxes, detected_scores, detected_categories = \
                    sess.run(
                        [img_batch, detection_boxes_angle, detection_scores, detection_category],
                        feed_dict={img_plac: img_resize[:, :, ::-1]}
                    )

                detected_indices = detected_scores >= cfgs.VIS_SCORE
                detected_scores = detected_scores[detected_indices]
                detected_boxes = detected_boxes[detected_indices]
                detected_categories = detected_categories[detected_indices]

                if detected_boxes.shape[0] == 0:
                    continue
                resized_h, resized_w = resized_img.shape[1], resized_img.shape[
                    2]
                detected_boxes = forward_convert(detected_boxes, False)
                detected_boxes[:, 0::2] *= (raw_w / resized_w)
                detected_boxes[:, 1::2] *= (raw_h / resized_h)
                # detected_boxes = backward_convert(detected_boxes, False)

                det_boxes_r_all.extend(detected_boxes)
                det_scores_r_all.extend(detected_scores)
                det_category_r_all.extend(detected_categories)
            det_boxes_r_all = np.array(det_boxes_r_all)
            det_scores_r_all = np.array(det_scores_r_all)
            det_category_r_all = np.array(det_category_r_all)

            box_res_rotate_ = []
            label_res_rotate_ = []
            score_res_rotate_ = []

            if det_scores_r_all.shape[0] != 0:
                for sub_class in range(1, cfgs.CLASS_NUM + 1):
                    index = np.where(det_category_r_all == sub_class)[0]
                    if len(index) == 0:
                        continue
                    tmp_boxes_r = det_boxes_r_all[index]
                    tmp_label_r = det_category_r_all[index]
                    tmp_score_r = det_scores_r_all[index]

                    tmp_boxes_r_ = backward_convert(tmp_boxes_r, False)

                    try:
                        inx = nms_rotate.nms_rotate_cpu(
                            boxes=np.array(tmp_boxes_r_),
                            scores=np.array(tmp_score_r),
                            iou_threshold=cfgs.NMS_IOU_THRESHOLD,
                            max_output_size=5000)
                    except Exception:
                        tmp_boxes_r_ = np.array(tmp_boxes_r_)
                        tmp = np.zeros(
                            [tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
                        tmp[:, 0:-1] = tmp_boxes_r_
                        tmp[:, -1] = np.array(tmp_score_r)
                        # Note: rotate_gpu_nms computes the IoU of two identical
                        # rectangles as 0, so a tiny jitter is added to the
                        # x-coordinate to break exact ties
                        jitter = np.zeros(
                            [tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
                        jitter[:, 0] += np.random.rand(
                            tmp_boxes_r_.shape[0], ) / 1000
                        inx = rotate_gpu_nms(
                            np.array(tmp, np.float32) +
                            np.array(jitter, np.float32),
                            float(cfgs.NMS_IOU_THRESHOLD), 0)

                    box_res_rotate_.extend(np.array(tmp_boxes_r)[inx])
                    score_res_rotate_.extend(np.array(tmp_score_r)[inx])
                    label_res_rotate_.extend(np.array(tmp_label_r)[inx])

            box_res_rotate_ = np.array(box_res_rotate_)
            score_res_rotate_ = np.array(score_res_rotate_)
            label_res_rotate_ = np.array(label_res_rotate_)

            # result_queue is assumed to be provided by the enclosing script
            # (cf. the multiprocessing worker example below)
            result_dict = {
                'scales': [1, 1],
                'boxes': box_res_rotate_,
                'scores': score_res_rotate_,
                'labels': label_res_rotate_,
                'image_id': a_img
            }
            result_queue.put_nowait(result_dict)
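
The multi-scale loop above rescales each image so that its short side equals short_size while capping the long side at cfgs.IMG_MAX_LENGTH (which slightly distorts the aspect ratio when the cap is hit). A minimal standalone sketch of that computation; the helper name resize_keep_ratio is hypothetical and not part of the original code:

def resize_keep_ratio(raw_h, raw_w, short_size, max_len):
    # hypothetical helper: scale the short side to short_size and
    # clamp the long side at max_len, mirroring the loop above
    if raw_h < raw_w:
        new_h = short_size
        new_w = min(int(short_size * float(raw_w) / raw_h), max_len)
    else:
        new_w = short_size
        new_h = min(int(short_size * float(raw_h) / raw_w), max_len)
    return new_h, new_w

# e.g. a 600x1200 image with short_size=800 and max_len=1333 resizes to
# 800x1333: the long side would be 1600 but is clamped at the cap
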
def eval_with_plac(img_dir, det_net, num_imgs, image_ext, draw_imgs,
                   test_annotation_path):

    # 1. preprocess img
    img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None,
                                                     3])  # is RGB, not BGR
    img_batch = tf.cast(img_plac, tf.float32)
    img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)
    img_batch = short_side_resize_for_inference_data(
        img_tensor=img_batch,
        target_shortside_len=cfgs.IMG_SHORT_SIDE_LEN,
        is_resize=False)

    det_boxes_h, det_scores_h, det_category_h, \
    det_boxes_r, det_scores_r, det_category_r = det_net.build_whole_detection_network(
        input_img_batch=img_batch,
        gtboxes_h_batch=None, gtboxes_r_batch=None)

    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())

    global_step_tensor = slim.get_or_create_global_step()

    eval_result = []
    last_checkpoint_name = None

    while True:

        restorer, restore_ckpt = det_net.get_restorer()
        #saver = tf.train.Saver(max_to_keep=10)
        start_time = time.time()

        model_path = os.path.splitext(os.path.basename(restore_ckpt))[0]
        if model_path is None:
            print("Waiting for an available checkpoint")
        elif last_checkpoint_name == model_path:
            print(
                "Checkpoint {} has already been evaluated; retrying in {} seconds"
                .format(model_path, EVAL_INTERVAL))
            #continue
        else:
            print('Last ckpt was {}, new ckpt is {}'.format(
                last_checkpoint_name, model_path))
            last_checkpoint_name = model_path

            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True

            with tf.Session(config=config) as sess:
                sess.run(init_op)
                sess.run(global_step_tensor.initializer)
                if restorer is not None:
                    restorer.restore(sess, restore_ckpt)
                    print('restore model', restore_ckpt)

                global_stepnp = tf.train.global_step(sess, global_step_tensor)
                print('#########################', global_stepnp)

                all_boxes_h = []
                all_boxes_r = []
                imgs = os.listdir(img_dir)
                imgs_len = len(imgs)
                none_detected_image = []
                for i, a_img_name in enumerate(imgs[:]):
                    a_img_name = a_img_name.split(image_ext)[0]
                    image_name = a_img_name + image_ext
                    print('\n', a_img_name)

                    raw_img = cv2.imread(
                        os.path.join(img_dir, a_img_name + image_ext))
                    raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]

                    start = time.time()
                    resized_img, det_boxes_h_, det_scores_h_, det_category_h_, \
                    det_boxes_r_, det_scores_r_, det_category_r_ = \
                        sess.run(
                            [img_batch, det_boxes_h, det_scores_h, det_category_h,
                             det_boxes_r, det_scores_r, det_category_r],
                            feed_dict={img_plac: raw_img}
                        )
                    end = time.time()
                    print("det category H : ", det_category_h_)
                    print("det category R : ", det_category_r_)
                    # print("{} cost time : {} ".format(img_name, (end - start)))
                    if draw_imgs:
                        det_detections_h = draw_box_in_img.draw_box_cv(
                            np.squeeze(resized_img, 0),
                            boxes=det_boxes_h_,
                            labels=det_category_h_,
                            scores=det_scores_h_)
                        det_detections_r = draw_box_in_img.draw_rotate_box_cv(
                            np.squeeze(resized_img, 0),
                            boxes=det_boxes_r_,
                            labels=det_category_r_,
                            scores=det_scores_r_)
                        save_dir = os.path.join(cfgs.TEST_SAVE_PATH,
                                                cfgs.VERSION)
                        tools.mkdir(save_dir)
                        cv2.imwrite(save_dir + '/' + a_img_name + '_h.jpg',
                                    det_detections_h[:, :, ::-1])
                        cv2.imwrite(save_dir + '/' + a_img_name + '_r.jpg',
                                    det_detections_r[:, :, ::-1])

                    xmin, ymin, xmax, ymax = det_boxes_h_[:, 0], det_boxes_h_[:, 1], \
                                             det_boxes_h_[:, 2], det_boxes_h_[:, 3]

                    if det_boxes_r_.shape[0] != 0:
                        #print('### Has box ###')
                        resized_h, resized_w = resized_img.shape[1], resized_img.shape[2]
                        det_boxes_r_ = forward_convert(det_boxes_r_, False)
                        det_boxes_r_[:, 0::2] *= (raw_w / resized_w)
                        det_boxes_r_[:, 1::2] *= (raw_h / resized_h)
                        det_boxes_r_ = back_forward_convert(
                            det_boxes_r_, False)

                        x_c, y_c, w, h, theta = det_boxes_r_[:, 0], det_boxes_r_[:, 1], det_boxes_r_[:, 2], \
                                                det_boxes_r_[:, 3], det_boxes_r_[:, 4]

                        xmin = xmin * raw_w / resized_w
                        xmax = xmax * raw_w / resized_w
                        ymin = ymin * raw_h / resized_h
                        ymax = ymax * raw_h / resized_h

                        boxes_h = np.transpose(
                            np.stack([xmin, ymin, xmax, ymax]))
                        boxes_r = np.transpose(
                            np.stack([x_c, y_c, w, h, theta]))
                        dets_h = np.hstack((det_category_h_.reshape(-1, 1),
                                            det_scores_h_.reshape(-1, 1),
                                            boxes_h))
                        dets_r = np.hstack((det_category_r_.reshape(-1, 1),
                                            det_scores_r_.reshape(-1, 1),
                                            boxes_r))
                        all_boxes_h.append(dets_h)
                        all_boxes_r.append(dets_r)
                    else:
                        imgs.remove(image_name)
                        none_detected_image.append(image_name)
                        print('No boxes detected')

                    tools.view_bar(
                        '{} image cost {}s'.format(a_img_name, (end - start)),
                        i + 1, imgs_len)

                with open(cfgs.VERSION + '_detections_h.pkl', 'wb') as fw1:
                    pickle.dump(all_boxes_h, fw1)
                with open(cfgs.VERSION + '_detections_r.pkl', 'wb') as fw2:
                    pickle.dump(all_boxes_r, fw2)

                # with open(cfgs.VERSION + '_detections_h.pkl', 'rb') as f1:
                #     all_boxes_h = pickle.load(f1, encoding='unicode')

                # print(10 * "###")
                # print(len(all_boxes_h))
                #
                # with open(cfgs.VERSION + '_detections_r.pkl', 'rb') as f2:
                #     all_boxes_r = pickle.load(f2, encoding='unicode')
                #
                #     print(len(all_boxes_r))

                # imgs = os.listdir(img_dir)
                real_test_imgname_list = [i.split(image_ext)[0] for i in imgs]

                print(10 * "**")
                print('horizon eval:')
                # print(len(all_boxes_h), len(all_boxes_r))
                # print(len(real_test_imgname_list))
                mAP_h, recall_h, precision_h, total_mAP_h, total_recall_h, total_precision_h = voc_eval.voc_evaluate_detections(
                    all_boxes=all_boxes_h,
                    test_imgid_list=real_test_imgname_list,
                    test_annotation_path=test_annotation_path)
                print('mAP_h: ', mAP_h)
                print('mRecall_h:', recall_h)
                print('mPrecision_h:', precision_h)
                print('total_mAP_h: ', total_mAP_h)
                print('total_recall_h_list:', total_recall_h)
                print('total_precision_h_list:', total_precision_h)

                print(10 * "**")
                print('rotation eval:')
                mAP_r, recall_r, precision_r, total_mAP_r, total_recall_r, total_precision_r = voc_eval_r.voc_evaluate_detections(
                    all_boxes=all_boxes_r,
                    test_imgid_list=real_test_imgname_list,
                    test_annotation_path=test_annotation_path)

                # sanity checks: F1 from the closed-form F-beta formula
                # (1 + beta^2) * P * R / (beta^2 * P + R) with beta = 1
                # should agree with calc_fscore
                f1score_h_check = (1 + 1**2) * precision_h * recall_h / (
                    1**2 * precision_h + recall_h)
                f1score_h = calc_fscore(precision_h, recall_h, 1)

                f1score_r_check = (1 + 1**2) * precision_r * recall_r / (
                    1**2 * precision_r + recall_r)
                f1score_r = calc_fscore(precision_r, recall_r, 1)

                print(10 * '##')
                print('mAP_r:', mAP_r)
                print('mRecall_r:', recall_r)
                print('mPrecision_r:', precision_r)
                print('total_mAP_r_list: ', total_mAP_r)
                print('total_recall_r_list:', total_recall_r)
                print('total_precision_r_list:', total_precision_r)
                print('f1score_r:', f1score_r)

                summary_path = os.path.join(cfgs.SUMMARY_PATH,
                                            cfgs.VERSION + '/eval_0')
                tools.mkdir(summary_path)

                summary_writer = tf.summary.FileWriter(summary_path,
                                                       graph=sess.graph)

                mAP_h_summ = tf.Summary()
                mAP_h_summ.value.add(tag='EVAL_Global/mAP_h',
                                     simple_value=mAP_h)
                summary_writer.add_summary(mAP_h_summ, global_stepnp)

                mAP_r_summ = tf.Summary()
                mAP_r_summ.value.add(tag='EVAL_Global/mAP_r',
                                     simple_value=mAP_r)
                summary_writer.add_summary(mAP_r_summ, global_stepnp)

                mRecall_h_summ = tf.Summary()
                mRecall_h_summ.value.add(tag='EVAL_Global/Recall_h',
                                         simple_value=recall_h)
                summary_writer.add_summary(mRecall_h_summ, global_stepnp)

                mRecall_r_summ = tf.Summary()
                mRecall_r_summ.value.add(tag='EVAL_Global/Recall_r',
                                         simple_value=recall_r)
                summary_writer.add_summary(mRecall_r_summ, global_stepnp)

                mPrecision_h_summ = tf.Summary()
                mPrecision_h_summ.value.add(tag='EVAL_Global/Precision_h',
                                            simple_value=precision_h)
                summary_writer.add_summary(mPrecision_h_summ, global_stepnp)

                mPrecision_r_summ = tf.Summary()
                mPrecision_r_summ.value.add(tag='EVAL_Global/Precision_r',
                                            simple_value=precision_r)
                summary_writer.add_summary(mPrecision_r_summ, global_stepnp)

                mF1Score_h_summ = tf.Summary()
                mF1Score_h_summ.value.add(tag='EVAL_Global/F1Score_h',
                                          simple_value=f1score_h)
                summary_writer.add_summary(mF1Score_h_summ, global_stepnp)

                mF1Score_r_summ = tf.Summary()
                mF1Score_r_summ.value.add(tag='EVAL_Global/F1Score_r',
                                          simple_value=f1score_r)
                summary_writer.add_summary(mF1Score_r_summ, global_stepnp)

                mAP_h_class_dict = {}
                mAP_r_class_dict = {}
                recall_h_class_dict = {}
                recall_r_class_dict = {}
                precision_h_class_dict = {}
                precision_r_class_dict = {}
                f1score_h_class_dict = {}
                f1score_r_class_dict = {}

                label_list = list(NAME_LABEL_MAP.keys())
                label_list.remove('back_ground')

                for cls in label_list:
                    mAP_h_class_dict["cls_%s_mAP_h_summ" % cls] = tf.Summary()
                    mAP_r_class_dict["cls_%s_mAP_r_summ" % cls] = tf.Summary()
                    recall_h_class_dict["cls_%s_recall_h_summ" %
                                        cls] = tf.Summary()
                    recall_r_class_dict["cls_%s_recall_r_summ" %
                                        cls] = tf.Summary()
                    precision_h_class_dict["cls_%s_precision_h_summ" %
                                           cls] = tf.Summary()
                    precision_r_class_dict["cls_%s_precision_r_summ" %
                                           cls] = tf.Summary()
                    f1score_h_class_dict["cls_%s_f1score_h_summ" %
                                         cls] = tf.Summary()
                    f1score_r_class_dict["cls_%s_f1score_r_summ" %
                                         cls] = tf.Summary()

                for cls in label_list:
                    mAP_h_class_dict["cls_%s_mAP_h_summ" % cls].value.add(
                        tag='EVAL_Class_mAP/{}_mAP_h'.format(cls),
                        simple_value=total_mAP_h[cls])
                    mAP_r_class_dict["cls_%s_mAP_r_summ" % cls].value.add(
                        tag='EVAL_Class_mAP/{}_mAP_r'.format(cls),
                        simple_value=total_mAP_r[cls])
                    recall_h_class_dict[
                        "cls_%s_recall_h_summ" % cls].value.add(
                            tag='EVAL_Class_recall/{}_recall_h'.format(cls),
                            simple_value=total_recall_h[cls])
                    recall_r_class_dict[
                        "cls_%s_recall_r_summ" % cls].value.add(
                            tag='EVAL_Class_recall/{}_recall_r'.format(cls),
                            simple_value=total_recall_r[cls])
                    precision_h_class_dict[
                        "cls_%s_precision_h_summ" % cls].value.add(
                            tag='EVAL_Class_precision/{}_precision_h'.format(
                                cls),
                            simple_value=total_precision_h[cls])
                    precision_r_class_dict[
                        "cls_%s_precision_r_summ" % cls].value.add(
                            tag='EVAL_Class_precision/{}_precision_r'.format(
                                cls),
                            simple_value=total_precision_r[cls])

                    f1score_h_cls = calc_fscore(total_precision_h[cls],
                                                total_recall_h[cls], 1)
                    f1score_r_cls = calc_fscore(total_precision_r[cls],
                                                total_recall_r[cls], 1)
                    f1score_h_class_dict[
                        "cls_%s_f1score_h_summ" % cls].value.add(
                            tag='EVAL_Class_f1score/{}_f1score_h'.format(cls),
                            simple_value=f1score_h_cls)
                    f1score_r_class_dict[
                        "cls_%s_f1score_r_summ" % cls].value.add(
                            tag='EVAL_Class_f1score/{}_f1score_r'.format(cls),
                            simple_value=f1score_r_cls)

                for cls in label_list:
                    summary_writer.add_summary(
                        mAP_h_class_dict["cls_%s_mAP_h_summ" % cls],
                        global_stepnp)
                    summary_writer.add_summary(
                        mAP_r_class_dict["cls_%s_mAP_r_summ" % cls],
                        global_stepnp)
                    summary_writer.add_summary(
                        recall_h_class_dict["cls_%s_recall_h_summ" % cls],
                        global_stepnp)
                    summary_writer.add_summary(
                        recall_r_class_dict["cls_%s_recall_r_summ" % cls],
                        global_stepnp)
                    summary_writer.add_summary(
                        precision_h_class_dict["cls_%s_precision_h_summ" %
                                               cls], global_stepnp)
                    summary_writer.add_summary(
                        precision_r_class_dict["cls_%s_precision_r_summ" %
                                               cls], global_stepnp)
                    summary_writer.add_summary(
                        f1score_h_class_dict["cls_%s_f1score_h_summ" % cls],
                        global_stepnp)
                    summary_writer.add_summary(
                        f1score_r_class_dict["cls_%s_f1score_r_summ" % cls],
                        global_stepnp)

                summary_writer.flush()

        # save_dir is only set inside the draw_imgs branch above, so rebuild
        # it here to avoid a NameError when draw_imgs is False
        save_dir = os.path.join(cfgs.TEST_SAVE_PATH, cfgs.VERSION)
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)

        save_ckpt = os.path.join(save_dir,
                                 'voc_' + str(global_stepnp) + 'model.ckpt')
        # saver.save(sess, save_ckpt)
        print('weights have been saved')

        time_to_next_eval = start_time + EVAL_INTERVAL - time.time()
        if time_to_next_eval > 0:
            time.sleep(time_to_next_eval)
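
calc_fscore is not defined in this snippet; a minimal sketch consistent with the inline check above, i.e. the F-beta score (1 + beta^2) * P * R / (beta^2 * P + R), with a guard for the zero denominator (this exact implementation is an assumption):

def calc_fscore(precision, recall, beta):
    # assumed implementation of the F-beta score used above (beta=1 gives F1)
    denom = beta ** 2 * precision + recall
    if denom == 0:
        return 0.
    return (1 + beta ** 2) * precision * recall / denom
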
Example #16
def worker(gpu_id, images, det_net, result_queue):
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    # 1. preprocess img
    img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None,
                                                     3])  # is RGB, not BGR
    img_batch = tf.cast(img_plac, tf.float32)

    img_batch = short_side_resize_for_inference_data(
        img_tensor=img_batch,
        target_shortside_len=cfgs.IMG_SHORT_SIDE_LEN,
        length_limitation=cfgs.IMG_MAX_LENGTH)
    if cfgs.NET_NAME in ['resnet152_v1d', 'resnet101_v1d', 'resnet50_v1d']:
        img_batch = (img_batch / 255 - tf.constant(
            cfgs.PIXEL_MEAN_)) / tf.constant(cfgs.PIXEL_STD)
    else:
        img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)

    img_batch = tf.expand_dims(img_batch, axis=0)

    detection_boxes, detection_scores, detection_category = det_net.build_whole_detection_network(
        input_img_batch=img_batch, gtboxes_batch_h=None, gtboxes_batch_r=None)

    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())

    restorer, restore_ckpt = det_net.get_restorer()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        sess.run(init_op)
        if restorer is not None:
            restorer.restore(sess, restore_ckpt)
            print('restore model %d ...' % gpu_id)
        for a_img in images:
            raw_img = cv2.imread(a_img)
            raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]

            resized_img, detected_boxes, detected_scores, detected_categories = \
                sess.run(
                    [img_batch, detection_boxes, detection_scores, detection_category],
                    feed_dict={img_plac: raw_img[:, :, ::-1]}
                )
            detected_boxes = forward_convert(detected_boxes, False)

            detected_indices = detected_scores >= cfgs.VIS_SCORE
            detected_scores = detected_scores[detected_indices]
            detected_boxes = detected_boxes[detected_indices]
            detected_categories = detected_categories[detected_indices]

            resized_h, resized_w = resized_img.shape[1], resized_img.shape[2]
            scales = [raw_w / resized_w, raw_h / resized_h]
            result_dict = {
                'scales': scales,
                'boxes': detected_boxes,
                'scores': detected_scores,
                'labels': detected_categories,
                'image_id': a_img
            }
            result_queue.put_nowait(result_dict)
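
worker is written to run once per GPU: it pins the process to gpu_id via CUDA_VISIBLE_DEVICES and pushes one result_dict per image. A hedged sketch of a driver that shards the image list across processes and drains result_queue; the function name run_multi_gpu and the round-robin sharding are assumptions, not part of the original code:

from multiprocessing import Process, Queue

def run_multi_gpu(images, det_net, num_gpus):
    # one worker process per GPU, all feeding a shared queue;
    # relies on the fork start method so det_net need not be pickled
    result_queue = Queue(maxsize=200)
    procs = []
    for gpu_id in range(num_gpus):
        shard = images[gpu_id::num_gpus]  # round-robin shard of the image list
        p = Process(target=worker,
                    args=(gpu_id, shard, det_net, result_queue))
        p.start()
        procs.append(p)
    # one result_dict arrives per image, regardless of which worker produced it
    results = [result_queue.get() for _ in range(len(images))]
    for p in procs:
        p.join()
    return results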