Code Example #1
def demo_net(predictor, image_name, vis=False, save_dir='./', save_name='tmp.jpg', threshold=0.7):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param image_name: path to the input image
    :param vis: if True, visualize; otherwise save the drawn result as a new image
    :param save_dir: directory for the saved result image
    :param save_name: filename for the saved result image
    :param threshold: confidence threshold for keeping detections
    :return: list of [x1, y1, x2, y2, class_name] for the kept detections,
             or None if detection fails
    """
    assert os.path.exists(image_name), image_name + ' not found'
    result_lst = list()
    try:
        im = cv2.imread(image_name)
        data_batch, data_names, im_scale = generate_batch(im)
        scores, boxes, data_dict = im_detect(
            predictor, data_batch, data_names, im_scale)

        all_boxes = [[] for _ in CLASSES]
        for cls in CLASSES:
            cls_ind = CLASSES.index(cls)
            cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
            cls_scores = scores[:, cls_ind, np.newaxis]
            keep = np.where(cls_scores >= threshold)[0]
            dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
            keep = nms(dets)
            all_boxes[cls_ind] = dets[keep, :]

        boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]

    except Exception as e:
        print('detection error: %s' % e)
        return None

    # print results
    print('class ---- [[x1, x2, y1, y2, confidence]]')
    for ind, boxes in enumerate(boxes_this_image):
        if len(boxes) > 0:
            print('---------', CLASSES[ind], '---------')
            print(boxes)
            for box in boxes:
                tmp_box = box.tolist()[:4]
                tmp_box.append(str(CLASSES[ind]))
                result_lst.append(tmp_box)
    if vis:
        vis_all_detection(data_dict['data'].asnumpy(),
                          boxes_this_image, CLASSES, im_scale)
    else:
        # result_dir = os.path.dirname(image_name)
        # result_file = save_dir + os.path.basename(image_name)
        result_file = save_dir + save_name
        print('results saved to %s' % result_file)
        im = draw_all_detection(
            data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale)
        if not os.path.exists(os.path.dirname(result_file)):
            os.makedirs(os.path.dirname(result_file))
        cv2.imwrite(result_file, im)

    return result_lst
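
Every example on this page repeats the same post-processing pattern: slice out the class-specific boxes and scores, keep detections above a confidence threshold, then run NMS. A minimal standalone sketch of that step, assuming scores of shape (N, num_classes), boxes of shape (N, 4 * num_classes) with class-specific regressed boxes, and any nms callable that returns the indices to keep (the helper name postprocess_detections is not from the source):

import numpy as np

def postprocess_detections(scores, boxes, classes, nms, conf_thresh=0.7):
    # index 0 of classes is assumed to be the background class
    all_boxes = [[] for _ in classes]
    for cls_ind, _ in enumerate(classes):
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind, np.newaxis]
        keep = np.where(cls_scores >= conf_thresh)[0]          # confidence filter
        dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
        all_boxes[cls_ind] = dets[nms(dets), :]                # non-maximum suppression
    # background slot stays empty, matching boxes_this_image in the examples
    return [[]] + [all_boxes[j] for j in range(1, len(classes))]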
Code Example #2
def demo_net(predictor, image_name, args):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param image_name: path to the input image
    :param args: parsed arguments; args.vis toggles visualization and args.out_dir is the output directory
    :return: None
    """
    assert os.path.exists(image_name), image_name + ' not found'
    im = cv2.imread(image_name)
    data_batch, data_names, im_scale = generate_batch(im)
    #for i in range(10):
    #    scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, im_scale)
    t0 = time.perf_counter()  # time.clock() was removed in Python 3.8
    for i in range(1):
        scores, boxes, data_dict = im_detect(predictor, data_batch, data_names,
                                             im_scale)
    print(time.perf_counter() - t0)

    all_boxes = [[] for _ in CLASSES]
    for cls in CLASSES:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind, np.newaxis]
        keep = np.where(cls_scores >= CONF_THRESH)[0]
        dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
        keep = nms(dets)
        all_boxes[cls_ind] = dets[keep, :]

    boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]

    # print results
    print('class ---- [[x1, x2, y1, y2, confidence]]')
    for ind, boxes in enumerate(boxes_this_image):
        if len(boxes) > 0:
            print('---------', CLASSES[ind], '---------')
            print(boxes)

    if args.vis:
        vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image,
                          CLASSES, im_scale)
    else:
        #print(os.path.join(args.out_dir, os.path.split(image_name.replace('.jpg', '_result.jpg'))[1]))
        result_file = os.path.join(
            args.out_dir,
            os.path.split(image_name.replace('.jpg', '_result.jpg'))[1])
        print('results saved to %s' % result_file)
        im = draw_all_detection(data_dict['data'].asnumpy(), boxes_this_image,
                                CLASSES, im_scale)
        cv2.imwrite(result_file, im)
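
Code Examples #2 and #10 read their options from an args namespace instead of keyword arguments. The source does not show the argument parser, so the following argparse sketch of the two fields this variant touches (args.vis and args.out_dir) is only an assumption about how it might be built:

import argparse

def parse_args():
    # hypothetical parser; flag names are assumptions, not taken from the source
    parser = argparse.ArgumentParser(description='Faster R-CNN demo')
    parser.add_argument('--vis', action='store_true',
                        help='visualize detections instead of writing *_result.jpg')
    parser.add_argument('--out-dir', dest='out_dir', default='.',
                        help='directory that receives the *_result.jpg files')
    return parser.parse_args()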
Code Example #3
def draw(args, queue):
    #cap = cv2.VideoCapture(args.in_video)
    #cat = cv2.VideoWriter(args.out, fourcc=cv.CV_FOURCC(*"MJPG"), fps=cap.get(cv.CV_CAP_PROP_FPS), frameSize=(int(cap.get(cv.CV_CAP_PROP_FRAME_WIDTH)), int(cap.get(cv.CV_CAP_PROP_FRAME_HEIGHT))), isColor=True)
    #cap.release()
    while True:
        item = queue.get()
        if not item:
            print('draw finish...')
            break
        data_dict, boxes_this_image, im_scale = item

        im = draw_all_detection(data_dict, boxes_this_image, CLASSES, im_scale)
        print('write: ', time.perf_counter())  # time.clock() is gone in Python 3.8+
        #cat.write(im)
    #cat.release()
    print('Hello3')
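
Code Example #3 is only the consumer half of a producer/consumer pipeline: it blocks on queue.get() until it receives a (data_dict, boxes_this_image, im_scale) tuple and stops on a falsy sentinel. A purely illustrative sketch of how the draw() worker above could be wired up with multiprocessing (the producer side and the produce_items iterable are assumptions, not from the source):

from multiprocessing import Process, Queue

def run_draw_pipeline(cli_args, produce_items):
    # produce_items: hypothetical iterable yielding picklable
    # (data_dict, boxes_this_image, im_scale) tuples, e.g. numpy arrays
    queue = Queue(maxsize=8)
    worker = Process(target=draw, args=(cli_args, queue))
    worker.start()
    for item in produce_items:
        queue.put(item)
    queue.put(None)   # falsy sentinel: draw() prints 'draw finish...' and exits
    worker.join()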
Code Example #4
def demo_net(predictor, image_name, vis=False):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param image_name: image name
    :param vis: will save as a new image if not visualized
    :return: None
    """
    assert os.path.exists(image_name), image_name + ' not found'
    im = cv2.imread(image_name)
    data_batch, data_names, im_scale = generate_batch(im)
    scores, boxes, head_boxes, joints, data_dict = im_detect(
        predictor, data_batch, data_names, im_scale)
    print 'im_scale = {}'.format(im_scale)

    all_boxes = [[] for _ in CLASSES]
    for cls in CLASSES:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind, np.newaxis]
        keep1 = np.where(cls_scores >= CONF_THRESH)[0]
        dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep1, :]
        keep2 = nms(dets)
        all_boxes[cls_ind] = dets[keep2, :]
        if cls == 'person':
            # keep1[keep2] maps the NMS survivors back to the original proposal
            # indices so head_boxes and joints stay aligned with the kept person boxes
            head_boxes = head_boxes[keep1[keep2], :]
            joints = joints[keep1[keep2], :]

    boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]

    # print results
    print 'class ---- [[x1, x2, y1, y2, confidence]]'
    for ind, boxes in enumerate(boxes_this_image):
        if len(boxes) > 0:
            print '---------', CLASSES[ind], '---------'
            print boxes

    if vis:
        vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image,
                          CLASSES, im_scale, head_boxes, joints)
    else:
        result_file = image_name.replace('.', '_result.')
        print 'results saved to %s' % result_file
        im = draw_all_detection(data_dict['data'].asnumpy(), boxes_this_image,
                                CLASSES, im_scale)
        cv2.imwrite(result_file, im)
Code Example #5
File: result.py  Project: luwei001/MXNet-Faster-RCNN
def demo_net(predictor, image_name, vis=True):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param image_name: image name
    :param vis: unused in this variant; the drawn result image is always saved
    :return: (CLASS, SCORE, BOOL) where BOOL is 1 if anything was detected, otherwise 0
    """
    BOOL = 0
    assert os.path.exists(image_name), image_name + ' not found'
    im = cv2.imread(image_name)
    data_batch, data_names, im_scale = generate_batch(im)
    scores, boxes, data_dict = im_detect(predictor, data_batch, data_names,
                                         im_scale)
    all_boxes = [[] for _ in Global.CLASSES]
    for cls in Global.CLASSES:
        cls_ind = Global.CLASSES.index(cls)
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind, np.newaxis]
        keep = np.where(cls_scores >= Global.conf_thresh_value)[0]
        dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
        keep = py_nms_wrapper(Global.nms_thresh_value)(dets)
        all_boxes[cls_ind] = dets[keep, :]

    boxes_this_image = [[]] + [
        all_boxes[j] for j in range(1, len(Global.CLASSES))
    ]

    for ind, boxes in enumerate(boxes_this_image):
        if len(boxes) > 0:
            BOOL = 1
            logger.info('---%s---' % Global.CLASSES[ind])
            logger.info('%s' % boxes)

    result_file = image_name.replace(str(Global.open_img_dir),
                                     str(Global.save_path))
    print result_file
    logger.info('results saved to %s' % result_file)
    im, CLASS, SCORE = draw_all_detection(data_dict['data'].asnumpy(),
                                          boxes_this_image, Global.CLASSES,
                                          im_scale)
    cv2.imwrite(result_file, im)
    Global.PICTURE_INFO[0].append(result_file)
    Global.PICTURE_INFO[1].append(CLASS)
    Global.PICTURE_INFO[2].append(SCORE)
    return CLASS, SCORE, BOOL
Code Example #6
def demo_net(predictor, image_name, vis=False):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param image_name: image name
    :param vis: will save as a new image if not visualized
    :return: None
    """
    assert os.path.exists(image_name), image_name + ' not found'
    im = cv2.imread(image_name)
    data_batch, data_names, im_scale = generate_batch(im)
    scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, im_scale)

    all_boxes = [[] for _ in CLASSES]
    for cls in CLASSES:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind, np.newaxis]
        print(cls_scores.shape)
        keep = np.where(cls_scores >= CONF_THRESH)[0]
        dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
        keep = nms(dets)
        all_boxes[cls_ind] = dets[keep, :]

    boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]

    # print results
    logger.info('---class---')
    logger.info('[[x1, x2, y1, y2, confidence]]')
    for ind, boxes in enumerate(boxes_this_image):
        if len(boxes) > 0:
            logger.info('---%s---' % CLASSES[ind])
            logger.info('%s' % boxes)

    if vis:
        vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale)
    else:
        
        # manual basename extraction; an os.path equivalent is sketched after this example
        idx = [i for i, v in enumerate(image_name) if v == '/'][-1]
        result_file = "data/VOCdevkit/results/test/" + image_name[idx+1:]

        result_file = result_file.replace('.', '_result.')
        logger.info('results saved to %s' % result_file)
        im = draw_all_detection(data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale)
        cv2.imwrite(result_file, im)
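
The manual '/'-index search in Code Example #6 is just extracting the basename of the input path; the equivalent with os.path is shown below (the helper name result_path and the assumption that out_dir contains no '.' are mine, not the source's):

import os

def result_path(image_name, out_dir="data/VOCdevkit/results/test"):
    # same effect as the '/'-index search above, assuming out_dir has no '.'
    return os.path.join(out_dir, os.path.basename(image_name)).replace('.', '_result.')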
Code Example #7
File: demo.py  Project: CoderHHX/incubator-mxnet
def demo_net(predictor, image_name, vis=False):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param image_name: image name
    :param vis: will save as a new image if not visualized
    :return: None
    """
    assert os.path.exists(image_name), image_name + ' not found'
    im = cv2.imread(image_name)
    data_batch, data_names, im_scale = generate_batch(im)
    scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, im_scale)

    all_boxes = [[] for _ in CLASSES]
    for cls in CLASSES:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind, np.newaxis]
        keep = np.where(cls_scores >= CONF_THRESH)[0]
        dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
        keep = nms(dets)
        all_boxes[cls_ind] = dets[keep, :]

    boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]

    # print results
    logger.info('---class---')
    logger.info('[[x1, x2, y1, y2, confidence]]')
    for ind, boxes in enumerate(boxes_this_image):
        if len(boxes) > 0:
            logger.info('---%s---' % CLASSES[ind])
            logger.info('%s' % boxes)

    if vis:
        vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale)
    else:
        result_file = image_name.replace('.', '_result.')
        logger.info('results saved to %s' % result_file)
        im = draw_all_detection(data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale)
        cv2.imwrite(result_file, im)
Code Example #8
def demo_net(predictor, image_name, image, with_label, vis, out_dir,
             label_dir):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param image_name: image name (the decoded image is passed in via image)
    :param vis: if True, visualize; otherwise save the drawn result as a new image
    :return: parallel lists (xn, yn, wn, hn) of detection x, y, width and height
    """
    global detect_num
    global tp, fp, fn
    global gp, gr, gf1
    # assert os.path.exists(image_name), image_name + ' not found'
    # im = cv2.imread(image_name)
    im = image
    # im = cv2.flip(im, 1)
    data_batch, data_names, im_scale = generate_batch(im)
    # for i in range(10):
    #    scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, im_scale)
    # t0 = time.clock()
    for i in range(1):
        scores, boxes, data_dict = im_detect(predictor, data_batch, data_names,
                                             im_scale)
    # print(time.clock() - t0)
    xn = []
    yn = []
    wn = []
    hn = []
    all_boxes = [[] for _ in CLASSES]
    for cls in CLASSES:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind, np.newaxis]
        keep = np.where(cls_scores >= CONF_THRESH)[0]
        dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
        keep = nms(dets)
        all_boxes[cls_ind] = dets[keep, :]

    boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]
    # print(boxes_this_image)

    # print results
    rst = {}
    lfn, lfp, ltp = 0, 0, 0
    #print('class ---- [[x1, x2, y1, y2, confidence]]')
    for ind, boxes in enumerate(boxes_this_image):
        if len(boxes) > 0:
            #print('---------', CLASSES[ind], '---------')
            #print(boxes)
            for i in range(0, len(boxes)):
                xn.append(int(boxes[i][0] + 0))
                yn.append(int(boxes[i][1] + 0))
                wn.append(int(boxes[i][2] - boxes[i][0]))
                hn.append(int(boxes[i][3] - boxes[i][1]))

            rst[CLASSES[ind]] = [box for box in boxes]
            # detect_num[CLASSES[ind]] += len(boxes)
            detect_num[CLASSES[ind]] += 1  # len(boxes)
    """if image == '' and with_label:
        label_file = os.path.join(label_dir, os.path.split(image_name.replace('.jpg', '.txt'))[1])
        with open(label_file) as fd:
            for line in fd:
                cls, poss = line.split(':')
                x1, y1, x2, y2 = [float(item) for item in poss.split(',')]

                if cls not in rst:
                    lfn += 1
                    continue

                iou_thd = 0.5
                now_iou = 0
                now_idx = 0
                for ind, box in enumerate(rst[cls]):
                    # print('box = ', box, type(box))
                    # print('box = {}, true = {}'.format(box, (x1, y1, x2, y2)))
                    if (box[0] >= x2) or (box[2] <= x1) or (box[1] >= y2) or (box[3] <= y1):
                        continue
                    else:
                        # print('###############################################')
                        i = (min(x2, box[2]) - max(x1, box[0])) * (min(y2, box[3]) - max(y1, box[1]))
                        assert (i > 0)
                        u = (x2 - x1) * (y2 - y1) + (box[0] - box[2]) * (box[1] - box[3]) - i
                        if i / u > now_iou:
                            now_iou = i / u
                            now_idx = ind
                if now_iou > iou_thd:
                    ltp += 1
                    rst[cls].pop(now_idx)
                    if len(rst[cls]) == 0: rst.pop(cls)
                else:
                    lfn += 1
        for vs in rst.values():
            lfp += len(vs)

        p, r, f1 = 0, 0, 0
        if ltp != 0:
            p = 100.0 * ltp / (ltp + lfp)
            r = 100.0 * ltp / (ltp + lfn)
            f1 = 2 * p * r / (p + r)
        print('precision = {}%, recall = {}%, f1 score = {}%'.format(p, r, f1))

        tp += ltp
        fp += lfp
        fn += lfn
        gp += p
        gr += r
        gf1 += f1"""

    if vis:
        vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image,
                          CLASSES, im_scale)
    else:
        # print(os.path.join(args.out_dir, os.path.split(image_name.replace('.jpg', '_result.jpg'))[1]))
        # result_file = os.path.join(out_dir, os.path.split(image_name.replace('.jpg', '_result.jpg'))[1])
        # note: os.path.split('_result.jpg')[1] is just '_result.jpg', so every
        # call overwrites the same file in out_dir (cf. the commented line above)
        result_file = os.path.join(out_dir, os.path.split('_result.jpg')[1])
        print('results saved to %s' % result_file)
        im = draw_all_detection(data_dict['data'].asnumpy(), boxes_this_image,
                                CLASSES, im_scale)
        cv2.imwrite(result_file, im)
    # print(xn, yn, wn, hn)
    # print(type(xn))
    return xn, yn, wn, hn
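
Code Example #8 returns its detections as parallel x/y/width/height lists; the conversion it performs inline on each [x1, y1, x2, y2, score] row amounts to the small helper below (the name corners_to_xywh is not from the source):

def corners_to_xywh(box):
    # box: [x1, y1, x2, y2, ...] -> integer (x, y, width, height)
    x1, y1, x2, y2 = box[:4]
    return int(x1), int(y1), int(x2 - x1), int(y2 - y1)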
Code Example #9
def demo_net(predictor, image_name, vis=False):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param image_name: image name
    :param vis: will save as a new image if not visualized
    :return: None
    """
    # warm up: run the detector once before timing
    for i in range(1, 2):
        im = cv2.imread(image_name)
        data_batch, data_names, im_scale = generate_batch(im)
        scores, boxes, data_dict = im_detect(predictor, data_batch, data_names,
                                             im_scale)
    print "begin test ================================\n"

    test_list = '/home/junchao/hexiangteng/mxnet/mx-rcnn-master/data/VOCdevkit2007/VOC2007/test.list'
    f_test = open(test_list, 'r')
    f_object = f_test.read()
    rows = f_object.split('\n')
    starttime1 = time.time()
    totaltime = 0
    for i in range(0, 999):
        image_name = rows[i]
        if not os.path.exists(image_name):
            break
        im = cv2.imread(image_name)
        data_batch, data_names, im_scale = generate_batch(im)
        starttime = time.time()
        scores, boxes, data_dict = im_detect(predictor, data_batch, data_names,
                                             im_scale)
        endtime = time.time()
        totaltime = (endtime - starttime) + totaltime
        print image_name, '\t', i, '\t', 'per:', (
            endtime - starttime
        ), 's\tavg:', totaltime / (i + 1), 's-------------------'
    print '--------time for all images:', totaltime, 's-------------------'
    print '--------time for all images with all processes:', (
        time.time() - starttime1), 's-------------------'
    all_boxes = [[] for _ in CLASSES]
    for cls in CLASSES:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind, np.newaxis]
        keep = np.where(cls_scores >= CONF_THRESH)[0]
        dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
        keep = nms(dets)
        all_boxes[cls_ind] = dets[keep, :]

    boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]

    # print results
    print 'class ---- [[x1, x2, y1, y2, confidence]]'
    for ind, boxes in enumerate(boxes_this_image):
        if len(boxes) > 0:
            print '---------', CLASSES[ind], '---------'
            print boxes

    if vis:
        vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image,
                          CLASSES, im_scale)
    else:
        result_file = image_name.replace('.', '_result.')
        print 'results saved to %s' % result_file
        im = draw_all_detection(data_dict['data'].asnumpy(), boxes_this_image,
                                CLASSES, im_scale)
        cv2.imwrite(result_file, im)
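
Code Example #9 mixes warm-up, per-image timing and the usual post-processing in one function. The timing part reduces to the loop below, shown as a sketch with a placeholder run_detection callable standing in for the generate_batch/im_detect pair (the helper and its name are assumptions, not from the source):

import time

def benchmark(image_paths, run_detection):
    total = 0.0
    for i, path in enumerate(image_paths):
        start = time.perf_counter()          # wall-clock timing
        run_detection(path)
        elapsed = time.perf_counter() - start
        total += elapsed
        print('%s\t%d\tper: %.3fs\tavg: %.3fs' % (path, i, elapsed, total / (i + 1)))
    print('total: %.3fs over %d images' % (total, len(image_paths)))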
Code Example #10
def demo_net(predictor, image_name, args):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param image_name: path to the input image
    :param args: parsed arguments (args.vis, args.out_dir, args.image, args.with_label, args.label_dir)
    :return: None
    """
    global detect_num
    global tp, fp, fn
    global gp, gr, gf1
    assert os.path.exists(image_name), image_name + ' not found'
    im = cv2.imread(image_name)
    #im = cv2.flip(im, 0)
    #im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    #im.reshape((im.shape[0],im.shape[1],1))
    data_batch, data_names, im_scale = generate_batch(im)
    print(im.shape)
    print(im_scale)
    #for i in range(10):
    #    scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, im_scale)
    t0 = time.perf_counter()  # time.clock() was removed in Python 3.8
    for i in range(1):
        scores, boxes, data_dict = im_detect(predictor, data_batch, data_names,
                                             im_scale)

    print(time.perf_counter() - t0)

    all_boxes = [[] for _ in CLASSES]
    for cls in CLASSES:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind, np.newaxis]
        keep = np.where(cls_scores >= CONF_THRESH)[0]
        dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
        keep = nms(dets)
        all_boxes[cls_ind] = dets[keep, :]

    boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]
    #print(boxes_this_image)

    # print results
    rst = {}
    lfn, lfp, ltp = 0, 0, 0
    print('class ---- [[x1, x2, y1, y2, confidence]]')
    for ind, boxes in enumerate(boxes_this_image):
        if len(boxes) > 0:
            #print('---------', CLASSES[ind], '---------')
            #print(boxes)
            rst[CLASSES[ind]] = [box for box in boxes]
            #detect_num[CLASSES[ind]] += len(boxes)
            detect_num[CLASSES[ind]] += 1  #len(boxes)

    bcs = [(list(box), cls) for (cls, boxes) in rst.items() for box in boxes
           if is_valid(box, im, im_scale)]
    bcs = sorted(bcs, key=lambda x: -x[0][-1])
    print('bcs:', bcs)
    #print('rst:', rst)

    # greedy cross-class suppression: keep higher-scoring boxes first and drop
    # any later box whose overlap with a kept box exceeds 80% of either box's area
    rst_bcs = []
    for bc in bcs:
        ignore = False
        for rbc in rst_bcs:
            print('bc', bc)
            print('rbc', rbc)
            iarea = intersect(bc[0], rbc[0])
            area1 = area(bc[0])
            area2 = area(rbc[0])
            print(iarea, area1, area2)
            if (iarea / area1 > 0.8) or (iarea / area2 > 0.8):
                ignore = True
                print(
                    'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
                )
                print('area', iarea, area1, area2)
                break
        if not ignore: rst_bcs.append(bc)

    tmp_rst = {}
    for bc in rst_bcs:
        tmp_rst.setdefault(bc[1], [])
        tmp_rst[bc[1]].append(np.array(bc[0]))

    rst = tmp_rst
    boxes_this_image = [
        copy.deepcopy(rst[CLASSES[idx]]) if CLASSES[idx] in rst else []
        for idx in range(len(boxes_this_image))
    ]

    #for boxes in boxes_this_image:
    #    if len(boxes):
    #        boxes.append(pre_box(im, im_scale))
    #        break
    #else:
    #    boxes_this_image[-1].append(pre_box(im, im_scale))
    boxes_this_image[-1].append(get_pre_box(im, im_scale))

    print('in, rst', rst)
    print(boxes_this_image)

    if args.image == '' and args.with_label:
        label_file = os.path.join(
            args.label_dir,
            os.path.split(image_name.replace('.jpg', '.txt'))[1])
        with open(label_file) as fd:
            for line in fd:
                cls, poss = line.split(':')
                x1, y1, x2, y2 = [float(item) for item in poss.split(',')]

                if not is_valid([x1, y1, x2, y2], im, im_scale): continue

                if cls not in rst:
                    lfn += 1
                    continue

                iou_thd = 0.5
                now_iou = 0
                now_idx = 0
                for ind, box in enumerate(rst[cls]):
                    #print('box = ', box, type(box))
                    #print('box = {}, true = {}'.format(box, (x1, y1, x2, y2)))
                    if (box[0] >= x2) or (box[2] <= x1) or (box[1] >= y2) or (
                            box[3] <= y1):
                        continue
                    else:
                        #print('###############################################')
                        i = (min(x2, box[2]) - max(x1, box[0])) * (
                            min(y2, box[3]) - max(y1, box[1]))
                        assert (i > 0)
                        u = (x2 - x1) * (y2 - y1) + (box[0] - box[2]) * (
                            box[1] - box[3]) - i
                        if i / u > now_iou:
                            now_iou = i / u
                            now_idx = ind
                if now_iou > iou_thd:
                    ltp += 1
                    rst[cls].pop(now_idx)
                    if len(rst[cls]) == 0: rst.pop(cls)
                else:
                    lfn += 1
        print('after rst:', rst)
        for vs in rst.values():
            lfp += len([v for v in vs if is_valid(v, im, im_scale)])

        p, r, f1 = 0, 0, 0
        if ltp != 0:
            p = 100.0 * ltp / (ltp + lfp)
            r = 100.0 * ltp / (ltp + lfn)
            f1 = 2 * p * r / (p + r)
        print('precision = {}%, recall = {}%, f1 score = {}%'.format(p, r, f1))
        print(boxes_this_image)

        tp += ltp
        fp += lfp
        fn += lfn
        gp += p
        gr += r
        gf1 += f1

    if args.vis:
        vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image,
                          CLASSES, im_scale)
    else:
        #print(os.path.join(args.out_dir, os.path.split(image_name.replace('.jpg', '_result.jpg'))[1]))
        result_file = os.path.join(
            args.out_dir,
            os.path.split(image_name.replace('.jpg', '_result.jpg'))[1])
        print('results saved to %s' % result_file)
        im = draw_all_detection(data_dict['data'].asnumpy(), boxes_this_image,
                                CLASSES, im_scale)
        cv2.imwrite(result_file, im)
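
Code Examples #8 and #10 compute intersection-over-union inline while matching detections against ground-truth boxes; the same quantity as a standalone helper for [x1, y1, x2, y2] boxes, included only to make that matching step easier to follow:

def iou(box_a, box_b):
    # intersection over union of two [x1, y1, x2, y2] boxes
    ix1, iy1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    ix2, iy2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0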
Code Example #11
def demo_net(predictor,
             dataset,
             image_set,
             root_path,
             dataset_path,
             vis=False,
             vis_image_dir=None,
             use_box_voting=False):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param dataset: dataset name, used to name the cached detections file
    :param image_set: image set to run detection on
    :param vis: if True, save the drawn result images into vis_image_dir
    :param use_box_voting: apply box voting to the NMS output
    :return: None
    """
    image_set_index_file = os.path.join(dataset_path, 'DET', 'ImageSets',
                                        'DET', image_set + '.txt')
    assert os.path.exists(
        image_set_index_file), image_set_index_file + ' not found'
    with open(image_set_index_file) as f:
        image_set_index = [x.strip().split(' ')[0] for x in f.readlines()]

    num_images = len(image_set_index)
    num_classes = len(CLASSES)
    all_boxes = [[[] for _ in xrange(num_images)] for _ in xrange(num_classes)]
    i = 0
    for index in image_set_index:
        image_file = image_path_from_index(index, dataset_path, image_set)
        print("processing {}/{} image:{}".format(i, num_images, image_file))
        im = cv2.imread(image_file)
        data_batch, data_names, im_scale = generate_batch(im)
        scores, boxes, data_dict = im_detect(predictor, data_batch, data_names,
                                             im_scale)
        for cls in CLASSES:
            cls_ind = CLASSES.index(cls)
            cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
            cls_scores = scores[:, cls_ind, np.newaxis]
            keep = np.where(cls_scores >= CONF_THRESH)[0]
            cls_dets = np.hstack(
                (cls_boxes, cls_scores)).astype(np.float32)[keep, :]
            keep = nms(cls_dets)

            # apply box voting after nms
            if use_box_voting:
                nms_cls_dets = cls_dets[keep, :]
                all_boxes[cls_ind][i] = box_voting(nms_cls_dets, cls_dets)
            else:
                all_boxes[cls_ind][i] = cls_dets[keep, :]

        boxes_this_image = [[]] + [
            all_boxes[j][i] for j in xrange(1, len(CLASSES))
        ]

        # print results
        """
        print('class ---- [[x1, x2, y1, y2, confidence]]')
        for ind, boxes in enumerate(boxes_this_image):
            if len(boxes) > 0:
                print('---------', CLASSES[ind], '---------')
                print(boxes)
        """

        i += 1
        if vis:
            #vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale)
            if not os.path.exists(vis_image_dir):
                os.mkdir(vis_image_dir)
            result_file = os.path.join(
                vis_image_dir,
                index.strip().split('/')[-1] + '_result' + '.JPEG')
            print('results saved to %s' % result_file)
            im = draw_all_detection(data_dict['data'].asnumpy(),
                                    boxes_this_image, CLASSES, im_scale)
            im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
            cv2.imwrite(result_file, im)

    print("num of images: detection:{}, gt:{}".format(len(all_boxes[0]),
                                                      num_images))
    # assert len(all_boxes) == num_images, 'calculations not complete'

    # save results
    cache_folder = os.path.join(root_path, 'cache')
    if not os.path.exists(cache_folder):
        os.mkdir(cache_folder)

    cache_file = os.path.join(cache_folder,
                              dataset + '_' + image_set + '_detections.pkl')
    print("save to {}".format(cache_file))
    with open(cache_file, 'wb') as f:
        cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)
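
Code Example #11 caches all_boxes (a per-class, per-image list of (M, 5) detection arrays) with cPickle. A minimal sketch of reading that cache back for later evaluation, written with the Python 3 pickle module (the original example is Python 2):

import pickle

def load_detections(cache_file):
    # all_boxes[cls_ind][img_ind] is an (M, 5) array of [x1, y1, x2, y2, score]
    with open(cache_file, 'rb') as f:
        return pickle.load(f)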