Example #1
def detect(pred, args, im_queue, rst_queue):
    while True:
        im_item = im_queue.get()
        if not im_item:
            print('detect finish...')
            rst_queue.put(None)
            break
        im, data_batch, data_names, im_scale = im_item

        t0 = time.clock()
        scores, boxes, data_dict = im_detect(pred, data_batch, data_names,
                                             im_scale)
        all_boxes = [[] for _ in CLASSES]
        for cls in CLASSES:
            cls_ind = CLASSES.index(cls)
            cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
            cls_scores = scores[:, cls_ind, np.newaxis]
            keep = np.where(cls_scores >= CONF_THRESH)[0]
            dets = np.hstack(
                (cls_boxes, cls_scores)).astype(np.float32)[keep, :]
            keep = nms(dets)
            all_boxes[cls_ind] = dets[keep, :]

        boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]
        print('detect:', time.clock() - t0)

        rst_queue.put([data_dict['data'].asnumpy(), boxes_this_image, im_scale])
    print('Hello2')
Example #2
def demo_net(predictor, image_name):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param image_name: image name
    :return: None
    """
    assert os.path.exists(image_name), image_name + ' not found'
    im = cv2.imread(image_name)
    data_batch, data_names, im_scale = generate_batch(im)
    scores, boxes, data_dict = im_detect(predictor, data_batch, data_names,
                                         im_scale)

    all_boxes = [[] for _ in CLASSES]
    for cls in CLASSES:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind, np.newaxis]
        keep = np.where(cls_scores >= CONF_THRESH)[0]
        dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
        keep = nms(dets, NMS_THRESH)
        all_boxes[cls_ind] = dets[keep, :]

    boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]
    vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image, CLASSES,
                      im_scale)
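
The post-processing in Examples #1 and #2 (and in most of the examples below) repeats the same three steps: slice out the class's four box columns, keep the proposals above the confidence threshold, then run NMS and leave the background slot empty. Below is a minimal self-contained sketch of just that step, with synthetic arrays in place of real im_detect output and a plain greedy NMS standing in for the library's nms wrapper (CLASSES, thresholds and box values here are made up for illustration):

import numpy as np

def greedy_nms(dets, thresh=0.3):
    """Plain IoU-based greedy NMS; stands in for the library's nms wrapper."""
    x1, y1, x2, y2, scores = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3], dets[:, 4]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        inter = np.maximum(0.0, xx2 - xx1 + 1) * np.maximum(0.0, yy2 - yy1 + 1)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        order = order[1:][iou <= thresh]
    return keep

# Synthetic im_detect output: 3 proposals, background at class index 0.
CLASSES = ('__background__', 'person', 'car')
CONF_THRESH = 0.7
scores = np.array([[0.05, 0.90, 0.05],   # proposal 0: confident 'person'
                   [0.10, 0.85, 0.05],   # proposal 1: near-duplicate of proposal 0
                   [0.20, 0.05, 0.75]],  # proposal 2: confident 'car'
                  dtype=np.float32)
# 4 box columns per class: here every class shares the same [x1, y1, x2, y2].
boxes = np.tile(np.array([[10, 10, 60, 60],
                          [12, 12, 62, 62],
                          [100, 100, 180, 160]], dtype=np.float32),
                (1, len(CLASSES)))

all_boxes = [[] for _ in CLASSES]
for cls_ind, cls in enumerate(CLASSES):
    cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]   # this class's box columns
    cls_scores = scores[:, cls_ind, np.newaxis]           # this class's score column
    keep = np.where(cls_scores >= CONF_THRESH)[0]         # confidence threshold
    dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
    keep = greedy_nms(dets)                               # drop near-duplicate boxes
    all_boxes[cls_ind] = dets[keep, :]

# Same final structure as in the examples: background slot left empty.
boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]
for cls, dets in zip(CLASSES, boxes_this_image):
    print(cls, dets)

With the values above, the duplicate 'person' proposal is suppressed by NMS and only one box per foreground class survives.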
Example #3
def demo_net(predictor, image_name, vis=False, save_dir='./', save_name='tmp.jpg', threshold=0.7):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param image_name: image name
    :param vis: will save as a new image if not visualized
    :return: None
    """
    assert os.path.exists(image_name), image_name + ' not found'
    result_lst = list()
    try:
        im = cv2.imread(image_name)
        data_batch, data_names, im_scale = generate_batch(im)
        scores, boxes, data_dict = im_detect(
            predictor, data_batch, data_names, im_scale)

        all_boxes = [[] for _ in CLASSES]
        for cls in CLASSES:
            cls_ind = CLASSES.index(cls)
            cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
            cls_scores = scores[:, cls_ind, np.newaxis]
            keep = np.where(cls_scores >= threshold)[0]
            dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
            keep = nms(dets)
            all_boxes[cls_ind] = dets[keep, :]

        boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]

    except:
        print('detection error')
        return None

    # print results
    print('class ---- [[x1, x2, y1, y2, confidence]]')
    for ind, boxes in enumerate(boxes_this_image):
        if len(boxes) > 0:
            print('---------', CLASSES[ind], '---------')
            print(boxes)
            for box in boxes:
                tmp_box = box.tolist()[:4]
                tmp_box.append(str(CLASSES[ind]))
                result_lst.append(tmp_box)
    if vis:
        vis_all_detection(data_dict['data'].asnumpy(),
                          boxes_this_image, CLASSES, im_scale)
    else:
        # result_dir = os.path.dirname(image_name)
        # result_file = save_dir + os.path.basename(image_name)
        result_file = save_dir + save_name
        print('results saved to %s' % result_file)
        im = draw_all_detection(
            data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale)
        if not os.path.exists(os.path.dirname(result_file)):
            os.system('mkdir -p '+os.path.dirname(result_file))
        cv2.imwrite(result_file, im)

    return result_lst
Example #4
def demo_net(predictor, image_name, args):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param image_name: image name
    :param vis: will save as a new image if not visualized
    :return: None
    """
    assert os.path.exists(image_name), image_name + ' not found'
    im = cv2.imread(image_name)
    data_batch, data_names, im_scale = generate_batch(im)
    #for i in range(10):
    #    scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, im_scale)
    t0 = time.clock()
    for i in range(1):
        scores, boxes, data_dict = im_detect(predictor, data_batch, data_names,
                                             im_scale)
    print(time.clock() - t0)

    all_boxes = [[] for _ in CLASSES]
    for cls in CLASSES:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind, np.newaxis]
        keep = np.where(cls_scores >= CONF_THRESH)[0]
        dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
        keep = nms(dets)
        all_boxes[cls_ind] = dets[keep, :]

    boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]

    # print results
    print('class ---- [[x1, x2, y1, y2, confidence]]')
    for ind, boxes in enumerate(boxes_this_image):
        if len(boxes) > 0:
            print('---------', CLASSES[ind], '---------')
            print(boxes)

    if args.vis:
        vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image,
                          CLASSES, im_scale)
    else:
        #print(os.path.join(args.out_dir, os.path.split(image_name.replace('.jpg', '_result.jpg'))[1]))
        result_file = os.path.join(
            args.out_dir,
            os.path.split(image_name.replace('.jpg', '_result.jpg'))[1])
        print('results saved to %s' % result_file)
        im = draw_all_detection(data_dict['data'].asnumpy(), boxes_this_image,
                                CLASSES, im_scale)
        cv2.imwrite(result_file, im)
Example #5
def demo_net(predictor, image_name, args):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param image_name: image name
    :param vis: will save as a new image if not visualized
    :return: None
    """
    global detect_num
    global tp, fp, fn
    global gp, gr, gf1
    #assert os.path.exists(image_name), image_name + ' not found'
    #im = cv2.imread(image_name)
    #im = cv2.flip(im, 1)
    data_batch, data_names, im_scale = generate_batch(image_name)
    #for i in range(10):
    #    scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, im_scale)
    t0 = time.clock()
    for i in range(1):
        scores, boxes, data_dict = im_detect(predictor, data_batch, data_names,
                                             im_scale)

    print(time.clock() - t0)

    all_boxes = [[] for _ in CLASSES]
    for cls in CLASSES:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind, np.newaxis]
        keep = np.where(cls_scores >= CONF_THRESH)[0]
        dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
        keep = nms(dets)
        all_boxes[cls_ind] = dets[keep, :]

    boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]
    #print(boxes_this_image)

    # print results
    rst = {}
    lfn, lfp, ltp = 0, 0, 0
    print('class ---- [[x1, x2, y1, y2, confidence]]')
    for ind, boxes in enumerate(boxes_this_image):
        if len(boxes) > 0:
            print('---------', CLASSES[ind], '---------')
            print(boxes)
            rst[CLASSES[ind]] = [box for box in boxes]
            #detect_num[CLASSES[ind]] += len(boxes)
            detect_num[CLASSES[ind]] += 1  #len(boxes)
    """if args.image == '' and args.with_label:
Example #6
def demo_net(predictor, image_name, result_txt, vis=False):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param image_name: image name
    :param vis: will save as a new image if not visualized
    :return: None
    """
    assert os.path.exists(image_name), image_name + ' not found'
    print image_name
    im = cv2.imread(image_name)
    data_batch, data_names, im_scale = generate_batch(im)
    scores, boxes, data_dict = im_detect(predictor, data_batch, data_names,
                                         im_scale)

    result_txt.write(
        image_name.split('/')[-2] + '/' + image_name.split('/')[-1] + '\n')

    all_boxes = [[] for _ in CLASSES]
    for cls in CLASSES:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind, np.newaxis]
        keep = np.where(cls_scores >= CONF_THRESH)[0]
        dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
        keep = nms(dets)
        all_boxes[cls_ind] = dets[keep, :]

    boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]

    # print results
    #logger.info('---class---')
    #logger.info('[[x1, x2, y1, y2, confidence]]')
    for ind, boxes in enumerate(boxes_this_image):
        #print ind
        if len(boxes) > 0:
            logger.info('---%s---' % CLASSES[ind])
            logger.info('%s' % boxes)
            #print len(boxes)
            for iii in range(0, len(boxes)):
                result_txt.write(
                    str(boxes[iii][0]) + ' ' + str(boxes[iii][1]) + ' ' +
                    str(boxes[iii][2]) + ' ' + str(boxes[iii][3]) + ' ' +
                    str(boxes[iii][4]) + ' ')

    result_txt.write('\n')
Example #7
def demo_net(predictor, image_name, vis=False):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param image_name: image name
    :param vis: will save as a new image if not visualized
    :return: None
    """
    assert os.path.exists(image_name), image_name + ' not found'
    im = cv2.imread(image_name)
    data_batch, data_names, im_scale = generate_batch(im)
    scores, boxes, head_boxes, joints, data_dict = im_detect(
        predictor, data_batch, data_names, im_scale)
    print 'im_scale = {}'.format(im_scale)

    all_boxes = [[] for _ in CLASSES]
    for cls in CLASSES:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind, np.newaxis]
        keep1 = np.where(cls_scores >= CONF_THRESH)[0]
        dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep1, :]
        keep2 = nms(dets)
        all_boxes[cls_ind] = dets[keep2, :]
        if cls == 'person':
            head_boxes = head_boxes[keep1[keep2], :]
            joints = joints[keep1[keep2], :]

    boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]

    # print results
    print 'class ---- [[x1, x2, y1, y2, confidence]]'
    for ind, boxes in enumerate(boxes_this_image):
        if len(boxes) > 0:
            print '---------', CLASSES[ind], '---------'
            print boxes

    if vis:
        vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image,
                          CLASSES, im_scale, head_boxes, joints)
    else:
        result_file = image_name.replace('.', '_result.')
        print 'results saved to %s' % result_file
        im = draw_all_detection(data_dict['data'].asnumpy(), boxes_this_image,
                                CLASSES, im_scale)
        cv2.imwrite(result_file, im)
Example #8
def demo_net(predictor, image_name, vis=True):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param image_name: image name
    :param vis: will save as a new image if not visualized
    :return: None
    """
    BOOL = 0
    assert os.path.exists(image_name), image_name + ' not found'
    im = cv2.imread(image_name)
    data_batch, data_names, im_scale = generate_batch(im)
    scores, boxes, data_dict = im_detect(predictor, data_batch, data_names,
                                         im_scale)
    all_boxes = [[] for _ in Global.CLASSES]
    for cls in Global.CLASSES:
        cls_ind = Global.CLASSES.index(cls)
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind, np.newaxis]
        keep = np.where(cls_scores >= Global.conf_thresh_value)[0]
        dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
        keep = py_nms_wrapper(Global.nms_thresh_value)(dets)
        all_boxes[cls_ind] = dets[keep, :]

    boxes_this_image = [[]] + [
        all_boxes[j] for j in range(1, len(Global.CLASSES))
    ]

    for ind, boxes in enumerate(boxes_this_image):
        if len(boxes) > 0:
            BOOL = 1
            logger.info('---%s---' % Global.CLASSES[ind])
            logger.info('%s' % boxes)

    result_file = image_name.replace(str(Global.open_img_dir),
                                     str(Global.save_path))
    print result_file
    logger.info('results saved to %s' % result_file)
    im, CLASS, SCORE = draw_all_detection(data_dict['data'].asnumpy(),
                                          boxes_this_image, Global.CLASSES,
                                          im_scale)
    cv2.imwrite(result_file, im)
    Global.PICTURE_INFO[0].append(result_file)
    Global.PICTURE_INFO[1].append(CLASS)
    Global.PICTURE_INFO[2].append(SCORE)
    return CLASS, SCORE, BOOL
Example #9
def demo_net(predictor, image_name, vis=False):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param image_name: image name
    :param vis: will save as a new image if not visualized
    :return: None
    """
    assert os.path.exists(image_name), image_name + ' not found'
    im = cv2.imread(image_name)
    data_batch, data_names, im_scale = generate_batch(im)
    scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, im_scale)

    all_boxes = [[] for _ in CLASSES]
    for cls in CLASSES:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind, np.newaxis]
        print(cls_scores.shape)
        keep = np.where(cls_scores >= CONF_THRESH)[0]
        dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
        keep = nms(dets)
        all_boxes[cls_ind] = dets[keep, :]

    boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]

    # print results
    logger.info('---class---')
    logger.info('[[x1, x2, y1, y2, confidence]]')
    for ind, boxes in enumerate(boxes_this_image):
        if len(boxes) > 0:
            logger.info('---%s---' % CLASSES[ind])
            logger.info('%s' % boxes)

    if vis:
        vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale)
    else:
        
        idx = [i for i, v in enumerate(image_name) if v == '/'][-1]
        result_file = "data/VOCdevkit/results/test/" + image_name[idx+1:]

        result_file = result_file.replace('.', '_result.')
        logger.info('results saved to %s' % result_file)
        im = draw_all_detection(data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale)
        cv2.imwrite(result_file, im)
Example #10
def predict(imagePath, predictor):
    if imagePath.startswith('http://') or imagePath.startswith(
            'https://') or imagePath.startswith('ftp://'):
        req = urllib.urlopen(imagePath)
        arr = np.asarray(bytearray(req.read()), dtype=np.uint8)
        im = cv2.imdecode(arr, -1)
    else:
        if not os.path.exists(imagePath):
            raise Exception('Input image file does not exist')
        im = cv2.imread(imagePath)

    data_batch, data_names, im_scale = generate_batch(im)
    scores, boxes, data_dict = im_detect(predictor, data_batch, data_names,
                                         im_scale)
    all_boxes = [[] for _ in CLASSES]
    for cls in CLASSES:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind, np.newaxis]
        keep = np.where(cls_scores >= CONF_THRESH)[0]
        dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
        keep = nms(dets)
        all_boxes[cls_ind] = dets[keep, :]

    boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]
    result = []

    # print results
    for ind, boxes in enumerate(boxes_this_image):
        if len(boxes) > 0:
            result.append({
                "object_name": CLASSES[ind],
                "confidence": boxes[0][4],
                "bounding_box": {
                    "x1": boxes[0][0],
                    "x2": boxes[0][1],
                    "y1": boxes[0][2],
                    "y2": boxes[0][3]
                }
            })

    result = jsonpickle.encode(result, unpicklable=False)
    return json.loads(result)
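
Example #10 ends by converting the result list into plain JSON-compatible data. A tiny stand-alone illustration of that serialization step, with hard-coded values in place of real detections (the real confidences are numpy scalars, which may need casting to plain float, or the jsonpickle numpy extension, before encoding):

import json
import jsonpickle

# Hard-coded stand-in for the result list Example #10 builds.
result = [{
    "object_name": "person",
    "confidence": 0.93,
    "bounding_box": {"x1": 12.0, "x2": 48.0, "y1": 30.0, "y2": 110.0},
}]

encoded = jsonpickle.encode(result, unpicklable=False)  # plain JSON, no py/object tags
decoded = json.loads(encoded)                           # back to ordinary dicts/lists
print(decoded[0]["object_name"], decoded[0]["confidence"])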
Example #11
def net_inference(model, args):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param image_name: image name
    :return: None
    """

    predictor = model['predictor']
    classes = model['classes']
    threshold = model['threshold']
    rets = []
    try:
        for data in args:
            im, im_info, err = _load_image(data['data'])
            if err is not None:
                rets.append(err)
                continue

            data_batch, data_names, im_scale = generate_batch(im)
            scores, boxes, data_dict = im_detect(predictor, data_batch,
                                                 data_names, im_scale)

            det_ret = []
            for cls_ind, cls_name in enumerate(classes[1:], start=1):
                cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
                cls_scores = scores[:, cls_ind, np.newaxis]
                keep = np.where(cls_scores >= threshold)[0]
                dets = np.hstack(
                    (cls_boxes, cls_scores)).astype(np.float32)[keep, :]
                keep = nms(dets)
                det_ret.extend(
                    _build_result(det, cls_name, cls_ind, im_info)
                    for det in dets[keep, :])

            rets.append(
                dict(code=0, message='', result=dict(detections=det_ret)))
    except Exception as _e:
        logger.info("inference error:%s",
                    traceback.format_exc(),
                    extra={"reqid": ""})
        return [], 599, {"code": 599, "message": str(_e)}
    return rets, 0, None
Example #12
def demo_net(predictor, image_name, cat, vis=False):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param image_name: image name
    :param vis: will save as a new image if not visualized
    :return: None
    """
    assert os.path.exists(image_name), image_name + ' not found'
    im = cv2.imread(image_name)
    data_batch, data_names, im_scale = generate_batch(im)
    scores, boxes, data_dict = im_detect(predictor, data_batch, data_names,
                                         im_scale)
    CLASSES = ['__background__', cat]
    all_boxes = [[] for _ in CLASSES]
    for cls in CLASSES:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind, np.newaxis]
        keep = np.where(cls_scores >= CONF_THRESH)[0]
        dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
        if dets.shape[0] > 1:
            keep = [np.argmax(dets[:, 4])]
            all_boxes[cls_ind] = dets[keep, :]
        else:
            all_boxes[cls_ind] = dets

    boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]
    # print results
    """
    #logger.info('---class---')
    #logger.info('[[x1, x2, y1, y2, confidence]]')
    for ind, boxes in enumerate(boxes_this_image):
        #if len(boxes) > 0:
         #   logger.info('---%s---' % CLASSES[ind])
            logger.info('%s' % boxes)
    """
    if vis:
        vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image,
                          CLASSES, im_scale)

    return boxes_this_image[1][0]
Example #13
def demo_net(predictor, image_name, vis=False):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param image_name: image name
    :param vis: will save as a new image if not visualized
    :return: None
    """
    assert os.path.exists(image_name), image_name + ' not found'
    im = cv2.imread(image_name)
    data_batch, data_names, im_scale = generate_batch(im)
    scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, im_scale)

    all_boxes = [[] for _ in CLASSES]
    for cls in CLASSES:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind, np.newaxis]
        keep = np.where(cls_scores >= CONF_THRESH)[0]
        dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
        keep = nms(dets)
        all_boxes[cls_ind] = dets[keep, :]

    boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]

    # print results
    logger.info('---class---')
    logger.info('[[x1, x2, y1, y2, confidence]]')
    for ind, boxes in enumerate(boxes_this_image):
        if len(boxes) > 0:
            logger.info('---%s---' % CLASSES[ind])
            logger.info('%s' % boxes)

    if vis:
        vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale)
    else:
        result_file = image_name.replace('.', '_result.')
        logger.info('results saved to %s' % result_file)
        im = draw_all_detection(data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale)
        cv2.imwrite(result_file, im)
Example #14
    def detect(self, im, vis=False, verbose=False):
        """
        generate data_batch -> im_detect -> post process
        :param vis: will save as a new image if not visualized
        :return: None
        """
        data_batch, data_names, im_scale = self.generate_batch(im)
        scores, boxes, data_dict = im_detect(self.predictor, data_batch,
                                             data_names, im_scale)

        all_boxes = [[] for _ in CLASSES]
        for cls in CLASSES:
            #if cls == 'person':
            cls_ind = CLASSES.index(cls)
            cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
            cls_scores = scores[:, cls_ind, np.newaxis]
            keep = np.where(cls_scores >= CONF_THRESH)[0]
            dets = np.hstack(
                (cls_boxes, cls_scores)).astype(np.float32)[keep, :]
            keep = nms(dets)
            all_boxes[cls_ind] = dets[keep, :]
        boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]

        detections = []
        for ind, boxes in enumerate(boxes_this_image):
            if len(boxes) > 0:
                for box in boxes:
                    x1 = int(round(box[0]))
                    y1 = int(round(box[1]))
                    x2 = int(round(box[2]))
                    y2 = int(round(box[3]))
                    score = int(round(box[4]))
                    new_det = Detection(x1, y1, x2 - x1, y2 - y1, score)
                    detections.append(new_det)
                    if verbose:
                        format_str = ('%d,%d,%d,%d,%.3f')
                        print(format_str % (x1, y1, x2 - x1, y2 - y1, score))
        return detections
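
Example #14 wraps each kept box in a Detection object from the surrounding tracking code. Below is a small sketch of just that conversion, with an assumed namedtuple standing in for the real Detection class (the field names are guesses); keeping the confidence as a float avoids the int(round(box[4])) above, which collapses fractional scores to 0 or 1:

from collections import namedtuple

import numpy as np

# Assumed stand-in for the Detection class used in Example #14.
Detection = namedtuple('Detection', ['x', 'y', 'w', 'h', 'score'])

def boxes_to_detections(dets):
    """Convert rows of [x1, y1, x2, y2, score] into Detection(x, y, w, h, score)."""
    detections = []
    for x1, y1, x2, y2, score in dets:
        x1, y1, x2, y2 = (int(round(v)) for v in (x1, y1, x2, y2))
        # keep the confidence as a float instead of rounding it to an int
        detections.append(Detection(x1, y1, x2 - x1, y2 - y1, float(score)))
    return detections

dets = np.array([[10.2, 20.7, 110.9, 220.1, 0.87]], dtype=np.float32)
print(boxes_to_detections(dets))
# [Detection(x=10, y=21, w=101, h=199, score=0.8700000047683716)]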
Example #15
def demo_net(predictor, data, image_names, im_scales):
    data = [[mx.nd.array(data[i][name]) for name in DATA_NAMES] for i in xrange(len(data))]
    # warm up
    for i in xrange(2):
        data_batch = mx.io.DataBatch(data=[data[0]], label=[], pad=0, index=0,
                                     provide_data=[[(k, v.shape) for k, v in zip(DATA_NAMES, data[0])]],
                                     provide_label=[None])
        scales = [data_batch.data[i][1].asnumpy()[0, 2] for i in xrange(len(data_batch.data))]
        _, _, _, _, _= im_detect(predictor, data_batch, DATA_NAMES, scales)

    # test
    for idx, im_name in enumerate(image_names):
        data_batch = mx.io.DataBatch(data=[data[idx]], label=[], pad=0, index=idx,
                                     provide_data=[[(k, v.shape) for k, v in zip(DATA_NAMES, data[idx])]],
                                     provide_label=[None])
        scales = [data_batch.data[i][1].asnumpy()[0, 2] for i in xrange(len(data_batch.data))]

        tic()
        scores, boxes, boxes2, masks, data_dict = im_detect(predictor, data_batch, DATA_NAMES, scales)
        im_shapes = [data_batch.data[i][0].shape[2:4] for i in xrange(len(data_batch.data))]

        # mask output
        if not config.TEST.USE_MASK_MERGE:
            all_boxes = [[] for _ in xrange(config.NUM_CLASSES)]
            all_masks = [[] for _ in xrange(config.NUM_CLASSES)]
            nms = py_nms_wrapper(config.TEST.NMS)
            for j in range(1, config.NUM_CLASSES):
                indexes = np.where(scores[0][:, j] > 0.7)[0]
                cls_scores = scores[0][indexes, j, np.newaxis]
                cls_masks = masks[0][indexes, 1, :, :]
                try:
                    if config.CLASS_AGNOSTIC:
                        cls_boxes = boxes[0][indexes, :]
                    else:
                        raise Exception()
                except:
                    cls_boxes = boxes[0][indexes, j * 4:(j + 1) * 4]

                cls_dets = np.hstack((cls_boxes, cls_scores))
                keep = nms(cls_dets)
                all_boxes[j] = cls_dets[keep, :]
                all_masks[j] = cls_masks[keep, :]
            dets = [all_boxes[j] for j in range(1, config.NUM_CLASSES)]
            masks = [all_masks[j] for j in range(1, config.NUM_CLASSES)]
        else:
            masks = masks[0][:, 1:, :, :]
            im_height = np.round(im_shapes[0][0] / scales[0]).astype('int')
            im_width = np.round(im_shapes[0][1] / scales[0]).astype('int')
            print (im_height, im_width)
            boxes_ = clip_boxes(boxes[0], (im_height, im_width))
            result_masks, result_dets = gpu_mask_voting(masks, boxes_, scores[0], config.NUM_CLASSES,
                                                        100, im_width, im_height,
                                                        config.TEST.NMS, config.TEST.MASK_MERGE_THRESH,
                                                        config.BINARY_THRESH, 0)

            dets = [result_dets[j] for j in range(1, config.NUM_CLASSES)]
            masks = [result_masks[j][:, 0, :, :] for j in range(1, config.NUM_CLASSES)]
        print 'testing {} {:.4f}s'.format(im_name, toc())
        # visualize
        for i in xrange(len(dets)):
            keep = np.where(dets[i][:,-1]>0.7)
            dets[i] = dets[i][keep]
            masks[i] = masks[i][keep]
        im = cv2.imread('../data/demo/' + im_name)
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        show_masks(im, dets, masks, CLASSES)

        # debug
        '''
        for ii in range(scores[0].shape[0]):
            for jj in range(1, scores[0].shape[1]):
                if scores[0][ii][jj]>0.7:
                    print ii, jj, scores[0][ii][jj]
        '''
        # bounding box output
        all_boxes = [[] for _ in CLASSES]
        nms = py_nms_wrapper(NMS_THRESH)
        for cls in CLASSES:
            cls_ind = CLASSES.index(cls)+1
            cls_boxes = boxes2[0][:, 4 * cls_ind:4 * (cls_ind + 1)]
            cls_scores = scores[0][:, cls_ind, np.newaxis]
            keep = np.where(cls_scores >= CONF_THRESH)[0]
            #print cls, keep
            dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
            keep = nms(dets)
            all_boxes[cls_ind-1] = dets[keep, :]

        boxes_this_image = [all_boxes[j] for j in range(len(CLASSES))]
        vis_all_detection(data_dict[0]['data'].asnumpy(), boxes_this_image, CLASSES, im_scales[idx])

    print 'done'
Example #16
def demo_net(predictor,
             dataset,
             image_set,
             root_path,
             dataset_path,
             vis=False,
             vis_image_dir=None,
             use_box_voting=False):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param image_name: image name
    :param vis: will save as a new image if not visualized
    :return: None
    """
    image_set_index_file = os.path.join(dataset_path, 'DET', 'ImageSets',
                                        'DET', image_set + '.txt')
    assert os.path.exists(
        image_set_index_file), image_set_index_file + ' not found'
    with open(image_set_index_file) as f:
        image_set_index = [x.strip().split(' ')[0] for x in f.readlines()]

    num_images = len(image_set_index)
    num_classes = len(CLASSES)
    all_boxes = [[[] for _ in xrange(num_images)] for _ in xrange(num_classes)]
    i = 0
    for index in image_set_index:
        image_file = image_path_from_index(index, dataset_path, image_set)
        print("processing {}/{} image:{}".format(i, num_images, image_file))
        im = cv2.imread(image_file)
        data_batch, data_names, im_scale = generate_batch(im)
        scores, boxes, data_dict = im_detect(predictor, data_batch, data_names,
                                             im_scale)
        for cls in CLASSES:
            cls_ind = CLASSES.index(cls)
            cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
            cls_scores = scores[:, cls_ind, np.newaxis]
            keep = np.where(cls_scores >= CONF_THRESH)[0]
            cls_dets = np.hstack(
                (cls_boxes, cls_scores)).astype(np.float32)[keep, :]
            keep = nms(cls_dets)

            # apply box voting after nms
            if use_box_voting:
                nms_cls_dets = cls_dets[keep, :]
                all_boxes[cls_ind][i] = box_voting(nms_cls_dets, cls_dets)
            else:
                all_boxes[cls_ind][i] = cls_dets[keep, :]

        boxes_this_image = [[]] + [
            all_boxes[j][i] for j in xrange(1, len(CLASSES))
        ]

        # print results
        """
        print('class ---- [[x1, x2, y1, y2, confidence]]')
        for ind, boxes in enumerate(boxes_this_image):
            if len(boxes) > 0:
                print('---------', CLASSES[ind], '---------')
                print(boxes)
        """

        i += 1
        if vis:
            #vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale)
            if not os.path.exists(vis_image_dir):
                os.mkdir(vis_image_dir)
            result_file = os.path.join(
                vis_image_dir,
                index.strip().split('/')[-1] + '_result' + '.JPEG')
            print('results saved to %s' % result_file)
            im = draw_all_detection(data_dict['data'].asnumpy(),
                                    boxes_this_image, CLASSES, im_scale)
            im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
            cv2.imwrite(result_file, im)

    print("num of images: detection:{}, gt:{}".format(len(all_boxes[0]),
                                                      num_images))
    # assert len(all_boxes) == num_images, 'calculations not complete'

    # save results
    cache_folder = os.path.join(root_path, 'cache')
    if not os.path.exists(cache_folder):
        os.mkdir(cache_folder)

    cache_file = os.path.join(cache_folder,
                              dataset + '_' + image_set + '_detections.pkl')
    print("save to {}".format(cache_file))
    with open(cache_file, 'wb') as f:
        cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)
Example #17
def demo_net(predictor, image_name, args):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param image_name: image name
    :param vis: will save as a new image if not visualized
    :return: None
    """
    global detect_num
    global tp, fp, fn
    global gp, gr, gf1
    assert os.path.exists(image_name), image_name + ' not found'
    im = cv2.imread(image_name)
    #im = cv2.flip(im, 0)
    #im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    #im.reshape((im.shape[0],im.shape[1],1))
    data_batch, data_names, im_scale = generate_batch(im)
    print(im.shape)
    print(im_scale)
    #for i in range(10):
    #    scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, im_scale)
    t0 = time.clock()
    for i in range(1):
        scores, boxes, data_dict = im_detect(predictor, data_batch, data_names,
                                             im_scale)

    print(time.clock() - t0)

    all_boxes = [[] for _ in CLASSES]
    for cls in CLASSES:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind, np.newaxis]
        keep = np.where(cls_scores >= CONF_THRESH)[0]
        dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
        keep = nms(dets)
        all_boxes[cls_ind] = dets[keep, :]

    boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]
    #print(boxes_this_image)

    # print results
    rst = {}
    lfn, lfp, ltp = 0, 0, 0
    print('class ---- [[x1, x2, y1, y2, confidence]]')
    for ind, boxes in enumerate(boxes_this_image):
        if len(boxes) > 0:
            #print('---------', CLASSES[ind], '---------')
            #print(boxes)
            rst[CLASSES[ind]] = [box for box in boxes]
            #detect_num[CLASSES[ind]] += len(boxes)
            detect_num[CLASSES[ind]] += 1  #len(boxes)

    bcs = [(list(box), cls) for (cls, boxes) in rst.items() for box in boxes
           if is_valid(box, im, im_scale)]
    bcs = sorted(bcs, key=lambda x: -x[0][-1])
    print('bcs:', bcs)
    #print('rst:', rst)

    rst_bcs = []
    for bc in bcs:
        ignore = False
        for rbc in rst_bcs:
            print('bc', bc)
            print('rbc', rbc)
            iarea = intersect(bc[0], rbc[0])
            area1 = area(bc[0])
            area2 = area(rbc[0])
            print(iarea, area1, area2)
            if (iarea / area1 > 0.8) or (iarea / area2 > 0.8):
                ignore = True
                print(
                    'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
                )
                print('area', iarea, area1, area2)
                break
        if not ignore: rst_bcs.append(bc)

    tmp_rst = {}
    for bc in rst_bcs:
        tmp_rst.setdefault(bc[1], [])
        tmp_rst[bc[1]].append(np.array(bc[0]))

    rst = tmp_rst
    boxes_this_image = [
        copy.deepcopy(rst[CLASSES[idx]]) if CLASSES[idx] in rst else []
        for idx in range(len(boxes_this_image))
    ]

    #for boxes in boxes_this_image:
    #    if len(boxes):
    #        boxes.append(pre_box(im, im_scale))
    #        break
    #else:
    #    boxes_this_image[-1].append(pre_box(im, im_scale))
    boxes_this_image[-1].append(get_pre_box(im, im_scale))

    print('in, rst', rst)
    print(boxes_this_image)

    if args.image == '' and args.with_label:
        label_file = os.path.join(
            args.label_dir,
            os.path.split(image_name.replace('.jpg', '.txt'))[1])
        with open(label_file) as fd:
            for line in fd:
                cls, poss = line.split(':')
                x1, y1, x2, y2 = [float(item) for item in poss.split(',')]

                if not is_valid([x1, y1, x2, y2], im, im_scale): continue

                if cls not in rst:
                    lfn += 1
                    continue

                iou_thd = 0.5
                now_iou = 0
                now_idx = 0
                for ind, box in enumerate(rst[cls]):
                    #print('box = ', box, type(box))
                    #print('box = {}, true = {}'.format(box, (x1, y1, x2, y2)))
                    if (box[0] >= x2) or (box[2] <= x1) or (box[1] >= y2) or (
                            box[3] <= y1):
                        continue
                    else:
                        #print('###############################################')
                        i = (min(x2, box[2]) - max(x1, box[0])) * (
                            min(y2, box[3]) - max(y1, box[1]))
                        assert (i > 0)
                        u = (x2 - x1) * (y2 - y1) + (box[0] - box[2]) * (
                            box[1] - box[3]) - i
                        if i / u > now_iou:
                            now_iou = i / u
                            now_idx = ind
                if now_iou > iou_thd:
                    ltp += 1
                    rst[cls].pop(now_idx)
                    if len(rst[cls]) == 0: rst.pop(cls)
                else:
                    lfn += 1
        print('after rst:', rst)
        for vs in rst.values():
            lfp += len([v for v in vs if is_valid(v, im, im_scale)])

        p, r, f1 = 0, 0, 0
        if ltp != 0:
            p = 100.0 * ltp / (ltp + lfp)
            r = 100.0 * ltp / (ltp + lfn)
            f1 = 2 * p * r / (p + r)
        print('precision = {}%, recall = {}%, f1 score = {}%'.format(p, r, f1))
        print(boxes_this_image)

        tp += ltp
        fp += lfp
        fn += lfn
        gp += p
        gr += r
        gf1 += f1

    if args.vis:
        vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image,
                          CLASSES, im_scale)
    else:
        #print(os.path.join(args.out_dir, os.path.split(image_name.replace('.jpg', '_result.jpg'))[1]))
        result_file = os.path.join(
            args.out_dir,
            os.path.split(image_name.replace('.jpg', '_result.jpg'))[1])
        print('results saved to %s' % result_file)
        im = draw_all_detection(data_dict['data'].asnumpy(), boxes_this_image,
                                CLASSES, im_scale)
        cv2.imwrite(result_file, im)
Example #18
def demo_net(predictor, image_name, vis=False):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param image_name: image name
    :param vis: will save as a new image if not visualized
    :return: None
    """
    for image_name in listfile:

        ll = []

        ll.append(image_name.split('/')[-1] + ',')
        assert os.path.exists(image_name), image_name + ' not found'
        im = cv2.imread(image_name)
        height, width, _ = im.shape  # cv2 images are (rows, cols, channels)
        data_batch, data_names, im_scale = generate_batch(im)
        scores, boxes, data_dict = im_detect(predictor, data_batch, data_names,
                                             im_scale)

        all_boxes = [[] for _ in CLASSES]
        for cls in CLASSES:
            cls_ind = CLASSES.index(cls)
            cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
            cls_scores = scores[:, cls_ind, np.newaxis]
            keep = np.where(cls_scores >= CONF_THRESH)[0]
            dets = np.hstack(
                (cls_boxes, cls_scores)).astype(np.float32)[keep, :]
            keep = nms(dets)
            all_boxes[cls_ind] = dets[keep, :]

        boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]
        if len(CLASSES) == 0:
            ll.extend([2, 0, 183, 272, 517])

        # print results
        logger.info('---class---')
        logger.info('[[x1, x2, y1, y2, confidence]]')
        for ind, boxes in enumerate(boxes_this_image):
            if len(boxes) > 0:
                logger.info('---%s---' % CLASSES[ind])
                logger.info('%s' % boxes)
                print(boxes)
                item = boxes[0]
                xmin = int(round(item[0]))
                ymin = int(round(item[1]))
                xmax = int(round(item[2]))
                ymax = int(round(item[3]))
                if xmin < 0:
                    xmin = 0
                if ymin < 0:
                    ymin = 0
                if xmax > width:
                    xmax = width
                if ymax > height:
                    ymax = height
                ll.extend([bq[CLASSES[ind]], xmin, ymin, xmax, ymax])
                gl = [
                    str(i) for i in [
                        image_name.split('/')[-1], bq[CLASSES[ind]], item[4],
                        xmin, ymin, xmax, ymax
                    ]
                ]
                file2 = 'fasterrcnnrh.txt'
                with open(file2, 'a+') as f1:
                    f1.write(','.join(gl))
                    f1.write('\n')
                    print(ll)
        ll = [str(i) for i in ll]
        file = 'fasterrcnn.txt'
        with open(file, 'a+') as f:
            f.write(' '.join(ll))
            f.write('\n')
Example #19
def demo_net(predictor, image_name, image, with_label, vis, out_dir, label_dir):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param image_name: image name
    :param vis: will save as a new image if not visualized
    :return: None
    """
    global detect_num
    global tp, fp, fn
    global gp, gr, gf1
    if (type(image_name)==str):
        assert os.path.exists(image_name), image_name + ' not found'
        im = cv2.imread(image_name)
    else:
        im = image
    # im = cv2.flip(im, 1)
    data_batch, data_names, im_scale = generate_batch(im)
    # for i in range(10):
    #    scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, im_scale)

    for i in range(1):
        scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, im_scale)
  
    
    xn = []
    yn = []
    wn = []
    hn = []
    all_boxes = [[] for _ in CLASSES]
    for cls in CLASSES:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind, np.newaxis]
        keep = np.where(cls_scores >= CONF_THRESH)[0]
        dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
        keep = nms(dets)
        all_boxes[cls_ind] = dets[keep, :]

    boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]
    # print(boxes_this_image)
    
    # print results
    rst = {};
    lfn, lfp, ltp = 0, 0, 0
    #print('class ---- [[x1, x2, y1, y2, confidence]]')
    for ind, boxes in enumerate(boxes_this_image):
        if len(boxes) > 0:
            #print('---------', CLASSES[ind], '---------')
            #print(boxes)
            for i in range(0, len(boxes)):
                xn.append(int(boxes[i][0] + 0))
                yn.append(int(boxes[i][1] + 0))
                wn.append(int(boxes[i][2] - boxes[i][0]))
                hn.append(int(boxes[i][3] - boxes[i][1]))

            #rst[CLASSES[ind]] = [box for box in boxes]
            # detect_num[CLASSES[ind]] += len(boxes)
            #detect_num[CLASSES[ind]] += 1  # len(boxes)

    """if image == '' and with_label:
        label_file = os.path.join(label_dir, os.path.split(image_name.replace('.jpg', '.txt'))[1])
        with open(label_file) as fd:
            for line in fd:
                cls, poss = line.split(':')
                x1, y1, x2, y2 = [float(item) for item in poss.split(',')]

                if cls not in rst:
                    lfn += 1
                    continue

                iou_thd = 0.5
                now_iou = 0
                now_idx = 0
                for ind, box in enumerate(rst[cls]):
                    # print('box = ', box, type(box))
                    # print('box = {}, true = {}'.format(box, (x1, y1, x2, y2)))
                    if (box[0] >= x2) or (box[2] <= x1) or (box[1] >= y2) or (box[3] <= y1):
                        continue
                    else:
                        # print('###############################################')
                        i = (min(x2, box[2]) - max(x1, box[0])) * (min(y2, box[3]) - max(y1, box[1]))
                        assert (i > 0)
                        u = (x2 - x1) * (y2 - y1) + (box[0] - box[2]) * (box[1] - box[3]) - i
                        if i / u > now_iou:
                            now_iou = i / u
                            now_idx = ind
                if now_iou > iou_thd:
                    ltp += 1
                    rst[cls].pop(now_idx)
                    if len(rst[cls]) == 0: rst.pop(cls)
                else:
                    lfn += 1
        for vs in rst.values():
            lfp += len(vs)

        p, r, f1 = 0, 0, 0
        if ltp != 0:
            p = 100.0 * ltp / (ltp + lfp)
            r = 100.0 * ltp / (ltp + lfn)
            f1 = 2 * p * r / (p + r)
        print('precision = {}%, recall = {}%, f1 score = {}%'.format(p, r, f1))

        tp += ltp
        fp += lfp
        fn += lfn
        gp += p
        gr += r
        gf1 += f1"""

    """if vis:
        vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale)
    else:
        # print(os.path.join(args.out_dir, os.path.split(image_name.replace('.jpg', '_result.jpg'))[1]))
        # result_file = os.path.join(out_dir, os.path.split(image_name.replace('.jpg', '_result.jpg'))[1])
        result_file = os.path.join(out_dir, os.path.split('_result.jpg')[1])
        print('results saved to %s' % result_file)
        im = draw_all_detection(data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale)
        cv2.imwrite(result_file, im)"""
    # print(type(xn)) 
    return xn, yn, wn, hn
Example #20
def demo_net(predictor, image_name, vis=False):
    """
    generate data_batch -> im_detect -> post process
    :param predictor: Predictor
    :param image_name: image name
    :param vis: will save as a new image if not visualized
    :return: None
    """
    # warm up
    for i in range(1, 2):
        im = cv2.imread(image_name)
        data_batch, data_names, im_scale = generate_batch(im)
        scores, boxes, data_dict = im_detect(predictor, data_batch, data_names,
                                             im_scale)
    print "begin test ================================\n"

    test_list = '/home/junchao/hexiangteng/mxnet/mx-rcnn-master/data/VOCdevkit2007/VOC2007/test.list'
    f_test = open(test_list, 'r')
    f_object = f_test.read()
    rows = f_object.split('\n')
    starttime1 = time.time()
    totaltime = 0
    for i in range(0, 999):
        image_name = rows[i]
        if os.path.exists(image_name) != True:
            break
        im = cv2.imread(image_name)
        data_batch, data_names, im_scale = generate_batch(im)
        starttime = time.time()
        scores, boxes, data_dict = im_detect(predictor, data_batch, data_names,
                                             im_scale)
        endtime = time.time()
        totaltime = (endtime - starttime) + totaltime
        print image_name, '\t', i, '\t', 'per:', (
            endtime - starttime
        ), 's\ttotal:', totaltime / (i + 1), 's-------------------'
    print '--------time for all image:', totaltime, 's-------------------'
    print '--------time for all image with all processes:', (
        time.time() - starttime1), 's-------------------'
    all_boxes = [[] for _ in CLASSES]
    for cls in CLASSES:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind, np.newaxis]
        keep = np.where(cls_scores >= CONF_THRESH)[0]
        dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
        keep = nms(dets)
        all_boxes[cls_ind] = dets[keep, :]

    boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]

    # print results
    print 'class ---- [[x1, x2, y1, y2, confidence]]'
    for ind, boxes in enumerate(boxes_this_image):
        if len(boxes) > 0:
            print '---------', CLASSES[ind], '---------'
            print boxes

    if vis:
        vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image,
                          CLASSES, im_scale)
    else:
        result_file = image_name.replace('.', '_result.')
        print 'results saved to %s' % result_file
        im = draw_all_detection(data_dict['data'].asnumpy(), boxes_this_image,
                                CLASSES, im_scale)
        cv2.imwrite(result_file, im)