Example #1
File: bbox_process.py Project: 711e/CSP
def parse_det_offset(Y, C, score=0.1, down=4):
    seman = Y[0][0, :, :, 0]
    height = Y[1][0, :, :, 0]
    offset_y = Y[2][0, :, :, 0]
    offset_x = Y[2][0, :, :, 1]
    y_c, x_c = np.where(seman > score)
    boxs = []
    if len(y_c) > 0:
        for i in range(len(y_c)):
            h = np.exp(height[y_c[i], x_c[i]]) * down
            w = 0.41 * h
            o_y = offset_y[y_c[i], x_c[i]]
            o_x = offset_x[y_c[i], x_c[i]]
            s = seman[y_c[i], x_c[i]]
            x1, y1 = max(0, (x_c[i] + o_x + 0.5) * down - w / 2), max(
                0, (y_c[i] + o_y + 0.5) * down - h / 2)
            boxs.append([
                x1, y1,
                min(x1 + w, C.size_test[1]),
                min(y1 + h, C.size_test[0]), s
            ])
        boxs = np.asarray(boxs, dtype=np.float32)
        keep = nms(boxs, 0.5, usegpu=False, gpu_id=0)
        boxs = boxs[keep, :]
    return boxs
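
For reference, a minimal NumPy sketch of the greedy IoU suppression that calls like nms(boxs, 0.5, usegpu=False, gpu_id=0) perform throughout these examples. The real wrappers are compiled CPU/GPU kernels whose exact signatures vary by project; this sketch only assumes the (N, 5) input of [x1, y1, x2, y2, score] rows that every example builds.

import numpy as np

def nms_sketch(dets, thresh):
    """Greedy NMS: keep the highest-scoring box, drop boxes overlapping it by IoU > thresh."""
    x1, y1, x2, y2 = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3]
    scores = dets[:, 4]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]  # box indices by descending score
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # intersection of the kept box with all remaining boxes
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        inter = np.maximum(0.0, xx2 - xx1 + 1) * np.maximum(0.0, yy2 - yy1 + 1)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        order = order[np.where(iou <= thresh)[0] + 1]  # +1: indices were offsets into order[1:]
    return keep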
Example #2
File: bbox_process.py Project: 711e/CSP
def parse_det(Y, C, score=0.1, down=4, scale='h'):
    seman = Y[0][0, :, :, 0]
    if scale == 'h':
        height = np.exp(Y[1][0, :, :, 0]) * down
        width = 0.41 * height
    elif scale == 'w':
        width = np.exp(Y[1][0, :, :, 0]) * down
        height = width / 0.41
    elif scale == 'hw':
        height = np.exp(Y[1][0, :, :, 0]) * down
        width = np.exp(Y[1][0, :, :, 1]) * down
    y_c, x_c = np.where(seman > score)
    boxs = []
    if len(y_c) > 0:
        for i in range(len(y_c)):
            h = height[y_c[i], x_c[i]]
            w = width[y_c[i], x_c[i]]
            s = seman[y_c[i], x_c[i]]
            x1, y1 = max(0, (x_c[i] + 0.5) * down - w / 2), max(
                0, (y_c[i] + 0.5) * down - h / 2)
            boxs.append([
                x1, y1,
                min(x1 + w, C.size_test[1]),
                min(y1 + h, C.size_test[0]), s
            ])
        boxs = np.asarray(boxs, dtype=np.float32)
        keep = nms(boxs, 0.5, usegpu=False, gpu_id=0)
        boxs = boxs[keep, :]
    return boxs
Example #3
def demo(net, image_name):
    """Detect object classes in an image using pre-computed object proposals."""

    # Load the demo image
    # im_file = os.path.join('/home/shelly/superstar/star/SuperNova/static/upload',image_name)
    # change filepath
    im_file = os.path.join(
        '/home/shelly/flask/star/star/SuperNova/static/upload', image_name)
    im = cv2.imread(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, im)
    timer.toc()
    print('Detection took {:.3f}s for '
          '{:d} object proposals'.format(timer.total_time, boxes.shape[0]))

    # Visualize detections for each class
    CONF_THRESH = 0.1
    NMS_THRESH = 0.3
    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1  # because we skipped background
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack(
            (cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        vis_detections(im, cls, dets, image_name, thresh=CONF_THRESH)
Example #4
def slpn_pred(ROIs, P_cls, P_regr, C, bbox_thresh=0.1, nms_thresh=0.3,roi_stride=8):
	# the classifier outputs boxes as (x, y, w, h), downscaled by roi_stride
	scores = np.squeeze(P_cls[:,:,0], axis=0)
	regr = np.squeeze(P_regr, axis=0)
	rois = np.squeeze(ROIs, axis=0)

	keep = np.where(scores>=bbox_thresh)[0]
	if len(keep)==0:
		return [], []

	rois[:, 2] += rois[:, 0]
	rois[:, 3] += rois[:, 1]
	rois = rois[keep]*roi_stride
	scores = scores[keep]
	regr = regr[keep]*np.array(C.classifier_regr_std).astype(dtype=np.float32)
	# regr = regr[keep]
	pred_boxes = bbox_transform_inv(rois, regr)
	pred_boxes = clip_boxes(pred_boxes, [C.random_crop[0],C.random_crop[1]])

	keep = np.where((pred_boxes[:,2]-pred_boxes[:,0]>=3)&
					(pred_boxes[:,3]-pred_boxes[:,1]>=3))[0]
	pred_boxes = pred_boxes[keep]
	scores = scores[keep].reshape((-1,1))

	keep = nms(np.hstack((pred_boxes, scores)), nms_thresh, usegpu=False, gpu_id=0)
	pred_boxes = pred_boxes[keep]
	scores = scores[keep]

	return pred_boxes, scores
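
slpn_pred above (and several proposal layers below) decodes regression output with bbox_transform_inv. Here is a minimal sketch of the standard R-CNN delta decoding this function conventionally implements, assuming the usual (dx, dy, dw, dh) parameterization; the projects' own versions may differ in details.

import numpy as np

def bbox_transform_inv_sketch(boxes, deltas):
    """Apply (dx, dy, dw, dh) deltas to [x1, y1, x2, y2] boxes."""
    widths = boxes[:, 2] - boxes[:, 0] + 1.0
    heights = boxes[:, 3] - boxes[:, 1] + 1.0
    ctr_x = boxes[:, 0] + 0.5 * widths
    ctr_y = boxes[:, 1] + 0.5 * heights

    dx, dy, dw, dh = deltas[:, 0], deltas[:, 1], deltas[:, 2], deltas[:, 3]
    pred_ctr_x = dx * widths + ctr_x   # shift the center
    pred_ctr_y = dy * heights + ctr_y
    pred_w = np.exp(dw) * widths       # scale the size (log-space deltas)
    pred_h = np.exp(dh) * heights

    pred = np.zeros_like(deltas)
    pred[:, 0] = pred_ctr_x - 0.5 * pred_w  # x1
    pred[:, 1] = pred_ctr_y - 0.5 * pred_h  # y1
    pred[:, 2] = pred_ctr_x + 0.5 * pred_w  # x2
    pred[:, 3] = pred_ctr_y + 0.5 * pred_h  # y2
    return pred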
Example #5
def parse_det_offset_batch(Y, C, score=0.1, down=4):
    batch_size = Y[0].shape[0]
    boxes = []
    for bnum in range(batch_size):
        seman = Y[0][bnum, :, :, 0]
        height = Y[1][bnum, :, :, 0]
        offset_y = Y[2][bnum, :, :, 0]
        offset_x = Y[2][bnum, :, :, 1]
        y_c, x_c = np.where(seman > score)
        boxs_img = []
        if len(y_c) > 0:
            for i in range(len(y_c)):
                h = np.exp(height[y_c[i], x_c[i]]) * down
                w = 0.41 * h
                o_y = offset_y[y_c[i], x_c[i]]
                o_x = offset_x[y_c[i], x_c[i]]
                s = seman[y_c[i], x_c[i]]
                x1, y1 = max(0, (x_c[i] + o_x + 0.5) * down - w / 2), max(
                    0, (y_c[i] + o_y + 0.5) * down - h / 2)
                boxs_img.append([
                    x1, y1,
                    min(x1 + w, C.size_test[1]),
                    min(y1 + h, C.size_test[0]), s
                ])
            boxs_img = np.asarray(boxs_img, dtype=np.float32)
            keep = nms(boxs_img, 0.5, usegpu=False, gpu_id=0)
            boxs_img = boxs_img[keep, :]
        boxes.append(boxs_img)
    return boxes
Example #6
    def detect(self, image):
        scores, boxes = self.im_detect(image)
        all_regions = {}

        # process all classes except the background
        for cls_ind in range(1, self._num_classes):
            cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
            cls_scores = scores[:, cls_ind]
            detections = np.hstack(
                (cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
            keep = nms(detections, self._nms_thresh)
            detections = detections[keep, :]

            regions = []
            #import pdb; pdb.set_trace()
            for detection in detections:
                overlap, idx = helpers.iou(np.asarray(regions), detection)
                if overlap < self._iou_thresh and detection[
                        4] > self._conf_thresh:
                    region = (int(detection[0]), int(detection[1]),
                              int(math.ceil(detection[2])),
                              int(math.ceil(detection[3])), detection[4])
                    regions.append(region)

            object_class = cls_ind
            if object_class < len(self._classes):
                object_class = self._classes[object_class]
            all_regions[object_class] = regions

        return all_regions
Example #7
def apply_nms(all_boxes, thresh):
  """Apply non-maximum suppression to all predicted boxes output by the
  test_net method.
  """
  num_classes = len(all_boxes)
  num_images = len(all_boxes[0])
  nms_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]
  for cls_ind in range(num_classes):
    for im_ind in range(num_images):
      dets = all_boxes[cls_ind][im_ind]
      if len(dets) == 0:
        continue

      x1 = dets[:, 0]
      y1 = dets[:, 1]
      x2 = dets[:, 2]
      y2 = dets[:, 3]
      scores = dets[:, 4]
      inds = np.where((x2 > x1) & (y2 > y1))[0]
      dets = dets[inds, :]
      if len(dets) == 0:
        continue

      keep = nms(dets, thresh)
      if len(keep) == 0:
        continue
      nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
  return nms_boxes
Example #8
def pred_det(anchors, cls_pred, regr_pred, C, step=1):
    if step == 1:
        scores = cls_pred[0, :, :]
    elif step == 2:
        scores = anchors[:, -1:] * cls_pred[0, :, :]
    elif step == 3:
        scores = anchors[:, -2:-1] * anchors[:, -1:] * cls_pred[0, :, :]
    A = np.copy(anchors[:, :4])
    bbox_deltas = regr_pred.reshape((-1, 4))
    bbox_deltas = bbox_deltas * np.array(
        C.classifier_regr_std).astype(dtype=np.float32)

    proposals = bbox_transform_inv(A, bbox_deltas)
    proposals = clip_boxes(proposals, [C.random_crop[0], C.random_crop[1]])
    keep = filter_boxes(proposals, C.roi_stride)
    proposals = proposals[keep, :]
    scores = scores[keep]

    order = scores.ravel().argsort()[::-1]
    order = order[:C.pre_nms_topN]
    proposals = proposals[order, :]
    scores = scores[order]

    keep = np.where(scores > C.scorethre)[0]
    proposals = proposals[keep, :]
    scores = scores[keep]
    keep = nms(np.hstack((proposals, scores)),
               C.overlap_thresh,
               usegpu=False,
               gpu_id=0)

    keep = keep[:C.post_nms_topN]
    proposals = proposals[keep, :]
    scores = scores[keep]
    return proposals, scores
Example #9
def parse_det_offset(pos,
                     height,
                     offset,
                     size,
                     score=0.1,
                     down=4,
                     nms_thresh=0.3):
    pos = np.squeeze(pos)
    height = np.squeeze(height)
    offset_y = offset[0, 0, :, :]
    offset_x = offset[0, 1, :, :]
    y_c, x_c = np.where(pos > score)
    boxs = []
    if len(y_c) > 0:
        for i in range(len(y_c)):
            h = np.exp(height[y_c[i], x_c[i]]) * down
            w = 0.41 * h
            o_y = offset_y[y_c[i], x_c[i]]
            o_x = offset_x[y_c[i], x_c[i]]
            s = pos[y_c[i], x_c[i]]
            x1, y1 = max(0, (x_c[i] + o_x + 0.5) * down - w / 2), max(
                0, (y_c[i] + o_y + 0.5) * down - h / 2)
            boxs.append(
                [x1, y1, min(x1 + w, size[1]),
                 min(y1 + h, size[0]), s])
        boxs = np.asarray(boxs, dtype=np.float32)
        keep = nms(boxs, nms_thresh, usegpu=False, gpu_id=0)
        boxs = boxs[keep, :]
    return boxs
Example #10
def inference(model_file, device, records, result_queue):

    def val_func():
        pred_boxes = net(net.inputs)
        return pred_boxes

    mge.set_default_device('xpu{}'.format(device))
    net = network.Network()
    net = load_model(net, model_file)
    net.eval()
    
    for record in records:

        np.set_printoptions(precision=2, suppress=True)
        image, gt_boxes, im_info, ID = get_data(record, device)

        net.inputs["image"].set_value(image.astype(np.float32))
        net.inputs["im_info"].set_value(im_info)
        del record, image

        pred_boxes = val_func().numpy()

        pred_bbox = pred_boxes[:, 1]
        scale = im_info[0, 2]

        cls_dets = pred_bbox[:, :4] / scale

        if config.test_nms_version == 'set_nms':
            n = cls_dets.shape[0] // 2
            idents = np.tile(np.linspace(0, n-1, n).reshape(-1, 1),(1, 2)).reshape(-1, 1)
            pred_boxes = np.hstack([cls_dets, pred_bbox[:,4:5], idents])
            flag = pred_boxes[:, 4] >= config.test_cls_threshold
            cls_dets = pred_boxes[flag]
            keep = emd_cpu_nms(cls_dets, config.test_nms)
            cls_dets = cls_dets[keep, :5].astype(np.float64)

        elif config.test_nms_version == 'normal_nms':
            pred_boxes = np.hstack([cls_dets, pred_bbox[:, 4:5]])
            flag = pred_boxes[:, 4] >= config.test_cls_threshold
            cls_dets = pred_boxes[flag]
            keep = nms(cls_dets.astype(np.float32), config.test_nms)
            cls_dets = cls_dets[keep, :5].astype(np.float64)
        else:
            raise NotImplementedError(
                'unsupported test_nms_version: {}'.format(config.test_nms_version))

        pred_tags = np.ones([cls_dets.shape[0],]).astype(np.float64)
        gt_boxes = gt_boxes.astype(np.float64)

        dtboxes = boxes_dump(cls_dets[:, :5], pred_tags, False)
        gtboxes = boxes_dump(gt_boxes, None, True)

        height, width = int(im_info[0, 3]), int(im_info[0, 4])
        result_dict = dict(ID=ID, height=height, width=width,
                dtboxes = dtboxes, gtboxes = gtboxes)
        result_queue.put_nowait(result_dict)
Example #11
def eval_batch(scene_list, opts):
    """ evaluate a batch of bbox files """
    mAP = 0
    n_obj = 0
    if opts.scene_name != '':  # eval one scene
        scene_list = [opts.scene_name]
    for obj in opts.labels:  # iter thru each obj
        if obj == 'background':  # skip background class
            continue
        opts.test_cls = obj
        dets_all = np.empty((0, 6))
        scene_id = 0
        for scene in scene_list:  # iter thru each scene
            # read bbox file
            dets = np.genfromtxt('./data/out/' + opts.eval_dir + '/' +
                                 opts.eval_prefix + '_' + scene + '_' +
                                 opts.test_cls + '.txt')
            # remove the trash
            trash_idx = np.nonzero(dets < 0)[0][::2]
            dets = np.delete(dets, trash_idx, axis=0)
            # append scene_id to dets
            if dets.ndim > 1:
                dets = np.hstack(
                    [dets, scene_id * np.ones((dets.shape[0], 1))])
            else:
                dets = np.hstack([dets, scene_id])

            # use nms to prune dets
            if opts.nms_thresh > 0.0:
                # keep = selective_nms(dets, opts)
                keep = nms(dets.astype(np.float32), opts.nms_thresh, opts)  # pylint: disable=E1101
                nms_dets = dets[keep, :]
                dets_all = np.vstack([dets_all, nms_dets])  # adding dets
            else:
                dets_all = np.vstack([dets_all, dets])  # adding dets

            scene_id += 1  # increment scene_id

        # filter low likelihood bbox
        if opts.low_thresh > 0.0:
            low_idx = np.nonzero(dets_all[:, 4] < opts.low_thresh)
            dets_all = np.delete(dets_all, low_idx, axis=0)
        prec, rec = calc_pr_batch(dets_all, scene_list, opts)
        ap = calc_ap(rec, prec)
        print('Eval: {}, AP: {}'.format(obj, ap))
        if ap > 0:
            mAP += ap
            n_obj += 1
        if opts.viz_pr:
            plot_pr(prec, rec, ap, opts.test_cls)

    mAP = mAP / n_obj
    print('mAP: {}'.format(mAP))
    if opts.viz_pr:
        plt.show()

    return mAP
Example #12
def main():
    data = read_csv(filename="../../ImageSets/gen_box.csv", sep=',')

    assert (data.shape[1] % 5 == 0)
    for i in range(data.shape[0]):
        dets = data[i].reshape(-1, 5)
        init_time = time.time()
        for j in range(50):
            nms(dets, 0.5, force_cpu=True)
        cpu_time = (time.time() - init_time)
        print("number of picture is: {}, the cpu_time is: {} \n".format(
            dets.shape[0], cpu_time))

        np.random.shuffle(dets)
        init_time = time.time()
        for j in range(50):
            nms(dets, 0.5, force_cpu=True)
        cpu_time = (time.time() - init_time)
        print("After shuffle, the cpu_time is: {} \n".format(cpu_time))
Example #13
def proposal_layer(rpn_cls_prob, rpn_bbox_pred, im_info, cfg_key, _feat_stride,
                   anchors, num_anchors):
    """A simplified version compared to fast/er RCNN
     For details please see the technical report
  """
    if type(cfg_key) == bytes:
        cfg_key = cfg_key.decode('utf-8')
    pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N  # RPN_PRE_NMS_TOP_N = 6000
    post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
    # __C.TEST.RPN_POST_NMS_TOP_N = 300: max number of boxes kept after NMS

    nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
    # __C.TEST.RPN_NMS_THRESH = 0.7

    # Get the scores and bounding boxes
    scores = rpn_cls_prob[:, :, :, num_anchors:]
    rpn_bbox_pred = rpn_bbox_pred.reshape((-1, 4))
    scores = scores.reshape((-1, 1))

    proposals = bbox_transform_inv(anchors, rpn_bbox_pred)
    # shape = (length, 4)
    # proposals are the actual predicted box coordinates:
    # anchors mapped from the feature map onto the original image, combined
    # with the feature-map regression values rpn_bbox_pred

    proposals = clip_boxes(proposals, im_info[:2])
    # clip the predicted coordinates so they stay within the original image

    # Pick the top region proposals
    order = scores.ravel().argsort()[::-1]
    if pre_nms_topN > 0:
        order = order[:pre_nms_topN]
    proposals = proposals[order, :]
    scores = scores[order]

    # Non-maximal suppression
    keep = nms(np.hstack((proposals, scores)), nms_thresh)

    # Pick the top region proposals after NMS
    if post_nms_topN > 0:
        keep = keep[:post_nms_topN]
    proposals = proposals[keep, :]
    scores = scores[keep]

    # Only support single image as input
    batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
    blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))

    return blob, scores
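
clip_boxes in the proposal layers restricts decoded proposals to the image. A minimal sketch of the usual clamping, assuming im_shape is (height, width) as in the im_info[:2] passed above:

import numpy as np

def clip_boxes_sketch(boxes, im_shape):
    """Clamp [x1, y1, x2, y2] columns to the image boundary."""
    boxes[:, 0::4] = np.clip(boxes[:, 0::4], 0, im_shape[1] - 1)  # x1 within [0, width)
    boxes[:, 1::4] = np.clip(boxes[:, 1::4], 0, im_shape[0] - 1)  # y1 within [0, height)
    boxes[:, 2::4] = np.clip(boxes[:, 2::4], 0, im_shape[1] - 1)  # x2
    boxes[:, 3::4] = np.clip(boxes[:, 3::4], 0, im_shape[0] - 1)  # y2
    return boxes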
Example #14
def get_proposal(all_anchors, cls_layer, regr_layer, C, overlap_thresh=0.7,pre_nms_topN=1000,post_nms_topN=300, roi_stride=8):
	A = np.copy(all_anchors[:,:4])
	scores = cls_layer.reshape((-1,1))
	bbox_deltas = regr_layer.reshape((-1,4))
	proposals = bbox_transform_inv(A, bbox_deltas)
	proposals = clip_boxes(proposals, [C.random_crop[0],C.random_crop[1]])
	keep = filter_boxes(proposals, roi_stride)
	proposals = proposals[keep,:]
	scores = scores[keep]
	order = scores.ravel().argsort()[::-1]
	order = order[:pre_nms_topN]
	proposals = proposals[order,:]
	scores = scores[order]
	keep = nms(np.hstack((proposals, scores)), overlap_thresh, usegpu=False, gpu_id=0)
	keep = keep[:post_nms_topN]
	proposals = proposals[keep,:]
	return proposals
Example #15
File: bbox_process.py Project: 711e/CSP
def parse_det_bottom(Y, C, score=0.1):
    seman = Y[0][0, :, :, 0]
    height = Y[1][0, :, :, 0]
    y_c, x_c = np.where(seman > score)
    boxs = []
    if len(y_c) > 0:
        for i in range(len(y_c)):
            h = np.exp(height[y_c[i], x_c[i]]) * 4
            w = 0.41 * h
            s = seman[y_c[i], x_c[i]]
            x1, y1 = max(0, x_c[i] * 4 + 2 - w / 2), max(0, y_c[i] * 4 + 2 - h)
            boxs.append([
                x1, y1,
                min(x1 + w, C.size_test[1]),
                min(y1 + h, C.size_test[0]), s
            ])
        boxs = np.asarray(boxs, dtype=np.float32)
        keep = nms(boxs, 0.5, usegpu=False, gpu_id=0)
        boxs = boxs[keep, :]
    return boxs
Example #16
def apply_nms(all_boxes, thresh):
    """Apply non-maximum suppression to all predicted boxes output by the
    test_net method.
    """
    num_classes = len(all_boxes)
    num_images = len(all_boxes[0])
    nms_boxes = [[[] for _ in xrange(num_images)] for _ in xrange(num_classes)]
    for cls_ind in xrange(num_classes):
        for im_ind in xrange(num_images):
            dets = all_boxes[cls_ind][im_ind]
            if len(dets) == 0:
                continue
            # CPU NMS is much faster than GPU NMS when the number of boxes
            # is relative small (e.g., < 10k)
            # TODO(rbg): autotune NMS dispatch
            keep = nms(dets, thresh, force_cpu=True)
            if len(keep) == 0:
                continue
            nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
    return nms_boxes
Example #17
def proposal_layer(rpn_cls_prob, rpn_bbox_pred, im_info, _feat_stride, anchors, num_anchors, mode='train'):
    """A simplified version compared to fast/er RCNN
       For details please see the technical report
    """

    pre_nms_topN = 12000
    post_nms_topN = 2000
    nms_thresh = 0.7

    if mode == 'test':
        pre_nms_topN = 3000
        post_nms_topN = 300

    # Get the scores and bounding boxes
    scores = rpn_cls_prob[:, :, :, num_anchors:]
    rpn_bbox_pred = rpn_bbox_pred.reshape((-1, 4))
    scores = scores.reshape((-1, 1))
    proposals = bbox_transform_inv(anchors, rpn_bbox_pred)
    proposals = clip_boxes(proposals, im_info[:2])

    # Pick the top region proposals
    order = scores.ravel().argsort()[::-1]
    if pre_nms_topN > 0:
        order = order[:pre_nms_topN]
    proposals = proposals[order, :]
    scores = scores[order]

    # Non-maximal suppression
    keep = nms(np.hstack((proposals, scores)), nms_thresh)

    # Pick the top region proposals after NMS
    if post_nms_topN > 0:
        keep = keep[:post_nms_topN]
    proposals = proposals[keep, :]
    scores = scores[keep]

    # Only support single image as input
    batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
    blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))

    return blob, scores
Example #18
def _proposal_layer_py(rpn_bbox_cls_prob, rpn_bbox_pred, im_dims, cfg_key,
                       _feat_stride, anchor_scales):
    '''
    # Algorithm:
    #
    # for each (H, W) location i
    #   generate A anchor boxes centered on cell i
    #   apply predicted bbox deltas at cell i to each of the A anchors
    # clip predicted boxes to image
    # remove predicted boxes with either height or width < threshold
    # sort all (proposal, score) pairs by score from highest to lowest
    # take top pre_nms_topN proposals before NMS
    # apply NMS with threshold 0.7 to remaining proposals
    # take after_nms_topN proposals after NMS
    # return the top proposals (-> RoIs top, scores top)
    # rpn_bbox_cls_prob shape : 1 , h , w , 2*9
    # rpn_bbox_pred shape : 1 , h , w , 4*9
    '''
    _anchors = generate_anchor.generate_anchors(
        scales=np.array(anchor_scales))  # _anchors: (9, 4)
    _num_anchors = _anchors.shape[0]  # 9
    rpn_bbox_cls_prob = np.transpose(
        rpn_bbox_cls_prob, [0, 3, 1, 2])  # rpn bbox _cls prob # 1, 18 , h , w
    rpn_bbox_pred = np.transpose(rpn_bbox_pred, [0, 3, 1, 2])  # 1, 36 , h , w
    # Only minibatch of 1 supported
    assert rpn_bbox_cls_prob.shape[0] == 1, \
        'Only single item batches are supported'
    if cfg_key == 'TRAIN':
        pre_nms_topN = cfg.TRAIN.RPN_PRE_NMS_TOP_N  #12000
        post_nms_topN = cfg.TRAIN.RPN_POST_NMS_TOP_N  # 2000
        nms_thresh = cfg.TRAIN.RPN_NMS_THRESH  #0.7
        min_size = cfg.TRAIN.RPN_MIN_SIZE  # 16
    else:  # cfg_key == 'TEST':
        pre_nms_topN = cfg.TEST.RPN_PRE_NMS_TOP_N
        post_nms_topN = cfg.TEST.RPN_POST_NMS_TOP_N
        nms_thresh = cfg.TEST.RPN_NMS_THRESH
        min_size = cfg.TEST.RPN_MIN_SIZE
    # the first set of _num_anchors channels are bg probs
    # the second set are the fg probs

    scores = rpn_bbox_cls_prob[:,
                               _num_anchors:, :, :]  # 1, 18  , H, W --> 1, 9, H, W
    # 1. Generate proposals from bbox deltas and shifted anchors
    height, width = scores.shape[-2:]
    # Enumerate all shifts
    shift_x = np.arange(0, width) * _feat_stride
    shift_y = np.arange(0, height) * _feat_stride
    shift_x, shift_y = np.meshgrid(shift_x, shift_y)
    shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(),
                        shift_y.ravel())).transpose()
    # Enumerate all shifted anchors:
    # add A anchors (1, A, 4) to
    # cell K shifts (K, 1, 4) to get
    # shift anchors (K, A, 4)
    # reshape to (K*A, 4) shifted anchors
    A = _num_anchors
    K = shifts.shape[0]

    #anchors = _anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))
    anchors = np.array([])
    for i in range(len(_anchors)):
        if i == 0:
            anchors = np.add(shifts, _anchors[i])
        else:
            anchors = np.concatenate((anchors, np.add(shifts, _anchors[i])),
                                     axis=0)
    anchors = anchors.reshape((K * A, 4))

    ## BBOX TRANSPOSE (1,4*A,H,W --> A*H*W,4)
    shape = rpn_bbox_pred.shape  # 1,4*A , H, W
    rpn_bbox_pred = rpn_bbox_pred.reshape(
        [1, 4, (shape[1] // 4) * shape[2], shape[3]])
    rpn_bbox_pred = rpn_bbox_pred.transpose([0, 2, 3, 1])
    rpn_bbox_pred = rpn_bbox_pred.reshape([-1, 4])
    bbox_deltas = rpn_bbox_pred
    ## CLS TRANSPOSE

    scores = scores.transpose((0, 2, 3, 1)).reshape((-1, 1))  # (h * w * A , 1)
    scores_ori = scores
    ## BBOX TRANSPOSE Using Anchor
    proposals = bbox_transform_inv(anchors, bbox_deltas)
    proposals_ori = proposals
    proposals = clip_boxes(
        proposals, im_dims)  # shrink proposals that extend beyond the image size

    keep = _filter_boxes(proposals,
                         min_size)  # min_size = 16; only boxes at least min_size survive
    proposals = proposals[keep, :]
    scores = scores[keep]

    # 4. sort all (proposal, score) pairs by score from highest to lowest
    # 5. take top pre_nms_topN (e.g. 6000)
    #print 'scores : ', np.shape(scores)  # 421, 13 <-- this 13 keeps changing..
    order = scores.ravel().argsort()[::-1]  # descending order: largest score first

    if pre_nms_topN > 0:  # 12000
        order = order[:pre_nms_topN]
    #print np.sum([scores>0.7])
    scores = scores[order]

    # 6. apply nms (e.g. threshold = 0.7)
    # 7. take after_nms_topN (e.g. 300)
    # 8. return the top proposals (-> RoIs top)
    #print np.shape(np.hstack((proposals, scores)))  # rows of [x_start, y_start, x_end, y_end, score]
    keep = nms(np.hstack((proposals, scores)),
               nms_thresh)  # nms_thresh = 0.7 | hstack --> axis=1
    if post_nms_topN > 0:
        keep = keep[:post_nms_topN]
    proposals = proposals[keep, :]
    scores = scores[keep]

    # Output rois blob
    # Our RPN implementation only supports a single input image, so all
    # batch inds are 0
    batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
    blob = np.hstack((batch_inds, proposals.astype(np.float32,
                                                   copy=False)))  # N , 5
    #blob=np.hstack((blob , scores))

    return blob, scores, proposals_ori, scores_ori
Example #19
File: test.py Project: AIYongY/model
def test_net(sess, net, imdb, weights_filename, max_per_image=100, thresh=0.):
    """Test a Fast R-CNN network on an image database."""
    np.random.seed(cfg.RNG_SEED)  # 3
    num_images = len(imdb.image_index)
    # all detections are collected into:
    #  all_boxes[cls][image] = N x 5 array of detections in
    #  (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in range(num_images)]
                 for _ in range(imdb.num_classes)]

    output_dir = get_output_dir(imdb, weights_filename)
    # timers
    _t = {'im_detect': Timer(), 'misc': Timer()}

    for i in range(num_images):
        im = cv2.imread(imdb.image_path_at(i))

        _t['im_detect'].tic()
        # im_detect returns foreground-probability scores and the predicted boxes
        scores, boxes = im_detect(sess, net, im)
        # scores come from self._predictions['cls_prob']: per-class cls_score after softmax
        # pred_boxes = bbox_transform_inv(boxes, box_deltas):
        # the RPN-branch box_deltas act as (dx, dy, dw, dh) to regress the filtered boxes;
        # pred_boxes are the anchors after this regression
        # return scores, pred_boxes

        _t['im_detect'].toc()

        _t['misc'].tic()

        # skip j = 0, because it's the background class
        for j in range(1, imdb.num_classes):
            inds = np.where(scores[:, j] > thresh)[0]
            cls_scores = scores[inds, j]
            cls_boxes = boxes[inds, j * 4:(j + 1) * 4]
            cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
              .astype(np.float32, copy=False)
            keep = nms(cls_dets, cfg.TEST.NMS)
            cls_dets = cls_dets[keep, :]
            all_boxes[j][i] = cls_dets

        # Limit to max_per_image detections *over all classes*
        if max_per_image > 0:
            image_scores = np.hstack(
                [all_boxes[j][i][:, -1] for j in range(1, imdb.num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in range(1, imdb.num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]
        _t['misc'].toc()

        print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
            .format(i + 1, num_images, _t['im_detect'].average_time,
                _t['misc'].average_time))

    det_file = os.path.join(output_dir, 'detections.pkl')
    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)

    print('Evaluating detections')
    imdb.evaluate_detections(all_boxes, output_dir)
Example #20
import sys
sys.path.insert(0, '/home/yfji/Workspace/PyTorch/DLMOT-FRCNN/nms')
from nms_wrapper import nms
import torch
import numpy as np

bboxes=np.array([[0,0,50,50],
                [1,1,51,51],
                [2,2,52,52],
                [25,25,75,75],
                [26,26,76,76],
                [100,100,150,150],
                [101,101,151,151]])

scores=np.array([0.9,0.8,0.7,0.9,0.5,0.2,0.6])

bboxes_with_score=np.hstack((bboxes, scores.reshape(-1,1)))
bboxes_with_score[:,0]+=20

bboxes_pth=torch.from_numpy(bboxes_with_score).float().cuda()

keep=nms(bboxes_pth, 0.7)
print(keep)
Example #21
        r_box = np.array([x0,y0,x1,y1,x2,y2,x3,y3])
    return r_box



if __name__ == '__main__':
    boxes = np.array([[110, 110, 210, 210, 0,       0.88],
                      [100, 100, 200, 200, 0,       0.99],  # res1
                      [100, 100, 200, 200, 10,     0.66],
                      [250, 250, 350, 350, 0.,      0.77]],  # res2
                      dtype=np.float32)
    
    dets_th=torch.from_numpy(boxes).cuda()
    iou_thr = 0.1
    print(dets_th.shape)
    inds = nms(dets_th, iou_thr)
    print(inds)     
    
    img = np.zeros((1000,1000,3), np.uint8)
    img.fill(255)
    
    boxes = boxes[:, :-1]
    cbox = (255,0,0)    # OpenCV color order is BGR!!
    ctar = (0,0,255)    # red is target!!
    boxes = [get_rotated_coors(i).reshape(-1,2).astype(np.int32)  for i in boxes]
    for idx, box in enumerate(boxes):
        color = ctar if  idx in inds else cbox
        img = cv2.polylines(img,[box],True,color,1)
        cv2.imshow('anchor_show', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()             
Example #22
def nms_detections(boxes_pred, scores, nms_thresh, force_cpu=False):
    dets = np.hstack((boxes_pred, scores[:, np.newaxis])).astype(np.float32)

    return nms(dets, nms_thresh, force_cpu)
Example #23
def demo(sess, net, image_name):
    """Detect object classes in an image using pre-computed object proposals."""
    # im_names = ['000456.jpg', '000542.jpg', '001150.jpg',
    #             '001763.jpg', '004545.jpg']
    # images are fed in one at a time:
    # net = vgg16()
    # demo(sess, net, im_name)

    im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
    im = cv2.imread(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    # inputs: net = vgg16(), im is a single image
    scores, boxes = im_detect(sess, net, im)

    # post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N  # __C.TEST.RPN_POST_NMS_TOP_N = 300: max boxes kept after NMS
    # nms_thresh = cfg[cfg_key].RPN_NMS_THRESH  # __C.TEST.RPN_NMS_THRESH = 0.7

    # scores come from self._predictions['cls_prob']: per-class cls_score after softmax
    # pred_boxes = bbox_transform_inv(boxes, box_deltas):
    # the RPN-branch box_deltas act as (dx, dy, dw, dh) to regress the filtered boxes;
    # pred_boxes are the anchors after this regression
    # return scores, pred_boxes



    timer.toc()
    print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))

    # Visualize detections for each class
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls_ind, cls in enumerate(CLASSES[1:]):  # CLASSES are the English class labels
        cls_ind += 1 # because we skipped background
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        # slice one class's boxes out of boxes: shape (?, 21*4) -> (?, 4)
        cls_scores = scores[:, cls_ind]  # scores has shape (?, 21) -> (?,)
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)  # (?, 5)


        # dets rows are [x1, y1, x2, y2, score]; NMS_THRESH = 0.3
        # all boxes predict 21 classes, but NMS runs one class at a time:
        # for class k it receives every box's coordinates and score for class k
        keep = nms(dets, NMS_THRESH)  # why use the (cpu/gpu) gpu_nms.py kernels?
        # a pure-Python nms is simple and convenient, but slow;
        # Cython quickly generates Python extension modules: syntactically a mix
        # of Python and C, it injects native C speed where Python hits a
        # bottleneck, without rewriting the program in C, so both development
        # and execution efficiency improve; Cython does the glue work in between

        # explanation: https://www.cnblogs.com/king-lps/p/9031568.html
        # the earlier NMS sits on the RPN path, which only makes proposals:
        # it prunes the many boxes proposed from VGG16 features; here NMS is
        # applied a second time, after that filtering


        #CONF_THRESH = 0.8
        #NMS_THRESH = 0.3
        dets = dets[keep, :]
        # im: the input image, resized to fit within 600x1000
        # cls: the actual English class label
        # dets: the [x1, y1, x2, y2, score] rows kept by NMS at threshold 0.3

        # the boxes surviving NMS for this class are the final outputs, labeled
        # with cls and drawn above CONF_THRESH = 0.8
        vis_detections(im, cls, dets, thresh=CONF_THRESH)
Example #24
def test_net(net, imdb, max_per_image=100, thresh=0.01, vis=False):
    """Test a Fast R-CNN network on an image database."""
    num_images = len(imdb.image_index)
    # all detections are collected into:
    #    all_boxes[cls][image] = N x 5 array of detections in
    #    (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in xrange(num_images)]
                 for _ in xrange(imdb.num_classes)]

    output_dir = get_output_dir(imdb, net)

    # timers
    _t = {
        'im_preproc': Timer(),
        'im_net': Timer(),
        'im_postproc': Timer(),
        'misc': Timer()
    }

    if not cfg.TEST.HAS_RPN:
        roidb = imdb.roidb

    for i in xrange(num_images):
        # filter out any ground truth boxes
        if cfg.TEST.HAS_RPN:
            box_proposals = None
        else:
            # The roidb may contain ground-truth rois (for example, if the roidb
            # comes from the training or val split). We only want to evaluate
            # detection on the *non*-ground-truth rois. We select those the rois
            # that have the gt_classes field set to 0, which means there's no
            # ground truth.
            box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]

        im = cv2.imread(imdb.image_path_at(i))
        scores, boxes = im_detect(net, im, _t, box_proposals)

        _t['misc'].tic()
        # skip j = 0, because it's the background class
        for j in xrange(1, imdb.num_classes):
            inds = np.where(scores[:, j] > thresh)[0]
            cls_scores = scores[inds, j]
            cls_boxes = boxes[inds, j * 4:(j + 1) * 4]
            #cls_boxes = boxes[inds, 4:8] # 0.6971 vs 0.74
            cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
                .astype(np.float32, copy=False)
            keep = nms(cls_dets, cfg.TEST.NMS)
            #SOFT_NMS=1
            #keep = soft_nms(cls_dets, method=SOFT_NMS)
            dets_NMSed = cls_dets[keep, :]
            if cfg.TEST.BBOX_VOTE:
                cls_dets = bbox_vote(dets_NMSed, cls_dets)
            else:
                cls_dets = dets_NMSed

            if vis:
                vis_detections(im, imdb.classes[j], cls_dets)
            all_boxes[j][i] = cls_dets

        # Limit to max_per_image detections *over all classes*
        if max_per_image > 0:
            image_scores = np.hstack(
                [all_boxes[j][i][:, -1] for j in xrange(1, imdb.num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in xrange(1, imdb.num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]
        _t['misc'].toc()

        print 'im_detect: {:d}/{:d}  net {:.3f}s  preproc {:.3f}s  postproc {:.3f}s  misc {:.3f}s' \
              .format(i + 1, num_images, _t['im_net'].average_time,
                      _t['im_preproc'].average_time, _t['im_postproc'].average_time,
                      _t['misc'].average_time)

    det_file = os.path.join(output_dir, 'detections.pkl')
    with open(det_file, 'wb') as f:
        cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)

    print 'Evaluating detections'
    imdb.evaluate_detections(all_boxes, output_dir)