Example #1
def get_minibatch_mul(roidb):
    """Given a roidb, construct a minibatch sampled from it."""
    # We collect blobs from each image onto a list and then concat them into a
    # single tensor, hence we initialize each blob to an empty list
    blobs = {k: [] for k in get_minibatch_blob_names()}
    #print("minibatch.py 157 ---------{}".format(len(roidb)))
    # Get the input image blob, formatted for caffe2
    im_blob, im_scales, roidb_ = _get_img_blob_mul(roidb)
    blobs['data'] = im_blob
    if cfg.RPN.RPN_ON:
        # RPN-only or end-to-end Faster/Mask R-CNN
        valid = rpn_roi_data.add_rpn_blobs(blobs, im_scales, roidb_)
        return blobs, valid
    if cfg.RETINANET.RETINANET_ON:
        im_width, im_height = im_blob.shape[3], im_blob.shape[2]
        # im_width, im_height corresponds to the network input: padded image
        # (if needed) width and height. We pass it as input and slice the data
        # accordingly so that we don't need to use SampleAsOp
        valid = retinanet_roi_data.add_retinanet_blobs(blobs, im_scales,
                                                       roidb_, im_width,
                                                       im_height)
    else:
        # Fast R-CNN like models trained on precomputed proposals
        valid = fast_rcnn_roi_data.add_fast_rcnn_blobs(blobs, im_scales,
                                                       roidb_)
    return blobs, valid
def get_minibatch_s6(roidb, roidb_noclass):
    """Given a roidb, construct a minibatch sampled from it."""
    # We collect blobs from each image onto a list and then concat them into a
    # single tensor, hence we initialize each blob to an empty list
    blobs = {k: [] for k in get_minibatch_blob_names()}
    # Get the input image blob, formatted for caffe2
    im_blob, im_scales, error_flag = _get_image_blob_s6(roidb, roidb_noclass)
    blobs['data'] = im_blob
    if cfg.RPN.RPN_ON:
        # RPN-only or end-to-end Faster/Mask R-CNN
        valid = rpn_roi_data.add_rpn_blobs(blobs, im_scales, roidb)
    elif cfg.RETINANET.RETINANET_ON:
        im_width, im_height = im_blob.shape[3], im_blob.shape[2]
        # im_width, im_height corresponds to the network input: padded image
        # (if needed) width and height. We pass it as input and slice the data
        # accordingly so that we don't need to use SampleAsOp
        valid = retinanet_roi_data.add_retinanet_blobs(blobs, im_scales, roidb,
                                                       im_width, im_height)
    else:
        # Fast R-CNN like models trained on precomputed proposals
        valid = fast_rcnn_roi_data.add_fast_rcnn_blobs(blobs, im_scales, roidb)
    return blobs, valid
def get_minibatch(roidb):
    """载入roidb,构建一个minibatch采样."""
    # We collect blobs from each image onto a list and then concat them into a
    # single tensor, hence we initialize each blob to an empty list
    # 生成空字典数据  如  {'data': [], 'fpn_labels_int32_wide_fpn1': []}等
    #print("现在执行的是无扩增版本")
    blobs = {k: [] for k in get_minibatch_blob_names()}
    print("blobs is ", blobs)
    # Get the input image blob, formatted for caffe2
    im_blob, im_scales = _get_image_blob(roidb)
    blobs['data'] = im_blob
    if cfg.RPN.RPN_ON:
        # RPN-only or end-to-end Faster/Mask R-CNN
        valid = rpn_roi_data.add_rpn_blobs(blobs, im_scales, roidb)
    elif cfg.RETINANET.RETINANET_ON:
        im_width, im_height = im_blob.shape[3], im_blob.shape[2]
        # im_width, im_height corresponds to the network input: padded image
        # (if needed) width and height. We pass it as input and slice the data
        # accordingly so that we don't need to use SampleAsOp
        valid = retinanet_roi_data.add_retinanet_blobs(blobs, im_scales, roidb,
                                                       im_width, im_height)
    else:
        # Fast R-CNN like models trained on precomputed proposals
        valid = fast_rcnn_roi_data.add_fast_rcnn_blobs(blobs, im_scales, roidb)
    return blobs, valid
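
The RetinaNet branches above read the network input size off the image blob itself. A minimal sketch (plain NumPy, hypothetical shapes) of why shape[2] is the height and shape[3] is the width of a Caffe2 NCHW blob:

import numpy as np

# Caffe2 image blobs are NCHW: (batch, channels, height, width).
# Hypothetical blob: 2 images, 3 channels, padded to 800 x 1216.
im_blob = np.zeros((2, 3, 800, 1216), dtype=np.float32)

im_width, im_height = im_blob.shape[3], im_blob.shape[2]
assert (im_height, im_width) == (800, 1216)
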
Example #4
def get_minibatch(roidb):
    """Given a roidb, construct a minibatch sampled from it."""
    # We collect blobs from each image onto a list and then concat them into a
    # single tensor, hence we initialize each blob to an empty list
    blobs = {k: [] for k in get_minibatch_blob_names()}
    # Get the input image blob, formatted for caffe2
    im_blob, im_scales = _get_image_blob(roidb)
    blobs['data'] = im_blob

    # Calculate tracking metadata if using pre-calculated blobs
    if len(cfg.DATA_LOADER.EXTRA_BLOBS):
        blobs_extra = _get_extra_blobs(roidb)
        blobs.update(blobs_extra)
        if 'track_n_rois' in cfg.DATA_LOADER.EXTRA_BLOBS:
            blobs.update({
                'track_n_rois_one': [blobs_extra['track_n_rois'][0]],
                'track_n_rois_two': [blobs_extra['track_n_rois'][1]],
            })

    if cfg.RPN.RPN_ON:
        # RPN-only or end-to-end Faster/Mask R-CNN
        valid = rpn_roi_data.add_rpn_blobs(blobs, im_scales, roidb)
    elif cfg.RETINANET.RETINANET_ON:
        im_width, im_height = im_blob.shape[3], im_blob.shape[2]
        # im_width, im_height corresponds to the network input: padded image
        # (if needed) width and height. We pass it as input and slice the data
        # accordingly so that we don't need to use SampleAsOp
        valid = retinanet_roi_data.add_retinanet_blobs(blobs, im_scales, roidb,
                                                       im_width, im_height)
    else:
        # Fast R-CNN like models trained on precomputed proposals
        valid = fast_rcnn_roi_data.add_fast_rcnn_blobs(blobs, im_scales, roidb)
    return blobs, valid
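
The 'track_n_rois' handling above splits a two-element count list into per-frame blobs. A tiny illustrative run with made-up values (the dict contents are assumptions, not what _get_extra_blobs actually returns):

# Made-up extra blobs: ROI counts for an assumed two-image tracking pair.
blobs_extra = {'track_n_rois': [12, 9]}

blobs = {}
blobs.update(blobs_extra)
blobs.update({
    'track_n_rois_one': [blobs_extra['track_n_rois'][0]],  # [12]
    'track_n_rois_two': [blobs_extra['track_n_rois'][1]],  # [9]
})
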
Example #5
def get_minibatch(roidb):
    """Given a roidb, construct a minibatch sampled from it."""
    # We collect blobs from each image onto a list and then concat them into a
    # single tensor, hence we initialize each blob to an empty list
    blobs = {k: [] for k in get_minibatch_blob_names()}
    # Get the input image blob, formatted for caffe2
    im_blob, im_scales, im_crops = _get_image_blob(roidb)
    blobs['data'] = im_blob

    # Reorder crops from (row1, col1, row2, col2) to (x1, y1, x2, y2)
    im_crops = np.array(im_crops, dtype=np.int32)
    im_crops = im_crops[:, (1, 0, 3, 2)]

    if cfg.REID.APM:
        valid = reid_apm_roi_data.add_reid_blobs(blobs, im_scales, im_crops,
                                                 roidb)
    else:
        valid = reid_roi_data.add_reid_blobs(blobs, im_scales, roidb)
    return blobs, valid

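The column permutation above is what turns (row1, col1, row2, col2) crops into (x1, y1, x2, y2) boxes. A small self-contained check with a made-up crop:

import numpy as np

# One crop stored as (row1, col1, row2, col2) = (10, 20, 110, 220).
im_crops = np.array([[10, 20, 110, 220]], dtype=np.int32)

# Taking columns (1, 0, 3, 2) swaps row/col into x1, y1, x2, y2.
assert im_crops[:, (1, 0, 3, 2)].tolist() == [[20, 10, 220, 110]]
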
Example #6
def get_minibatch(roidb):
    """Given a roidb, construct a minibatch sampled from it."""
    # We collect blobs from each image onto a list and then concat them into a
    # single tensor, hence we initialize each blob to an empty list
    blobs = {k: [] for k in get_minibatch_blob_names()}
    # Get the input image blob, formatted for caffe2
    im_blob, im_scales = _get_image_blob(roidb)
    blobs['data'] = im_blob
    if cfg.RPN.RPN_ON:
        # RPN-only or end-to-end Faster/Mask R-CNN
        valid = rpn_roi_data.add_rpn_blobs(blobs, im_scales, roidb)
    elif cfg.RETINANET.RETINANET_ON:
        im_width, im_height = im_blob.shape[3], im_blob.shape[2]
        # im_width, im_height corresponds to the network input: padded image
        # (if needed) width and height. We pass it as input and slice the data
        # accordingly so that we don't need to use SampleAsOp
        valid = retinanet_roi_data.add_retinanet_blobs(
            blobs, im_scales, roidb, im_width, im_height
        )
    else:
        # Fast R-CNN like models trained on precomputed proposals
        valid = fast_rcnn_roi_data.add_fast_rcnn_blobs(blobs, im_scales, roidb)
    return blobs, valid
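
All of these get_minibatch variants return a (blobs, valid) pair. A hedged usage sketch of feeding the result into the Caffe2 workspace; the real Detectron data loader typically enqueues minibatches and feeds per-GPU scoped blob names instead of calling FeedBlob directly like this:

from caffe2.python import workspace

blobs, valid = get_minibatch(roidb)  # assumes a roidb is already loaded
if valid:
    for name, value in blobs.items():
        # FeedBlob accepts numpy arrays; other blob types would need
        # serialization first.
        workspace.FeedBlob(name, value)
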
Example #7
def get_minibatch(roidb):
    """Given a roidb, construct a minibatch sampled from it."""
    # We collect blobs from each image onto a list and then concat them into a
    # single tensor, hence we initialize each blob to an empty list
    blobs = {k: [] for k in get_minibatch_blob_names()}
    # Get the input image blob, formatted for caffe2
    # pose_pred has the same shape as im_blob
    im_blob, im_scales, pose_pred, pose_line, blobs['seg_gt_label'] = \
        _get_image_pose_blob(roidb)
    blobs['data'] = im_blob
    blobs['normalizer'] = np.array([100], dtype=np.float32)
    # Both branches of the original 'LIP' in cfg.TRAIN.DATASETS[0] check were
    # identical, so the 26-channel resize is applied unconditionally.
    (blobs['pose_pred_4'], blobs['pose_pred_8'], blobs['pose_pred_16'],
     blobs['pose_pred_32']) = _resize_pose_blob(pose_pred, channel=26)
    blobs['pose_line_8'], blobs['pose_line_16'] = _resize_poseline_blob(pose_line)
    if cfg.RPN.RPN_ON:
        # RPN-only or end-to-end Faster/Mask R-CNN
        valid = rpn_roi_data.add_rpn_blobs(blobs, im_scales, roidb)
    elif cfg.RETINANET.RETINANET_ON:
        im_width, im_height = im_blob.shape[3], im_blob.shape[2]
        # im_width, im_height corresponds to the network input: padded image
        # (if needed) width and height. We pass it as input and slice the data
        # accordingly so that we don't need to use SampleAsOp
        valid = retinanet_roi_data.add_retinanet_blobs(
            blobs, im_scales, roidb, im_width, im_height
        )
    else:
        # Fast R-CNN like models trained on precomputed proposals
        valid = fast_rcnn_roi_data.add_fast_rcnn_blobs(blobs, im_scales, roidb)
    return blobs, valid
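
_resize_pose_blob and _resize_poseline_blob are not shown here. The sketch below only illustrates the assumed idea of producing stride-4/8/16/32 copies of a 26-channel pose blob; it uses naive subsampling, whereas the real helper presumably interpolates:

import numpy as np

def resize_pose_blob_sketch(pose_pred, strides=(4, 8, 16, 32)):
    """Hypothetical stand-in: subsample an NCHW pose blob at several strides."""
    # pose_pred: (N, 26, H, W). Real code would resize with interpolation,
    # presumably to match the stride-4/8/16/32 feature map sizes exactly.
    return [pose_pred[:, :, ::s, ::s] for s in strides]

pose_pred = np.zeros((1, 26, 512, 512), dtype=np.float32)
p4, p8, p16, p32 = resize_pose_blob_sketch(pose_pred)
assert p4.shape == (1, 26, 128, 128) and p32.shape == (1, 26, 16, 16)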