Example #1
def get_rcnn_testbatch(roidb, cfg):
    """
    return a dict of testbatch
    :param roidb: ['image', 'flipped'] + ['boxes']
    :return: data, label, im_info
    """
    # assert len(roidb) == 1, 'Single batch only'
    imgs, roidb = get_image(roidb, cfg)
    im_array = imgs
    im_info = [
        np.array([roidb[i]['im_info']], dtype=np.float32)
        for i in range(len(roidb))
    ]

    im_rois = [roidb[i]['boxes'] for i in range(len(roidb))]
    rois = im_rois
    rois_array = [
        np.hstack((0 * np.ones((rois[i].shape[0], 1)), rois[i]))
        for i in range(len(rois))
    ]

    data = [{
        'data': im_array[i],
        'rois': rois_array[i]
    } for i in range(len(roidb))]
    label = {}

    return data, label, im_info
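The hstack above prepends a batch-index column to each (N, 4) box array, producing the 5-column (batch_index, x1, y1, x2, y2) layout consumed by the downstream RoI pooling layer; the index is always 0 here because a test batch holds a single image. A minimal, self-contained sketch with made-up boxes:

import numpy as np

# made-up test boxes in (x1, y1, x2, y2) form
boxes = np.array([[12, 30, 96, 150],
                  [40, 40, 200, 220]], dtype=np.float32)

# prepend a batch-index column (always 0 for a single-image test batch)
rois = np.hstack((0 * np.ones((boxes.shape[0], 1)), boxes))
print(rois.shape)  # (2, 5)
print(rois[0])     # first RoI, with a leading batch index of 0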
Example #2
def get_rpn_testbatch(roidb, cfg, cur_frame_id):
    """
    return a dict of testbatch
    :param roidb: ['image', 'flipped']
    :return: data, label, im_info
    """
    # assert len(roidb) == 1, 'Single batch only'
    imgs, roidb, motion_vector, res_diff = get_image(roidb, cfg, cur_frame_id)
    im_array = imgs
    motion_vector_array = motion_vector
    res_diff_array = res_diff
    im_info = [
        np.array([roidb[i]['im_info']], dtype=np.float32)
        for i in range(len(roidb))
    ]

    data = [{
        'data': im_array[i],
        'motion_vector': motion_vector_array[i],
        'res_diff': res_diff_array[i],
        'im_info': im_info[i]
    } for i in range(len(roidb))]
    label = {}

    return data, label, im_info
Example #3
def get_rpn_batch_poly(roidb, cfg):
    """
    prototype for rpn batch poly: data, im_info, gt_boxes
    :param roidb: ['image']
    :param cfg:
    :return:
    """
    assert len(roidb) == 1, 'Single batch only'
    imgs, roidb = get_image(roidb, cfg)
    im_array = imgs[0]
    im_info = np.array([roidb[0]['im_info']], dtype=np.float32)

    # gt boxes: (x1, y1, x2, y2, cls)
    if roidb[0]['gt_classes'].size > 0:
        gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
        gt_boxes = np.empty((roidb[0]['boxes'].shape[0], 9), dtype=np.float32)
        gt_boxes[:, 0:8] = roidb[0]['boxes'][gt_inds, :]
        gt_boxes[:, 8] = roidb[0]['gt_classes'][gt_inds]
    else:
        gt_boxes = np.empty((0, 9), dtype=np.float32)

    data = {'data': im_array, 'im_info': im_info}
    label = {'gt_boxes': gt_boxes}

    return data, label
Example #4
def get_rpn_batch(roidb, cfg):
    """
    prototype for rpn batch: data, im_info, gt_boxes
    :param roidb: ['image', 'flipped'] + ['gt_boxes', 'boxes', 'gt_classes']
    :return: data, label
    """
    assert len(roidb) == 1, 'Single batch only'
    imgs, roidb = get_image(roidb, cfg)
    im_array = imgs[0]
    im_info = np.array([roidb[0]['im_info']], dtype=np.float32)

    # gt boxes: (x1, y1, x2, y2, cls)
    if roidb[0]['gt_classes'].size > 0:
        gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
        if len(gt_inds) == 0:
            gt_boxes = np.empty((0, 5), dtype=np.float32)
        else:
            gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
            #print 'rpn shapes :', gt_boxes.shape,' ',roidb[0]['boxes'].shape,' ',len(gt_inds)
            gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :]
            gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
    else:
        gt_boxes = np.empty((0, 5), dtype=np.float32)

    data = {'data': im_array,
            'im_info': im_info}
    label = {'gt_boxes': gt_boxes}

    return data, label
Example #5
def get_rpn_batch(roidb, cfg):
    """
    prototype for rpn batch: data, im_info, gt_boxes
    :param roidb: ['image', 'flipped'] + ['gt_boxes', 'boxes', 'gt_classes']
    :return: data, label
    """
    # FIXME: locate bug when config.BATCH_IMAGES=2
    assert len(roidb) == 1, 'Single batch only'
    imgs, roidb = get_image(roidb, cfg)
    im_array = imgs[0]
    im_info = np.array([roidb[0]['im_info']], dtype=np.float32)

    # gt boxes: (x1, y1, x2, y2, cls)
    if roidb[0]['gt_classes'].size > 0:
        gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
        gt_boxes = np.empty((roidb[0]['boxes'].shape[0], 5), dtype=np.float32)
        gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :]
        gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
    else:
        gt_boxes = np.empty((0, 5), dtype=np.float32)

    data = {'data': im_array, 'im_info': im_info}
    label = {'gt_boxes': gt_boxes}

    return data, label
Example #6
def get_rpn_batch(roidb, cfg):
    """
    prototype for rpn batch: data, im_info, gt_boxes
    :param roidb: ['image', 'flipped'] + ['gt_boxes', 'boxes', 'gt_classes']
    :return: data, label
    """
    assert len(roidb) == 1, 'Single batch only'
    imgs, roidb = get_image(roidb, cfg)
    im_array = imgs[0]
    im_info = np.array([roidb[0]['im_info']], dtype=np.float32)

    # gt boxes: (x1, y1, x2, y2, cls)
    if roidb[0]['gt_classes'].size > 0:
        gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
        gt_boxes = np.empty((roidb[0]['boxes'].shape[0], 5), dtype=np.float32)
        gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :]
        gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
        #attri = np.empty((roidb[0]['boxes'].shape[0], 115), dtype=np.float32)
        #attri[:,:] = roidb[0]['attri_overlaps'][gt_inds] 
    else:
        gt_boxes = np.empty((0, 5), dtype=np.float32)
        #attri = np.empty((roidb[0]['boxes'].shape[0], 41), dtype=np.float32)

    data = {'data': im_array,
            'im_info': im_info}
    label = {'gt_boxes': gt_boxes}
             #'attri'   : attri}

    return data, label
Example #7
def get_rpn_batch(roidb, cfg):
    """
    prototype for rpn batch: data, im_info, gt_boxes
    :param roidb: ['image', 'flipped'] + ['gt_boxes', 'boxes', 'gt_classes']
    :return: data, label
    """
    assert len(roidb) == 1, 'Single batch only'
    imgs, roidb = get_image(roidb, cfg)
    im_array = imgs[0]
    im_info = np.array([roidb[0]['im_info']], dtype=np.float32)

    # gt boxes: (x1, y1, x2, y2, cls)
    if roidb[0]['gt_classes'].size > 0:
        gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
        gt_boxes = np.empty((roidb[0]['boxes'].shape[0], 5), dtype=np.float32)
        gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :]
        gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
    else:
        gt_boxes = np.empty((0, 5), dtype=np.float32)

    data = {'data': im_array,
            'im_info': im_info}
    label = {'gt_boxes': gt_boxes}

    return data, label
Example #8
def get_rpn_batch(roidb, cfg):
    """
    prototype for rpn batch: data, im_info, gt_boxes
    :param roidb: ['image', 'flipped'] + ['gt_boxes', 'boxes', 'gt_classes']
    :return: data, label
    """
    assert len(roidb) == 1, 'Single batch only'
    imgs, roidb = get_image(roidb, cfg)
    im_array = imgs[0]
    im_info = np.array([roidb[0]['im_info']], dtype=np.float32)

    # gt boxes: (x1, y1, x2, y2, cls)
    if roidb[0]['gt_classes'].size > 0:
        gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
        gt_boxes = np.empty((roidb[0]['boxes'].shape[0], 5), dtype=np.float32)

        # TODO: if len(gt_inds) < roidb[0]['boxes'].shape[0], the following two lines will raise a ValueError
        gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :]
        gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
    else:
        gt_boxes = np.empty((0, 5), dtype=np.float32)

    data = {'data': im_array, 'im_info': im_info}
    label = {'gt_boxes': gt_boxes}

    return data, label
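The TODO above flags a real pitfall: gt_boxes is allocated with one row per entry of roidb[0]['boxes'], but only len(gt_inds) rows are assigned, so the assignment raises a ValueError whenever some boxes are filtered out as background. A minimal, self-contained sketch of the safer allocation (the same fix Example #4 uses), with a made-up roidb entry:

import numpy as np

# hypothetical single-image roidb entry with one background (class 0) box mixed in
roidb0 = {
    'boxes': np.array([[10, 10, 50, 50], [20, 20, 80, 80], [5, 5, 30, 30]], dtype=np.float32),
    'gt_classes': np.array([3, 0, 7]),
}

gt_inds = np.where(roidb0['gt_classes'] != 0)[0]
# size the array by the number of kept boxes, not by the total box count
gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
gt_boxes[:, 0:4] = roidb0['boxes'][gt_inds, :]
gt_boxes[:, 4] = roidb0['gt_classes'][gt_inds]
print(gt_boxes.shape)  # (2, 5): only the class-3 and class-7 boxes remain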
Example #9
def on_end_epoch(state):
    model_wrapper = state['network']
    for meter_name, meter in model_wrapper.meters.items():
        train_loss_logger = model_wrapper.loggers[meter_name]
        train_loss_logger.log(state['epoch'], meter.value()[0])
    noise_sample = Variable(torch.randn(64, model_wrapper.noise_dim)).cuda()
    img = get_image(model_wrapper, noise_sample)
    model_wrapper.loggers["generated_image"].log(img)
Example #10
def handle_image(image_url: str) -> bool:
    g.log.debug(f"Image URL | {image_url}")

    img = image.get_image(image_url)
    text: str = image.to_text(img)
    g.log.debug(f"OCR Results | {text}")

    return handle_text(text)
Example #11
def get_rpn_testbatch(roidb, cfg):
    imgs, roidb = get_image(roidb, cfg)
    im_array = imgs
    im_info = [np.array([roidb[i]['im_info']], dtype=np.float32) for i in range(len(roidb))]
    data = [{'data': im_array[i],
             'im_info': im_info[i]} for i in range(len(roidb))]

    label = {}
    return data, label, im_info
Example #12
def get_rcnn_testbatch(roidb, cfg):
    """
    return a dict of testbatch
    :param roidb: ['image', 'flipped'] + ['boxes']
    :return: data, label, im_info
    """
    # assert len(roidb) == 1, 'Single batch only'
    imgs, roidb = get_image(roidb, cfg)
    im_array = imgs
    im_info = [np.array([roidb[i]['im_info']], dtype=np.float32) for i in range(len(roidb))]

    im_rois = [roidb[i]['boxes'] for i in range(len(roidb))]

    if cfg.network.ROIDispatch:
        data = []
        for i in range(len(im_rois)):
            w = im_rois[i][:, 2] - im_rois[i][:, 0] + 1
            h = im_rois[i][:, 3] - im_rois[i][:, 1] + 1
            feat_id = np.clip(np.floor(2 + np.log2(np.sqrt(w * h) / 224)), 0, 3).astype(int)

            rois_0 = im_rois[i][np.where(feat_id == 0)]
            if len(rois_0) == 0:
                rois_0 = np.zeros((1, 4))
            rois_1 = im_rois[i][np.where(feat_id == 1)]
            if len(rois_1) == 0:
                rois_1 = np.zeros((1, 4))
            rois_2 = im_rois[i][np.where(feat_id == 2)]
            if len(rois_2) == 0:
                rois_2 = np.zeros((1, 4))
            rois_3 = im_rois[i][np.where(feat_id == 3)]
            if len(rois_3) == 0:
                rois_3 = np.zeros((1, 4))
            # stack batch index
            data.append({'data': im_array[i],
                         'rois_0': np.hstack((0 * np.ones((rois_0.shape[0], 1)), rois_0)),
                         'rois_1': np.hstack((0 * np.ones((rois_1.shape[0], 1)), rois_1)),
                         'rois_2': np.hstack((0 * np.ones((rois_2.shape[0], 1)), rois_2)),
                         'rois_3': np.hstack((0 * np.ones((rois_3.shape[0], 1)), rois_3))})
            if cfg.TEST.LEARN_NMS:
                data[-1]['im_info'] = im_info[i]
    else:
        rois = im_rois
        rois_array = [np.hstack((0 * np.ones((rois[i].shape[0], 1)), rois[i])) for i in range(len(rois))]

        data = []
        for i in range(len(roidb)):
            data.append({'data': im_array[i],
                         'rois': rois_array[i]})
            if cfg.TEST.LEARN_NMS:
                data[-1]['im_info'] = im_info[i]

    label = {}

    return data, label, im_info
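The ROIDispatch branch above routes each RoI to one of four feature levels with an FPN-style rule: level = floor(2 + log2(sqrt(w * h) / 224)), clipped to [0, 3], so small boxes go to fine feature maps and large boxes to coarse ones. A small, self-contained sketch of just that assignment, using made-up box sizes:

import numpy as np

# toy RoIs in (x1, y1, x2, y2) form, from small to large
rois = np.array([[0, 0, 55, 55],     # side ~56 px
                 [0, 0, 150, 150],   # side ~151 px
                 [0, 0, 300, 300],   # side ~301 px
                 [0, 0, 900, 600]],  # large box
                dtype=np.float32)

w = rois[:, 2] - rois[:, 0] + 1
h = rois[:, 3] - rois[:, 1] + 1
# floor(2 + log2(sqrt(area) / 224)), clipped to the four available levels
feat_id = np.clip(np.floor(2 + np.log2(np.sqrt(w * h) / 224)), 0, 3).astype(int)
print(feat_id)  # [0 1 2 3]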
Example #13
def get_rpn_batch(roidb, cfg):
    """
    prototype for rpn batch: data, im_info, gt_boxes
    :param roidb: ['image', 'flipped'] + ['gt_boxes', 'boxes', 'gt_classes']
    :return: data, label
    """

    #print("length of roidb: ", len(roidb))

    assert len(roidb) == 1, 'Single batch only'
    imgs, roidb = get_image(roidb, cfg)
    im_array = imgs[0]
    im_info = np.array([roidb[0]['im_info']], dtype=np.float32)

    # gt boxes: (x1, y1, x2, y2, cls)
    if roidb[0]['gt_classes'].size > 0:
        gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
        gt_boxes = np.empty((roidb[0]['boxes'].shape[0], 5), dtype=np.float32)
        gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :]
        gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
    else:
        gt_boxes = np.empty((0, 5), dtype=np.float32)

    data = {'data': im_array, 'im_info': im_info}
    label = {'gt_boxes': gt_boxes}

    #print(data["data"].shape)
    """
    imgs, roidb = get_image(roidb, cfg)
    im_array = imgs
    im_info = [np.array([roidb[i]['im_info']], dtype=np.float32) for i in range(len(roidb))]

    data = [{'data': im_array[i],
            'im_info': im_info[i]} for i in range(len(roidb))]
    
    label = []
    for i, _ in enumerate(roidb):
        # gt boxes: (x1, y1, x2, y2, cls)
        if roidb[i]['gt_classes'].size > 0:
            gt_inds = np.where(roidb[i]['gt_classes'] != 0)[0]
            gt_boxes = np.empty((roidb[i]['boxes'].shape[0], 5), dtype=np.float32)
            gt_boxes[:, 0:4] = roidb[i]['boxes'][gt_inds, :]
            gt_boxes[:, 4] = roidb[i]['gt_classes'][gt_inds]
        else:
            gt_boxes = np.empty((0, 5), dtype=np.float32)
        label.append({'gt_boxes': gt_boxes})
    
    print(data[0]["data"].shape)
    """
    return data, label
Example #14
def get_rpn_batch(roidb, cfg):
    """
    prototype for rpn batch: data, im_info, gt_boxes
    :param roidb: ['image', 'flipped'] + ['gt_boxes', 'boxes', 'gt_classes']
    :return: data, label
    """
    assert len(roidb) == 1, 'Single batch only'
    imgs, roidb = get_image(roidb, cfg)
    im_array = imgs[0]
    im_info = np.array([roidb[0]['im_info']], dtype=np.float32)

    inv_im_array = transform_inverse(im_array, cfg.network.PIXEL_MEANS)

    # gt boxes: (x1, y1, x2, y2, cls)
    if roidb[0]['gt_classes'].size > 0:
        gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
        gt_boxes = np.empty((roidb[0]['boxes'].shape[0], 5), dtype=np.float32)
        gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :]
        gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
    else:
        gt_boxes = np.empty((0, 5), dtype=np.float32)

    # print 'im_info[0] = ', im_info[0]
    # print 'roidb[0].keys() = ', roidb[0].keys()
    # print "gt_boxes = ", gt_boxes
    # print "im_array.shape = ", im_array.shape
    # print "inv_im_array.shape = ", inv_im_array.shape
    # print "roidb[0]['image'] = ", roidb[0]['image']
    # print "roidb[0]['boxes'] = ", roidb[0]['boxes']
    # print "roidb[0]['width'], roidb[0]['height'] = ", roidb[0]['width'], roidb[0]['height']
    # print '-----------'

    # Save image for debugging
    _, img_name = osp.split(roidb[0]['image'])
    img_out_path = osp.join('debug', img_name)
    im = Image.fromarray(inv_im_array)
    draw = ImageDraw.Draw(im)
    n_boxes, _ = gt_boxes.shape
    for i in range(n_boxes):
        draw.rectangle(gt_boxes[i, 0:4], outline='yellow')
    del draw

    im.save(img_out_path)

    data = {'data': im_array, 'im_info': im_info}
    label = {'gt_boxes': gt_boxes}

    return data, label
Example #15
def get_rpn_testbatch(roidb, cfg):
    """
    return a dict of testbatch
    :param roidb: ['image', 'flipped']
    :return: data, label, im_info
    """
    # assert len(roidb) == 1, 'Single batch only'
    imgs, roidb = get_image(roidb, cfg)
    im_array = imgs
    im_info = [np.array([roidb[i]['im_info']], dtype=np.float32) for i in range(len(roidb))]

    data = [{'data': im_array[i],
            'im_info': im_info[i]} for i in range(len(roidb))]
    label = {}

    return data, label, im_info
Example #17
def get_rpn_batch(roidb, cfg):
    assert len(roidb) == 1, 'Single batch only'

    imgs, roidb = get_image(roidb, cfg)
    im_array = imgs[0]
    im_info = np.array([roidb[0]['im_info']], dtype=np.float32)

    if roidb[0]['gt_classes'].size > 0:
        gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
        gt_boxes = np.empty((roidb[0]['boxes'].shape[0], 5), dtype=np.float32)
        gt_boxes[:, :4] = roidb[0]['boxes'][gt_inds, :]
        gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
    else:
        gt_boxes = np.empty((0, 5), dtype=np.float32)

    data = {'data': im_array,
            'im_info': im_info}
    label = {'gt_boxes': gt_boxes}
    return data, label
Example #18
def get_rpn_testbatch(roidb, target_scale, batch_size, cfg, data_buf):
    """
    """
    assert len(roidb) > 0, 'empty list !'

    # get images
    t0 = time.time()  ###
    target_size, max_size = target_scale
    imgs, roidb = get_image(roidb,
                            target_size,
                            max_size,
                            stride=cfg.network.IMAGE_STRIDE)
    max_h = max([img.shape[0] for img in imgs])
    max_w = max([img.shape[1] for img in imgs])
    t1 = time.time()  ###

    # shapes
    shapes = {
        'data': (batch_size, 3, max_h, max_w),
        'im_info': (batch_size, 3),
    }

    # reshape buffers
    batch = dict()
    for k in data_buf:
        s = shapes[k]
        c = np.prod(s)
        batch[k] = np.frombuffer(data_buf[k], dtype=np.float32,
                                 count=c).reshape(s)
        batch[k].fill(0)

    # transform image data
    bgr_means = cfg.network.PIXEL_MEANS
    for i, img in enumerate(imgs):
        h, w = img.shape[:2]
        for j in range(3):
            batch['data'][i, j, :h, :w] = img[:, :, 2 - j] - bgr_means[2 - j]
        batch['im_info'][i, :] = [max_h, max_w, roidb[i]['im_info'][2]]

    t2 = time.time()  ###
    #print 't_image:%.3f\tt_trans:%.3f' % (t1-t0, t2-t1) ###
    return shapes
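Here data_buf is expected to hold preallocated, flat float32 buffers; the function just views each one with np.frombuffer, reshapes it, and fills it in place, returning only the shapes. The allocation itself is not shown in the example, but a minimal sketch of one plausible setup, assuming shared memory via multiprocessing.RawArray (the sizes and names below are made up):

import numpy as np
from multiprocessing import RawArray

# one flat float32 buffer per blob, sized for the largest batch expected
max_elems = {'data': 1 * 3 * 1000 * 1000, 'im_info': 1 * 3}
data_buf = {k: RawArray('f', n) for k, n in max_elems.items()}

# inside the batch function the buffer is viewed at the current batch shape
# and filled in place, so worker processes can share it without copying
shape = (1, 3, 600, 800)
view = np.frombuffer(data_buf['data'], dtype=np.float32, count=int(np.prod(shape))).reshape(shape)
view.fill(0)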
Example #19
def on_end_epoch(state):
    model_wrapper = state['network']
    meter_loss = model_wrapper.meters["loss"]
    print('Training loss: %.4f' % (meter_loss.value()[0]))
    train_loss_logger = model_wrapper.loggers["train_loss"]
    train_loss_logger.log(state['epoch'], meter_loss.value()[0])

    reset_meters(model_wrapper.meters)

    model_wrapper.eval()

    noise_sample = Variable(torch.randn(64, model_wrapper.noise_dim)).cuda()
    img = get_image(model_wrapper, noise_sample)
    model_wrapper.loggers["generated_image"].log(img)

    BasicEngine().test(model_wrapper, model_wrapper.dataset_iter(False))
    test_loss_logger = model_wrapper.loggers["test_loss"]
    test_loss_logger.log(state['epoch'], meter_loss.value()[0])
    print('Testing loss: %.4f' % (meter_loss.value()[0]))
    model_wrapper.train()
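get_image here is assumed to run the generator on the fixed noise batch and tile the samples into a single image for the logger. One plausible sketch, assuming the wrapper exposes a generator module (the attribute name is a guess, not the project's actual helper) and using torchvision's make_grid:

import torch
from torchvision.utils import make_grid

def get_image(model_wrapper, noise_sample):
    # hypothetical: run the generator without tracking gradients
    with torch.no_grad():
        fake = model_wrapper.generator(noise_sample)       # assumed attribute
    # tile the 64 samples into an 8 x 8 grid, rescaled to [0, 1] for display
    return make_grid(fake.cpu(), nrow=8, normalize=True)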
Example #20
def get_rcnn_testbatch(roidb, cfg):
    """
    return a dict of testbatch
    :param roidb: ['image', 'flipped'] + ['boxes']
    :return: data, label, im_info
    """
    # assert len(roidb) == 1, 'Single batch only'
    imgs, roidb = get_image(roidb, cfg)
    im_array = imgs
    im_info = [np.array([roidb[i]['im_info']], dtype=np.float32) for i in range(len(roidb))]

    im_rois = [roidb[i]['boxes'] for i in range(len(roidb))]
    rois = im_rois
    rois_array = [np.hstack((0 * np.ones((rois[i].shape[0], 1)), rois[i])) for i in range(len(rois))]

    data = [{'data': im_array[i],
             'rois': rois_array[i]} for i in range(len(roidb))]
    label = {}

    return data, label, im_info
Example #21
def get_rpn_batch(roidb, cfg):
    """
    prototype for rpn batch: data, im_info, gt_boxes
    :param roidb: ['image', 'flipped'] + ['gt_boxes', 'boxes', 'gt_classes']
    :return: data, label
    """
    assert len(roidb) == 1, 'Single batch only'
    imgs, roidb, img_names = get_image(roidb, cfg)
    im_array = imgs[0]
    img_fname = img_names[0]
    im_info = np.array([roidb[0]['im_info']], dtype=np.float32)

    # gt boxes: (x1, y1, x2, y2, cls)
    if roidb[0]['gt_classes'].size > 0:
        gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
        gt_boxes = np.empty((roidb[0]['boxes'].shape[0], 5), dtype=np.float32)
        gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :]
        gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]

        # leonid: filter boxes outside the number of supported classes
        # Joseph: this causes trouble when the permanent classes are replaced with novel ones
        #gt_boxes = gt_boxes[gt_boxes[:,4] < cfg.dataset.NUM_CLASSES]

        #leonid: filter boxes outside the set of supported categories
        if 'clsIds2use' in cfg.dataset:
            gt_boxes = gt_boxes[[
                ix for ix in range(gt_boxes.shape[0])
                if gt_boxes[ix, 4] in cfg.dataset.clsIds2use
            ]]
    else:
        gt_boxes = np.empty((0, 5), dtype=np.float32)

    data = {'data': im_array, 'im_info': im_info}
    label = {'gt_boxes': gt_boxes}

    return data, label, img_fname
Example #22
def get_rcnn_batch(roidb, cfg):
    """
    return a dict of multiple images
    :param roidb: a list of dict, whose length controls batch size
    ['images', 'flipped'] + ['gt_boxes', 'boxes', 'gt_overlap'] => ['bbox_targets']
    :return: data, label
    """
    num_images = len(roidb)
    imgs, roidb = get_image(roidb, cfg)
    im_array = tensor_vstack(imgs)

    assert cfg.TRAIN.BATCH_ROIS == -1 or cfg.TRAIN.BATCH_ROIS % cfg.TRAIN.BATCH_IMAGES == 0, \
        'BATCH_IMAGES {} must divide BATCH_ROIS {}'.format(cfg.TRAIN.BATCH_IMAGES, cfg.TRAIN.BATCH_ROIS)

    if cfg.TRAIN.BATCH_ROIS == -1:
        rois_per_image = np.sum([iroidb['boxes'].shape[0] for iroidb in roidb])
        fg_rois_per_image = rois_per_image
    else:
        rois_per_image = cfg.TRAIN.BATCH_ROIS / cfg.TRAIN.BATCH_IMAGES
        fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image).astype(int)

    rois_array = list()
    labels_array = list()
    bbox_targets_array = list()
    bbox_weights_array = list()

    for im_i in range(num_images):
        roi_rec = roidb[im_i]

        # infer num_classes from gt_overlaps
        num_classes = roi_rec['gt_overlaps'].shape[1]

        # label = class RoI has max overlap with
        rois = roi_rec['boxes']
        labels = roi_rec['max_classes']
        overlaps = roi_rec['max_overlaps']
        bbox_targets = roi_rec['bbox_targets']

        im_rois, labels, bbox_targets, bbox_weights = \
            sample_rois(rois, fg_rois_per_image, rois_per_image, num_classes, cfg,
                        labels, overlaps, bbox_targets)

        # project im_rois
        # do not round roi
        rois = im_rois
        batch_index = im_i * np.ones((rois.shape[0], 1))
        rois_array_this_image = np.hstack((batch_index, rois))
        rois_array.append(rois_array_this_image)

        # add labels
        labels_array.append(labels)
        bbox_targets_array.append(bbox_targets)
        bbox_weights_array.append(bbox_weights)

    rois_array = np.array(rois_array)
    labels_array = np.array(labels_array)
    bbox_targets_array = np.array(bbox_targets_array)
    bbox_weights_array = np.array(bbox_weights_array)

    data = {'data': im_array,
            'rois': rois_array}
    label = {'label': labels_array,
             'bbox_target': bbox_targets_array,
             'bbox_weight': bbox_weights_array}

    return data, label
Example #23
def get_rpn_batch(roidb,
                  target_scale,
                  sym,
                  cfg,
                  data_buf,
                  allowed_border=0,
                  max_gts=100,
                  kps_dim=0,
                  stride=32):
    """
    allowed_border:

    max_gts:
        max number of ground truths
    kps_dim:
        when training with keypoints, set kps_dim > 0
    """
    num_images = len(roidb)
    assert num_images > 0, 'empty list !'

    # get images
    t0 = time.time()  ###
    target_size, max_size = target_scale
    imgs, roidb = get_image(roidb, target_size, max_size)
    max_h = max([img.shape[0] for img in imgs])
    max_w = max([img.shape[1] for img in imgs])
    stride = float(stride)
    max_h = int(np.ceil(max_h / stride) * stride)
    max_w = int(np.ceil(max_w / stride) * stride)
    t1 = time.time()  ###

    # assign anchor labels
    anchor_labels = []
    _, feat_shape, _ = sym.infer_shape(data=(num_images, 3, max_h, max_w))
    for i in range(num_images):
        if roidb[i]['gt_classes'].size > 0:
            assert np.sum(roidb[i]['gt_classes'] ==
                          0) == 0, 'should not have background boxes!'
        gt_boxes = roidb[i]['boxes']
        im_info = [max_h, max_w, roidb[i]['im_info'][2]]
        # assign anchors
        anchor_labels.append(
            assign_anchor(feat_shape[0], gt_boxes, im_info, cfg,
                          allowed_border))
    t2 = time.time()  ###

    # shapes
    shapes = {
        'data': (num_images, 3, max_h, max_w),
        'im_info': (num_images, 3),
        'gt_boxes': (num_images, max_gts, 5),
        'label':
        tuple([num_images] + list(anchor_labels[0]['label'].shape[1:])),
        'bbox_target':
        tuple([num_images] + list(anchor_labels[0]['bbox_target'].shape[1:])),
        'bbox_weight':
        tuple([num_images] + list(anchor_labels[0]['bbox_weight'].shape[1:])),
    }
    if kps_dim > 0:
        shapes['gt_kps'] = ((num_images, max_gts, kps_dim))

    # reshape buffers
    batch = dict()
    for k in data_buf:
        s = shapes[k]
        c = np.prod(s)
        batch[k] = np.frombuffer(data_buf[k], dtype=np.float32,
                                 count=c).reshape(s)
        batch[k].fill(0)

    # transform image data and gt labels
    bgr_means = cfg.network.PIXEL_MEANS
    for i, img in enumerate(imgs):
        h, w = img.shape[:2]
        for j in range(3):
            batch['data'][i, j, :h, :w] = img[:, :, 2 - j] - bgr_means[2 - j]
        batch['im_info'][i, :] = [max_h, max_w, roidb[i]['im_info'][2]]
        num_gt = roidb[i]['boxes'].shape[0]
        batch['gt_boxes'][i, :num_gt, :4] = roidb[i]['boxes']
        batch['gt_boxes'][i, :num_gt, 4] = roidb[i]['gt_classes']
        if kps_dim > 0:
            batch['gt_kps'][i, :num_gt] = roidb[i]['keypoints']
        batch['label'][i] = anchor_labels[i]['label']
        batch['bbox_target'][i] = anchor_labels[i]['bbox_target']
        batch['bbox_weight'][i] = anchor_labels[i]['bbox_weight']
    t3 = time.time()  ###

    #print 't_image=%.3f\tt_assign=%.3f\tt_trans=%.3f\tt_all=%.3f' % (t1-t0, t2-t1, t3-t2, t3-t0) ###
    return shapes
Example #24
import sys
import os
# print(sys.path)

base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, base_dir)

# import utils
# from utils import cal

from utils.cal import add
from utils.image import get_image

# add(3,5)
get_image()
Example #25
def get_rcnn_batch(roidb, cfg):
    """
    return a dict of multiple images
    :param roidb: a list of dict, whose length controls batch size
    ['images', 'flipped'] + ['gt_boxes', 'boxes', 'gt_overlap'] => ['bbox_targets']
    :return: data, label
    """
    num_images = len(roidb)
    imgs, roidb = get_image(roidb, cfg)
    im_array = tensor_vstack(imgs)

    assert cfg.TRAIN.BATCH_ROIS == -1 or cfg.TRAIN.BATCH_ROIS % cfg.TRAIN.BATCH_IMAGES == 0, \
        'BATCH_IMAGES {} must divide BATCH_ROIS {}'.format(cfg.TRAIN.BATCH_IMAGES, cfg.TRAIN.BATCH_ROIS)

    if cfg.TRAIN.BATCH_ROIS == -1:
        rois_per_image = np.sum([iroidb['boxes'].shape[0] for iroidb in roidb])
        fg_rois_per_image = rois_per_image
    else:
        rois_per_image = cfg.TRAIN.BATCH_ROIS / cfg.TRAIN.BATCH_IMAGES
        fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image).astype(int)

    if cfg.network.ROIDispatch:
        rois_array_0 = list()
        rois_array_1 = list()
        rois_array_2 = list()
        rois_array_3 = list()
    else:
        rois_array = list()

    gt_labels_array = list()
    labels_array = list()
    bbox_targets_array = list()
    bbox_weights_array = list()

    for im_i in range(num_images):
        roi_rec = roidb[im_i]

        # infer num_classes from gt_overlaps
        num_classes = roi_rec['gt_overlaps'].shape[1]

        # label = class RoI has max overlap with
        rois = roi_rec['boxes']
        labels = roi_rec['max_classes']
        overlaps = roi_rec['max_overlaps']
        bbox_targets = roi_rec['bbox_targets']
        gt_lables = roi_rec['is_gt']

        if cfg.TRAIN.BATCH_ROIS == -1:
            im_rois, labels_t, bbox_targets, bbox_weights = \
                sample_rois_v2(rois, num_classes, cfg, labels=labels, overlaps=overlaps, bbox_targets=bbox_targets, gt_boxes=None)

            assert np.abs(im_rois - rois).max() < 1e-3
            assert np.abs(labels_t - labels).max() < 1e-3
        else:
            im_rois, labels, bbox_targets, bbox_weights, gt_lables =  \
                sample_rois(rois, fg_rois_per_image, rois_per_image, num_classes, cfg,
                            labels, overlaps, bbox_targets, gt_lables=gt_lables)

        # project im_rois
        # do not round roi
        if cfg.network.ROIDispatch:
            w = im_rois[:, 2] - im_rois[:, 0] + 1
            h = im_rois[:, 3] - im_rois[:, 1] + 1
            feat_id = np.clip(np.floor(2 + np.log2(np.sqrt(w * h) / 224)), 0, 3).astype(int)

            rois_0_idx = np.where(feat_id == 0)[0]
            rois_0 = im_rois[rois_0_idx]
            if len(rois_0) == 0:
                rois_0 = np.zeros((1, 4))
                label_0 = -np.ones((1,))
                gt_label_0 = -np.ones((1,))
                bbox_targets_0 = np.zeros((1, bbox_targets.shape[1]))
                bbox_weights_0 = np.zeros((1, bbox_weights.shape[1]))
            else:
                label_0 = labels[rois_0_idx]
                gt_label_0 = gt_lables[rois_0_idx]
                bbox_targets_0 = bbox_targets[rois_0_idx]
                bbox_weights_0 = bbox_weights[rois_0_idx]

            rois_1_idx = np.where(feat_id == 1)[0]
            rois_1 = im_rois[rois_1_idx]
            if len(rois_1) == 0:
                rois_1 = np.zeros((1, 4))
                label_1 = -np.ones((1,))
                gt_label_1 = -np.ones((1,))
                bbox_targets_1 = np.zeros((1, bbox_targets.shape[1]))
                bbox_weights_1 = np.zeros((1, bbox_weights.shape[1]))
            else:
                label_1 = labels[rois_1_idx]
                gt_label_1 = gt_lables[rois_1_idx]
                bbox_targets_1 = bbox_targets[rois_1_idx]
                bbox_weights_1 = bbox_weights[rois_1_idx]

            rois_2_idx = np.where(feat_id == 2)[0]
            rois_2 = im_rois[rois_2_idx]
            if len(rois_2) == 0:
                rois_2 = np.zeros((1, 4))
                label_2 = -np.ones((1,))
                gt_label_2 = -np.ones((1,))
                bbox_targets_2 = np.zeros((1, bbox_targets.shape[1]))
                bbox_weights_2 = np.zeros((1, bbox_weights.shape[1]))
            else:
                label_2 = labels[rois_2_idx]
                gt_label_2 = gt_lables[rois_2_idx]
                bbox_targets_2 = bbox_targets[rois_2_idx]
                bbox_weights_2 = bbox_weights[rois_2_idx]

            rois_3_idx = np.where(feat_id == 3)[0]
            rois_3 = im_rois[rois_3_idx]
            if len(rois_3) == 0:
                rois_3 = np.zeros((1, 4))
                label_3 = -np.ones((1,))
                gt_label_3 = -np.ones((1,))
                bbox_targets_3 = np.zeros((1, bbox_targets.shape[1]))
                bbox_weights_3 = np.zeros((1, bbox_weights.shape[1]))
            else:
                label_3 = labels[rois_3_idx]
                gt_label_3 = gt_lables[rois_3_idx]
                bbox_targets_3 = bbox_targets[rois_3_idx]
                bbox_weights_3 = bbox_weights[rois_3_idx]

            # stack batch index
            rois_array_0.append(np.hstack((im_i * np.ones((rois_0.shape[0], 1)), rois_0)))
            rois_array_1.append(np.hstack((im_i * np.ones((rois_1.shape[0], 1)), rois_1)))
            rois_array_2.append(np.hstack((im_i * np.ones((rois_2.shape[0], 1)), rois_2)))
            rois_array_3.append(np.hstack((im_i * np.ones((rois_3.shape[0], 1)), rois_3)))

            labels = np.concatenate([label_0, label_1, label_2, label_3], axis=0)
            gt_lables = np.concatenate([gt_label_0, gt_label_1, gt_label_2, gt_label_3], axis=0)
            bbox_targets = np.concatenate([bbox_targets_0, bbox_targets_1, bbox_targets_2, bbox_targets_3], axis=0)
            bbox_weights = np.concatenate([bbox_weights_0, bbox_weights_1, bbox_weights_2, bbox_weights_3], axis=0)
        else:
            rois = im_rois
            batch_index = im_i * np.ones((rois.shape[0], 1))
            rois_array_this_image = np.hstack((batch_index, rois))
            rois_array.append(rois_array_this_image)

        # add labels
        gt_labels_array.append(gt_lables)
        labels_array.append(labels)
        bbox_targets_array.append(bbox_targets)
        bbox_weights_array.append(bbox_weights)

    gt_labels_array = np.array(gt_labels_array)
    nongt_index_array = np.where(gt_labels_array == 0)[1]
    labels_array = np.array(labels_array)
    bbox_targets_array = np.array(bbox_targets_array)
    bbox_weights_array = np.array(bbox_weights_array)

    if cfg.network.USE_NONGT_INDEX:

        label = {'label': labels_array,
                 'nongt_index': nongt_index_array,
                 'bbox_target': bbox_targets_array,
                 'bbox_weight': bbox_weights_array}

    else:
        label = {'label': labels_array,
                 'bbox_target': bbox_targets_array,
                 'bbox_weight': bbox_weights_array}

    if cfg.network.ROIDispatch:
        rois_array_0 = np.array(rois_array_0)
        rois_array_1 = np.array(rois_array_1)
        rois_array_2 = np.array(rois_array_2)
        rois_array_3 = np.array(rois_array_3)
        # rois_concate = np.concatenate((rois_array_0, rois_array_1, rois_array_2, rois_array_3), axis=1)
        # gt_rois_t = rois_concate[:, gt_labels_array[0,:] > 0]
        data = {'data': im_array,
                'rois_0': rois_array_0,
                'rois_1': rois_array_1,
                'rois_2': rois_array_2,
                'rois_3': rois_array_3}
    else:
        rois_array = np.array(rois_array)
        data = {'data': im_array,
                'rois': rois_array}

    if cfg.TRAIN.LEARN_NMS:
        # im info
        im_info = np.array([roidb[0]['im_info']], dtype=np.float32)
        # gt_boxes
        if roidb[0]['gt_classes'].size > 0:
            gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
            gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
            gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :]
            gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
        else:
            gt_boxes = np.empty((0, 5), dtype=np.float32)
        data['im_info'] = im_info
        data['gt_boxes'] = gt_boxes

    return data, label