Beispiel #1
0
def test_retinanet_on_dataset(multi_gpu=False):
    """Entry point for RetinaNet inference on cfg.TEST.DATASET.

    Dispatches to the multi-GPU driver when requested, otherwise runs
    single-GPU inference in-process, then evaluates the detections.
    """
    out_dir = get_output_dir(training=False)
    ds = JsonDataset(cfg.TEST.DATASET)
    timer = Timer()
    timer.tic()

    # On test/test-dev splits, generate detections for every roidb image.
    if any(tag in cfg.TEST.DATASET for tag in ('test-dev', 'test')):
        cfg.TEST.NUM_TEST_IMAGES = len(ds.get_roidb())

    if not multi_gpu:
        all_boxes = test_retinanet()
    else:
        all_boxes = multi_gpu_test_retinanet_on_dataset(
            cfg.TEST.NUM_TEST_IMAGES, out_dir, ds)
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))
    # RetinaNet produces boxes only: no segms / keypoints.
    return task_evaluation.evaluate_all(ds, all_boxes, None, None, out_dir)
Beispiel #2
0
def test_net_on_dataset(
        args,
        dataset_name,
        proposal_file,
        output_dir,
        multi_gpu=False,
        gpu_id=0):
    """Run inference on a dataset and evaluate the results."""
    ds = JsonDataset(dataset_name)
    timer = Timer()
    timer.tic()
    if not multi_gpu:
        all_boxes, all_segms, all_keyps = test_net(
            args, dataset_name, proposal_file, output_dir, gpu_id=gpu_id)
    else:
        # The multi-GPU driver needs the total image count to shard work.
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            args, dataset_name, proposal_file, len(ds.get_roidb()), output_dir)
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))
    return task_evaluation.evaluate_all(
        ds, all_boxes, all_segms, all_keyps, output_dir)
Beispiel #3
0
def test_net_on_dataset(
        args,
        dataset_name,
        proposal_file,
        output_dir,
        multi_gpu=False,
        gpu_id=0):
    """Run inference on a dataset.

    Args:
        args: parsed command-line arguments forwarded to the workers.
        dataset_name: name of the JsonDataset to test on.
        proposal_file: path to precomputed proposals (or None).
        output_dir: directory that receives detection/evaluation output.
        multi_gpu: if True, fan out over GPUs via the multi-GPU driver.
        gpu_id: GPU ordinal used for single-GPU inference.

    Returns:
        The results dict produced by task_evaluation.evaluate_all.
    """
    # NOTE(review): original used tab indentation and non-PEP8 keyword
    # spacing (`multi_gpu = False`); normalized to 4-space indent here.
    dataset = JsonDataset(dataset_name)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            args, dataset_name, proposal_file, num_images, output_dir
        )
    else:
        all_boxes, all_segms, all_keyps = test_net(
            args, dataset_name, proposal_file, output_dir, gpu_id=gpu_id
        )
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))
    results = task_evaluation.evaluate_all(
        dataset, all_boxes, all_segms, all_keyps, output_dir
    )
    return results
def test_net_on_dataset(
        args,
        dataset_name,
        proposal_file,
        output_dir,
        ind_range=None,
        multi_gpu=False,
        gpu_id=0):
    """Run inference on a dataset, optionally over an index sub-range."""
    ds = JsonDataset(dataset_name)
    timer = Timer()
    timer.tic()
    if multi_gpu:
        # Multi-GPU driver shards the full image set itself; ind_range is
        # only honored on the single-GPU path.
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            args, dataset_name, proposal_file, len(ds.get_roidb()), output_dir)
    else:
        all_boxes, all_segms, all_keyps = test_net(
            args, dataset_name, proposal_file, output_dir,
            ind_range=ind_range, gpu_id=gpu_id)
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))

    # Restrict evaluation to the image ids that were actually processed.
    ds.test_img_ids = sorted(ds.COCO.getImgIds())
    if ind_range is not None:
        start, end = ind_range
        ds.test_img_ids = ds.test_img_ids[start:end]

    return task_evaluation.evaluate_all(
        ds, all_boxes, all_segms, all_keyps, output_dir)
Beispiel #5
0
def do_reval(dataset_name, output_dir, args):
    """Re-run evaluation on detections saved in <output_dir>/detections.pkl."""
    ds = JsonDataset(dataset_name)
    det_path = os.path.join(output_dir, 'detections.pkl')
    with open(det_path, 'rb') as fh:
        dets = pickle.load(fh)
    # Restore the config that produced these detections.
    saved_cfg = yaml.safe_load(dets['cfg'])
    if args.cfg_file is not None:
        core.config.merge_cfg_from_cfg(saved_cfg)
    else:
        core.config._merge_a_into_b(saved_cfg, cfg)
    results = task_evaluation.evaluate_all(
        ds,
        dets['all_boxes'],
        dets['all_segms'],
        dets['all_keyps'],
        output_dir,
        use_matlab=args.matlab_eval)
    task_evaluation.log_copy_paste_friendly_results(results)
Beispiel #6
0
def test_net_on_dataset(output_dir, multi_gpu=False, gpu_id=0):
    """Run inference on cfg.TEST.DATASET and evaluate the detections."""
    ds = JsonDataset(cfg.TEST.DATASET)
    timer = Timer()
    timer.tic()
    if multi_gpu:
        # Multi-GPU sharding assumes one image per batch.
        assert cfg.TEST.IMS_PER_BATCH == 1, 'Single batch only'
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            len(ds.get_roidb()), output_dir)
    else:
        all_boxes, all_segms, all_keyps = test_net(output_dir, gpu_id=gpu_id)
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))
    return task_evaluation.evaluate_all(
        ds, all_boxes, all_segms, all_keyps, output_dir)
def test_net_on_dataset(multi_gpu=False):
    """Run inference on cfg.TEST.DATASET; output dir comes from the config."""
    out_dir = get_output_dir(training=False)
    ds = JsonDataset(cfg.TEST.DATASET)
    timer = Timer()
    timer.tic()
    if not multi_gpu:
        all_boxes, all_segms, all_keyps = test_net()
    else:
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            len(ds.get_roidb()), out_dir)
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))
    return task_evaluation.evaluate_all(
        ds, all_boxes, all_segms, all_keyps, out_dir)
Beispiel #8
0
def do_reval(dataset_name, output_dir, args):
    """Re-evaluate previously saved detections.

    Loads <output_dir>/detections.pkl, restores the config stored inside
    it, and re-runs task evaluation on the saved boxes/segms/keypoints.

    Args:
        dataset_name: name of the JsonDataset the detections belong to.
        output_dir: directory containing detections.pkl; also receives
            the evaluation output.
        args: parsed CLI args (uses cfg_file and matlab_eval).
    """
    dataset = JsonDataset(dataset_name)
    with open(os.path.join(output_dir, 'detections.pkl'), 'rb') as f:
        dets = pickle.load(f)
    # Override config with the one saved in the detections file.
    # Fix: use yaml.safe_load — bare yaml.load without an explicit Loader
    # is deprecated (PyYAML >= 5.1) and unsafe on untrusted input; this
    # also matches the other do_reval variant in this file.
    if args.cfg_file is not None:
        core.config.merge_cfg_from_cfg(yaml.safe_load(dets['cfg']))
    else:
        core.config._merge_a_into_b(yaml.safe_load(dets['cfg']), cfg)
    results = task_evaluation.evaluate_all(
        dataset,
        dets['all_boxes'],
        dets['all_segms'],
        dets['all_keyps'],
        output_dir,
        use_matlab=args.matlab_eval
    )
    task_evaluation.log_copy_paste_friendly_results(results)
Beispiel #9
0
def test_retinanet_on_dataset(multi_gpu=False):
    """Test RetinaNet on cfg.TEST.DATASET, single- or multi-GPU."""
    out_dir = get_output_dir(training=False)
    ds = JsonDataset(cfg.TEST.DATASET)
    timer = Timer()
    timer.tic()
    if not multi_gpu:
        all_boxes = test_retinanet()
    else:
        all_boxes = multi_gpu_test_retinanet_on_dataset(
            len(ds.get_roidb()), out_dir)
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))
    # Box-only model: no segmentation or keypoint results.
    return task_evaluation.evaluate_all(ds, all_boxes, None, None, out_dir)
Beispiel #10
0
def test_net_on_dataset(multi_gpu=False):
    """Run inference on cfg.TEST.DATASET and evaluate all task outputs."""
    out_dir = get_output_dir(training=False)
    ds = JsonDataset(cfg.TEST.DATASET)
    timer = Timer()
    timer.tic()
    if not multi_gpu:
        all_boxes, all_segms, all_keyps = test_net()
    else:
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            len(ds.get_roidb()), out_dir)
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))
    return task_evaluation.evaluate_all(
        ds, all_boxes, all_segms, all_keyps, out_dir)
Beispiel #11
0
def test_net_on_dataset(args,
                        dataset_name,
                        proposal_file,
                        output_dir,
                        multi_gpu=False,
                        gpu_id=0):
    """Run inference on a dataset.

    Acts as the driver: with multi_gpu the work is fanned out through
    multi_gpu_test_net_on_dataset; otherwise this process runs test_net
    itself as the single worker.
    """
    ds = JsonDataset(dataset_name)
    timer = Timer()
    timer.tic()
    if multi_gpu:
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            args, dataset_name, proposal_file, len(ds.get_roidb()),
            output_dir)
    else:
        all_boxes, all_segms, all_keyps = test_net(
            args, dataset_name, proposal_file, output_dir, gpu_id=gpu_id)
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))
    return task_evaluation.evaluate_all(
        ds, all_boxes, all_segms, all_keyps, output_dir)
Beispiel #12
0
def test_net_on_dataset(
        args,
        dataset_name,
        proposal_file,
        output_dir,
        multi_gpu=False,
        gpu_id=0):
    """Run inference on a dataset, then post-process raw scores/boxes.

    Unlike the plain Detectron driver, test_net here returns a dict of
    raw per-image scores and boxes (keyed by image path); this function
    applies NMS (or CorLoc selection on train splits) before evaluation.
    """
    dataset = JsonDataset(dataset_name)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes = multi_gpu_test_net_on_dataset(
            args, dataset_name, proposal_file, num_images, output_dir
        )
    else:
        all_boxes = test_net(
            args, dataset_name, proposal_file, output_dir, gpu_id=gpu_id
        )
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))

    roidb = dataset.get_roidb()
    num_images = len(roidb)
    # +1 for the background class slot expected by empty_results.
    num_classes = cfg.MODEL.NUM_CLASSES + 1
    final_boxes = empty_results(num_classes, num_images)
    # On training splits, report CorLoc instead of mAP-style detection.
    test_corloc = 'train' in dataset_name
    for i, entry in enumerate(roidb):
        # all_boxes is keyed by image path, not by index.
        boxes = all_boxes[entry['image']]
        if test_corloc:
            _, _, cls_boxes_i = box_results_for_corloc(boxes['scores'], boxes['boxes'])
        else:
            _, _, cls_boxes_i = box_results_with_nms_and_limit(boxes['scores'],
                                                         boxes['boxes'])
        extend_results(i, final_boxes, cls_boxes_i)
    results = task_evaluation.evaluate_all(
        dataset, final_boxes, output_dir, test_corloc
    )
    return results
Beispiel #13
0
def test_net_on_dataset(args,
                        dataset_name,
                        proposal_file,
                        output_dir,
                        dataset,
                        multi_gpu=False,
                        gpu_id=0):
    """Run inference on a caller-supplied dataset object."""
    timer = Timer()
    timer.tic()
    if multi_gpu:
        # Size the work queue from the ground-truth test roidb.
        _, roidb = dataset.get_roidb(gt=True, test_flag=True)
        num_images = len(roidb)
        print('total images: ', num_images)
        num_classes = len(dataset.classes)
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            args, dataset_name, proposal_file, num_images, num_classes,
            output_dir, dataset)
    else:
        all_boxes, all_segms, all_keyps = test_net(
            args, dataset_name, proposal_file, output_dir, dataset,
            gpu_id=gpu_id)
    timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(timer.average_time))
    return task_evaluation.evaluate_all(
        dataset, all_boxes, all_segms, all_keyps, output_dir)
import sys
import _init_paths
from datasets.json_dataset import JsonDataset
from datasets import task_evaluation
import pickle


def open_pkl(file_):
    """Load and return the pickled object stored at *file_*."""
    with open(file_, 'rb') as handle:
        return pickle.load(handle)


# CLI usage: <script> <dataset_name> <detections_pickle> <output_dir>
dataset_name = sys.argv[1]

# Detections pickle in the Detectron format (dict with keys
# 'all_boxes', 'all_segms', 'all_keyps').
detection = open_pkl(sys.argv[2])

all_boxes = detection['all_boxes']
all_segms = detection['all_segms']
all_keyps = detection['all_keyps']
output_dir = sys.argv[3]

# Evaluate the saved detections against the named dataset's annotations.
dataset = JsonDataset(dataset_name)
results = task_evaluation.evaluate_all(dataset, all_boxes, all_segms,
                                       all_keyps, output_dir)
Beispiel #15
0
def main():
    """Main function: few-shot detection testing entry point.

    Builds the test roidb, constructs a query-conditioned dataloader,
    runs detection over ``args.average`` independent query draws,
    evaluates each run with COCO metrics, and writes the per-metric
    averages to a JSON file in ``args.output_dir``.

    Fix vs. original: the visualization block's inner query loop reused
    the outer image-index variable ``i`` (``for i in range(args.checkshot)``),
    so every per-image output filename built after that loop used
    ``args.checkshot - 1`` instead of the image index, making names
    collide. The inner loop variable is renamed to ``shot_i``.
    """
    if not torch.cuda.is_available():
        sys.exit("Need a CUDA device to run the code.")

    logger = utils.logging.setup_logging(__name__)
    args = parse_args()
    logger.info('Called with args:')
    logger.info(args)

    # Single-GPU process XOR multi-GPU testing requested.
    assert (torch.cuda.device_count() == 1) ^ bool(args.multi_gpu_testing)

    assert bool(args.load_ckpt) ^ bool(args.load_detectron), \
        'Exactly one of --load_ckpt and --load_detectron should be specified.'
    if args.output_dir is None:
        ckpt_path = args.load_ckpt if args.load_ckpt else args.load_detectron
        args.output_dir = os.path.join(
            os.path.dirname(os.path.dirname(ckpt_path)), 'test')
        logger.info('Automatically set output directory to %s', args.output_dir)
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Select the backbone config for this group (C4 when FPN is disabled).
    if args.close_fpn:
        args.cfg_file = "configs/few_shot/e2e_mask_rcnn_R-50-C4_1x_{}.yaml".format(args.group)
    else:
        args.cfg_file = "configs/few_shot/e2e_mask_rcnn_R-50-FPN_1x_{}.yaml".format(args.group)

    if args.cfg_file is not None:
        merge_cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        merge_cfg_from_list(args.set_cfgs)

    cfg.VIS = args.vis
    cfg.SEEN = args.seen

    if args.close_co_atten:
        cfg.CO_ATTEN = False
    if args.close_relation_rcnn:
        cfg.RELATION_RCNN = False
        # Fall back to the plain Detectron box/mask heads.
        if not args.close_fpn:
            cfg.FAST_RCNN.ROI_BOX_HEAD = 'fast_rcnn_heads.roi_2mlp_head'
            cfg.MRCNN.ROI_MASK_HEAD = 'mask_rcnn_heads.mask_rcnn_fcn_head_v1up4convs'
        else:
            cfg.FAST_RCNN.ROI_BOX_HEAD = 'torchResNet.ResNet_roi_conv5_head'
            cfg.MRCNN.ROI_MASK_HEAD = 'mask_rcnn_heads.mask_rcnn_fcn_head_v0upshare'
    if args.deform_conv:
        cfg.MODEL.USE_DEFORM = True

    if args.dataset == "fis_cell":
        cfg.TEST.DATASETS = ('fis_cell_test',)
        cfg.MODEL.NUM_CLASSES = 2
    elif args.dataset == "coco2017":
        cfg.TEST.DATASETS = ('coco_2017_val',)
        cfg.MODEL.NUM_CLASSES = 81
    elif args.dataset == "keypoints_coco2017":
        cfg.TEST.DATASETS = ('keypoints_coco_2017_val',)
        cfg.MODEL.NUM_CLASSES = 2
    else:  # For subprocess call
        assert cfg.TEST.DATASETS, 'cfg.TEST.DATASETS shouldn\'t be empty'
    assert_and_infer_cfg()

    # manually set args.cuda
    args.cuda = True

    timer_for_ds = defaultdict(Timer)

    ### Dataset ###
    timer_for_ds['roidb'].tic()
    imdb, roidb, ratio_list, ratio_index, query, cat_list = combined_roidb(
        cfg.TEST.DATASETS, False)
    timer_for_ds['roidb'].toc()
    roidb_size = len(roidb)
    logger.info('{:d} roidb entries'.format(roidb_size))
    logger.info('Takes %.2f sec(s) to construct roidb', timer_for_ds['roidb'].average_time)

    batchSampler = BatchSampler(
        sampler=MinibatchSampler(ratio_list, ratio_index, shuffle=False),
        batch_size=1,
        drop_last=False
    )
    dataset = RoiDataLoader(
        roidb, ratio_list, ratio_index, query,
        cfg.MODEL.NUM_CLASSES,
        training=False, cat_list=cat_list, shot=args.checkshot)

    ### Model ###
    model = initialize_model_from_cfg(args, gpu_id=0)

    # Per-run COCO metrics are appended here and averaged at the end.
    all_results = OrderedDict({
        'box':
        OrderedDict(
            [
                ('AP', []),
                ('AP50', []),
                ('AP75', []),
                ('APs', []),
                ('APm', []),
                ('APl', []),
            ]
        ),
        'mask':
        OrderedDict(
            [
                ('AP', []),
                ('AP50', []),
                ('AP75', []),
                ('APs', []),
                ('APm', []),
                ('APl', []),
            ]
        )
    })
    timer_for_total = defaultdict(Timer)
    timer_for_total['total_test_time'].tic()
    for avg in range(args.average):
        # Each pass uses a different query draw from the support set.
        dataset.query_position = avg
        dataloader = torch.utils.data.DataLoader(
            dataset,
            batch_sampler=batchSampler,
            num_workers=cfg.DATA_LOADER.NUM_THREADS,
            collate_fn=collate_minibatch
        )
        dataiterator = iter(dataloader)

        num_images = len(ratio_index)
        num_cats = imdb.num_classes
        all_boxes, all_segms, all_keyps = empty_results(num_cats, num_images)

        # total quantity of testing images
        num_detect = len(ratio_index)

        timers = defaultdict(Timer)
        post_fix = '%dshot_g%d_seen%d_%d'%(args.checkshot, args.group, args.seen, avg)
        for i, index in enumerate(ratio_index):
            input_data = next(dataiterator)
            catgory = input_data['choice']
            entry = dataset._roidb[dataset.ratio_index[i]]
            if cfg.TEST.PRECOMPUTED_PROPOSALS:
                # The roidb may contain ground-truth rois (for example, if the roidb
                # comes from the training or val split). We only want to evaluate
                # detection on the *non*-ground-truth rois. We select only the rois
                # that have the gt_classes field set to 0, which means there's no
                # ground truth.
                box_proposals = entry['boxes'][entry['gt_classes'] == 0]
                if len(box_proposals) == 0:
                    continue
            else:
                # Faster R-CNN type models generate proposals on-the-fly with an
                # in-network RPN; 1-stage models don't require proposals.
                box_proposals = None

            im = imread(entry['image'])

            # Promote grayscale images to 3-channel.
            if len(im.shape) == 2:
                im = im[:,:,np.newaxis]
                im = np.concatenate((im,im,im), axis=2)

            alpha = 2.2 # Simple contrast control
            beta = 0.5    # Simple brightness control
            # Color-code the image per category (channels zeroed by class id)
            # for the visualization output only.
            im_colored = im.copy()
            if catgory[0].item() == 7:
                im_colored[:,:,0] = 0
                im_colored[:,:,1] = 0
                im_colored = cv2.convertScaleAbs(im_colored, alpha=alpha, beta=beta)
            elif catgory[0].item() == 8:
                im_colored[:,:,1] = 0
                im_colored[:,:,2] = 0
                im_colored = cv2.convertScaleAbs(im_colored, alpha=alpha, beta=beta)
            elif catgory[0].item() == 6:
                im_colored[:,:,0] = 0
                im_colored[:,:,2] = 0
                im_colored = cv2.convertScaleAbs(im_colored, alpha=alpha, beta=beta)

            # Detection runs on the untouched image; the colored copy is
            # only swapped in afterwards for visualization.
            cls_boxes_i, cls_segms_i, cls_keyps_i = im_detect_all(model, im, input_data['query'], input_data['query_type'], catgory, num_cats, box_proposals, timers)
            im = im_colored
            extend_results(i, all_boxes, cls_boxes_i)
            if cls_segms_i is not None:
                extend_results(i, all_segms, cls_segms_i)
            if cls_keyps_i is not None:
                extend_results(i, all_keyps, cls_keyps_i)

            if i % 10 == 0:  # Reduce log file size
                ave_total_time = np.sum([t.average_time for t in timers.values()])
                eta_seconds = ave_total_time * (num_images - i - 1)
                eta = str(datetime.timedelta(seconds=int(eta_seconds)))
                det_time = (
                    timers['im_detect_bbox'].average_time +
                    timers['im_detect_mask'].average_time +
                    timers['im_detect_keypoints'].average_time
                )
                misc_time = (
                    timers['misc_bbox'].average_time +
                    timers['misc_mask'].average_time +
                    timers['misc_keypoints'].average_time
                )
                logger.info(
                    (
                        'im_detect: range [{:d}, {:d}] of {:d}: '
                        '{:d}/{:d} {:.3f}s + {:.3f}s (eta: {})'
                    ).format(
                        1, num_detect, num_detect, i + 1,
                        num_detect, det_time, misc_time, eta
                    )
                )

            if cfg.VIS:
                im_name = entry['image']
                class_name = im_name.split('/')[-4]
                file_name = im_name.split('/')[-3]

                im_target = im.copy()

                to_tensor = transforms.ToTensor()
                o_querys=[]
                # BUGFIX: loop variable renamed from `i` to `shot_i` so the
                # outer image index `i` is not clobbered before it is used
                # in the output filenames below.
                for shot_i in range(args.checkshot):
                    o_query = input_data['query'][0][shot_i][0].permute(1, 2,0).contiguous().cpu().numpy()
                    # Undo ImageNet normalization back to 0-255 RGB.
                    o_query *= [0.229, 0.224, 0.225]
                    o_query += [0.485, 0.456, 0.406]
                    o_query *= 255
                    o_query_colored = o_query.copy()
                    if catgory[0].item() == 7:
                        o_query_colored[:,:,0] = 0
                        o_query_colored[:,:,1] = 0
                        o_query_colored = cv2.convertScaleAbs(o_query_colored, alpha=alpha, beta=beta)
                    elif catgory[0].item() == 8:
                        o_query_colored[:,:,1] = 0
                        o_query_colored[:,:,2] = 0
                        o_query_colored = cv2.convertScaleAbs(o_query_colored, alpha=alpha, beta=beta)
                    elif catgory[0].item() == 6:
                        o_query_colored[:,:,0] = 0
                        o_query_colored[:,:,2] = 0
                        o_query_colored = cv2.convertScaleAbs(o_query_colored, alpha=alpha, beta=beta)
                    o_query = o_query_colored
                    o_query = Image.fromarray(o_query.astype(np.uint8))
                    o_querys.append(to_tensor(o_query))

                # Tile the query shots into a grid and center it on a white
                # canvas the size of the target image.
                o_querys_grid = make_grid(o_querys, nrow=args.checkshot//2, normalize=True, scale_each=True, pad_value=1)
                o_querys_grid = transforms.ToPILImage()(o_querys_grid).convert("RGB")
                query_w, query_h = o_querys_grid.size
                query_bg = Image.new('RGB', (im_target.shape[1], im_target.shape[0]), (255, 255, 255))
                bg_w, bg_h = query_bg.size
                offset = ((bg_w - query_w) // 2, (bg_h - query_h) // 2)
                query_bg.paste(o_querys_grid, offset)
                query = np.asarray(query_bg)
                im_pair = np.concatenate((im_target, query), axis=1)

                im_output_dir = os.path.join(args.output_dir, 'vis', post_fix, class_name)

                if not os.path.exists(im_output_dir):
                    os.makedirs(im_output_dir)

                sample_output_dir = os.path.join(im_output_dir, os.path.basename('{:d}_{:s}'.format(i, file_name)))

                if not os.path.exists(sample_output_dir):
                    os.makedirs(sample_output_dir)

                target_save_name = os.path.join(sample_output_dir, os.path.basename('{:d}_{:s}'.format(i, file_name)) + '_target.pdf')
                target = Image.fromarray(im_target.astype(np.uint8))
                target.save(target_save_name,"pdf")

                query_save_name = os.path.join(sample_output_dir, os.path.basename('{:d}_{:s}'.format(i, file_name)) + '_query.pdf')
                query = Image.fromarray(query.astype(np.uint8))
                query.save(query_save_name,"pdf")

                pred_save_name = os.path.join(sample_output_dir, os.path.basename('{:d}_{:s}'.format(i, file_name)) + '_pred.pdf')
                vis_utils.save_one_image(
                        im,
                        pred_save_name,
                        cls_boxes_i,
                        segms = cls_segms_i,
                        keypoints = cls_keyps_i,
                        thresh = cfg.VIS_TH,
                        box_alpha = 0.6,
                        dataset = imdb,
                        show_class = False
                    )

                im_det = vis_utils.vis_one_image(
                        im,
                        '{:d}_det_{:s}'.format(i, file_name),
                        os.path.join(args.output_dir, 'vis', post_fix),
                        cls_boxes_i,
                        segms = cls_segms_i,
                        keypoints = cls_keyps_i,
                        thresh = cfg.VIS_TH,
                        box_alpha = 0.6,
                        dataset = imdb,
                        show_class = False,
                        class_name = class_name,
                        draw_bbox = False
                    )

                gt_save_name = os.path.join(sample_output_dir, os.path.basename('{:d}_{:s}'.format(i, file_name)) + '_gt.pdf')
                vis_utils.save_one_image_gt(
                        im, entry['id'],
                        gt_save_name,
                        dataset = imdb)
                im_gt = vis_utils.vis_one_image_gt(
                        im, entry['id'],
                        '{:d}_gt_{:s}'.format(i, file_name),
                        os.path.join(args.output_dir, 'vis', post_fix),
                        dataset = imdb,
                        class_name = class_name)
                im_det = np.asarray(im_det)
                im_gt = np.asarray(im_gt)
                # Layout: [target | query grid] stacked over [gt | det].
                im2draw = np.concatenate((im_gt, im_det), axis=1)
                im2show = np.concatenate((im_pair, im2draw), axis=0)

                im_save_name = os.path.basename('{:d}_{:s}'.format(i, file_name)) + '.png'
                cv2.imwrite(os.path.join(im_output_dir, '{}'.format(im_save_name)), cv2.cvtColor(im2show, cv2.COLOR_RGB2BGR))

        cfg_yaml = yaml.dump(cfg)
        det_file = os.path.join(args.output_dir, 'detections_' + post_fix + '.pkl')
        save_object(
            dict(
                all_boxes=all_boxes,
                all_segms=all_segms,
                all_keyps=all_keyps,
                cfg=cfg_yaml
            ), det_file
        )
        logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))

        results = task_evaluation.evaluate_all(
            imdb, all_boxes, all_segms, all_keyps, args.output_dir
        )
        task_evaluation.check_expected_results(
            results,
            atol=cfg.EXPECTED_RESULTS_ATOL,
            rtol=cfg.EXPECTED_RESULTS_RTOL
        )
        # Accumulate this run's metrics for averaging across query draws.
        for task, metrics in all_results.items():
            metric_names = metrics.keys()
            for metric_name in metric_names:
                all_results[task][metric_name].append(results[imdb.name][task][metric_name])

    # Average every metric over the args.average runs.
    for task, metrics in all_results.items():
        metric_names = metrics.keys()
        for metric_name in metric_names:
            values = all_results[task][metric_name]
            all_results[task][metric_name] = sum(values) / len(values)
    post_fix = '%dshot_g%d_seen%d'%(args.checkshot, args.group, args.seen)
    avg_results_path = os.path.join(args.output_dir, ('avg_cocoeval_' + post_fix + '_results.json'))
    with open(avg_results_path, 'w') as f:
        f.write(json.dumps(all_results))

    timer_for_total['total_test_time'].toc()
    logger.info('Total inference time: {:.3f}s'.format(timer_for_total['total_test_time'].average_time))
Beispiel #16
0
def test_net(
        args,
        dataset_name,
        proposal_file,
        output_dir,
        ind_range=None,
        gpu_id=0):
    """Run inference on all images in a dataset or over an index range of images
    in a dataset using a single GPU.

    Args:
        args: parsed command-line namespace; must provide ``dataset_dir`` and is
            mutated here to carry ``image_ids`` for downstream consumers.
        dataset_name: name of the JSON dataset to load.
        proposal_file: path to precomputed proposals (used by
            ``get_roidb_and_dataset``), or None for in-network RPN models.
        output_dir: directory where the detections pickle and visualizations
            are written.
        ind_range: optional (start, end) image-index range; when given, a
            range-specific detections filename is used so parallel workers
            don't collide.
        gpu_id: GPU to run the model on.

    Returns:
        The result of ``task_evaluation.evaluate_all`` on the computed
        (or cached) detections.
    """
    assert not cfg.MODEL.RPN_ONLY, 'Use rpn_generate to generate proposals from RPN-only models'
    dataset = JsonDataset(dataset_name, args.dataset_dir)
    timers = defaultdict(Timer)
    # Pick a detections filename that encodes the index range and NMS settings,
    # so different test shards / configs never overwrite each other's cache.
    if ind_range is not None:
        if cfg.TEST.SOFT_NMS.ENABLED:
            det_name = 'detection_range_%s_%s_soft_nms.pkl' % tuple(ind_range)
        else:
            det_name = 'detection_range_(%d_%d)_nms_%.1f.pkl' % (ind_range[0], ind_range[1], cfg.TEST.NMS)
    else:
        det_name = 'detections.pkl'
    det_file = os.path.join(output_dir, det_name)
    # NOTE: `dataset` is rebound here to whatever get_roidb_and_dataset returns;
    # the JsonDataset constructed above is only its input.
    roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset(dataset, proposal_file, ind_range, args)
    num_images = len(roidb)
    image_ids = []
    num_classes = cfg.MODEL.NUM_CLASSES
    all_boxes, all_segms, all_keyps = empty_results(num_classes, num_images)

    # Record the image paths in order; stashed on `args` for later use
    # (e.g. by the evaluator, which also receives `args` below).
    for i, entry in enumerate(roidb):
        image_ids.append(entry['image'])
    args.image_ids = image_ids

    # If we have already computed the boxes, reuse the cached pickle and skip
    # model initialization and the whole detection loop.
    if os.path.exists(det_file):
        obj = load_object(det_file)
        all_boxes, all_segms, all_keyps = obj['all_boxes'], obj['all_segms'], obj['all_keyps']

    else:
        model = initialize_model_from_cfg(args, gpu_id=gpu_id)
        for i, entry in enumerate(roidb):
            if cfg.TEST.PRECOMPUTED_PROPOSALS:
                # The roidb may contain ground-truth rois (for example, if the roidb
                # comes from the training or val split). We only want to evaluate
                # detection on the *non*-ground-truth rois. We select only the rois
                # that have the gt_classes field set to 0, which means there's no
                # ground truth.
                box_proposals = entry['boxes'][entry['gt_classes'] == 0]
                if len(box_proposals) == 0:
                    continue
            else:
                # Faster R-CNN type models generate proposals on-the-fly with an
                # in-network RPN; 1-stage models don't require proposals.
                box_proposals = None

            im = cv2.imread(entry['image'])
            # In addition to the usual boxes/segms/keypoints, this model also
            # predicts per-car class scores, Euler angles and translation
            # (3D car pose outputs used only for visualization below).
            cls_boxes_i, cls_segms_i, cls_keyps_i, car_cls_i, euler_angle_i, trans_pred_i = im_detect_all(model, im, box_proposals, timers, dataset)
            extend_results(i, all_boxes, cls_boxes_i)
            if cls_segms_i is not None:
                extend_results(i, all_segms, cls_segms_i)
            if cls_keyps_i is not None:
                extend_results(i, all_keyps, cls_keyps_i)

            if i % 10 == 0:  # Reduce log file size
                ave_total_time = np.sum([t.average_time for t in timers.values()])
                eta_seconds = ave_total_time * (num_images - i - 1)
                eta = str(datetime.timedelta(seconds=int(eta_seconds)))
                det_time = (
                    timers['im_detect_bbox'].average_time +
                    timers['im_detect_mask'].average_time +
                    timers['im_detect_keypoints'].average_time
                )
                misc_time = (
                    timers['misc_bbox'].average_time +
                    timers['misc_mask'].average_time +
                    timers['misc_keypoints'].average_time
                )
                logger.info(
                    (
                        'im_detect: range [{:d}, {:d}] of {:d}: '
                        '{:d}/{:d} {:.3f}s + {:.3f}s (eta: {})'
                    ).format(
                        start_ind + 1, end_ind, total_num_images, start_ind + i + 1,
                        start_ind + num_images, det_time, misc_time, eta
                    )
                )

            if cfg.VIS:
                # Render 3D car detections (im[:, :, ::-1] converts BGR -> RGB).
                im_name = os.path.splitext(os.path.basename(entry['image']))[0]
                vis_utils.vis_one_image_eccv2018_car_3d(
                    im[:, :, ::-1],
                    '{:d}_{:s}'.format(i, im_name),
                    os.path.join(output_dir, 'vis'),
                    boxes=cls_boxes_i,
                    car_cls_prob=car_cls_i,
                    euler_angle=euler_angle_i,
                    trans_pred=trans_pred_i,
                    car_models=dataset.Car3D.car_models,
                    intrinsic=dataset.Car3D.get_intrinsic_mat(),
                    segms=cls_segms_i,
                    keypoints=cls_keyps_i,
                    thresh=0.9,
                    box_alpha=0.8,
                    dataset=dataset.Car3D)
        # Persist the freshly computed detections (plus the config that
        # produced them) so future runs hit the cache branch above.
        cfg_yaml = yaml.dump(cfg)
        save_object(
            dict(
                all_boxes=all_boxes,
                all_segms=all_segms,
                all_keyps=all_keyps,
                cfg=cfg_yaml
            ), det_file
        )
        logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))

    results = task_evaluation.evaluate_all(dataset, all_boxes, all_segms, all_keyps, output_dir, args)
    return results
Beispiel #17
0
def test_net_on_dataset(args,
                        dataset_name,
                        proposal_file,
                        output_dir,
                        multi_gpu=False,
                        gpu_id=0,
                        use_matlab=False,
                        early_stop=False):
    """Run single-GPU inference on a dataset and evaluate the detections.

    Raw per-image outputs from ``test_net`` are post-processed here: CorLoc
    box selection when testing on a 'train' split, otherwise NMS + score
    limiting, and the final boxes are handed to ``task_evaluation``.
    """
    dataset = JsonDataset(dataset_name)
    inference_timer = Timer()
    inference_timer.tic()
    all_boxes = test_net(args,
                         dataset_name,
                         proposal_file,
                         output_dir,
                         gpu_id=gpu_id,
                         early_stop=early_stop)
    inference_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(
        inference_timer.average_time))

    roidb = dataset.get_roidb()
    num_classes = cfg.MODEL.NUM_CLASSES + 1
    final_boxes = empty_results(num_classes, len(roidb))
    # CorLoc is only meaningful when evaluating on the training split.
    test_corloc = 'train' in dataset_name
    all_cls_scores = {}

    for img_idx, entry in enumerate(roidb):
        if early_stop and img_idx > 10:
            break

        entry_dets = all_boxes[entry['image']]
        score_key = entry['image'].replace('.jpg', '').split('/')[-1]
        all_cls_scores[score_key] = entry_dets['cls_scores']

        if entry_dets['scores'] is None:
            # No scores for this image: signal downstream evaluation by
            # dropping the accumulated boxes entirely.
            final_boxes = None
            continue

        if test_corloc:
            _, _, cls_boxes_i = box_results_for_corloc(
                entry_dets['scores'], entry_dets['boxes'],
                entry_dets['cls_scores'])
        else:
            _, _, cls_boxes_i = box_results_with_nms_and_limit(
                entry_dets['scores'], entry_dets['boxes'],
                entry_dets['cls_scores'])
        extend_results(img_idx, final_boxes, cls_boxes_i)

    return task_evaluation.evaluate_all(dataset,
                                        final_boxes,
                                        output_dir,
                                        test_corloc,
                                        all_cls_scores,
                                        use_matlab=use_matlab)
Beispiel #18
0
def test_net_on_dataset(
        args,
        dataset_name,
        proposal_file,
        output_dir,
        multi_gpu=False,
        gpu_id=0):
    """Run inference on a dataset.

    Supports three auxiliary modes driven by ``args``:
      * ``args.load_json``  -- skip detection and load ensembled results
        from ``args.load_json_path``.
      * ``args.save_json``  -- dump boxes/segms as JSON for later ensembling.
      * ``args.infer_submission`` -- write KITTI-style instance masks and
        prediction lists, then terminate the process (``exit(0)``).
    Otherwise returns standard evaluation results.
    """
    dataset = JsonDataset(dataset_name)
    test_timer = Timer()
    test_timer.tic()

    # If loading an ensembled json, skip the im_detect_all stage entirely.
    if args.load_json:
        import json
        # json_path = os.path.join(args.ensamble_path, 'ensamble.json')
        json_path = args.load_json_path
        # json_path = os.path.join("/".join(json_path.split('/')[:-1]), json_path.split('/')[-1].rstrip('/')+'_ensamble.json')
        print("Load json {}".format(json_path))
        ans_json = json.loads(open(json_path).read())
        # Boxes were serialized as flat lists; restore the (N, 5) det arrays.
        all_boxes = [[np.array(v).reshape((-1, 5)) for v in i] for i in ans_json['all_boxes']]
        all_segms = ans_json['all_segms']
        # Keypoints are not ensembled; provide empty per-class placeholders.
        all_keyps = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    else:
        if multi_gpu:
            num_images = len(dataset.get_roidb())
            all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
                args, dataset_name, proposal_file, num_images, output_dir
            )
        else:
            all_boxes, all_segms, all_keyps = test_net(
                args, dataset_name, proposal_file, output_dir, gpu_id=gpu_id
            )
    test_timer.toc()

    # Save detection results to a json file for later ensembling.
    if args.save_json:
        import json
        import copy
        output_json = {'all_boxes': [],
                       'all_segms': [],
                       }
        # Deep-copy so the ndarray -> list conversion doesn't mutate all_boxes,
        # which is still needed for evaluation below.
        all_box = copy.deepcopy(all_boxes)
        for cls_id in range(1, len(all_boxes)):
            for imgid in range(len(all_boxes[cls_id])):
                all_box[cls_id][imgid] = all_box[cls_id][imgid].tolist()
        output_json['all_boxes'] = all_box
        output_json['all_segms'] = all_segms
        if os.path.exists(args.save_json_path) is False:
            os.makedirs(args.save_json_path)

        json_path = os.path.join(args.save_json_path, args.method_name)
        json_path += ".json"
        print("Save {}".format(json_path))
        open(json_path, 'w').write(json.dumps(output_json))

    # Image basenames (extension stripped); built but unused below —
    # NOTE(review): looks like leftover code, confirm before removing.
    images_path = []
    for i in dataset.get_roidb():
        images_path.append(i['image'].split('/')[-1][:-4])

    logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))

    if args.infer_submission:
        # KITTI instance-segmentation submission export.
        kitti_test_num = 200
        kitti_cls_num = 11
        pred_list_name = 'pred_list'
        pred_img_name = 'pred_img'

        # NOTE(review): hard-coded cluster path; should come from config/args.
        test_image_path = "/nfs/project/libo_i/go_kitti/data/testing/image_2"
        test_image_list = os.listdir(test_image_path)

        pred_list_path = os.path.join(output_dir, pred_list_name)
        pred_img_path = os.path.join(output_dir, pred_img_name)

        # Ensure path exists.
        if not os.path.exists(pred_list_path):
            os.makedirs(pred_list_path)

        if not os.path.exists(pred_img_path):
            os.makedirs(pred_img_path)

        # assert kitti_test_num == len(test_image_list)

        for img_id in range(kitti_test_num):
            im = cv2.imread(os.path.join(test_image_path, test_image_list[img_id]))
            ist_cnt = 0
            text_save_name = "{}.txt".format(test_image_list[img_id][:-4])
            text_save_path = os.path.join(pred_list_path, text_save_name)
            file = open(text_save_path, "w")
            for cls_id in range(kitti_cls_num):
                if len(all_segms[cls_id]) != 0:
                    cls_item = all_segms[cls_id][img_id]
                    if len(cls_item) != 0:
                        for ist_id, specific_item in enumerate(cls_item):
                            # write image info: one binary PNG per instance mask
                            mask = np.array(mask_util.decode(specific_item), dtype=np.float32)
                            instances_graph = np.zeros((im.shape[0], im.shape[1]))
                            instances_graph[mask == 1] = 255
                            instance_save_name = "{}_{:0>3d}.png".format(
                                test_image_list[img_id][:-4],
                                ist_cnt)
                            print(instance_save_name)
                            instance_save_path = os.path.join(pred_img_path, instance_save_name)
                            import scipy.misc as msc
                            msc.imsave(instance_save_path, instances_graph)

                            # write text info like that
                            # ../pred_img/Kitti2015_000000_10_000.png 026 0.976347
                            # (cls_id + 23 maps to the KITTI label id;
                            # the last field is the detection score)
                            instance_info_To_Text = "../pred_img/{} {:0>3d} {}\n".format(instance_save_name,
                                                                                         cls_id + 23,
                                                                                         all_boxes[cls_id][img_id][
                                                                                             ist_id][4])
                            ist_cnt += 1
                            file.writelines(instance_info_To_Text)

            file.close()
        # Submission mode terminates the whole process here.
        exit(0)
    # start to output submit format files and use ann['annotations'] to evaluate the metrics.
    else:
        results = task_evaluation.evaluate_all(dataset, all_boxes, all_segms, all_keyps, output_dir)
        return results
Beispiel #19
0
    else:  # For subprocess call
        assert cfg.TEST.DATASETS, 'cfg.TEST.DATASETS shouldn\'t be empty'
    assert_and_infer_cfg()

    logger.info('Re-evaluation with config:')
    logger.info(pprint.pformat(cfg))

    with open(args.result_path, 'rb') as f:
        results = pickle.load(f)
        logger.info('Loading results from {}.'.format(args.result_path))
    all_boxes = results['all_boxes']

    dataset_name = cfg.TEST.DATASETS[0]
    dataset = JsonDataset(dataset_name)
    roidb = dataset.get_roidb()
    num_images = len(roidb)
    num_classes = cfg.MODEL.NUM_CLASSES + 1
    final_boxes = empty_results(num_classes, num_images)
    test_corloc = 'train' in dataset_name
    for i, entry in enumerate(roidb):
        boxes = all_boxes[entry['image']]
        if test_corloc:
            _, _, cls_boxes_i = box_results_for_corloc(boxes['scores'],
                                                       boxes['boxes'])
        else:
            _, _, cls_boxes_i = box_results_with_nms_and_limit(
                boxes['scores'], boxes['boxes'])
        extend_results(i, final_boxes, cls_boxes_i)
    results = task_evaluation.evaluate_all(dataset, final_boxes,
                                           args.output_dir, test_corloc)