def eval_net(model,
             roidb,
             args,
             dataset_name,
             proposal_file,
             output_dir,
             ind_range=None,
             gpu_id=0,
             include_feat=False):
    """Run inference on all images in a dataset or over an index range of images
    in a dataset using a single GPU.
    """
    assert not cfg.MODEL.RPN_ONLY, \
        'Use rpn_generate to generate proposals from RPN-only models'

    # roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset(
    #     dataset_name, proposal_file, ind_range, args.do_val
    # )
    # roidb = roidb[:100]
    num_images = len(roidb)
    all_results = [None for _ in range(num_images)]
    timers = defaultdict(Timer)
    for i, entry in enumerate(roidb):

        # print("entry")
        # print(entry)

        box_proposals = None

        im = cv2.imread(entry['image'])
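        # When args.use_gt_boxes is set, the roidb entry (with its ground-truth
        # annotations) is passed so im_detect_rels can use GT boxes; otherwise
        # the model produces its own detections.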
        if args.use_gt_boxes:
            im_results = im_detect_rels(model,
                                        im,
                                        dataset_name,
                                        box_proposals,
                                        timers,
                                        entry,
                                        args.use_gt_labels,
                                        include_feat=include_feat)
        else:
            im_results = im_detect_rels(model,
                                        im,
                                        dataset_name,
                                        box_proposals,
                                        timers,
                                        include_feat=include_feat)

        im_results.update(dict(image=entry['image']))
        # add gt
        if args.do_val:
            im_results.update(
                dict(gt_sbj_boxes=entry['sbj_gt_boxes'],
                     gt_sbj_labels=entry['sbj_gt_classes'],
                     gt_obj_boxes=entry['obj_gt_boxes'],
                     gt_obj_labels=entry['obj_gt_classes'],
                     gt_prd_labels=entry['prd_gt_classes']))

        all_results[i] = im_results

    return all_results
Example #2
def get_metrics_det_boxes(model, timers, dataset_name):
    # assert len(cfg.TEST.DATASETS) == 1
    # dataset_name, proposal_file = get_inference_dataset(0, cfg.TEST.DATASETS)
    # dataset = JsonDatasetRel(dataset_name)
    model.eval()
    roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset(
        dataset_name, None, None, True)
    num_images = len(roidb)
    all_results = [None for _ in range(num_images)]
    for i, entry in enumerate(roidb):
        box_proposals = None

        im = cv2.imread(entry['image'])

        im_results = im_detect_rels(model, im, dataset_name, box_proposals,
                                    False, timers)

        im_results.update(dict(image=entry['image']))
        # add gt

        im_results.update(
            dict(gt_sbj_boxes=entry['sbj_gt_boxes'],
                 gt_sbj_labels=entry['sbj_gt_classes'],
                 gt_obj_boxes=entry['obj_gt_boxes'],
                 gt_obj_labels=entry['obj_gt_classes'],
                 gt_prd_labels=entry['prd_gt_classes']))

        all_results[i] = im_results

    if dataset_name.find('vg') >= 0 or dataset_name.find('vrd') >= 0:
        # TODO: use pred_det to val
        metrics = task_evaluation_vg_and_vrd.eval_rel_results(
            all_results, None, True)
    else:
        metrics = task_evaluation_sg.eval_rel_results(all_results, None, True)

    return metrics
Example #3
def test_net(args,
             dataset_name,
             proposal_file,
             output_dir,
             ind_range=None,
             gpu_id=0):
    """Run inference on all images in a dataset or over an index range of images
    in a dataset using a single GPU.
    """
    assert not cfg.MODEL.RPN_ONLY, \
        'Use rpn_generate to generate proposals from RPN-only models'

    roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset(
        dataset_name, proposal_file, ind_range, args.do_val)
    model = initialize_model_from_cfg(args, gpu_id=gpu_id)

    num_images = len(roidb)
    all_results = [None for _ in range(num_images)]
    timers = defaultdict(Timer)
    for i, entry in enumerate(roidb):
        box_proposals = None

        im = cv2.imread(entry['image'])
        if args.use_gt_boxes:
            im_results = im_detect_rels(model, im, dataset_name, box_proposals,
                                        args.do_vis, timers, entry,
                                        args.use_gt_labels)
        else:
            im_results = im_detect_rels(model, im, dataset_name, box_proposals,
                                        args.do_vis, timers)

        im_results.update(dict(image=entry['image']))
        # add gt
        if args.do_val:
            im_results.update(
                dict(gt_sbj_boxes=entry['sbj_gt_boxes'],
                     gt_sbj_labels=entry['sbj_gt_classes'],
                     gt_obj_boxes=entry['obj_gt_boxes'],
                     gt_obj_labels=entry['obj_gt_classes'],
                     gt_prd_labels=entry['prd_gt_classes']))

        all_results[i] = im_results

        if i % 10 == 0:  # Reduce log file size
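            # ETA = average per-image time (summed across all timers) times the
            # number of images still to process in this range.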
            ave_total_time = np.sum([t.average_time for t in timers.values()])
            eta_seconds = ave_total_time * (num_images - i - 1)
            eta = str(datetime.timedelta(seconds=int(eta_seconds)))
            det_time = timers['im_detect_rels'].average_time
            logger.info(
                ('im_detect: range [{:d}, {:d}] of {:d}: '
                 '{:d}/{:d} {:.3f}s (eta: {})').format(start_ind + 1, end_ind,
                                                       total_num_images,
                                                       start_ind + i + 1,
                                                       start_ind + num_images,
                                                       det_time, eta))

    cfg_yaml = yaml.dump(cfg)
    if ind_range is not None:
        det_name = 'rel_detection_range_%s_%s.pkl' % tuple(ind_range)
    else:
        ckpt_step = args.load_ckpt.split('_')[-1].split('.')[-2]
        if args.use_gt_boxes:
            if args.use_gt_labels:
                det_name = 'rel_detections_gt_boxes_prdcls_' + ckpt_step + '.pkl'
            else:
                det_name = 'rel_detections_gt_boxes_sgcls_' + ckpt_step + '.pkl'
        else:
            det_name = 'rel_detections_' + ckpt_step + '.pkl'
    det_file = os.path.join(output_dir, det_name)
    save_object(all_results, det_file)
    logger.info('Wrote rel_detections to: {}'.format(
        os.path.abspath(det_file)))
    return all_results
Example #4
def get_sg(args):
    def initialize_model_from_cfg(args, gpu_id=0):
        model = model_builder_rel.Generalized_RCNN()
        model.eval()
        model.cuda()

        load_name = args.load_ckpt
        logger.info("loading checkpoint %s", load_name)
        checkpoint = torch.load(load_name,
                                map_location=lambda storage, loc: storage)
        net_utils.load_ckpt(model, checkpoint['model'])
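        # DataParallel wrapper; cpu_keywords keeps the 'im_info' and 'roidb'
        # inputs on the CPU when the minibatch is scattered across GPUs.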
        model = mynn.DataParallel(model,
                                  cpu_keywords=['im_info', 'roidb'],
                                  minibatch=True)
        return model

    def argsort_desc(scores):
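        # Return (row, col) index pairs of `scores`, ordered by descending score value.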
        return np.column_stack(
            np.unravel_index(np.argsort(-scores.ravel()), scores.shape))

    cfg.VIS = args.vis

    if args.cfg_file is not None:
        merge_cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        merge_cfg_from_list(args.set_cfgs)

    cfg.TEST.DATASETS = ('vg_val', )
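    # 151 = 150 Visual Genome object categories + background; 50 predicate categories.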
    cfg.MODEL.NUM_CLASSES = 151
    cfg.MODEL.NUM_PRD_CLASSES = 50  # exclude background

    if not cfg.MODEL.RUN_BASELINE:
        assert bool(args.load_ckpt) ^ bool(args.load_detectron), \
            'Exactly one of --load_ckpt and --load_detectron should be specified.'
    if args.output_dir is None:
        ckpt_path = args.load_ckpt if args.load_ckpt else args.load_detectron
        args.output_dir = os.path.join(
            os.path.dirname(os.path.dirname(ckpt_path)), 'test')
        logger.info('Automatically set output directory to %s',
                    args.output_dir)
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    #logger.info('Testing with config:')
    #logger.info(pprint.pformat(cfg))

    args.test_net_file, _ = os.path.splitext(__file__)
    args.cuda = True

    timers = defaultdict(Timer)
    box_proposals = None
    im_file = args.image
    im = cv2.imread(im_file)
    model = initialize_model_from_cfg(args)
    dataset_name = cfg.TEST.DATASETS[0]
    proposal_file = None
    im_results = im_detect_rels(model, im, dataset_name, box_proposals, timers)
    im_results.update(dict(image=im_file))

    det_boxes_sbj = im_results['sbj_boxes']
    det_boxes_obj = im_results['obj_boxes']
    det_labels_sbj = im_results['sbj_labels']
    det_labels_obj = im_results['obj_labels']
    det_scores_sbj = im_results['sbj_scores']
    det_scores_obj = im_results['obj_scores']
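    # Drop the background column (index 0) of the predicate scores.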
    det_scores_prd = im_results['prd_scores'][:, 1:]

    det_labels_prd = np.argsort(-det_scores_prd, axis=1)
    det_scores_prd = -np.sort(-det_scores_prd, axis=1)
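    # Predicates are now ranked per relation pair in descending order. Below, the
    # triplet score is sbj_score * obj_score * prd_score, keeping the top-2 predicates
    # per pair and the 100 highest-scoring (pair, predicate) combinations overall.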

    det_scores_so = det_scores_sbj * det_scores_obj
    det_scores_spo = det_scores_so[:, None] * det_scores_prd[:, :2]
    det_scores_inds = argsort_desc(det_scores_spo)[:100]
    det_scores_top = det_scores_spo[det_scores_inds[:, 0], det_scores_inds[:, 1]]
    det_boxes_so_top = np.hstack(
        (det_boxes_sbj[det_scores_inds[:, 0]],
         det_boxes_obj[det_scores_inds[:, 0]]))
    det_labels_p_top = det_labels_prd[det_scores_inds[:, 0],
                                      det_scores_inds[:, 1]]
    det_labels_spo_top = np.vstack(
        (det_labels_sbj[det_scores_inds[:, 0]], det_labels_p_top,
         det_labels_obj[det_scores_inds[:, 0]])).transpose()

    det_boxes_s_top = det_boxes_so_top[:, :4]
    det_boxes_o_top = det_boxes_so_top[:, 4:]
    det_labels_s_top = det_labels_spo_top[:, 0]
    det_labels_p_top = det_labels_spo_top[:, 1]
    det_labels_o_top = det_labels_spo_top[:, 2]
    out_dict = {}
    out_dict['boxes_s_top'] = det_boxes_s_top
    out_dict['boxes_o_top'] = det_boxes_o_top
    out_dict['labels_s_top'] = det_labels_s_top
    out_dict['labels_p_top'] = det_labels_p_top
    out_dict['labels_o_top'] = det_labels_o_top
    out_dict['scores_top'] = det_scores_top
    out_dict['image'] = im_file

    with open(os.path.join(args.output_dir, 'test-out.pkl'), 'wb') as fout:
        pickle.dump(out_dict, fout, pickle.HIGHEST_PROTOCOL)
    return out_dict
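
For reference, a minimal usage sketch (not part of the original code) showing how the out_dict pickled by get_sg above could be consumed. The file name 'test-out.pkl' and the dictionary keys come from the example; the helper name print_top_triplets and its top_k parameter are illustrative assumptions.

import pickle

import numpy as np


def print_top_triplets(pkl_path, top_k=10):
    # Illustrative helper: load the out_dict written above and print its
    # highest-scoring <subject, predicate, object> triplets.
    with open(pkl_path, 'rb') as f:
        out = pickle.load(f)
    order = np.argsort(-out['scores_top'])[:top_k]
    for i in order:
        print('{:.4f}  sbj={} prd={} obj={}'.format(
            out['scores_top'][i], out['labels_s_top'][i],
            out['labels_p_top'][i], out['labels_o_top'][i]))

# Example: print_top_triplets('test-out.pkl')
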
Example #5
def zfy_inference(args, proposal_file, output_dir, ind_range=None, gpu_id=0):
    #imgs = os.listdir('/home/zfy/Data/projects/isef/Large-Scale-VRD/data/vrd/train_images')
    imgs = ["test.jpg"]
    finals = []
    model = initialize_model_from_cfg(args, gpu_id=gpu_id)
    dataset_name, proposal = get_inference_dataset(0)
    for xyz in imgs:
        im_file = os.path.join('/home/zfy/Data/projects/isef/Large-Scale-VRD/',
                               xyz)
        #im_file = xyz
        timers = defaultdict(Timer)
        box_proposals = None
        im = cv2.imread(im_file)

        im_results = im_detect_rels(model, im, dataset_name, box_proposals,
                                    timers)
        im_results.update(dict(image=im_file))

        det_boxes_sbj = im_results['sbj_boxes']  # (#num_rel, 4)
        det_boxes_obj = im_results['obj_boxes']  # (#num_rel, 4)
        det_labels_sbj = im_results['sbj_labels']  # (#num_rel,)
        det_labels_obj = im_results['obj_labels']  # (#num_rel,)
        det_scores_sbj = im_results['sbj_scores']  # (#num_rel,)
        det_scores_obj = im_results['obj_scores']  # (#num_rel,)
        det_scores_prd = im_results['prd_scores'][:, 1:]

        det_labels_prd = np.argsort(-det_scores_prd, axis=1)
        det_scores_prd = -np.sort(-det_scores_prd, axis=1)

        det_scores_so = det_scores_sbj * det_scores_obj
        det_scores_spo = det_scores_so[:, None] * det_scores_prd[:, :2]
        det_scores_inds = argsort_desc(det_scores_spo)[:100]
        det_scores_top = det_scores_spo[det_scores_inds[:, 0],
                                        det_scores_inds[:, 1]]
        det_boxes_so_top = np.hstack((det_boxes_sbj[det_scores_inds[:, 0]],
                                      det_boxes_obj[det_scores_inds[:, 0]]))
        det_labels_p_top = det_labels_prd[det_scores_inds[:, 0],
                                          det_scores_inds[:, 1]]
        det_labels_spo_top = np.vstack(
            (det_labels_sbj[det_scores_inds[:, 0]], det_labels_p_top,
             det_labels_obj[det_scores_inds[:, 0]])).transpose()

        det_boxes_s_top = det_boxes_so_top[:, :4]
        det_boxes_o_top = det_boxes_so_top[:, 4:]
        det_labels_s_top = det_labels_spo_top[:, 0]
        det_labels_p_top = det_labels_spo_top[:, 1]
        det_labels_o_top = det_labels_spo_top[:, 2]
        out_dict = {}
        out_dict['boxes_s_top'] = det_boxes_s_top
        out_dict['boxes_o_top'] = det_boxes_o_top
        out_dict['labels_s_top'] = det_labels_s_top
        out_dict['labels_p_top'] = det_labels_p_top
        out_dict['labels_o_top'] = det_labels_o_top
        out_dict['scores_top'] = det_scores_top
        out_dict['image'] = im_file
        print('finished inferencing for {}'.format(im_file))
        finals.append(out_dict)

    with open('sg-out.pkl', 'wb') as fout:
        pickle.dump(finals, fout, pickle.HIGHEST_PROTOCOL)
    return finals
Example #6
    else:  # For subprocess call
        assert cfg.TEST.DATASETS, 'cfg.TEST.DATASETS shouldn\'t be empty'

    assert_and_infer_cfg()

    if not cfg.MODEL.RUN_BASELINE:
        assert bool(args.load_ckpt) ^ bool(args.load_detectron), \
            'Exactly one of --load_ckpt and --load_detectron should be specified.'

    # manually set args.cuda
    args.cuda = True

    if args.use_gt_boxes:
        if args.use_gt_labels:
            det_file = os.path.join(args.output_dir, 'rel_detections_gt_boxes_prdcls.pkl')
        else:
            det_file = os.path.join(args.output_dir, 'rel_detections_gt_boxes_sgcls.pkl')
    else:
        det_file = os.path.join(args.output_dir, 'rel_detections.pkl')

    box_proposals = None
    img_path = "Sakura.jpeg"

    model = initialize_model_from_cfg(args)

    im = cv2.imread(img_path)
    im_results = im_detect_rels(model, im, box_proposals=box_proposals, dataset_name=None)

    with open(img_path+".pkl", 'wb') as handle:
        pickle.dump(im_results, handle, protocol=pickle.HIGHEST_PROTOCOL)