def get_roidb_and_dataset(dataset_name, ind_range):
    """Load a JsonDataset and its roidb, optionally restricted to a range.

    Args:
        dataset_name: name understood by JsonDataset.
        ind_range: half-open (start, end) index pair, or None for all images.

    Returns:
        (roidb, dataset, start, end, total_num_images) where
        total_num_images is the dataset size BEFORE any range slicing.
    """
    dataset = JsonDataset(dataset_name)
    roidb = dataset.get_roidb()
    total_num_images = len(roidb)
    if ind_range is None:
        start, end = 0, total_num_images
    else:
        start, end = ind_range
        roidb = roidb[start:end]
    return roidb, dataset, start, end, total_num_images
# ---- Example #2 (scraped separator; original text: "Beispiel #2" / vote count "0") ----
def eval_kpts_cpu(args, net):
    """Run keypoint inference over a JSON dataset on CPU and evaluate it.

    Args:
        args: namespace providing dataset, dataset_dir, dataset_ann,
            min_size, max_size, and output_dir attributes.
        net: model handle forwarded to run_single_image.

    Side effects:
        Writes box and keypoint evaluation results into args.output_dir.
    """
    # load dataset
    ds = JsonDataset(args.dataset, args.dataset_dir, args.dataset_ann)
    roidb = ds.get_roidb()
    logger.warning("Loaded dataset {} with {} images".format(args.dataset, len(roidb)))

    # initialize result containers in the layout the evaluators expect
    all_results = empty_results(ds.num_classes, len(roidb))
    all_boxes = all_results["all_boxes"]
    all_keyps = all_results["all_keyps"]

    # run model
    for i, entry in enumerate(roidb):
        if i % 10 == 0:
            logger.warning("{}/{}".format(i, len(roidb)))
        ret = run_single_image(
            net,
            entry["image"],
            target_min_size=args.min_size,
            target_max_size=args.max_size,
        )
        # run_single_image can return None (the eval_segm_cpu variants guard
        # for this); skip the entry instead of crashing on tuple unpacking.
        if ret is None:
            continue
        boxes, xy_preds, classids = ret
        # Results are keyed per class; index 0 (background) stays empty.
        extend_results(i, all_boxes, [[], boxes])
        extend_results(i, all_keyps, [[], xy_preds])

    # exist_ok avoids the check-then-create race of exists()+makedirs()
    os.makedirs(args.output_dir, exist_ok=True)

    # evaluate results
    logger.info("Evaluating detections")
    evaluate_boxes(ds, all_boxes, args.output_dir, use_salt=False)

    logger.info("Evaluating keypoints")
    evaluate_keypoints(ds, all_boxes, all_keyps, args.output_dir, use_salt=False)
# ---- Example #3 (scraped separator; original text: "Beispiel #3" / vote count "0") ----
def eval_segm_cpu(args, net):
    """Run segmentation inference over a JSON dataset on CPU and evaluate it.

    Args:
        args: namespace providing dataset, dataset_dir, dataset_ann,
            min_size, max_size, and output_dir attributes.
        net: model handle forwarded to run_single_image.

    Side effects:
        Writes box and mask evaluation results into args.output_dir.
    """
    # load dataset
    ds = JsonDataset(args.dataset, args.dataset_dir, args.dataset_ann)
    roidb = ds.get_roidb()
    logger.warning("Loaded dataset {} with {} images".format(
        args.dataset, len(roidb)))

    # initialize result containers in the layout the evaluators expect
    all_results = empty_results(ds.num_classes, len(roidb))
    all_boxes = all_results["all_boxes"]
    all_segms = all_results["all_segms"]

    # run model
    for i, entry in enumerate(roidb):
        if i % 10 == 0:
            logger.warning("{}/{}".format(i, len(roidb)))
        ret = run_single_image(
            net,
            entry["image"],
            target_min_size=args.min_size,
            target_max_size=args.max_size,
        )
        # run_single_image can return None; only record successful entries.
        if ret is not None:
            extend_results_with_classes(i, all_boxes,
                                        (ret["boxes"], ret["classids"]))
            extend_seg_results_with_classes(i, all_segms,
                                            (ret["im_masks"], ret["classids"]))

    # exist_ok avoids the check-then-create race of exists()+makedirs()
    os.makedirs(args.output_dir, exist_ok=True)

    # evaluate results
    logger.info("Evaluating detections")
    evaluate_boxes(ds, all_boxes, args.output_dir, use_salt=False)

    logger.info("Evaluating segmentations")
    evaluate_masks(ds, all_boxes, all_segms, args.output_dir, use_salt=False)
# ---- Example #4 (scraped separator; original text: "Beispiel #4" / vote count "0") ----
def eval_segm_cpu(args, net):
    """Run segmentation inference (serially or via a process pool) and evaluate.

    Args:
        args: namespace providing dataset, dataset_dir, dataset_ann,
            min_size, max_size, output_dir, and parallel (0 = serial,
            1 = one worker per CPU, N>1 = N workers) attributes.
        net: model handle forwarded to run_single_image / the worker init.

    Side effects:
        Writes box and mask evaluation results into args.output_dir.
    """
    # load dataset
    ds = JsonDataset(args.dataset, args.dataset_dir, args.dataset_ann)
    roidb = ds.get_roidb()
    logger.warning("Loaded dataset {} with {} images".format(
        args.dataset, len(roidb)))

    # initialize result containers in the layout the evaluators expect
    all_results = empty_results(ds.num_classes, len(roidb))
    all_boxes = all_results["all_boxes"]
    all_segms = all_results["all_segms"]

    # run model
    if args.parallel == 0:
        for i, entry in enumerate(roidb):
            if i % 10 == 0:
                logger.warning("{}/{}".format(i, len(roidb)))
            ret = run_single_image(
                net,
                entry["image"],
                target_min_size=args.min_size,
                target_max_size=args.max_size,
            )
            # run_single_image can return None; only record successes.
            if ret is not None:
                extend_results_with_classes(i, all_boxes,
                                            (ret["boxes"], ret["classids"]))
                extend_seg_results_with_classes(
                    i, all_segms, (ret["im_masks"], ret["classids"]))
    else:
        run_args = {
            "net": net,
            "target_min_size": args.min_size,
            "target_max_size": args.max_size,
        }

        assert args.parallel >= 1
        logger.info('CPU counts {}'.format(multiprocessing.cpu_count()))
        # parallel == 1 means "use every CPU"; otherwise it is the worker count
        mp_count = multiprocessing.cpu_count(
        ) if args.parallel == 1 else args.parallel
        logger.info('Multiprocess counts {}'.format(mp_count))
        pool = multiprocessing.Pool(
            mp_count,
            _run_args_init,
            (run_args, ),
        )
        try:
            # chunksize=100 amortizes IPC cost of shipping entries to workers
            for i, ret in enumerate(pool.imap(_run_single_entry, roidb, 100)):
                if i % 10 == 0:
                    logger.warning("{}/{}".format(i, len(roidb)))
                # NOTE(review): mirror the serial branch's None guard;
                # presumably _run_single_entry can also return None — confirm.
                if ret is None:
                    continue
                extend_results_with_classes(i, all_boxes,
                                            (ret['boxes'], ret['classids']))
                extend_seg_results_with_classes(i, all_segms,
                                                (ret['im_masks'], ret['classids']))
        finally:
            # Without close()+join() the worker processes were leaked.
            pool.close()
            pool.join()

    # exist_ok avoids the check-then-create race of exists()+makedirs()
    os.makedirs(args.output_dir, exist_ok=True)

    # evaluate results
    logger.info("Evaluating detections")
    evaluate_boxes(ds, all_boxes, args.output_dir, use_salt=False)

    logger.info("Evaluating segmentations")
    evaluate_masks(ds, all_boxes, all_segms, args.output_dir, use_salt=False)