def get_roidb_and_dataset(dataset_name, ind_range):
    """Load a JsonDataset and its roidb, optionally restricted to the
    half-open image index range given by ind_range = (start, end)."""
    dataset = JsonDataset(dataset_name)
    roidb = dataset.get_roidb()
    if ind_range is not None:
        total_num_images = len(roidb)
        start, end = ind_range
        roidb = roidb[start:end]
    else:
        start = 0
        end = len(roidb)
        total_num_images = end
    return roidb, dataset, start, end, total_num_images
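A quick usage sketch: ind_range is a half-open (start, end) tuple, so a sharded evaluation can hand each worker one slice of the image index range. The dataset name below is a placeholder for illustration, not taken from the original code.

# Hypothetical call: evaluate only the first 1250 images of an assumed dataset.
roidb, dataset, start, end, total_num_images = get_roidb_and_dataset(
    "coco_2014_minival", ind_range=(0, 1250))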
Example #2
def __init__(self, args):
    self.args = args
    self.ds = JsonDataset(args)
    # Pre-allocate per-class result containers for boxes and segmentations.
    self.all_results = self.emptyResults(self.ds.num_classes,
                                         args.total_num)
    self.all_boxes = self.all_results["all_boxes"]
    self.all_segms = self.all_results["all_segms"]
    self.index = 0
Example #3
def eval_kpts_cpu(args, net):
    # load dataset
    ds = JsonDataset(args.dataset, args.dataset_dir, args.dataset_ann)
    roidb = ds.get_roidb()
    logger.warning("Loaded dataset {} with {} images".format(args.dataset, len(roidb)))

    # initialize result
    all_results = empty_results(ds.num_classes, len(roidb))
    all_boxes = all_results["all_boxes"]
    all_keyps = all_results["all_keyps"]

    # run model
    for i, entry in enumerate(roidb):
        # Uncomment to only push the street corner image:
        # if entry["id"] != 8211:
        #     continue
        # print()
        # print(entry["image"])
        # print()
        if i % 10 == 0:
            logger.warning("{}/{}".format(i, len(roidb)))
        ret = run_single_image(
            net,
            entry["image"],
            target_min_size=args.min_size,
            target_max_size=args.max_size,
        )
        boxes, xy_preds, classids = ret
        extend_results(i, all_boxes, [[], boxes])
        extend_results(i, all_keyps, [[], xy_preds])

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # evaluate results
    logger.info("Evaluating detections")
    evaluate_boxes(ds, all_boxes, args.output_dir, use_salt=False)

    logger.info("Evaluating keypoints")
    evaluate_keypoints(ds, all_boxes, all_keyps, args.output_dir, use_salt=False)
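The helpers empty_results and extend_results are not shown in this listing. A plausible sketch, inferred only from the call sites above (a dict of per-class, per-image lists; this is an assumption, not the original helpers):

# Sketch only: reconstructed from how the helpers are used above.
def empty_results(num_classes, num_images):
    # One list per class, with one empty slot per image, for each result type.
    return {
        "all_boxes": [[[] for _ in range(num_images)] for _ in range(num_classes)],
        "all_segms": [[[] for _ in range(num_images)] for _ in range(num_classes)],
        "all_keyps": [[[] for _ in range(num_images)] for _ in range(num_classes)],
    }

def extend_results(index, all_res, im_res):
    # Store image `index`'s per-class results; class 0 (background) is skipped.
    for cls_idx in range(1, len(im_res)):
        all_res[cls_idx][index] = im_res[cls_idx]

This matches the calls extend_results(i, all_boxes, [[], boxes]) above, where the leading empty list stands in for the background class.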
Example #4
def eval_segm_cpu(args, net):
    # load dataset
    ds = JsonDataset(args.dataset, args.dataset_dir, args.dataset_ann)
    roidb = ds.get_roidb()
    logger.warning("Loaded dataset {} with {} images".format(
        args.dataset, len(roidb)))

    # initialize result
    all_results = empty_results(ds.num_classes, len(roidb))
    all_boxes = all_results["all_boxes"]
    all_segms = all_results["all_segms"]

    # run model
    for i, entry in enumerate(roidb):
        if i % 10 == 0:
            logger.warning("{}/{}".format(i, len(roidb)))
        ret = run_single_image(
            net,
            entry["image"],
            target_min_size=args.min_size,
            target_max_size=args.max_size,
        )
        if ret is not None:
            extend_results_with_classes(i, all_boxes,
                                        (ret["boxes"], ret["classids"]))
            extend_seg_results_with_classes(i, all_segms,
                                            (ret["im_masks"], ret["classids"]))

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # evaluate results
    logger.info("Evaluating detections")
    evaluate_boxes(ds, all_boxes, args.output_dir, use_salt=False)

    logger.info("Evaluating segmentations")
    evaluate_masks(ds, all_boxes, all_segms, args.output_dir, use_salt=False)
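For reference, a hypothetical driver for these eval_* entry points. The flag names mirror the attributes read above; load_net and the size defaults (typical Detectron values) are assumptions, not part of the original code.

# Sketch only: a minimal command-line wrapper around eval_segm_cpu.
import argparse

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", required=True)
    parser.add_argument("--dataset-dir", default="")
    parser.add_argument("--dataset-ann", default="")
    parser.add_argument("--min-size", type=int, default=800)
    parser.add_argument("--max-size", type=int, default=1333)
    parser.add_argument("--output-dir", default="/tmp/eval")
    args = parser.parse_args()
    net = load_net(args)  # assumed helper that builds and loads the model
    eval_segm_cpu(args, net)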
Example #5
def eval_segm_cpu(args, net):
    # load dataset
    ds = JsonDataset(args.dataset, args.dataset_dir, args.dataset_ann)
    roidb = ds.get_roidb()
    logger.warning("Loaded dataset {} with {} images".format(
        args.dataset, len(roidb)))

    # initialize result
    all_results = empty_results(ds.num_classes, len(roidb))
    all_boxes = all_results["all_boxes"]
    all_segms = all_results["all_segms"]

    # run model
    if args.parallel == 0:
        for i, entry in enumerate(roidb):
            if i % 10 == 0:
                logger.warning("{}/{}".format(i, len(roidb)))
            ret = run_single_image(
                net,
                entry["image"],
                target_min_size=args.min_size,
                target_max_size=args.max_size,
            )
            if ret is not None:
                extend_results_with_classes(i, all_boxes,
                                            (ret["boxes"], ret["classids"]))
                extend_seg_results_with_classes(
                    i, all_segms, (ret["im_masks"], ret["classids"]))
    else:
        run_args = {
            "net": net,
            "target_min_size": args.min_size,
            "target_max_size": args.max_size,
        }

        assert args.parallel >= 1
        logger.info('CPU counts {}'.format(multiprocessing.cpu_count()))
        mp_count = (multiprocessing.cpu_count()
                    if args.parallel == 1 else args.parallel)
        logger.info('Multiprocess counts {}'.format(mp_count))
        pool = multiprocessing.Pool(
            mp_count,
            _run_args_init,
            (run_args, ),
        )

        for i, ret in enumerate(pool.imap(_run_single_entry, roidb, 100)):
            if i % 10 == 0:
                logger.warning("{}/{}".format(i, len(roidb)))
            # Mirror the serial branch: skip images the model returned nothing for.
            if ret is not None:
                extend_results_with_classes(i, all_boxes,
                                            (ret['boxes'], ret['classids']))
                extend_seg_results_with_classes(
                    i, all_segms, (ret['im_masks'], ret['classids']))

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # evaluate results
    logger.info("Evaluating detections")
    evaluate_boxes(ds, all_boxes, args.output_dir, use_salt=False)

    logger.info("Evaluating segmentations")
    evaluate_masks(ds, all_boxes, all_segms, args.output_dir, use_salt=False)
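The parallel branch relies on _run_args_init and _run_single_entry, which the listing does not show. A sketch of the standard Pool-initializer pattern their usage implies (the module-global name _shared_run_args is an assumption):

# Sketch only: reconstructs the usual Pool-initializer pattern.
_shared_run_args = None

def _run_args_init(run_args):
    # Runs once per worker process; stashes the model and resize settings.
    global _shared_run_args
    _shared_run_args = run_args

def _run_single_entry(entry):
    # Worker body: run the stashed model on one roidb entry.
    return run_single_image(
        _shared_run_args["net"],
        entry["image"],
        target_min_size=_shared_run_args["target_min_size"],
        target_max_size=_shared_run_args["target_max_size"],
    )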
Example #6
import sys

import numpy as np
import torch
import torch.nn as nn
import torch.utils.data as torchdata

# num_train and max_in_memory are assumed to be set earlier in the script.
early_stop_loss = 1e-7
start_idx = 0
end_idx = max_in_memory
# Number of chunks needed to stream the training set through memory.
iter_per_epoch = int(np.ceil(num_train / float(max_in_memory)))
indices = list(np.arange(0, num_train, max_in_memory)) + [num_train]
print('iter_per_epoch:', iter_per_epoch)
print(indices)

# Use GPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)

# Load the test data.
print('Loading test data ...')
test_set = JsonDataset(sys.argv[2])
test_loader = torchdata.DataLoader(test_set, batch_size=64, shuffle=True)

# Create the network.
#input_channels = test_set.images.shape[-1]
input_channels = 3
#net = NetCCFFF(input_channels)
net = Net(input_channels)

print('Copying network to GPU ...')
if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs.")
    net = nn.DataParallel(net)
net.to(device)

net.load_state_dict(torch.load(sys.argv[3]))
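The script ends after restoring the weights. A minimal sketch of the evaluation loop that could follow, assuming test_loader yields (images, labels) batches (the original does not show this part):

# Sketch only: assumed batch layout for the classification test loop.
net.eval()
correct = total = 0
with torch.no_grad():
    for images, labels in test_loader:
        images, labels = images.to(device), labels.to(device)
        outputs = net(images)
        predicted = outputs.argmax(dim=1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Test accuracy: {:.4f}'.format(correct / total))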