Example #1
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', choices=('voc', 'coco'))
    parser.add_argument('--model', choices=sorted(models.keys()))
    parser.add_argument('--pretrained-model')
    parser.add_argument('--batchsize', type=int)
    parser.add_argument('--gpu', type=int, default=-1)
    args = parser.parse_args()

    dataset, eval_, model, batchsize = setup(args.dataset, args.model,
                                             args.pretrained_model,
                                             args.batchsize)

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    model.use_preset('evaluate')

    iterator = iterators.MultithreadIterator(dataset,
                                             batchsize,
                                             repeat=False,
                                             shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(model.predict,
                                                           iterator,
                                                           hook=ProgressHook(
                                                               len(dataset)))
    # delete unused iterators explicitly
    del in_values

    eval_(out_values, rest_values)
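
All of these examples revolve around chainercv.utils.apply_to_iterator, which returns lazy iterators: predictions are computed only as out_values and rest_values are consumed, and deleting in_values lets the input images be freed as evaluation proceeds. Below is a minimal, self-contained sketch of that behavior; the dummy dataset and predict function are hypothetical stand-ins, not code from the examples.

import numpy as np
from chainer import iterators
from chainercv.utils import apply_to_iterator

def predict(imgs):
    # Dummy "prediction": one scalar label per input image.
    return [np.int32(img.mean() > 0) for img in imgs]

# Each sample is (img, label): the first value feeds predict (n_input=1 by
# default) and the remaining values are passed through as rest_values.
dataset = [(np.random.randn(3, 32, 32).astype(np.float32), np.int32(i % 2))
           for i in range(10)]
iterator = iterators.SerialIterator(dataset, 2, repeat=False, shuffle=False)

in_values, out_values, rest_values = apply_to_iterator(predict, iterator)
del in_values  # drop the unused input iterator so images can be freed

pred_labels, = out_values  # a 1-tuple of iterators, one per output
gt_labels, = rest_values
for pred, gt in zip(pred_labels, gt_labels):
    print(int(pred), int(gt))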
Example #2
def test(net,
         test_data,
         metric,
         use_gpus,
         calc_weight_count=False,
         extended_log=False):
    tic = time.time()

    predictor = test_data["predictor_class"](base_model=net)
    if use_gpus:
        predictor.to_gpu()

    if calc_weight_count:
        weight_count = net.count_params()
        logging.info("Model: {} trainable parameters".format(weight_count))

    in_values, out_values, rest_values = apply_to_iterator(
        predictor.predict,
        test_data["iterator"],
        hook=ProgressHook(test_data["ds_len"]))
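    # delete the unused input iterator explicitly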
    del in_values

    pred_labels, = out_values
    gt_labels, = rest_values

    labels = iter(gt_labels)
    preds = iter(pred_labels)
    for label, pred in zip(labels, preds):
        metric.update(label, pred)

    accuracy_msg = report_accuracy(metric=metric, extended_log=extended_log)
    logging.info("Test: {}".format(accuracy_msg))
    logging.info("Time cost: {:.4f} sec".format(time.time() - tic))
Example #3
def test(net,
         val_iterator,
         val_dataset_len,
         num_gpus,
         calc_weight_count=False,
         extended_log=False):
    tic = time.time()

    predictor = CIFARPredictor(base_model=net)

    if num_gpus > 0:
        predictor.to_gpu()

    if calc_weight_count:
        weight_count = net.count_params()
        logging.info('Model: {} trainable parameters'.format(weight_count))

    in_values, out_values, rest_values = apply_to_iterator(
        predictor.predict, val_iterator, hook=ProgressHook(val_dataset_len))
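    # delete the unused input iterator explicitly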
    del in_values

    pred_probs, = out_values
    gt_labels, = rest_values

    y = np.array(list(pred_probs))
    t = np.array(list(gt_labels))

    acc_val_value = F.accuracy(y=y, t=t).data
    err_val = 1.0 - acc_val_value

    if extended_log:
        logging.info('Test: err={err:.4f} ({err})'.format(err=err_val))
    else:
        logging.info('Test: err={err:.4f}'.format(err=err_val))
    logging.info('Time cost: {:.4f} sec'.format(time.time() - tic))
Example #4
def test(net, test_data, metric, calc_weight_count=False, extended_log=False):
    tic = time.time()

    predictor = Predictor(model=net, transform=None)

    if calc_weight_count:
        weight_count = net.count_params()
        logging.info("Model: {} trainable parameters".format(weight_count))

    _, out_values, rest_values = apply_to_iterator(
        func=predictor,
        iterator=test_data["iterator"],
        hook=ProgressHook(test_data["ds_len"]))
    assert len(rest_values) == 1
    assert len(out_values) == 1

    # Update the metric with one vectorized call over all samples.
    import numpy as np
    metric.update(labels=np.array(list(rest_values[0])),
                  preds=np.array(list(out_values[0])))

    accuracy_msg = report_accuracy(metric=metric, extended_log=extended_log)
    logging.info("Test: {}".format(accuracy_msg))
    logging.info("Time cost: {:.4f} sec".format(time.time() - tic))
Example #5
def main():
    args = parse_args()
    cfg.merge_from_file(args.config)
    cfg.freeze()

    model = setup_model(cfg)
    load_pretrained_model(cfg, args.config, model, args.pretrained_model)

    dataset = setup_dataset(cfg, 'eval')
    iterator = iterators.MultithreadIterator(dataset,
                                             args.batchsize,
                                             repeat=False,
                                             shuffle=False)

    model.use_preset('evaluate')
    if args.gpu >= 0:
        model.to_gpu(args.gpu)

    in_values, out_values, rest_values = apply_to_iterator(model.predict,
                                                           iterator,
                                                           hook=ProgressHook(
                                                               len(dataset)))
    # delete unused iterators explicitly
    del in_values

    if cfg.dataset.eval == 'COCO':
        eval_coco(out_values, rest_values)
    elif cfg.dataset.eval == 'VOC':
        eval_voc(out_values, rest_values)
    else:
        raise ValueError('unknown evaluation dataset: {}'.format(
            cfg.dataset.eval))
Example #6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--dataset', choices=('cityscapes', 'ade20k', 'camvid', 'voc'))
    parser.add_argument('--model', choices=sorted(models.keys()))
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--pretrained-model')
    parser.add_argument('--batchsize', type=int)
    parser.add_argument('--input-size', type=int, default=None)
    args = parser.parse_args()

    dataset, eval_, model, batchsize = setup(
        args.dataset, args.model, args.pretrained_model,
        args.batchsize, args.input_size)

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    iterator = iterators.SerialIterator(
        dataset, batchsize, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, iterator, hook=ProgressHook(len(dataset)))
    # Delete an iterator of images to save memory usage.
    del in_values

    eval_(out_values, rest_values)
Example #7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', choices=('sbd', 'coco'))
    parser.add_argument('--model', choices=sorted(models.keys()))
    parser.add_argument('--pretrained-model')
    parser.add_argument('--batchsize', type=int)
    args = parser.parse_args()

    comm = chainermn.create_communicator('pure_nccl')
    device = comm.intra_rank

    dataset, label_names, eval_, model, batchsize = setup(
        args.dataset, args.model, args.pretrained_model, args.batchsize)

    chainer.cuda.get_device_from_id(device).use()
    model.to_gpu()

    if comm.rank != 0:
        # Non-root ranks only run prediction on batches scattered from
        # rank 0 inside apply_to_iterator, then exit.
        apply_to_iterator(model.predict, None, comm=comm)
        return

    iterator = iterators.MultithreadIterator(
        dataset, batchsize * comm.size, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, iterator, hook=ProgressHook(len(dataset)), comm=comm)
    # delete unused iterators explicitly
    del in_values

    eval_(out_values, rest_values)
Example #8
def eva_coco(dataset, func, limit=1000, preset='evaluate'):
    total = limit if limit else len(dataset)
    orig_ids = dataset.ids.copy()
    dataset.ids = dataset.ids[:total]
    iterator = iterators.SerialIterator(dataset, 1, repeat=False, shuffle=False)
    in_values, out_values, rest_values = apply_to_iterator(
        func, iterator, hook=ProgressHook(len(dataset)))
    pred_bboxes, pred_labels, pred_scores = out_values
    gt_bboxes, gt_labels, gt_areas, gt_crowdeds = rest_values
    result = eval_detection_coco(
        pred_bboxes, pred_labels, pred_scores,
        gt_bboxes, gt_labels, gt_areas, gt_crowdeds)
    keys = [
        'map/iou=0.50:0.95/area=all/max_dets=100',
        'map/iou=0.50/area=all/max_dets=100',
        'map/iou=0.75/area=all/max_dets=100',
        'map/iou=0.50:0.95/area=small/max_dets=100',
        'map/iou=0.50:0.95/area=medium/max_dets=100',
        'map/iou=0.50:0.95/area=large/max_dets=100',
        'mar/iou=0.50:0.95/area=all/max_dets=1',
        'mar/iou=0.50:0.95/area=all/max_dets=10',
        'mar/iou=0.50:0.95/area=all/max_dets=100',
        'mar/iou=0.50:0.95/area=small/max_dets=100',
        'mar/iou=0.50:0.95/area=medium/max_dets=100',
        'mar/iou=0.50:0.95/area=large/max_dets=100',
    ]
    print('')
    results = []
    for key in keys:
        print('{:s}: {:f}'.format(key, result[key]))
        results.append(result[key])
    dataset.ids = orig_ids
    return results
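
A hypothetical call to eva_coco, assuming a trained ChainerCV detector whose predict method follows the detection API, and a COCO dataset that returns areas and crowded flags (as in Examples 15 and 18):

# model stands in for any trained ChainerCV detector,
# e.g. FasterRCNNVGG16 with COCO weights.
dataset = COCOBboxDataset(split='minival', use_crowded=True,
                          return_area=True, return_crowded=True)
model.use_preset('evaluate')
scores = eva_coco(dataset, model.predict, limit=100)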
Example #9
    def test_progress_hook_with_infinite_iterator(self):
        iterator = SerialIterator(self.dataset, 2)

        in_values, out_values, rest_values = apply_to_iterator(
            self.func, iterator, hook=ProgressHook())

        for _ in range(10):
            next(in_values[0])
Example #10
    def test_progress_hook_with_infinite_iterator(self):
        iterator = SerialIterator(self.dataset, 2)

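        # apply_prediction_to_iterator is the older counterpart of
        # apply_to_iterator; inputs are assumed to be images.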
        imgs, pred_values, gt_values = apply_prediction_to_iterator(
            self.predict, iterator, hook=ProgressHook())

        for _ in range(10):
            next(imgs)
Example #11
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset',
                        choices=('cityscapes', 'ade20k', 'camvid'))
    parser.add_argument('--model', choices=('pspnet_resnet101', 'segnet'))
    parser.add_argument('--pretrained-model')
    parser.add_argument('--input-size', type=int, default=None)
    args = parser.parse_args()

    comm = chainermn.create_communicator()
    device = comm.intra_rank

    dataset, label_names, model = get_dataset_and_model(
        args.dataset, args.model, args.pretrained_model,
        (args.input_size, args.input_size))
    assert len(dataset) % comm.size == 0, \
        "The size of the dataset should be a multiple "\
        "of the number of GPUs"

    chainer.cuda.get_device_from_id(device).use()
    model.to_gpu()

    if comm.rank == 0:
        indices = np.arange(len(dataset))
    else:
        indices = None
    indices = chainermn.scatter_dataset(indices, comm)
    dataset = dataset.slice[indices]

    it = iterators.SerialIterator(dataset, 1, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(model.predict,
                                                           it,
                                                           hook=ProgressHook(
                                                               len(dataset)))
    # Delete an iterator of images to save memory usage.
    del in_values
    pred_labels, = out_values
    gt_labels, = rest_values

    confusion = calc_semantic_segmentation_confusion(pred_labels, gt_labels)
    confusion = comm.allreduce(confusion)

    if comm.rank == 0:
        iou = calc_semantic_segmentation_iou(confusion)
        pixel_accuracy = np.diag(confusion).sum() / confusion.sum()
        class_accuracy = np.diag(confusion) / np.sum(confusion, axis=1)

        for iu, label_name in zip(iou, label_names):
            print('{:>23} : {:.4f}'.format(label_name, iu))
        print('=' * 34)
        print('{:>23} : {:.4f}'.format('mean IoU', np.nanmean(iou)))
        print('{:>23} : {:.4f}'.format('Class average accuracy',
                                       np.nanmean(class_accuracy)))
        print('{:>23} : {:.4f}'.format('Global average accuracy',
                                       pixel_accuracy))
Example #12
    def test_progress_hook(self):
        iterator = SerialIterator(self.dataset, 2, repeat=False)

        in_values, out_values, rest_values = apply_to_iterator(
            self.func, iterator,
            hook=ProgressHook(n_total=len(self.dataset)))

        # consume all data
        for _ in in_values[0]:
            pass
Example #13
    def test_progress_hook(self):
        iterator = SerialIterator(self.dataset, 2, repeat=False)

        imgs, pred_values, gt_values = apply_prediction_to_iterator(
            self.predict, iterator,
            hook=ProgressHook(n_total=len(self.dataset)))

        # consume all data
        for _ in imgs:
            pass
Example #14
def test(net,
         val_iterator,
         val_dataset_len,
         num_gpus,
         input_image_size=224,
         resize_inv_factor=0.875,
         calc_weight_count=False,
         extended_log=False):
    assert resize_inv_factor > 0.0
    resize_value = int(math.ceil(float(input_image_size) / resize_inv_factor))

    tic = time.time()

    predictor = ImagenetPredictor(
        base_model=net,
        scale_size=resize_value,
        crop_size=input_image_size)

    if num_gpus > 0:
        predictor.to_gpu()

    if calc_weight_count:
        weight_count = net.count_params()
        logging.info('Model: {} trainable parameters'.format(weight_count))

    in_values, out_values, rest_values = apply_to_iterator(
        predictor.predict,
        val_iterator,
        hook=ProgressHook(val_dataset_len))
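    # delete the unused input iterator explicitly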
    del in_values

    pred_probs, = out_values
    gt_labels, = rest_values

    y = np.array(list(pred_probs))
    t = np.array(list(gt_labels))

    top1_acc = F.accuracy(
        y=y,
        t=t).data
    top5_acc = top_k_accuracy(
        y=y,
        t=t,
        k=5).data
    err_top1_val = 1.0 - top1_acc
    err_top5_val = 1.0 - top5_acc

    if extended_log:
        logging.info('Test: err-top1={top1:.4f} ({top1})\terr-top5={top5:.4f} ({top5})'.format(
            top1=err_top1_val, top5=err_top5_val))
    else:
        logging.info('Test: err-top1={top1:.4f}\terr-top5={top5:.4f}'.format(
            top1=err_top1_val, top5=err_top5_val))
    logging.info('Time cost: {:.4f} sec'.format(
        time.time() - tic))
Example #15
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--model', choices=('resnet50', 'resnet101'))
    parser.add_argument(
        '--mean', choices=('chainercv', 'detectron'), default='chainercv')
    parser.add_argument('--batchsize', type=int, default=1)
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--pretrained-model')
    group.add_argument('--snapshot')
    args = parser.parse_args()

    if args.model == 'resnet50':
        model = FasterRCNNFPNResNet50(n_fg_class=len(coco_bbox_label_names),
                                      mean=args.mean)
    elif args.model == 'resnet101':
        model = FasterRCNNFPNResNet101(n_fg_class=len(coco_bbox_label_names),
                                       mean=args.mean)

    if args.pretrained_model:
        chainer.serializers.load_npz(args.pretrained_model, model)
    elif args.snapshot:
        chainer.serializers.load_npz(
            args.snapshot, model, path='updater/model:main/model/')

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    model.use_preset('evaluate')

    dataset = COCOBboxDataset(
        split='minival',
        use_crowded=True,
        return_area=True,
        return_crowded=True)
    iterator = iterators.MultithreadIterator(
        dataset, args.batchsize, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, iterator, hook=ProgressHook(len(dataset)))
    # delete unused iterators explicitly
    del in_values

    pred_bboxes, pred_labels, pred_scores = out_values
    gt_bboxes, gt_labels, gt_area, gt_crowded = rest_values

    result = eval_detection_coco(
        pred_bboxes, pred_labels, pred_scores,
        gt_bboxes, gt_labels, gt_area, gt_crowded)

    print()
    for area in ('all', 'large', 'medium', 'small'):
        print('mmAP ({}):'.format(area),
              result['map/iou=0.50:0.95/area={}/max_dets=100'.format(area)])
Example #16
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        choices=('fcis_psroi_align_resnet101', ),
                        default='fcis_psroi_align_resnet101')
    parser.add_argument('--pretrained-model')
    parser.add_argument('--iou-thresh', type=float, default=0.5)
    parser.add_argument('--gpu', type=int, default=-1)
    args = parser.parse_args()

    if args.model == 'fcis_psroi_align_resnet101':
        if args.pretrained_model:
            model = FCISPSROIAlignResNet101(
                n_fg_class=len(sbd_instance_segmentation_label_names),
                pretrained_model=args.pretrained_model)
        else:
            model = FCISPSROIAlignResNet101(pretrained_model='sbd')

    model.use_preset('evaluate')

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    dataset = SBDInstanceSegmentationDataset(split='val')
    iterator = iterators.SerialIterator(dataset,
                                        1,
                                        repeat=False,
                                        shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(model.predict,
                                                           iterator,
                                                           hook=ProgressHook(
                                                               len(dataset)))
    # delete unused iterators explicitly
    del in_values

    pred_masks, pred_labels, pred_scores = out_values
    gt_masks, gt_labels = rest_values

    result = eval_instance_segmentation_voc(pred_masks,
                                            pred_labels,
                                            pred_scores,
                                            gt_masks,
                                            gt_labels,
                                            args.iou_thresh,
                                            use_07_metric=True)

    print('')
    print('mAP: {:f}'.format(result['map']))
    for l, name in enumerate(sbd_instance_segmentation_label_names):
        if result['ap'][l]:
            print('{:s}: {:f}'.format(name, result['ap'][l]))
        else:
            print('{:s}: -'.format(name))
Example #17
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset',
                        choices=('cityscapes', 'ade20k', 'camvid', 'voc'))
    parser.add_argument('--model',
                        choices=('pspnet_resnet101', 'segnet',
                                 'deeplab_v3plus_xception65'))
    parser.add_argument('--pretrained-model')
    parser.add_argument('--input-size', type=int, default=None)
    args = parser.parse_args()

    comm = chainermn.create_communicator('pure_nccl')
    device = comm.intra_rank

    if args.input_size is None:
        input_size = None
    else:
        input_size = (args.input_size, args.input_size)

    dataset, label_names, model = get_dataset_and_model(
        args.dataset, args.model, args.pretrained_model, input_size)

    chainer.cuda.get_device_from_id(device).use()
    model.to_gpu()

    if comm.rank != 0:
        apply_to_iterator(model.predict, None, comm=comm)
        return

    it = iterators.MultithreadIterator(dataset,
                                       comm.size,
                                       repeat=False,
                                       shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(model.predict,
                                                           it,
                                                           hook=ProgressHook(
                                                               len(dataset)),
                                                           comm=comm)
    # Delete an iterator of images to save memory usage.
    del in_values
    pred_labels, = out_values
    gt_labels, = rest_values

    result = eval_semantic_segmentation(pred_labels, gt_labels)

    for iu, label_name in zip(result['iou'], label_names):
        print('{:>23} : {:.4f}'.format(label_name, iu))
    print('=' * 34)
    print('{:>23} : {:.4f}'.format('mean IoU', result['miou']))
    print('{:>23} : {:.4f}'.format('Class average accuracy',
                                   result['mean_class_accuracy']))
    print('{:>23} : {:.4f}'.format('Global average accuracy',
                                   result['pixel_accuracy']))
Example #18
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--pretrained-model', default='coco')
    parser.add_argument('--gpu', type=int, default=-1)
    args = parser.parse_args()

    model = LightHeadRCNNResNet101(
        n_fg_class=len(coco_bbox_label_names),
        pretrained_model=args.pretrained_model)
    model.use_preset('evaluate')

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    dataset = COCOBboxDataset(
        split='minival', use_crowded=True,
        return_crowded=True, return_area=True)
    iterator = iterators.SerialIterator(
        dataset, 1, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, iterator, hook=ProgressHook(len(dataset)))
    # delete unused iterators explicitly
    del in_values

    pred_bboxes, pred_labels, pred_scores = out_values
    gt_bboxes, gt_labels, gt_areas, gt_crowdeds = rest_values

    result = eval_detection_coco(
        pred_bboxes, pred_labels, pred_scores,
        gt_bboxes, gt_labels, gt_areas, gt_crowdeds)

    keys = [
        'map/iou=0.50:0.95/area=all/max_dets=100',
        'map/iou=0.50/area=all/max_dets=100',
        'map/iou=0.75/area=all/max_dets=100',
        'map/iou=0.50:0.95/area=small/max_dets=100',
        'map/iou=0.50:0.95/area=medium/max_dets=100',
        'map/iou=0.50:0.95/area=large/max_dets=100',
        'mar/iou=0.50:0.95/area=all/max_dets=1',
        'mar/iou=0.50:0.95/area=all/max_dets=10',
        'mar/iou=0.50:0.95/area=all/max_dets=100',
        'mar/iou=0.50:0.95/area=small/max_dets=100',
        'mar/iou=0.50:0.95/area=medium/max_dets=100',
        'mar/iou=0.50:0.95/area=large/max_dets=100',
    ]

    print('')
    for key in keys:
        print('{:s}: {:f}'.format(key, result[key]))
Example #19
def main():
    parser = argparse.ArgumentParser(
        description='Learning convnet from ILSVRC2012 dataset')
    parser.add_argument('val', help='Path to root of the validation dataset')
    parser.add_argument(
        '--model', choices=('vgg16', 'resnet50', 'resnet101', 'resnet152'))
    parser.add_argument('--pretrained_model', default='imagenet')
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--batchsize', type=int, default=32)
    parser.add_argument('--crop', choices=('center', '10'), default='center')
    parser.add_argument('--resnet_mode', default='he')
    args = parser.parse_args()

    dataset = DirectoryParsingLabelDataset(args.val)
    label_names = directory_parsing_label_names(args.val)
    n_class = len(label_names)
    iterator = iterators.MultiprocessIterator(
        dataset, args.batchsize, repeat=False, shuffle=False,
        n_processes=6, shared_mem=300000000)

    if args.model == 'vgg16':
        extractor = VGG16(n_class, args.pretrained_model)
    elif args.model == 'resnet50':
        extractor = ResNet50(
            n_class, args.pretrained_model, mode=args.resnet_mode)
    elif args.model == 'resnet101':
        extractor = ResNet101(
            n_class, args.pretrained_model, mode=args.resnet_mode)
    elif args.model == 'resnet152':
        extractor = ResNet152(
            n_class, args.pretrained_model, mode=args.resnet_mode)
    model = FeaturePredictor(
        extractor, crop_size=224, scale_size=256, crop=args.crop)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()

    print('Model has been prepared. Evaluation starts.')
    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, iterator, hook=ProgressHook(len(dataset)))
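    # delete the unused input iterator explicitly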
    del in_values

    pred_probs, = out_values
    gt_labels, = rest_values

    accuracy = F.accuracy(
        np.array(list(pred_probs)), np.array(list(gt_labels))).data
    print()
    print('Top 1 Error {}'.format(1. - accuracy))
Example #20
def test(net, test_data, metric, calc_weight_count=False, extended_log=False):
    """
    Main test routine.

    Parameters
    ----------
    net : Chain
        Model.
    test_data : dict
        Data loader.
    metric : EvalMetric
        Metric object instance.
    calc_weight_count : bool, default False
        Whether to calculate count of weights.
    extended_log : bool, default False
        Whether to log more precise accuracy values.
    """
    tic = time.time()

    predictor = Predictor(model=net, transform=None)

    if calc_weight_count:
        weight_count = net.count_params()
        logging.info("Model: {} trainable parameters".format(weight_count))

    in_values, out_values, rest_values = apply_to_iterator(
        func=predictor,
        iterator=test_data["iterator"],
        hook=ProgressHook(test_data["ds_len"]))
    assert len(rest_values) == 1
    assert len(out_values) == 1
    assert len(in_values) == 1

    # Stream over samples one by one so each input can be freed as we go.
    labels = iter(rest_values[0])
    preds = iter(out_values[0])
    inputs = iter(in_values[0])
    for label, pred, inputi in zip(labels, preds, inputs):
        metric.update(label, pred)
        del label
        del pred
        del inputi

    accuracy_msg = report_accuracy(metric=metric, extended_log=extended_log)
    logging.info("Test: {}".format(accuracy_msg))
    logging.info("Time cost: {:.4f} sec".format(time.time() - tic))
Example #21
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model', choices=('pspnet_resnet101',),
        default='pspnet_resnet101')
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--pretrained-model')
    args = parser.parse_args()

    if args.model == 'pspnet_resnet101':
        if args.pretrained_model:
            model = PSPNetResNet101(
                n_class=len(cityscapes_semantic_segmentation_label_names),
                pretrained_model=args.pretrained_model, input_size=(713, 713)
            )
        else:
            model = PSPNetResNet101(pretrained_model='cityscapes')

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    dataset = CityscapesSemanticSegmentationDataset(
        split='val', label_resolution='fine')
    it = iterators.SerialIterator(
        dataset, 1, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, it, hook=ProgressHook(len(dataset)))
    # Delete an iterator of images to save memory usage.
    del in_values
    pred_labels, = out_values
    gt_labels, = rest_values

    result = eval_semantic_segmentation(pred_labels, gt_labels)

    for iu, label_name in zip(
            result['iou'], cityscapes_semantic_segmentation_label_names):
        print('{:>23} : {:.4f}'.format(label_name, iu))
    print('=' * 34)
    print('{:>23} : {:.4f}'.format('mean IoU', result['miou']))
    print('{:>23} : {:.4f}'.format(
        'Class average accuracy', result['mean_class_accuracy']))
    print('{:>23} : {:.4f}'.format(
        'Global average accuracy', result['pixel_accuracy']))
Example #22
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--pretrained-model', type=str, default='camvid')
    parser.add_argument('--batchsize', type=int, default=24)
    args = parser.parse_args()

    model = SegNetBasic(n_class=len(camvid_label_names),
                        pretrained_model=args.pretrained_model)
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    model = calc_bn_statistics(model, args.batchsize)

    test = CamVidDataset(split='test')
    it = chainer.iterators.SerialIterator(test,
                                          batch_size=args.batchsize,
                                          repeat=False,
                                          shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(model.predict,
                                                           it,
                                                           hook=ProgressHook(
                                                               len(test)))
    # Delete an iterator of images to save memory usage.
    del in_values
    pred_labels, = out_values
    gt_labels, = rest_values

    result = eval_semantic_segmentation(pred_labels, gt_labels)

    for iu, label_name in zip(result['iou'], camvid_label_names):
        print('{:>23} : {:.4f}'.format(label_name, iu))
    print('=' * 34)
    print('{:>23} : {:.4f}'.format('mean IoU', result['miou']))
    print('{:>23} : {:.4f}'.format('Class average accuracy',
                                   result['mean_class_accuracy']))
    print('{:>23} : {:.4f}'.format('Global average accuracy',
                                   result['pixel_accuracy']))
Example #23
def main():
    args = parse_args()
    cfg.merge_from_file(args.config)
    cfg.freeze()

    comm = chainermn.create_communicator('pure_nccl')
    device = comm.intra_rank

    model = setup_model(cfg)
    load_pretrained_model(cfg, args.config, model, args.pretrained_model)
    dataset = setup_dataset(cfg, 'eval')

    model.use_preset('evaluate')
    chainer.cuda.get_device_from_id(device).use()
    model.to_gpu()

    if comm.rank != 0:
        apply_to_iterator(model.predict, None, comm=comm)
        return

    iterator = iterators.MultithreadIterator(dataset,
                                             args.batchsize * comm.size,
                                             repeat=False,
                                             shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(model.predict,
                                                           iterator,
                                                           hook=ProgressHook(
                                                               len(dataset)),
                                                           comm=comm)
    # delete unused iterators explicitly
    del in_values

    if cfg.dataset.eval == 'COCO':
        eval_coco(out_values, rest_values)
    elif cfg.dataset.eval == 'VOC':
        eval_voc(out_values, rest_values)
    else:
        raise ValueError('unknown evaluation dataset: {}'.format(
            cfg.dataset.eval))
Example #24
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--pretrained-model')
    args = parser.parse_args()

    model = ResNet50(pretrained_model=args.pretrained_model,
                     n_class=len(voc_bbox_label_names),
                     arch='he')
    model.pick = 'fc6'
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    dataset = VOCBboxDataset(split='test', year='2007', use_difficult=False)
    dataset = TransformDataset(dataset, ('img', 'bbox'), bbox_to_multi_label)
    iterator = iterators.SerialIterator(dataset,
                                        8,
                                        repeat=False,
                                        shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(
        PredictFunc(model, thresh=0),
        iterator,
        hook=ProgressHook(len(dataset)))
    # delete unused iterators explicitly
    del in_values
    pred_labels, pred_scores = out_values
    gt_labels, = rest_values

    result = eval_multi_label_classification(pred_labels, pred_scores,
                                             gt_labels)
    print()
    print('mAP: {:f}'.format(result['map']))
    for l, name in enumerate(voc_bbox_label_names):
        if result['ap'][l]:
            print('{:s}: {:f}'.format(name, result['ap'][l]))
        else:
            print('{:s}: -'.format(name))
Example #25
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset',
                        choices=('cityscapes', 'ade20k', 'camvid'))
    parser.add_argument('--model', choices=('pspnet_resnet101', 'segnet'))
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--pretrained-model')
    parser.add_argument('--input-size', type=int, default=None)
    args = parser.parse_args()

    dataset, label_names, model = get_dataset_and_model(
        args.dataset, args.model, args.pretrained_model,
        (args.input_size, args.input_size))

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    it = iterators.SerialIterator(dataset, 1, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(model.predict,
                                                           it,
                                                           hook=ProgressHook(
                                                               len(dataset)))
    # Delete an iterator of images to save memory usage.
    del in_values
    pred_labels, = out_values
    gt_labels, = rest_values

    result = eval_semantic_segmentation(pred_labels, gt_labels)

    for iu, label_name in zip(result['iou'], label_names):
        print('{:>23} : {:.4f}'.format(label_name, iu))
    print('=' * 34)
    print('{:>23} : {:.4f}'.format('mean IoU', result['miou']))
    print('{:>23} : {:.4f}'.format('Class average accuracy',
                                   result['mean_class_accuracy']))
    print('{:>23} : {:.4f}'.format('Global average accuracy',
                                   result['pixel_accuracy']))
Example #26
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset',
                        choices=('cityscapes', 'ade20k', 'camvid', 'voc'))
    parser.add_argument('--model', choices=sorted(models.keys()))
    parser.add_argument('--pretrained-model')
    parser.add_argument('--batchsize', type=int)
    parser.add_argument('--input-size', type=int, default=None)
    args = parser.parse_args()

    comm = chainermn.create_communicator('pure_nccl')
    device = comm.intra_rank

    dataset, eval_, model, batchsize = setup(args.dataset, args.model,
                                             args.pretrained_model,
                                             args.batchsize, args.input_size)

    chainer.cuda.get_device_from_id(device).use()
    model.to_gpu()

    if comm.rank != 0:
        apply_to_iterator(model.predict, None, comm=comm)
        return

    it = iterators.MultithreadIterator(dataset,
                                       batchsize * comm.size,
                                       repeat=False,
                                       shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(model.predict,
                                                           it,
                                                           hook=ProgressHook(
                                                               len(dataset)),
                                                           comm=comm)
    # Delete an iterator of images to save memory usage.
    del in_values

    eval_(out_values, rest_values)
Example #27
def main():
    parser = argparse.ArgumentParser(
        description='Evaluating convnet from ILSVRC2012 dataset')
    parser.add_argument('val', help='Path to root of the validation dataset')
    parser.add_argument('--model', choices=sorted(models.keys()))
    parser.add_argument('--pretrained-model')
    parser.add_argument('--dataset', choices=('imagenet', ))
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--batchsize', type=int)
    parser.add_argument('--crop', choices=('center', '10'))
    parser.add_argument('--resnet-arch')
    args = parser.parse_args()

    dataset, eval_, model, batchsize = setup(args.dataset, args.model,
                                             args.pretrained_model,
                                             args.batchsize, args.val,
                                             args.crop, args.resnet_arch)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()

    iterator = iterators.MultiprocessIterator(dataset,
                                              batchsize,
                                              repeat=False,
                                              shuffle=False,
                                              n_processes=6,
                                              shared_mem=300000000)

    print('Model has been prepared. Evaluation starts.')
    in_values, out_values, rest_values = apply_to_iterator(model.predict,
                                                           iterator,
                                                           hook=ProgressHook(
                                                               len(dataset)))
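    # delete the unused input iterator explicitly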
    del in_values

    eval_(out_values, rest_values)
Example #28
def test(net,
         test_dataset,
         num_gpus,
         num_classes,
         calc_weight_count=False,
         extended_log=False,
         dataset_metainfo=None):
    assert dataset_metainfo is not None
    tic = time.time()

    it = iterators.SerialIterator(
        dataset=test_dataset,
        batch_size=1,
        repeat=False,
        shuffle=False)

    predictor = SegPredictor(base_model=net)

    if num_gpus > 0:
        predictor.to_gpu()

    if calc_weight_count:
        weight_count = net.count_params()
        logging.info('Model: {} trainable parameters'.format(weight_count))

    in_values, out_values, rest_values = apply_to_iterator(
        predictor.predict,
        it,
        hook=ProgressHook(len(test_dataset)))
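    # delete the unused input iterator explicitly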
    del in_values

    pred_labels, = out_values
    gt_labels, = rest_values

    metrics = []
    pix_acc_macro_average = False
    metrics.append(PixelAccuracyMetric(
        vague_idx=dataset_metainfo["vague_idx"],
        use_vague=dataset_metainfo["use_vague"],
        macro_average=pix_acc_macro_average))
    mean_iou_macro_average = False
    metrics.append(MeanIoUMetric(
        num_classes=num_classes,
        vague_idx=dataset_metainfo["vague_idx"],
        use_vague=dataset_metainfo["use_vague"],
        bg_idx=dataset_metainfo["background_idx"],
        ignore_bg=dataset_metainfo["ignore_bg"],
        macro_average=mean_iou_macro_average))

    labels = iter(gt_labels)
    preds = iter(pred_labels)
    for label, pred in zip(labels, preds):
        for metric in metrics:
            metric.update(label, pred)

    accuracy_info = [metric.get() for metric in metrics]
    pix_acc = accuracy_info[0][1]
    mean_iou = accuracy_info[1][1]
    pix_macro = "macro" if pix_acc_macro_average else "micro"
    iou_macro = "macro" if mean_iou_macro_average else "micro"

    if extended_log:
        logging.info(
            "Test: {pix_macro}-pix_acc={pix_acc:.4f} ({pix_acc}), "
            "{iou_macro}-mean_iou={mean_iou:.4f} ({mean_iou})".format(
                pix_macro=pix_macro, pix_acc=pix_acc, iou_macro=iou_macro, mean_iou=mean_iou))
    else:
        logging.info("Test: {pix_macro}-pix_acc={pix_acc:.4f}, {iou_macro}-mean_iou={mean_iou:.4f}".format(
            pix_macro=pix_macro, pix_acc=pix_acc, iou_macro=iou_macro, mean_iou=mean_iou))

    logging.info('Time cost: {:.4f} sec'.format(
        time.time() - tic))
Example #29
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        choices=('fcis_resnet101', ),
                        default='fcis_resnet101')
    parser.add_argument('--pretrained-model', default=None)
    parser.add_argument('--gpu', type=int, default=-1)
    args = parser.parse_args()

    if args.model == 'fcis_resnet101':
        if args.pretrained_model is None:
            args.pretrained_model = 'coco'
        # copy() avoids mutating the shared class-level default dict
        proposal_creator_params = FCISResNet101.proposal_creator_params.copy()
        proposal_creator_params['min_size'] = 2
        model = FCISResNet101(
            n_fg_class=len(coco_instance_segmentation_label_names),
            anchor_scales=(4, 8, 16, 32),
            pretrained_model=args.pretrained_model,
            proposal_creator_params=proposal_creator_params)

    model.use_preset('coco_evaluate')

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    dataset = COCOInstanceSegmentationDataset(split='minival',
                                              year='2014',
                                              use_crowded=True,
                                              return_crowded=True,
                                              return_area=True)
    iterator = iterators.SerialIterator(dataset,
                                        1,
                                        repeat=False,
                                        shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(model.predict,
                                                           iterator,
                                                           hook=ProgressHook(
                                                               len(dataset)))
    # delete unused iterators explicitly
    del in_values

    pred_masks, pred_labels, pred_scores = out_values
    gt_masks, gt_labels, gt_areas, gt_crowdeds = rest_values

    result = eval_instance_segmentation_coco(pred_masks, pred_labels,
                                             pred_scores, gt_masks, gt_labels,
                                             gt_areas, gt_crowdeds)

    keys = [
        'map/iou=0.50:0.95/area=all/max_dets=100',
        'map/iou=0.50/area=all/max_dets=100',
        'map/iou=0.75/area=all/max_dets=100',
        'map/iou=0.50:0.95/area=small/max_dets=100',
        'map/iou=0.50:0.95/area=medium/max_dets=100',
        'map/iou=0.50:0.95/area=large/max_dets=100',
        'mar/iou=0.50:0.95/area=all/max_dets=1',
        'mar/iou=0.50:0.95/area=all/max_dets=10',
        'mar/iou=0.50:0.95/area=all/max_dets=100',
        'mar/iou=0.50:0.95/area=small/max_dets=100',
        'mar/iou=0.50:0.95/area=medium/max_dets=100',
        'mar/iou=0.50:0.95/area=large/max_dets=100',
    ]

    print('')
    for key in keys:
        print('{:s}: {:f}'.format(key, result[key]))
Example #30
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        choices=('faster_rcnn', 'ssd300', 'ssd512', 'yolo_v3'),
                        default='ssd300')
    parser.add_argument('--pretrained_model')
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--batchsize', type=int, default=32)
    args = parser.parse_args()

    if args.model == 'faster_rcnn':
        if args.pretrained_model:
            model = FasterRCNNVGG16(n_fg_class=len(voc_bbox_label_names),
                                    pretrained_model=args.pretrained_model)
        else:
            model = FasterRCNNVGG16(pretrained_model='voc07')
    elif args.model == 'ssd300':
        if args.pretrained_model:
            model = SSD300(n_fg_class=len(voc_bbox_label_names),
                           pretrained_model=args.pretrained_model)
        else:
            model = SSD300(pretrained_model='voc0712')
    elif args.model == 'ssd512':
        if args.pretrained_model:
            model = SSD512(n_fg_class=len(voc_bbox_label_names),
                           pretrained_model=args.pretrained_model)
        else:
            model = SSD512(pretrained_model='voc0712')
    elif args.model == 'yolo_v3':
        if args.pretrained_model:
            model = YOLOv3(n_fg_class=len(voc_bbox_label_names),
                           pretrained_model=args.pretrained_model)
        else:
            model = YOLOv3(pretrained_model='voc0712')

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    model.use_preset('evaluate')

    dataset = VOCBboxDataset(year='2007',
                             split='test',
                             use_difficult=True,
                             return_difficult=True)
    iterator = iterators.SerialIterator(dataset,
                                        args.batchsize,
                                        repeat=False,
                                        shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(model.predict,
                                                           iterator,
                                                           hook=ProgressHook(
                                                               len(dataset)))
    # delete unused iterators explicitly
    del in_values

    pred_bboxes, pred_labels, pred_scores = out_values
    gt_bboxes, gt_labels, gt_difficults = rest_values

    result = eval_detection_voc(pred_bboxes,
                                pred_labels,
                                pred_scores,
                                gt_bboxes,
                                gt_labels,
                                gt_difficults,
                                use_07_metric=True)

    print()
    print('mAP: {:f}'.format(result['map']))
    for l, name in enumerate(voc_bbox_label_names):
        if result['ap'][l]:
            print('{:s}: {:f}'.format(name, result['ap'][l]))
        else:
            print('{:s}: -'.format(name))