import unittest

import numpy as np
from chainercv.evaluations import calc_semantic_segmentation_iou


class TestCalcSemanticSegmentationIou(unittest.TestCase):
    n_class = 2  # the expected values below are written out for two classes

    def test_calc_semantic_segmentation_iou(self):
        c = np.random.randint(0, 100, size=(self.n_class, self.n_class))
        expected = np.array(
            [c[0, 0] / (c[0, 0] + c[0, 1] + c[1, 0]),
             c[1, 1] / (c[1, 1] + c[1, 0] + c[0, 1])])

        iou = calc_semantic_segmentation_iou(c)
        np.testing.assert_equal(iou, expected)
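
A minimal usage sketch (not part of the original examples): it shows how
calc_semantic_segmentation_confusion and calc_semantic_segmentation_iou fit
together on toy label maps; the arrays and expected values are illustrative
only.

import numpy as np

from chainercv.evaluations import calc_semantic_segmentation_confusion
from chainercv.evaluations import calc_semantic_segmentation_iou

# One predicted and one ground-truth label map, each of shape (2, 2).
pred_labels = [np.array([[0, 0], [1, 1]])]
gt_labels = [np.array([[0, 1], [1, 1]])]

# confusion[i, j] counts pixels labeled class i by the ground truth and
# class j by the prediction; for these inputs it is [[1, 0], [1, 2]].
confusion = calc_semantic_segmentation_confusion(pred_labels, gt_labels)

# Per-class IoU: diagonal / (row sum + column sum - diagonal) -> [1 / 2, 2 / 3]
iou = calc_semantic_segmentation_iou(confusion)
print(iou)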


# Multi-GPU evaluation of a semantic segmentation model with ChainerMN; each
# MPI process scores a disjoint slice of the dataset and the confusion
# matrices are summed across processes.
import argparse

import chainer
from chainer import iterators
import chainermn
import numpy as np

from chainercv.evaluations import calc_semantic_segmentation_confusion
from chainercv.evaluations import calc_semantic_segmentation_iou
from chainercv.utils import apply_to_iterator
from chainercv.utils import ProgressHook

# get_dataset_and_model is assumed to be a helper defined alongside this
# script; it returns (dataset, label_names, model) for the chosen pair.


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset',
                        choices=('cityscapes', 'ade20k', 'camvid'))
    parser.add_argument('--model', choices=('pspnet_resnet101', 'segnet'))
    parser.add_argument('--pretrained-model')
    parser.add_argument('--input-size', type=int, default=None)
    args = parser.parse_args()

    comm = chainermn.create_communicator()
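    # intra_rank is this process's rank within its node; it is used as the GPU id.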
    device = comm.intra_rank

    dataset, label_names, model = get_dataset_and_model(
        args.dataset, args.model, args.pretrained_model,
        (args.input_size, args.input_size))
    assert len(dataset) % comm.size == 0, \
        "The size of the dataset should be a multiple "\
        "of the number of GPUs"

    chainer.cuda.get_device_from_id(device).use()
    model.to_gpu()

    if comm.rank == 0:
        indices = np.arange(len(dataset))
    else:
        indices = None
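    # Rank 0 holds the full index array; scatter_dataset splits it so that
    # each process evaluates only its own disjoint slice of the dataset.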
    indices = chainermn.scatter_dataset(indices, comm)
    dataset = dataset.slice[indices]

    it = iterators.SerialIterator(dataset, 1, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(model.predict,
                                                           it,
                                                           hook=ProgressHook(
                                                               len(dataset)))
    # Delete the iterator over input images to reduce memory usage.
    del in_values
    pred_labels, = out_values
    gt_labels, = rest_values

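    # Each process builds a confusion matrix for its shard; allreduce sums
    # them into a single matrix shared by all processes.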
    confusion = calc_semantic_segmentation_confusion(pred_labels, gt_labels)
    confusion = comm.allreduce(confusion)

    if comm.rank == 0:
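        # All metrics are derived from the aggregated confusion matrix:
        # per-class IoU, global pixel accuracy, and per-class accuracy
        # (the diagonal divided by the ground-truth row sums, i.e. recall).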
        iou = calc_semantic_segmentation_iou(confusion)
        pixel_accuracy = np.diag(confusion).sum() / confusion.sum()
        class_accuracy = np.diag(confusion) / np.sum(confusion, axis=1)

        for iu, label_name in zip(iou, label_names):
            print('{:>23} : {:.4f}'.format(label_name, iu))
        print('=' * 34)
        print('{:>23} : {:.4f}'.format('mean IoU', np.nanmean(iou)))
        print('{:>23} : {:.4f}'.format('Class average accuracy',
                                       np.nanmean(class_accuracy)))
        print('{:>23} : {:.4f}'.format('Global average accuracy',
                                       pixel_accuracy))


# Single-GPU evaluation of SegNetBasic on the CamVid test set.
import argparse

import chainer
import numpy as np

from chainercv.datasets import camvid_label_names
from chainercv.datasets import CamVidDataset
from chainercv.evaluations import calc_semantic_segmentation_confusion
from chainercv.evaluations import calc_semantic_segmentation_iou
from chainercv.links import SegNetBasic
from chainercv.utils import apply_prediction_to_iterator

# calc_bn_statistics is assumed to be a local helper that recomputes batch
# normalization statistics over training batches before evaluation.


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--pretrained_model', type=str, default='camvid')
    parser.add_argument('--batchsize', type=int, default=24)
    args = parser.parse_args()

    model = SegNetBasic(n_class=len(camvid_label_names),
                        pretrained_model=args.pretrained_model)
    if args.gpu >= 0:
        model.to_gpu(args.gpu)

    model = calc_bn_statistics(model, args.batchsize)

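    # Switch Chainer to test mode globally (e.g. batch normalization uses
    # its stored statistics instead of per-batch statistics).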
    chainer.config.train = False

    test = CamVidDataset(split='test')
    it = chainer.iterators.SerialIterator(test,
                                          batch_size=args.batchsize,
                                          repeat=False,
                                          shuffle=False)

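    # Run inference over the whole test set; the returned values are lazy
    # iterators that are consumed below.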
    imgs, pred_values, gt_values = apply_prediction_to_iterator(
        model.predict, it)
    # Delete the iterator over input images to reduce memory usage.
    del imgs
    pred_labels, = pred_values
    gt_labels, = gt_values

    confusion = calc_semantic_segmentation_confusion(pred_labels, gt_labels)
    ious = calc_semantic_segmentation_iou(confusion)

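    # Global accuracy is trace(confusion) / total pixels; class average
    # accuracy averages per-class recall (diagonal over ground-truth row sums).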
    pixel_accuracy = np.diag(confusion).sum() / confusion.sum()
    mean_pixel_accuracy = np.mean(
        np.diag(confusion) / np.sum(confusion, axis=1))

    for iou, label_name in zip(ious, camvid_label_names):
        print('{:>23} : {:.4f}'.format(label_name, iou))
    print('=' * 34)
    print('{:>23} : {:.4f}'.format('mean IoU', np.nanmean(ious)))
    print('{:>23} : {:.4f}'.format('Class average accuracy',
                                   mean_pixel_accuracy))
    print('{:>23} : {:.4f}'.format('Global average accuracy', pixel_accuracy))


# Multi-GPU evaluation of PSPNet (ResNet-101) on the Cityscapes validation
# set with ChainerMN.
import argparse

import chainer
from chainer import iterators
import chainermn
import numpy as np

from chainercv.datasets import CityscapesSemanticSegmentationDataset
from chainercv.datasets import cityscapes_semantic_segmentation_label_names
from chainercv.evaluations import calc_semantic_segmentation_confusion
from chainercv.evaluations import calc_semantic_segmentation_iou
from chainercv.experimental.links import PSPNetResNet101
from chainercv.utils import apply_to_iterator
from chainercv.utils import ProgressHook


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        choices=('pspnet_resnet101', ),
                        default='pspnet_resnet101')
    parser.add_argument('--pretrained-model')
    args = parser.parse_args()

    comm = chainermn.create_communicator()
    device = comm.intra_rank

    if args.model == 'pspnet_resnet101':
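        # Use the supplied weights when given; otherwise fall back to the
        # Cityscapes-pretrained weights distributed with ChainerCV.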
        if args.pretrained_model:
            model = PSPNetResNet101(
                n_class=len(cityscapes_semantic_segmentation_label_names),
                pretrained_model=args.pretrained_model,
                input_size=(713, 713))
        else:
            model = PSPNetResNet101(pretrained_model='cityscapes')

    chainer.cuda.get_device_from_id(device).use()
    model.to_gpu()

    dataset = CityscapesSemanticSegmentationDataset(split='val',
                                                    label_resolution='fine')

    if comm.rank == 0:
        indices = np.arange(len(dataset))
    else:
        indices = None
    indices = chainermn.scatter_dataset(indices, comm)
    dataset = dataset.slice[indices]

    it = iterators.SerialIterator(dataset, 1, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(model.predict,
                                                           it,
                                                           hook=ProgressHook(
                                                               len(dataset)))
    # Delete the iterator over input images to reduce memory usage.
    del in_values
    pred_labels, = out_values
    gt_labels, = rest_values

    confusion = calc_semantic_segmentation_confusion(pred_labels, gt_labels)
    confusion = comm.allreduce(confusion)

    if comm.rank == 0:
        iou = calc_semantic_segmentation_iou(confusion)
        pixel_accuracy = np.diag(confusion).sum() / confusion.sum()
        class_accuracy = np.diag(confusion) / np.sum(confusion, axis=1)

        for iu, label_name in zip(
                iou, cityscapes_semantic_segmentation_label_names):
            print('{:>23} : {:.4f}'.format(label_name, iu))
        print('=' * 34)
        print('{:>23} : {:.4f}'.format('mean IoU', np.nanmean(iou)))
        print('{:>23} : {:.4f}'.format('Class average accuracy',
                                       np.nanmean(class_accuracy)))
        print('{:>23} : {:.4f}'.format('Global average accuracy',
                                       pixel_accuracy))
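

if __name__ == '__main__':
    # Typically launched with one MPI process per GPU, for example:
    #   mpiexec -n 8 python eval_cityscapes_multi.py --pretrained-model <path>
    # (the script name above is illustrative).
    main()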