# Common imports assumed by the snippets below (each example comes from a
# different repository; dataset and metric helpers are from chainercv):
import os

import cv2
import imageio
import numpy as np
import pandas as pd
from tqdm import tqdm

from chainercv.datasets import VOCSemanticSegmentationDataset
from chainercv.evaluations import calc_semantic_segmentation_confusion

Example #1
def run(args):
    assert args.voc12_root is not None
    assert args.chainer_eval_set is not None
    assert args.sem_seg_out_dir is not None

    dataset = VOCSemanticSegmentationDataset(split=args.chainer_eval_set,
                                             data_dir=args.voc12_root)
    labels = [
        dataset.get_example_by_keys(i, (1, ))[0] for i in range(len(dataset))
    ]

    preds = []
    for id in tqdm(dataset.ids):
        cls_labels = imageio.imread(
            os.path.join(args.sem_seg_out_dir, id + '.png')).astype(np.uint8)
        cls_labels[cls_labels == 255] = 0  # treat the 255 'ignore' value as background
        preds.append(cls_labels.copy())
    # Keep only the 21 VOC classes (background + 20 objects).
    confusion = calc_semantic_segmentation_confusion(preds, labels)[:21, :21]

    gtj = confusion.sum(axis=1)
    resj = confusion.sum(axis=0)
    gtjresj = np.diag(confusion)
    denominator = gtj + resj - gtjresj
    fp = 1. - gtj / denominator
    fn = 1. - resj / denominator
    iou = gtjresj / denominator

    print(fp[0], fn[0])
    print(np.mean(fp[1:]), np.mean(fn[1:]))

    print({'iou': iou, 'miou': np.nanmean(iou)})
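The FP/FN/IoU block above is pure confusion-matrix arithmetic. A minimal sketch with a hand-built 2-class matrix (values invented for illustration) shows the identities the code relies on:

import numpy as np

# Toy 2-class confusion matrix: rows = ground truth, columns = prediction.
confusion = np.array([[50, 10],   # 50 correct bg, 10 bg pixels predicted fg
                      [5, 35]])   # 5 fg pixels predicted bg, 35 correct fg

gtj = confusion.sum(axis=1)         # ground-truth pixels per class: [60, 40]
resj = confusion.sum(axis=0)        # predicted pixels per class:    [55, 45]
gtjresj = np.diag(confusion)        # correctly classified pixels:   [50, 35]
denominator = gtj + resj - gtjresj  # per-class union:               [65, 50]

iou = gtjresj / denominator   # intersection over union: [0.769, 0.700]
fp = 1. - gtj / denominator   # false positives as a share of the union
fn = 1. - resj / denominator  # false negatives as a share of the union
print(iou, fp, fn)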
Example #2
def run_app(cfg: DictConfig) -> None:
    dataset = VOCSemanticSegmentationDataset(split=cfg.chainer_eval_set,
                                             data_dir=cfg.voc12_root)
    labels = [
        dataset.get_example_by_keys(i, (1, ))[0] for i in range(len(dataset))
    ]

    preds = []
    for id in dataset.ids:
        cls_labels = imageio.imread(
            os.path.join(cfg.sem_seg_out_dir, id + '.png')).astype(np.uint8)
        cls_labels[cls_labels == 255] = 0
        if cfg.cv_out_dir:
            cls_labels = add_cv_results(cls_labels.copy(), id, cfg.cv_out_dir,
                                        cfg.area_threshold)
        preds.append(cls_labels.copy())

    confusion = calc_semantic_segmentation_confusion(preds, labels)[:21, :21]

    gtj = confusion.sum(axis=1)
    resj = confusion.sum(axis=0)
    gtjresj = np.diag(confusion)
    denominator = gtj + resj - gtjresj
    fp = 1. - gtj / denominator
    fn = 1. - resj / denominator
    iou = gtjresj / denominator

    print(fp[0], fn[0])
    print(np.mean(fp[1:]), np.mean(fn[1:]))

    print({'iou': iou, 'miou': np.nanmean(iou)})
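The DictConfig signature of run_app suggests a Hydra entry point. A minimal sketch of how such a function is typically wired up (the config path, config name, and fields here are assumptions, not taken from the original):

import hydra
from omegaconf import DictConfig

@hydra.main(config_path='conf', config_name='eval')
def run_app(cfg: DictConfig) -> None:
    # Hydra populates cfg from conf/eval.yaml plus command-line overrides.
    print(cfg.voc12_root, cfg.chainer_eval_set, cfg.sem_seg_out_dir)

if __name__ == '__main__':
    run_app()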
Example #3
def run(args):
    dataset = VOCSemanticSegmentationDataset(split=args.chainer_eval_set,
                                             data_dir=args.voc12_root)
    labels = [
        dataset.get_example_by_keys(i, (1, ))[0] for i in range(len(dataset))
    ]

    preds = []
    for id in dataset.ids:
        ################### modified code #########################
        cam_dict = np.load(os.path.join(args.cam_out_aug_dir, id + '.npy'),
                           allow_pickle=True).item()
        ################### modified code #########################
        cams = cam_dict['high_res']
        cams = np.pad(cams, ((1, 0), (0, 0), (0, 0)),
                      mode='constant',
                      constant_values=args.cam_eval_thres)
        keys = np.pad(cam_dict['keys'] + 1, (1, 0), mode='constant')
        cls_labels = np.argmax(cams, axis=0)
        cls_labels = keys[cls_labels]
        preds.append(cls_labels.copy())

    confusion = calc_semantic_segmentation_confusion(preds, labels)

    gtj = confusion.sum(axis=1)
    resj = confusion.sum(axis=0)
    gtjresj = np.diag(confusion)
    denominator = gtj + resj - gtjresj
    iou = gtjresj / denominator

    print({'iou': iou, 'miou': np.nanmean(iou)})
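The padding trick above is worth unpacking: prepending a constant plane of cam_eval_thres to the CAM stack means a pixel is labelled background whenever no foreground activation beats the threshold, and padding keys + 1 with a leading 0 maps argmax channel indices back to dataset class ids. A toy sketch (shapes and values chosen for illustration):

import numpy as np

cam_eval_thres = 0.5             # assumed threshold value
cams = np.random.rand(2, 4, 4)   # two foreground CAMs on a 4x4 image
keys = np.array([6, 14])         # 0-based ids of the present classes

# Channel 0 becomes a constant "background score" equal to the threshold.
cams = np.pad(cams, ((1, 0), (0, 0), (0, 0)),
              mode='constant', constant_values=cam_eval_thres)
# keys + 1 shifts to 1-based VOC ids; the leading 0 is the background class.
keys = np.pad(keys + 1, (1, 0), mode='constant')  # -> [0, 7, 15]

cls_labels = np.argmax(cams, axis=0)  # 0 wherever every CAM < threshold
cls_labels = keys[cls_labels]         # map winning channel -> class id
print(np.unique(cls_labels))          # a subset of {0, 7, 15}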
Example #4
def run_app(cfg: DictConfig) -> None:
    dataset = VOCSemanticSegmentationDataset(split=cfg.chainer_eval_set,
                                             data_dir=cfg.voc12_root)
    labels = [
        dataset.get_example_by_keys(i, (1, ))[0] for i in range(len(dataset))
    ]
    debug = True  # set to False to use the multiprocessing path below
    if debug:
        preds = []
        for idx in dataset.ids:
            pred = _work(cfg.cam_out_dir, cfg.cv_out_dir, cfg.cam_eval_thres,
                         cfg.area_threshold, idx)
            preds.append(pred)
    else:
        with mp.Pool(processes=mp.cpu_count() // 2) as pool:
            preds = pool.map(
                partial(_work, cfg.cam_out_dir, cfg.cv_out_dir,
                        cfg.cam_eval_thres, cfg.area_threshold),
                list(dataset.ids))
    print(len(preds))

    confusion = calc_semantic_segmentation_confusion(preds, labels)

    gtj = confusion.sum(axis=1)
    resj = confusion.sum(axis=0)
    gtjresj = np.diag(confusion)
    denominator = gtj + resj - gtjresj
    iou = gtjresj / denominator

    print({'iou': iou, 'miou': np.nanmean(iou)})
    logging.info({'iou': iou, 'miou': np.nanmean(iou)})
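Both branches above must call _work identically: functools.partial freezes the first four arguments, so pool.map supplies only the image id as the final positional argument. A minimal sketch with a stand-in _work (the real worker lives elsewhere in that repository; paths and values here are hypothetical):

import multiprocessing as mp
from functools import partial

def _work(cam_out_dir, cv_out_dir, cam_eval_thres, area_threshold, idx):
    # Stand-in: the real worker loads CAMs for `idx` and returns a label map.
    return idx

if __name__ == '__main__':
    ids = ['2007_000027', '2007_000032']
    with mp.Pool(processes=2) as pool:
        preds = pool.map(partial(_work, 'cam_out', 'cv_out', 0.15, 0.5), ids)
    print(preds)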
Example #5
    def test_calc_semantic_segmentation_confusion_shape(self):
        n_class = 30
        pred_labels = np.random.randint(0, n_class, size=(2, 3, 3))
        gt_labels = np.random.randint(-1, n_class, size=(2, 3, 3))
        confusion = calc_semantic_segmentation_confusion(
            pred_labels, gt_labels)

        size = (np.max((pred_labels + 1, gt_labels + 1)))
        self.assertEqual(confusion.shape, (size, size))
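The shape test encodes a useful property of calc_semantic_segmentation_confusion: the matrix is sized from the largest label it sees (negative ground-truth labels are ignored), so mismatched label ranges expand the matrix rather than raise. A quick check:

import numpy as np
from chainercv.evaluations import calc_semantic_segmentation_confusion

pred = np.array([[0, 1, 5]])  # predicted labels, max label 5
gt = np.array([[0, 1, -1]])   # -1 is the ignore label
confusion = calc_semantic_segmentation_confusion([pred], [gt])
print(confusion.shape)        # (6, 6): max label + 1 on each side
print(confusion.sum())        # 2 -- the ignored pixel is not counted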
Example #7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset',
                        choices=('cityscapes', 'ade20k', 'camvid'))
    parser.add_argument('--model', choices=('pspnet_resnet101', 'segnet'))
    parser.add_argument('--pretrained-model')
    parser.add_argument('--input-size', type=int, default=None)
    args = parser.parse_args()

    comm = chainermn.create_communicator()
    device = comm.intra_rank

    dataset, label_names, model = get_dataset_and_model(
        args.dataset, args.model, args.pretrained_model,
        (args.input_size, args.input_size))
    assert len(dataset) % comm.size == 0, \
        "The size of the dataset should be a multiple "\
        "of the number of GPUs"

    chainer.cuda.get_device_from_id(device).use()
    model.to_gpu()

    if comm.rank == 0:
        indices = np.arange(len(dataset))
    else:
        indices = None
    indices = chainermn.scatter_dataset(indices, comm)
    dataset = dataset.slice[indices]

    it = iterators.SerialIterator(dataset, 1, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(model.predict,
                                                           it,
                                                           hook=ProgressHook(
                                                               len(dataset)))
    # Delete an iterator of images to save memory usage.
    del in_values
    pred_labels, = out_values
    gt_labels, = rest_values

    confusion = calc_semantic_segmentation_confusion(pred_labels, gt_labels)
    confusion = comm.allreduce(confusion)

    if comm.rank == 0:
        iou = calc_semantic_segmentation_iou(confusion)
        pixel_accuracy = np.diag(confusion).sum() / confusion.sum()
        class_accuracy = np.diag(confusion) / np.sum(confusion, axis=1)

        for iu, label_name in zip(iou, label_names):
            print('{:>23} : {:.4f}'.format(label_name, iu))
        print('=' * 34)
        print('{:>23} : {:.4f}'.format('mean IoU', np.nanmean(iou)))
        print('{:>23} : {:.4f}'.format('Class average accuracy',
                                       np.nanmean(class_accuracy)))
        print('{:>23} : {:.4f}'.format('Global average accuracy',
                                       pixel_accuracy))
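Note the order of operations above: the per-worker confusion matrices are summed with comm.allreduce before any metric is computed. Confusion counts are additive across data shards, while per-shard IoUs are not, so averaging IoUs would weight shards incorrectly. A small sketch of the principle in plain numpy (no MPI; the shard matrices are invented):

import numpy as np

def iou_from_confusion(c):
    inter = np.diag(c)
    union = c.sum(axis=1) + c.sum(axis=0) - inter
    return inter / union

c0 = np.array([[40, 10], [0, 0]])  # worker 0: shard with only bg pixels
c1 = np.array([[0, 0], [10, 40]])  # worker 1: shard with only fg pixels

global_iou = iou_from_confusion(c0 + c1)                           # reduce first
naive_iou = (iou_from_confusion(c0) + iou_from_confusion(c1)) / 2  # biased

print(global_iou)  # [0.667 0.667]
print(naive_iou)   # [0.4 0.4] -- underestimates both classes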
Example #8
def run(args):
    if args.dataset == 'l8biome':
        dataset = l8biome.dataloader.L8BiomeDataset(args.data_root,
                                                    'train',
                                                    mask_file='mask.tif')
        # Only compute CAM for cloudy images - we know the segmentation label for clear already.
        dataset.images = [img for img in dataset.images if 'cloudy' in img[2]]
        labels = [dataset.load_mask(x[0]) for x in dataset.images]
        ids = [x[2] for x in dataset.images]
    else:
        dataset = VOCSemanticSegmentationDataset(split=args.chainer_eval_set,
                                                 data_dir=args.data_root)
        labels = [
            dataset.get_example_by_keys(i, (1, ))[0]
            for i in range(len(dataset))
        ]
        ids = dataset.ids

    preds = []
    for id in tqdm(ids):
        cam_dict = np.load(os.path.join(args.cam_out_dir, id + '.npy'),
                           allow_pickle=True).item()
        cams = cam_dict['high_res']
        cams = np.pad(cams, ((1, 0), (0, 0), (0, 0)),
                      mode='constant',
                      constant_values=args.cam_eval_thres)
        if args.dataset == 'l8biome':
            # background class (in our case 'clear') corresponds to class 0 already
            keys = np.pad(cam_dict['keys'], (1, 0), mode='constant')
        else:
            keys = np.pad(cam_dict['keys'] + 1, (1, 0), mode='constant')
        cls_labels = np.argmax(cams, axis=0)
        cls_labels = keys[cls_labels]
        preds.append(cls_labels.copy())

    if args.dataset == 'l8biome':
        # Compute metrics as FCD
        pass
    else:
        confusion = calc_semantic_segmentation_confusion(preds, labels)

        gtj = confusion.sum(axis=1)
        resj = confusion.sum(axis=0)
        gtjresj = np.diag(confusion)
        denominator = gtj + resj - gtjresj
        iou = gtjresj / denominator

        print({'iou': iou, 'miou': np.nanmean(iou)})
Example #9
    def test_calc_semantic_segmentation_confusion(self):
        n_class = 2
        pred_labels = np.random.randint(0, n_class, size=(10, 16, 16))
        gt_labels = np.random.randint(-1, n_class, size=(10, 16, 16))
        expected = np.zeros((n_class, n_class), dtype=np.int64)
        expected[0, 0] = np.sum(
            np.logical_and(gt_labels == 0, pred_labels == 0))
        expected[0, 1] = np.sum(
            np.logical_and(gt_labels == 0, pred_labels == 1))
        expected[1, 0] = np.sum(
            np.logical_and(gt_labels == 1, pred_labels == 0))
        expected[1, 1] = np.sum(
            np.logical_and(gt_labels == 1, pred_labels == 1))

        confusion = calc_semantic_segmentation_confusion(
            pred_labels, gt_labels)
        np.testing.assert_equal(confusion, expected)
Example #11
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--pretrained_model', type=str, default='camvid')
    parser.add_argument('--batchsize', type=int, default=24)
    args = parser.parse_args()

    model = SegNetBasic(n_class=len(camvid_label_names),
                        pretrained_model=args.pretrained_model)
    if args.gpu >= 0:
        model.to_gpu(args.gpu)

    model = calc_bn_statistics(model, args.batchsize)

    chainer.config.train = False

    test = CamVidDataset(split='test')
    it = chainer.iterators.SerialIterator(test,
                                          batch_size=args.batchsize,
                                          repeat=False,
                                          shuffle=False)

    imgs, pred_values, gt_values = apply_prediction_to_iterator(
        model.predict, it)
    # Delete an iterator of images to save memory usage.
    del imgs
    pred_labels, = pred_values
    gt_labels, = gt_values

    confusion = calc_semantic_segmentation_confusion(pred_labels, gt_labels)
    ious = calc_semantic_segmentation_iou(confusion)

    pixel_accuracy = np.diag(confusion).sum() / confusion.sum()
    mean_pixel_accuracy = np.mean(
        np.diag(confusion) / np.sum(confusion, axis=1))

    for iou, label_name in zip(ious, camvid_label_names):
        print('{:>23} : {:.4f}'.format(label_name, iou))
    print('=' * 34)
    print('{:>23} : {:.4f}'.format('mean IoU', np.nanmean(ious)))
    print('{:>23} : {:.4f}'.format('Class average accuracy',
                                   mean_pixel_accuracy))
    print('{:>23} : {:.4f}'.format('Global average accuracy', pixel_accuracy))
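Example #11 reports two accuracy flavours alongside IoU: global pixel accuracy (trace over total) and class-average accuracy (mean per-class recall), which weights rare classes equally. On an imbalanced toy matrix (values invented) the two diverge sharply:

import numpy as np

confusion = np.array([[90, 0],  # 90 background pixels, all correct
                      [8, 2]])  # 10 foreground pixels, only 2 correct

pixel_accuracy = np.diag(confusion).sum() / confusion.sum()  # 92/100 = 0.92
class_accuracy = np.diag(confusion) / confusion.sum(axis=1)  # [1.0, 0.2]
print(pixel_accuracy, np.nanmean(class_accuracy))            # 0.92 vs 0.6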
Example #12
def run(args):
    dataset = VOCSemanticSegmentationDataset(split=args.chainer_eval_set,
                                             data_dir=args.voc12_root)
    # labels = [dataset.get_example_by_keys(i, (1,))[0] for i in range(len(dataset))]

    preds = []
    labels = []
    n_images = 0
    for i, id in enumerate(dataset.ids):
        n_images += 1
        # print(os.path.join(args.cam_out_dir, id + '.npy'))
        cam_dict = np.load(os.path.join(args.cam_out_dir, id + '.npy'),
                           allow_pickle=True).item()
        cams = cam_dict['high_res']
        cams = np.pad(cams, ((1, 0), (0, 0), (0, 0)),
                      mode='constant',
                      constant_values=args.cam_eval_thres)
        keys = np.pad(cam_dict['keys'] + 1, (1, 0), mode='constant')
        cls_labels = np.argmax(cams, axis=0)
        cls_labels = keys[cls_labels]
        preds.append(cls_labels.copy())
        labels.append(dataset.get_example_by_keys(i, (1, ))[0])

    confusion = calc_semantic_segmentation_confusion(preds, labels)

    gtj = confusion.sum(axis=1)
    resj = confusion.sum(axis=0)
    gtjresj = np.diag(confusion)
    denominator = gtj + resj - gtjresj
    iou = gtjresj / denominator

    print("threshold:", args.cam_eval_thres, 'miou:', np.nanmean(iou),
          "i_imgs", n_images)
    # Fraction of predicted-foreground pixels whose ground truth is background.
    print('among_predfg_bg',
          float((resj[1:].sum() - confusion[1:, 1:].sum()) / (resj[1:].sum())))

    return np.nanmean(iou)
Example #13
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        choices=('pspnet_resnet101', ),
                        default='pspnet_resnet101')
    parser.add_argument('--pretrained-model')
    args = parser.parse_args()

    comm = chainermn.create_communicator()
    device = comm.intra_rank

    if args.model == 'pspnet_resnet101':
        if args.pretrained_model:
            model = PSPNetResNet101(
                n_class=len(cityscapes_semantic_segmentation_label_names),
                pretrained_model=args.pretrained_model,
                input_size=(713, 713))
        else:
            model = PSPNetResNet101(pretrained_model='cityscapes')

    chainer.cuda.get_device_from_id(device).use()
    model.to_gpu()

    dataset = CityscapesSemanticSegmentationDataset(split='val',
                                                    label_resolution='fine')

    if comm.rank == 0:
        indices = np.arange(len(dataset))
    else:
        indices = None
    indices = chainermn.scatter_dataset(indices, comm)
    dataset = dataset.slice[indices]

    it = iterators.SerialIterator(dataset, 1, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(model.predict,
                                                           it,
                                                           hook=ProgressHook(
                                                               len(dataset)))
    # Delete an iterator of images to save memory usage.
    del in_values
    pred_labels, = out_values
    gt_labels, = rest_values

    confusion = calc_semantic_segmentation_confusion(pred_labels, gt_labels)
    confusion = comm.allreduce(confusion)

    if comm.rank == 0:
        iou = calc_semantic_segmentation_iou(confusion)
        pixel_accuracy = np.diag(confusion).sum() / confusion.sum()
        class_accuracy = np.diag(confusion) / np.sum(confusion, axis=1)

        for iu, label_name in zip(
                iou, cityscapes_semantic_segmentation_label_names):
            print('{:>23} : {:.4f}'.format(label_name, iu))
        print('=' * 34)
        print('{:>23} : {:.4f}'.format('mean IoU', np.nanmean(iou)))
        print('{:>23} : {:.4f}'.format('Class average accuracy',
                                       np.nanmean(class_accuracy)))
        print('{:>23} : {:.4f}'.format('Global average accuracy',
                                       pixel_accuracy))
Example #14
def run(args):

    if args.dataset == 'voc12':
        dataset = VOCSemanticSegmentationDataset(split=args.chainer_eval_set, data_dir=args.dev_root)
        outsize = None
    elif args.dataset in ['adp_morph', 'adp_func']:
        dataset = ADPSemanticSegmentationDataset(split=args.chainer_eval_set, data_dir=args.dev_root,
                                                 htt_type=args.dataset.split('_')[-1])
        outsize = (1088, 1088)
    elif args.dataset in ['deepglobe', 'deepglobe_balanced']:
        dataset = DeepGlobeSemanticSegmentationDataset(split=args.chainer_eval_set, data_dir=args.dev_root,
                                                       is_balanced=args.dataset == 'deepglobe_balanced')
        outsize = (2448, 2448)
    else:
        raise KeyError('Dataset %s not yet implemented' % args.dataset)
    labels = [dataset.get_example_by_keys(i, (1,))[0] for i in range(len(dataset))]

    preds = []
    with tqdm(total=len(dataset)) as pbar:
        for id in dataset.ids:
            if args.dataset == 'voc12':
                img_path = voc12.dataloader.get_img_path(id, args.dev_root)
            elif args.dataset in ['adp_morph', 'adp_func']:
                img_path = adp.dataloader.get_img_path(id, args.dev_root, args.split == 'evaluation')
            elif args.dataset in ['deepglobe', 'deepglobe_balanced']:
                img_path = deepglobe.dataloader.get_img_path(id, args.dev_root)
            else:
                raise KeyError('Dataset %s not yet implemented' % args.dataset)

            cam_dict = np.load(os.path.join(args.cam_out_dir, id + '.npy'), allow_pickle=True).item()
            if args.dataset == 'voc12':
                cams = cam_dict['high_res']
                cams = np.pad(cams, ((1, 0), (0, 0), (0, 0)), mode='constant', constant_values=args.cam_eval_thres)
                keys = np.pad(cam_dict['keys'] + 1, (1, 0), mode='constant')
            elif args.dataset in ['adp_morph', 'adp_func']:
                keys = cam_dict['keys']
                cams = cam_dict['high_res']
            elif args.dataset in ['deepglobe', 'deepglobe_balanced']:
                keys = cam_dict['keys']
                cams = cam_dict['cam']
            else:
                raise KeyError('Dataset %s not yet implemented' % args.dataset)
            cls_labels = np.argmax(cams, axis=0)
            cls_labels = keys[cls_labels]
            if outsize is not None:
                cls_labels = cv2.resize(cls_labels, outsize, interpolation=cv2.INTER_NEAREST)

            # Note: this grayscale save is overwritten by the colour version below.
            imageio.imsave(os.path.join(args.cam_clr_out_dir, id + '.png'), cls_labels.astype(np.uint8))
            # Save with colour
            rw_pred_clr = np.zeros(list(cls_labels.shape) + [3], dtype=np.uint8)
            off = 0
            for t in ['bg', 'fg']:
                for i, c in enumerate(args.class_colours[t]):
                    for ch in range(3):
                        rw_pred_clr[:, :, ch] += c[ch] * np.uint8(cls_labels == (i + off))
                off += len(args.class_colours[t])
            imageio.imsave(os.path.join(args.cam_clr_out_dir, id + '.png'), rw_pred_clr)
            # Save with colour, overlaid on original image
            if args.dataset not in ['deepglobe', 'deepglobe_balanced']:
                orig_img = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
            else:
                orig_img = cv2.resize(cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB), rw_pred_clr.shape[:2])
            if args.dataset in ['adp_morph', 'adp_func']:
                rw_pred_clr = cv2.resize(rw_pred_clr, orig_img.shape[:2])
            rw_pred_clr_over = np.uint8((1 - args.overlay_r) * np.float32(orig_img) +
                                        args.overlay_r * np.float32(rw_pred_clr))
            imageio.imsave(os.path.join(args.cam_clr_out_dir, id + '_overlay.png'), rw_pred_clr_over)
            preds.append(cls_labels.copy())
            pbar.update(1)

    confusion = calc_semantic_segmentation_confusion(preds, labels)

    gtj = confusion.sum(axis=1)
    resj = confusion.sum(axis=0)
    gtjresj = np.diag(confusion)
    denominator = gtj + resj - gtjresj
    iou = gtjresj / denominator
    precision = gtjresj / resj
    recall = gtjresj / gtj
    miou = np.array([np.nanmean(iou)])
    mprecision = np.array([np.nanmean(precision)])
    mrecall = np.array([np.nanmean(recall)])

    iou_data = np.concatenate((iou, miou), axis=0)
    pr_data = np.concatenate((precision, mprecision), axis=0)
    re_data = np.concatenate((recall, mrecall), axis=0)
    data = np.column_stack((iou_data, pr_data, re_data))
    if args.dataset in ['deepglobe', 'deepglobe_balanced']:
        row_names = args.class_names['bg'] + args.class_names['fg'][:-1] + ['mean']
    else:
        row_names = args.class_names['bg'] + args.class_names['fg'] + ['mean']
    df = pd.DataFrame(data, index=row_names, columns=['iou', 'precision', 'recall'])
    df.to_csv(os.path.join(args.eval_dir, args.run_name + '_' + args.split + '_cam_iou.csv'), index=True)

    with open(args.logfile, 'a') as f:
        f.write('[eval_cam, ' + args.split + '] iou: ' + str(list(iou)) + '\n')
        f.write('[eval_cam, ' + args.split + '] miou: ' + str(miou[0]) + '\n')
    # args.logger.write('[eval_cam] iou: ' + iou + '\n')
    # args.logger.write('[eval_cam] miou: ' + miou+ '\n')
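The per-channel accumulation loop that builds rw_pred_clr above can be expressed as a single palette lookup via numpy fancy indexing; a hedged sketch, assuming class_colours is flattened into one list of RGB triples ordered bg-then-fg as in the original:

import numpy as np

def colourize(cls_labels, class_colours):
    # class_colours: list of (r, g, b) tuples, list index == class id.
    palette = np.array(class_colours, dtype=np.uint8)  # (n_class, 3)
    return palette[cls_labels]                         # (H, W, 3)

# Usage: one lookup per pixel, equivalent to the bg/fg accumulation loop.
labels = np.array([[0, 1], [2, 0]])
colours = [(0, 0, 0), (128, 0, 0), (0, 128, 0)]
print(colourize(labels, colours))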
Example #15
def run(args):

    if args.dataset == 'voc12':
        dataset = VOCSemanticSegmentationDataset(split=args.chainer_eval_set,
                                                 data_dir=args.dev_root)
        outsize = None
    elif args.dataset in ['adp_morph', 'adp_func']:
        dataset = ADPSemanticSegmentationDataset(
            split=args.chainer_eval_set,
            data_dir=args.dev_root,
            htt_type=args.dataset.split('_')[-1])
        outsize = (1088, 1088)
    elif args.dataset in ['deepglobe', 'deepglobe_balanced']:
        dataset = DeepGlobeSemanticSegmentationDataset(
            split=args.chainer_eval_set,
            data_dir=args.dev_root,
            is_balanced=args.dataset == 'deepglobe_balanced')
        outsize = (2448, 2448)
    else:
        raise KeyError('Dataset %s not yet implemented' % args.dataset)
    labels = [
        dataset.get_example_by_keys(i, (1, ))[0] for i in range(len(dataset))
    ]

    preds = []

    with tqdm(total=len(dataset)) as pbar:
        for id in dataset.ids:
            cls_labels = imageio.imread(
                os.path.join(args.sem_seg_out_dir,
                             id + '.png')).astype(np.uint8)
            cls_labels[cls_labels == 255] = 0
            if outsize is not None:
                cls_labels = cv2.resize(cls_labels,
                                        outsize,
                                        interpolation=cv2.INTER_NEAREST)
            preds.append(cls_labels.copy())
            pbar.update(1)

    confusion = calc_semantic_segmentation_confusion(preds,
                                                     labels)  #[:21, :21]

    gtj = confusion.sum(axis=1)
    resj = confusion.sum(axis=0)
    gtjresj = np.diag(confusion)
    denominator = gtj + resj - gtjresj
    fp = 1. - gtj / denominator
    fn = 1. - resj / denominator
    iou = gtjresj / denominator
    miou = np.array([np.nanmean(iou)])

    print(fp[0], fn[0])
    print(np.mean(fp[1:]), np.mean(fn[1:]))

    data = np.concatenate((iou, miou), axis=0)
    if args.dataset in ['deepglobe', 'deepglobe_balanced']:
        row_names = args.class_names['bg'] + args.class_names['fg'][:-1] + [
            'miou'
        ]
    else:
        row_names = args.class_names['bg'] + args.class_names['fg'] + ['miou']
    df = pd.DataFrame(data, index=row_names, columns=['iou'])
    df.to_csv(os.path.join(args.eval_dir,
                           args.run_name + '_' + args.split + '_iou.csv'),
              index=True)

    with open(args.logfile, 'a') as f:
        f.write('[eval_sem_seg, ' + args.split + '] iou: ' + str(list(iou)) +
                '\n')
        f.write('[eval_sem_seg, ' + args.split + '] miou: ' + str(miou[0]) +
                '\n')
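Example #15 resizes label maps with cv2.INTER_NEAREST; any interpolating mode would blend neighbouring class ids into meaningless intermediate values. A quick demonstration on toy labels:

import cv2
import numpy as np

labels = np.array([[0, 20], [0, 20]], dtype=np.uint8)
nearest = cv2.resize(labels, (4, 4), interpolation=cv2.INTER_NEAREST)
linear = cv2.resize(labels, (4, 4), interpolation=cv2.INTER_LINEAR)
print(np.unique(nearest))  # [ 0 20] -- still valid class ids
print(np.unique(linear))   # also contains values between 0 and 20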
Example #16
def run(args):
    # NOTE: this snippet was truncated and originally began mid-loop; the
    # setup below is an assumed reconstruction modelled on Examples #3 and #12.
    dataname = args.dataset
    voc12_root = args.voc12_root
    cam_seg_dir = args.cam_seg_dir      # assumed: directory for overlay images
    cam_eval_thres = args.cam_eval_thres
    n_classes = args.n_classes          # assumed: number of classes incl. bg
    rgb_dict = args.rgb_dict            # assumed: class id -> RGB colour
    dataset = VOCSemanticSegmentationDataset(split=args.chainer_eval_set,
                                             data_dir=voc12_root)
    labels = [
        dataset.get_example_by_keys(i, (1, ))[0] for i in range(len(dataset))
    ]
    preds = []
    for idx, id in enumerate(dataset.ids):
        cam_dict = np.load(os.path.join(args.cam_out_dir, id + '.npy'),
                           allow_pickle=True).item()
        cams = cam_dict['high_res']
        cams = np.pad(cams, ((1, 0), (0, 0), (0, 0)),
                      mode='constant',
                      constant_values=cam_eval_thres)  # add the background threshold
        keys = np.pad(cam_dict['keys'] + 1, (1, 0), mode='constant')  # add the background class
        cls_labels = np.argmax(cams, axis=0)
        cls_labels = keys[cls_labels]
        preds.append(cls_labels.copy())
        # print(np.unique(preds[-1]), np.unique(labels[idx]))
        seg = np.zeros((preds[-1].shape[0], preds[-1].shape[1], 3), np.uint8)
        for i in range(n_classes):
            seg[preds[-1] == i] = rgb_dict[i]

        if dataname == 'camvid':
            ori_img = cv2.imread(voc12_root + '/JPEGImages/' + id + '.png')
        else:
            ori_img = cv2.imread(voc12_root + '/JPEGImages/' + id + '.jpg')
        seg = cv2.cvtColor(seg, cv2.COLOR_RGB2BGR)

        # Alpha-blend the colour map over the image: 20% original, 80% overlay.
        dst_img = cv2.addWeighted(ori_img, 0.2, seg, 0.8, 0)
        cv2.imwrite(cam_seg_dir + '/' + id + '.png', dst_img)

    confusion = calc_semantic_segmentation_confusion(preds, labels)

    gtj = confusion.sum(axis=1)
    resj = confusion.sum(axis=0)
    gtjresj = np.diag(confusion)
    denominator = gtj + resj - gtjresj
    iou = gtjresj / denominator

    print({'iou': iou, 'miou': np.nanmean(iou)})