Example No. 1
def run(args):
    dataset = VOCSemanticSegmentationDataset(split=args.chainer_eval_set,
                                             data_dir=args.voc12_root)
    labels = [
        dataset.get_example_by_keys(i, (1, ))[0] for i in range(len(dataset))
    ]

    preds = []
    for id in dataset.ids:
        ################### modified code #########################
        cam_dict = np.load(os.path.join(args.cam_out_aug_dir, id + '.npy'),
                           allow_pickle=True).item()
        ################### modified code #########################
        cams = cam_dict['high_res']
        cams = np.pad(cams, ((1, 0), (0, 0), (0, 0)),
                      mode='constant',
                      constant_values=args.cam_eval_thres)
        keys = np.pad(cam_dict['keys'] + 1, (1, 0), mode='constant')
        cls_labels = np.argmax(cams, axis=0)
        cls_labels = keys[cls_labels]
        preds.append(cls_labels.copy())

    confusion = calc_semantic_segmentation_confusion(preds, labels)

    gtj = confusion.sum(axis=1)
    resj = confusion.sum(axis=0)
    gtjresj = np.diag(confusion)
    denominator = gtj + resj - gtjresj
    iou = gtjresj / denominator

    print({'iou': iou, 'miou': np.nanmean(iou)})
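
A side note on the background-thresholding trick used here (and in most of the examples below): a constant channel is prepended to the CAM stack, so any pixel whose strongest foreground activation falls below cam_eval_thres is assigned to background class 0. A minimal standalone sketch with toy data (the shapes, threshold, and class keys are made up for illustration):

import numpy as np

cam_eval_thres = 0.15            # assumed threshold, for illustration only
cams = np.random.rand(2, 4, 4)   # toy CAMs: 2 foreground classes, 4x4 image
keys = np.array([4, 11])         # toy zero-based class indices present in the image

# prepend a constant background channel and shift the keys by one
cams = np.pad(cams, ((1, 0), (0, 0), (0, 0)),
              mode='constant', constant_values=cam_eval_thres)
keys = np.pad(keys + 1, (1, 0), mode='constant')  # -> [0, 5, 12]
cls_labels = keys[np.argmax(cams, axis=0)]        # per-pixel labels, 0 = background
print(cls_labels)
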
Example No. 2
def run(args):
    assert args.voc12_root is not None
    assert args.chainer_eval_set is not None
    assert args.sem_seg_out_dir is not None

    dataset = VOCSemanticSegmentationDataset(split=args.chainer_eval_set,
                                             data_dir=args.voc12_root)
    labels = [
        dataset.get_example_by_keys(i, (1, ))[0] for i in range(len(dataset))
    ]

    preds = []
    for id in tqdm(dataset.ids):
        cls_labels = imageio.imread(
            os.path.join(args.sem_seg_out_dir, id + '.png')).astype(np.uint8)
        cls_labels[cls_labels == 255] = 0
        preds.append(cls_labels.copy())
    confusion = calc_semantic_segmentation_confusion(preds, labels)[:21, :21]

    gtj = confusion.sum(axis=1)
    resj = confusion.sum(axis=0)
    gtjresj = np.diag(confusion)
    denominator = gtj + resj - gtjresj
    fp = 1. - gtj / denominator
    fn = 1. - resj / denominator
    iou = gtjresj / denominator

    print(fp[0], fn[0])
    print(np.mean(fp[1:]), np.mean(fn[1:]))

    print({'iou': iou, 'miou': np.nanmean(iou)})
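
The fp and fn values above are the false-positive and false-negative shares of each class's union region, read off the same confusion matrix as the IoU (rows are ground truth, columns are predictions). A worked toy example with an assumed 3x3 confusion matrix:

import numpy as np

confusion = np.array([[50, 2, 3],
                      [4, 40, 1],
                      [5, 0, 45]])
gtj = confusion.sum(axis=1)         # ground-truth pixels per class
resj = confusion.sum(axis=0)        # predicted pixels per class
gtjresj = np.diag(confusion)        # intersection: correctly labelled pixels
denominator = gtj + resj - gtjresj  # union
iou = gtjresj / denominator         # e.g. class 0: 50 / 64
fp = 1. - gtj / denominator         # false positives as a share of the union
fn = 1. - resj / denominator        # false negatives as a share of the union
print(iou, np.nanmean(iou), fp, fn)
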
Example No. 3
def run_app(cfg: DictConfig) -> None:
    dataset = VOCSemanticSegmentationDataset(split=cfg.chainer_eval_set,
                                             data_dir=cfg.voc12_root)
    labels = [
        dataset.get_example_by_keys(i, (1, ))[0] for i in range(len(dataset))
    ]
    debug = True
    if debug:
        preds = []
        for idx in dataset.ids:
            pred = _work(cfg.cam_out_dir, cfg.cv_out_dir, cfg.cam_eval_thres,
                         cfg.area_threshold, idx)
            preds.append(pred)
    else:
        with mp.Pool(processes=mp.cpu_count() // 2) as pool:
            preds = pool.map(
                partial(_work, cfg.cam_out_dir, cfg.cv_out_dir,
                        cfg.cam_eval_thres, cfg.area_threshold),
                list(dataset.ids))
    print(len(preds))

    confusion = calc_semantic_segmentation_confusion(preds, labels)

    gtj = confusion.sum(axis=1)
    resj = confusion.sum(axis=0)
    gtjresj = np.diag(confusion)
    denominator = gtj + resj - gtjresj
    iou = gtjresj / denominator

    print({'iou': iou, 'miou': np.nanmean(iou)})
    logging.info({'iou': iou, 'miou': np.nanmean(iou)})
Example No. 4
def run_app(cfg: DictConfig) -> None:
    dataset = VOCSemanticSegmentationDataset(split=cfg.chainer_eval_set,
                                             data_dir=cfg.voc12_root)
    labels = [
        dataset.get_example_by_keys(i, (1, ))[0] for i in range(len(dataset))
    ]

    preds = []
    for id in dataset.ids:
        cls_labels = imageio.imread(
            os.path.join(cfg.sem_seg_out_dir, id + '.png')).astype(np.uint8)
        cls_labels[cls_labels == 255] = 0
        if cfg.cv_out_dir:
            cls_labels = add_cv_results(cls_labels.copy(), id, cfg.cv_out_dir,
                                        cfg.area_threshold)
        preds.append(cls_labels.copy())

    confusion = calc_semantic_segmentation_confusion(preds, labels)[:21, :21]

    gtj = confusion.sum(axis=1)
    resj = confusion.sum(axis=0)
    gtjresj = np.diag(confusion)
    denominator = gtj + resj - gtjresj
    fp = 1. - gtj / denominator
    fn = 1. - resj / denominator
    iou = gtjresj / denominator

    print(fp[0], fn[0])
    print(np.mean(fp[1:]), np.mean(fn[1:]))

    print({'iou': iou, 'miou': np.nanmean(iou)})
Example No. 5
def run(args):
    if args.dataset == 'l8biome':
        dataset = l8biome.dataloader.L8BiomeDataset(args.data_root,
                                                    'train',
                                                    mask_file='mask.tif')
        # Only compute CAMs for cloudy images; the segmentation label for clear images is already known.
        dataset.images = [img for img in dataset.images if 'cloudy' in img[2]]
        labels = [dataset.load_mask(x[0]) for x in dataset.images]
        ids = [x[2] for x in dataset.images]
    else:
        dataset = VOCSemanticSegmentationDataset(split=args.chainer_eval_set,
                                                 data_dir=args.data_root)
        labels = [
            dataset.get_example_by_keys(i, (1, ))[0]
            for i in range(len(dataset))
        ]
        ids = dataset.ids

    preds = []
    for id in tqdm(ids):
        cam_dict = np.load(os.path.join(args.cam_out_dir, id + '.npy'),
                           allow_pickle=True).item()
        cams = cam_dict['high_res']
        cams = np.pad(cams, ((1, 0), (0, 0), (0, 0)),
                      mode='constant',
                      constant_values=args.cam_eval_thres)
        if args.dataset == 'l8biome':
            # background class (in our case 'clear') corresponds to class 0 already
            keys = np.pad(cam_dict['keys'], (1, 0), mode='constant')
        else:
            keys = np.pad(cam_dict['keys'] + 1, (1, 0), mode='constant')
        cls_labels = np.argmax(cams, axis=0)
        cls_labels = keys[cls_labels]
        preds.append(cls_labels.copy())

    if args.dataset == 'l8biome':
        # Compute metrics as FCD
        pass
    else:
        confusion = calc_semantic_segmentation_confusion(preds, labels)

        gtj = confusion.sum(axis=1)
        resj = confusion.sum(axis=0)
        gtjresj = np.diag(confusion)
        denominator = gtj + resj - gtjresj
        iou = gtjresj / denominator

        print({'iou': iou, 'miou': np.nanmean(iou)})
Example No. 6
def run(args):
    dataset = VOCSemanticSegmentationDataset(split=args.chainer_eval_set,
                                             data_dir=args.voc12_root)
    # labels = [dataset.get_example_by_keys(i, (1,))[0] for i in range(len(dataset))]

    preds = []
    labels = []
    n_images = 0
    for i, id in enumerate(dataset.ids):
        n_images += 1
        # print(os.path.join(args.cam_out_dir, id + '.npy'))
        cam_dict = np.load(os.path.join(args.cam_out_dir, id + '.npy'),
                           allow_pickle=True).item()
        cams = cam_dict['high_res']
        cams = np.pad(cams, ((1, 0), (0, 0), (0, 0)),
                      mode='constant',
                      constant_values=args.cam_eval_thres)
        keys = np.pad(cam_dict['keys'] + 1, (1, 0), mode='constant')
        cls_labels = np.argmax(cams, axis=0)
        cls_labels = keys[cls_labels]
        preds.append(cls_labels.copy())
        labels.append(dataset.get_example_by_keys(i, (1, ))[0])

    confusion = calc_semantic_segmentation_confusion(preds, labels)

    gtj = confusion.sum(axis=1)
    resj = confusion.sum(axis=0)
    gtjresj = np.diag(confusion)
    denominator = gtj + resj - gtjresj
    iou = gtjresj / denominator

    print("threshold:", args.cam_eval_thres, 'miou:', np.nanmean(iou),
          "i_imgs", n_images)
    print('among_predfg_bg',
          float((resj[1:].sum() - confusion[1:, 1:].sum()) / (resj[1:].sum())))

    return np.nanmean(iou)
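
Because this variant returns the mIoU instead of only printing it, it lends itself to tuning the background threshold. A hypothetical sketch of such a sweep (run and args as defined above; the threshold range and the in-place mutation of args are assumptions):

import numpy as np

best_thres, best_miou = None, -1.0
for thres in np.arange(0.05, 0.50, 0.05):
    args.cam_eval_thres = thres   # hypothetical: reuse the parsed args namespace
    miou = run(args)
    if miou > best_miou:
        best_thres, best_miou = thres, miou
print('best threshold:', best_thres, 'best miou:', best_miou)
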
Example No. 7
def run(args):

    if args.dataset == 'voc12':
        dataset = VOCSemanticSegmentationDataset(split=args.chainer_eval_set, data_dir=args.dev_root)
        outsize = None
    elif args.dataset in ['adp_morph', 'adp_func']:
        dataset = ADPSemanticSegmentationDataset(split=args.chainer_eval_set, data_dir=args.dev_root,
                                                 htt_type=args.dataset.split('_')[-1])
        outsize = (1088, 1088)
    elif args.dataset in ['deepglobe', 'deepglobe_balanced']:
        dataset = DeepGlobeSemanticSegmentationDataset(split=args.chainer_eval_set, data_dir=args.dev_root,
                                                       is_balanced=args.dataset == 'deepglobe_balanced')
        outsize = (2448, 2448)
    else:
        raise KeyError('Dataset %s not yet implemented' % args.dataset)
    labels = [dataset.get_example_by_keys(i, (1,))[0] for i in range(len(dataset))]

    preds = []
    with tqdm(total=len(dataset)) as pbar:
        for id in dataset.ids:
            if args.dataset == 'voc12':
                img_path = voc12.dataloader.get_img_path(id, args.dev_root)
            elif args.dataset in ['adp_morph', 'adp_func']:
                img_path = adp.dataloader.get_img_path(id, args.dev_root, args.split == 'evaluation')
            elif args.dataset in ['deepglobe', 'deepglobe_balanced']:
                img_path = deepglobe.dataloader.get_img_path(id, args.dev_root)
            else:
                raise KeyError('Dataset %s not yet implemented' % args.dataset)

            cam_dict = np.load(os.path.join(args.cam_out_dir, id + '.npy'), allow_pickle=True).item()
            if args.dataset == 'voc12':
                cams = cam_dict['high_res']
                cams = np.pad(cams, ((1, 0), (0, 0), (0, 0)), mode='constant', constant_values=args.cam_eval_thres)
                keys = np.pad(cam_dict['keys'] + 1, (1, 0), mode='constant')
            elif args.dataset in ['adp_morph', 'adp_func']:
                keys = cam_dict['keys']
                cams = cam_dict['high_res']
            elif args.dataset in ['deepglobe', 'deepglobe_balanced']:
                keys = cam_dict['keys']
                cams = cam_dict['cam']
            else:
                raise KeyError('Dataset %s not yet implemented' % args.dataset)
            cls_labels = np.argmax(cams, axis=0)
            cls_labels = keys[cls_labels]
            if outsize is not None:
                cls_labels = cv2.resize(cls_labels, outsize, interpolation=cv2.INTER_NEAREST)

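            # NOTE: the grayscale save below is immediately overwritten by the
            # colour version written to the same path a few lines further down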
            imageio.imsave(os.path.join(args.cam_clr_out_dir, id + '.png'), cls_labels.astype(np.uint8))
            # Save with colour
            rw_pred_clr = np.zeros(list(cls_labels.shape) + [3], dtype=np.uint8)
            off = 0
            for t in ['bg', 'fg']:
                for i, c in enumerate(args.class_colours[t]):
                    for ch in range(3):
                        rw_pred_clr[:, :, ch] += c[ch] * np.uint8(cls_labels == (i + off))
                off += len(args.class_colours[t])
            imageio.imsave(os.path.join(args.cam_clr_out_dir, id + '.png'), rw_pred_clr)
            # Save with colour, overlaid on original image
            if args.dataset not in ['deepglobe', 'deepglobe_balanced']:
                orig_img = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
            else:
                orig_img = cv2.resize(cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB), rw_pred_clr.shape[:2])
            if args.dataset in ['adp_morph', 'adp_func']:
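                # NOTE: cv2.resize expects dsize as (width, height), while shape[:2]
                # is (height, width); harmless here since the ADP outputs are square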
                rw_pred_clr = cv2.resize(rw_pred_clr, orig_img.shape[:2])
            rw_pred_clr_over = np.uint8((1 - args.overlay_r) * np.float32(orig_img) +
                                        args.overlay_r * np.float32(rw_pred_clr))
            imageio.imsave(os.path.join(args.cam_clr_out_dir, id + '_overlay.png'), rw_pred_clr_over)
            preds.append(cls_labels.copy())
            pbar.update(1)

    confusion = calc_semantic_segmentation_confusion(preds, labels)

    gtj = confusion.sum(axis=1)
    resj = confusion.sum(axis=0)
    gtjresj = np.diag(confusion)
    denominator = gtj + resj - gtjresj
    iou = gtjresj / denominator
    precision = gtjresj / resj
    recall = gtjresj / gtj
    miou = np.array([np.nanmean(iou)])
    mprecision = np.array([np.nanmean(precision)])
    mrecall = np.array([np.nanmean(recall)])

    iou_data = np.concatenate((iou, miou), axis=0)
    pr_data = np.concatenate((precision, mprecision), axis=0)
    re_data = np.concatenate((recall, mrecall), axis=0)
    data = np.column_stack((iou_data, pr_data, re_data))
    if args.dataset in ['deepglobe', 'deepglobe_balanced']:
        row_names = args.class_names['bg'] + args.class_names['fg'][:-1] + ['mean']
    else:
        row_names = args.class_names['bg'] + args.class_names['fg'] + ['mean']
    df = pd.DataFrame(data, index=row_names, columns=['iou', 'precision', 'recall'])
    df.to_csv(os.path.join(args.eval_dir, args.run_name + '_' + args.split + '_cam_iou.csv'), index=True)

    with open(args.logfile, 'a') as f:
        f.write('[eval_cam, ' + args.split + '] iou: ' + str(list(iou)) + '\n')
        f.write('[eval_cam, ' + args.split + '] miou: ' + str(miou[0]) + '\n')
    # args.logger.write('[eval_cam] iou: ' + iou + '\n')
    # args.logger.write('[eval_cam] miou: ' + miou+ '\n')
Example No. 8
def run(args):

    if args.dataset == 'voc12':
        dataset = VOCSemanticSegmentationDataset(split=args.chainer_eval_set,
                                                 data_dir=args.dev_root)
        outsize = None
    elif args.dataset in ['adp_morph', 'adp_func']:
        dataset = ADPSemanticSegmentationDataset(
            split=args.chainer_eval_set,
            data_dir=args.dev_root,
            htt_type=args.dataset.split('_')[-1])
        outsize = (1088, 1088)
    elif args.dataset in ['deepglobe', 'deepglobe_balanced']:
        dataset = DeepGlobeSemanticSegmentationDataset(
            split=args.chainer_eval_set,
            data_dir=args.dev_root,
            is_balanced=args.dataset == 'deepglobe_balanced')
        outsize = (2448, 2448)
    else:
        raise KeyError('Dataset %s not yet implemented' % args.dataset)
    labels = [
        dataset.get_example_by_keys(i, (1, ))[0] for i in range(len(dataset))
    ]

    preds = []

    with tqdm(total=len(dataset)) as pbar:
        for id in dataset.ids:
            cls_labels = imageio.imread(
                os.path.join(args.sem_seg_out_dir,
                             id + '.png')).astype(np.uint8)
            cls_labels[cls_labels == 255] = 0
            if outsize is not None:
                cls_labels = cv2.resize(cls_labels,
                                        outsize,
                                        interpolation=cv2.INTER_NEAREST)
            preds.append(cls_labels.copy())
            pbar.update(1)

    confusion = calc_semantic_segmentation_confusion(preds,
                                                     labels)  #[:21, :21]

    gtj = confusion.sum(axis=1)
    resj = confusion.sum(axis=0)
    gtjresj = np.diag(confusion)
    denominator = gtj + resj - gtjresj
    fp = 1. - gtj / denominator
    fn = 1. - resj / denominator
    iou = gtjresj / denominator
    miou = np.array([np.nanmean(iou)])

    print(fp[0], fn[0])
    print(np.mean(fp[1:]), np.mean(fn[1:]))

    data = np.concatenate((iou, miou), axis=0)
    if args.dataset in ['deepglobe', 'deepglobe_balanced']:
        row_names = args.class_names['bg'] + args.class_names['fg'][:-1] + [
            'miou'
        ]
    else:
        row_names = args.class_names['bg'] + args.class_names['fg'] + ['miou']
    df = pd.DataFrame(data, index=row_names, columns=['iou'])
    df.to_csv(os.path.join(args.eval_dir,
                           args.run_name + '_' + args.split + '_iou.csv'),
              index=True)

    with open(args.logfile, 'a') as f:
        f.write('[eval_sem_seg, ' + args.split + '] iou: ' + str(list(iou)) +
                '\n')
        f.write('[eval_sem_seg, ' + args.split + '] miou: ' + str(miou[0]) +
                '\n')
Example No. 9
File: cam_eval.py Project: 6clc/IRN
from config import *
import numpy as np
import os
from chainercv.datasets import VOCSemanticSegmentationDataset
from chainercv.evaluations import calc_semantic_segmentation_confusion
from PIL import Image
import cv2
rgb_dict = [[0, 0, 0]] + [[255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 255, 0],
                          [255, 0, 255], [0, 255, 255]] * 20
# rgb_dict = [[0, 0, 0]] + [[0, 0, 0], [0, 255, 0], [0, 0, 0], [255, 255, 0], [ 255, 0, 255], [0, 255,255]]*20

if __name__ == "__main__":
    dataset = VOCSemanticSegmentationDataset(split=chainer_eval_set,
                                             data_dir=voc12_root)
    labels = [
        dataset.get_example_by_keys(i, (1, ))[0] for i in range(len(dataset))
    ]

    preds = []
    for idx, id in enumerate(dataset.ids):
        cam_dict = np.load(os.path.join(cam_out_dir, id + '.npy'),
                           allow_pickle=True).item()
        cams = cam_dict['high_res']
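        # keep only CAM channel 2: zero out every other channel before thresholding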
        cams[:2, :] = 0
        cams[3:, :] = 0
        cams = np.pad(cams, ((1, 0), (0, 0), (0, 0)),
                      mode='constant',
                      constant_values=cam_eval_thres)  # add the background threshold
        keys = np.pad(cam_dict['keys'] + 1, (1, 0), mode='constant')  # prepend the background class
        cls_labels = np.argmax(cams, axis=0)
        cls_labels = keys[cls_labels]