Example #1
def run_app(cfg: DictConfig) -> None:
    dataset = VOCSemanticSegmentationDataset(split=cfg.chainer_eval_set,
                                             data_dir=cfg.voc12_root)
    labels = [
        dataset.get_example_by_keys(i, (1, ))[0] for i in range(len(dataset))
    ]
    debug = True  # set to False to fan _work out over a multiprocessing pool
    if debug:
        preds = []
        for idx in dataset.ids:
            pred = _work(cfg.cam_out_dir, cfg.cv_out_dir, cfg.cam_eval_thres,
                         cfg.area_threshold, idx)
            preds.append(pred)
    else:
        with mp.Pool(processes=mp.cpu_count() // 2) as pool:
            preds = pool.map(
                partial(_work, cfg.cam_out_dir, cfg.cv_out_dir,
                        cfg.cam_eval_thres, cfg.area_threshold),
                list(dataset.ids))
    print(len(preds))

    confusion = calc_semantic_segmentation_confusion(preds, labels)

    gtj = confusion.sum(axis=1)
    resj = confusion.sum(axis=0)
    gtjresj = np.diag(confusion)
    denominator = gtj + resj - gtjresj
    iou = gtjresj / denominator

    print({'iou': iou, 'miou': np.nanmean(iou)})
    logging.info({'iou': iou, 'miou': np.nanmean(iou)})
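All of these evaluation snippets reduce the confusion matrix to per-class IoU the same way: row sums are ground-truth pixel counts, column sums are predicted pixel counts, and the diagonal holds the true positives. A minimal self-contained sketch with a toy (hypothetical) 3-class matrix:

import numpy as np

# Toy confusion matrix: rows = ground truth, columns = prediction.
confusion = np.array([[50, 2, 3],
                      [4, 40, 1],
                      [2, 5, 30]])

gt = confusion.sum(axis=1)    # pixels per ground-truth class
pred = confusion.sum(axis=0)  # pixels per predicted class
tp = np.diag(confusion)       # correctly classified pixels
iou = tp / (gt + pred - tp)   # intersection over union, per class

print({'iou': iou, 'miou': np.nanmean(iou)})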
Example #2
def run(args):
    assert args.voc12_root is not None
    assert args.chainer_eval_set is not None
    assert args.sem_seg_out_dir is not None

    dataset = VOCSemanticSegmentationDataset(split=args.chainer_eval_set,
                                             data_dir=args.voc12_root)
    labels = [
        dataset.get_example_by_keys(i, (1, ))[0] for i in range(len(dataset))
    ]

    preds = []
    for id in tqdm(dataset.ids):
        cls_labels = imageio.imread(
            os.path.join(args.sem_seg_out_dir, id + '.png')).astype(np.uint8)
        cls_labels[cls_labels == 255] = 0
        preds.append(cls_labels.copy())
    confusion = calc_semantic_segmentation_confusion(preds, labels)[:21, :21]

    gtj = confusion.sum(axis=1)
    resj = confusion.sum(axis=0)
    gtjresj = np.diag(confusion)
    denominator = gtj + resj - gtjresj
    fp = 1. - gtj / denominator
    fn = 1. - resj / denominator
    iou = gtjresj / denominator

    print(fp[0], fn[0])
    print(np.mean(fp[1:]), np.mean(fn[1:]))

    print({'iou': iou, 'miou': np.nanmean(iou)})
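This variant additionally remaps the VOC boundary/void value 255 to background before scoring, crops the confusion matrix to the 21 VOC classes, and reports per-class false-positive and false-negative rates relative to the union: fp is the predicted-but-wrong share of the union, fn the missed share. A self-contained sketch of that arithmetic on a toy (hypothetical) matrix:

import numpy as np

confusion = np.array([[50, 2, 3],
                      [4, 40, 1],
                      [2, 5, 30]])  # toy matrix: rows = ground truth
gt = confusion.sum(axis=1)
pred = confusion.sum(axis=0)
tp = np.diag(confusion)
union = gt + pred - tp
fp = 1. - gt / union    # false positives as a fraction of the union
fn = 1. - pred / union  # false negatives as a fraction of the union
print(fp, fn)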
Example #3
def run(args):
    dataset = VOCSemanticSegmentationDataset(split=args.chainer_eval_set,
                                             data_dir=args.voc12_root)
    labels = [
        dataset.get_example_by_keys(i, (1, ))[0] for i in range(len(dataset))
    ]

    preds = []
    for id in dataset.ids:
        ################### modified code #########################
        cam_dict = np.load(os.path.join(args.cam_out_aug_dir, id + '.npy'),
                           allow_pickle=True).item()
        ################### modified code #########################
        cams = cam_dict['high_res']
        cams = np.pad(cams, ((1, 0), (0, 0), (0, 0)),
                      mode='constant',
                      constant_values=args.cam_eval_thres)
        keys = np.pad(cam_dict['keys'] + 1, (1, 0), mode='constant')
        cls_labels = np.argmax(cams, axis=0)
        cls_labels = keys[cls_labels]
        preds.append(cls_labels.copy())

    confusion = calc_semantic_segmentation_confusion(preds, labels)

    gtj = confusion.sum(axis=1)
    resj = confusion.sum(axis=0)
    gtjresj = np.diag(confusion)
    denominator = gtj + resj - gtjresj
    iou = gtjresj / denominator

    print({'iou': iou, 'miou': np.nanmean(iou)})
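The np.pad call with constant_values=args.cam_eval_thres is the usual trick in this style of CAM evaluation for turning foreground-only CAMs into a full segmentation: a constant background plane is prepended as channel 0, so argmax falls back to background wherever every foreground activation is below the threshold, and the shifted keys map channel indices back to class ids. A minimal sketch with hypothetical shapes and values:

import numpy as np

cam_eval_thres = 0.15            # hypothetical threshold
cams = np.random.rand(2, 4, 4)   # two foreground CAMs over a 4x4 image
keys = np.array([7, 11])         # hypothetical zero-based foreground classes

# Prepend a constant background plane as channel 0.
cams = np.pad(cams, ((1, 0), (0, 0), (0, 0)),
              mode='constant', constant_values=cam_eval_thres)
# Shift foreground ids by one and prepend background id 0.
keys = np.pad(keys + 1, (1, 0), mode='constant')

cls_labels = keys[np.argmax(cams, axis=0)]  # 0 wherever all CAMs < threshold
print(cls_labels)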
Example #4
def run_app(cfg: DictConfig) -> None:
    dataset = VOCSemanticSegmentationDataset(split=cfg.chainer_eval_set,
                                             data_dir=cfg.voc12_root)
    labels = [
        dataset.get_example_by_keys(i, (1, ))[0] for i in range(len(dataset))
    ]

    preds = []
    for id in dataset.ids:
        cls_labels = imageio.imread(
            os.path.join(cfg.sem_seg_out_dir, id + '.png')).astype(np.uint8)
        cls_labels[cls_labels == 255] = 0
        if cfg.cv_out_dir:
            cls_labels = add_cv_results(cls_labels.copy(), id, cfg.cv_out_dir,
                                        cfg.area_threshold)
        preds.append(cls_labels.copy())

    confusion = calc_semantic_segmentation_confusion(preds, labels)[:21, :21]

    gtj = confusion.sum(axis=1)
    resj = confusion.sum(axis=0)
    gtjresj = np.diag(confusion)
    denominator = gtj + resj - gtjresj
    fp = 1. - gtj / denominator
    fn = 1. - resj / denominator
    iou = gtjresj / denominator

    print(fp[0], fn[0])
    print(np.mean(fp[1:]), np.mean(fn[1:]))

    print({'iou': iou, 'miou': np.nanmean(iou)})
Example #5
def setup(dataset, model, pretrained_model, batchsize, input_size):
    dataset_name = dataset
    if dataset_name == 'cityscapes':
        dataset = CityscapesSemanticSegmentationDataset(
            split='val', label_resolution='fine')
        label_names = cityscapes_semantic_segmentation_label_names
    elif dataset_name == 'ade20k':
        dataset = ADE20KSemanticSegmentationDataset(split='val')
        label_names = ade20k_semantic_segmentation_label_names
    elif dataset_name == 'camvid':
        dataset = CamVidDataset(split='test')
        label_names = camvid_label_names
    elif dataset_name == 'voc':
        dataset = VOCSemanticSegmentationDataset(split='val')
        label_names = voc_semantic_segmentation_label_names

    def eval_(out_values, rest_values):
        pred_labels, = out_values
        gt_labels, = rest_values

        result = eval_semantic_segmentation(pred_labels, gt_labels)

        for iu, label_name in zip(result['iou'], label_names):
            print('{:>23} : {:.4f}'.format(label_name, iu))
        print('=' * 34)
        print('{:>23} : {:.4f}'.format('mean IoU', result['miou']))
        print('{:>23} : {:.4f}'.format('Class average accuracy',
                                       result['mean_class_accuracy']))
        print('{:>23} : {:.4f}'.format('Global average accuracy',
                                       result['pixel_accuracy']))

    cls, pretrained_models, default_batchsize = models[model]
    if pretrained_model is None:
        pretrained_model = pretrained_models.get(dataset_name, dataset_name)
    if input_size is not None:
        input_size = (input_size, input_size)

    kwargs = {
        'n_class': len(label_names),
        'pretrained_model': pretrained_model,
    }
    if model in ['pspnet_resnet50', 'pspnet_resnet101']:
        kwargs.update({'input_size': input_size})
    elif model == 'deeplab_v3plus_xception65':
        kwargs.update({'min_input_size': input_size})
    model = cls(**kwargs)

    if batchsize is None:
        batchsize = default_batchsize

    return dataset, eval_, model, batchsize
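A hypothetical call, assuming the module-level models table maps each model name to a (class, pretrained-weight dict, default batch size) triple as the unpacking above requires:

dataset, evaluate, model, batchsize = setup(
    dataset='voc', model='pspnet_resnet101',
    pretrained_model=None, batchsize=None, input_size=None)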
Example #6
def run_app(cfg: DictConfig) -> None:
    dataset = VOCSemanticSegmentationDataset(split=cfg.chainer_eval_set,
                                             data_dir=cfg.voc12_root)
    os.makedirs(cfg.out_dir, exist_ok=True)
    debug = False
    if debug:
        for i, idx in enumerate(dataset.ids):
            _work(dataset, cfg.out_dir, cfg.cv_method, i, idx)

    else:
        with mp.Pool(processes=mp.cpu_count() // 2) as pool:
            pool.starmap(partial(_work, dataset, cfg.out_dir, cfg.cv_method),
                         enumerate(dataset.ids))
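pool.starmap unpacks each (index, id) pair from enumerate(dataset.ids) into the trailing positional arguments of _work, after the three frozen by functools.partial. The _work body is not shown here; a hypothetical stub that matches this calling convention:

def _work(dataset, out_dir, cv_method, i, idx):
    # dataset, out_dir and cv_method are frozen by partial();
    # (i, idx) arrives from enumerate(dataset.ids) via starmap.
    ...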
Example #7
def run(args):
    if args.dataset == 'l8biome':
        dataset = l8biome.dataloader.L8BiomeDataset(args.data_root,
                                                    'train',
                                                    mask_file='mask.tif')
        # Only compute CAM for cloudy images - we know the segmentation label for clear already.
        dataset.images = [img for img in dataset.images if 'cloudy' in img[2]]
        labels = [dataset.load_mask(x[0]) for x in dataset.images]
        ids = [x[2] for x in dataset.images]
    else:
        dataset = VOCSemanticSegmentationDataset(split=args.chainer_eval_set,
                                                 data_dir=args.data_root)
        labels = [
            dataset.get_example_by_keys(i, (1, ))[0]
            for i in range(len(dataset))
        ]
        ids = dataset.ids

    preds = []
    for id in tqdm(ids):
        cam_dict = np.load(os.path.join(args.cam_out_dir, id + '.npy'),
                           allow_pickle=True).item()
        cams = cam_dict['high_res']
        cams = np.pad(cams, ((1, 0), (0, 0), (0, 0)),
                      mode='constant',
                      constant_values=args.cam_eval_thres)
        if args.dataset == 'l8biome':
            # background class (in our case 'clear') corresponds to class 0 already
            keys = np.pad(cam_dict['keys'], (1, 0), mode='constant')
        else:
            keys = np.pad(cam_dict['keys'] + 1, (1, 0), mode='constant')
        cls_labels = np.argmax(cams, axis=0)
        cls_labels = keys[cls_labels]
        preds.append(cls_labels.copy())

    if args.dataset == 'l8biome':
        # Compute metrics as FCD
        pass
    else:
        confusion = calc_semantic_segmentation_confusion(preds, labels)

        gtj = confusion.sum(axis=1)
        resj = confusion.sum(axis=0)
        gtjresj = np.diag(confusion)
        denominator = gtj + resj - gtjresj
        iou = gtjresj / denominator

        print({'iou': iou, 'miou': np.nanmean(iou)})
Example #8
def get_dataset_and_model(dataset_name, model_name, pretrained_model,
                          input_size):
    if dataset_name == 'cityscapes':
        dataset = CityscapesSemanticSegmentationDataset(
            split='val', label_resolution='fine')
        label_names = cityscapes_semantic_segmentation_label_names
    elif dataset_name == 'ade20k':
        dataset = ADE20KSemanticSegmentationDataset(split='val')
        label_names = ade20k_semantic_segmentation_label_names
    elif dataset_name == 'camvid':
        dataset = CamVidDataset(split='test')
        label_names = camvid_label_names
    elif dataset_name == 'voc':
        dataset = VOCSemanticSegmentationDataset(split='val')
        label_names = voc_semantic_segmentation_label_names

    n_class = len(label_names)

    if not pretrained_model:
        pretrained_model = dataset_name
    if model_name == 'pspnet_resnet101':
        model = PSPNetResNet101(n_class=n_class,
                                pretrained_model=pretrained_model,
                                input_size=input_size)
    elif model_name == 'pspnet_resnet50':
        model = PSPNetResNet50(n_class=n_class,
                               pretrained_model=pretrained_model,
                               input_size=input_size)
    elif model_name == 'segnet':
        model = SegNetBasic(n_class=n_class, pretrained_model=pretrained_model)
    elif model_name == 'deeplab_v3plus_xception65':
        model = DeepLabV3plusXception65(n_class=n_class,
                                        pretrained_model=pretrained_model,
                                        min_input_size=input_size)

    return dataset, label_names, model
Example #9
def run(args):
    dataset = VOCSemanticSegmentationDataset(split=args.chainer_eval_set,
                                             data_dir=args.voc12_root)
    # labels = [dataset.get_example_by_keys(i, (1,))[0] for i in range(len(dataset))]

    preds = []
    labels = []
    n_images = 0
    for i, id in enumerate(dataset.ids):
        n_images += 1
        # print(os.path.join(args.cam_out_dir, id + '.npy'))
        cam_dict = np.load(os.path.join(args.cam_out_dir, id + '.npy'),
                           allow_pickle=True).item()
        cams = cam_dict['high_res']
        cams = np.pad(cams, ((1, 0), (0, 0), (0, 0)),
                      mode='constant',
                      constant_values=args.cam_eval_thres)
        keys = np.pad(cam_dict['keys'] + 1, (1, 0), mode='constant')
        cls_labels = np.argmax(cams, axis=0)
        cls_labels = keys[cls_labels]
        preds.append(cls_labels.copy())
        labels.append(dataset.get_example_by_keys(i, (1, ))[0])

    confusion = calc_semantic_segmentation_confusion(preds, labels)

    gtj = confusion.sum(axis=1)
    resj = confusion.sum(axis=0)
    gtjresj = np.diag(confusion)
    denominator = gtj + resj - gtjresj
    iou = gtjresj / denominator

    print("threshold:", args.cam_eval_thres, 'miou:', np.nanmean(iou),
          "i_imgs", n_images)
    print('among_predfg_bg',
          float((resj[1:].sum() - confusion[1:, 1:].sum()) / (resj[1:].sum())))

    return np.nanmean(iou)
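Since this variant returns the mIoU instead of only printing it, it lends itself to sweeping the background threshold. A hypothetical sweep, assuming args is a mutable argparse.Namespace:

import numpy as np

best_thres, best_miou = None, -1.0
for t in np.arange(0.05, 0.40, 0.05):  # hypothetical threshold grid
    args.cam_eval_thres = t
    miou = run(args)
    if miou > best_miou:
        best_thres, best_miou = t, miou
print('best threshold:', best_thres, 'best miou:', best_miou)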
Example #10
def setUp(self):
    self.dataset = VOCSemanticSegmentationDataset(split=self.split)
Example #11
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        choices=('multi_task_300', 'multi_task_512'),
                        default='multi_task_300')
    parser.add_argument('--batchsize', type=int, default=32)
    parser.add_argument('--iteration', type=int, default=120000)
    parser.add_argument('--eval_step',
                        type=int,
                        nargs='*',
                        default=[80000, 100000, 120000])
    parser.add_argument('--lr_step',
                        type=int,
                        nargs='*',
                        default=[80000, 100000])
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--snap_step', type=int, default=10000)
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--out',
                        default='result')  # in experiments for real experiment
    parser.add_argument('--resume', type=str)
    parser.add_argument('--detection', action='store_true', default=False)
    parser.add_argument('--segmentation', action='store_true', default=False)
    parser.add_argument('--attention', action='store_true', default=False)
    parser.add_argument('--dataset', default='voc', type=str)
    parser.add_argument('--experiment', type=str, default='final_voc')
    parser.add_argument('--multitask_loss', action='store_true', default=False)
    parser.add_argument('--dynamic_loss', action='store_true', default=False)
    parser.add_argument('--log_interval', type=int, default=10)
    parser.add_argument('--debug', action='store_true', default=False)
    parser.add_argument('--update_split_interval', type=int, default=100)
    parser.add_argument(
        '--loss_split', type=float, default=0.5
    )  # in fact for detection, other task(segmentation) is 1-loss_split
    args = parser.parse_args()
    snap_step = args.snap_step
    args.snap_step = []
    for step in range(snap_step, args.iteration + 1, snap_step):
        args.snap_step.append(step)

    # redefine the output path
    import os
    import time
    args.out = os.path.join(args.out, args.experiment,
                            time.strftime("%Y%m%d_%H%M%S", time.localtime()))

    if args.model == 'multi_task_300':
        model = Multi_task_300(n_fg_class=len(voc_bbox_label_names),
                               pretrained_model='imagenet',
                               detection=args.detection,
                               segmentation=args.segmentation,
                               attention=args.attention)
    elif args.model == 'multi_task_512':
        model = Multi_task_512(n_fg_class=len(voc_bbox_label_names),
                               pretrained_model='imagenet',
                               detection=args.detection,
                               segmentation=args.segmentation,
                               attention=args.attention)

    model.use_preset('evaluate')
    if not (args.segmentation or args.detection):
        raise RuntimeError

    train_chain = MultiboxTrainChain(model,
                                     gpu=args.gpu >= 0,
                                     use_multi_task_loss=args.multitask_loss,
                                     loss_split=args.loss_split)
    train_chain.cleargrads()

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    train = TransformDataset(
        Multi_task_VOC(voc_experiments[args.experiment][args.experiment +
                                                        '_train']),
        Transform(model.coder, model.insize, model.mean))
    train_iter = chainer.iterators.MultiprocessIterator(
        train, batch_size=args.batchsize)

    test = VOCBboxDataset(year='2007',
                          split='test',
                          use_difficult=True,
                          return_difficult=True)

    test_iter = chainer.iterators.SerialIterator(test,
                                                 args.batchsize,
                                                 repeat=False,
                                                 shuffle=False)

    test_mask = VOCSemanticSegmentationDataset(split='val')
    test_mask_iter = chainer.iterators.SerialIterator(test_mask,
                                                      args.batchsize,
                                                      repeat=False,
                                                      shuffle=False)

    optimizer = chainer.optimizers.MomentumSGD()
    optimizer.setup(train_chain)
    # optimizer.add_hook(GradientClipping(0.1))
    for param in train_chain.params():
        if param.name == 'b':
            param.update_rule.add_hook(GradientScaling(2))
        else:
            param.update_rule.add_hook(WeightDecay(0.0005))

    updater = training.updaters.StandardUpdater(train_iter,
                                                optimizer,
                                                device=args.gpu)
    trainer = training.Trainer(updater, (args.iteration, 'iteration'),
                               args.out)
    '''if args.resume:
        serializers.load_npz(args.resume, trainer)'''
    trainer.extend(extensions.ExponentialShift('lr', 0.1, init=args.lr),
                   trigger=triggers.ManualScheduleTrigger(
                       args.lr_step, 'iteration'))

    if args.dataset == 'voc':
        use_07 = True
        label_names = voc_bbox_label_names
    elif args.dataset == 'coco':
        use_07 = False  # COCO evaluation does not use the VOC 2007 11-point metric
        label_names = coco_bbox_label_names
    if args.detection and not args.debug:
        trainer.extend(MultitaskEvaluator(test_iter,
                                          model,
                                          args.dataset,
                                          use_07,
                                          label_names=label_names),
                       trigger=triggers.ManualScheduleTrigger(
                           args.eval_step + [args.iteration], 'iteration'))

    if args.segmentation and not args.debug:
        trainer.extend(MultitaskEvaluator(test_mask_iter,
                                          model,
                                          dataset=args.dataset,
                                          label_names=label_names,
                                          detection=False),
                       trigger=triggers.ManualScheduleTrigger(
                           args.eval_step + [args.iteration], 'iteration'))

    log_interval = args.log_interval, 'iteration'
    trainer.extend(extensions.LogReport(trigger=log_interval))
    if args.segmentation and args.detection and args.dynamic_loss:
        trainer.extend(
            loss_split.LossSplit(trigger=(args.update_split_interval,
                                          'iteration')))
    trainer.extend(extensions.observe_lr(), trigger=log_interval)
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'lr', 'main/loss', 'main/loss/mask',
        'main/loss/loc', 'main/loss/conf', 'main/loss/split'
    ]),
                   trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.extend(extensions.snapshot(),
                   trigger=triggers.ManualScheduleTrigger(
                       args.snap_step + [args.iteration], 'iteration'))
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'),
                   trigger=triggers.ManualScheduleTrigger(
                       args.snap_step + [args.iteration], 'iteration'))
    if args.resume:
        if 'model' in args.resume:
            serializers.load_npz(args.resume, model)
        else:
            serializers.load_npz(args.resume, trainer)

    print(args)

    trainer.run()
Example #12
def main():
    parser = argparse.ArgumentParser(
        description='ChainerCV Semantic Segmentation example with FCN')
    parser.add_argument('--batch_size',
                        '-b',
                        type=int,
                        default=1,
                        help='Number of images in each mini-batch')
    parser.add_argument('--iteration',
                        '-i',
                        type=int,
                        default=50000,
                        help='Number of iteration to carry out')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--lr',
                        '-l',
                        type=float,
                        default=1e-10,
                        help='Learning rate of the optimizer')
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Resume the training from snapshot')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batch_size))
    print('# iteration: {}'.format(args.iteration))
    print('')
    batch_size = args.batch_size
    iteration = args.iteration
    gpu = args.gpu
    lr = args.lr
    out = args.out
    resume = args.resume

    # prepare datasets
    def transform(in_data):
        img, label = in_data
        vgg_subtract_bgr = np.array([103.939, 116.779, 123.68],
                                    np.float32)[:, None, None]
        img -= vgg_subtract_bgr
        img = transforms.pad(img, max_size=(512, 512), bg_value=0)
        label = transforms.pad(label, max_size=(512, 512), bg_value=-1)
        return img, label

    train_data = VOCSemanticSegmentationDataset(mode='train')
    test_data = VOCSemanticSegmentationDataset(mode='val')
    train_data = TransformDataset(train_data, transform)
    test_data = TransformDataset(test_data, transform)

    # set up FCN32s
    n_class = 21
    model = FCN32s(n_class=n_class)
    if gpu != -1:
        model.to_gpu(gpu)
        chainer.cuda.get_device(gpu).use()

    # prepare an optimizer
    optimizer = chainer.optimizers.MomentumSGD(lr=lr, momentum=0.99)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(rate=0.0005))

    # prepare iterators
    train_iter = chainer.iterators.SerialIterator(train_data,
                                                  batch_size=batch_size)
    test_iter = chainer.iterators.SerialIterator(test_data,
                                                 batch_size=1,
                                                 repeat=False,
                                                 shuffle=False)

    updater = training.StandardUpdater(train_iter, optimizer, device=gpu)
    trainer = training.Trainer(updater, (iteration, 'iteration'), out=out)

    val_interval = 3000, 'iteration'
    log_interval = 100, 'iteration'

    trainer.extend(TestModeEvaluator(test_iter, model, device=gpu),
                   trigger=val_interval)

    # reporter related
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'iteration', 'main/time', 'main/loss', 'validation/main/loss',
        'main/accuracy', 'validation/main/accuracy', 'main/accuracy_cls',
        'validation/main/accuracy_cls', 'main/iu', 'validation/main/iu',
        'main/fwavacc', 'validation/main/fwavacc'
    ]),
                   trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    # visualize training
    trainer.extend(
        extensions.PlotReport(['main/loss', 'validation/main/loss'],
                              trigger=log_interval,
                              file_name='loss.png'))
    trainer.extend(
        extensions.PlotReport(['main/accuracy', 'validation/main/accuracy'],
                              trigger=log_interval,
                              file_name='accuracy.png'))
    trainer.extend(
        extensions.PlotReport(
            ['main/accuracy_cls', 'validation/main/accuracy_cls'],
            trigger=log_interval,
            file_name='accuracy_cls.png'))
    trainer.extend(
        extensions.PlotReport(['main/iu', 'validation/main/iu'],
                              trigger=log_interval,
                              file_name='iu.png'))
    trainer.extend(
        extensions.PlotReport(['main/fwavacc', 'validation/main/fwavacc'],
                              trigger=log_interval,
                              file_name='fwavacc.png'))

    def vis_transform(in_data):
        vgg_subtract_bgr = np.array([103.939, 116.779, 123.68],
                                    np.float32)[:, None, None]
        img, label = in_data
        img += vgg_subtract_bgr
        img, label = transforms.chw_to_pil_image_tuple((img, label),
                                                       indices=[0, 1])
        return img, label

    trainer.extend(
        SemanticSegmentationVisReport(
            range(10),  # visualize outputs for the first 10 data of test_data
            test_data,
            model,
            n_class=n_class,
            predict_func=model.predict,  # a function to predict output
            vis_transform=vis_transform),
        trigger=val_interval,
        invoke_before_training=True)

    trainer.extend(extensions.dump_graph('main/loss'))

    if resume:
        chainer.serializers.load_npz(osp.expanduser(resume), trainer)

    trainer.run()
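Note the asymmetric padding in transform: images are padded with 0, labels with bg_value=-1. Chainer's F.softmax_cross_entropy treats -1 as ignore_label by default, so the padded border contributes nothing to the loss. A minimal sketch of the effect, with hypothetical shapes:

import numpy as np
import chainer.functions as F

y = np.random.randn(1, 21, 4, 4).astype(np.float32)  # logits, 21 classes
t = np.full((1, 4, 4), -1, dtype=np.int32)            # everything ignored
t[0, :2, :2] = 3                                      # a few labelled pixels
loss = F.softmax_cross_entropy(y, t)  # pixels labelled -1 are skipped
print(loss.array)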
Example #13
def run(args):

    if args.dataset == 'voc12':
        dataset = VOCSemanticSegmentationDataset(split=args.chainer_eval_set,
                                                 data_dir=args.dev_root)
        outsize = None
    elif args.dataset in ['adp_morph', 'adp_func']:
        dataset = ADPSemanticSegmentationDataset(
            split=args.chainer_eval_set,
            data_dir=args.dev_root,
            htt_type=args.dataset.split('_')[-1])
        outsize = (1088, 1088)
    elif args.dataset in ['deepglobe', 'deepglobe_balanced']:
        dataset = DeepGlobeSemanticSegmentationDataset(
            split=args.chainer_eval_set,
            data_dir=args.dev_root,
            is_balanced=args.dataset == 'deepglobe_balanced')
        outsize = (2448, 2448)
    else:
        raise KeyError('Dataset %s not yet implemented' % args.dataset)
    labels = [
        dataset.get_example_by_keys(i, (1, ))[0] for i in range(len(dataset))
    ]

    preds = []

    with tqdm(total=len(dataset)) as pbar:
        for id in dataset.ids:
            cls_labels = imageio.imread(
                os.path.join(args.sem_seg_out_dir,
                             id + '.png')).astype(np.uint8)
            cls_labels[cls_labels == 255] = 0
            if outsize is not None:
                cls_labels = cv2.resize(cls_labels,
                                        outsize,
                                        interpolation=cv2.INTER_NEAREST)
            preds.append(cls_labels.copy())
            pbar.update(1)

    confusion = calc_semantic_segmentation_confusion(preds,
                                                     labels)  #[:21, :21]

    gtj = confusion.sum(axis=1)
    resj = confusion.sum(axis=0)
    gtjresj = np.diag(confusion)
    denominator = gtj + resj - gtjresj
    fp = 1. - gtj / denominator
    fn = 1. - resj / denominator
    iou = gtjresj / denominator
    miou = np.array([np.nanmean(iou)])

    print(fp[0], fn[0])
    print(np.mean(fp[1:]), np.mean(fn[1:]))

    data = np.concatenate((iou, miou), axis=0)
    if args.dataset in ['deepglobe', 'deepglobe_balanced']:
        row_names = args.class_names['bg'] + args.class_names['fg'][:-1] + [
            'miou'
        ]
    else:
        row_names = args.class_names['bg'] + args.class_names['fg'] + ['miou']
    df = pd.DataFrame(data, index=row_names, columns=['iou'])
    df.to_csv(os.path.join(args.eval_dir,
                           args.run_name + '_' + args.split + '_iou.csv'),
              index=True)

    with open(args.logfile, 'a') as f:
        f.write('[eval_sem_seg, ' + args.split + '] iou: ' + str(list(iou)) +
                '\n')
        f.write('[eval_sem_seg, ' + args.split + '] miou: ' + str(miou[0]) +
                '\n')
Example #14
def run(args):

    if args.dataset == 'voc12':
        dataset = VOCSemanticSegmentationDataset(split=args.chainer_eval_set, data_dir=args.dev_root)
        outsize = None
    elif args.dataset in ['adp_morph', 'adp_func']:
        dataset = ADPSemanticSegmentationDataset(split=args.chainer_eval_set, data_dir=args.dev_root,
                                                 htt_type=args.dataset.split('_')[-1])
        outsize = (1088, 1088)
    elif args.dataset in ['deepglobe', 'deepglobe_balanced']:
        dataset = DeepGlobeSemanticSegmentationDataset(split=args.chainer_eval_set, data_dir=args.dev_root,
                                                       is_balanced=args.dataset == 'deepglobe_balanced')
        outsize = (2448, 2448)
    else:
        raise KeyError('Dataset %s not yet implemented' % args.dataset)
    labels = [dataset.get_example_by_keys(i, (1,))[0] for i in range(len(dataset))]

    preds = []
    with tqdm(total=len(dataset)) as pbar:
        for id in dataset.ids:
            if args.dataset == 'voc12':
                img_path = voc12.dataloader.get_img_path(id, args.dev_root)
            elif args.dataset in ['adp_morph', 'adp_func']:
                img_path = adp.dataloader.get_img_path(id, args.dev_root, args.split == 'evaluation')
            elif args.dataset in ['deepglobe', 'deepglobe_balanced']:
                img_path = deepglobe.dataloader.get_img_path(id, args.dev_root)
            else:
                raise KeyError('Dataset %s not yet implemented' % args.dataset)

            cam_dict = np.load(os.path.join(args.cam_out_dir, id + '.npy'), allow_pickle=True).item()
            if args.dataset == 'voc12':
                cams = cam_dict['high_res']
                cams = np.pad(cams, ((1, 0), (0, 0), (0, 0)), mode='constant', constant_values=args.cam_eval_thres)
                keys = np.pad(cam_dict['keys'] + 1, (1, 0), mode='constant')
            elif args.dataset in ['adp_morph', 'adp_func']:
                keys = cam_dict['keys']
                cams = cam_dict['high_res']
            elif args.dataset in ['deepglobe', 'deepglobe_balanced']:
                keys = cam_dict['keys']
                cams = cam_dict['cam']
            else:
                raise KeyError('Dataset %s not yet implemented' % args.dataset)
            cls_labels = np.argmax(cams, axis=0)
            cls_labels = keys[cls_labels]
            if outsize is not None:
                cls_labels = cv2.resize(cls_labels, outsize, interpolation=cv2.INTER_NEAREST)

            # NOTE: this greyscale save is overwritten by the colour image written to the same path below
            imageio.imsave(os.path.join(args.cam_clr_out_dir, id + '.png'), cls_labels.astype(np.uint8))
            # Save with colour
            rw_pred_clr = np.zeros(list(cls_labels.shape) + [3], dtype=np.uint8)
            off = 0
            for t in ['bg', 'fg']:
                for i, c in enumerate(args.class_colours[t]):
                    for ch in range(3):
                        rw_pred_clr[:, :, ch] += c[ch] * np.uint8(cls_labels == (i + off))
                off += len(args.class_colours[t])
            imageio.imsave(os.path.join(args.cam_clr_out_dir, id + '.png'), rw_pred_clr)
            # Save with colour, overlaid on original image
            if args.dataset not in ['deepglobe', 'deepglobe_balanced']:
                orig_img = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
            else:
                orig_img = cv2.resize(cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB), rw_pred_clr.shape[:2])
            if args.dataset in ['adp_morph', 'adp_func']:
                rw_pred_clr = cv2.resize(rw_pred_clr, orig_img.shape[:2])
            rw_pred_clr_over = np.uint8((1 - args.overlay_r) * np.float32(orig_img) +
                                        args.overlay_r * np.float32(rw_pred_clr))
            imageio.imsave(os.path.join(args.cam_clr_out_dir, id + '_overlay.png'), rw_pred_clr_over)
            preds.append(cls_labels.copy())
            pbar.update(1)

    confusion = calc_semantic_segmentation_confusion(preds, labels)

    gtj = confusion.sum(axis=1)
    resj = confusion.sum(axis=0)
    gtjresj = np.diag(confusion)
    denominator = gtj + resj - gtjresj
    iou = gtjresj / denominator
    precision = gtjresj / resj
    recall = gtjresj / gtj
    miou = np.array([np.nanmean(iou)])
    mprecision = np.array([np.nanmean(precision)])
    mrecall = np.array([np.nanmean(recall)])

    iou_data = np.concatenate((iou, miou), axis=0)
    pr_data = np.concatenate((precision, mprecision), axis=0)
    re_data = np.concatenate((recall, mrecall), axis=0)
    data = np.column_stack((iou_data, pr_data, re_data))
    if args.dataset in ['deepglobe', 'deepglobe_balanced']:
        row_names = args.class_names['bg'] + args.class_names['fg'][:-1] + ['mean']
    else:
        row_names = args.class_names['bg'] + args.class_names['fg'] + ['mean']
    df = pd.DataFrame(data, index=row_names, columns=['iou', 'precision', 'recall'])
    df.to_csv(os.path.join(args.eval_dir, args.run_name + '_' + args.split + '_cam_iou.csv'), index=True)

    with open(args.logfile, 'a') as f:
        f.write('[eval_cam, ' + args.split + '] iou: ' + str(list(iou)) + '\n')
        f.write('[eval_cam, ' + args.split + '] miou: ' + str(miou[0]) + '\n')
    # args.logger.write('[eval_cam] iou: ' + iou + '\n')
    # args.logger.write('[eval_cam] miou: ' + miou+ '\n')
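The overlay written near the end is plain alpha compositing: the colourised prediction is blended onto the original image with fixed opacity args.overlay_r. A standalone sketch with hypothetical arrays:

import numpy as np

overlay_r = 0.4                                        # hypothetical opacity
orig_img = np.zeros((4, 4, 3), dtype=np.uint8)         # stand-in RGB image
rw_pred_clr = np.full((4, 4, 3), 255, dtype=np.uint8)  # stand-in colour mask

blended = np.uint8((1 - overlay_r) * np.float32(orig_img)
                   + overlay_r * np.float32(rw_pred_clr))
print(blended[0, 0])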
Example #15
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--device', type=int, default=-1, help='gpu id')
    parser.add_argument('--lr_init',
                        type=float,
                        default=5 * 1e-5,
                        help='init learning rate')
    # parser.add_argument('--lr_trigger', type=float, default=5, help='trigger to decreace learning rate')
    # parser.add_argument('--lr_target', type=float, default=5*1e-5, help='target learning rate')
    # parser.add_argument('--lr_factor', type=float, default=.75, help='decay factor')
    parser.add_argument('--name',
                        type=str,
                        default='classifier',
                        help='name of the experiment')
    parser.add_argument('--resume',
                        type=bool,
                        default=False,
                        help='resume training or not')
    parser.add_argument('--snapshot',
                        type=str,
                        help='snapshot file of the trainer to resume from')

    args = parser.parse_args()

    resume = args.resume
    device = args.device

    if resume:
        load_snapshot_path = args.snapshot

    experiment = args.name
    lr_init = args.lr_init
    # lr_target = args.lr_target
    # lr_factor = args.lr_factor
    # lr_trigger_interval = (args.lr_trigger, 'epoch')

    os.makedirs('result/' + experiment, exist_ok=True)
    f = open('result/' + experiment + '/details.txt', "w+")
    f.write("lr - " + str(lr_init) + "\n")
    f.write("optimizer - " + str(Adam))
    # f.write("lr_trigger_interval - "+str(lr_trigger_interval)+"\n")
    f.close()

    if not resume:
        # Add the FC layers to original FCN for GAIN
        model_own = FCN8s()
        model_original = fcn.models.FCN8s()
        model_file = fcn.models.FCN8s.download()
        chainer.serializers.load_npz(model_file, model_original)

        for layers in model_original._children:
            setattr(model_own, layers, getattr(model_original, layers))
        del (model_original, model_file)

    else:
        model_own = FCN8s()

    if device >= 0:
        model_own.to_gpu(device)

    dataset = VOCSemanticSegmentationDataset()
    iterator = SerialIterator(dataset, 1)
    optimizer = Adam(alpha=lr_init)
    optimizer.setup(model_own)

    updater = VOC_ClassificationUpdater(iterator, optimizer, device=device)
    trainer = Trainer(updater, (100, 'epoch'))
    log_keys = ['epoch', 'iteration', 'main/Loss']
    trainer.extend(
        extensions.LogReport(log_keys, (100, 'iteration'),
                             log_name='log_' + experiment))
    trainer.extend(extensions.PrintReport(log_keys),
                   trigger=(100, 'iteration'))
    trainer.extend(extensions.snapshot(filename=experiment +
                                       "_snapshot_{.updater.iteration}"),
                   trigger=(5, 'epoch'))
    trainer.extend(extensions.snapshot_object(
        trainer.updater._optimizers['main'].target,
        experiment + "_model_{.updater.iteration}"),
                   trigger=(5, 'epoch'))
    trainer.extend(
        extensions.PlotReport(['main/Loss'],
                              'iteration', (100, 'iteration'),
                              file_name='trainer_' + experiment + '/loss.png',
                              grid=True,
                              marker=" "))

    # trainer.extend(extensions.ExponentialShift('lr', lr_factor, target=lr_target), trigger=lr_trigger_interval)
    if resume:
        chainer.serializers.load_npz(load_snapshot_path, trainer)

    print("Running - - ", experiment)
    print('initial lr ', lr_init)
    # print('lr_trigger_interval ', lr_trigger_interval)
    trainer.run()
Example #16
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--gpu', type=int, default=0, help='gpu id')
    parser.add_argument('--modelfile', help='pretrained model file of FCN8')
    parser.add_argument('--lr',
                        type=float,
                        default=1e-7,
                        help='init learning rate')
    parser.add_argument('--name',
                        type=str,
                        default='exp',
                        help='name of the experiment')
    parser.add_argument('--resume',
                        type=int,
                        default=0,
                        help='resume training or not')
    parser.add_argument('--snapshot',
                        type=str,
                        help='snapshot file to resume from')
    parser.add_argument('--lambda1',
                        default=5,
                        type=float,
                        help='lambda1 param')
    parser.add_argument('--lambda2',
                        default=1,
                        type=float,
                        help='lambda2 param')
    parser.add_argument('--lambda3',
                        default=1.5,
                        type=float,
                        help='lambda3 param')

    args = parser.parse_args()

    resume = args.resume
    device = args.gpu

    if resume:
        load_snapshot_path = args.snapshot
        load_model_path = args.modelfile
    else:
        pretrained_model_path = args.modelfile

    experiment = args.name
    lr = args.lr
    optim = Adam
    training_interval = (20000, 'iteration')
    snapshot_interval = (1000, 'iteration')
    lambd1 = args.lambda1
    lambd2 = args.lambda2
    lambd3 = args.lambda3
    updtr = VOC_GAIN_Updater2

    os.makedirs('result/' + experiment, exist_ok=True)
    f = open('result/' + experiment + '/details.txt', "w+")
    f.write("lr - " + str(lr) + "\n")
    f.write("optimizer - " + str(optim) + "\n")
    f.write("lambd1 - " + str(lambd1) + "\n")
    f.write("lambd2 - " + str(lambd2) + "\n")
    f.write("lambd3 - " + str(lambd3) + "\n")
    f.write("training_interval - " + str(training_interval) + "\n")
    f.write("Updater - " + str(updtr) + "\n")
    f.close()

    if resume:
        model = FCN8s()
        chainer.serializers.load_npz(load_model_path, model)
    else:
        model = FCN8s()
        chainer.serializers.load_npz(pretrained_model_path, model)

    if device >= 0:
        model.to_gpu(device)
    dataset = VOCSemanticSegmentationDataset()
    iterator = SerialIterator(dataset, 1, shuffle=False)

    optimizer = Adam(alpha=lr)
    optimizer.setup(model)

    updater = updtr(iterator,
                    optimizer,
                    device=device,
                    lambd1=lambd1,
                    lambd2=lambd2)
    trainer = Trainer(updater, training_interval)
    log_keys = [
        'epoch', 'iteration', 'main/AM_Loss', 'main/CL_Loss', 'main/TotalLoss'
    ]
    trainer.extend(
        extensions.LogReport(log_keys, (10, 'iteration'),
                             log_name='log' + experiment))
    trainer.extend(extensions.PrintReport(log_keys),
                   trigger=(100, 'iteration'))
    trainer.extend(
        extensions.ProgressBar(training_length=training_interval,
                               update_interval=100))
    trainer.extend(extensions.snapshot(filename='snapshot' + experiment),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        trainer.updater._optimizers['main'].target, "model" + experiment),
                   trigger=snapshot_interval)
    trainer.extend(
        extensions.PlotReport(['main/AM_Loss'],
                              'iteration', (20, 'iteration'),
                              file_name=experiment + '/am_loss.png',
                              grid=True,
                              marker=" "))
    trainer.extend(
        extensions.PlotReport(['main/CL_Loss'],
                              'iteration', (20, 'iteration'),
                              file_name=experiment + '/cl_loss.png',
                              grid=True,
                              marker=" "))
    trainer.extend(
        extensions.PlotReport(['main/SG_Loss'],
                              'iteration', (20, 'iteration'),
                              file_name=experiment + '/sg_loss.png',
                              grid=True,
                              marker=" "))
    trainer.extend(
        extensions.PlotReport(['main/TotalLoss'],
                              'iteration', (20, 'iteration'),
                              file_name=experiment + '/total_loss.png',
                              grid=True,
                              marker=" "))
    trainer.extend(
        extensions.PlotReport(log_keys[2:],
                              'iteration', (20, 'iteration'),
                              file_name=experiment + '/all_loss.png',
                              grid=True,
                              marker=" "))

    if resume:
        chainer.serializers.load_npz(load_snapshot_path, trainer)
    print("Running - - ", experiment)
    print('initial lr ', lr)
    print('optimizer ', optim)
    print('lambd1 ', lambd1)
    print('lambd2 ', lambd2)
    print('lambd3', lambd3)
    trainer.run()
Example #17
from chainercv.datasets import VOCDetectionDataset
from chainercv.datasets import VOCSemanticSegmentationDataset
from chainercv.datasets import voc_detection_label_names
from chainercv.datasets import voc_semantic_segmentation_label_names
from chainercv.datasets import voc_semantic_segmentation_label_colors
from chainercv.visualizations import vis_bbox
from chainercv.visualizations import vis_image
from chainercv.visualizations import vis_label
import matplotlib.pyplot as plot


fig = plot.figure(figsize=(26, 10))
ax1 = fig.add_subplot(1, 2, 1)
plot.axis('off')
ax2 = fig.add_subplot(1, 2, 2)
plot.axis('off')
dataset = VOCDetectionDataset()
img, bbox, label = dataset[310]

vis_bbox(img, bbox, label,
         label_names=voc_detection_label_names,
         ax=ax1)

dataset = VOCSemanticSegmentationDataset()
img, label = dataset[30]
vis_image(img, ax=ax2)
_, legend_handles = vis_label(
    label,
    label_names=voc_semantic_segmentation_label_names,
    label_colors=voc_semantic_segmentation_label_colors,
    alpha=0.9, ax=ax2)
# ax2.legend(handles=legend_handles, bbox_to_anchor=(1, 1), loc=2)
plot.tight_layout()
plot.savefig('../images/vis_visualization.png')
plot.show()

Example #18
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--gpu', type=int, default=0, help='gpu id')
    parser.add_argument('--modelfile', help='pretrained model file of FCN8')
    parser.add_argument('--lr',
                        type=float,
                        default=5 * 1e-4,
                        help='init learning rate')
    parser.add_argument('--name',
                        type=str,
                        default='exp',
                        help='name of the experiment')
    parser.add_argument('--resume',
                        type=int,
                        default=0,
                        help='resume training or not')
    parser.add_argument('--snapshot',
                        type=str,
                        help='snapshot file to resume from')

    args = parser.parse_args()

    resume = args.resume
    device = args.gpu

    if resume:
        load_snapshot_path = args.snapshot
        load_model_path = args.modelfile
    else:
        load_model_path = args.modelfile

    experiment = args.name
    lr = args.lr
    lr_trigger_interval = (5, 'epoch')
    optim = Adam

    os.makedirs('result/' + experiment, exist_ok=True)
    f = open('result/' + experiment + '/details.txt', "w+")
    f.write("lr - " + str(lr) + "\n")
    f.write("optimizer - " + str(optim))
    f.write("lr_trigger_interval - " + str(lr_trigger_interval) + "\n")
    f.close()

    if not resume:
        # Add the FC layers to original FConvN for GAIN
        model_own = FCN8s()
        model_original = fcn.models.FCN8s()
        model_file = fcn.models.FCN8s.download()
        chainer.serializers.load_npz(model_file, model_original)

        for layers in model_original._children:
            setattr(model_own, layers, getattr(model_original, layers))
        del (model_original, model_file)

    else:
        model_own = FCN8s()
        chainer.serializers.load_npz(load_model_path, model_own)

    if device >= 0:
        model_own.to_gpu(device)

    dataset = VOCSemanticSegmentationDataset()
    iterator = SerialIterator(dataset, 1)
    optimizer = Adam(alpha=lr)
    optimizer.setup(model_own)

    updater = VOC_ClassificationUpdater(iterator, optimizer, device=device)
    trainer = Trainer(updater, (50, 'epoch'))
    log_keys = ['epoch', 'iteration', 'main/Loss']
    trainer.extend(
        extensions.LogReport(log_keys, (100, 'iteration'),
                             log_name='log' + experiment))
    trainer.extend(extensions.PrintReport(log_keys),
                   trigger=(100, 'iteration'))
    trainer.extend(
        extensions.ProgressBar(training_length=(50, 'epoch'),
                               update_interval=500))
    trainer.extend(extensions.snapshot(filename='snapshot' + experiment),
                   trigger=(5, 'epoch'))
    trainer.extend(extensions.snapshot_object(
        trainer.updater._optimizers['main'].target, "model" + experiment),
                   trigger=(5, 'epoch'))
    trainer.extend(
        extensions.PlotReport(['main/Loss'],
                              'iteration', (100, 'iteration'),
                              file_name=experiment + '/loss.png',
                              grid=True,
                              marker=" "))

    if resume:
        chainer.serializers.load_npz(load_snapshot_path, trainer)
    print("Running - - ", experiment)
    print('initial lr ', lr)
    print('lr_trigger_interval ', lr_trigger_interval)
    trainer.run()
Example #19
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        choices=('multi_task_300', 'multi_task_512'),
                        default='multi_task_300')
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--model_path', type=str)
    parser.add_argument('--detection', action='store_true', default=False)
    parser.add_argument('--segmentation', action='store_true', default=False)
    parser.add_argument('--attention', action='store_true', default=False)
    parser.add_argument('--dataset', default='voc', type=str)
    parser.add_argument('--eval_seg', default=False, action='store_true')
    parser.add_argument('--eval_det', default=False, action='store_true')
    parser.add_argument('--batchsize', type=int, default=32)

    args = parser.parse_args()
    print(args)
    if not (args.segmentation or args.detection):
        raise RuntimeError

    if not args.model_path:
        raise RuntimeError

    if args.model == 'multi_task_300':
        model = Multi_task_300(n_fg_class=len(voc_bbox_label_names),
                               pretrained_model='imagenet',
                               detection=args.detection,
                               segmentation=args.segmentation,
                               attention=args.attention)
    elif args.model == 'multi_task_512':
        model = Multi_task_512(n_fg_class=len(voc_bbox_label_names),
                               pretrained_model='imagenet',
                               detection=args.detection,
                               segmentation=args.segmentation,
                               attention=args.attention)

    model.use_preset('evaluate')

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    if args.dataset == 'voc':
        use_07 = True
        label_names = voc_bbox_label_names
    elif args.dataset == 'coco':
        use_07 = False  # COCO evaluation does not use the VOC 2007 11-point metric
        label_names = coco_bbox_label_names

    if args.model_path:
        serializers.load_npz(args.model_path, model)

    if args.detection and args.eval_det:
        test = VOCBboxDataset(year='2007',
                              split='test',
                              use_difficult=True,
                              return_difficult=True)

        test_iter = chainer.iterators.SerialIterator(test,
                                                     args.batchsize,
                                                     repeat=False,
                                                     shuffle=False)
        det_evaluator = MultitaskEvaluator(test_iter,
                                           model,
                                           use_07_metric=use_07,
                                           label_names=label_names,
                                           detection=True)
        result = det_evaluator()
        print('detection result')
        print(result)

    if args.segmentation and args.eval_seg:
        test_mask = VOCSemanticSegmentationDataset(split='val')
        test_mask_iter = chainer.iterators.SerialIterator(test_mask,
                                                          args.batchsize,
                                                          repeat=False,
                                                          shuffle=False)
        seg_evaluator = MultitaskEvaluator(test_mask_iter,
                                           model,
                                           use_07_metric=use_07,
                                           label_names=label_names,
                                           detection=False)
        result_mask = seg_evaluator()
        print('segmentation result')
        print(result_mask)
Example #20
def main(gpu=-1, batch_size=1, iterations=100000,
         lr=1e-10, out='result', resume=''):
    # prepare datasets
    def transform(in_data):
        img, label = in_data
        vgg_subtract_bgr = np.array(
            [103.939, 116.779, 123.68], np.float32)[:, None, None]
        img -= vgg_subtract_bgr
        img = pad(img, max_size=(512, 512), bg_value=0)
        label = pad(label, max_size=(512, 512), bg_value=-1)
        return img, label

    train_data = VOCSemanticSegmentationDataset(mode='train')
    test_data = VOCSemanticSegmentationDataset(mode='val')
    extend(train_data, transform)
    extend(test_data, transform)

    # set up FCN32s
    n_class = 21
    model = FCN32s(n_class=n_class)
    if gpu != -1:
        model.to_gpu(gpu)
        chainer.cuda.get_device(gpu).use()

    # prepare an optimizer
    optimizer = chainer.optimizers.MomentumSGD(lr=lr, momentum=0.99)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(rate=0.0005))

    # prepare iterators
    train_iter = chainer.iterators.SerialIterator(
        train_data, batch_size=batch_size)
    test_iter = chainer.iterators.SerialIterator(
        test_data, batch_size=1, repeat=False, shuffle=False)

    updater = training.StandardUpdater(train_iter, optimizer, device=gpu)
    trainer = training.Trainer(updater, (iterations, 'iteration'), out=out)

    val_interval = 3000, 'iteration'
    log_interval = 100, 'iteration'

    trainer.extend(
        TestModeEvaluator(test_iter, model, device=gpu), trigger=val_interval)

    # reporter related
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport(
        ['iteration', 'main/time',
         'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy',
         'main/accuracy_cls', 'validation/main/accuracy_cls',
         'main/iu', 'validation/main/iu',
         'main/fwavacc', 'validation/main/fwavacc']),
        trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    # visualize training
    trainer.extend(
        extensions.PlotReport(
            ['main/loss', 'validation/main/loss'],
            trigger=log_interval, file_name='loss.png')
    )
    trainer.extend(
        extensions.PlotReport(
            ['main/accuracy', 'validation/main/accuracy'],
            trigger=log_interval, file_name='accuracy.png')
    )
    trainer.extend(
        extensions.PlotReport(
            ['main/accuracy_cls', 'validation/main/accuracy_cls'],
            trigger=log_interval, file_name='accuracy_cls.png')
    )
    trainer.extend(
        extensions.PlotReport(
            ['main/iu', 'validation/main/iu'],
            trigger=log_interval, file_name='iu.png')
    )
    trainer.extend(
        extensions.PlotReport(
            ['main/fwavacc', 'validation/main/fwavacc'],
            trigger=log_interval, file_name='fwavacc.png')
    )
    trainer.extend(
        SemanticSegmentationVisReport(
            range(10),  # visualize outputs for the first 10 data of test_data
            test_data,
            model,
            n_class=n_class,
            predict_func=model.extract  # use FCN32s.extract to get a score map
        ),
        trigger=val_interval, invoke_before_training=True)

    trainer.extend(extensions.dump_graph('main/loss'))

    if resume:
        chainer.serializers.load_npz(osp.expanduser(resume), trainer)

    trainer.run()
Example #21
from config import *
import numpy as np
import os
from chainercv.datasets import VOCSemanticSegmentationDataset
from chainercv.evaluations import calc_semantic_segmentation_confusion
from PIL import Image
import cv2
rgb_dict = [[0, 0, 0]] + [[255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 255, 0],
                          [255, 0, 255], [0, 255, 255]] * 20
# rgb_dict = [[0, 0, 0]] + [[0, 0, 0], [0, 255, 0], [0, 0, 0], [255, 255, 0], [ 255, 0, 255], [0, 255,255]]*20

if __name__ == "__main__":
    dataset = VOCSemanticSegmentationDataset(split=chainer_eval_set,
                                             data_dir=voc12_root)
    labels = [
        dataset.get_example_by_keys(i, (1, ))[0] for i in range(len(dataset))
    ]

    preds = []
    for idx, id in enumerate(dataset.ids):
        cam_dict = np.load(os.path.join(cam_out_dir, id + '.npy'),
                           allow_pickle=True).item()
        cams = cam_dict['high_res']
        cams[:2, :] = 0  # zero out every CAM channel
        cams[3:, :] = 0  # except the one at index 2
        cams = np.pad(cams, ((1, 0), (0, 0), (0, 0)),
                      mode='constant',
                      constant_values=cam_eval_thres)  # add the background threshold plane
        keys = np.pad(cam_dict['keys'] + 1, (1, 0), mode='constant')  # add the background class
        cls_labels = np.argmax(cams, axis=0)
        cls_labels = keys[cls_labels]