Code example #1
File: test_pspnet.py (project: zhuyiche/chainercv)
    def test_pretrained(self):
        """Check that loading pretrained weights validates ``n_class``.

        Loading the 'cityscapes' weights must succeed only when
        ``n_class`` is left as ``None`` or matches the 19 Cityscapes
        classes; any other combination must raise ``ValueError``.
        """
        kwargs = {
            'n_class': self.n_class,
            'pretrained_model': self.pretrained_model,
        }

        # Default to the invalid path: the original left `valid`
        # undefined (NameError) for any pretrained_model other than
        # 'cityscapes'.
        valid = False
        if self.pretrained_model == 'cityscapes':
            valid = self.n_class in {None, 19}

        if valid:
            PSPNetResNet101(**kwargs)
        else:
            with self.assertRaises(ValueError):
                PSPNetResNet101(**kwargs)
Code example #2
def get_dataset_and_model(dataset_name, model_name, pretrained_model,
                          input_size):
    """Return ``(dataset, label_names, model)`` for a segmentation eval.

    Args:
        dataset_name (str): One of 'cityscapes', 'ade20k' or 'camvid'.
        model_name (str): One of 'pspnet_resnet101', 'pspnet_resnet50'
            or 'segnet'.
        pretrained_model (str or None): Name or path of pretrained
            weights. Falsy values fall back to ``dataset_name`` so each
            model loads its dataset-specific weights.
        input_size (tuple of int): Model input size (ignored by SegNet).

    Raises:
        ValueError: If ``dataset_name`` or ``model_name`` is unknown
            (the original code failed with NameError/UnboundLocalError
            in these cases).
    """
    if dataset_name == 'cityscapes':
        dataset = CityscapesSemanticSegmentationDataset(
            split='val', label_resolution='fine')
        label_names = cityscapes_semantic_segmentation_label_names
    elif dataset_name == 'ade20k':
        dataset = ADE20KSemanticSegmentationDataset(split='val')
        label_names = ade20k_semantic_segmentation_label_names
    elif dataset_name == 'camvid':
        dataset = CamVidDataset(split='test')
        label_names = camvid_label_names
    else:
        raise ValueError('unknown dataset: {}'.format(dataset_name))

    n_class = len(label_names)

    # Fall back to the dataset-specific pretrained weights when none is
    # given (the original `pretrained_model = pretrained_model` branch
    # was a no-op).
    if not pretrained_model:
        pretrained_model = dataset_name

    if model_name == 'pspnet_resnet101':
        model = PSPNetResNet101(n_class=n_class,
                                pretrained_model=pretrained_model,
                                input_size=input_size)
    elif model_name == 'pspnet_resnet50':
        model = PSPNetResNet50(n_class=n_class,
                               pretrained_model=pretrained_model,
                               input_size=input_size)
    elif model_name == 'segnet':
        model = SegNetBasic(n_class=n_class, pretrained_model=pretrained_model)
    else:
        raise ValueError('unknown model: {}'.format(model_name))
    return dataset, label_names, model
Code example #3
def main():
    """Run PSPNet (ResNet-101) on one image and show the prediction.

    Displays the input image side by side with the predicted semantic
    segmentation, using the Cityscapes label set and colors.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--gpu', '-g', type=int, default=-1)
    arg_parser.add_argument('--pretrained-model')
    arg_parser.add_argument('--input-size', type=int, default=713)
    arg_parser.add_argument('image')
    opts = arg_parser.parse_args()

    names = cityscapes_semantic_segmentation_label_names
    palette = cityscapes_semantic_segmentation_label_colors

    # PSPNet expects a fixed, square input resolution.
    size = (opts.input_size, opts.input_size)
    model = PSPNetResNet101(len(names), opts.pretrained_model, size)

    if opts.gpu >= 0:
        chainer.cuda.get_device_from_id(opts.gpu).use()
        model.to_gpu(opts.gpu)

    image = read_image(opts.image)
    label = model.predict([image])[0]

    figure = plt.figure()
    left_axes = figure.add_subplot(1, 2, 1)
    vis_image(image, ax=left_axes)
    right_axes = figure.add_subplot(1, 2, 2)
    right_axes, handles = vis_semantic_segmentation(
        image, label, names, palette, ax=right_axes)
    right_axes.legend(handles=handles, bbox_to_anchor=(1, 1), loc=2)

    plt.show()
Code example #4
def main():
    """Evaluate PSPNet on the Cityscapes validation set, print metrics."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model', choices=('pspnet_resnet101',),
        default='pspnet_resnet101')
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--pretrained-model')
    args = parser.parse_args()

    label_names = cityscapes_semantic_segmentation_label_names
    if args.model == 'pspnet_resnet101':
        if args.pretrained_model:
            # Custom weights need an explicit class count / input size.
            model = PSPNetResNet101(
                n_class=len(label_names),
                pretrained_model=args.pretrained_model, input_size=(713, 713)
            )
        else:
            model = PSPNetResNet101(pretrained_model='cityscapes')

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    dataset = CityscapesSemanticSegmentationDataset(
        split='val', label_resolution='fine')
    iterator = iterators.SerialIterator(
        dataset, 1, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, iterator, hook=ProgressHook(len(dataset)))
    # The image iterator is no longer needed; drop it to save memory.
    del in_values
    pred_labels, = out_values
    gt_labels, = rest_values

    result = eval_semantic_segmentation(pred_labels, gt_labels)

    for score, name in zip(result['iou'], label_names):
        print('{:>23} : {:.4f}'.format(name, score))
    print('=' * 34)
    print('{:>23} : {:.4f}'.format('mean IoU', result['miou']))
    print('{:>23} : {:.4f}'.format(
        'Class average accuracy', result['mean_class_accuracy']))
    print('{:>23} : {:.4f}'.format(
        'Global average accuracy', result['pixel_accuracy']))
Code example #5
def main():
    """Convert Caffe PSPNet101 (Cityscapes, 713x713) weights to npz.

    Reads a binary caffemodel plus its prototxt, copies the parameters
    into a chainercv ``PSPNetResNet101`` and saves the result with
    ``serializers.save_npz``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('caffemodel')
    parser.add_argument('output')
    args = parser.parse_args()

    proto_path = 'weights/pspnet101_cityscapes_713.prototxt'
    n_class = 19  # Cityscapes semantic classes
    input_size = (713, 713)

    model = PSPNetResNet101(n_class, None, input_size)
    # One dummy forward pass forces lazily-created parameters to exist
    # before they are overwritten by the transfer below.
    model(np.random.uniform(size=(1, 3) + input_size).astype(np.float32))

    caffe_param = caffe_pb2.NetParameter()
    # Use context managers so the file handles are closed (the original
    # `open(...).read()` calls leaked them).
    with open(args.caffemodel, 'rb') as f:
        caffe_param.MergeFromString(f.read())
    with open(proto_path) as f:
        caffe_net = text_format.Merge(f.read(), caffe_pb2.NetParameter())

    transfer(model, caffe_param, caffe_net)
    serializers.save_npz(args.output, model)
Code example #6
File: test_pspnet.py (project: zhuyiche/chainercv)
 def setUp(self):
     """Build a small PSPNetResNet101 link for the tests."""
     # Deliberately tiny class count and input size to keep tests fast.
     self.input_size = (120, 160)
     self.n_class = 10
     self.link = PSPNetResNet101(
         n_class=self.n_class, input_size=self.input_size)
Code example #7
File: pspnet.py (project: Zhaojp-Frank/recompute)
def pspnet():
    """Return a 17-class PSPNet (ResNet-101, 713x713) in a TrainChain."""
    return TrainChain(PSPNetResNet101(17, input_size=(713, 713)))
Code example #8
def main():
    """Evaluate PSPNet on Cityscapes val across multiple MPI processes.

    Each process scores a disjoint shard of the validation set; the
    per-shard confusion matrices are combined with an allreduce and
    rank 0 prints the final metrics.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        choices=('pspnet_resnet101', ),
                        default='pspnet_resnet101')
    parser.add_argument('--pretrained-model')
    args = parser.parse_args()

    comm = chainermn.create_communicator()
    # One GPU per process, chosen by the process's rank within its node.
    device = comm.intra_rank

    if args.model == 'pspnet_resnet101':
        if args.pretrained_model:
            # Custom weights: class count and input size must be given
            # explicitly.
            model = PSPNetResNet101(
                n_class=len(cityscapes_semantic_segmentation_label_names),
                pretrained_model=args.pretrained_model,
                input_size=(713, 713))
        else:
            model = PSPNetResNet101(pretrained_model='cityscapes')

    chainer.cuda.get_device_from_id(device).use()
    model.to_gpu()

    dataset = CityscapesSemanticSegmentationDataset(split='val',
                                                    label_resolution='fine')

    # Rank 0 builds the full index list; scatter_dataset then hands each
    # process its own shard of the validation set.
    if comm.rank == 0:
        indices = np.arange(len(dataset))
    else:
        indices = None
    indices = chainermn.scatter_dataset(indices, comm)
    dataset = dataset.slice[indices]

    it = iterators.SerialIterator(dataset, 1, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(model.predict,
                                                           it,
                                                           hook=ProgressHook(
                                                               len(dataset)))
    # Delete an iterator of images to save memory usage.
    del in_values
    pred_labels, = out_values
    gt_labels, = rest_values

    # Combine the per-shard confusion matrices from all processes.
    confusion = calc_semantic_segmentation_confusion(pred_labels, gt_labels)
    confusion = comm.allreduce(confusion)

    # Only rank 0 derives and reports the aggregate metrics.
    if comm.rank == 0:
        iou = calc_semantic_segmentation_iou(confusion)
        pixel_accuracy = np.diag(confusion).sum() / confusion.sum()
        class_accuracy = np.diag(confusion) / np.sum(confusion, axis=1)

        for iu, label_name in zip(
                iou, cityscapes_semantic_segmentation_label_names):
            print('{:>23} : {:.4f}'.format(label_name, iu))
        print('=' * 34)
        print('{:>23} : {:.4f}'.format('mean IoU', np.nanmean(iou)))
        print('{:>23} : {:.4f}'.format('Class average accuracy',
                                       np.nanmean(class_accuracy)))
        print('{:>23} : {:.4f}'.format('Global average accuracy',
                                       pixel_accuracy))
Code example #9
File: train_multi.py (project: souravsingh/chainercv)
def main():
    """Train PSPNet (ResNet-101/50) on ADE20K or Cityscapes with ChainerMN.

    Each MPI process drives one GPU; the training set is scattered
    across processes and gradients are averaged through a multi-node
    optimizer. Rank 0 additionally logs progress, snapshots the model
    and runs validation.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data-dir', default='auto')
    parser.add_argument('--dataset', choices=('ade20k', 'cityscapes'))
    parser.add_argument('--model',
                        choices=('pspnet_resnet101', 'pspnet_resnet50'))
    parser.add_argument('--lr', default=1e-2)
    parser.add_argument('--batchsize', default=2, type=int)
    parser.add_argument('--out', default='result')
    parser.add_argument('--iteration', default=None, type=int)
    parser.add_argument('--communicator', default='hierarchical')
    args = parser.parse_args()

    # Per-dataset configuration: input crop size, label set, and the
    # default iteration budget used when --iteration is not given.
    dataset_cfgs = {
        'ade20k': {
            'input_size': (473, 473),
            'label_names': ade20k_semantic_segmentation_label_names,
            'iteration': 150000
        },
        'cityscapes': {
            'input_size': (713, 713),
            'label_names': cityscapes_semantic_segmentation_label_names,
            'iteration': 90000
        }
    }
    dataset_cfg = dataset_cfgs[args.dataset]

    # https://docs.chainer.org/en/stable/chainermn/tutorial/tips_faqs.html#using-multiprocessiterator
    if hasattr(multiprocessing, 'set_start_method'):
        multiprocessing.set_start_method('forkserver')
        p = multiprocessing.Process()
        p.start()
        p.join()

    comm = chainermn.create_communicator(args.communicator)
    # One GPU per process, chosen by the process's rank within its node.
    device = comm.intra_rank

    n_class = len(dataset_cfg['label_names'])
    if args.model == 'pspnet_resnet101':
        model = PSPNetResNet101(n_class,
                                pretrained_model='imagenet',
                                input_size=dataset_cfg['input_size'])
    elif args.model == 'pspnet_resnet50':
        model = PSPNetResNet50(n_class,
                               pretrained_model='imagenet',
                               input_size=dataset_cfg['input_size'])
    # NOTE(review): create_mnbn_model presumably swaps in multi-node
    # batch-normalization links synced over `comm` — confirm in helper.
    train_chain = create_mnbn_model(TrainChain(model), comm)
    model = train_chain.model
    if device >= 0:
        chainer.cuda.get_device_from_id(device).use()
        train_chain.to_gpu()

    if args.iteration is None:
        n_iter = dataset_cfg['iteration']
    else:
        n_iter = args.iteration

    # Only rank 0 loads the validation split (it alone evaluates below).
    if args.dataset == 'ade20k':
        train = ADE20KSemanticSegmentationDataset(data_dir=args.data_dir,
                                                  split='train')
        if comm.rank == 0:
            val = ADE20KSemanticSegmentationDataset(data_dir=args.data_dir,
                                                    split='val')
        label_names = ade20k_semantic_segmentation_label_names
    elif args.dataset == 'cityscapes':
        train = CityscapesSemanticSegmentationDataset(args.data_dir,
                                                      label_resolution='fine',
                                                      split='train')
        if comm.rank == 0:
            val = CityscapesSemanticSegmentationDataset(
                args.data_dir, label_resolution='fine', split='val')
        label_names = cityscapes_semantic_segmentation_label_names
    train = TransformDataset(train, ('img', 'label'),
                             Transform(model.mean, dataset_cfg['input_size']))

    # Scatter disjoint, shuffled shards of the training set to all
    # processes.
    if comm.rank == 0:
        indices = np.arange(len(train))
    else:
        indices = None
    indices = chainermn.scatter_dataset(indices, comm, shuffle=True)
    train = train.slice[indices]

    train_iter = chainer.iterators.MultiprocessIterator(
        train, batch_size=args.batchsize, n_processes=2)

    optimizer = chainermn.create_multi_node_optimizer(
        chainer.optimizers.MomentumSGD(args.lr, 0.9), comm)
    optimizer.setup(train_chain)
    # Weight decay on everything except BN scale/shift ('beta'/'gamma').
    for param in train_chain.params():
        if param.name not in ('beta', 'gamma'):
            param.update_rule.add_hook(chainer.optimizer.WeightDecay(1e-4))
    # Scale gradients of the freshly initialized head and auxiliary
    # layers by 10 relative to the pretrained backbone.
    for l in [
            model.ppm, model.head_conv1, model.head_conv2,
            train_chain.aux_conv1, train_chain.aux_conv2
    ]:
        for param in l.params():
            param.update_rule.add_hook(GradientScaling(10))

    updater = training.updaters.StandardUpdater(train_iter,
                                                optimizer,
                                                device=device)
    trainer = training.Trainer(updater, (n_iter, 'iteration'), args.out)
    # Polynomial learning-rate decay (power 0.9) over the whole run.
    trainer.extend(PolynomialShift('lr', 0.9, n_iter, optimizer=optimizer),
                   trigger=(1, 'iteration'))

    log_interval = 10, 'iteration'

    # Reporting, snapshotting and validation happen on rank 0 only.
    if comm.rank == 0:
        trainer.extend(extensions.LogReport(trigger=log_interval))
        trainer.extend(extensions.observe_lr(), trigger=log_interval)
        trainer.extend(extensions.PrintReport([
            'epoch', 'iteration', 'elapsed_time', 'lr', 'main/loss',
            'validation/main/miou', 'validation/main/mean_class_accuracy',
            'validation/main/pixel_accuracy'
        ]),
                       trigger=log_interval)
        trainer.extend(extensions.ProgressBar(update_interval=10))
        trainer.extend(extensions.snapshot_object(
            train_chain.model, 'snapshot_model_{.updater.iteration}.npz'),
                       trigger=(n_iter, 'iteration'))
        val_iter = chainer.iterators.SerialIterator(val,
                                                    batch_size=1,
                                                    repeat=False,
                                                    shuffle=False)
        trainer.extend(SemanticSegmentationEvaluator(val_iter, model,
                                                     label_names),
                       trigger=(n_iter, 'iteration'))

    trainer.run()