Example #1
class TestPixelwiseSoftmaxClassifier(unittest.TestCase):
    def setUp(self):
        model = DummySemanticSegmentationModel(self.n_class)
        if self.class_weight:
            self.class_weight = [0.1 * i for i in range(self.n_class)]
        self.link = PixelwiseSoftmaxClassifier(model, self.ignore_label,
                                               self.class_weight)
        self.x = np.random.rand(2, 3, 16, 16).astype(np.float32)
        self.t = np.random.randint(self.n_class,
                                   size=(2, 16, 16)).astype(np.int32)

    def _check_call(self):
        xp = self.link.xp
        loss = self.link(chainer.Variable(xp.asarray(self.x)),
                         chainer.Variable(xp.asarray(self.t)))
        self.assertIsInstance(loss, chainer.Variable)
        self.assertIsInstance(loss.data, self.link.xp.ndarray)
        self.assertEqual(loss.shape, ())

        self.assertTrue(hasattr(self.link, 'y'))
        self.assertIsNotNone(self.link.y)

        self.assertTrue(hasattr(self.link, 'loss'))
        xp.testing.assert_allclose(self.link.loss.data, loss.data)

    def test_call_cpu(self):
        self._check_call()

    @attr.gpu
    def test_call_gpu(self):
        self.link.to_gpu()
        self._check_call()
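Note: setUp above reads self.n_class, self.ignore_label and self.class_weight without defining them; in the full test they are supplied by a parameterize decorator on the class. A minimal sketch of such a decorator, with illustrative values that are not taken from the source:

from chainer import testing

@testing.parameterize(*testing.product({
    'n_class': [11],
    'ignore_label': [-1],
    # True means setUp builds per-class weights; None leaves them unset.
    'class_weight': [None, True],
}))
class TestPixelwiseSoftmaxClassifier(unittest.TestCase):
    ...  # setUp and _check_call as in Example #1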
Example #2
class TestPixelwiseSoftmaxClassifier(unittest.TestCase):

    def setUp(self):
        model = DummySemanticSegmentationModel(self.n_class)
        if self.class_weight:
            self.class_weight = [0.1 * i for i in range(self.n_class)]
        self.link = PixelwiseSoftmaxClassifier(
            model, self.ignore_label, self.class_weight)
        self.x = np.random.rand(2, 3, 16, 16).astype(np.float32)
        self.t = np.random.randint(
            self.n_class, size=(2, 16, 16)).astype(np.int32)

    def _check_call(self):
        xp = self.link.xp
        loss = self.link(chainer.Variable(xp.asarray(self.x)),
                         chainer.Variable(xp.asarray(self.t)))
        self.assertIsInstance(loss, chainer.Variable)
        self.assertIsInstance(loss.array, self.link.xp.ndarray)
        self.assertEqual(loss.shape, ())

        self.assertTrue(hasattr(self.link, 'y'))
        self.assertIsNotNone(self.link.y)

        self.assertTrue(hasattr(self.link, 'loss'))
        xp.testing.assert_allclose(self.link.loss.array, loss.array)

    def test_call_cpu(self):
        self._check_call()

    @attr.gpu
    def test_call_gpu(self):
        self.link.to_gpu()
        self._check_call()
Example #3
def setUp(self):
    model = DummySemanticSegmentationModel(self.n_class)
    if self.class_weight:
        self.class_weight = [0.1 * i for i in range(self.n_class)]
    self.link = PixelwiseSoftmaxClassifier(model, self.ignore_label,
                                           self.class_weight)
    self.x = np.random.rand(2, 3, 16, 16).astype(np.float32)
    self.t = np.random.randint(self.n_class,
                               size=(2, 16, 16)).astype(np.int32)
Example #4
def get_segnet(batchsize):
    model = SegNetBasic(n_class=17)
    model = PixelwiseSoftmaxClassifier(model, class_weight=np.ones(17))
    x = np.random.uniform(size=(batchsize, 3, 1024, 1024)).astype('f')
    x = chainer.as_variable(x)
    t = np.random.randint(size=(batchsize, 1024, 1024), low=0, high=10)\
        .astype(np.int32)
    t = chainer.as_variable(t)
    return [x, t], model
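A hedged usage sketch for get_segnet above: the returned list holds the input image batch and the label batch, and the wrapped model maps them to a scalar softmax cross-entropy loss (batchsize=1 is an arbitrary illustrative value):

inputs, model = get_segnet(batchsize=1)
x, t = inputs
loss = model(x, t)   # chainer.Variable with shape ()
loss.backward()      # gradients accumulate in model.predictor (SegNetBasic)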
Example #5
def setUp(self):
    model = DummySemanticSegmentationModel(self.n_class)
    if self.class_weight:
        self.class_weight = [0.1 * i for i in range(self.n_class)]
    self.link = PixelwiseSoftmaxClassifier(
        model, self.ignore_label, self.class_weight)
    self.x = np.random.rand(2, 3, 16, 16).astype(np.float32)
    self.t = np.random.randint(
        self.n_class, size=(2, 16, 16)).astype(np.int32)
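For orientation, the calls seen so far (positional in the tests, keyword-only in the training scripts below) are consistent with a constructor of roughly this shape; the defaults shown are assumptions inferred from usage, not the library's documented signature:

# Inferred from usage across these examples; defaults are assumptions.
link = PixelwiseSoftmaxClassifier(
    predictor,          # a semantic segmentation model, e.g. SegNetBasic
    ignore_label=-1,    # label value excluded from the loss (assumed default)
    class_weight=None)  # optional per-class weights for the cross-entropy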
Example #6
def train_linknet():
    """Training LinkNet."""
    chainer.config.debug = True
    config = parse_args()
    train_data, test_data = load_dataset(config["dataset"])
    train_iter, test_iter = create_iterator(train_data, test_data, config['iterator'])
    model = get_model(config["model"])
    class_weight = get_class_weight(config)
    model = PixelwiseSoftmaxClassifier(model, class_weight=class_weight)
    optimizer = create_optimizer(config['optimizer'], model)
    devices = parse_devices(config['gpus'])
    updater = create_updater(train_iter, optimizer, config['updater'], devices)
    trainer = training.Trainer(updater, config['end_trigger'], out=config['results'])
    trainer = create_extension(trainer, test_iter, model.predictor,
                               config['extension'], devices=devices)
    trainer.run()
    chainer.serializers.save_npz(os.path.join(config['results'], 'model.npz'),
                                 model.predictor)
Example #7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--batchsize', type=int, default=12)
    parser.add_argument('--class_weight', type=str, default='class_weight.npy')
    parser.add_argument('--out', type=str, default='result')
    args = parser.parse_args()

    # Triggers
    log_trigger = (50, 'iteration')
    validation_trigger = (2000, 'iteration')
    end_trigger = (16000, 'iteration')

    # Dataset
    train = CamVidDataset(split='train')
    train = TransformDataset(train, transform)
    val = CamVidDataset(split='val')

    # Iterator
    train_iter = iterators.MultiprocessIterator(train, args.batchsize)
    val_iter = iterators.MultiprocessIterator(val,
                                              args.batchsize,
                                              shuffle=False,
                                              repeat=False)

    # Model
    class_weight = np.load(args.class_weight)
    model = SegNetBasic(n_class=len(camvid_label_names))
    model = PixelwiseSoftmaxClassifier(model, class_weight=class_weight)
    if args.gpu >= 0:
        # Make a specified GPU current
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()  # Copy the model to the GPU

    # Optimizer
    optimizer = optimizers.MomentumSGD(lr=0.1, momentum=0.9)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer_hooks.WeightDecay(rate=0.0005))

    # Updater
    updater = training.updaters.StandardUpdater(train_iter,
                                                optimizer,
                                                device=args.gpu)

    # Trainer
    trainer = training.Trainer(updater, end_trigger, out=args.out)

    trainer.extend(extensions.LogReport(trigger=log_trigger))
    trainer.extend(extensions.observe_lr(), trigger=log_trigger)
    trainer.extend(extensions.dump_graph('main/loss'))

    if extensions.PlotReport.available():
        trainer.extend(
            extensions.PlotReport(['main/loss'],
                                  x_key='iteration',
                                  file_name='loss.png'))
        trainer.extend(
            extensions.PlotReport(['validation/main/miou'],
                                  x_key='iteration',
                                  file_name='miou.png'))

    trainer.extend(extensions.snapshot_object(
        model.predictor, filename='model_iteration-{.updater.iteration}'),
                   trigger=end_trigger)
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'elapsed_time', 'lr', 'main/loss',
        'validation/main/miou', 'validation/main/mean_class_accuracy',
        'validation/main/pixel_accuracy'
    ]),
                   trigger=log_trigger)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.extend(SemanticSegmentationEvaluator(val_iter, model.predictor,
                                                 camvid_label_names),
                   trigger=validation_trigger)

    trainer.run()
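A possible follow-up, not part of the script above: loading the predictor snapshot written by snapshot_object and running inference with the bare SegNetBasic. The checkpoint name assumes training reached the 16000-iteration end trigger and that --out kept its default; adjust both to match your run:

import chainer
import numpy as np
from chainercv.links import SegNetBasic

model = SegNetBasic(n_class=11)  # 11 == len(camvid_label_names)
chainer.serializers.load_npz('result/model_iteration-16000', model)
img = np.zeros((3, 360, 480), dtype=np.float32)  # dummy CHW float image
label_map = model.predict([img])[0]              # per-pixel class labels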
Example #8
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--gpu", type=int, default=-1)
    parser.add_argument("--batchsize", type=int, default=12)
    parser.add_argument("--class_weight", type=str, default="class_weight.npy")
    parser.add_argument("--out", type=str, default="result")
    args = parser.parse_args()

    # Triggers
    log_trigger = (50, "iteration")
    validation_trigger = (2000, "iteration")
    end_trigger = (16000, "iteration")

    # Dataset
    train = CamVidDataset(split="train")
    train = TransformDataset(dataset=train, transform=transform)
    val = CamVidDataset(split="val")

    # Iterator
    train_iter = iterators.MultiprocessIterator(train, args.batchsize)
    val_iter = iterators.MultiprocessIterator(val, args.batchsize,
                                              repeat=False,
                                              shuffle=False)
    # Model
    class_weight = np.load(args.class_weight)
    model = SegNetBasic(n_class=11)
    model = PixelwiseSoftmaxClassifier(
        model, class_weight=class_weight
    )

    if args.gpu >= 0:
        # Make a specified GPU current
        chainer.cuda.get_device_from_id(args.gpu).use()
        # Copy the model to the GPU
        model.to_gpu()

    # Optimizer
    optimizer = optimizers.MomentumSGD(lr=0.1, momentum=0.9)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(rate=0.0005))

    # Updater
    updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)

    # Trainer
    trainer = training.Trainer(updater=updater, stop_trigger=end_trigger, out=args.out)

    trainer.extend(extensions.LogReport(trigger=log_trigger))
    trainer.extend(extensions.observe_lr(), trigger=log_trigger)
    trainer.extend(extensions.dump_graph("main/loss"))

    if extensions.PlotReport.available():
        trainer.extend(extensions.PlotReport(
            ["main/loss"], x_key="iteration",
            file_name="loss.png"
        ))
        trainer.extend(extensions.PlotReport(
            ["validation/loss"], x_key="iteration",
            file_name="miou.png"
        ))

    trainer.extend(extensions.snapshot_object(
        model.predictor, filename="model_iteration-{.updater.iteration}"),
        trigger=end_trigger)

    trainer.extend(extensions.PrintReport(
        ["epoch", "iteration", "elapsed_time", "lr",
         "main/loss", "validation/main/miou",
         "validation/main/mean_class_accuracy",
         "validation/main/pixel_accuracy"
         ]
    ), trigger=log_trigger)
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.extend(SemanticSegmentationEvaluator(
        val_iter, model.predictor,
        camvid_label_names), trigger=validation_trigger)
    trainer.run()
Example #9
def train_one_epoch(model, train_data, lr, gpu, batchsize, out):
    train_model = PixelwiseSoftmaxClassifier(model)
    if gpu >= 0:
        # Make a specified GPU current
        chainer.cuda.get_device_from_id(gpu).use()
        train_model.to_gpu()  # Copy the model to the GPU
    log_trigger = (0.1, 'epoch')
    validation_trigger = (1, 'epoch')
    end_trigger = (1, 'epoch')

    train_data = TransformDataset(train_data, ('img', 'label_map'),
                                  SimpleDoesItTransform(model.mean))
    val = VOCSemanticSegmentationWithBboxDataset(
        split='val').slice[:, ['img', 'label_map']]

    # Iterator
    train_iter = iterators.MultiprocessIterator(train_data, batchsize)
    val_iter = iterators.MultiprocessIterator(val,
                                              1,
                                              shuffle=False,
                                              repeat=False,
                                              shared_mem=100000000)

    # Optimizer
    optimizer = optimizers.MomentumSGD(lr=lr, momentum=0.9)
    optimizer.setup(train_model)
    optimizer.add_hook(chainer.optimizer_hooks.WeightDecay(rate=0.0001))

    # Updater
    updater = training.updaters.StandardUpdater(train_iter,
                                                optimizer,
                                                device=gpu)

    # Trainer
    trainer = training.Trainer(updater, end_trigger, out=out)

    trainer.extend(extensions.LogReport(trigger=log_trigger))
    trainer.extend(extensions.observe_lr(), trigger=log_trigger)
    trainer.extend(extensions.dump_graph('main/loss'))

    if extensions.PlotReport.available():
        trainer.extend(
            extensions.PlotReport(['main/loss'],
                                  x_key='iteration',
                                  file_name='loss.png'))
        trainer.extend(
            extensions.PlotReport(['validation/main/miou'],
                                  x_key='iteration',
                                  file_name='miou.png'))

    trainer.extend(extensions.snapshot_object(model, filename='snapshot.npy'),
                   trigger=end_trigger)
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'elapsed_time', 'lr', 'main/loss',
        'validation/main/miou', 'validation/main/mean_class_accuracy',
        'validation/main/pixel_accuracy'
    ]),
                   trigger=log_trigger)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.extend(SemanticSegmentationEvaluator(
        val_iter, model, voc_semantic_segmentation_label_names),
                   trigger=validation_trigger)
    trainer.run()
Example #10
def handler(context):
    # Triggers
    log_trigger = (50, 'iteration')
    validation_trigger = (2000, 'iteration')
    end_trigger = (nb_iterations, 'iteration')

    # Dataset
    dataset_alias = context.datasets
    train_dataset_id = dataset_alias['train']
    val_dataset_id = dataset_alias['val']
    train = SegmentationDatasetFromAPI(train_dataset_id)
    val = SegmentationDatasetFromAPI(val_dataset_id)
    class_weight = calc_weight(train)

    print(class_weight)

    train = TransformDataset(train, transform)

    # Iterator
    train_iter = iterators.SerialIterator(train, BATCHSIZE)
    val_iter = iterators.SerialIterator(val,
                                        BATCHSIZE,
                                        shuffle=False,
                                        repeat=False)

    # Model
    model = SegNetBasic(n_class=len(camvid_label_names))
    model = PixelwiseSoftmaxClassifier(model, class_weight=class_weight)

    if USE_GPU >= 0:
        # Make a specified GPU current
        chainer.cuda.get_device_from_id(USE_GPU).use()
        model.to_gpu()  # Copy the model to the GPU

    # Optimizer
    optimizer = optimizers.MomentumSGD(lr=0.1, momentum=0.9)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer_hooks.WeightDecay(rate=0.0005))

    # Updater
    updater = training.updaters.StandardUpdater(train_iter,
                                                optimizer,
                                                device=USE_GPU)

    # Trainer
    trainer = training.Trainer(updater,
                               end_trigger,
                               out=ABEJA_TRAINING_RESULT_DIR)

    trainer.extend(extensions.LogReport(trigger=log_trigger))
    trainer.extend(extensions.observe_lr(), trigger=log_trigger)
    trainer.extend(extensions.dump_graph('main/loss'))

    trainer.extend(extensions.snapshot_object(
        model.predictor, filename='model_iteration-{.updater.iteration}'),
                   trigger=end_trigger)

    print_entries = [
        'iteration', 'main/loss', 'validation/main/miou',
        'validation/main/mean_class_accuracy', 'validation/main/pixel_accuracy'
    ]

    report_entries = [
        'epoch', 'iteration', 'lr', 'main/loss', 'validation/main/miou',
        'validation/main/mean_class_accuracy', 'validation/main/pixel_accuracy'
    ]

    trainer.extend(Statistics(report_entries,
                              nb_iterations,
                              obs_key='iteration'),
                   trigger=log_trigger)
    trainer.extend(Tensorboard(report_entries, out_dir=log_path))
    trainer.extend(extensions.PrintReport(print_entries), trigger=log_trigger)

    trainer.extend(SemanticSegmentationEvaluator(val_iter, model.predictor,
                                                 camvid_label_names),
                   trigger=validation_trigger)

    trainer.run()