Example No. 1
class TestFasterRCNNVGG16Loss(unittest.TestCase):

    n_fg_class = 20

    def setUp(self):
        faster_rcnn = FasterRCNNVGG16(
            n_fg_class=self.n_fg_class, pretrained_model=False)
        self.link = FasterRCNNTrainChain(faster_rcnn)

        self.n_bbox = 3
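        # Three random ground-truth boxes inside a 600x800 image, with edge
        # lengths between 16 and 350 pixels (generate_random_bbox's arguments).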
        self.bboxes = chainer.Variable(
            generate_random_bbox(self.n_bbox, (600, 800), 16, 350)[np.newaxis])
        _labels = np.random.randint(
            0, self.n_fg_class, size=(1, self.n_bbox)).astype(np.int32)
        self.labels = chainer.Variable(_labels)
        _imgs = np.random.uniform(
            low=-122.5, high=122.5, size=(1, 3, 600, 800)).astype(np.float32)
        self.imgs = chainer.Variable(_imgs)
        self.scale = chainer.Variable(np.array(1.))

    def check_call(self):
        loss = self.link(self.imgs, self.bboxes, self.labels, self.scale)
        self.assertEqual(loss.shape, ())

    def test_call_cpu(self):
        self.check_call()

    @attr.gpu
    def test_call_gpu(self):
        self.link.to_gpu()
        self.bboxes.to_gpu()
        self.labels.to_gpu()
        self.imgs.to_gpu()
        self.scale.to_gpu()
        self.check_call()
Example No. 2
    def setUp(self):
        faster_rcnn = FasterRCNNVGG16(
            n_fg_class=self.n_fg_class, pretrained_model=False)
        self.link = FasterRCNNTrainChain(faster_rcnn)

        self.n_bbox = 3
        self.bboxes = chainer.Variable(
            generate_random_bbox(self.n_bbox, (600, 800), 16, 350)[np.newaxis])
        _labels = np.random.randint(
            0, self.n_fg_class, size=(1, self.n_bbox)).astype(np.int32)
        self.labels = chainer.Variable(_labels)
        _imgs = np.random.uniform(
            low=-122.5, high=122.5, size=(1, 3, 600, 800)).astype(np.float32)
        self.imgs = chainer.Variable(_imgs)
        self.scale = chainer.Variable(np.array(1.))
Example No. 3
    def set_model(self, n_class=2):
        """モデルのセット

        Args:
            n_class (int, optional): 認識する物体クラスの数. Defaults to 2.
        """
        faster_rcnn = FasterRCNNVGG16(n_fg_class=n_class, pretrained_model="imagenet")
        faster_rcnn.use_preset("evaluate")
        model = FasterRCNNTrainChain(faster_rcnn)
        self.model = model
        self.logger.info("set FasterRCNNVGG16, pretrained=imagenet")
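
A minimal sketch (not from the original project) of how a train chain built by set_model is driven: it assumes numpy and chainer are imported and that `model` is the FasterRCNNTrainChain created above. Shapes mirror the tests in Examples No. 1 and 2; the chain maps images, ground-truth boxes, labels and a scale factor to a single scalar loss.

imgs = np.random.uniform(
    low=-122.5, high=122.5, size=(1, 3, 600, 800)).astype(np.float32)
bboxes = np.array([[[10., 10., 200., 200.]]], dtype=np.float32)  # (1, R, 4) as (y_min, x_min, y_max, x_max)
labels = np.zeros((1, 1), dtype=np.int32)                        # (1, R) foreground class indices
scale = np.array(1., dtype=np.float32)

loss = model(imgs, bboxes, labels, scale)  # scalar chainer.Variable
model.cleargrads()
loss.backward()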
Example No. 4
    def setUp(self):
        self.n_anchor_base = 6
        self.feat_stride = 4
        self.n_fg_class = 3
        self.n_roi = 24
        self.n_bbox = 3
        self.link = FasterRCNNTrainChain(DummyFasterRCNN(
            n_anchor_base=self.n_anchor_base,
            feat_stride=self.feat_stride,
            n_fg_class=self.n_fg_class,
            n_roi=self.n_roi,
            min_size=600,
            max_size=800,
        ))

        self.bboxes = chainer.Variable(
            generate_random_bbox(self.n_bbox, (600, 800), 16, 350)[np.newaxis])
        _labels = np.random.randint(
            0, self.n_fg_class, size=(1, self.n_bbox)).astype(np.int32)
        self.labels = chainer.Variable(_labels)
        self.imgs = chainer.Variable(_random_array((1, 3, 600, 800)))
        self.scale = chainer.Variable(np.array(1.))
Example No. 5
class TestFasterRCNNTrainChain(unittest.TestCase):

    def setUp(self):
        self.n_anchor_base = 6
        self.feat_stride = 4
        self.n_fg_class = 3
        self.n_roi = 24
        self.n_bbox = 3
        self.link = FasterRCNNTrainChain(DummyFasterRCNN(
            n_anchor_base=self.n_anchor_base,
            feat_stride=self.feat_stride,
            n_fg_class=self.n_fg_class,
            n_roi=self.n_roi,
            min_size=600,
            max_size=800,
        ))

        self.bboxes = chainer.Variable(
            generate_random_bbox(self.n_bbox, (600, 800), 16, 350)[np.newaxis])
        _labels = np.random.randint(
            0, self.n_fg_class, size=(1, self.n_bbox)).astype(np.int32)
        self.labels = chainer.Variable(_labels)
        self.imgs = chainer.Variable(_random_array((1, 3, 600, 800)))
        self.scale = chainer.Variable(np.array(1.))

    def check_call(self):
        loss = self.link(self.imgs, self.bboxes, self.labels, self.scale)
        self.assertEqual(loss.shape, ())

    def test_call_cpu(self):
        self.check_call()

    @attr.gpu
    def test_call_gpu(self):
        self.link.to_gpu()
        self.imgs.to_gpu()
        self.bboxes.to_gpu()
        self.labels.to_gpu()
        self.check_call()
Example No. 7
def get_faster_rcnn(n):
    frc = FasterRCNNVGG16(n_fg_class=20)
    model = FasterRCNNTrainChain(frc)

    batchsize = 1  # only 1 is supported
    K = 10
    x = np.random.uniform(size=(batchsize, 3, n * 512, 512)).astype('f')
    x = chainer.as_variable(x)
    bbox = np.random.uniform(size=(batchsize, K, 4)).astype('f')
    bbox = chainer.as_variable(bbox)
    labels = np.random.randint(size=(batchsize, K), low=0, high=20)\
        .astype(np.int32)
    labels = chainer.as_variable(labels)
    scale = np.ones((batchsize, )).astype('f')
    scale = chainer.as_variable(scale)

    return [x, bbox, labels, scale], model
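
A hedged usage note for the helper above: the returned list lines up with FasterRCNNTrainChain's call signature, so a forward/backward pass (for example in a benchmark) looks roughly like this.

inputs, model = get_faster_rcnn(1)   # n=1 -> a single 512x512 input image
loss = model(*inputs)                # (x, bbox, labels, scale) -> scalar loss Variable
model.cleargrads()
loss.backward()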
Example No. 9
def main():
    parser = argparse.ArgumentParser(
        description='ChainerCV training example: Faster R-CNN')
    parser.add_argument('--dataset',
                        choices=('voc07', 'voc0712'),
                        help='The dataset to use: VOC07, VOC07+12',
                        default='voc07')
    parser.add_argument('--gpu', '-g', type=int, default=-1)
    parser.add_argument('--lr', '-l', type=float, default=1e-3)
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        help='Output directory')
    parser.add_argument('--seed', '-s', type=int, default=0)
    parser.add_argument('--step_size', '-ss', type=int, default=50000)
    parser.add_argument('--iteration', '-i', type=int, default=70000)
    args = parser.parse_args()

    np.random.seed(args.seed)

    if args.dataset == 'voc07':
        train_data = VOCBboxDataset(split='trainval', year='2007')
    elif args.dataset == 'voc0712':
        train_data = ConcatenatedDataset(
            VOCBboxDataset(year='2007', split='trainval'),
            VOCBboxDataset(year='2012', split='trainval'))
    test_data = VOCBboxDataset(split='test',
                               year='2007',
                               use_difficult=True,
                               return_difficult=True)
    faster_rcnn = FasterRCNNVGG16(n_fg_class=len(voc_bbox_label_names),
                                  pretrained_model='imagenet')
    faster_rcnn.use_preset('evaluate')
    model = FasterRCNNTrainChain(faster_rcnn)
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()
    optimizer = chainer.optimizers.MomentumSGD(lr=args.lr, momentum=0.9)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer_hooks.WeightDecay(rate=0.0005))

    train_data = TransformDataset(train_data, Transform(faster_rcnn))

    train_iter = chainer.iterators.MultiprocessIterator(train_data,
                                                        batch_size=1,
                                                        n_processes=None,
                                                        shared_mem=100000000)
    test_iter = chainer.iterators.SerialIterator(test_data,
                                                 batch_size=1,
                                                 repeat=False,
                                                 shuffle=False)
    updater = chainer.training.updaters.StandardUpdater(train_iter,
                                                        optimizer,
                                                        device=args.gpu)

    trainer = training.Trainer(updater, (args.iteration, 'iteration'),
                               out=args.out)

    trainer.extend(extensions.snapshot_object(model.faster_rcnn,
                                              'snapshot_model.npz'),
                   trigger=(args.iteration, 'iteration'))
    trainer.extend(extensions.ExponentialShift('lr', 0.1),
                   trigger=(args.step_size, 'iteration'))

    log_interval = 20, 'iteration'
    plot_interval = 3000, 'iteration'
    print_interval = 20, 'iteration'

    trainer.extend(chainer.training.extensions.observe_lr(),
                   trigger=log_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'iteration',
        'epoch',
        'elapsed_time',
        'lr',
        'main/loss',
        'main/roi_loc_loss',
        'main/roi_cls_loss',
        'main/rpn_loc_loss',
        'main/rpn_cls_loss',
        'validation/main/map',
    ]),
                   trigger=print_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    if extensions.PlotReport.available():
        trainer.extend(extensions.PlotReport(['main/loss'],
                                             file_name='loss.png',
                                             trigger=plot_interval),
                       trigger=plot_interval)

    trainer.extend(DetectionVOCEvaluator(test_iter,
                                         model.faster_rcnn,
                                         use_07_metric=True,
                                         label_names=voc_bbox_label_names),
                   trigger=ManualScheduleTrigger(
                       [args.step_size, args.iteration], 'iteration'))

    trainer.extend(extensions.dump_graph('main/loss'))

    trainer.run()
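
Several of these scripts pass Transform(faster_rcnn) to TransformDataset without showing the class. A minimal sketch of what that callable is assumed to do, mirroring the inline transform function in Example No. 10 below (mean subtraction and resizing via faster_rcnn.prepare, box resizing, and a random horizontal flip):

from chainercv import transforms


class Transform(object):
    """Per-sample preprocessing: rescale the image and boxes, random horizontal flip."""

    def __init__(self, faster_rcnn):
        self.faster_rcnn = faster_rcnn

    def __call__(self, in_data):
        img, bbox, label = in_data
        _, H, W = img.shape
        img = self.faster_rcnn.prepare(img)  # mean subtraction + resize within min_size/max_size
        _, o_H, o_W = img.shape
        scale = o_H / H
        bbox = transforms.resize_bbox(bbox, (H, W), (o_H, o_W))

        # flip the image and the boxes together
        img, params = transforms.random_flip(
            img, x_random=True, return_param=True)
        bbox = transforms.flip_bbox(
            bbox, (o_H, o_W), x_flip=params['x_flip'])

        return img, bbox, label, scale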
Example No. 10
def main():
    parser = argparse.ArgumentParser(
        description='ChainerCV training example: Faster R-CNN')
    parser.add_argument('--gpu', '-g', type=int, default=-1)
    parser.add_argument('--lr', '-l', type=float, default=1e-3)
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        help='Output directory')
    parser.add_argument('--seed', '-s', type=int, default=0)
    parser.add_argument('--step_size', '-ss', type=int, default=50000)
    parser.add_argument('--iteration', '-i', type=int, default=70000)
    args = parser.parse_args()

    np.random.seed(args.seed)

    train_data = VOCDetectionDataset(split='trainval', year='2007')
    test_data = VOCDetectionDataset(split='test',
                                    year='2007',
                                    use_difficult=True,
                                    return_difficult=True)
    faster_rcnn = FasterRCNNVGG16(n_fg_class=len(voc_detection_label_names),
                                  pretrained_model='imagenet')
    faster_rcnn.use_preset('evaluate')
    model = FasterRCNNTrainChain(faster_rcnn)
    if args.gpu >= 0:
        model.to_gpu(args.gpu)
        chainer.cuda.get_device(args.gpu).use()
    optimizer = chainer.optimizers.MomentumSGD(lr=args.lr, momentum=0.9)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(rate=0.0005))

    def transform(in_data):
        img, bbox, label = in_data
        _, H, W = img.shape
        img = faster_rcnn.prepare(img)
        _, o_H, o_W = img.shape
        scale = o_H / H
        bbox = transforms.resize_bbox(bbox, (H, W), (o_H, o_W))

        # horizontally flip
        img, params = transforms.random_flip(img,
                                             x_random=True,
                                             return_param=True)
        bbox = transforms.flip_bbox(bbox, (o_H, o_W), x_flip=params['x_flip'])

        return img, bbox, label, scale

    train_data = TransformDataset(train_data, transform)

    train_iter = chainer.iterators.MultiprocessIterator(train_data,
                                                        batch_size=1,
                                                        n_processes=None,
                                                        shared_mem=100000000)
    test_iter = chainer.iterators.SerialIterator(test_data,
                                                 batch_size=1,
                                                 repeat=False,
                                                 shuffle=False)
    updater = chainer.training.updater.StandardUpdater(train_iter,
                                                       optimizer,
                                                       device=args.gpu)

    trainer = training.Trainer(updater, (args.iteration, 'iteration'),
                               out=args.out)

    trainer.extend(extensions.snapshot_object(model.faster_rcnn,
                                              'snapshot_model.npz'),
                   trigger=(args.iteration, 'iteration'))
    trainer.extend(extensions.ExponentialShift('lr', 0.1),
                   trigger=(args.step_size, 'iteration'))

    log_interval = 20, 'iteration'
    plot_interval = 3000, 'iteration'
    print_interval = 20, 'iteration'

    trainer.extend(chainer.training.extensions.observe_lr(),
                   trigger=log_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'iteration',
        'epoch',
        'elapsed_time',
        'lr',
        'main/loss',
        'main/roi_loc_loss',
        'main/roi_cls_loss',
        'main/rpn_loc_loss',
        'main/rpn_cls_loss',
        'validation/main/map',
    ]),
                   trigger=print_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    if extensions.PlotReport.available():
        trainer.extend(extensions.PlotReport(['main/loss'],
                                             file_name='loss.png',
                                             trigger=plot_interval),
                       trigger=plot_interval)

    trainer.extend(
        DetectionVOCEvaluator(test_iter,
                              model.faster_rcnn,
                              use_07_metric=True,
                              label_names=voc_detection_label_names),
        trigger=ManualScheduleTrigger([args.step_size, args.iteration],
                                      'iteration'),
        invoke_before_training=False)

    trainer.extend(extensions.dump_graph('main/loss'))

    trainer.run()
Example No. 11
def main():
    bbox_label_names = ('loop',)  # one-element tuple; a bare ('loop') would just be a str

    n_itrs = 70000
    n_step = 50000
    np.random.seed(0)
    train_data = DefectDetectionDataset(split='train')
    test_data = DefectDetectionDataset(split='test')
    proposal_params = {'min_size': 8}

    faster_rcnn = FasterRCNNVGG16(n_fg_class=1, pretrained_model='imagenet', ratios=[0.5, 1, 2],
                                  anchor_scales=[1, 4, 8, 16], min_size=512, max_size=1024,
                                  proposal_creator_params=proposal_params)
    faster_rcnn.use_preset('evaluate')
    model = FasterRCNNTrainChain(faster_rcnn)
    chainer.cuda.get_device_from_id(0).use()
    model.to_gpu()
    optimizer = chainer.optimizers.MomentumSGD(lr=1e-3, momentum=0.9)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(rate=0.0005))
    train_data = TransformDataset(train_data, Transform(faster_rcnn))
    train_iter = chainer.iterators.MultiprocessIterator(
        train_data, batch_size=1, n_processes=None, shared_mem=100000000)
    test_iter = chainer.iterators.SerialIterator(
        test_data, batch_size=1, repeat=False, shuffle=False)
    updater = chainer.training.updater.StandardUpdater(
        train_iter, optimizer, device=0)
    trainer = training.Trainer(
        updater, (n_itrs, 'iteration'), out='result')
    trainer.extend(
        extensions.snapshot_object(
            model.faster_rcnn, 'snapshot_model_{.updater.iteration}.npz'),
        trigger=(n_itrs // 5, 'iteration'))
    trainer.extend(extensions.ExponentialShift('lr', 0.1),
                   trigger=(n_step, 'iteration'))
    log_interval = 50, 'iteration'
    plot_interval = 100, 'iteration'
    print_interval = 20, 'iteration'
    trainer.extend(chainer.training.extensions.observe_lr(),
                   trigger=log_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport(
        ['iteration', 'epoch', 'elapsed_time', 'lr',
         'main/loss',
         'main/roi_loc_loss',
         'main/roi_cls_loss',
         'main/rpn_loc_loss',
         'main/rpn_cls_loss',
         'validation/main/map',
         ]), trigger=print_interval)
    trainer.extend(extensions.ProgressBar(update_interval=5))
    if extensions.PlotReport.available():
        trainer.extend(
            extensions.PlotReport(
                ['main/loss'],
                file_name='loss.png', trigger=plot_interval
            ),
            trigger=plot_interval
        )
    trainer.extend(
        DetectionVOCEvaluator(
            test_iter, model.faster_rcnn, use_07_metric=True,
            label_names=bbox_label_names),
        trigger=ManualScheduleTrigger(
            [100, 500, 1000, 5000, 10000, 20000, 40000, 60000, n_step, n_itrs], 'iteration'))

    trainer.extend(extensions.dump_graph('main/loss'))

    trainer.run()
Example No. 12
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--batchsize', type=int, default=1)
    parser.add_argument('--lr', type=float, default=1e-3)
    parser.add_argument('--out', default='result')
    parser.add_argument('--resume')
    args = parser.parse_args()

    comm = chainermn.create_communicator()
    device = comm.intra_rank

    faster_rcnn = FasterRCNNVGG16(
        n_fg_class=len(epic_kitchens_bbox_label_names),
        pretrained_model='imagenet')

    faster_rcnn.use_preset('evaluate')
    model = FasterRCNNTrainChain(faster_rcnn)
    chainer.cuda.get_device_from_id(device).use()
    model.to_gpu()

    train = EpicKitchensBboxDataset(year='2018', split='train')
    if comm.rank == 0:
        indices = np.arange(len(train))
    else:
        indices = None
    train = TransformDataset(train, ('img', 'bbox', 'label', 'scale'),
                             Transform(faster_rcnn))

    indices = chainermn.scatter_dataset(indices, comm, shuffle=True)
    train = train.slice[indices]

    train_iter = chainer.iterators.SerialIterator(train,
                                                  batch_size=args.batchsize)

    optimizer = chainermn.create_multi_node_optimizer(
        chainer.optimizers.MomentumSGD(), comm)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer_hooks.WeightDecay(rate=0.0005))

    updater = training.updaters.StandardUpdater(train_iter,
                                                optimizer,
                                                device=device)
    trainer = training.Trainer(updater, (18, 'epoch'), args.out)
    trainer.extend(extensions.ExponentialShift('lr', 0.1, init=args.lr),
                   trigger=triggers.ManualScheduleTrigger([12, 15], 'epoch'))

    if comm.rank == 0:
        log_interval = 10, 'iteration'
        trainer.extend(
            extensions.LogReport(log_name='log.json', trigger=log_interval))
        trainer.extend(extensions.observe_lr(), trigger=log_interval)
        trainer.extend(extensions.PrintReport([
            'iteration', 'epoch', 'elapsed_time', 'lr', 'main/loss',
            'main/roi_loc_loss', 'main/roi_cls_loss', 'main/rpn_loc_loss',
            'main/rpn_cls_loss'
        ]),
                       trigger=log_interval)
        trainer.extend(extensions.ProgressBar(update_interval=1))

        trainer.extend(extensions.snapshot_object(
            model.faster_rcnn, 'model_iter_{.updater.iteration}.npz'),
                       trigger=(1, 'epoch'))

    if args.resume:
        serializers.load_npz(args.resume, trainer)

    trainer.run()
Example No. 13
def main():
    parser = argparse.ArgumentParser(
        description='ChainerCV training example: Faster R-CNN')
    parser.add_argument('--dataset', choices=('voc07', 'voc0712'),
                        help='The dataset to use: VOC07, VOC07+12',
                        default='voc07')
    parser.add_argument('--gpu', '-g', type=int, default=-1)
    parser.add_argument('--lr', '-l', type=float, default=1e-3)
    parser.add_argument('--out', '-o', default='result',
                        help='Output directory')
    parser.add_argument('--seed', '-s', type=int, default=0)
    parser.add_argument('--step_size', '-ss', type=int, default=50000)
    parser.add_argument('--iteration', '-i', type=int, default=70000)
    args = parser.parse_args()

    np.random.seed(args.seed)

    if args.dataset == 'voc07':
        train_data = VOCBboxDataset(split='trainval', year='2007')
    elif args.dataset == 'voc0712':
        train_data = ConcatenatedDataset(
            VOCBboxDataset(year='2007', split='trainval'),
            VOCBboxDataset(year='2012', split='trainval'))
    test_data = VOCBboxDataset(split='test', year='2007',
                               use_difficult=True, return_difficult=True)
    faster_rcnn = FasterRCNNVGG16(n_fg_class=len(voc_bbox_label_names),
                                  pretrained_model='imagenet')
    faster_rcnn.use_preset('evaluate')
    model = FasterRCNNTrainChain(faster_rcnn)
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()
    optimizer = chainer.optimizers.MomentumSGD(lr=args.lr, momentum=0.9)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(rate=0.0005))

    train_data = TransformDataset(train_data, Transform(faster_rcnn))

    train_iter = chainer.iterators.MultiprocessIterator(
        train_data, batch_size=1, n_processes=None, shared_mem=100000000)
    test_iter = chainer.iterators.SerialIterator(
        test_data, batch_size=1, repeat=False, shuffle=False)
    updater = chainer.training.updater.StandardUpdater(
        train_iter, optimizer, device=args.gpu)

    trainer = training.Trainer(
        updater, (args.iteration, 'iteration'), out=args.out)

    trainer.extend(
        extensions.snapshot_object(model.faster_rcnn, 'snapshot_model.npz'),
        trigger=(args.iteration, 'iteration'))
    trainer.extend(extensions.ExponentialShift('lr', 0.1),
                   trigger=(args.step_size, 'iteration'))

    log_interval = 20, 'iteration'
    plot_interval = 3000, 'iteration'
    print_interval = 20, 'iteration'

    trainer.extend(chainer.training.extensions.observe_lr(),
                   trigger=log_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport(
        ['iteration', 'epoch', 'elapsed_time', 'lr',
         'main/loss',
         'main/roi_loc_loss',
         'main/roi_cls_loss',
         'main/rpn_loc_loss',
         'main/rpn_cls_loss',
         'validation/main/map',
         ]), trigger=print_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    if extensions.PlotReport.available():
        trainer.extend(
            extensions.PlotReport(
                ['main/loss'],
                file_name='loss.png', trigger=plot_interval
            ),
            trigger=plot_interval
        )

    trainer.extend(
        DetectionVOCEvaluator(
            test_iter, model.faster_rcnn, use_07_metric=True,
            label_names=voc_bbox_label_names),
        trigger=ManualScheduleTrigger(
            [args.step_size, args.iteration], 'iteration'))

    trainer.extend(extensions.dump_graph('main/loss'))

    trainer.run()
Example No. 14
def main():
    parser = argparse.ArgumentParser(
        description='ChainerCV training example: Faster R-CNN')
    parser.add_argument('--gpu', '-g', type=int, default=-1)
    parser.add_argument('--lr', '-l', type=float, default=1e-3)
    parser.add_argument('--out', '-o', default='result',
                        help='Output directory')
    parser.add_argument('--seed', '-s', type=int, default=0)
    parser.add_argument('--step_size', '-ss', type=int, default=50000)
    parser.add_argument('--iteration', '-i', type=int, default=70000)
    parser.add_argument('--train_data_dir', '-t', default=WIDER_TRAIN_DIR,
                        help='Training dataset (WIDER_train)')
    parser.add_argument('--train_annotation', '-ta', default=WIDER_TRAIN_ANNOTATION_MAT,
                        help='Annotation file (.mat) for training dataset')
    parser.add_argument('--val_data_dir', '-v', default=WIDER_VAL_DIR,
                        help='Validation dataset (WIDER_val)')
    parser.add_argument('--val_annotation', '-va', default=WIDER_VAL_ANNOTATION_MAT,
                        help='Annotation file (.mat) for validation dataset')
    args = parser.parse_args()

    np.random.seed(args.seed)

    # for logging processed files
    logger = logging.getLogger('logger')
    logger.setLevel(logging.DEBUG)
    handler = logging.FileHandler(filename='filelog.log')
    handler.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    
    blacklist = []
    with open(BLACKLIST_FILE, 'r') as f:
        for line in f:
            l = line.strip()
            if l:
                blacklist.append(l)
    
    # train_data = VOCDetectionDataset(split='trainval', year='2007')
    # test_data = VOCDetectionDataset(split='test', year='2007',
                                    # use_difficult=True, return_difficult=True)
    train_data = WIDERFACEDataset(args.train_data_dir, args.train_annotation, 
        logger=logger, exclude_file_list=blacklist)
    test_data = WIDERFACEDataset(args.val_data_dir, args.val_annotation)
    # WIDER FACE detection has a single foreground class; n_fg_class=1 is an
    # assumption here, matching the evaluator's label_names=('face',) below.
    faster_rcnn = FasterRCNNVGG16(n_fg_class=1, pretrained_model='imagenet')
    faster_rcnn.use_preset('evaluate')
    model = FasterRCNNTrainChain(faster_rcnn)
    if args.gpu >= 0:
        model.to_gpu(args.gpu)
        chainer.cuda.get_device(args.gpu).use()
    optimizer = chainer.optimizers.MomentumSGD(lr=args.lr, momentum=0.9)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(rate=0.0005))


    # NOTE: assumes a module-level `transform` function (like the inline one in
    # Example No. 10: prepare, resize_bbox, random_flip) defined elsewhere.
    train_data = TransformDataset(train_data, transform)
    #train_iter = chainer.iterators.MultiprocessIterator(
    #    train_data, batch_size=1, n_processes=None, shared_mem=100000000)
    train_iter = chainer.iterators.SerialIterator(
        train_data, batch_size=1)
    test_iter = chainer.iterators.SerialIterator(
        test_data, batch_size=1, repeat=False, shuffle=False)
    updater = chainer.training.updater.StandardUpdater(
        train_iter, optimizer, device=args.gpu)

    trainer = training.Trainer(
        updater, (args.iteration, 'iteration'), out=args.out)

    trainer.extend(
        extensions.snapshot_object(model.faster_rcnn, 'snapshot_model.npz'),
        trigger=(args.iteration, 'iteration'))
    trainer.extend(extensions.ExponentialShift('lr', 0.1),
                   trigger=(args.step_size, 'iteration'))

    log_interval = 20, 'iteration'
    plot_interval = 3000, 'iteration'
    print_interval = 20, 'iteration'

    trainer.extend(chainer.training.extensions.observe_lr(),
                   trigger=log_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport(
        ['iteration', 'epoch', 'elapsed_time', 'lr',
         'main/loss',
         'main/roi_loc_loss',
         'main/roi_cls_loss',
         'main/rpn_loc_loss',
         'main/rpn_cls_loss',
         'validation/main/map',
         ]), trigger=print_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    if extensions.PlotReport.available():
        trainer.extend(
            extensions.PlotReport(
                ['main/loss'],
                file_name='loss.png', trigger=plot_interval
            ),
            trigger=plot_interval
        )

    trainer.extend(
        DetectionVOCEvaluator(
            test_iter, model.faster_rcnn, use_07_metric=True,
            label_names=('face',)),
        trigger=ManualScheduleTrigger(
            [args.step_size, args.iteration], 'iteration'),
        invoke_before_training=False)

    trainer.extend(extensions.dump_graph('main/loss'))

    trainer.run()
Example No. 15
    model_args = {
        'n_fg_class': len(voc_bbox_label_names),
        'pretrained_model': 'voc0712'
    }
    model = helper.get_detector(args.det_type, model_args)

    if not os.path.exists(args.result):
        os.mkdir(args.result)

    if args.load:
        chainer.serializers.load_npz(args.load, model)

    model.use_preset('evaluate')
    if args.det_type == 'faster':
        train_chain = FasterRCNNTrainChain(model)
        train_transform = FasterRCNNTransform(model)
    else:
        train_chain = SSDMultiboxTrainChain(model)
        train_transform = SSDTransform(model.coder, model.insize, model.mean)

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    train = TransformDataset(datasets_train, train_transform)

    train_iter = MultiprocessIterator(train,
                                      args.batchsize,
                                      n_processes=4,
                                      shared_mem=100000000)
Example No. 16
def main():
    parser = argparse.ArgumentParser(
        description='ChainerCV training example: Faster R-CNN')
    parser.add_argument(
        '--dataset_path',
        '-path',
        type=str,
        default="/home/takagi.kazunari/projects/datasets/SUNRGBD_2DBB_fixed")
    parser.add_argument('--gpu', '-g', type=int, default=-1)
    parser.add_argument('--lr', '-l', type=float, default=1e-3)
    parser.add_argument('--out',
                        '-o',
                        default='sunrgbd_result',
                        help='Output directory')
    parser.add_argument('--seed', '-s', type=int, default=0)
    parser.add_argument('--step_size', '-ss', type=int, default=50000)
    parser.add_argument('--iteration', '-i', type=int, default=70000)
    args = parser.parse_args()

    np.random.seed(args.seed)

    train_data = SUNRGBDDataset(args.dataset_path, mode="train")
    test_data = SUNRGBDDataset(args.dataset_path, mode="test")

    sunrgbd_bbox_label_names = train_data.get_dataset_label()

    faster_rcnn = FasterRCNNVGG16(n_fg_class=len(sunrgbd_bbox_label_names),
                                  pretrained_model='imagenet')
    faster_rcnn.use_preset('evaluate')
    model = FasterRCNNTrainChain(faster_rcnn)
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()
    optimizer = chainer.optimizers.MomentumSGD(lr=args.lr, momentum=0.9)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer_hooks.WeightDecay(rate=0.0005))

    train_data = TransformDataset(train_data, Transform(faster_rcnn))

    train_iter = chainer.iterators.MultiprocessIterator(train_data,
                                                        batch_size=1,
                                                        n_processes=None,
                                                        shared_mem=100000000)
    test_iter = chainer.iterators.SerialIterator(test_data,
                                                 batch_size=1,
                                                 repeat=False,
                                                 shuffle=False)
    updater = chainer.training.updaters.StandardUpdater(train_iter,
                                                        optimizer,
                                                        device=args.gpu)

    now_time = str(datetime.datetime.today()).replace(" ", "_")
    save_dir = osp.join(args.out, now_time)

    trainer = training.Trainer(updater, (args.iteration, 'iteration'),
                               out=save_dir)

    #save_iteration = [i for i in range(100, args.iteration, args.step_size)]

    weight_save_interval = 5000, 'iteration'
    evaluation_interval = 10000, 'iteration'

    trainer.extend(extensions.snapshot_object(
        model.faster_rcnn, 'sunrgbd_model_{.updater.iteration}.npz'),
                   trigger=weight_save_interval)
    trainer.extend(extensions.ExponentialShift('lr', 0.1),
                   trigger=(args.step_size, 'iteration'))

    log_interval = 20, 'iteration'
    plot_interval = 10, 'iteration'
    print_interval = 20, 'iteration'

    trainer.extend(chainer.training.extensions.observe_lr(),
                   trigger=log_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'iteration',
        'epoch',
        'elapsed_time',
        'lr',
        'main/loss',
        'main/roi_loc_loss',
        'main/roi_cls_loss',
        'main/rpn_loc_loss',
        'main/rpn_cls_loss',
        'validation/main/map',
    ]),
                   trigger=print_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    if extensions.PlotReport.available():
        trainer.extend(extensions.PlotReport(['main/loss'],
                                             file_name='loss.png',
                                             trigger=plot_interval),
                       trigger=plot_interval)

    #do_evaluation_iteration = [i for i in range(0, args.iteration, 500)]

    trainer.extend(DetectionVOCEvaluator(test_iter,
                                         model.faster_rcnn,
                                         use_07_metric=True,
                                         label_names=sunrgbd_bbox_label_names),
                   trigger=evaluation_interval)

    trainer.extend(extensions.dump_graph('main/loss'))

    trainer.run()
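
As a hedged follow-up (the snapshot file name below is illustrative, and sunrgbd_bbox_label_names is reused from the example above): snapshot_object stores only model.faster_rcnn, so a saved snapshot can be loaded back into a bare FasterRCNNVGG16 and used for inference with predict.

import chainer
from chainercv.links import FasterRCNNVGG16
from chainercv.utils import read_image

# n_fg_class must match the value used at training time.
faster_rcnn = FasterRCNNVGG16(n_fg_class=len(sunrgbd_bbox_label_names))
chainer.serializers.load_npz('sunrgbd_model_70000.npz', faster_rcnn)

img = read_image('example.jpg')                      # CHW float32 array
bboxes, labels, scores = faster_rcnn.predict([img])  # per-image arrays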