def setUp(self):
    """Build one batched VOC training sample for the tests.

    Enables chainer debug mode, fixes the numpy RNG seed, and stores
    sample index 1 of the 'train' split as batch-of-one Variables in
    ``self.x``, ``self.im_info`` and ``self.gt_boxes``.
    """
    chainer.set_debug(True)
    np.random.seed(0)
    sample_img, sample_info, sample_bbox = VOC('train')[1]
    self.x = Variable(sample_img[None, ...])
    self.im_info = Variable(sample_info[None, ...])
    self.gt_boxes = Variable(sample_bbox[None, ...])
def warmup(model, gpu_ids):
    """Run one RCNN and one RPN forward pass on each requested device.

    Fetches a single VOC training sample, wraps it in batch-of-one
    Variables, and for every id in ``gpu_ids`` moves data and model to
    that GPU (ids >= 0), runs the model once with ``rcnn_train`` set and
    once with ``rpn_train`` set, then moves the model back to the CPU.
    """
    iterator = iterators.MultiprocessIterator(VOC('train'), 1,
                                              shared_mem=10000000)

    img, img_info, bbox = iterator.next()[0]
    img = chainer.Variable(img[None, ...])
    img_info = chainer.Variable(img_info[None, ...])
    bbox = chainer.Variable(bbox[None, ...])
    for gpu_id in gpu_ids:
        use_gpu = gpu_id >= 0
        if use_gpu:
            for variable in (img, img_info, bbox):
                variable.to_gpu(gpu_id)
            model.to_gpu(gpu_id)
        # NOTE(review): both flags end up True after the first pass —
        # presumably intended for warmup only; confirm against FasterRCNN.
        model.rcnn_train = True
        model(img, img_info, bbox)
        model.rpn_train = True
        model(img, img_info, bbox)
        if use_gpu:
            model.to_cpu()
def warmup(model, iterator, gpu_id=0):
    """Run one RCNN and one RPN forward pass on a single batch.

    Pulls the next batch from ``iterator``, concatenates it onto the
    device given by ``gpu_id``, and calls ``model`` twice: once with
    ``rcnn_train`` enabled, once with ``rpn_train`` enabled.
    """
    img, img_info, bbox = concat_examples(iterator.next(), gpu_id)
    inputs = tuple(chainer.Variable(array)
                   for array in (img, img_info, bbox))
    model.rcnn_train = True
    model(*inputs)
    model.rpn_train = True
    model(*inputs)


if __name__ == '__main__':
    batchsize = 1

    # Datasets: 'train' is iterated below; 'val' is built here but not
    # used in the visible lines — presumably consumed later in the file.
    train_dataset = VOC('train')
    valid_dataset = VOC('val')

    train_iter = iterators.SerialIterator(train_dataset, batchsize)
    model = FasterRCNN()
    model.to_gpu(0)

    # One forward pass to initialize parameters/link state before loading.
    warmup(model, train_iter)
    model.rcnn_train = True

    # NOTE(review): the snapshot is loaded AFTER warmup — looks deliberate
    # (warmup may create lazily-initialized params) but confirm; loading
    # first would discard nothing here.
    serializers.load_npz('tests/train_test/snapshot_10000', model)

    # optimizer = optimizers.Adam()
    # optimizer.setup(model)
    optimizer = optimizers.MomentumSGD(lr=0.001)
    optimizer.setup(model)
            devices['gpu{}'.format(gid)] = gid

    # Instantiate a model
    model = get_model(args.model_file, args.model_name, devices['main'],
                      args.rpn_in_ch, args.rpn_out_ch, args.n_anchors,
                      args.feat_stride, args.anchor_scales, args.num_classes,
                      args.spatial_scale, args.rpn_sigma, args.sigma,
                      args.trunk_model, True, result_dir)

    # Instantiate a optimizer
    optimizer = get_optimizer(model, args.opt, args.lr, args.adam_alpha,
                              args.adam_beta1, args.adam_beta2, args.adam_eps,
                              args.weight_decay)

    # Setting up datasets
    train = VOC('train', False)
    valid = VOC('val', False)
    print('train: {}, valid: {}'.format(len(train), len(valid)))

    # Iterator
    train_iter = iterators.MultiprocessIterator(train, args.batchsize)
    valid_iter = iterators.MultiprocessIterator(valid,
                                                args.valid_batchsize,
                                                repeat=False,
                                                shuffle=False)

    # Updater
    updater = ParallelUpdater(train_iter, optimizer, devices=devices)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=result_dir)

    # Extentions
 def setUp(self):
     """Load the VOC 'train' split and ensure the test image dir exists."""
     self.dataset = VOC('train', False)
     # exist_ok=True replaces the os.path.exists check-then-create pair,
     # which had a TOCTOU race and could raise FileExistsError.
     os.makedirs('tests/imgs', exist_ok=True)