Example 1
    def test_iterator_serialize(self):
        dataset = [1, 2, 3, 4, 5, 6]
        it = iterators.MultiprocessIterator(dataset, 2, **self.options)

        self.assertEqual(it.epoch, 0)
        self.assertAlmostEqual(it.epoch_detail, 0 / 6)
        batch1 = it.next()
        self.assertEqual(len(batch1), 2)
        self.assertIsInstance(batch1, list)
        self.assertFalse(it.is_new_epoch)
        self.assertAlmostEqual(it.epoch_detail, 2 / 6)
        batch2 = it.next()
        self.assertEqual(len(batch2), 2)
        self.assertIsInstance(batch2, list)
        self.assertFalse(it.is_new_epoch)
        self.assertAlmostEqual(it.epoch_detail, 4 / 6)

        target = dict()
        it.serialize(DummySerializer(target))

        it = iterators.MultiprocessIterator(dataset, 2, **self.options)
        it.serialize(DummyDeserializer(target))
        self.assertFalse(it.is_new_epoch)
        self.assertAlmostEqual(it.epoch_detail, 4 / 6)

        batch3 = it.next()
        self.assertEqual(len(batch3), 2)
        self.assertIsInstance(batch3, list)
        self.assertTrue(it.is_new_epoch)
        self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
        self.assertAlmostEqual(it.epoch_detail, 6 / 6)
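Note: `DummySerializer` and `DummyDeserializer` are helpers that are not shown in this example. As an assumption, the following minimal sketch of dict-backed serializers, written against the `chainer.serializer` `__call__(key, value)` protocol, is enough to make the test self-contained.

import chainer
import numpy


class DummySerializer(chainer.serializer.Serializer):
    """Hypothetical helper: stores every serialized value into a dict."""

    def __init__(self, target):
        super(DummySerializer, self).__init__()
        self.target = target

    def __getitem__(self, key):
        raise NotImplementedError

    def __call__(self, key, value):
        self.target[key] = value
        return value


class DummyDeserializer(chainer.serializer.Deserializer):
    """Hypothetical helper: restores values from the same dict."""

    def __init__(self, target):
        super(DummyDeserializer, self).__init__()
        self.target = target

    def __getitem__(self, key):
        raise NotImplementedError

    def __call__(self, key, value):
        if value is None:
            value = self.target[key]
        elif isinstance(value, numpy.ndarray):
            numpy.copyto(value, self.target[key])
        else:
            value = type(value)(numpy.asarray(self.target[key]))
        return value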
Example 2
def get_data_iterators(data_dir,
                       batch_size,
                       num_workers,
                       num_classes,
                       input_image_size=224,
                       resize_inv_factor=0.875):
    assert (resize_inv_factor > 0.0)
    resize_value = int(math.ceil(float(input_image_size) / resize_inv_factor))

    train_dir_path = os.path.join(data_dir, 'train')
    train_dataset = PreprocessedDataset(root=train_dir_path,
                                        scale_size=resize_value,
                                        crop_size=input_image_size)
    assert (len(directory_parsing_label_names(train_dir_path)) == num_classes)

    val_dir_path = os.path.join(data_dir, 'val')
    val_dataset = PreprocessedDataset(root=val_dir_path,
                                      scale_size=resize_value,
                                      crop_size=input_image_size)
    assert (len(directory_parsing_label_names(val_dir_path)) == num_classes)

    train_iterator = iterators.MultiprocessIterator(dataset=train_dataset,
                                                    batch_size=batch_size,
                                                    repeat=False,
                                                    shuffle=True,
                                                    n_processes=num_workers)

    val_iterator = iterators.MultiprocessIterator(dataset=val_dataset,
                                                  batch_size=batch_size,
                                                  repeat=False,
                                                  shuffle=False,
                                                  n_processes=num_workers)

    return train_iterator, val_iterator
Example 3
def get_data_iterators(data_dir,
                       batch_size,
                       num_workers,
                       num_classes):

    train_dir_path = os.path.join(data_dir, 'train')
    train_dataset = PreprocessedDataset(root=train_dir_path)
    assert(len(directory_parsing_label_names(train_dir_path)) == num_classes)

    val_dir_path = os.path.join(data_dir, 'val')
    val_dataset = PreprocessedDataset(root=val_dir_path)
    assert (len(directory_parsing_label_names(val_dir_path)) == num_classes)

    train_iterator = iterators.MultiprocessIterator(
        dataset=train_dataset,
        batch_size=batch_size,
        repeat=False,
        shuffle=True,
        n_processes=num_workers)

    val_iterator = iterators.MultiprocessIterator(
        dataset=val_dataset,
        batch_size=batch_size,
        repeat=False,
        shuffle=False,
        n_processes=num_workers)

    return train_iterator, val_iterator
Example 4
    def test_reproduce_same_permutation(self):
        dataset = [1, 2, 3, 4, 5, 6]
        numpy.random.seed(self._seed)
        it1 = iterators.MultiprocessIterator(dataset, 6)
        numpy.random.seed(self._seed)
        it2 = iterators.MultiprocessIterator(dataset, 6)
        for _ in range(5):
            self.assertEqual(it1.next(), it2.next())
Example 5
def create_iterators(train_dataset, valid_dataset, config):
    train = Dataset(**config['dataset']['train'])
    valid = Dataset(**config['dataset']['valid'])
    train_iter = iterators.MultiprocessIterator(train_dataset, train.batchsize)
    valid_iter = iterators.MultiprocessIterator(valid_dataset,
                                                valid.batchsize,
                                                repeat=False,
                                                shuffle=False)
    return train_iter, valid_iter
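Note: the `Dataset` wrapper and the config layout are not shown. A hedged sketch of the shape this helper appears to expect, inferred only from the keys it reads (`config['dataset']['train']`, `config['dataset']['valid']`, and the `batchsize` attribute); the names and values below are assumptions.

# Hypothetical config and settings wrapper, only to illustrate the accessed fields.
config = {
    'dataset': {
        'train': {'batchsize': 64},
        'valid': {'batchsize': 128},
    },
}


class Dataset(object):
    """Stand-in for the project's dataset-settings object."""

    def __init__(self, batchsize, **kwargs):
        self.batchsize = batchsize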
Example 6
    def test_reproduce_same_permutation(self):
        dataset = [1, 2, 3, 4, 5, 6]
        order_sampler1 = iterators.ShuffleOrderSampler(
            numpy.random.RandomState(self._seed))
        it1 = iterators.MultiprocessIterator(
            dataset, 6, order_sampler=order_sampler1)
        order_sampler2 = iterators.ShuffleOrderSampler(
            numpy.random.RandomState(self._seed))
        it2 = iterators.MultiprocessIterator(
            dataset, 6, order_sampler=order_sampler2)
        for _ in range(5):
            self.assertEqual(it1.next(), it2.next())
Example 7
def run(batch_size, n_process, prefetch,
        model_name, exits_bn, activation_function, number_filter_list,
        gpu_id, lossfun, learning_rate, max_epoch, out_dir, epoch):
    train, test = get_image()
    train = TransformDataset(train, trans)
    test = TransformDataset(test, trans)


    train_iter = iterators.MultiprocessIterator(train, batch_size, True, True, n_process, prefetch)
    test_iter = iterators.MultiprocessIterator(test, batch_size, False, False, n_process, prefetch)


    model = model_name(exits_bn, activation_function, number_filter_list)
    
    if gpu_id >= 0:
        model.to_gpu(gpu_id)

    # Wrap the model with a classifier link so that the loss calculation is
    # included in the model; the loss function is given by `lossfun`.

    model = links.Loss_Classifier(model, lossfun)
    # selection of your optimizing method
    optimizer = optimizers.MomentumSGD(lr=learning_rate, momentum=0.9)

    # Give the optimizer a reference to the model
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(0.0005))

    # Get an updater that uses the Iterator and Optimizer
    updater = training.updaters.StandardUpdater(train_iter, optimizer, device=gpu_id)

    # Setup a Trainer
    trainer = training.Trainer(updater, (max_epoch, 'epoch'), out='{}'.format(out_dir))

    from chainer.training import extensions

    trainer.extend(extensions.LogReport()) # generate report
    trainer.extend(extensions.snapshot(filename='snapshot_epoch-{.updater.epoch}')) # save updater
    trainer.extend(extensions.snapshot_object(model.predictor, filename='model_epoch-{.updater.epoch}')) # save model
    trainer.extend(extensions.Evaluator(test_iter, model, device=gpu_id)) # validation

    trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'main/accuracy', 'validation/main/loss', 'validation/main/accuracy', 'elapsed_time'])) # show loss and accuracy
    trainer.extend(extensions.ProgressBar()) # show training progress
    trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss'], x_key='epoch', file_name='loss.png')) # loss curve
    trainer.extend(extensions.PlotReport(['main/accuracy', 'validation/main/accuracy'], x_key='epoch', file_name='accuracy.png')) # accuracy curve
    trainer.extend(extensions.dump_graph('main/loss'))
    if epoch > 0:
        serializers.load_npz('./{}/snapshot_epoch-{}'.format(out_dir, epoch), trainer)
        trainer.updater.get_optimizer('main').lr = learning_rate
    trainer.run()
Example 8
def run_training(
        net, train, valid, result_dir, batchsize=64, devices=-1,
        training_epoch=300, initial_lr=0.05, lr_decay_rate=0.5,
        lr_decay_epoch=30, weight_decay=0.0005):
    # Iterator
    train_iter = iterators.MultiprocessIterator(train, batchsize)
    test_iter = iterators.MultiprocessIterator(valid, batchsize, False, False)

    # Model
    net = L.Classifier(net)

    # Optimizer
    optimizer = optimizers.MomentumSGD(lr=initial_lr)
    optimizer.setup(net)
    if weight_decay > 0:
        optimizer.add_hook(chainer.optimizer.WeightDecay(weight_decay))

    # Updater
    if isinstance(devices, int):
        devices = {'main': devices}
        updater = training.StandardUpdater(
            train_iter, optimizer, device=devices['main'])
    elif isinstance(devices, dict):
        updater = training.ParallelUpdater(
            train_iter, optimizer, devices=devices)

    # Trainer
    trainer = training.Trainer(
        updater, (training_epoch, 'epoch'), out=result_dir)

    # Trainer extensions
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.observe_lr())
    trainer.extend(extensions.Evaluator(
        test_iter, net, device=devices['main']), name='val')
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'main/accuracy', 'val/main/loss',
         'val/main/accuracy', 'elapsed_time', 'lr']))
    trainer.extend(extensions.PlotReport(
        ['main/loss', 'val/main/loss'], x_key='epoch', file_name='loss.png'))
    trainer.extend(extensions.PlotReport(
        ['main/accuracy', 'val/main/accuracy'], x_key='epoch',
        file_name='accuracy.png'))
    trainer.extend(extensions.ExponentialShift(
        'lr', lr_decay_rate), trigger=(lr_decay_epoch, 'epoch'))
    trainer.extend(extensions.snapshot_object(net.predictor, 'model_{.updater.epoch}.npz'), trigger=(10, 'epoch'))
    trainer.run()

    return net
Example 9
def create_iterators(train_dataset, batchsize, valid_dataset, valid_batchsize,
                     devices):
    if HAVE_NCCL and len(devices) > 1:
        train_iter = [
            iterators.MultiprocessIterator(i, batchsize)
            for i in chainer.datasets.split_dataset_n_random(
                train_dataset, len(devices))
        ]
    else:
        train_iter = iterators.MultiprocessIterator(train_dataset, batchsize)
    valid_iter = iterators.MultiprocessIterator(valid_dataset,
                                                valid_batchsize,
                                                repeat=False,
                                                shuffle=False)
    return train_iter, valid_iter
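Note: when NCCL is available and several devices are listed, `train_iter` is a list with one iterator per GPU. A hedged usage sketch of how such a return value is typically consumed; the model, optimizer, device mapping, and dataset variables below are placeholders, not part of the example.

import chainer.links as L
from chainer import optimizers, training

devices = {'main': 0, 'second': 1}           # assumed two-GPU mapping
model = L.Classifier(L.Linear(None, 10))     # placeholder network
optimizer = optimizers.MomentumSGD(lr=0.01)
optimizer.setup(model)

# train_dataset / valid_dataset are assumed to be defined elsewhere.
train_iter, valid_iter = create_iterators(
    train_dataset, 32, valid_dataset, 64, list(devices.values()))

if isinstance(train_iter, list):
    # One iterator per GPU: feed the list to the multiprocess parallel updater.
    updater = training.updaters.MultiprocessParallelUpdater(
        train_iter, optimizer, devices=devices)
else:
    updater = training.updaters.StandardUpdater(
        train_iter, optimizer, device=devices['main'])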
Example 10
def train(network_object,
          batchsize=128,
          gpu_id=0,
          max_epoch=20,
          train_dataset=None,
          test_dataset=None,
          postfix='',
          base_lr=0.01,
          lr_decay=None):

    #1. Dataset
    if train_dataset is None and test_dataset is None:
        train, test = cifar.get_cifar10()
    else:
        train, test = train_dataset, test_dataset

    #2. Iterator
    train_iter = iterators.MultiprocessIterator(train, batchsize)
    test_iter = iterators.MultiprocessIterator(test, batchsize, False, False)

    #3. Model
    net = L.Classifier(network_object)

    #4. Optimizer
    optimizer = optimizers.MomentumSGD(lr=base_lr)
    optimizer.setup(net)
    optimizer.add_hook(chainer.optimizer.WeightDecay(0.0005))

    #5. Updater
    updater = training.StandardUpdater(train_iter, optimizer, device=gpu_id)

    #6. Trainer
    trainer = training.Trainer(updater, (max_epoch, 'epoch'), out='{}_cifar10_{}result'.format(network_object.__class__.__name__, postfix))

    #7. Trainer extensions
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.observe_lr())
    trainer.extend(extensions.Evaluator(test_iter, net, device=gpu_id), name='val')
    trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'main/accuracy', 'val/main/loss', 'val/main/accuracy', 'elapsed_time', 'lr']))
    trainer.extend(extensions.PlotReport(['main/loss', 'val/main/loss'], x_key='epoch', file_name='loss.png'))
    trainer.extend(extensions.PlotReport(['main/accuracy', 'val/main/accuracy'], x_key='epoch', file_name='accuracy.png'))

    if lr_decay is not None:
        trainer.extend(extensions.ExponentialShift('lr', 0.1), trigger=lr_decay)
    trainer.run()
    del trainer

    return net
Example 11
    def test_iterator_repeat(self):
        dataset = [1, 2, 3, 4, 5, 6]
        it = iterators.MultiprocessIterator(dataset, 2, **self.options)
        for i in range(3):
            self.assertEqual(it.epoch, i)
            self.assertAlmostEqual(it.epoch_detail, i + 0 / 6)
            if i == 0:
                self.assertIsNone(it.previous_epoch_detail)
            else:
                self.assertAlmostEqual(it.previous_epoch_detail, i - 2 / 6)
            batch1 = it.next()
            self.assertEqual(len(batch1), 2)
            self.assertIsInstance(batch1, list)
            self.assertFalse(it.is_new_epoch)
            self.assertAlmostEqual(it.epoch_detail, i + 2 / 6)
            self.assertAlmostEqual(it.previous_epoch_detail, i + 0 / 6)
            batch2 = it.next()
            self.assertEqual(len(batch2), 2)
            self.assertIsInstance(batch2, list)
            self.assertFalse(it.is_new_epoch)
            self.assertAlmostEqual(it.epoch_detail, i + 4 / 6)
            self.assertAlmostEqual(it.previous_epoch_detail, i + 2 / 6)
            batch3 = it.next()
            self.assertEqual(len(batch3), 2)
            self.assertIsInstance(batch3, list)
            self.assertTrue(it.is_new_epoch)
            self.assertAlmostEqual(it.epoch_detail, i + 6 / 6)
            self.assertAlmostEqual(it.previous_epoch_detail, i + 4 / 6)

            self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
Example 12
    def test_iterator_pickle_after_init(self):
        dataset = [1, 2, 3, 4, 5, 6]
        it = iterators.MultiprocessIterator(dataset, 2, **self.options)

        self.assertEqual(it.epoch, 0)
        self.assertAlmostEqual(it.epoch_detail, 0 / 6)
        self.assertIsNone(it.previous_epoch_detail)
        batch1 = it.next()
        self.assertEqual(len(batch1), 2)
        self.assertIsInstance(batch1, list)
        self.assertFalse(it.is_new_epoch)
        self.assertAlmostEqual(it.epoch_detail, 2 / 6)
        self.assertAlmostEqual(it.previous_epoch_detail, 0 / 6)
        batch2 = it.next()
        self.assertEqual(len(batch2), 2)
        self.assertIsInstance(batch2, list)
        self.assertFalse(it.is_new_epoch)
        self.assertAlmostEqual(it.epoch_detail, 4 / 6)
        self.assertAlmostEqual(it.previous_epoch_detail, 2 / 6)

        pickled_it = pickle.dumps(it)
        it = pickle.loads(pickled_it)

        self.assertFalse(it.is_new_epoch)
        self.assertAlmostEqual(it.epoch_detail, 4 / 6)
        self.assertAlmostEqual(it.previous_epoch_detail, 2 / 6)

        batch3 = it.next()
        self.assertEqual(len(batch3), 2)
        self.assertIsInstance(batch3, list)
        self.assertTrue(it.is_new_epoch)
        self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
        self.assertAlmostEqual(it.epoch_detail, 6 / 6)
        self.assertAlmostEqual(it.previous_epoch_detail, 4 / 6)
Example 13
    def test_stalled_getitem(self):
        nth = self.nth
        batch_size = 2
        sleep = 0.5
        timeout = 0.1

        dataset = StallingDataset(nth, sleep)
        it = iterators.MultiprocessIterator(dataset,
                                            batch_size=batch_size,
                                            shuffle=False,
                                            dataset_timeout=timeout,
                                            repeat=False)

        # TimeoutWarning should be issued.
        warning_cls = iterators.MultiprocessIterator.TimeoutWarning
        data = []
        # No warning until the stalling batch
        for i in range(nth // batch_size):
            data.append(it.next())
        # Warning on the stalling batch
        with testing.assert_warns(warning_cls):
            data.append(it.next())
        # Retrieve data until the end
        while True:
            try:
                data.append(it.next())
            except StopIteration:
                break

        # All data must be retrieved
        assert data == [
            dataset.data[i * batch_size:(i + 1) * batch_size]
            for i in range((len(dataset) + batch_size - 1) // batch_size)
        ]
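Note: `StallingDataset` is not shown. A minimal sketch, consistent with how the test uses it (`dataset.data`, `len(dataset)`, and an `nth` example that stalls long enough to exceed `dataset_timeout`); the dataset length of 10 is an assumption.

import time


class StallingDataset(object):
    """Hypothetical dataset whose nth example blocks for `sleep` seconds."""

    def __init__(self, nth, sleep):
        self.data = list(range(10))  # assumed length
        self.nth = nth
        self.sleep = sleep

    def __len__(self):
        return len(self.data)

    def __getitem__(self, i):
        if i == self.nth:
            time.sleep(self.sleep)
        return self.data[i]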
Example 14
    def test_iterator_list_type(self):
        dataset = [[i, numpy.zeros((10, )) + i] for i in range(6)]
        it = iterators.MultiprocessIterator(dataset, 2, **self.options)
        for i in range(3):
            self.assertEqual(it.epoch, i)
            self.assertAlmostEqual(it.epoch_detail, i)
            if i == 0:
                self.assertIsNone(it.previous_epoch_detail)
            else:
                self.assertAlmostEqual(it.previous_epoch_detail, i - 2 / 6)
            batches = {}
            for j in range(3):
                batch = it.next()
                self.assertEqual(len(batch), 2)
                if j != 2:
                    self.assertFalse(it.is_new_epoch)
                else:
                    self.assertTrue(it.is_new_epoch)
                self.assertAlmostEqual(it.epoch_detail,
                                       (3 * i + j + 1) * 2 / 6)
                self.assertAlmostEqual(it.previous_epoch_detail,
                                       (3 * i + j) * 2 / 6)
                for x in batch:
                    self.assertIsInstance(x, list)
                    self.assertIsInstance(x[1], numpy.ndarray)
                    batches[x[0]] = x[1]

            self.assertEqual(len(batches), len(dataset))
            for k, v in six.iteritems(batches):
                numpy.testing.assert_allclose(dataset[k][1], v)
Example 15
    def test_iterator_dict_type(self):
        dataset = [{i: numpy.zeros((10, )) + i} for i in range(6)]
        it = iterators.MultiprocessIterator(dataset, 2, **self.options)
        for i in range(3):
            self.assertEqual(it.epoch, i)
            self.assertAlmostEqual(it.epoch_detail, i)
            batches = {}
            for j in range(3):
                batch = it.next()
                self.assertEqual(len(batch), 2)
                if j != 2:
                    self.assertFalse(it.is_new_epoch)
                else:
                    self.assertTrue(it.is_new_epoch)
                self.assertAlmostEqual(it.epoch_detail,
                                       (3 * i + j + 1) * 2 / 6)
                for x in batch:
                    self.assertIsInstance(x, dict)
                    k = tuple(x)[0]
                    v = x[k]
                    self.assertIsInstance(v, numpy.ndarray)
                    batches[k] = v

            self.assertEqual(len(batches), len(dataset))
            for k, v in six.iteritems(batches):
                x = dataset[k][tuple(dataset[k])[0]]
                numpy.testing.assert_allclose(x, v)
Example 16
    def test_invalid_order_sampler(self):
        dataset = [1, 2, 3, 4, 5, 6]

        with self.assertRaises(ValueError):
            it = iterators.MultiprocessIterator(
                dataset, 6, shuffle=None, order_sampler=_InvalidOrderSampler())
            it.next()
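Note: `_InvalidOrderSampler` is not shown. One way to build an order sampler that the iterator rejects (an assumption, not the original helper) is to return an order whose length no longer matches the previous order, which triggers the `ValueError` asserted above.

import numpy


class _InvalidOrderSampler(object):
    """Hypothetical sampler that shrinks the order on every call."""

    def __init__(self):
        self.n_call = 0

    def __call__(self, current_order, current_position):
        order = numpy.arange(len(current_order) - self.n_call)
        self.n_call += 1
        return order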
Example 17
    def test_iterator_shuffle_nondivisible(self):
        dataset = list(range(10))
        it = iterators.MultiprocessIterator(
            dataset, 3, n_processes=self.n_processes)
        out = sum([it.next() for _ in range(7)], [])
        self.assertNotEqual(out[0:10], out[10:20])
Example 18
    def test_unsupported_reset_middle(self):
        dataset = [1, 2, 3, 4, 5]
        it = iterators.MultiprocessIterator(
            dataset, 2, repeat=False, **self.options)
        it.next()
        self.assertRaises(NotImplementedError, it.reset)
Example 19
def get_data_iterators(batch_size, num_workers):

    train_dataset = PreprocessedCIFARDataset(train=True)
    train_iterator = iterators.MultiprocessIterator(dataset=train_dataset,
                                                    batch_size=batch_size,
                                                    repeat=False,
                                                    shuffle=True,
                                                    n_processes=num_workers)

    val_dataset = PreprocessedCIFARDataset(train=False)
    val_iterator = iterators.MultiprocessIterator(dataset=val_dataset,
                                                  batch_size=batch_size,
                                                  repeat=False,
                                                  shuffle=False,
                                                  n_processes=num_workers)

    return train_iterator, val_iterator
Example 20
    def test_iterator_repeat_not_even(self):
        dataset = [1, 2, 3, 4, 5]
        it = iterators.MultiprocessIterator(dataset,
                                            2,
                                            n_processes=self.n_processes)

        batches = sum([it.next() for _ in range(5)], [])
        self.assertEqual(sorted(batches), sorted(dataset * 2))
Example 21
def train_CNN(network_object, batchsize=128, gpu_id=-1, max_epoch=20,
              train_dataset=None, test_dataset=None, postfix='',
              base_lr=0.01, lr_decay=None, number=11):
    number = str(number)
    # 1. Dataset
    if train_dataset is None and test_dataset is None:
        train, test = cifar.get_cifar10()
    else:
        train, test = train_dataset, test_dataset
        
    if gpu_id >= 0:
        network_object.to_gpu(gpu_id)
    # 2. Iterator
    train_iter = iterators.MultiprocessIterator(train, batchsize)
    test_iter = iterators.MultiprocessIterator(test, batchsize, False, False)

    # 3. Model
    net = L.Classifier(network_object)

    # 4. Optimizer
    optimizer = optimizers.MomentumSGD()
    optimizer.setup(net)
    optimizer.add_hook(chainer.optimizer.WeightDecay(0.0005))

    # 5. Updater
    updater = training.StandardUpdater(train_iter, optimizer, device=gpu_id)

    # 6. Trainer
    trainer = training.Trainer(updater, (max_epoch, 'epoch'), out='{}_crack_{}result'.format(network_object.__class__.__name__, postfix))
    
    # 7. Trainer extensions
    trainer.extend(extensions.LogReport(trigger=(1, 'epoch'), log_name="log_"+number))
    trainer.extend(extensions.snapshot(filename=number+'snapshot_epoch-{.updater.epoch}'),trigger=(5, 'epoch'))
#    trainer.extend(extensions.snapshot(), trigger=(10, 'epoch'))
    trainer.extend(extensions.ParameterStatistics(net.predictor.conv1, {'std': np.std}))
    trainer.extend(extensions.observe_lr())
    trainer.extend(extensions.Evaluator(test_iter, net, device=gpu_id), name='val')
    trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'main/accuracy', 'val/main/loss', 'val/main/accuracy', 'elapsed_time', 'lr']))
    trainer.extend(extensions.PlotReport(['main/loss', 'val/main/loss'], x_key='epoch', file_name='loss'+number+'.png'))
    trainer.extend(extensions.PlotReport(['main/accuracy', 'val/main/accuracy'], x_key='epoch', file_name='accuracy'+number+'.png'))
    trainer.extend(extensions.PlotReport(['l1/W/data/std'], x_key='epoch', file_name='std'+number+'.png'))    
    if lr_decay is not None:
        trainer.extend(extensions.ExponentialShift('lr', 0.1), trigger=lr_decay)
    trainer.run()
    del trainer

    return net                
Example 22
    def test_reset_repeat(self):
        dataset = [1, 2, 3, 4]
        it = iterators.MultiprocessIterator(
            dataset, 2, repeat=True, **self.options)

        for trial in range(4):
            batches = sum([it.next() for _ in range(4)], [])
            self.assertEqual(sorted(batches), sorted(2 * dataset))
            it.reset()
Example 23
    def test_iterator_not_repeat(self):
        dataset = [1, 2, 3, 4, 5]
        it = iterators.MultiprocessIterator(
            dataset, 2, repeat=False, **self.options)

        batches = sum([it.next() for _ in range(3)], [])
        self.assertEqual(sorted(batches), dataset)
        for _ in range(2):
            self.assertRaises(StopIteration, it.next)
Example 24
    def test_iterator_pickle_new(self):
        dataset = [1, 2, 3, 4, 5, 6]
        it = iterators.MultiprocessIterator(dataset, 2, **self.options)

        self.assertEqual(it.epoch, 0)
        self.assertAlmostEqual(it.epoch_detail, 0 / 6)
        self.assertIsNone(it.previous_epoch_detail)
        pickled_it = pickle.dumps(it)
        it = pickle.loads(pickled_it)
Example 25
    def test_no_same_indices_order_sampler(self):
        dataset = [1, 2, 3, 4, 5, 6]
        batchsize = 5

        it = iterators.MultiprocessIterator(
            dataset, batchsize,
            order_sampler=_NoSameIndicesOrderSampler(batchsize))
        for _ in range(5):
            batch = it.next()
            self.assertEqual(len(numpy.unique(batch)), batchsize)
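Note: `_NoSameIndicesOrderSampler` is not shown. A hedged sketch of one sampler with the property the test checks (a batch never contains the same index twice, even when it spans an epoch boundary), assuming `current_position` marks where the in-flight batch started.

import numpy


class _NoSameIndicesOrderSampler(object):
    """Hypothetical sampler: the head of each new order avoids the indices
    already placed in the batch that crosses the epoch boundary."""

    def __init__(self, batchsize):
        self.batchsize = batchsize

    def __call__(self, current_order, current_position):
        # Indices of the old order already taken by the boundary-spanning batch.
        tail = current_order[current_position:]
        n_head = max(self.batchsize - len(tail), 0)
        while True:
            order = numpy.random.permutation(len(current_order))
            if not numpy.isin(order[:n_head], tail).any():
                return order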
Example 26
def main():
    parser = argparse.ArgumentParser(description='training mnist')
    parser.add_argument('--gpu',
                        '-g',
                        default=-1,
                        type=int,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=8,
                        help='Number of images in each mini-batch')
    parser.add_argument('--load_model',
                        '-lm',
                        type=str,
                        default=None,
                        help='Path of the model object to load')

    args = parser.parse_args()

    backbone = 'mobilenet'
    model = ModifiedClassifier(
        DeepLab(n_class=13, task='semantic', backbone=backbone))

    if args.load_model is not None:
        serializers.load_npz(args.load_model, model)
    else:
        print('You need to specify path of the model object')
        sys.exit()

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    dir_path = './dataset/2D-3D-S'
    test_data = Stanford2D3DS(dir_path,
                              'semantic',
                              area='5a',
                              train=False,
                              n_data=100)
    test_iter = iterators.MultiprocessIterator(test_data,
                                               args.batchsize,
                                               repeat=False,
                                               shuffle=False)

    label_list = list(test_data.label_dict.keys())[1:]
    evaluator = ModifiedEvaluator(test_iter,
                                  model,
                                  label_names=label_list,
                                  device=args.gpu)
    observation = evaluator()

    for k, v in observation.items():
        print(k, v)
Example 27
def main(dataset_name, snapshot_path):
    """
    Args:
        dataset_name: 'mpii' or 'lsp'.
        init_snapshot_path: path to the snapshot to test
    """
    if dataset_name == 'mpii':
        TEST_CV_FILEPATH = os.path.join(config.MPII_DATASET_ROOT, 'test_joints.csv')
        IMG_PATH_PREFIX = os.path.join(config.MPII_DATASET_ROOT, 'images')
        symmetric_joints = "[[12, 13], [11, 14], [10, 15], [2, 3], [1, 4], [0, 5]]"
        ignore_label = -100500

    elif dataset_name == 'lsp':
        TEST_CV_FILEPATH = os.path.join(config.LSP_DATASET_ROOT, 'test_joints.csv')
#        IMG_PATH_PREFIX = ''
        IMG_PATH_PREFIX = os.path.join(config.LSP_DATASET_ROOT, 'images')
        symmetric_joints = "[[8, 9], [7, 10], [6, 11], [2, 3], [1, 4], [0, 5]]"
        ignore_label = -1

    else:
        TEST_CV_FILEPATH = os.path.join(config.MET_DATASET_ROOT, 'test_joints_1.csv')
        IMG_PATH_PREFIX = os.path.join(config.MET_DATASET_ROOT, 'images')
        symmetric_joints = "[[8, 9], [7, 10], [6, 11], [2, 3], [1, 4], [0, 5]]"
        ignore_label = -1

    print(TEST_CV_FILEPATH)
    print(IMG_PATH_PREFIX)

    test_dataset = dataset.PoseDataset(
        TEST_CV_FILEPATH,
        IMG_PATH_PREFIX, 227,
        fliplr=False, rotate=False,
        shift=None,
        bbox_extension_range=(1.0, 1.0),
        coord_normalize=True,
        gcn=True,
        fname_index=0,
        joint_index=1,
        symmetric_joints=symmetric_joints,
        ignore_label=ignore_label,
        should_return_bbox=True,
        should_downscale_images=True,
        downscale_height=400
    )

    test_iterator = iterators.MultiprocessIterator(
        test_dataset, batch_size=128,
        repeat=False, shuffle=False,
        n_processes=1, n_prefetch=1)

    if dataset_name == 'MET':
        test_net(test_dataset, test_iterator, 'lsp', snapshot_path)
    else:
        test_net(test_dataset, test_iterator, dataset_name, snapshot_path)
Example 28
    def test_iterator_not_repeat_not_even(self):
        dataset = [1, 2, 3, 4, 5]
        it = iterators.MultiprocessIterator(
            dataset, 2, repeat=False, **self.options)

        batch1 = it.next()
        batch2 = it.next()
        batch3 = it.next()
        self.assertRaises(StopIteration, it.next)

        self.assertEqual(len(batch3), 1)
        self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
Example 29
    def test_finalize_not_deadlock(self):
        dataset = numpy.ones((1000, 1000))
        it = iterators.MultiprocessIterator(dataset, 10, n_processes=4)
        for _ in range(10):
            it.next()

        t = threading.Thread(target=lambda: it.finalize())
        t.daemon = True
        t.start()
        t.join(5)
        deadlock = t.is_alive()

        self.assertFalse(deadlock)
Example 30
def create_iterator(
    settings: Dict,
    train_dataset: Union[datasets.LabeledImageDataset, datasets.TupleDataset,
                         None] = None,
    val_dataset: Union[datasets.LabeledImageDataset, datasets.TupleDataset,
                       None] = None,
    test_dataset: Union[datasets.LabeledImageDataset, datasets.TupleDataset,
                        None] = None
) -> Tuple[Optional[chainer.iterators.MultiprocessIterator],
           Optional[chainer.iterators.MultiprocessIterator],
           Optional[chainer.iterators.MultiprocessIterator]]:
    """Create dataset iterator."""
    gpu_num = len(settings["gpu_devices"])
    if train_dataset is None:
        train_iter = None
    else:
        if gpu_num == 1:
            train_iter = iterators.MultiprocessIterator(
                train_dataset,
                settings["batch_size"],
                n_processes=settings["n_processes"])
        else:
            assert gpu_num == len(train_dataset),\
                " gpu num: {} != dataset num: {}".format(gpu_num, len(train_dataset))
            train_iter = [
                iterators.MultiprocessIterator(
                    sub_dataset,
                    settings["batch_size"],
                    n_processes=min(2, settings["n_processes"] // gpu_num))
                for sub_dataset in train_dataset
            ]

    val_iter = None if val_dataset is None else \
        iterators.MultiprocessIterator(
            val_dataset, settings["batch_size"], repeat=False, shuffle=False, n_processes=settings["n_processes"])

    test_iter = None if test_dataset is None else \
        iterators.MultiprocessIterator(
            test_dataset, settings["batch_size"], repeat=False, shuffle=False, n_processes=settings["n_processes"])

    return train_iter, val_iter, test_iter
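Note: a hedged usage sketch for `create_iterator`. The settings dict only illustrates the keys the function actually reads (`gpu_devices`, `batch_size`, `n_processes`); the values and the dataset variables are assumptions.

# Hypothetical settings; train_dataset / val_dataset are assumed to exist.
settings = {
    "gpu_devices": [0],   # a single GPU, so one training iterator
    "batch_size": 32,
    "n_processes": 4,
}
train_iter, val_iter, test_iter = create_iterator(
    settings, train_dataset=train_dataset, val_dataset=val_dataset)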