Code Example #1
File: test_fw_iterators.py  Project: thoslin/DALI
def test_gluon_iterator_sparse_batch():
    from nvidia.dali.plugin.mxnet import DALIGluonIterator as GluonIterator
    from mxnet.ndarray.ndarray import NDArray
    num_gpus = 1
    batch_size = 16

    pipes, _ = create_pipeline(
        lambda gpu: COCOReaderPipeline(batch_size=batch_size,
                                       num_threads=4,
                                       shard_id=gpu,
                                       num_gpus=num_gpus,
                                       data_paths=data_sets[0],
                                       random_shuffle=True,
                                       stick_to_shard=False,
                                       shuffle_after_epoch=False,
                                       pad_last_batch=True,
                                       return_labels=True), batch_size,
        num_gpus)

    dali_train_iter = GluonIterator(
        pipes,
        pipes[0].epoch_size("Reader"),
        [GluonIterator.SPARSE_TAG, GluonIterator.DENSE_TAG],
        fill_last_batch=True)

    for it in dali_train_iter:
        labels, ids = it[0]  # gpu 0
        # labels should be a sparse batch: a list of per-sample NDArrays
        # ids should be a dense batch: a single NDArray representing the batch
        assert isinstance(labels, (tuple, list))
        assert len(labels) == batch_size
        assert isinstance(labels[0], NDArray)
        assert isinstance(ids, NDArray)
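
For reference, a minimal consumption sketch of the same iterator outside the assertions (assuming pipes, batch_size and dali_train_iter are built exactly as above): the SPARSE_TAG output arrives as a list of per-sample NDArrays, while the DENSE_TAG output arrives as one batched NDArray.

for batches in dali_train_iter:        # one list entry per GPU
    labels, ids = batches[0]           # outputs of the pipeline running on GPU 0
    per_sample_shapes = [lab.shape for lab in labels]   # SPARSE_TAG: per-sample NDArrays
    batch_shape = ids.shape                             # DENSE_TAG: one batched NDArray
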
Code Example #2
File: test_fw_iterators.py  Project: thoslin/DALI
def test_gluon_iterator_not_fill_last_batch_pad_last_batch():
    from nvidia.dali.plugin.mxnet import DALIGluonIterator as GluonIterator
    num_gpus = 1
    batch_size = 100

    pipes, data_size = create_pipeline(
        lambda gpu: COCOReaderPipeline(batch_size=batch_size,
                                       num_threads=4,
                                       shard_id=gpu,
                                       num_gpus=num_gpus,
                                       data_paths=data_sets[0],
                                       random_shuffle=False,
                                       stick_to_shard=False,
                                       shuffle_after_epoch=False,
                                       pad_last_batch=True), batch_size,
        num_gpus)

    dali_train_iter = GluonIterator(pipes,
                                    size=pipes[0].epoch_size("Reader"),
                                    fill_last_batch=False)

    img_ids_list, img_ids_list_set, mirrored_data, _, _ = \
        gather_ids(dali_train_iter, lambda x: x[0].squeeze().asnumpy(), lambda x: 0, data_size)

    assert len(img_ids_list) == data_size
    assert len(img_ids_list_set) == data_size
    assert len(set(mirrored_data)) != 1

    dali_train_iter.reset()
    next_img_ids_list, next_img_ids_list_set, next_mirrored_data, pad, remainder = \
        gather_ids(dali_train_iter, lambda x: x[0].squeeze().asnumpy(), lambda x: 0, data_size)

    assert len(next_img_ids_list) == data_size
    assert len(next_img_ids_list_set) == data_size
    assert len(set(next_mirrored_data)) != 1
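
The assertions above rely on the combination of pad_last_batch=True (the reader pads the last batch of the shard) and fill_last_batch=False (the iterator reports how many of those samples are padding; note the pad and remainder values returned by gather_ids), so each epoch gathers exactly data_size unique ids. A rough accounting sketch, with illustrative numbers standing in for data_size and batch_size:

import math

data_size, batch_size = 821, 100                             # illustrative values only
batches_per_epoch = math.ceil(data_size / batch_size)        # reader pads the final batch
padded_samples = batches_per_epoch * batch_size - data_size  # samples reported as padding
assert (batches_per_epoch, padded_samples) == (9, 79)
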
Code Example #3
File: test_fw_iterators.py  Project: thoslin/DALI
def test_stop_iteration_gluon():
    from nvidia.dali.plugin.mxnet import DALIGluonIterator as GluonIterator
    fw_iter = lambda pipe, size, auto_reset: GluonIterator(
        pipe, size, [GluonIterator.DENSE_TAG], auto_reset=auto_reset)
    iter_name = "GluonIterator"
    for batch_size, epochs, iter_num, auto_reset, infinite in stop_teration_case_generator():
        yield check_stop_iter, fw_iter, iter_name, batch_size, epochs, iter_num, auto_reset, infinite
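
This is the nose-style generator-test pattern: the runner executes each yielded tuple as callable(*args). Running a single case by hand would look roughly like the line below (the argument values are illustrative, not taken from stop_teration_case_generator):

# positional order: fw_iter, iter_name, batch_size, epochs, iter_num, auto_reset, infinite
check_stop_iter(fw_iter, iter_name, 16, 2, 4, True, False)
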
Code Example #4
File: test_fw_iterators.py  Project: thoslin/DALI
def test_stop_iteration_gluon_fail_single():
    from nvidia.dali.plugin.mxnet import DALIGluonIterator as GluonIterator
    fw_iter = lambda pipe, size, auto_reset: GluonIterator(
        pipe, size=size, auto_reset=auto_reset)
    check_stop_iter_fail_single(fw_iter)