Code example #1
File: imagenet_utils.py  Project: shlpu/benchmarks
# Imports shared by these tensorpack examples (the later examples additionally
# use os, zipfile, numpy, and AugmentImageComponent / PrefetchDataZMQ /
# RepeatedData / LMDBSerializer from the same packages):
import multiprocessing

import cv2
from tensorpack.dataflow import (
    BatchData, DataFromList, MultiThreadMapData, dataset, imgaug)
from tensorpack.utils import logger


def get_val_dataflow(
        datadir, batch_size,
        augmentors, parallel=None,
        num_splits=None, split_index=None):
    assert datadir is not None
    assert isinstance(augmentors, list)
    if parallel is None:
        parallel = min(40, multiprocessing.cpu_count())

    if num_splits is None:
        ds = dataset.ILSVRC12Files(datadir, 'val', shuffle=False)
    else:
        assert split_index < num_splits
        files = dataset.ILSVRC12Files(datadir, 'val', shuffle=False)
        files.reset_state()
        files = list(files.get_data())
        logger.info("#ValidationData = {}".format(len(files)))
        split_size = len(files) // num_splits
        start, end = split_size * split_index, split_size * (split_index + 1)
        end = min(end, len(files))
        logger.info("#ValidationSplit = {} - {}".format(start, end))
        files = files[start: end]
        ds = DataFromList(files, shuffle=False)
    aug = imgaug.AugmentorList(augmentors)

    def mapf(dp):
        fname, cls = dp
        im = cv2.imread(fname, cv2.IMREAD_COLOR)
        im = aug.augment(im)
        return im, cls
    ds = MultiThreadMapData(ds, parallel, mapf, buffer_size=2000, strict=True)
    ds = BatchData(ds, batch_size, remainder=True)
    # ds = PrefetchDataZMQ(ds, 1)
    # do not fork() under MPI
    return ds
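
A minimal usage sketch for this loader follows. The data path and batch size are placeholders, and fbresnet_augmentor refers to the augmentor-list helper from the tensorpack ImageNet examples (assumed to be in scope):

ds = get_val_dataflow('/data/ilsvrc', batch_size=64,
                      augmentors=fbresnet_augmentor(False))
ds.reset_state()                   # required before iterating any DataFlow
for ims, labels in ds.get_data():  # each datapoint is one full batch
    pass                           # run evaluation on (ims, labels)
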
Code example #2
def get_val_dataflow(datadir,
                     batch_size,
                     augmentors=None,
                     parallel=None,
                     num_splits=None,
                     split_index=None,
                     dataname="val"):
    if augmentors is None:
        augmentors = fbresnet_augmentor(False)
    assert datadir is not None
    assert isinstance(augmentors, list)
    if parallel is None:
        parallel = min(40, multiprocessing.cpu_count())

    if num_splits is None:
        ds = dataset.ILSVRC12Files(datadir, dataname, shuffle=True)
    else:
        # shard validation data (this branch is intentionally disabled)
        assert False
        assert split_index < num_splits
        files = dataset.ILSVRC12Files(datadir, dataname, shuffle=True)
        files.reset_state()
        files = list(files.get_data())
        logger.info("Number of validation data = {}".format(len(files)))
        split_size = len(files) // num_splits
        start, end = split_size * split_index, split_size * (split_index + 1)
        end = min(end, len(files))
        logger.info("Local validation split = {} - {}".format(start, end))
        files = files[start:end]
        ds = DataFromList(files, shuffle=True)

    aug = imgaug.AugmentorList(augmentors)

    def mapf(dp):
        fname, cls = dp
        im = cv2.imread(fname, cv2.IMREAD_COLOR)
        # convert from BGR (the OpenCV default) to RGB
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        im = aug.augment(im)
        return im, cls

    ds = MultiThreadMapData(ds,
                            parallel,
                            mapf,
                            buffer_size=min(2000, ds.size()),
                            strict=True)
    ds = BatchData(ds, batch_size, remainder=False)
    ds = RepeatedData(ds, num=-1)
    # do not fork() under MPI
    return ds
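
Note that this version wraps the flow in RepeatedData(num=-1), so iteration never terminates on its own and the caller must bound the loop. A minimal consumption sketch (path and batch size are placeholders; ILSVRC12 val has 50,000 images):

import itertools

ds = get_val_dataflow('/data/ilsvrc', batch_size=64)
ds.reset_state()
for ims, labels in itertools.islice(ds.get_data(), 50000 // 64):
    pass  # run evaluation on the batch
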
Code example #3
def get_imagenet_dataflow(datadir, name, batch_size, augmentors):
    """
    See explanations in the tutorial:
    http://tensorpack.readthedocs.io/en/latest/tutorial/efficient-dataflow.html
    """
    assert name in ['train', 'val', 'test']
    assert datadir is not None
    assert isinstance(augmentors, list)
    isTrain = name == 'train'
    cpu = min(30, multiprocessing.cpu_count())
    meta_dir = './ilsvrc_metadata'
    if isTrain:
        ds = dataset.ILSVRC12(datadir, name, meta_dir=meta_dir, shuffle=True)
        ds = AugmentImageComponent(ds, augmentors, copy=False)
        ds = PrefetchDataZMQ(ds, cpu)
        ds = BatchData(ds, batch_size, remainder=False)
    else:
        ds = dataset.ILSVRC12Files(datadir,
                                   name,
                                   meta_dir=meta_dir,
                                   shuffle=False)
        aug = imgaug.AugmentorList(augmentors)

        def mapf(dp):
            fname, cls = dp
            im = cv2.imread(fname, cv2.IMREAD_COLOR)
            im = aug.augment(im)
            return im, cls

        ds = MultiThreadMapData(ds, cpu, mapf, buffer_size=2000, strict=True)
        ds = BatchData(ds, batch_size, remainder=True)
        ds = PrefetchDataZMQ(ds, 1)
    return ds
Code example #4
File: imagenet_utils.py  Project: zzuxzt/tensorpack
def get_imagenet_dataflow(
        datadir, name, batch_size,
        augmentors, parallel=None):
    """
    See explanations in the tutorial:
    http://tensorpack.readthedocs.io/en/latest/tutorial/efficient-dataflow.html
    """
    assert name in ['train', 'val', 'test']
    assert datadir is not None
    assert isinstance(augmentors, list)
    isTrain = name == 'train'
    if parallel is None:
        parallel = min(40, multiprocessing.cpu_count() // 2)  # assuming hyperthreading
    if isTrain:
        ds = dataset.ILSVRC12(datadir, name, shuffle=True)
        ds = AugmentImageComponent(ds, augmentors, copy=False)
        if parallel < 16:
            logger.warn("DataFlow may become the bottleneck when too few processes are used.")
        ds = PrefetchDataZMQ(ds, parallel)
        ds = BatchData(ds, batch_size, remainder=False)
    else:
        ds = dataset.ILSVRC12Files(datadir, name, shuffle=False)
        aug = imgaug.AugmentorList(augmentors)

        def mapf(dp):
            fname, cls = dp
            im = cv2.imread(fname, cv2.IMREAD_COLOR)
            im = aug.augment(im)
            return im, cls
        ds = MultiThreadMapData(ds, parallel, mapf, buffer_size=2000, strict=True)
        ds = BatchData(ds, batch_size, remainder=True)
        ds = PrefetchDataZMQ(ds, 1)
    return ds
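
All of these loaders expect augmentors to be a list of tensorpack imgaug augmentors. A minimal validation-time list might look like the sketch below (the 256/224 sizes are the common ImageNet defaults, not something these snippets prescribe):

val_augmentors = [
    imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
    imgaug.CenterCrop((224, 224)),
]
df = get_imagenet_dataflow('/data/ilsvrc', 'val', 64, val_augmentors)
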
Code example #5
def get_imagenet_dataflow(datadir,
                          name,
                          batch_size,
                          augmentors,
                          parallel=None):
    """
    See explanations in the tutorial:
    http://tensorpack.readthedocs.io/en/latest/tutorial/efficient-dataflow.html
    """
    assert name in ['train', 'val', 'test']
    assert datadir is not None
    assert isinstance(augmentors, list)
    isTrain = name == 'train'
    if parallel is None:
        parallel = min(40, multiprocessing.cpu_count())
    if isTrain:
        ds = dataset.ILSVRC12(datadir, name, shuffle=True)
        ds = AugmentImageComponent(ds, augmentors, copy=False)
        if parallel < 16:
            logger.warn(
                "DataFlow may become the bottleneck when too few processes are used."
            )
        ds = PrefetchDataZMQ(ds, parallel)
        ds = BatchData(ds, batch_size, remainder=False)
    else:
        ds = dataset.ILSVRC12Files(datadir, name, shuffle=False)
        aug = imgaug.AugmentorList(augmentors)

        def mapf(dp):
            fname, cls = dp

            jpeg_filename = os.path.basename(fname)
            jpeg_dirname = os.path.basename(os.path.dirname(fname))
            zip_filepath = os.path.dirname(fname) + '.zip'

            # np.fromstring is deprecated in favor of np.frombuffer; also
            # close the archive deterministically with a context manager
            with zipfile.ZipFile(zip_filepath, 'r') as f:
                compress_jpeg = np.frombuffer(
                    f.read(os.path.join(jpeg_dirname, jpeg_filename)),
                    dtype=np.uint8)

            im = cv2.imdecode(compress_jpeg, cv2.IMREAD_COLOR)
            #im = cv2.imread(fname, cv2.IMREAD_COLOR)

            im = aug.augment(im)
            return im, cls

        ds = MultiThreadMapData(ds,
                                parallel,
                                mapf,
                                buffer_size=2000,
                                strict=True)
        ds = BatchData(ds, batch_size, remainder=True)
        ds = PrefetchDataZMQ(ds, 1)
    return ds
Code example #6
def get_imagenet_dataflow(datadir,
                          name,
                          batch_size,
                          augmentors,
                          parallel=None):  # build the ImageNet dataflow
    """
    See explanations in the tutorial:
    http://tensorpack.readthedocs.io/en/latest/tutorial/efficient-dataflow.html
    """
    assert name in ['train', 'val', 'test']
    assert datadir is not None
    assert isinstance(augmentors, list)
    isTrain = name == 'train'
    if parallel is None:  # no worker count given
        # assuming hyperthreading: use half the logical CPU count, capped at 40
        parallel = min(40, multiprocessing.cpu_count() // 2)

    if isTrain:
        # training: decode and augment in parallel processes, then batch
        ds = dataset.ILSVRC12(datadir, name, shuffle=True)
        # apply the augmentor list to the image component, sharing
        # augmentation parameters across components
        ds = AugmentImageComponent(ds, augmentors, copy=False)
        if parallel < 16:  # with too few workers the dataflow may not keep up
            logger.warn(
                "DataFlow may become the bottleneck when too few processes are used."
            )
        ds = PrefetchDataZMQ(ds, parallel)  # run the pipeline in `parallel` ZMQ worker processes
        ds = BatchData(ds, batch_size, remainder=False)  # group datapoints into fixed-size batches
    else:
        # validation/test: list filenames, then decode and augment in threads.
        # ILSVRC12Files behaves like ILSVRC12 but yields filenames instead of
        # decoded arrays.
        ds = dataset.ILSVRC12Files(datadir, name, shuffle=False)
        aug = imgaug.AugmentorList(augmentors)

        def mapf(dp):
            fname, cls = dp
            # cv2.IMREAD_COLOR loads a 3-channel color image, ignoring alpha
            im = cv2.imread(fname, cv2.IMREAD_COLOR)
            im = aug.augment(im)  # apply the augmentors
            return im, cls

        ds = MultiThreadMapData(ds,
                                parallel,
                                mapf,
                                buffer_size=2000,
                                strict=True)  # map with `parallel` threads
        ds = BatchData(ds, batch_size, remainder=True)  # keep the last partial batch
        ds = PrefetchDataZMQ(ds, 1)
    return ds
Code example #7
File: eval.py  Project: zzwei1/AutoNL
def get_val_dataflow(datadir, parallel=1):
    assert datadir is not None
    ds = dataset.ILSVRC12Files(datadir, 'val', shuffle=False)

    def mapf(dp):
        fname, cls = dp
        with open(fname, "rb") as f:
            im_bytes = f.read()
        return im_bytes, cls

    ds = MultiThreadMapData(ds,
                            parallel,
                            mapf,
                            buffer_size=min(2000, ds.size()),
                            strict=True)
    return ds
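
Unlike the other examples, this flow yields raw JPEG bytes and defers decoding to the consumer, which keeps the loader itself cheap. A purely illustrative sketch of the consuming side:

import numpy as np

ds = get_val_dataflow('/data/ilsvrc')
ds.reset_state()
for im_bytes, cls in ds.get_data():
    im = cv2.imdecode(np.frombuffer(im_bytes, np.uint8), cv2.IMREAD_COLOR)
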
Code example #8
def get_tp_loader(data_dir, name, batch_size, parallel=None):
    isTrain = name == 'train'
    augmentors = get_tp_augmentor(isTrain)

    if data_dir.endswith('lmdb'):
        # 500000[70:87:20, 1.95it/s]
        data_dir = os.path.join(data_dir, 'ILSVRC-%s.lmdb' % name)
        ds = LMDBSerializer.load(data_dir, shuffle=False)
        ds = get_sequential_loader(ds, isTrain, batch_size, augmentors,
                                   parallel)
    else:
        # 500000[27:11:03, 5.11it/s]
        if isTrain:
            ds = dataset.ILSVRC12(data_dir, name, shuffle=True)
        else:
            ds = dataset.ILSVRC12Files(data_dir, name, shuffle=False)
        ds = get_random_loader(ds, isTrain, batch_size, augmentors, parallel)
    return ds
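
This loader reads either a raw image directory or a pre-serialized LMDB file (get_tp_augmentor, get_sequential_loader, and get_random_loader are project-local helpers not shown here). The LMDB branch assumes a database created beforehand, for example with tensorpack's serializer (a sketch; paths are placeholders, and the exact datapoint format the unshown helpers expect may differ):

raw = dataset.ILSVRC12('/data/ilsvrc', 'train', shuffle=False)
LMDBSerializer.save(raw, '/data/lmdb/ILSVRC-train.lmdb')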