def create_image_mb_source(map_file, is_training, total_number_of_samples):
    """Build a MinibatchSource reading images and labels from *map_file*.

    Training mode adds a random-side crop with jitter; evaluation uses a
    deterministic center crop.  The map file's first column is exposed as
    'features' (the image) and the second as 'labels'.
    """
    if not os.path.exists(map_file):
        raise RuntimeError("File '%s' does not exist." %map_file)

    # Crop transform differs between training (jittered) and evaluation.
    if is_training:
        crop = ImageDeserializer.crop(crop_type='randomside', side_ratio='0.4375:0.875', jitter_type='uniratio') # train uses jitter
    else:
        crop = ImageDeserializer.crop(crop_type='center', side_ratio=0.5833333) # test has no jitter

    # Both modes then scale to the fixed network input size.
    transforms = [
        crop,
        ImageDeserializer.scale(width=image_width, height=image_height, channels=num_channels, interpolations='linear'),
    ]

    stream_defs = StreamDefs(
        features = StreamDef(field='image', transforms=transforms), # first column in map file is referred to as 'image'
        labels   = StreamDef(field='label', shape=num_classes))     # and second as 'label'
    return MinibatchSource(
        ImageDeserializer(map_file, stream_defs),
        randomize = is_training,
        max_samples=total_number_of_samples,
        multithreaded_deserializer = True)
Пример #2
0
def create_reader(map_file, mean_file, train):
    """Create a CIFAR-10 MinibatchSource with mean subtraction.

    When *train* is true a jittered random crop is prepended; scaling to
    the network input size and mean subtraction are always applied.
    """
    if not os.path.exists(map_file) or not os.path.exists(mean_file):
        raise RuntimeError(
            "File '%s' or '%s' does not exist. Please run install_cifar10.py from DataSets/CIFAR-10 to fetch them"
            % (map_file, mean_file)
        )

    transforms = []
    if train:
        # train uses jitter
        transforms.append(ImageDeserializer.crop(crop_type="Random", ratio=0.8, jitter_type="uniRatio"))
    transforms.append(ImageDeserializer.scale(width=image_width, height=image_height, channels=num_channels, interpolations="linear"))
    transforms.append(ImageDeserializer.mean(mean_file))

    # First map-file column is exposed as 'image', the second as 'label'.
    deserializer = ImageDeserializer(
        map_file,
        StreamDefs(
            features=StreamDef(field="image", transforms=transforms),
            labels=StreamDef(field="label", shape=num_classes),
        ),
    )
    return MinibatchSource(deserializer)
def create_reader(map_file, mean_file, train):
    """Return a MinibatchSource over the CIFAR-10 map/mean file pair.

    Adds a jittered random crop when *train* is set, then scales to the
    network input size and subtracts the dataset mean.
    """
    if not (os.path.exists(map_file) and os.path.exists(mean_file)):
        raise RuntimeError(
            "File '%s' or '%s' does not exist. Please run install_cifar10.py from DataSets/CIFAR-10 to fetch them"
            % (map_file, mean_file))

    # Jittered crop only during training; scale + mean subtraction always.
    crop_steps = [ImageDeserializer.crop(crop_type='Random',
                                         ratio=0.8,
                                         jitter_type='uniRatio')] if train else []
    transforms = crop_steps + [
        ImageDeserializer.scale(width=image_width,
                                height=image_height,
                                channels=num_channels,
                                interpolations='linear'),
        ImageDeserializer.mean(mean_file)
    ]
    # Column one of the map file is 'image', column two is 'label'.
    streams = StreamDefs(
        features=StreamDef(field='image', transforms=transforms),
        labels=StreamDef(field='label', shape=num_classes))
    return MinibatchSource(ImageDeserializer(map_file, streams))
Пример #4
0
def create_test_mb_source(features_stream_name, labels_stream_name,
                          image_height, image_width, num_channels, num_classes,
                          cifar_data_path):
    """Build the CIFAR-10 test minibatch source via the legacy reader API.

    NOTE(review): the feature pipeline applies a random crop with jitter even
    though this is the *test* reader — confirm this is intentional.
    """
    path = os.path.normpath(os.path.join(abs_path, cifar_data_path))
    map_file = os.path.join(path, TEST_MAP_FILENAME)
    mean_file = os.path.join(path, MEAN_FILENAME)

    if not os.path.exists(map_file) or not os.path.exists(mean_file):
        # Converter scripts are suffixed "_py3" when run under Python 3.
        cifar_py3 = "" if sys.version_info.major < 3 else "_py3"
        raise RuntimeError(
            "File '%s' or '%s' do not exist. Please run CifarDownload%s.py and CifarConverter%s.py from CIFAR-10 to fetch them"
            % (map_file, mean_file, cifar_py3, cifar_py3))

    feature_transforms = [
        ImageDeserializer.crop(crop_type='Random', ratio=0.8, jitter_type='uniRatio'),
        ImageDeserializer.scale(width=image_width, height=image_height,
                                channels=num_channels, interpolations='linear'),
        ImageDeserializer.mean(mean_file),
    ]
    image = ImageDeserializer(map_file)
    image.map_features(features_stream_name, feature_transforms)
    image.map_labels(labels_stream_name, num_classes)

    return ReaderConfig(image, epoch_size=sys.maxsize).minibatch_source()
Пример #5
0
def create_mb_source(image_height, image_width, num_channels, map_file):
    """Non-randomized image-only MinibatchSource (labels are ignored)."""
    image_source = ImageDeserializer(map_file)
    image_source.ignore_labels()
    image_source.map_features(
        'features',
        [ImageDeserializer.scale(width=image_width, height=image_height,
                                 channels=num_channels, interpolations='linear')])
    return MinibatchSource(image_source, randomize=False)
Пример #6
0
def create_reader(map_file, mean_file, train, distributed_communicator=None):
    """CIFAR-10 MinibatchSource, optionally wired to a distributed communicator.

    Training prepends a jittered random crop; all modes scale the image and
    subtract the dataset mean.
    """
    if not os.path.exists(map_file) or not os.path.exists(mean_file):
        # Converter scripts carry a "_py3" suffix on Python 3.
        cifar_py3 = "" if sys.version_info.major < 3 else "_py3"
        raise RuntimeError(
            "File '%s' or '%s' does not exist. Please run CifarDownload%s.py and CifarConverter%s.py from CIFAR-10 to fetch them"
            % (map_file, mean_file, cifar_py3, cifar_py3))

    transforms = []
    if train:
        # train uses jitter
        transforms.append(ImageDeserializer.crop(crop_type='Random',
                                                 ratio=0.8,
                                                 jitter_type='uniRatio'))
    transforms.append(ImageDeserializer.scale(width=image_width,
                                              height=image_height,
                                              channels=num_channels,
                                              interpolations='linear'))
    transforms.append(ImageDeserializer.mean(mean_file))

    # Map-file column one is 'image', column two is 'label'.
    streams = StreamDefs(
        features=StreamDef(field='image', transforms=transforms),
        labels=StreamDef(field='label', shape=num_classes))
    return MinibatchSource(
        ImageDeserializer(map_file, streams),
        distributed_communicator=distributed_communicator)
Пример #7
0
def create_mb_source(img_height, img_width, img_channels, n_classes, n_rois, data_path, data_set):
    """Composite Fast R-CNN reader: images plus ROI boxes and ROI labels.

    Streams: image features from a map file, ROI coordinates (4 per ROI) and
    per-ROI label vectors from CTF files.  Randomizes only for the train set.
    """
    rois_dim = 4 * n_rois
    label_dim = n_classes * n_rois

    path = os.path.normpath(os.path.join(abs_path, data_path))
    map_file = os.path.join(path, test_map_filename if data_set == 'test' else train_map_filename)
    roi_file = os.path.join(path, data_set + rois_filename_postfix)
    label_file = os.path.join(path, data_set + roilabels_filename_postfix)

    if not all(os.path.exists(f) for f in (map_file, roi_file, label_file)):
        raise RuntimeError("File '%s', '%s' or '%s' does not exist. "
                           "Please run install_fastrcnn.py from Examples/Image/Detection/FastRCNN to fetch them" %
                           (map_file, roi_file, label_file))

    # read images (pad to the target canvas with value 114 while scaling)
    image_source = ImageDeserializer(map_file)
    image_source.ignore_labels()
    image_source.map_features(features_stream_name,
                              [ImageDeserializer.scale(width=img_width, height=img_height, channels=img_channels,
                                                       scale_mode="pad", pad_value=114, interpolations='linear')])

    # read rois and labels from CTF text files as dense vectors
    roi_source = CTFDeserializer(roi_file)
    roi_source.map_input(roi_stream_name, dim=rois_dim, format="dense")
    label_source = CTFDeserializer(label_file)
    label_source.map_input(label_stream_name, dim=label_dim, format="dense")

    # combine all three deserializers into one composite source
    randomize = (data_set == "train")
    return MinibatchSource([image_source, roi_source, label_source],
                           epoch_size=sys.maxsize, randomize=randomize)
Пример #8
0
def create_image_mb_source(map_file, is_training, total_number_of_samples):
    """MinibatchSource over *map_file*: jittered random-side crop for
    training, deterministic center crop otherwise, then a linear scale to
    the network input size.
    """
    if not os.path.exists(map_file):
        raise RuntimeError("File '%s' does not exist." %map_file)

    if is_training:
        transforms = [ImageDeserializer.crop(crop_type='randomside', side_ratio='0.4375:0.875', jitter_type='uniratio')] # train uses jitter
    else:
        transforms = [ImageDeserializer.crop(crop_type='center', side_ratio=0.5833333)] # test has no jitter
    transforms.append(
        ImageDeserializer.scale(width=image_width, height=image_height, channels=num_channels, interpolations='linear'))

    # first map-file column is 'image', second is 'label'
    deserializer = ImageDeserializer(map_file, StreamDefs(
        features=StreamDef(field='image', transforms=transforms),
        labels=StreamDef(field='label', shape=num_classes)))
    return MinibatchSource(
        deserializer,
        randomize=is_training,
        epoch_size=total_number_of_samples,
        multithreaded_deserializer=True)
Пример #9
0
def test_image():
    """Validate the config dictionary that ReaderConfig builds for an
    ImageDeserializer with crop/scale/mean transforms.

    Fixed: the transform-parameter comparisons at the end were bare
    expressions without ``assert``, so they never checked anything; they
    are now real assertions.  The stray lowercase ``t2["type"] == "mean"``
    comparison, which contradicted the already-asserted ``"Mean"``, was
    removed.
    """
    from cntk.io import ReaderConfig, ImageDeserializer

    map_file = "input.txt"
    mean_file = "mean.txt"
    epoch_size = 150

    feature_name = "f"
    image_width = 100
    image_height = 200
    num_channels = 3

    label_name = "l"
    num_classes = 7

    image = ImageDeserializer(map_file)
    image.map_features(
        feature_name,
        [
            ImageDeserializer.crop(crop_type="Random", ratio=0.8, jitter_type="uniRatio"),
            ImageDeserializer.scale(
                width=image_width, height=image_height, channels=num_channels, interpolations="linear"
            ),
            ImageDeserializer.mean(mean_file),
        ],
    )
    image.map_labels(label_name, num_classes)

    rc = ReaderConfig(image, randomize=False, epoch_size=epoch_size)

    assert rc["epochSize"] == epoch_size
    assert rc["randomize"] == False
    assert len(rc["deserializers"]) == 1
    d = rc["deserializers"][0]
    assert d["type"] == "ImageDeserializer"
    assert d["file"] == map_file
    assert set(d["input"].keys()) == {label_name, feature_name}

    l = d["input"][label_name]
    assert l["labelDim"] == num_classes

    f = d["input"][feature_name]
    assert set(f.keys()) == {"transforms"}
    t0, t1, t2 = f["transforms"]
    assert t0["type"] == "Crop"
    assert t1["type"] == "Scale"
    assert t2["type"] == "Mean"
    # These were previously bare comparisons that had no effect.
    assert t0["cropType"] == "Random"
    assert t0["cropRatio"] == 0.8
    assert t0["jitterType"] == "uniRatio"
    assert t1["width"] == image_width
    assert t1["height"] == image_height
    assert t1["channels"] == num_channels
    assert t1["interpolations"] == "linear"
    assert t2["meanFile"] == mean_file

    # TODO depends on ImageReader.dll
    """ 
Пример #10
0
def test_image():
    """Validate the config dictionary that ReaderConfig builds for an
    ImageDeserializer with crop/scale/mean transforms.

    Fixed: the transform-parameter comparisons at the end were bare
    expressions without ``assert``, so they never checked anything; they
    are now real assertions.  The stray lowercase ``t2['type'] == 'mean'``
    comparison, which contradicted the already-asserted ``'Mean'``, was
    removed.
    """
    from cntk.io import ReaderConfig, ImageDeserializer
    map_file = "input.txt"
    mean_file = "mean.txt"
    epoch_size = 150

    feature_name = "f"
    image_width = 100
    image_height = 200
    num_channels = 3

    label_name = "l"
    num_classes = 7

    image = ImageDeserializer(map_file)
    image.map_features(feature_name, [
        ImageDeserializer.crop(
            crop_type='Random', ratio=0.8, jitter_type='uniRatio'),
        ImageDeserializer.scale(width=image_width,
                                height=image_height,
                                channels=num_channels,
                                interpolations='linear'),
        ImageDeserializer.mean(mean_file)
    ])
    image.map_labels(label_name, num_classes)

    rc = ReaderConfig(image, randomize=False, epoch_size=epoch_size)

    assert rc['epochSize'] == epoch_size
    assert rc['randomize'] == False
    assert len(rc['deserializers']) == 1
    d = rc['deserializers'][0]
    assert d['type'] == 'ImageDeserializer'
    assert d['file'] == map_file
    assert set(d['input'].keys()) == {label_name, feature_name}

    l = d['input'][label_name]
    assert l['labelDim'] == num_classes

    f = d['input'][feature_name]
    assert set(f.keys()) == {'transforms'}
    t0, t1, t2 = f['transforms']
    assert t0['type'] == 'Crop'
    assert t1['type'] == 'Scale'
    assert t2['type'] == 'Mean'
    # These were previously bare comparisons that had no effect.
    assert t0['cropType'] == 'Random'
    assert t0['cropRatio'] == 0.8
    assert t0['jitterType'] == 'uniRatio'
    assert t1['width'] == image_width
    assert t1['height'] == image_height
    assert t1['channels'] == num_channels
    assert t1['interpolations'] == 'linear'
    assert t2['meanFile'] == mean_file

    # TODO depends on ImageReader.dll
    ''' 
Пример #11
0
def test_image():
    """Validate the config dictionary that ReaderConfig builds for an
    ImageDeserializer with crop/scale/mean transforms.

    Fixed: the transform-parameter comparisons at the end were bare
    expressions without ``assert``, so they never checked anything; they
    are now real assertions.  The stray lowercase ``t2['type'] == 'mean'``
    comparison, which contradicted the already-asserted ``'Mean'``, was
    removed.
    """
    from cntk.io import ReaderConfig, ImageDeserializer
    map_file = "input.txt"
    mean_file = "mean.txt"
    epoch_size = 150

    feature_name = "f"
    image_width = 100
    image_height = 200
    num_channels = 3

    label_name = "l"
    num_classes = 7

    image = ImageDeserializer(map_file)
    image.map_features(feature_name,
            [ImageDeserializer.crop(crop_type='Random', ratio=0.8,
                jitter_type='uniRatio'),
             ImageDeserializer.scale(width=image_width, height=image_height,
                 channels=num_channels, interpolations='linear'),
             ImageDeserializer.mean(mean_file)])
    image.map_labels(label_name, num_classes)

    rc = ReaderConfig(image, randomize=False, epoch_size=epoch_size)

    # NOTE(review): this variant reads rc['epochSize'].value where the other
    # copies compare rc['epochSize'] directly — confirm which API is current.
    assert rc['epochSize'].value == epoch_size
    assert rc['randomize'] == False
    assert len(rc['deserializers']) == 1
    d = rc['deserializers'][0]
    assert d['type'] == 'ImageDeserializer'
    assert d['file'] == map_file
    assert set(d['input'].keys()) == {label_name, feature_name}

    l = d['input'][label_name]
    assert l['labelDim'] == num_classes

    f = d['input'][feature_name]
    assert set(f.keys()) == { 'transforms' }
    t0, t1, t2 = f['transforms']
    assert t0['type'] == 'Crop'
    assert t1['type'] == 'Scale'
    assert t2['type'] == 'Mean'
    # These were previously bare comparisons that had no effect.
    assert t0['cropType'] == 'Random'
    assert t0['cropRatio'] == 0.8
    assert t0['jitterType'] == 'uniRatio'
    assert t1['width'] == image_width
    assert t1['height'] == image_height
    assert t1['channels'] == num_channels
    assert t1['interpolations'] == 'linear'
    assert t2['meanFile'] == mean_file

    # TODO depends on ImageReader.dll
    ''' 
Пример #12
0
def create_mb_source(map_file,
                     image_width,
                     image_height,
                     num_channels,
                     num_classes,
                     randomize=True):
    """Image+label MinibatchSource that only rescales inputs (no crop/mean)."""
    scale_transform = ImageDeserializer.scale(width=image_width,
                                              height=image_height,
                                              channels=num_channels,
                                              interpolations='linear')
    image_source = ImageDeserializer(map_file)
    image_source.map_features(features_stream_name, [scale_transform])
    image_source.map_labels(label_stream_name, num_classes)
    return MinibatchSource(image_source, randomize=randomize)
Пример #13
0
def create_mb_source(image_height, image_width, num_channels, map_file):
    """Non-randomized 1000-class image reader built on StreamDefs.

    The first map-file column is exposed as 'features', the second as
    'labels'.  TODO: add option to ignore labels.
    """
    scale_transform = ImageDeserializer.scale(width=image_width,
                                              height=image_height,
                                              channels=num_channels,
                                              interpolations='linear')
    streams = StreamDefs(
        features=StreamDef(field='image', transforms=[scale_transform]),
        labels=StreamDef(field='label', shape=1000))
    return MinibatchSource(ImageDeserializer(map_file, streams), randomize=False)
Пример #14
0
def create_mb_source(features_stream_name, labels_stream_name, image_height,
                     image_width, num_channels, num_classes, cifar_data_path):
    """Build a CIFAR-10 training minibatch source from a hand-written config dict.

    Fixed: ``rc = ReaderConfig(...)`` was assigned but never used — the
    function builds its own config dictionary instead — so the dead
    assignment has been removed.

    NOTE(review): ``feature_name``, ``label_name``, ``features_stream_config``,
    ``labels_stream_config`` and ``minibatch_source`` are not defined in this
    chunk — presumably module-level globals; confirm they exist at runtime.
    """
    map_file = os.path.join(cifar_data_path, TRAIN_MAP_FILENAME)
    mean_file = os.path.join(cifar_data_path, MEAN_FILENAME)

    if not os.path.exists(map_file) or not os.path.exists(mean_file):
        # Converter scripts are suffixed "_py3" when run under Python 3.
        cifar_py3 = "" if sys.version_info.major < 3 else "_py3"
        raise RuntimeError(
            "File '%s' or '%s' do not exist. Please run CifarDownload%s.py and CifarConverter%s.py from CIFAR-10 to fetch them"
            % (map_file, mean_file, cifar_py3, cifar_py3))

    image = ImageDeserializer(map_file)
    image.map_features(feature_name, [
        ImageDeserializer.crop(
            crop_type='Random', ratio=0.8, jitter_type='uniRatio'),
        ImageDeserializer.scale(width=image_width,
                                height=image_height,
                                channels=num_channels,
                                interpolations='linear'),
        ImageDeserializer.mean(mean_file)
    ])
    image.map_labels(label_name, num_classes)

    input_streams_config = {
        features_stream_name: features_stream_config,
        labels_stream_name: labels_stream_config
    }
    deserializer_config = {
        "type": "ImageDeserializer",
        "file": map_file,
        "input": input_streams_config
    }

    minibatch_config = {
        "epochSize": sys.maxsize,
        "deserializers": [deserializer_config]
    }
    print(minibatch_config)

    return minibatch_source(minibatch_config)
def create_reader(map_file,
                  mean_file,
                  train,
                  total_data_size,
                  distributed_after=INFINITE_SAMPLES):
    """CIFAR-10 reader with explicit epoch size and deferred distribution.

    Single-threaded deserialization is used deliberately: CIFAR-10 decoding
    is cheap, so OMP threading buys nothing.
    """
    if not os.path.exists(map_file) or not os.path.exists(mean_file):
        raise RuntimeError(
            "File '%s' or '%s' does not exist. Please run install_cifar10.py from DataSets/CIFAR-10 to fetch them"
            % (map_file, mean_file))

    # train uses jitter; evaluation applies no crop at all
    crop_steps = [ImageDeserializer.crop(crop_type='randomside',
                                         side_ratio=0.8,
                                         jitter_type='uniratio')] if train else []
    transforms = crop_steps + [
        ImageDeserializer.scale(width=image_width,
                                height=image_height,
                                channels=num_channels,
                                interpolations='linear'),
        ImageDeserializer.mean(mean_file)
    ]
    # map-file column one is 'image', column two is 'label'
    streams = StreamDefs(
        features=StreamDef(field='image', transforms=transforms),
        labels=StreamDef(field='label', shape=num_classes))
    return MinibatchSource(
        ImageDeserializer(map_file, streams),
        epoch_size=total_data_size,
        multithreaded_deserializer=False,  # turn off omp as CIFAR-10 is not heavy for deserializer
        distributed_after=distributed_after)
def create_reader(map_file, mean_file, train, distributed_after=INFINITE_SAMPLES):
    """CIFAR-10 reader; distributed work starts after *distributed_after* samples."""
    if not (os.path.exists(map_file) and os.path.exists(mean_file)):
        raise RuntimeError("File '%s' or '%s' does not exist. Please run install_cifar10.py from DataSets/CIFAR-10 to fetch them" %
                           (map_file, mean_file))

    transforms = []
    if train:
        # train uses jitter
        transforms.append(ImageDeserializer.crop(crop_type='Random', ratio=0.8, jitter_type='uniRatio'))
    transforms.append(ImageDeserializer.scale(width=image_width, height=image_height, channels=num_channels, interpolations='linear'))
    transforms.append(ImageDeserializer.mean(mean_file))

    # first column in the map file is 'image', the second is 'label'
    streams = StreamDefs(
        features=StreamDef(field='image', transforms=transforms),
        labels=StreamDef(field='label', shape=num_classes))
    return MinibatchSource(
        ImageDeserializer(map_file, streams),
        multithreaded_deserializer=False,  # turn off omp as CIFAR-10 is not heavy for deserializer
        distributed_after=distributed_after)
Пример #17
0
def create_mb_source(features_stream_name, labels_stream_name, image_height,
                     image_width, num_channels, num_classes, cifar_data_path):
    """Legacy-API CIFAR-10 training source: crop + scale + mean subtraction."""
    path = os.path.normpath(os.path.join(abs_path, cifar_data_path))
    map_file = os.path.join(path, TRAIN_MAP_FILENAME)
    mean_file = os.path.join(path, MEAN_FILENAME)

    if not os.path.exists(map_file) or not os.path.exists(mean_file):
        # converter scripts carry a "_py3" suffix on Python 3
        cifar_py3 = "" if sys.version_info.major < 3 else "_py3"
        raise RuntimeError("File '%s' or '%s' do not exist. Please run CifarDownload%s.py and CifarConverter%s.py from CIFAR-10 to fetch them" %
                           (map_file, mean_file, cifar_py3, cifar_py3))

    transforms = [
        ImageDeserializer.crop(crop_type='Random', ratio=0.8, jitter_type='uniRatio'),
        ImageDeserializer.scale(width=image_width, height=image_height,
                                channels=num_channels, interpolations='linear'),
        ImageDeserializer.mean(mean_file),
    ]
    image = ImageDeserializer(map_file)
    image.map_features(features_stream_name, transforms)
    image.map_labels(labels_stream_name, num_classes)

    return ReaderConfig(image, epoch_size=sys.maxsize).minibatch_source()
Пример #18
0
def create_mb_source(map_file, image_width, image_height, num_channels, num_classes, randomize=True):
    """Image+label source with a single linear-scale transform."""
    image_source = ImageDeserializer(map_file)
    image_source.map_features(
        features_stream_name,
        [ImageDeserializer.scale(width=image_width, height=image_height,
                                 channels=num_channels, interpolations='linear')])
    image_source.map_labels(label_stream_name, num_classes)
    return MinibatchSource(image_source, randomize=randomize)