Example #1
0
 def test_minimum_dimensions_example_stream(self):
     """source1 examples are upscaled to at least (4, 5); the other
     source's shape still matches the recorded pair with axes swapped."""
     transformer = MinimumImageDimensions(self.example_stream, (4, 5),
                                          which_sources=('source1',))
     for example, shp in zip(transformer.get_epoch_iterator(), self.shapes):
         resized, untouched = example[0], example[1]
         assert resized.shape[0] >= 4
         assert resized.shape[1] >= 5
         assert untouched.shape[1] == shp[0]
         assert untouched.shape[0] == shp[1]
Example #2
0
 def test_minimum_dimensions_example_stream(self):
     """The resized source meets the (4, 5) minimum; the second source's
     shape equals the recorded pair with its two axes exchanged."""
     stream = MinimumImageDimensions(self.example_stream, (4, 5),
                                     which_sources=('source1',))
     iterator = stream.get_epoch_iterator()
     for example, expected in zip(iterator, self.shapes):
         assert example[0].shape[0] >= 4
         assert example[0].shape[1] >= 5
         observed = (example[1].shape[1], example[1].shape[0])
         assert observed == (expected[0], expected[1])
Example #3
0
 def test_minimum_dimensions_batch_stream(self):
     """Every source1 image in each batch is upscaled to at least (4, 5);
     each source2 image keeps its recorded shape with axes swapped."""
     stream = MinimumImageDimensions(self.batch_stream, (4, 5),
                                     which_sources=('source1', ))
     it = stream.get_epoch_iterator()
     for batch, shapes in zip(it, partition_all(self.batch_size,
                                                self.shapes)):
         # BUG FIX: the original asserted bare generator expressions,
         # which are always truthy, so these checks never actually ran.
         assert all(example.shape[0] >= 4 and example.shape[1] >= 5
                    for example in batch[0])
         assert all(example.shape[1] == shp[0] and example.shape[0] == shp[1]
                    for example, shp in zip(batch[1], shapes))
Example #4
0
 def test_minimum_dimensions_batch_stream(self):
     """Check each batch: source1 images meet the (4, 5) minimum and
     source2 images match the recorded shapes with axes exchanged."""
     stream = MinimumImageDimensions(self.batch_stream, (4, 5),
                                     which_sources=('source1',))
     it = stream.get_epoch_iterator()
     for batch, shapes in zip(it, partition_all(self.batch_size,
                                                self.shapes)):
         # BUG FIX: asserting a generator expression is always True
         # because a generator object is truthy; wrap in all() so the
         # per-example conditions are actually evaluated.
         assert all(example.shape[0] >= 4 and example.shape[1] >= 5
                    for example in batch[0])
         assert all(example.shape[1] == shp[0] and
                    example.shape[0] == shp[1]
                    for example, shp in zip(batch[1], shapes))
def create_data(data, size, batch_size, _port):
    """Build the Dogs-vs-Cats preprocessing pipeline and serve it.

    Parameters:
        data: "train" (examples 0-20000, served on ``_port + 2``) or
              "valid" (examples 20000-25000, served on ``_port + 3``).
        size: minimum image dimensions for ``MinimumImageDimensions``.
        batch_size: batch size used by the ``ShuffledScheme``.
        _port: base port the served port is derived from.

    Blocks inside ``start_server`` and does not return.
    Raises ValueError for an unrecognized ``data`` value.
    """
    if data == "train":
        cats = DogsVsCats(('train', ), subset=slice(0, 20000))
        port = _port + 2
    elif data == "valid":
        cats = DogsVsCats(('train', ), subset=slice(20000, 25000))
        port = _port + 3
    else:
        # BUG FIX: previously fell through with `cats`/`port` unbound,
        # producing a confusing UnboundLocalError below.
        raise ValueError("data must be 'train' or 'valid', got %r" % (data,))
    # BUG FIX: was a Python 2 print statement (SyntaxError on Python 3).
    print('port', port)
    stream = DataStream.default_stream(cats,
                                       iteration_scheme=ShuffledScheme(
                                           cats.num_examples, batch_size))
    stream_downscale = MinimumImageDimensions(
        stream, size, which_sources=('image_features', ))
    stream_rotate = FlipAsYouCan(stream_downscale, )
    # NOTE(review): `image_size` is not defined in this function; it is
    # presumably a module-level constant, but the parameter `size` may
    # have been intended here — confirm against the module.
    stream_max = ScikitResize(stream_rotate,
                              image_size,
                              which_sources=('image_features', ))
    stream_scale = ScaleAndShift(stream_max,
                                 1. / 255,
                                 0,
                                 which_sources=('image_features', ))
    stream_data = Cast(stream_scale,
                       dtype='float32',
                       which_sources=('image_features', ))
    start_server(stream_data, port=port)
def create_data(data):
    """Wrap *data* in the preprocessing pipeline: shuffled batches,
    minimum/maximum image-dimension enforcement, random horizontal
    flips and 2-D rotations, a scale by 1/255 and a float32 cast.
    Uses the module-level ``batch_size`` and ``image_size``."""
    scheme = ShuffledScheme(data.num_examples, batch_size)
    pipeline = DataStream(data, iteration_scheme=scheme)

    # Data Augmentation
    pipeline = MinimumImageDimensions(pipeline, image_size,
                                      which_sources=('image_features', ))
    pipeline = MaximumImageDimensions(pipeline, image_size,
                                      which_sources=('image_features', ))
    pipeline = RandomHorizontalSwap(pipeline,
                                    which_sources=('image_features', ))
    pipeline = Random2DRotation(pipeline,
                                which_sources=('image_features', ))
    #stream = ScikitResize(stream, image_size, which_sources=('image_features',))

    # Data Transformation
    pipeline = ScaleAndShift(pipeline, 1. / 255, 0,
                             which_sources=('image_features', ))
    pipeline = Cast(pipeline, dtype='float32',
                    which_sources=('image_features', ))
    return pipeline
Example #7
0
 def create_data(data):
     """Return a stream over *data*: shuffled batches, minimum-dimension
     upscaling, a ScikitResize to ``image_size``, a scale by 1/255 and a
     cast to float32 (uses module-level ``batch_size``/``image_size``)."""
     scheme = ShuffledScheme(data.num_examples, batch_size)
     base = DataStream.default_stream(data, iteration_scheme=scheme)
     downscaled = MinimumImageDimensions(base, image_size,
                                         which_sources=('image_features', ))
     #stream_rotate = Random2DRotation(stream_downscale, which_sources=('image_features',))
     resized = ScikitResize(downscaled, image_size,
                            which_sources=('image_features', ))
     scaled = ScaleAndShift(resized, 1. / 255, 0,
                            which_sources=('image_features', ))
     #stream_flat = Flatten(stream_scale, which_sources=('image_features',))
     return Cast(scaled, dtype='float32',
                 which_sources=('image_features', ))
Example #8
0
 def test_axes_exception(self):
     """Passing a 4-D array to transform_source_example must raise
     NotImplementedError."""
     transformer = MinimumImageDimensions(self.example_stream, (4, 5),
                                          which_sources=('source1', ))
     bad_example = numpy.empty((2, 3, 4, 2))
     assert_raises(NotImplementedError,
                   transformer.transform_source_example,
                   bad_example, 'source1')
Example #9
0
from blocks.extensions import Printing, FinishAfter
from blocks.extensions.monitoring import TrainingDataMonitoring
from blocks.main_loop import MainLoop

# Load the training set: the first 20000 examples of the Dogs-vs-Cats
# 'train' split.
train = DogsVsCats(('train', ), subset=slice(0, 20000))

# We now create a "stream" over the dataset which will return shuffled batches
# of size 128. Using the DataStream.default_stream constructor will turn our
# 8-bit images into floating-point decimals in [0, 1].
stream = DataStream.default_stream(train,
                                   iteration_scheme=ShuffledScheme(
                                       train.num_examples, 128))

# Enlarge images that are too small so the (32, 32) crop below always fits.
# NOTE(review): `downnscale_stream` has a doubled 'n' (typo); harmless,
# but worth renaming when it is safe to touch all references.
downnscale_stream = MinimumImageDimensions(stream, (64, 64),
                                           which_sources=('image_features', ))

# Our images are of different sizes, so we'll use a Fuel transformer
# to take random crops of size (32 x 32) from each image
cropped_stream = RandomFixedSizeCrop(downnscale_stream, (32, 32),
                                     which_sources=('image_features', ))

# We'll use a simple MLP, so we need to flatten the images
# from (channel, width, height) to simply (features,)
flattened_stream = Flatten(cropped_stream, which_sources=('image_features', ))

# Create the Theano MLP
import theano
from theano import tensor
import numpy