from collections import OrderedDict
import numpy
from numpy.testing import assert_equal
from fuel.datasets import IterableDataset
from fuel.streams import DataStream
from fuel.transformers import Cast


def test_cast():
    stream = DataStream(IterableDataset(OrderedDict([
        ('features', numpy.array([1, 2, 3]).astype('float64')),
        ('targets', [0, 1, 0])])))
    wrapper = Cast(stream, 'float32', which_sources=('features',))
    assert_equal(list(wrapper.get_epoch_iterator()),
                 [(numpy.array(1), 0), (numpy.array(2), 1), (numpy.array(3), 0)])
    assert all(f.dtype == 'float32' for f, t in wrapper.get_epoch_iterator())
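The same transformer works on batch streams as well; here is a minimal sketch, reusing the imports above, with IndexableDataset and SequentialScheme (both stock fuel) standing in for a real dataset:

from fuel.datasets import IndexableDataset
from fuel.schemes import SequentialScheme

# Six scalar examples served in batches of two; Cast re-types each batch.
dataset = IndexableDataset({'features': numpy.arange(6, dtype='float64')})
stream = DataStream(dataset, iteration_scheme=SequentialScheme(6, batch_size=2))
batches = Cast(stream, 'float32', which_sources=('features',))
for (features,) in batches.get_epoch_iterator():
    print(features.dtype, features.shape)  # float32 (2,)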
Example #2
def test_cast():
    stream = DataStream(
        IterableDataset({'features': numpy.array([1, 2, 3]).astype('float64'),
                         'targets': [0, 1, 0]}))
    wrapper = Cast(stream, 'float32', which_sources=('features',))
    assert_equal(
        list(wrapper.get_epoch_iterator()),
        [(numpy.array(1), 0), (numpy.array(2), 1), (numpy.array(3), 0)])
    assert all(f.dtype == 'float32' for f, t in wrapper.get_epoch_iterator())
Example #3
def batch_iterator(dataset, batchsize, shuffle=False):
    """Return a one-epoch iterator over the dataset, with 'features'
    rescaled to [0, 1) and cast to theano.config.floatX."""
    if shuffle:
        train_scheme = ShuffledScheme(examples=dataset.num_examples, batch_size=batchsize)
    else:
        train_scheme = SequentialScheme(examples=dataset.num_examples, batch_size=batchsize)
    stream = DataStream.default_stream(dataset=dataset, iteration_scheme=train_scheme)
    stream_scale = ScaleAndShift(stream, 1. / 256., 0, which_sources=('features',))
    stream_data = Cast(stream_scale, dtype=theano.config.floatX, which_sources=('features',))
    return stream_data.get_epoch_iterator()
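For context, a hedged usage sketch: fuel's built-in MNIST stands in for whatever dataset the helper is actually fed, and assumes the data file has been prepared with fuel-download/fuel-convert.

from fuel.datasets import MNIST

train_set = MNIST(('train',))
# Smoke-test one batch; features come back scaled and cast by the helper.
for features, targets in batch_iterator(train_set, batchsize=128, shuffle=True):
    print(features.shape, features.dtype, targets.shape)
    break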
Example #4
class TestCast(object):
    def setUp(self):
        dataset = IterableDataset(
            OrderedDict([
                ('features', numpy.array([1, 2, 3]).astype('float64')),
                ('targets', [0, 1, 0])]),
            axis_labels={'features': ('batch',), 'targets': ('batch',)})
        self.stream = DataStream(dataset)
        self.wrapper = Cast(
            self.stream, 'float32', which_sources=('features',))

    def test_cast(self):
        assert_equal(
            list(self.wrapper.get_epoch_iterator()),
            [(numpy.array(1), 0), (numpy.array(2), 1), (numpy.array(3), 0)])
        assert all(
            f.dtype == 'float32' for f, t in self.wrapper.get_epoch_iterator())

    def test_axis_labels_are_passed_through(self):
        assert_equal(self.wrapper.axis_labels, self.stream.axis_labels)
Example #5
upscale_test_stream = MaximumImageDimensions(
    data_stream=downscale_test_stream,
    maximum_shape=image_size,
    which_sources=('image_features',))

scaled_test_stream = ScaleAndShift(
    data_stream=upscale_test_stream,
    scale=1. / 255,
    shift=0,
    which_sources=('image_features',))

data_test_stream = Cast(
    data_stream=scaled_test_stream,
    dtype='float32',
    which_sources=('image_features',))
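A quick sanity check of what the chained stream yields (a sketch; assumes image_features is the only source of the underlying test stream):

image, = next(data_test_stream.get_epoch_iterator())
print(image.dtype, image.min(), image.max())  # expect float32 values in [0, 1]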
test_x = tensor.tensor4('image_features')
predict_function = theano.function(
    inputs=[test_x],
    outputs=top_mlp.apply(Flattener().apply(conv_sequence.apply(test_x))))

import csv
csvfile = csv.writer(open("test_pred_overfeat.csv", 'w', newline=''))
for i, test_image in enumerate(data_test_stream.get_epoch_iterator()):
    prediction = predict_function(test_image[0])[0]
    isadog = numpy.argmax(prediction)
    csvfile.writerow([str(i + 1), str(isadog)])
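If the consumer expects a header row (the Kaggle dogs-vs-cats submission format does; these column names are illustrative), write it once before the loop:

csvfile.writerow(['id', 'label'])  # hypothetical header; emit before the prediction loop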


Example #6
# Get the test stream
from fuel.datasets.dogs_vs_cats import DogsVsCats
from fuel.streams import DataStream, ServerDataStream
from fuel.schemes import ShuffledScheme, SequentialExampleScheme
from fuel.transformers.image import (
    RandomFixedSizeCrop, MinimumImageDimensions, MaximumImageDimensions,
    Random2DRotation)
from fuel.transformers import Flatten, Cast, ScaleAndShift
size = (128, 128)
cats = DogsVsCats(('test',))
stream = DataStream.default_stream(cats, iteration_scheme=SequentialExampleScheme(cats.num_examples))
stream_upscale = MaximumImageDimensions(stream, size, which_sources=('image_features',))
stream_scale = ScaleAndShift(stream_upscale, 1./255, 0, which_sources=('image_features',))
stream_data = Cast(stream_scale, dtype='float32', which_sources=('image_features',))

# Load the parameters of the model
params = load_parameter_values('convnet_parameters.pkl')
mo = Model(predict)
mo.set_parameter_values(params)
# Create the forward propagation function
fprop = function(mo.inputs, mo.outputs[0], allow_input_downcast=True)
tab = []
# Get the prediction for each example of the test set. `data` is a
# one-element tuple, which is coerced to an array with a leading batch
# axis of one when passed to fprop; naming the result `prediction`
# avoids shadowing the `predict` graph output the Model was built from.
for i, data in enumerate(stream_data.get_epoch_iterator(), start=1):
    prediction = np.argmax(fprop(data))
    tab.append([i, prediction])
    print("%d,%d" % (i, prediction))
# Save predictions in a csv file
np.savetxt("dump.csv", tab, delimiter=",", fmt='%d')
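If a header line is wanted in the dump, numpy's savetxt can emit it directly; the column names here are illustrative:

# comments='' suppresses numpy's default '# ' prefix on the header line.
np.savetxt("dump.csv", tab, delimiter=",", fmt='%d',
           header="id,label", comments='')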
