def test_average_pooling_inc_padding():
    x = tensor.tensor4("x")
    brick = AveragePooling((2, 2), ignore_border=True,
                           padding=(1, 1), include_padding=True)
    y = brick.apply(x)
    output = y.eval({x: 3 * numpy.ones((1, 1, 2, 2),
                                       dtype=theano.config.floatX)})
    # Each 2x2 window covers one real value (3) and three zero padding cells;
    # with padding included in the average, the result is 3 / 4 = 0.75.
    expected_out = numpy.array([0.75, 0.75, 0.75, 0.75]).reshape(1, 1, 2, 2)
    assert_allclose(expected_out, output)
def test_average_pooling_exc_padding():
    x = tensor.tensor4("x")
    brick = AveragePooling((2, 2), ignore_border=True,
                           padding=(1, 1), include_padding=False)
    y = brick.apply(x)
    x_ = 3 * numpy.ones((1, 1, 2, 2), dtype=theano.config.floatX)
    output = y.eval({x: x_})
    # With the zero padding excluded from the average, each window averages
    # only its single real value, so the output equals the input.
    assert_allclose(x_, output)
def test_batch_normalization_inside_convolutional_sequence():
    """Test that BN bricks work in ConvolutionalSequences."""
    conv_seq = ConvolutionalSequence(
        [Convolutional(filter_size=(3, 3), num_filters=4),
         BatchNormalization(broadcastable=(False, True, True)),
         AveragePooling(pooling_size=(2, 2)),
         BatchNormalization(broadcastable=(False, False, False)),
         MaxPooling(pooling_size=(2, 2), step=(1, 1))],
        weights_init=Constant(1.),
        biases_init=Constant(2.),
        image_size=(10, 8), num_channels=9)

    conv_seq_no_bn = ConvolutionalSequence(
        [Convolutional(filter_size=(3, 3), num_filters=4),
         AveragePooling(pooling_size=(2, 2)),
         MaxPooling(pooling_size=(2, 2), step=(1, 1))],
        weights_init=Constant(1.),
        biases_init=Constant(2.),
        image_size=(10, 8), num_channels=9)

    conv_seq.initialize()
    conv_seq_no_bn.initialize()
    rng = numpy.random.RandomState((2015, 12, 17))
    input_ = random_unif(rng, (2, 9, 10, 8))

    x = theano.tensor.tensor4()
    ybn = conv_seq.apply(x)
    y = conv_seq_no_bn.apply(x)
    # At initialization the BN bricks act as the identity, so both sequences
    # must produce the same output.
    yield (assert_equal, ybn.eval({x: input_}), y.eval({x: input_}))

    # Tripling the population stdev of the last BN brick should scale the
    # output down by the same factor.
    std = conv_seq.children[-2].population_stdev
    std.set_value(3 * std.get_value(borrow=True))
    yield (assert_equal, ybn.eval({x: input_}), y.eval({x: input_}) / 3.)
def test_average_pooling():
    x = tensor.tensor4("x")
    brick = AveragePooling((2, 2))
    y = brick.apply(x)
    tmp = numpy.arange(16, dtype=theano.config.floatX).reshape(1, 1, 4, 4)
    x_ = numpy.tile(tmp, [2, 3, 1, 1])
    out = y.eval({x: x_})
    # The four non-overlapping 2x2 windows of [0..15] average to
    # 10/4, 18/4, 42/4 and 50/4 respectively.
    assert_allclose(out - numpy.array([[10 / 4., 18 / 4.],
                                       [42 / 4., 50 / 4.]]),
                    numpy.zeros_like(out))
def test_pooling_works_in_convolutional_sequence():
    x = tensor.tensor4('x')
    brick = ConvolutionalSequence(
        [AveragePooling((2, 2), step=(2, 2)),
         MaxPooling((4, 4), step=(2, 2), ignore_border=True)],
        image_size=(16, 32), num_channels=3)
    brick.allocate()
    y = brick.apply(x)
    out = y.eval({x: numpy.empty((2, 3, 16, 32),
                                 dtype=theano.config.floatX)})
    # (16, 32) -> (8, 16) after the strided average pooling, then
    # ((8 - 4) // 2 + 1, (16 - 4) // 2 + 1) = (3, 7) after the max pooling.
    assert out.shape == (2, 3, 3, 7)
############### SECOND STAGE #####################
out2 = inception((20, 20), 192, 64, 96, 128, 16, 32, 32, out, 10)
out3 = inception((20, 20), 256, 128, 128, 192, 32, 96, 64, out2, 20)
out31 = MaxPooling((2, 2), name='poolLow').apply(out3)
out4 = inception((10, 10), 480, 192, 96, 208, 16, 48, 64, out31, 30)
out5 = inception((10, 10), 512, 160, 112, 224, 24, 64, 64, out4, 40)
out6 = inception((10, 10), 512, 128, 128, 256, 24, 64, 64, out5, 50)
out7 = inception((10, 10), 512, 112, 144, 288, 32, 64, 64, out6, 60)
out8 = inception((10, 10), 528, 256, 160, 320, 32, 128, 128, out7, 70)
out81 = MaxPooling((2, 2), name='poolLow1').apply(out8)
out9 = inception((5, 5), 832, 256, 160, 320, 32, 128, 128, out81, 80)
out10 = inception((5, 5), 832, 384, 192, 384, 48, 128, 128, out9, 90)
out91 = AveragePooling((5, 5), name='poolLow2').apply(out10)

# FIRST SOFTMAX
conv_layers1 = list([
    MaxPooling((2, 2), name='MaxPol'),
    Convolutional(filter_size=(1, 1), num_filters=128, name='Convx2'),
    Rectifier(),
    MaxPooling((2, 2), name='MaxPol1'),
    Convolutional(filter_size=(1, 1), num_filters=1024, name='Convx3'),
    Rectifier(),
    MaxPooling((2, 2), name='MaxPol2'),
    Convolutional(filter_size=(1, 1), num_filters=2, name='Convx4'),
    Rectifier(),
])
conv_sequence1 = ConvolutionalSequence(conv_layers1, num_channels=512,
def load_vgg_classifier():
    """Loads the VGG19 classifier into a brick.

    Relies on ``vgg19_normalized.pkl`` containing the model parameters.

    Returns
    -------
    convnet : :class:`blocks.bricks.conv.ConvolutionalSequence`
        VGG19 convolutional brick.

    """
    convnet = ConvolutionalSequence(
        layers=[
            Convolutional(filter_size=(3, 3), border_mode=(1, 1),
                          num_filters=64, name='conv1_1'),
            Rectifier(),
            Convolutional(filter_size=(3, 3), border_mode=(1, 1),
                          num_filters=64, name='conv1_2'),
            Rectifier(),
            AveragePooling(pooling_size=(2, 2), name='pool1'),
            Convolutional(filter_size=(3, 3), border_mode=(1, 1),
                          num_filters=128, name='conv2_1'),
            Rectifier(),
            Convolutional(filter_size=(3, 3), border_mode=(1, 1),
                          num_filters=128, name='conv2_2'),
            Rectifier(),
            AveragePooling(pooling_size=(2, 2), name='pool2'),
            Convolutional(filter_size=(3, 3), border_mode=(1, 1),
                          num_filters=256, name='conv3_1'),
            Rectifier(),
            Convolutional(filter_size=(3, 3), border_mode=(1, 1),
                          num_filters=256, name='conv3_2'),
            Rectifier(),
            Convolutional(filter_size=(3, 3), border_mode=(1, 1),
                          num_filters=256, name='conv3_3'),
            Rectifier(),
            Convolutional(filter_size=(3, 3), border_mode=(1, 1),
                          num_filters=256, name='conv3_4'),
            Rectifier(),
            AveragePooling(pooling_size=(2, 2), name='pool3'),
            Convolutional(filter_size=(3, 3), border_mode=(1, 1),
                          num_filters=512, name='conv4_1'),
            Rectifier(),
            Convolutional(filter_size=(3, 3), border_mode=(1, 1),
                          num_filters=512, name='conv4_2'),
            Rectifier(),
            Convolutional(filter_size=(3, 3), border_mode=(1, 1),
                          num_filters=512, name='conv4_3'),
            Rectifier(),
            Convolutional(filter_size=(3, 3), border_mode=(1, 1),
                          num_filters=512, name='conv4_4'),
            Rectifier(),
            AveragePooling(pooling_size=(2, 2), name='pool4'),
            Convolutional(filter_size=(3, 3), border_mode=(1, 1),
                          num_filters=512, name='conv5_1'),
            Rectifier(),
            Convolutional(filter_size=(3, 3), border_mode=(1, 1),
                          num_filters=512, name='conv5_2'),
            Rectifier(),
            Convolutional(filter_size=(3, 3), border_mode=(1, 1),
                          num_filters=512, name='conv5_3'),
            Rectifier(),
            Convolutional(filter_size=(3, 3), border_mode=(1, 1),
                          num_filters=512, name='conv5_4'),
            Rectifier(),
            AveragePooling(pooling_size=(2, 2), name='pool5'),
        ],
        num_channels=3,
        image_size=(32, 32),
        tied_biases=True,
        weights_init=Constant(0),
        biases_init=Constant(0),
        name='convnet')
    convnet.initialize()

    with open('vgg19_normalized.pkl', 'rb') as f:
        if six.PY3:
            data = cPickle.load(f, encoding='latin1')
        else:
            data = cPickle.load(f)
    parameter_values = data['param values']
    # Weights and biases alternate in the pickled parameter list.
    conv_weights = parameter_values[::2]
    conv_biases = parameter_values[1::2]
    # Indices of the Convolutional bricks inside convnet.layers, skipping
    # the Rectifier and AveragePooling layers.
    conv_indices = [0, 2, 5, 7, 10, 12, 14, 16,
                    19, 21, 23, 25, 28, 30, 32, 34]
    conv_layers = [convnet.layers[i] for i in conv_indices]
    for layer, W_val, b_val in zip(conv_layers, conv_weights, conv_biases):
        W, b = layer.parameters
        W.set_value(W_val)
        b.set_value(b_val)
    return convnet
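# A minimal usage sketch for the loader above (an illustration, not part of
# the original script): it assumes 'vgg19_normalized.pkl' is present in the
# working directory and pulls in the numpy/theano imports it needs itself.
if __name__ == '__main__':
    import numpy
    import theano
    from theano import tensor

    vgg = load_vgg_classifier()
    images = tensor.tensor4('images')
    features = vgg.apply(images)
    compute_features = theano.function([images], features)
    # With image_size=(32, 32) and five 2x2 poolings the spatial map shrinks
    # to 1x1, so the printed shape should be (1, 512, 1, 1).
    print(compute_features(numpy.zeros((1, 3, 32, 32),
                                       dtype=theano.config.floatX)).shape)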
# FIRE MODULES
out1 = Fire((55, 55), 96, 16, 16, 16, out, 10)
out2 = Fire((55, 55), 128, 16, 16, 16, out1, 25)
out3 = Fire((55, 55), 128, 32, 32, 32, out2, 300)
out31 = MaxPooling((3, 3), step=(2, 2), padding=(1, 1),
                   name='poolLow').apply(out3)
out4 = Fire((28, 28), 256, 32, 32, 32, out31, 45)
out5 = Fire((28, 28), 256, 48, 48, 48, out4, 500)
out6 = Fire((28, 28), 384, 48, 48, 48, out5, 65)
out7 = Fire((28, 28), 384, 64, 64, 64, out6, 700)
out71 = MaxPooling((3, 3), step=(2, 2), padding=(1, 1),
                   name='poolLow2').apply(out7)
out8 = Fire((14, 14), 512, 64, 64, 64, out71, 85)

# LAST LAYERS
conv_layers1 = list([
    Convolutional(filter_size=(1, 1), num_filters=2, name='Convx2'),
    BatchNormalization(name='batch_vx2'),
    Rectifier(),
    AveragePooling((14, 14), name='MaxPol1')])
conv_sequence1 = ConvolutionalSequence(conv_layers1, num_channels=512,
                                       image_size=(14, 14),
                                       weights_init=Orthogonal(),
                                       use_bias=False, name='ConvSeq3')
conv_sequence1.initialize()
out_soft1 = Flattener(name='Flatt1').apply(conv_sequence1.apply(out8))
predict1 = NDimensionalSoftmax(name='Soft1').apply(out_soft1)
cost = CategoricalCrossEntropy(name='Cross1').apply(
    y.flatten(), predict1).copy(name='cost')
error = MisclassificationRate().apply(y.flatten(), predict1)

# Small trick to plot the error rate in two different plots (the same
# variable cannot be used twice by the plotting extension, for an unknown
# reason).
error_rate = error.copy(name='error_rate')
error_rate2 = error.copy(name='error_rate2')
cg = ComputationGraph([cost, error_rate])

########### GET THE DATA #####################
stream_train = ServerDataStream(('image_features', 'targets'), False,
                                port=5512, hwm=40)
stream_valid = ServerDataStream(('image_features', 'targets'), False,
                                port=5513, hwm=40)
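# A possible continuation (a sketch only, not part of the original script):
# wiring the cost, computation graph and data streams above into a standard
# Blocks training loop. The step rule, learning rate, epoch count and
# monitored channels below are assumptions, not values from the source.
from blocks.algorithms import GradientDescent, Scale
from blocks.extensions import FinishAfter, Printing
from blocks.extensions.monitoring import (DataStreamMonitoring,
                                          TrainingDataMonitoring)
from blocks.main_loop import MainLoop

algorithm = GradientDescent(cost=cost, parameters=cg.parameters,
                            step_rule=Scale(learning_rate=0.01))
extensions = [TrainingDataMonitoring([cost, error_rate], after_epoch=True),
              DataStreamMonitoring([cost, error_rate2], stream_valid,
                                   prefix='valid'),
              FinishAfter(after_n_epochs=10),
              Printing()]
main_loop = MainLoop(algorithm=algorithm, data_stream=stream_train,
                     extensions=extensions)
main_loop.run()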