def build_conv_layers(self, image=None):
    if image is None:
        image = T.ftensor4('spectrogram')

    conv_list = []
    for layer in range(self.layers):
        layer_param = self.params[layer]
        conv_layer = Convolutional(layer_param[0], layer_param[1], layer_param[2])
        pool_layer = MaxPooling(layer_param[3])

        conv_layer.name = "convolution" + str(layer)
        pool_layer.name = "maxpooling" + str(layer)

        conv_list.append(conv_layer)
        conv_list.append(pool_layer)
        conv_list.append(Rectifier())

    conv_seq = ConvolutionalSequence(
        conv_list,
        self.params[0][2],
        image_size=self.image_size,
        weights_init=IsotropicGaussian(std=0.5, mean=0),
        biases_init=Constant(0))

    conv_seq._push_allocation_config()
    conv_seq.initialize()
    out = conv_seq.apply(image)
    return out, conv_seq.get_dim('output')
def __init__(self, **kwargs):
    conv_layers = [
        Convolutional(filter_size=(3, 3), num_filters=64, border_mode=(1, 1), name='conv_1'),
        Rectifier(),
        Convolutional(filter_size=(3, 3), num_filters=64, border_mode=(1, 1), name='conv_2'),
        Rectifier(),
        MaxPooling((2, 2), step=(2, 2), name='pool_2'),

        Convolutional(filter_size=(3, 3), num_filters=128, border_mode=(1, 1), name='conv_3'),
        Rectifier(),
        Convolutional(filter_size=(3, 3), num_filters=128, border_mode=(1, 1), name='conv_4'),
        Rectifier(),
        MaxPooling((2, 2), step=(2, 2), name='pool_4'),

        Convolutional(filter_size=(3, 3), num_filters=256, border_mode=(1, 1), name='conv_5'),
        Rectifier(),
        Convolutional(filter_size=(3, 3), num_filters=256, border_mode=(1, 1), name='conv_6'),
        Rectifier(),
        Convolutional(filter_size=(3, 3), num_filters=256, border_mode=(1, 1), name='conv_7'),
        Rectifier(),
        MaxPooling((2, 2), step=(2, 2), name='pool_7'),

        Convolutional(filter_size=(3, 3), num_filters=512, border_mode=(1, 1), name='conv_8'),
        Rectifier(),
        Convolutional(filter_size=(3, 3), num_filters=512, border_mode=(1, 1), name='conv_9'),
        Rectifier(),
        Convolutional(filter_size=(3, 3), num_filters=512, border_mode=(1, 1), name='conv_10'),
        Rectifier(),
        MaxPooling((2, 2), step=(2, 2), name='pool_10'),

        Convolutional(filter_size=(3, 3), num_filters=512, border_mode=(1, 1), name='conv_11'),
        Rectifier(),
        Convolutional(filter_size=(3, 3), num_filters=512, border_mode=(1, 1), name='conv_12'),
        Rectifier(),
        Convolutional(filter_size=(3, 3), num_filters=512, border_mode=(1, 1), name='conv_13'),
        Rectifier(),
        MaxPooling((2, 2), step=(2, 2), name='pool_13'),
    ]

    mlp = MLP([Rectifier(name='fc_14'), Rectifier('fc_15'), Softmax()],
              [25088, 4096, 4096, 1000])

    conv_sequence = ConvolutionalSequence(conv_layers, 3, image_size=(224, 224))

    super(VGGNet, self).__init__(
        [conv_sequence.apply, Flattener().apply, mlp.apply], **kwargs)
def create_kim_cnn(layer0_input, embedding_size, input_len, config, pref):
    '''
    One layer convolution with different filter-sizes and maxpooling
    '''
    filter_width_list = [int(fw) for fw in config[pref + '_filterwidth'].split()]
    print filter_width_list
    num_filters = int(config[pref + '_num_filters'])
    # num_filters /= len(filter_width_list)
    totfilters = 0
    for i, fw in enumerate(filter_width_list):
        num_feature_map = input_len - fw + 1  # 39
        conv = Convolutional(
            image_size=(input_len, embedding_size),
            filter_size=(fw, embedding_size),
            num_filters=min(int(config[pref + '_maxfilter']), num_filters * fw),
            num_channels=1)
        totfilters += conv.num_filters
        initialize2(conv, num_feature_map)
        conv.name = pref + 'conv_' + str(fw)
        convout = conv.apply(layer0_input)

        pool_layer = MaxPooling(pooling_size=(num_feature_map, 1))
        pool_layer.name = pref + 'pool_' + str(fw)
        act = Rectifier()
        act.name = pref + 'act_' + str(fw)
        outpool = act.apply(pool_layer.apply(convout)).flatten(2)

        if i == 0:
            outpools = outpool
        else:
            outpools = T.concatenate([outpools, outpool], axis=1)
    name_rep_len = totfilters
    return outpools, name_rep_len
def test_max_pooling_old_pickle():
    brick = MaxPooling((3, 4))
    brick.allocate()
    # Simulate old pickle, before #899.
    del brick.ignore_border
    del brick.mode
    del brick.padding
    # Pickle in this broken state and re-load.
    broken_pickled = pickle.dumps(brick)
    loaded = pickle.loads(broken_pickled)
    # Same shape, same step.
    assert brick.pooling_size == loaded.pooling_size
    assert brick.step == loaded.step
    # Check that the new attributes were indeed added.
    assert hasattr(loaded, 'padding') and loaded.padding == (0, 0)
    assert hasattr(loaded, 'mode') and loaded.mode == 'max'
    assert hasattr(loaded, 'ignore_border') and not loaded.ignore_border
    try:
        loaded.apply(tensor.tensor4())
    except Exception:
        raise AssertionError("failed to apply on unpickled MaxPooling")
    # Make sure we're not overriding these attributes wrongly.
    new_brick = MaxPooling((4, 3), padding=(2, 1))
    new_brick_unpickled = pickle.loads(pickle.dumps(new_brick))
    assert new_brick_unpickled.padding == (2, 1)
    assert new_brick_unpickled.ignore_border
def create_cnn_general(embedded_x, mycnf, max_len, embedding_size, inp_conv=False):
    fv_len = 0
    filter_sizes = mycnf['cnn_config']['filter_sizes']
    num_filters = mycnf['cnn_config']['num_filters']
    for i, fw in enumerate(filter_sizes):
        conv = ConvolutionalActivation(
            activation=Rectifier().apply,
            filter_size=(fw, embedding_size),
            num_filters=num_filters,
            num_channels=1,
            image_size=(max_len, embedding_size),
            name="conv" + str(fw) + embedded_x.name)
        pooling = MaxPooling((max_len - fw + 1, 1),
                             name="pool" + str(fw) + embedded_x.name)
        initialize([conv])
        if inp_conv:
            convinp = embedded_x
        else:
            convinp = embedded_x.flatten().reshape(
                (embedded_x.shape[0], 1, max_len, embedding_size))
        onepool = pooling.apply(conv.apply(convinp)).flatten(2)
        if i == 0:
            outpools = onepool
        else:
            outpools = T.concatenate([outpools, onepool], axis=1)
        fv_len += conv.num_filters
    return outpools, fv_len
def create_OLD_kim_cnn(layer0_input, embedding_size, input_len, config, pref):
    '''
    One layer convolution with the same filtersize
    '''
    filter_width_list = [int(fw) for fw in config[pref + '_filterwidth'].split()]
    print filter_width_list
    num_filters = int(config[pref + '_num_filters'])
    totfilters = 0
    for i, fw in enumerate(filter_width_list):
        num_feature_map = input_len - fw + 1  # 39
        conv = Convolutional(
            filter_size=(fw, embedding_size),
            num_filters=num_filters,
            num_channels=1,
            image_size=(input_len, embedding_size),
            name="conv" + str(fw))
        pooling = MaxPooling((num_feature_map, 1), name="pool" + str(fw))
        initialize([conv])
        totfilters += num_filters
        outpool = Flattener(name="flat" + str(fw)).apply(
            Rectifier(name=pref + 'act_' + str(fw)).apply(
                pooling.apply(conv.apply(layer0_input))))
        if i == 0:
            outpools = outpool
        else:
            outpools = T.concatenate([outpools, outpool], axis=1)
    name_rep_len = totfilters
    return outpools, name_rep_len
def test_batch_normalization_inside_convolutional_sequence():
    """Test that BN bricks work in ConvolutionalSequences."""
    conv_seq = ConvolutionalSequence(
        [Convolutional(filter_size=(3, 3), num_filters=4),
         BatchNormalization(broadcastable=(False, True, True)),
         AveragePooling(pooling_size=(2, 2)),
         BatchNormalization(broadcastable=(False, False, False)),
         MaxPooling(pooling_size=(2, 2), step=(1, 1))],
        weights_init=Constant(1.),
        biases_init=Constant(2.),
        image_size=(10, 8), num_channels=9)

    conv_seq_no_bn = ConvolutionalSequence(
        [Convolutional(filter_size=(3, 3), num_filters=4),
         AveragePooling(pooling_size=(2, 2)),
         MaxPooling(pooling_size=(2, 2), step=(1, 1))],
        weights_init=Constant(1.),
        biases_init=Constant(2.),
        image_size=(10, 8), num_channels=9)

    conv_seq.initialize()
    conv_seq_no_bn.initialize()
    rng = numpy.random.RandomState((2015, 12, 17))
    input_ = random_unif(rng, (2, 9, 10, 8))

    x = theano.tensor.tensor4()
    ybn = conv_seq.apply(x)
    y = conv_seq_no_bn.apply(x)
    yield (assert_equal, ybn.eval({x: input_}), y.eval({x: input_}))

    std = conv_seq.children[-2].population_stdev
    std.set_value(3 * std.get_value(borrow=True))
    yield (assert_equal, ybn.eval({x: input_}), y.eval({x: input_}) / 3.)
def __init__(self, batch_norm, num_channels=1, **kwargs):
    self.layers = []
    self.layers.append(Convolutional(filter_size=(3, 3), num_filters=64,
                                     border_mode=(1, 1), name='conv_1'))
    self.layers.append(Rectifier())
    self.layers.append(MaxPooling(pooling_size=(2, 2), name='pool_1'))

    self.layers.append(Convolutional(filter_size=(3, 3), num_filters=128,
                                     border_mode=(1, 1), name='conv_2'))
    self.layers.append(Rectifier())
    self.layers.append(MaxPooling(pooling_size=(2, 2), name='pool_2'))

    self.layers.append(Convolutional(filter_size=(3, 3), num_filters=256,
                                     border_mode=(1, 1), name='conv_3'))
    if batch_norm:
        self.layers.append(BatchNormalization(broadcastable=(False, True, True),
                                              name='bn_1'))
    self.layers.append(Rectifier())

    self.layers.append(Convolutional(filter_size=(3, 3), num_filters=256,
                                     border_mode=(1, 1), name='conv_4'))
    self.layers.append(Rectifier())
    self.layers.append(MaxPooling(pooling_size=(1, 2), step=(1, 2), name='pool_3'))

    self.layers.append(Convolutional(filter_size=(3, 3), num_filters=512,
                                     border_mode=(1, 1), name='conv_5'))
    if batch_norm:
        self.layers.append(BatchNormalization(broadcastable=(False, True, True),
                                              name='bn_2'))
    self.layers.append(Rectifier())
    self.layers.append(MaxPooling(pooling_size=(2, 1), step=(2, 1), name='pool_4'))

    self.layers.append(Convolutional(filter_size=(3, 3), num_filters=512,
                                     border_mode=(1, 1), name='conv_6'))
    if batch_norm:
        self.layers.append(BatchNormalization(broadcastable=(False, True, True),
                                              name='bn_3'))
    self.layers.append(Rectifier())

    self.conv_sequence = ConvolutionalSequence(self.layers, 1)
def __init__(self, image_dimension, **kwargs):
    layers = []

    #############################################
    # a first block with 2 convolutions of 32 (3, 3) filters
    layers.append(Convolutional((3, 3), 32, border_mode='half'))
    layers.append(Rectifier())
    layers.append(Convolutional((3, 3), 32, border_mode='half'))
    layers.append(Rectifier())

    # maxpool with size=(2, 2)
    layers.append(MaxPooling((2, 2)))

    #############################################
    # a 2nd block with 3 convolutions of 64 (3, 3) filters
    layers.append(Convolutional((3, 3), 64, border_mode='half'))
    layers.append(Rectifier())
    layers.append(Convolutional((3, 3), 64, border_mode='half'))
    layers.append(Rectifier())
    layers.append(Convolutional((3, 3), 64, border_mode='half'))
    layers.append(Rectifier())

    # maxpool with size=(2, 2)
    layers.append(MaxPooling((2, 2)))

    #############################################
    # a 3rd block with 4 convolutions of 128 (3, 3) filters
    layers.append(Convolutional((3, 3), 128, border_mode='half'))
    layers.append(Rectifier())
    layers.append(Convolutional((3, 3), 128, border_mode='half'))
    layers.append(Rectifier())
    layers.append(Convolutional((3, 3), 128, border_mode='half'))
    layers.append(Rectifier())
    layers.append(Convolutional((3, 3), 128, border_mode='half'))
    layers.append(Rectifier())

    # maxpool with size=(2, 2)
    layers.append(MaxPooling((2, 2)))

    self.conv_sequence = ConvolutionalSequence(layers, 3,
                                               image_size=image_dimension)

    flattener = Flattener()
    self.top_mlp = MLP(activations=[Rectifier(), Logistic()], dims=[500, 1])

    application_methods = [self.conv_sequence.apply, flattener.apply,
                           self.top_mlp.apply]
    super(VGGNet, self).__init__(application_methods,
                                 biases_init=Constant(0),
                                 weights_init=Uniform(width=.1), **kwargs)
def apply_cnn(self, l_emb1, l_size1, l_emb2, l_size2, r_emb1, r_size1,
              r_emb2, r_size2, embedding_size, mycnf):
    assert l_size1 == r_size1
    assert l_size2 == r_size2
    assert l_size1 == l_size2
    max_len = l_size1
    fv_len = 0
    filter_sizes = mycnf['cnn_config']['filter_sizes']
    num_filters = mycnf['cnn_config']['num_filters']
    for i, fw in enumerate(filter_sizes):
        conv_left = ConvolutionalActivation(
            activation=Rectifier().apply,
            filter_size=(fw, embedding_size),
            num_filters=num_filters,
            num_channels=1,
            image_size=(max_len, embedding_size),
            name="conv" + str(fw) + l_emb1.name,
            seed=self.curSeed)
        conv_right = ConvolutionalActivation(
            activation=Rectifier().apply,
            filter_size=(fw, embedding_size),
            num_filters=num_filters,
            num_channels=1,
            image_size=(max_len, embedding_size),
            name="conv" + str(fw) + r_emb1.name,
            seed=self.curSeed)
        pooling = MaxPooling((max_len - fw + 1, 1), name="pool" + str(fw))
        initialize([conv_left, conv_right])

        l_convinp1 = l_emb1.flatten().reshape(
            (l_emb1.shape[0], 1, max_len, embedding_size))
        l_convinp2 = l_emb2.flatten().reshape(
            (l_emb2.shape[0], 1, max_len, embedding_size))
        l_pool1 = pooling.apply(conv_left.apply(l_convinp1)).flatten(2)
        l_pool2 = pooling.apply(conv_left.apply(l_convinp2)).flatten(2)

        r_convinp1 = r_emb1.flatten().reshape(
            (r_emb1.shape[0], 1, max_len, embedding_size))
        r_convinp2 = r_emb2.flatten().reshape(
            (r_emb2.shape[0], 1, max_len, embedding_size))
        r_pool1 = pooling.apply(conv_right.apply(r_convinp1)).flatten(2)
        r_pool2 = pooling.apply(conv_right.apply(r_convinp2)).flatten(2)

        onepools1 = T.concatenate([l_pool1, r_pool1], axis=1)
        onepools2 = T.concatenate([l_pool2, r_pool2], axis=1)
        fv_len += conv_left.num_filters * 2
        if i == 0:
            outpools1 = onepools1
            outpools2 = onepools2
        else:
            outpools1 = T.concatenate([outpools1, onepools1], axis=1)
            outpools2 = T.concatenate([outpools2, onepools2], axis=1)
    return outpools1, outpools2, fv_len
def pool_layer(self, name, method, pool, pad, stride, image_size):
    """Creates a MaxPooling brick with the given name, pooling size,
    stride, and image size. If a string other than 'max' is passed in
    the 'method' parameter, the function throws an exception. The 'pad'
    argument is ignored; it is instead handled in the conversion through
    a Padding brick (see below)."""
    # FIX: ignore padding [0 1 0 1]
    if method == 'max':
        layer = MaxPooling(name=name, pooling_size=pool, step=stride,
                           input_dim=image_size)
    else:
        raise Exception("Unsupported pooling method: %s" % method)
    return (layer, layer.get_dim("output"))
def test_max_pooling():
    x = tensor.tensor4("x")
    num_channels = 4
    batch_size = 5
    x_size = 17
    y_size = 13
    pool_size = 3

    pool = MaxPooling((pool_size, pool_size))
    y = pool.apply(x)
    func = function([x], y)

    x_val = numpy.ones((batch_size, num_channels, x_size, y_size),
                       dtype=theano.config.floatX)
    assert_allclose(func(x_val),
                    numpy.ones((batch_size, num_channels,
                                x_size / pool_size,
                                y_size / pool_size)))
    pool.input_dim = (x_size, y_size)
    pool.get_dim("output") == (num_channels,
                               x_size / pool_size + 1,
                               y_size / pool_size + 1)
def test_convolutional_sequence_with_no_input_size():
    # suppose x is outputted by some RNN
    x = tensor.tensor4('x')
    filter_size = (1, 1)
    num_filters = 2
    num_channels = 1
    pooling_size = (1, 1)

    conv = Convolutional(filter_size, num_filters, tied_biases=False,
                         weights_init=Constant(1.), biases_init=Constant(1.))
    act = Rectifier()
    pool = MaxPooling(pooling_size)

    bad_seq = ConvolutionalSequence([conv, act, pool], num_channels,
                                    tied_biases=False)
    assert_raises_regexp(ValueError, 'Cannot infer bias size \S+',
                         bad_seq.initialize)

    seq = ConvolutionalSequence([conv, act, pool], num_channels,
                                tied_biases=True)
    try:
        seq.initialize()
        out = seq.apply(x)
    except TypeError:
        assert False, "This should have succeeded"

    assert out.ndim == 4
def build_pipeline(self, input_shape, params):
    from blocks.bricks import Tanh, Sequence
    from blocks.bricks.conv import Convolutional, MaxPooling
    from blocks.initialization import Uniform

    _, num_channels, input_len, num_freqs = input_shape  # bc01

    # Note: this layer is linear
    conv = Convolutional(
        name='conv',
        filter_size=(params['filter_width_time'], params['filter_width_freq']),
        num_filters=params['num_components'],   # out
        num_channels=num_channels,              # in
        image_size=(input_len, num_freqs),
        weights_init=Uniform(mean=0, std=0.01),
        use_bias=params['use_bias'])

    tanh = Tanh()

    # optional pooling
    if params['pool_width_time'] > 1 or params['pool_width_freq'] > 1:
        pool = MaxPooling(
            (params['pool_width_time'], params['pool_width_freq']),
            step=(params['pool_stride_time'], params['pool_stride_freq']))
        pipeline = Sequence([conv.apply, tanh.apply, pool.apply],
                            name='pipeline')
    else:
        pipeline = Sequence([conv.apply, tanh.apply], name='pipeline')

    pipeline.initialize()
    return pipeline
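# A usage sketch for build_pipeline above. The dict keys are exactly the ones the
# method reads; the values, the `model` instance, and the input shape are
# illustrative assumptions only, not taken from the original project.
params = {
    'filter_width_time': 3, 'filter_width_freq': 3,
    'num_components': 16, 'use_bias': True,
    'pool_width_time': 2, 'pool_width_freq': 2,
    'pool_stride_time': 2, 'pool_stride_freq': 2,
}
# input_shape is bc01: (batch, channels, time, freq)
# pipeline = model.build_pipeline(input_shape=(None, 1, 128, 64), params=params)
# features = pipeline.apply(x)  # x: a 4D (batch, channel, time, freq) tensor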
def __init__(self, conv_activations, num_channels, image_shape,
             filter_sizes, feature_maps, pooling_sizes,
             top_mlp_activations, top_mlp_dims,
             conv_step=None, border_mode='valid', **kwargs):
    if conv_step is None:
        self.conv_step = (1, 1)
    else:
        self.conv_step = conv_step
    self.num_channels = num_channels
    self.image_shape = image_shape
    self.top_mlp_activations = top_mlp_activations
    self.top_mlp_dims = top_mlp_dims
    self.border_mode = border_mode

    conv_parameters = zip(filter_sizes, feature_maps)

    # Construct convolutional, activation, and pooling layers with
    # corresponding parameters
    self.convolution_layer = (
        Convolutional(filter_size=filter_size,
                      num_filters=num_filter,
                      step=self.conv_step,
                      border_mode=self.border_mode,
                      name='conv_{}'.format(i))
        for i, (filter_size, num_filter) in enumerate(conv_parameters))

    self.BN_layer = (BatchNormalization(name='bn_conv_{}'.format(i))
                     for i, _ in enumerate(conv_parameters))

    self.pooling_layer = (MaxPooling(size, name='pool_{}'.format(i))
                          for i, size in enumerate(pooling_sizes))

    self.layers = list(interleave([self.convolution_layer,
                                   self.BN_layer,
                                   conv_activations,
                                   self.pooling_layer]))

    self.conv_sequence = ConvolutionalSequence(self.layers, num_channels,
                                               image_size=image_shape)

    # Construct a top MLP
    self.top_mlp = MLP(top_mlp_activations, top_mlp_dims)

    # We need to flatten the output of the last convolutional layer.
    # This brick accepts a tensor of dimension (batch_size, ...) and
    # returns a matrix (batch_size, features)
    self.flattener = Flattener()

    application_methods = [self.conv_sequence.apply, self.flattener.apply,
                           self.top_mlp.apply]

    super(LeNet, self).__init__(application_methods, **kwargs)
def test_convolutional_sequence():
    x = tensor.tensor4('x')
    num_channels = 4
    pooling_size = 3
    batch_size = 5

    act = Rectifier()
    conv = Convolutional((3, 3), 5, weights_init=Constant(1.),
                         biases_init=Constant(5.))
    pooling = MaxPooling(pooling_size=(pooling_size, pooling_size))
    conv2 = Convolutional((2, 2), 4, weights_init=Constant(1.))

    seq = ConvolutionalSequence([conv, act, pooling.apply, conv2.apply, act],
                                num_channels, image_size=(17, 13))
    seq.push_allocation_config()
    assert conv.num_channels == 4
    assert conv2.num_channels == 5

    conv2.use_bias = False
    y = seq.apply(x)
    seq.initialize()
    func = function([x], y)

    x_val = numpy.ones((batch_size, 4, 17, 13), dtype=theano.config.floatX)
    y_val = (numpy.ones((batch_size, 4, 4, 2)) * (9 * 4 + 5) * 4 * 5)
    assert_allclose(func(x_val), y_val)
def convolutional_sequence(filter_size, num_filters, image_size, num_channels=1):
    layers = []
    # layers.append(BatchNormalization(name='batchnorm_pixels'))

    layers.append(Convolutional(filter_size=filter_size, num_filters=num_filters,
                                use_bias=True, tied_biases=True, name='conv_1'))
    layers.append(BatchNormalization(name='batchnorm_1'))
    layers.append(Rectifier(name='non_linear_1'))

    layers.append(Convolutional(filter_size=filter_size, num_filters=num_filters,
                                use_bias=True, tied_biases=False, name='conv_2'))
    layers.append(BatchNormalization(name='batchnorm_2'))
    layers.append(Rectifier(name='non_linear_2'))
    layers.append(MaxPooling(pooling_size=(2, 2), name='maxpool_2'))

    layers.append(Convolutional(filter_size=filter_size, num_filters=num_filters * 2,
                                use_bias=True, tied_biases=True, name='conv_3'))
    layers.append(BatchNormalization(name='batchnorm_3'))
    layers.append(Rectifier(name='non_linear_3'))

    layers.append(Convolutional(filter_size=filter_size, num_filters=num_filters * 2,
                                use_bias=True, tied_biases=True, name='conv_4'))
    layers.append(BatchNormalization(name='batchnorm_4'))
    layers.append(Rectifier(name='non_linear_4'))
    layers.append(MaxPooling(pooling_size=(2, 2), name='maxpool_4'))

    layers.append(Convolutional(filter_size=filter_size, num_filters=num_filters * 4,
                                use_bias=True, tied_biases=False, name='conv_5'))
    layers.append(BatchNormalization(name='batchnorm_5'))
    layers.append(Rectifier(name='non_linear_5'))

    layers.append(Convolutional(filter_size=filter_size, num_filters=num_filters * 4,
                                use_bias=True, tied_biases=True, name='conv_6'))
    layers.append(BatchNormalization(name='batchnorm_6'))
    layers.append(Rectifier(name='non_linear_6'))
    layers.append(MaxPooling(pooling_size=(2, 2), name='maxpool_6'))

    layers.append(Convolutional(filter_size=filter_size, num_filters=num_filters * 8,
                                use_bias=True, tied_biases=True, name='conv_7'))
    layers.append(BatchNormalization(name='batchnorm_7'))
    layers.append(Rectifier(name='non_linear_7'))

    layers.append(Convolutional(filter_size=filter_size, num_filters=num_filters * 8,
                                use_bias=True, tied_biases=True, name='conv_8'))
    layers.append(BatchNormalization(name='batchnorm_8'))
    layers.append(Rectifier(name='non_linear_8'))
    layers.append(MaxPooling(pooling_size=(2, 2), name='maxpool_8'))

    return ConvolutionalSequence(layers, num_channels=num_channels,
                                 image_size=image_size,
                                 biases_init=Constant(0.),
                                 weights_init=IsotropicGaussian(0.01))
def test_pooling_works_in_convolutional_sequence():
    x = tensor.tensor4('x')
    brick = ConvolutionalSequence([AveragePooling((2, 2), step=(2, 2)),
                                   MaxPooling((4, 4), step=(2, 2),
                                              ignore_border=True)],
                                  image_size=(16, 32), num_channels=3)
    brick.allocate()
    y = brick.apply(x)
    out = y.eval({x: numpy.empty((2, 3, 16, 32), dtype=theano.config.floatX)})
    assert out.shape == (2, 3, 3, 7)
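# Where the (2, 3, 3, 7) shape in the test above comes from: a small sketch,
# assuming the usual strided-pooling rule floor((size - pool) / step) + 1 when
# ignore_border=True (the helper name is ours, not part of the Blocks API).
def strided_pool_size(size, pool, step):
    return (size - pool) // step + 1

# AveragePooling((2, 2), step=(2, 2)) over a 16x32 image -> 8x16
assert (strided_pool_size(16, 2, 2), strided_pool_size(32, 2, 2)) == (8, 16)
# MaxPooling((4, 4), step=(2, 2)) over the 8x16 result -> 3x7
assert (strided_pool_size(8, 4, 2), strided_pool_size(16, 4, 2)) == (3, 7)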
def test_fully_layer():
    batch_size = 2
    x = T.tensor4()
    y = T.ivector()
    V = 200
    layer_conv = Convolutional(filter_size=(5, 5), num_filters=V,
                               name="toto",
                               weights_init=IsotropicGaussian(0.01),
                               biases_init=Constant(0.0))
    # try with no bias
    activation = Rectifier()
    pool = MaxPooling(pooling_size=(2, 2))

    convnet = ConvolutionalSequence([layer_conv, activation, pool],
                                    num_channels=15,
                                    image_size=(10, 10),
                                    name="conv_section")
    convnet.push_allocation_config()
    convnet.initialize()
    output = convnet.apply(x)
    batch_size = output.shape[0]
    output_dim = np.prod(convnet.get_dim('output'))
    result_conv = output.reshape((batch_size, output_dim))

    mlp = MLP(activations=[Rectifier().apply], dims=[output_dim, 10],
              weights_init=IsotropicGaussian(0.01),
              biases_init=Constant(0.0))
    mlp.initialize()
    output = mlp.apply(result_conv)
    cost = T.mean(Softmax().categorical_cross_entropy(y.flatten(), output))

    cg = ComputationGraph(cost)
    W = VariableFilter(roles=[WEIGHT])(cg.variables)
    B = VariableFilter(roles=[BIAS])(cg.variables)
    W = W[0]
    b = B[0]

    inputs_fully = VariableFilter(roles=[INPUT], bricks=[Linear])(cg)
    outputs_fully = VariableFilter(roles=[OUTPUT], bricks=[Linear])(cg)
    var_input = inputs_fully[0]
    var_output = outputs_fully[0]

    [d_W, d_S, d_b] = T.grad(cost, [W, var_output, b])
    d_b = d_b.dimshuffle(('x', 0))
    d_p = T.concatenate([d_W, d_b], axis=0)

    x_value = 1e3 * np.random.ranf((2, 15, 10, 10))
    f = theano.function([x, y], [var_input, d_S, d_p],
                        allow_input_downcast=True,
                        on_unused_input='ignore')
    A, B, C = f(x_value, [5, 0])
    A = np.concatenate([A, np.ones((2, 1))], axis=1)
    print 'A', A.shape
    print 'B', B.shape
    print 'C', C.shape
    print lin.norm(C - np.dot(np.transpose(A), B), 'fro')
    return
def create_yy_cnn(numConvLayer, conv_input, embedding_size, input_len, config, pref):
    '''
    CNN with several layers of convolution, each with a specific filter size.
    Maxpooling at the end.
    '''
    filter_width_list = [int(fw) for fw in config[pref + '_filterwidth'].split()]
    base_num_filters = int(config[pref + '_num_filters'])
    assert len(filter_width_list) == numConvLayer

    convs = []
    fmlist = []
    last_fm = input_len
    for i in range(numConvLayer):
        fw = filter_width_list[i]
        num_feature_map = last_fm - fw + 1  # 39
        conv = Convolutional(
            image_size=(last_fm, embedding_size),
            filter_size=(fw, embedding_size),
            num_filters=min(int(config[pref + '_maxfilter']),
                            base_num_filters * fw),
            num_channels=1)
        fmlist.append(num_feature_map)
        last_fm = num_feature_map
        embedding_size = conv.num_filters
        convs.append(conv)
    initialize(convs)

    for i, conv in enumerate(convs):
        conv.name = pref + '_conv' + str(i)
        conv_input = conv.apply(conv_input)
        conv_input = conv_input.flatten().reshape(
            (conv_input.shape[0], 1, fmlist[i], conv.num_filters))
    lastconv = conv
    lastconv_out = conv_input

    pool_layer = MaxPooling(pooling_size=(last_fm, 1))
    pool_layer.name = pref + '_pool_' + str(fw)
    act = Rectifier()
    act.name = 'act_' + str(fw)
    outpool = act.apply(pool_layer.apply(lastconv_out).flatten(2))
    return outpool, lastconv.num_filters
def net_dvc(image_size=(32, 32)):
    convos = [5, 5, 5]
    pools = [2, 2, 2]
    filters = [100, 200, 300]

    tuplify = lambda x: (x, x)
    convos = list(map(tuplify, convos))
    conv_layers = [Convolutional(filter_size=s, num_filters=o, num_channels=i,
                                 name="Conv" + str(n))
                   for s, o, i, n in zip(convos, filters, [3] + filters,
                                         range(1000))]

    pool_layers = [MaxPooling(p) for p in map(tuplify, pools)]
    activations = [Rectifier() for i in convos]

    layers = [i for l in zip(conv_layers, activations, pool_layers) for i in l]

    cnn = ConvolutionalSequence(layers, 3, image_size=image_size, name="cnn",
                                weights_init=Uniform(width=.1),
                                biases_init=Constant(0))
    cnn._push_allocation_config()
    cnn_output = np.prod(cnn.get_dim('output'))

    mlp_size = [cnn_output, 500, 2]
    mlp = MLP([Rectifier(), Softmax()], mlp_size, name="mlp",
              weights_init=Uniform(width=.1), biases_init=Constant(0))

    seq = FeedforwardSequence([net.apply for net in [cnn, Flattener(), mlp]])
    seq.push_initialization_config()
    seq.initialize()
    return seq
def test_max_pooling():
    x = tensor.tensor4('x')
    num_channels = 4
    batch_size = 5
    x_size = 17
    y_size = 13
    pool_size = 3

    pool = MaxPooling((pool_size, pool_size))
    y = pool.apply(x)
    func = function([x], y)

    x_val = numpy.ones((batch_size, num_channels, x_size, y_size),
                       dtype=theano.config.floatX)
    assert_allclose(func(x_val),
                    numpy.ones((batch_size, num_channels,
                                x_size / pool_size + 1,
                                y_size / pool_size + 1)))
    pool.input_dim = (x_size, y_size)
    pool.get_dim('output') == (num_channels,
                               x_size / pool_size + 1,
                               y_size / pool_size + 1)
eeg = normalize(eeg, axis=1)
acc = normalize(acc, axis=1)

# set dims for convolution
eeg = eeg.dimshuffle(0, 2, 1, 'x')
acc = acc.dimshuffle(0, 2, 1, 'x')

# first convolution only on eeg
conv_eeg = Convolutional(filter_size=(300, 1), num_filters=20, num_channels=1,
                         border_mode='full', tied_biases=True, name="conv_eeg")
maxpool_eeg = MaxPooling(pooling_size=(5, 1), name='maxpool_eeg')

# convolve
eeg1 = conv_eeg.apply(eeg)
# cut borders
d1 = (eeg1.shape[2] - eeg.shape[2]) / 2
eeg1 = eeg1[:, :, d1:d1 + eeg.shape[2], :]
# subsample
eeg1 = maxpool_eeg.apply(eeg1)
# activation
eeg1 = Tanh(name='act_eeg').apply(eeg1)

# second convolution only on eeg
conv_eeg2 = Convolutional(filter_size=(100, 1), num_filters=40, num_channels=20,
                          border_mode='full',
def test_max_pooling_padding():
    x = tensor.tensor4("x")
    brick = MaxPooling((6, 2), padding=(3, 1), ignore_border=True)
    y = brick.apply(x)
    out = y.eval({x: numpy.zeros((2, 3, 6, 10), dtype=theano.config.floatX)})
    assert out.shape == (2, 3, 2, 6)
def test_max_pooling_ignore_border_false():
    x = tensor.tensor4("x")
    brick = MaxPooling((5, 7), ignore_border=False)
    y = brick.apply(x)
    out = y.eval({x: numpy.zeros((4, 6, 12, 15), dtype=theano.config.floatX)})
    assert out.shape == (4, 6, 3, 3)
def test_max_pooling_ignore_border_true():
    x = tensor.tensor4("x")
    brick = MaxPooling((3, 4), ignore_border=True)
    y = brick.apply(x)
    out = y.eval({x: numpy.zeros((8, 3, 10, 13), dtype=theano.config.floatX)})
    assert out.shape == (8, 3, 3, 3)
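# The expected shapes in the three tests above follow from the per-dimension
# arithmetic below: a sketch assuming the stride defaults to the pooling size
# and padding is applied symmetrically (the helper name is ours, not Blocks API).
def pooled_size(size, pool, pad=0, ignore_border=True):
    padded = size + 2 * pad
    if ignore_border:
        return padded // pool    # partial windows at the border are dropped
    return -(-padded // pool)    # ceil division: partial windows are kept

# padding test: (6, 10) padded by (3, 1), pooled by (6, 2) -> (2, 6)
assert (pooled_size(6, 6, pad=3), pooled_size(10, 2, pad=1)) == (2, 6)
# ignore_border=False: (12, 15) pooled by (5, 7) -> (3, 3)
assert (pooled_size(12, 5, ignore_border=False),
        pooled_size(15, 7, ignore_border=False)) == (3, 3)
# ignore_border=True: (10, 13) pooled by (3, 4) -> (3, 3)
assert (pooled_size(10, 3), pooled_size(13, 4)) == (3, 3)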
class ConvolutionalLayer(Sequence, Initializable):
    """A complete convolutional layer: Convolution, nonlinearity, pooling.

    .. todo:: Mean pooling.

    Parameters
    ----------
    activation : :class:`.BoundApplication`
        The application method to apply in the detector stage (i.e. the
        nonlinearity before pooling). Needed for ``__init__``.

    See Also
    --------
    :class:`Convolutional` and :class:`MaxPooling` for the other parameters.

    Notes
    -----
    Uses max pooling.

    """
    @lazy(allocation=['filter_size', 'num_filters', 'num_channels'])
    def __init__(self, activation, filter_size, num_filters, pooling_size,
                 num_channels, conv_step=(1, 1), pooling_step=None,
                 batch_size=None, image_size=None, border_mode='valid',
                 **kwargs):
        self.convolution = ConvolutionalActivation(activation, filter_size,
                                                   num_filters, num_channels)
        self.pooling = MaxPooling()
        super(ConvolutionalLayer, self).__init__(
            application_methods=[self.convolution.apply, self.pooling.apply],
            **kwargs)
        self.convolution.name = self.name + '_convolution'
        self.pooling.name = self.name + '_pooling'

        self.filter_size = filter_size
        self.num_filters = num_filters
        self.num_channels = num_channels
        self.pooling_size = pooling_size
        self.conv_step = conv_step
        self.pooling_step = pooling_step
        self.batch_size = batch_size
        self.border_mode = border_mode
        self.image_size = image_size

    def _push_allocation_config(self):
        for attr in ['filter_size', 'num_filters', 'num_channels',
                     'batch_size', 'border_mode', 'image_size']:
            setattr(self.convolution, attr, getattr(self, attr))
        self.convolution.step = self.conv_step
        self.convolution._push_allocation_config()
        if self.image_size is not None:
            pooling_input_dim = self.convolution.get_dim('output')
        else:
            pooling_input_dim = None
        self.pooling.input_dim = pooling_input_dim
        self.pooling.pooling_size = self.pooling_size
        self.pooling.step = self.pooling_step
        self.pooling.batch_size = self.batch_size

    def get_dim(self, name):
        if name == 'input_':
            return self.convolution.get_dim('input_')
        if name == 'output':
            return self.pooling.get_dim('output')
        return super(ConvolutionalLayer, self).get_dim(name)

    @property
    def num_output_channels(self):
        return self.num_filters
# Create the symbolic variables
x = tensor.tensor4('image_features')
y = tensor.lmatrix('targets')

num_epochs = 500
layers = []

############### FIRST STAGE #######################
# Create the convolution layers
layers.append(Convolutional(filter_size=(7, 7), num_filters=32,
                            border_mode='half', name='conv_0'))
layers.append(BatchNormalization(name='batch_0'))
layers.append(Rectifier())
layers.append(MaxPooling((3, 3), step=(2, 2), padding=(1, 1), name='pool_0'))

layers.append(Convolutional(filter_size=(1, 1), num_filters=64,
                            border_mode='half', name='conv_1'))
layers.append(BatchNormalization(name='batch_1'))
layers.append(Rectifier())
layers.append(MaxPooling((3, 3), step=(2, 2), padding=(1, 1), name='pool_1'))

layers.append(Convolutional(filter_size=(3, 3), num_filters=192,
                            border_mode='half', name='conv_2'))
layers.append(BatchNormalization(name='batch_2'))
def inception(image_shape, num_input, conv1, conv2, conv3, conv4, conv5,
              conv6, out, i):
    layers1 = []
    layers2 = []
    layers3 = []
    layers4 = []

    layers1.append(Convolutional(filter_size=(1, 1), num_channels=num_input,
                                 num_filters=conv1, image_size=image_shape,
                                 border_mode='half', name='conv_{}'.format(i)))
    layers1.append(BatchNormalization(name='batch_{}'.format(i)))
    layers1.append(Rectifier())
    conv_sequence1 = ConvolutionalSequence(layers1, num_channels=num_input,
                                           image_size=image_shape,
                                           weights_init=Orthogonal(),
                                           use_bias=False,
                                           name='convSeq_{}'.format(i))
    conv_sequence1.initialize()
    out1 = conv_sequence1.apply(out)
    i = i + 1

    layers2.append(Convolutional(filter_size=(1, 1), num_channels=num_input,
                                 num_filters=conv2, image_size=image_shape,
                                 border_mode='half', name='conv_{}'.format(i)))
    layers2.append(BatchNormalization(name='batch_{}'.format(i)))
    layers2.append(Rectifier())
    i = i + 1
    layers2.append(Convolutional(filter_size=(3, 3), num_channels=conv2,
                                 num_filters=conv3, image_size=image_shape,
                                 border_mode='half', name='conv_{}'.format(i)))
    layers2.append(BatchNormalization(name='batch_{}'.format(i)))
    layers2.append(Rectifier())
    conv_sequence2 = ConvolutionalSequence(layers2, num_channels=num_input,
                                           image_size=image_shape,
                                           weights_init=Orthogonal(),
                                           use_bias=False,
                                           name='convSeq_{}'.format(i))
    conv_sequence2.initialize()
    out2 = conv_sequence2.apply(out)
    i = i + 1

    layers3.append(Convolutional(filter_size=(1, 1), num_channels=num_input,
                                 num_filters=conv4, image_size=image_shape,
                                 border_mode='half', name='conv_{}'.format(i)))
    layers3.append(BatchNormalization(name='batch_{}'.format(i)))
    layers3.append(Rectifier())
    i = i + 1
    layers3.append(Convolutional(filter_size=(5, 5), num_channels=conv4,
                                 num_filters=conv5, image_size=image_shape,
                                 border_mode='half', name='conv_{}'.format(i)))
    layers3.append(BatchNormalization(name='batch_{}'.format(i)))
    layers3.append(Rectifier())
    conv_sequence3 = ConvolutionalSequence(layers3, num_channels=num_input,
                                           image_size=image_shape,
                                           weights_init=Orthogonal(),
                                           use_bias=False,
                                           name='convSeq_{}'.format(i))
    conv_sequence3.initialize()
    out3 = conv_sequence3.apply(out)
    i = i + 1

    layers4.append(MaxPooling((3, 3), step=(1, 1), padding=(1, 1),
                              name='pool_{}'.format(i)))
    layers4.append(Convolutional(filter_size=(1, 1), num_channels=num_input,
                                 num_filters=conv6, image_size=image_shape,
                                 border_mode='half', name='conv_{}'.format(i)))
    layers4.append(BatchNormalization(name='batch_{}'.format(i)))
    layers4.append(Rectifier())
    i = i + 1
    conv_sequence4 = ConvolutionalSequence(layers4, num_channels=num_input,
                                           image_size=image_shape,
                                           weights_init=Orthogonal(),
                                           use_bias=False,
                                           name='convSeq_{}'.format(i))
    conv_sequence4.initialize()
    out4 = conv_sequence4.apply(out)

    # Merge
    return T.concatenate([out1, out2, out3, out4], axis=1)
pooling_size = [(3, 3), (2, 2), (2, 2)]
conv_step = (1, 1)
border_mode = 'valid'

conv_layers1 = []
conv_layers1.append(SpatialBatchNormalization(name='spatialBN_{}'.format(i)))
conv_layers1.append(Convolutional(filter_size=filter_size[j],
                                  num_filters=num_filter[j],
                                  step=conv_step,
                                  border_mode=border_mode,
                                  name='conv_{}'.format(i)))
conv_layers1.append(BatchNormalization(name='BNconv_{}'.format(i)))
conv_layers1.append(conv_activation[0])
conv_layers1.append(MaxPooling(pooling_size[j], name='pool_{}'.format(i)))
i = i + 1

# Sequence
conv_layers1.append(Convolutional(filter_size=filter_size[j + 1],
                                  num_filters=num_filter[j + 1],
                                  step=conv_step,
                                  border_mode=border_mode,
                                  name='conv_{}'.format(i)))
conv_layers1.append(BatchNormalization(name='BNconv_{}'.format(i)))
conv_layers1.append(conv_activation[0])
conv_layers1.append(MaxPooling(pooling_size[j + 1], name='pool_{}'.format(i)))
i = i + 1

# Sequence with no MaxPooling
conv_layers1.append(
def max_pool():
    return MaxPooling(pooling_size=(2, 2), step=(2, 2))
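# A minimal sketch of dropping the helper above into a ConvolutionalSequence, in
# the same style as the other snippets in this collection (the layer sizes and
# names here are illustrative assumptions, not taken from any particular model).
from blocks.bricks import Rectifier
from blocks.bricks.conv import Convolutional, ConvolutionalSequence

layers = [Convolutional(filter_size=(3, 3), num_filters=16, name='conv'),
          Rectifier(),
          max_pool()]
cnn = ConvolutionalSequence(layers, num_channels=3, image_size=(32, 32))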
                  biases_init=IsotropicGaussian(std=0.01, mean=1.0),
                  use_bias=True,
                  border_mode="valid",
                  step=(1, 1))
l.initialize()
o = l.apply(o)

l = BatchNormalizationConv(input_shape=l.get_dim("output"),
                           B_init=IsotropicGaussian(std=0.01),
                           Y_init=IsotropicGaussian(std=0.01))
l.initialize()
o = l.apply(o)
o = Rectifier().apply(o)

l = MaxPooling(pooling_size=(2, 2), step=(2, 2),
               input_dim=l.get_dim("output"))
l.initialize()
o = l.apply(o)

# ll = Dropout(p_drop=0.5)
# ll.initialize()
# o = ll.apply(o)

l = Convolutional(filter_size=(3, 3), num_filters=32,
                  num_channels=l.get_dim("output")[0],
                  image_size=l.get_dim("output")[1:],
                  weights_init=IsotropicGaussian(std=0.01),
                  biases_init=IsotropicGaussian(std=0.01),
                  use_bias=True,
                  border_mode="valid",
def build_model(images, labels):
    vgg = VGG(layer='conv3_4')
    vgg.push_initialization_config()
    vgg.initialize()

    sb = SubstractBatch()

    # Construct a bottom convolutional sequence
    layers = [
        Convolutional(filter_size=(3, 3), num_filters=100, use_bias=True,
                      tied_biases=True, name='final_conv0'),
        BatchNormalization(name='batchnorm_1'),
        Rectifier(name='final_conv0_act'),
        Convolutional(filter_size=(3, 3), num_filters=100, use_bias=True,
                      tied_biases=True, name='final_conv1'),
        BatchNormalization(name='batchnorm_2'),
        Rectifier(name='final_conv1_act'),
        MaxPooling(pooling_size=(2, 2), name='maxpool_final')
    ]
    bottom_conv_sequence = ConvolutionalSequence(
        layers, num_channels=256, image_size=(40, 40),
        biases_init=Constant(0.), weights_init=IsotropicGaussian(0.01))
    bottom_conv_sequence._push_allocation_config()

    # Flatten layer
    flattener = Flattener()

    # Construct a top MLP
    conv_out_dim = numpy.prod(bottom_conv_sequence.get_dim('output'))
    print 'dim output conv:', bottom_conv_sequence.get_dim('output')
    # conv_out_dim = 20 * 40 * 40
    top_mlp = BatchNormalizedMLP(
        [Rectifier(name='non_linear_9'), Softmax(name='non_linear_11')],
        [conv_out_dim, 1024, 10],
        weights_init=IsotropicGaussian(), biases_init=Constant(0))

    # Construct feedforward sequence
    ss_seq = FeedforwardSequence([vgg.apply, bottom_conv_sequence.apply,
                                  flattener.apply, top_mlp.apply])
    ss_seq.push_initialization_config()
    ss_seq.initialize()

    prediction = ss_seq.apply(images)
    cost_noreg = CategoricalCrossEntropy().apply(labels.flatten(), prediction)

    # add regularization
    selector = Selector([top_mlp])
    Ws = selector.get_parameters('W')
    mlp_brick_name = 'batchnormalizedmlp'
    W0 = Ws['/%s/linear_0.W' % mlp_brick_name]
    W1 = Ws['/%s/linear_1.W' % mlp_brick_name]
    cost = cost_noreg + .0001 * (W0 ** 2).sum() + .001 * (W1 ** 2).sum()

    # define learned parameters
    selector = Selector([ss_seq])
    Ws = selector.get_parameters('W')
    bs = selector.get_parameters('b')
    BNSCs = selector.get_parameters('batch_norm_scale')
    BNSHs = selector.get_parameters('batch_norm_shift')

    parameters_top = []
    parameters_top += [v for k, v in Ws.items()]
    parameters_top += [v for k, v in bs.items()]
    parameters_top += [v for k, v in BNSCs.items()]
    parameters_top += [v for k, v in BNSHs.items()]

    selector = Selector([vgg])
    convs = selector.get_parameters()

    parameters_all = []
    parameters_all += parameters_top
    parameters_all += [v for k, v in convs.items()]

    return cost, [parameters_top, parameters_all]
cb = []
for i, p in enumerate(convs):
    # Convolution bricks
    conv = Convolutional(
        filter_size=(p["filter_size"], 1),
        # step=(p['stride'], 1),
        num_filters=p["nfilter"],
        num_channels=conv_in_channels,
        batch_size=batch_size,
        border_mode="valid",
        tied_biases=True,
        name="conv%d" % i,
    )
    cb.append(conv)
    maxpool = MaxPooling(pooling_size=(p["pool_stride"], 1), name="mp%d" % i)

    conv_out = conv.apply(conv_in)[:, :, ::p["stride"], :]
    conv_out = maxpool.apply(conv_out)

    if p["normalize"]:
        conv_out_mean = conv_out.mean(axis=2).mean(axis=0)
        conv_out_var = (((conv_out - conv_out_mean[None, :, None, :]) ** 2)
                        .mean(axis=2).mean(axis=0).sqrt())
        conv_out = ((conv_out - conv_out_mean[None, :, None, :])
                    / conv_out_var[None, :, None, :])
    if p["activation"] is not None:
        conv_out = p["activation"].apply(conv_out)
    if p["dropout"] > 0:
        b = [p["activation"] if p["activation"] is not None else conv]
        dropout_locs.append((VariableFilter(bricks=b, name="output"),
                             p["dropout"]))
    if p["skip"] is not None and len(p["skip"]) > 0:
        maxpooladd = MaxPooling(pooling_size=(p["stride"] * p["pool_stride"], 1),
                                name="Mp%d" % i)
        skip = []