Example #1
 def apply_cnn(self, l_emb1, l_size1, l_emb2, l_size2, r_emb1, r_size1,
               r_emb2, r_size2, embedding_size, mycnf):
     assert l_size1 == r_size1
     assert l_size2 == r_size2
     assert l_size1 == l_size2
     max_len = l_size1
     fv_len = 0
     filter_sizes = mycnf['cnn_config']['filter_sizes']
     num_filters = mycnf['cnn_config']['num_filters']
     for i, fw in enumerate(filter_sizes):
         conv_left = ConvolutionalActivation(
             activation=Rectifier().apply,
             filter_size=(fw, embedding_size),
             num_filters=num_filters,
             num_channels=1,
             image_size=(max_len, embedding_size),
             name="conv" + str(fw) + l_emb1.name,
             seed=self.curSeed)
         conv_right = ConvolutionalActivation(
             activation=Rectifier().apply,
             filter_size=(fw, embedding_size),
             num_filters=num_filters,
             num_channels=1,
             image_size=(max_len, embedding_size),
             name="conv" + str(fw) + r_emb1.name,
             seed=self.curSeed)
         pooling = MaxPooling((max_len - fw + 1, 1), name="pool" + str(fw))
         initialize([conv_left, conv_right])
         l_convinp1 = l_emb1.flatten().reshape(
             (l_emb1.shape[0], 1, max_len, embedding_size))
         l_convinp2 = l_emb2.flatten().reshape(
             (l_emb2.shape[0], 1, max_len, embedding_size))
         l_pool1 = pooling.apply(conv_left.apply(l_convinp1)).flatten(2)
         l_pool2 = pooling.apply(conv_left.apply(l_convinp2)).flatten(2)
         r_convinp1 = r_emb1.flatten().reshape(
             (r_emb1.shape[0], 1, max_len, embedding_size))
         r_convinp2 = r_emb2.flatten().reshape(
             (r_emb2.shape[0], 1, max_len, embedding_size))
         r_pool1 = pooling.apply(conv_right.apply(r_convinp1)).flatten(2)
         r_pool2 = pooling.apply(conv_right.apply(r_convinp2)).flatten(2)
         onepools1 = T.concatenate([l_pool1, r_pool1], axis=1)
         onepools2 = T.concatenate([l_pool2, r_pool2], axis=1)
         fv_len += conv_left.num_filters * 2
         if i == 0:
             outpools1 = onepools1
             outpools2 = onepools2
         else:
             outpools1 = T.concatenate([outpools1, onepools1], axis=1)
             outpools2 = T.concatenate([outpools2, onepools2], axis=1)
     return outpools1, outpools2, fv_len
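Both this method and create_cnn_general in Example #2 pull their filter configuration out of the nested mycnf dict. A minimal sketch of the layout it expects; the concrete widths and counts are illustrative assumptions, not values from the source:

# Assumed config shape; only 'filter_sizes' and 'num_filters' are read above.
mycnf = {
    'cnn_config': {
        'filter_sizes': [3, 4, 5],   # one conv/pool pair per filter width
        'num_filters': 100,          # feature maps per width
    }
}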
Example #2
def create_cnn_general(embedded_x, mycnf, max_len, embedding_size, inp_conv=False):
    fv_len = 0
    filter_sizes = mycnf['cnn_config']['filter_sizes']
    num_filters = mycnf['cnn_config']['num_filters']
    for i, fw in enumerate(filter_sizes):
        conv = ConvolutionalActivation(
                        activation=Rectifier().apply,
                        filter_size=(fw, embedding_size), 
                        num_filters=num_filters,
                        num_channels=1,
                        image_size=(max_len, embedding_size),
                        name="conv"+str(fw)+embedded_x.name)
        pooling = MaxPooling((max_len-fw+1, 1), name="pool"+str(fw)+embedded_x.name)
        initialize([conv])
        if inp_conv:
            convinp = embedded_x
        else:
            convinp = embedded_x.flatten().reshape((embedded_x.shape[0], 1, max_len, embedding_size))
        onepool = pooling.apply(conv.apply(convinp)).flatten(2)
        if i == 0:
            outpools = onepool
        else:
            outpools = T.concatenate([outpools, onepool], axis=1)
        fv_len += conv.num_filters
    return outpools, fv_len
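A hypothetical call, assuming the function and its Blocks imports are already in scope and that embedded_x is a named 3-D tensor of shape (batch, max_len, embedding_size); all names and sizes here are illustrative:

from theano import tensor as T

embedded_x = T.tensor3('embedded_x')  # the .name is used to label the conv bricks
mycnf = {'cnn_config': {'filter_sizes': [2, 3], 'num_filters': 50}}
outpools, fv_len = create_cnn_general(embedded_x, mycnf,
                                      max_len=40, embedding_size=100)
# outpools: (batch, fv_len) features; fv_len == 50 * 2 for the two widths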
Example #3
def create_OLD_kim_cnn(layer0_input, embedding_size, input_len, config, pref):
    '''
        One-layer convolution with the same filter size.
    '''
    filter_width_list = [
        int(fw) for fw in config[pref + '_filterwidth'].split()
    ]
    print(filter_width_list)
    num_filters = int(config[pref + '_num_filters'])
    totfilters = 0
    for i, fw in enumerate(filter_width_list):
        num_feature_map = input_len - fw + 1  # conv output height in 'valid' mode
        conv = Convolutional(filter_size=(fw, embedding_size),
                             num_filters=num_filters,
                             num_channels=1,
                             image_size=(input_len, embedding_size),
                             name="conv" + str(fw))
        pooling = MaxPooling((num_feature_map, 1), name="pool" + str(fw))
        initialize([conv])

        totfilters += num_filters
        outpool = Flattener(name="flat" + str(fw)).apply(
            Rectifier(name=pref + 'act_' + str(fw)).apply(
                pooling.apply(conv.apply(layer0_input))))
        if i == 0:
            outpools = outpool
        else:
            outpools = T.concatenate([outpools, outpool], axis=1)
    name_rep_len = totfilters
    return outpools, name_rep_len
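The config/pref pair behaves like a flat string-valued mapping; a sketch of the two keys this function reads, with a hypothetical prefix and values:

pref = 'q'  # hypothetical prefix
config = {
    'q_filterwidth': '3 4 5',  # space-separated filter widths
    'q_num_filters': '100',    # filters per width
}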
Example #4
def create_kim_cnn(layer0_input, embedding_size, input_len, config, pref):
    '''
        One-layer convolution with different filter sizes and max pooling.
    '''
    filter_width_list = [int(fw) for fw in config[pref + '_filterwidth'].split()]
    print(filter_width_list)
    num_filters = int(config[pref+'_num_filters'])
    #num_filters /= len(filter_width_list)
    totfilters = 0
    for i, fw in enumerate(filter_width_list):
        num_feature_map = input_len - fw + 1  # conv output height in 'valid' mode
        conv = Convolutional(
            image_size=(input_len, embedding_size),
            filter_size=(fw, embedding_size),
            num_filters=min(int(config[pref + '_maxfilter']), num_filters * fw),
            num_channels=1
        )
        totfilters += conv.num_filters
        initialize2(conv, num_feature_map)
        conv.name = pref + 'conv_' + str(fw)
        convout = conv.apply(layer0_input)
        pool_layer = MaxPooling(
            pooling_size=(num_feature_map,1)
        )
        pool_layer.name = pref + 'pool_' + str(fw)
        act = Rectifier()
        act.name = pref + 'act_' + str(fw)
        outpool = act.apply(pool_layer.apply(convout)).flatten(2)
        if i == 0:
            outpools = outpool
        else:
            outpools = T.concatenate([outpools, outpool], axis=1)
    name_rep_len = totfilters
    return outpools, name_rep_len
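Unlike the OLD variant, this one scales the filter count with the filter width and caps it, so it reads one extra key; extending the hypothetical config sketched above:

config['q_maxfilter'] = '300'  # cap on num_filters * fw for wide filters
# per-width counts then become min(300, 100 * fw): 300 for each of widths 3, 4, 5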
Example #5
def test_max_pooling():
    x = tensor.tensor4("x")
    num_channels = 4
    batch_size = 5
    x_size = 17
    y_size = 13
    pool_size = 3
    pool = MaxPooling((pool_size, pool_size))
    y = pool.apply(x)
    func = function([x], y)

    x_val = numpy.ones((batch_size, num_channels, x_size, y_size),
                       dtype=theano.config.floatX)
    assert_allclose(func(x_val),
                    numpy.ones((batch_size, num_channels,
                                x_size // pool_size, y_size // pool_size)))
    pool.input_dim = (num_channels, x_size, y_size)  # channels first, so get_dim includes them
    assert pool.get_dim("output") == (num_channels, x_size // pool_size,
                                      y_size // pool_size)
Example #6
def test_max_pooling():
    x = tensor.tensor4('x')
    num_channels = 4
    batch_size = 5
    x_size = 17
    y_size = 13
    pool_size = 3
    pool = MaxPooling((pool_size, pool_size))
    y = pool.apply(x)
    func = function([x], y)

    x_val = numpy.ones((batch_size, num_channels, x_size, y_size),
                       dtype=theano.config.floatX)
    assert_allclose(
        func(x_val),
        numpy.ones((batch_size, num_channels, x_size // pool_size + 1,
                    y_size // pool_size + 1)))
    pool.input_dim = (num_channels, x_size, y_size)  # channels first, so get_dim includes them
    assert pool.get_dim('output') == (num_channels, x_size // pool_size + 1,
                                      y_size // pool_size + 1)
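Examples #5 and #6 differ only in the expected pooled size, which tracks the default of ignore_border in the Blocks version under test. A small illustrative helper (not Blocks API) for the rule:

def pooled_len(size, pool, ignore_border):
    # ignore_border=True drops the partial window (floor division);
    # ignore_border=False keeps it (ceiling division).
    return size // pool if ignore_border else -(-size // pool)

assert pooled_len(17, 3, True) == 5    # Example #5's expectation
assert pooled_len(17, 3, False) == 6   # Example #6's expectation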
Example #7
def create_yy_cnn(numConvLayer, conv_input, embedding_size, input_len, config,
                  pref):
    '''
    CNN with several convolution layers, each with its own filter size.
    Max pooling at the end.
    '''
    filter_width_list = [
        int(fw) for fw in config[pref + '_filterwidth'].split()
    ]
    base_num_filters = int(config[pref + '_num_filters'])
    assert len(filter_width_list) == numConvLayer
    convs = []
    fmlist = []
    last_fm = input_len
    for i in range(numConvLayer):
        fw = filter_width_list[i]
        num_feature_map = last_fm - fw + 1  # this layer's output height
        conv = Convolutional(image_size=(last_fm, embedding_size),
                             filter_size=(fw, embedding_size),
                             num_filters=min(int(config[pref + '_maxfilter']),
                                             base_num_filters * fw),
                             num_channels=1)
        fmlist.append(num_feature_map)
        last_fm = num_feature_map
        embedding_size = conv.num_filters
        convs.append(conv)

    initialize(convs)
    for i, conv in enumerate(convs):
        conv.name = pref + '_conv' + str(i)
        conv_input = conv.apply(conv_input)
        conv_input = conv_input.flatten().reshape(
            (conv_input.shape[0], 1, fmlist[i], conv.num_filters))
        lastconv = conv
        lastconv_out = conv_input
    pool_layer = MaxPooling(pooling_size=(last_fm, 1))
    pool_layer.name = pref + '_pool_' + str(fw)
    act = Rectifier()
    act.name = 'act_' + str(fw)
    outpool = act.apply(pool_layer.apply(lastconv_out).flatten(2))
    return outpool, lastconv.num_filters
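A hypothetical call; numConvLayer must equal the number of widths in the config, and conv_input is assumed to already be a 4-D bc01 tensor. Names and values are illustrative:

from theano import tensor as T

conv_input = T.tensor4('conv_input')  # (batch, 1, input_len, embedding_size)
pref = 'q'
config = {'q_filterwidth': '5 3', 'q_num_filters': '30', 'q_maxfilter': '120'}
outpool, rep_len = create_yy_cnn(numConvLayer=2, conv_input=conv_input,
                                 embedding_size=100, input_len=40,
                                 config=config, pref=pref)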
Example #8
def test_max_pooling_ignore_border_true():
    x = tensor.tensor4("x")
    brick = MaxPooling((3, 4), ignore_border=True)
    y = brick.apply(x)
    out = y.eval({x: numpy.zeros((8, 3, 10, 13), dtype=theano.config.floatX)})
    assert out.shape == (8, 3, 3, 3)
Example #9
    def train(self, X, Y, idx_folds, hyper_params, model_prefix, verbose=False):

        import os
        from collections import OrderedDict
        from fuel.datasets import IndexableDataset
        from blocks.model import Model
        from blocks.bricks import Linear, Softmax
        from blocks.bricks.conv import MaxPooling
        from blocks.initialization import Uniform
        from deepthought.bricks.cost import HingeLoss
        import numpy as np
        import theano
        from theano import tensor

        assert model_prefix is not None

        fold_weights_filename = '{}_weights.npy'.format(model_prefix)

        # convert Y to one-hot encoding
        n_classes = len(set(Y))
        Y = np.eye(n_classes, dtype=int)[Y]

        features = tensor.matrix('features', dtype=theano.config.floatX)
        targets = tensor.lmatrix('targets')

        input_ = features

        dim = X.shape[-1]
        
        # optional additional layers
        if self.pipeline_factory is not None:
            # need to re-shape flattened input to restore bc01 format
            input_shape = (input_.shape[0],) + hyper_params['classifier_input_shape']  # tuple, uses actual batch size
            input_ = input_.reshape(input_shape)

            pipeline = self.pipeline_factory.build_pipeline(input_shape, hyper_params)
            input_ = pipeline.apply(input_)                        
            input_ = input_.flatten(ndim=2)
            
            # this is very hacky, but there seems to be no elegant way to obtain a value for dim
            dummy_fn = theano.function(inputs=[features], outputs=input_)
            dummy_out = dummy_fn(X[:1])
            dim = dummy_out.shape[-1]
            
            
        if hyper_params['classifier_pool_width'] > 1:
            # FIXME: this is probably broken!
            
    #        c = hyper_params['num_components']
    #        input_ = input_.reshape((input_.shape[0], c, input_.shape[-1] // c, 1))  # restore bc01
            # need to re-shape flattened input to restore bc01 format
            input_shape = hyper_params['classifier_pool_input_shape']  # tuple
            input_ = input_.reshape(input_shape)

            pool = MaxPooling(name='pool',
                              input_dim=input_shape[1:],  # (c, X.shape[-1] // c, 1),
                              pooling_size=(hyper_params['classifier_pool_width'], 1),
                              step=(hyper_params['classifier_pool_stride'], 1))
            input_ = pool.apply(input_)
            input_ = input_.reshape((input_.shape[0], tensor.prod(input_.shape[1:])))

            dim = np.prod(pool.get_dim('output'))


        linear = Linear(name='linear',
                        input_dim=dim,
                        output_dim=n_classes,
                        weights_init=Uniform(mean=0, std=0.01),
                        use_bias=False)
        linear.initialize()

        softmax = Softmax('softmax')

        probs = softmax.apply(linear.apply(input_))
        prediction = tensor.argmax(probs, axis=1)

        model = Model(probs)  # classifier with raw probability outputs
        predict = theano.function([features], prediction)  # ready-to-use predict function

        if os.path.isfile(fold_weights_filename):
            # load filter weights from existing file
            fold_weights = np.load(fold_weights_filename)
            print('loaded filter weights from ' + fold_weights_filename)
        else:
            # train model

            from blocks.bricks.cost import MisclassificationRate
            from blocks.filter import VariableFilter
            from blocks.graph import ComputationGraph
            from blocks.roles import WEIGHT
            from blocks.bricks import Softmax
            from blocks.model import Model
            from blocks.algorithms import GradientDescent, Adam
            from blocks.extensions import FinishAfter, Timing, Printing, ProgressBar
            from blocks.extensions.monitoring import DataStreamMonitoring, TrainingDataMonitoring
            from blocks.extensions.predicates import OnLogRecord
            from fuel.streams import DataStream
            from fuel.schemes import SequentialScheme, ShuffledScheme
            from blocks.monitoring import aggregation
            from blocks.main_loop import MainLoop
            from blocks.extensions.training import TrackTheBest
            from deepthought.extensions.parameters import BestParams
            # from deepthought.datasets.selection import DatasetMetaDB

            init_param_values = model.get_parameter_values()

            cost = HingeLoss().apply(targets, probs)
            # Note: this requires just the class labels, not in a one-hot encoding
            error_rate = MisclassificationRate().apply(targets.argmax(axis=1), probs)
            error_rate.name = 'error_rate'

            cg = ComputationGraph([cost])

            # L1 regularization
            if hyper_params['classifier_l1wdecay'] > 0:
                weights = VariableFilter(roles=[WEIGHT])(cg.variables)
                cost = cost + hyper_params['classifier_l1wdecay'] * sum([abs(W).sum() for W in weights])

            cost.name = 'cost'

            # iterate over trial folds
            fold_weights = []
            fold_errors = []

            # for ifi, ifold in fold_generator.get_inner_cv_folds(outer_fold):
            #
            #     train_selectors = fold_generator.get_fold_selectors(outer_fold=outer_fold, inner_fold=ifold['train'])
            #     valid_selectors = fold_generator.get_fold_selectors(outer_fold=outer_fold, inner_fold=ifold['valid'])
            #
            #     metadb = DatasetMetaDB(meta, train_selectors.keys())
            #
            #     # get selected trial IDs
            #     train_idx = metadb.select(train_selectors)
            #     valid_idx = metadb.select(valid_selectors)

            for train_idx, valid_idx in idx_folds:

                # print train_idx
                # print valid_idx

                trainset = IndexableDataset(indexables=OrderedDict(
                    [('features', X[train_idx]), ('targets', Y[train_idx])]))

                validset = IndexableDataset(indexables=OrderedDict(
                    [('features', X[valid_idx]), ('targets', Y[valid_idx])]))

                model.set_parameter_values(init_param_values)

                best_params = BestParams()
                best_params.add_condition(['after_epoch'],
                                          predicate=OnLogRecord('error_rate_valid_best_so_far'))

                algorithm = GradientDescent(cost=cost, parameters=cg.parameters, step_rule=Adam())

                extensions = [Timing(),
                              FinishAfter(after_n_epochs=hyper_params['classifier_max_epochs']),
                              DataStreamMonitoring(
                                  [cost, error_rate],
                                  DataStream.default_stream(
                                      validset,
                                      iteration_scheme=SequentialScheme(
                                          validset.num_examples, hyper_params['classifier_batch_size'])),
                                  suffix="valid"),
                              TrainingDataMonitoring(
                                  [cost, error_rate,
                                   aggregation.mean(algorithm.total_gradient_norm)],
                                  suffix="train",
                                  after_epoch=True),
                              TrackTheBest('error_rate_valid'),
                              best_params  # after TrackTheBest!
                              ]

                if verbose:
                    extensions.append(Printing())  # optional
                    extensions.append(ProgressBar())

                main_loop = MainLoop(
                    algorithm,
                    DataStream.default_stream(
                        trainset,
                        iteration_scheme=ShuffledScheme(trainset.num_examples, hyper_params['classifier_batch_size'])),
                    model=model,
                    extensions=extensions)

                main_loop.run()

                fold_weights.append(best_params.values['/linear.W'])
                fold_errors.append(main_loop.status['best_error_rate_valid'])
                # break # FIXME

            fold_errors = np.asarray(fold_errors).squeeze()
            print('simple NN fold classification errors: {}'.format(fold_errors))

            fold_weights = np.asarray(fold_weights)

            # store filter weights for later analysis
            np.save(fold_weights_filename, fold_weights)

        weights = fold_weights.mean(axis=0)

        linear.parameters[0].set_value(weights)

        return model, predict
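The method reads a fixed set of hyper_params keys; a sketch of the expected entries (the values are placeholders, not from the source):

hyper_params = {
    'classifier_input_shape': (1, 10, 100),        # bc01 minus the batch axis
    'classifier_pool_width': 1,                    # > 1 enables the extra pooling path
    'classifier_pool_stride': 1,
    'classifier_pool_input_shape': (-1, 4, 25, 1), # only used when pooling
    'classifier_l1wdecay': 0.0,
    'classifier_max_epochs': 20,
    'classifier_batch_size': 50,
}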
Example #10
def test_max_pooling_padding():
    x = tensor.tensor4('x')
    brick = MaxPooling((6, 2), padding=(3, 1), ignore_border=True)
    y = brick.apply(x)
    out = y.eval({x: numpy.zeros((2, 3, 6, 10), dtype=theano.config.floatX)})
    assert out.shape == (2, 3, 2, 6)
Example #11
def test_max_pooling_ignore_border_false():
    x = tensor.tensor4('x')
    brick = MaxPooling((5, 7), ignore_border=False)
    y = brick.apply(x)
    out = y.eval({x: numpy.zeros((4, 6, 12, 15), dtype=theano.config.floatX)})
    assert out.shape == (4, 6, 3, 3)
Example #12
for i, cp in enumerate(conv_eeg):
    bconv = Convolutional(filter_size=(cp['filter_size'], 1),
                          num_filters=cp['num_filters'],
                          num_channels=eeg_channels,
                          border_mode='full',
                          tied_biases=True,
                          name="conv_eeg_%d" % i)
    bmaxpool = MaxPooling(pooling_size=(cp['pool_size'], 1),
                          name='maxpool_eeg_%d' % i)
    # convolve
    eeg1 = bconv.apply(eeg)
    # cut borders
    d1 = (eeg1.shape[2] - eeg.shape[2]) // 2
    eeg = eeg1[:, :, d1:d1 + eeg.shape[2], :]
    # subsample
    eeg = bmaxpool.apply(eeg)
    # normalize
    if cp['normalize']:
        eeg = normalize(eeg, axis=(0, 2))
    # activation
    act = cp['activation'](name='act_eeg%d' % i)
    eeg = act.apply(eeg)
    # bookkeeping: collect bricks, carry the filter count forward as channels
    bricks += [bconv, bmaxpool]
    eeg_channels = cp['num_filters']
    if cp['dropout'] > 0:
        dropout_locs += [(VariableFilter(bricks=[act],
                                         name='output'), cp['dropout'])]

# Now we can concatenate eeg and acc (dimensions should be right)
data = tensor.concatenate([eeg, acc], axis=1)
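Each cp entry drives one convolve/crop/pool/normalize/activate stage of the loop above. A sketch of a single layer's dict; the keys match what the loop reads, the values are illustrative, and the activation is a brick class that the loop instantiates with a name:

from blocks.bricks import Rectifier

conv_eeg = [
    dict(filter_size=300, num_filters=20, pool_size=5,
         normalize=True, activation=Rectifier, dropout=0.5),
]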
Example #13
# first convolution only on eeg
conv_eeg = Convolutional(filter_size=(300, 1),
                         num_filters=20,
                         num_channels=1,
                         border_mode='full',
                         tied_biases=True,
                         name="conv_eeg")
maxpool_eeg = MaxPooling(pooling_size=(5, 1), name='maxpool_eeg')
# convolve
eeg1 = conv_eeg.apply(eeg)
# cut borders
d1 = (eeg1.shape[2] - eeg.shape[2]) // 2
eeg1 = eeg1[:, :, d1:d1+eeg.shape[2], :]
# subsample
eeg1 = maxpool_eeg.apply(eeg1)
# activation
eeg1 = Tanh(name='act_eeg').apply(eeg1)

# second convolution only on eeg
conv_eeg2 = Convolutional(filter_size=(100, 1),
                          num_filters=40,
                          num_channels=20,
                          border_mode='full',
                          tied_biases=True,
                          name="conv_eeg2")
maxpool_eeg2 = MaxPooling(pooling_size=(5, 1), name='maxpool_eeg2')
# convolve
eeg2 = conv_eeg2.apply(eeg1)
# cut borders
d1 = (eeg2.shape[2] - eeg1.shape[2]) // 2
Example #14
    # Convolution bricks
    conv = Convolutional(
        filter_size=(p["filter_size"], 1),
        # step=(p["stride"], 1),
        num_filters=p["nfilter"],
        num_channels=conv_in_channels,
        batch_size=batch_size,
        border_mode="valid",
        tied_biases=True,
        name="conv%d" % i,
    )
    cb.append(conv)
    maxpool = MaxPooling(pooling_size=(p["pool_stride"], 1), name="mp%d" % i)

    conv_out = conv.apply(conv_in)[:, :, :: p["stride"], :]  # subsample by stride
    conv_out = maxpool.apply(conv_out)
    if p["normalize"]:
        conv_out_mean = conv_out.mean(axis=2).mean(axis=0)
        conv_out_var = ((conv_out - conv_out_mean[None, :, None, :]) ** 2).mean(axis=2).mean(axis=0).sqrt()
        conv_out = (conv_out - conv_out_mean[None, :, None, :]) / conv_out_var[None, :, None, :]
    if p["activation"] is not None:
        conv_out = p["activation"].apply(conv_out)
    if p["dropout"] > 0:
        b = [p["activation"] if p["activation"] is not None else conv]
        dropout_locs.append((VariableFilter(bricks=b, name="output"), p["dropout"]))
    if p["skip"] is not None and len(p["skip"]) > 0:
        maxpooladd = MaxPooling(pooling_size=(p["stride"] * p["pool_stride"], 1), name="Mp%d" % i)
        skip = []
        if "max" in p["skip"]:
            skip.append(maxpooladd.apply(conv_in)[:, :, : conv_out.shape[2], :])
        if "min" in p["skip"]: