Example #1
def test_inverse_dilation_layer():
    from convvisual.receptive_field.convvisual import receptive_field_build_deconv_layers
    from braindecode.veganlasagne.layer_util import print_layers

    input_var = T.tensor4('inputs').astype(theano.config.floatX)
    network1 = lasagne.layers.InputLayer(shape=[None, 1, 15, 1],
                                         input_var=input_var)
    network2 = StrideReshapeLayer(network1,
                                  n_stride=2,
                                  invalid_fill_value=np.nan)
    network3 = StrideReshapeLayer(network2,
                                  n_stride=2,
                                  invalid_fill_value=np.nan)
    network = FinalReshapeLayer(network3, remove_invalids=True, flatten=False)
    print_layers(network)

    #network = DilationSeparate2DLayer(network,(4,1),((0,1),(0,0)),fill_value=np.nan)
    #network = DilationMerge2DLayer(network,(2,1),((0,0),(0,0)))
    #network = DilationMerge2DLayer(network,(2,1),((0,1),(0,0)))
    deconv_network = receptive_field_build_deconv_layers(network, network2)
    print_layers(deconv_network)

    inputs = to_4d_time_array([range(1, 16), range(16, 31)])
    preds_cnt = lasagne.layers.get_output(
        lasagne.layers.get_all_layers(network))
    pred_cnt_func = theano.function([input_var], preds_cnt)
    layer_activations = pred_cnt_func(inputs)
    print(layer_activations)

    layer_activations2 = lasagne.layers.get_output(
        lasagne.layers.get_all_layers(deconv_network), layer_activations[-1])
    print(layer_activations[-1].shape)
    print(layer_activations2[3].eval())

    assert (np.array_equal(layer_activations2[3].eval(), layer_activations[0]))
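The helper to_4d_time_array used above (and throughout the examples below) is not defined in these snippets. A minimal sketch that is consistent with how it is called here, assuming it merely adds singleton channel and width axes:

import numpy as np

def to_4d_time_array(arr):
    # hypothetical reconstruction: (batch, time) -> (batch, 1, time, 1),
    # the layout the networks in these examples expect
    arr = np.asarray(arr, dtype=np.float32)
    return arr[:, np.newaxis, :, np.newaxis]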
Example #2
def conv_pool_block(l, num_filters, filter_length, i_block):
    l = DropoutLayer(l, p=self.drop_prob)
    l = Conv2DLayer(l,
                    num_filters=num_filters,
                    filter_size=[filter_length, 1],
                    nonlinearity=identity,
                    name='combined_conv_{:d}'.format(i_block))
    if self.double_time_convs:
        l = Conv2DLayer(l,
                        num_filters=num_filters,
                        filter_size=[filter_length, 1],
                        nonlinearity=identity,
                        name='combined_conv_{:d}'.format(i_block))
    if self.batch_norm:
        l = BatchNormLayer(l,
                           epsilon=1e-4,
                           alpha=self.batch_norm_alpha,
                           nonlinearity=self.later_nonlin)
    else:
        l = NonlinearityLayer(l, nonlinearity=self.later_nonlin)
    l = Pool2DLayer(l,
                    pool_size=[self.pool_time_length, 1],
                    stride=[1, 1],
                    mode=self.later_pool_mode)
    l = StrideReshapeLayer(l, n_stride=self.pool_time_stride)
    l = NonlinearityLayer(l, self.later_pool_nonlin)
    return l
Example #3
def test_stride_reshape_layer_with_padding():
    """Testing with padding... actually should never be any problem, as
    Stide reshape layer is independent of padding before..."""
    input_var = T.tensor4('inputs').astype(theano.config.floatX)
    network = lasagne.layers.InputLayer(shape=[None,1,15,1], input_var=input_var)
    network = lasagne.layers.Conv2DLayer(network, num_filters=1, pad='same', filter_size=[3, 1],
                                         W=lasagne.init.Constant(1), stride=(1,1))
    network = StrideReshapeLayer(network, n_stride=2, invalid_fill_value=np.nan)
    
    preds_cnt = lasagne.layers.get_output(lasagne.layers.get_all_layers(network)[1:])
    pred_cnt_func = theano.function([input_var], preds_cnt)
    layer_activations = pred_cnt_func(to_4d_time_array([range(1,16), range(16,31)]))
    assert equal_without_nans(np.array([[[[  3.], [  6.], [  9.], [ 12.], [ 15.], [ 18.], [ 21.], 
                           [ 24.], [ 27.], [ 30.], [ 33.], [ 36.], [ 39.], 
                           [ 42.], [ 29.]]],
        [[[ 33.], [ 51.], [ 54.], [ 57.], [ 60.], [ 63.], [ 66.], 
          [ 69.], [ 72.], [75.], [ 78.], [ 81.], [ 84.], 
          [ 87.], [59. ]]]], 
            dtype=np.float32),
        layer_activations[0])
        
    assert equal_without_nans(np.array(
        [[[[3.], [  9.], [ 15.], [ 21.], [ 27.], [ 33.], [ 39.], [29.]]],
       [[[33.], [ 54.], [ 60.], [ 66.], [ 72.], [ 78.], [ 84.], [59.]]],
       [[[  6.], [ 12.], [ 18.], [ 24.], [ 30.], [ 36.], [ 42.], [ np.nan]]],
       [[[ 51.], [ 57.], [ 63.], [ 69.], [ 75.], [ 81.], [ 87.], [ np.nan]]]],
       dtype=np.float32),
       layer_activations[1])
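The comparison helpers equal_without_nans and allclose_without_nans are likewise not defined in these snippets. A plausible sketch, assuming they treat NaNs at matching positions as equal and compare the remaining entries:

import numpy as np

def equal_without_nans(a, b):
    # NaN masks must match; remaining entries must be exactly equal
    return (np.array_equal(np.isnan(a), np.isnan(b)) and
            np.array_equal(a[~np.isnan(a)], b[~np.isnan(b)]))

def allclose_without_nans(a, b):
    # same idea, but with floating-point tolerance on the non-NaN entries
    return (np.array_equal(np.isnan(a), np.isnan(b)) and
            np.allclose(a[~np.isnan(a)], b[~np.isnan(b)]))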
Example #4
    def get_layers(self):
        l = InputLayer([None, self.in_chans, self.input_time_length, 1])
        if self.split_first_layer:
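            # move EEG channels to the width axis so 'time_conv' convolves
            # only over time; 'spat_conv' below then spans all channels at once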
            l = DimshuffleLayer(l, pattern=[0, 3, 2, 1])
            l = Conv2DLayer(l,
                            num_filters=self.n_filters_time,
                            filter_size=[self.filter_time_length, 1],
                            nonlinearity=identity,
                            name='time_conv')
            l = Conv2DAllColsLayer(l,
                                   num_filters=self.n_filters_spat,
                                   filter_size=[1, -1],
                                   nonlinearity=identity,
                                   name='spat_conv')
        else:  #keep channel dim in first dim, so it will also be convolved over
            l = Conv2DLayer(l,
                            num_filters=self.n_filters_time,
                            filter_size=[self.filter_time_length, 1],
                            nonlinearity=identity,
                            name='time_conv')
        if self.batch_norm:
            l = BatchNormLayer(l,
                               epsilon=1e-4,
                               alpha=self.batch_norm_alpha,
                               nonlinearity=self.conv_nonlin)
        else:
            l = NonlinearityLayer(l, nonlinearity=self.conv_nonlin)

        l = Pool2DLayer(l,
                        pool_size=[self.pool_time_length, 1],
                        stride=[1, 1],
                        mode=self.pool_mode)
        l = NonlinearityLayer(l, self.pool_nonlin)
        l = StrideReshapeLayer(l, n_stride=self.pool_time_stride)
        l = DropoutLayer(l, p=self.drop_prob)

        l = Conv2DLayer(l,
                        num_filters=self.n_classes,
                        filter_size=[self.final_dense_length, 1],
                        nonlinearity=identity,
                        name='final_dense')
        l = FinalReshapeLayer(l)
        l = NonlinearityLayer(l, softmax)
        return lasagne.layers.get_all_layers(l)
Example #5
def residual_block(
    l,
    batch_norm_alpha,
    batch_norm_epsilon,
    nonlinearity,
    survival_prob,
    add_after_nonlin,
    reduction_method,
    reduction_pool_mode,
    increase_units_factor=None,
    half_time=False,
    projection=False,
):
    assert survival_prob <= 1 and survival_prob >= 0
    input_num_filters = l.output_shape[1]
    if increase_units_factor is not None:
        out_num_filters = int(input_num_filters * increase_units_factor)
        assert (out_num_filters - input_num_filters) % 2 == 0, (
            "Need even "
            "number of extra channels in order to be able to pad correctly")
    else:
        out_num_filters = input_num_filters

    if (not half_time) or (reduction_method == 'conv'):
        stack_1 = batch_norm(Conv2DLayer(l,
                                         num_filters=out_num_filters,
                                         filter_size=(3, 3),
                                         stride=(1, 1),
                                         nonlinearity=nonlinearity,
                                         pad='same',
                                         W=lasagne.init.HeNormal(gain='relu')),
                             epsilon=batch_norm_epsilon,
                             alpha=batch_norm_alpha)
    else:
        assert half_time and reduction_method == 'pool'
        stack_1 = Pool2DLayer(l,
                              pool_size=(3, 1),
                              stride=(1, 1),
                              pad=(1, 0),
                              mode=reduction_pool_mode)
        # 1x1 conv here, therefore can do stride later without problems
        # otherwise would have to do stride here before
        # and make extra if condition later (only reshape with stride
        # in case of reduction method conv)...
        stack_1 = batch_norm(Conv2DLayer(stack_1,
                                         num_filters=out_num_filters,
                                         filter_size=(1, 1),
                                         stride=(1, 1),
                                         nonlinearity=nonlinearity,
                                         pad='same',
                                         W=lasagne.init.HeNormal(gain='relu')),
                             epsilon=batch_norm_epsilon,
                             alpha=batch_norm_alpha)

    if half_time:
        stack_1 = StrideReshapeLayer(stack_1, n_stride=2)
    stack_2 = batch_norm(Conv2DLayer(stack_1,
                                     num_filters=out_num_filters,
                                     filter_size=(3, 3),
                                     stride=(1, 1),
                                     nonlinearity=None,
                                     pad='same',
                                     W=lasagne.init.HeNormal(gain='relu')),
                         epsilon=batch_norm_epsilon,
                         alpha=batch_norm_alpha)

    # add shortcut connections
    shortcut = l
    if half_time:
        # note since we are only reshaping
        # this is ok both for later identity and later projection
        # 1x1 conv of projection is same if we do it before or after this reshape
        # (would not be true if it was anything but 1x1 conv(!))
        shortcut = StrideReshapeLayer(shortcut, n_stride=2)
    if increase_units_factor is not None:
        if projection:
            # projection shortcut, as option B in paper
            shortcut = batch_norm(Conv2DLayer(shortcut,
                                              num_filters=out_num_filters,
                                              filter_size=(1, 1),
                                              stride=(1, 1),
                                              nonlinearity=None,
                                              pad='same',
                                              b=None),
                                  epsilon=batch_norm_epsilon,
                                  alpha=batch_norm_alpha)
        else:
            # identity shortcut, as option A in paper
            n_extra_chans = out_num_filters - input_num_filters
            shortcut = PadLayer(shortcut, [n_extra_chans // 2, 0, 0],
                                batch_ndim=1)
    if add_after_nonlin:
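        # with no nonlinearity argument, NonlinearityLayer applies lasagne's
        # default (rectify), not the `nonlinearity` passed to this function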
        stack_2 = NonlinearityLayer(stack_2)
        block = ElemwiseSumLayer([stack_2, shortcut])
    else:
        block = NonlinearityLayer(ElemwiseSumLayer([stack_2, shortcut]),
                                  nonlinearity=nonlinearity)
    if survival_prob != 1:
        # Hack to make both be broadcastable along empty third dim
        # Otherwise I get an error that they are of different type:
        # shortcut: TensorType(False,False,False,True)
        # block: TensorType4d(32) or sth
        shortcut = ExpressionLayer(shortcut, lambda x: T.addbroadcast(x, 3))
        block = ExpressionLayer(block, lambda x: T.addbroadcast(x, 3))
        block = RandomSwitchLayer(block, shortcut, survival_prob)
    return block
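A minimal usage sketch for residual_block; the parameter values below are illustrative assumptions, not taken from the source, and the braindecode/lasagne imports of the surrounding module are assumed:

from lasagne.layers import InputLayer
from lasagne.nonlinearities import rectify

l = InputLayer((None, 16, 480, 1))  # (batch, channels, time, width)
# halve the time resolution (via StrideReshapeLayer) and double the channels
l = residual_block(l, batch_norm_alpha=0.1, batch_norm_epsilon=1e-4,
                   nonlinearity=rectify, survival_prob=1.0,
                   add_after_nonlin=False, reduction_method='pool',
                   reduction_pool_mode='average_exc_pad',
                   increase_units_factor=2, half_time=True)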
Example #6
    def get_layers(self):
        l = InputLayer([None, self.in_chans, self.input_time_length, 1])
        if self.split_first_layer:
            l = DimshuffleLayer(l, pattern=[0, 3, 2, 1])
            l = DropoutLayer(l, p=self.drop_in_prob)
            l = Conv2DLayer(l,
                            num_filters=self.num_filters_time,
                            filter_size=[self.filter_time_length, 1],
                            nonlinearity=identity,
                            name='time_conv')
            if self.double_time_convs:
                l = Conv2DLayer(l,
                                num_filters=self.num_filters_time,
                                filter_size=[self.filter_time_length, 1],
                                nonlinearity=identity,
                                name='time_conv')
            l = Conv2DAllColsLayer(l,
                                   num_filters=self.num_filters_spat,
                                   filter_size=[1, -1],
                                   nonlinearity=identity,
                                   name='spat_conv')
        else:  #keep channel dim in first dim, so it will also be convolved over
            l = DropoutLayer(l, p=self.drop_in_prob)
            l = Conv2DLayer(l,
                            num_filters=self.num_filters_time,
                            filter_size=[self.filter_time_length, 1],
                            nonlinearity=identity,
                            name='time_conv')
            if self.double_time_convs:
                l = Conv2DLayer(l,
                                num_filters=self.num_filters_time,
                                filter_size=[self.filter_time_length, 1],
                                nonlinearity=identity,
                                name='time_conv')
        if self.batch_norm:
            l = BatchNormLayer(l,
                               epsilon=1e-4,
                               alpha=self.batch_norm_alpha,
                               nonlinearity=self.first_nonlin)
        else:
            l = NonlinearityLayer(l, nonlinearity=self.first_nonlin)
        l = Pool2DLayer(l,
                        pool_size=[self.pool_time_length, 1],
                        stride=[1, 1],
                        mode=self.first_pool_mode)
        l = StrideReshapeLayer(l, n_stride=self.pool_time_stride)
        l = NonlinearityLayer(l, self.first_pool_nonlin)

        def conv_pool_block(l, num_filters, filter_length, i_block):
            l = DropoutLayer(l, p=self.drop_prob)
            l = Conv2DLayer(l,
                            num_filters=num_filters,
                            filter_size=[filter_length, 1],
                            nonlinearity=identity,
                            name='combined_conv_{:d}'.format(i_block))
            if self.double_time_convs:
                l = Conv2DLayer(l,
                                num_filters=num_filters,
                                filter_size=[filter_length, 1],
                                nonlinearity=identity,
                                name='combined_conv_{:d}'.format(i_block))
            if self.batch_norm:
                l = BatchNormLayer(l,
                                   epsilon=1e-4,
                                   alpha=self.batch_norm_alpha,
                                   nonlinearity=self.later_nonlin)
            else:
                l = NonlinearityLayer(l, nonlinearity=self.later_nonlin)
            l = Pool2DLayer(l,
                            pool_size=[self.pool_time_length, 1],
                            stride=[1, 1],
                            mode=self.later_pool_mode)
            l = StrideReshapeLayer(l, n_stride=self.pool_time_stride)
            l = NonlinearityLayer(l, self.later_pool_nonlin)
            return l

        l = conv_pool_block(l, self.num_filters_2, self.filter_length_2, 2)
        l = conv_pool_block(l, self.num_filters_3, self.filter_length_3, 3)
        l = conv_pool_block(l, self.num_filters_4, self.filter_length_4, 4)
        # Final part, transformed dense layer
        l = DropoutLayer(l, p=self.drop_prob)
        l = Conv2DLayer(l,
                        num_filters=self.n_classes,
                        filter_size=[self.final_dense_length, 1],
                        nonlinearity=identity,
                        name='final_dense')
        l = FinalReshapeLayer(l)
        l = NonlinearityLayer(l, self.final_nonlin)
        return lasagne.layers.get_all_layers(l)
Example #7
def test_raw_net_trial_based_and_continuous():
    softmax_rng = RandomState(3094839048)
    orig_softmax_weights = softmax_rng.randn(4,20,54,1).astype(theano.config.floatX) * 0.01

    lasagne.random.set_rng(RandomState(23903823))
    
    input_var = T.tensor4('inputs')
    
    epo_network = lasagne.layers.InputLayer(shape=[None,22,1200,1],
                                        input_var=input_var)
    # we have to switch channel dimension to height axis to not squash/convolve them away
    epo_network = lasagne.layers.DimshuffleLayer(epo_network, pattern=(0,3,2,1))
    epo_network = lasagne.layers.Conv2DLayer(epo_network, num_filters=20,filter_size=[30, 1], nonlinearity=identity)
    epo_network = lasagne.layers.DropoutLayer(epo_network, p=0.5)
    epo_network = lasagne.layers.Conv2DLayer(epo_network, num_filters=20,filter_size=[1, 22], nonlinearity=T.sqr)
    epo_network = SumPool2dLayer(epo_network, pool_size=(100,1), stride=(20,1), mode='average_exc_pad')
    epo_network = lasagne.layers.NonlinearityLayer(epo_network, nonlinearity=safe_log)
    epo_network = lasagne.layers.DropoutLayer(epo_network, p=0.5)
    epo_network = lasagne.layers.DenseLayer(epo_network, num_units=4,nonlinearity=softmax,
                                        W=orig_softmax_weights.reshape(4,-1).T)
    
    preds = lasagne.layers.get_output(epo_network, deterministic=True)
    pred_func = theano.function([input_var], preds)
    
    n_trials = 20
    n_samples = 1200 + n_trials - 1
    
    rng = RandomState(343434216)
    
    orig_inputs = rng.randn(1, get_input_shape(epo_network)[1],n_samples,1).astype(
        theano.config.floatX)
    
    # cut out n_trials overlapping trials, each shifted by one sample
    
    trialwise_inputs = [orig_inputs[:,:,start_i:start_i+1200] for start_i in range(n_trials)]
    
    trialwise_inputs = np.array(trialwise_inputs)[:,0]

    lasagne.random.set_rng(RandomState(23903823))
    input_var = T.tensor4('inputs')
    
    cnt_network = lasagne.layers.InputLayer(shape=[None,22,n_samples,1],
                                        input_var=input_var)
    # we have to switch channel dimension to height axis to not squash/convolve them away
    cnt_network = lasagne.layers.DimshuffleLayer(cnt_network, pattern=(0,3,2,1))
    cnt_network = lasagne.layers.Conv2DLayer(cnt_network, num_filters=20,
        filter_size=[30, 1], nonlinearity=identity)
    cnt_network = lasagne.layers.DropoutLayer(cnt_network, p=0.5)
    cnt_network = lasagne.layers.Conv2DLayer(cnt_network, num_filters=20,
                filter_size=[1, 22], nonlinearity=T.sqr)
    cnt_network = SumPool2dLayer(cnt_network, pool_size=(100,1),
        stride=(1,1), mode='average_exc_pad')
    cnt_network = lasagne.layers.NonlinearityLayer(cnt_network,
        nonlinearity=safe_log)
    cnt_network = StrideReshapeLayer(cnt_network, n_stride=20)
    cnt_network = lasagne.layers.DropoutLayer(cnt_network, p=0.5)
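    # weights reversed along time: Conv2DLayer flips its filters by default
    # (true convolution), so pre-flipping presumably makes this conv match
    # the DenseLayer dot product of the trialwise network above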
    cnt_network = lasagne.layers.Conv2DLayer(cnt_network, num_filters=4,
        filter_size=[54, 1], W=orig_softmax_weights[:,:,::-1,:], stride=(1,1),
        nonlinearity=lasagne.nonlinearities.identity)
    cnt_network = FinalReshapeLayer(cnt_network)
    cnt_network = lasagne.layers.NonlinearityLayer(cnt_network,
        nonlinearity=softmax)
    preds_cnt = lasagne.layers.get_output(cnt_network, deterministic=True)
    pred_cnt_func = theano.function([input_var], preds_cnt)

    results = []
    batch_size = 5
    for i_trial in range(0, len(trialwise_inputs), batch_size):
        res = pred_func(trialwise_inputs[i_trial:min(len(trialwise_inputs),
            i_trial+batch_size)])
        results.append(res)
    results = np.array(results).squeeze()
    res_cnt = pred_cnt_func(orig_inputs)
    reshaped_results = np.concatenate(results)
    assert np.allclose(reshaped_results, res_cnt[:n_trials])
Example #8
def test_stride_reshape_layer():
    input_var = T.tensor4('inputs').astype(theano.config.floatX)
    network = lasagne.layers.InputLayer(shape=[None,1,15,1], input_var=input_var)
    network = lasagne.layers.Conv2DLayer(network, num_filters=1,filter_size=[3, 1],
                                         W=lasagne.init.Constant(1), stride=(1,1))
    network = StrideReshapeLayer(network, n_stride=2, invalid_fill_value=np.nan)
    network = lasagne.layers.Conv2DLayer(network, num_filters=1,filter_size=[2, 1],
                                         W=lasagne.init.Constant(1), stride=(1,1))
    network = StrideReshapeLayer(network, n_stride=2, invalid_fill_value=np.nan)
    network = lasagne.layers.Conv2DLayer(network, num_filters=4, filter_size=[2, 1],
                                         W=to_4d_time_array([[1,1], [-1,-1], [0.1,0.1], [-0.1,-0.1]]), stride=(1,1),
                                        nonlinearity=lasagne.nonlinearities.identity)
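    # undo the stride reshapes: flatten back to (trials * time, n_filters),
    # keeping NaN rows for positions that never received a valid prediction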
    network = FinalReshapeLayer(network, remove_invalids=False)
    
    preds_cnt = lasagne.layers.get_output(lasagne.layers.get_all_layers(network)[1:])
    pred_cnt_func = theano.function([input_var], preds_cnt)
    layer_activations = pred_cnt_func(to_4d_time_array([range(1,16), range(16,31)]))
    assert equal_without_nans(np.array([[[[  6.], [  9.], [ 12.], [ 15.], [ 18.], [ 21.], 
                           [ 24.], [ 27.], [ 30.], [ 33.], [ 36.], [ 39.], 
                           [ 42.]]],
        [[[ 51.], [ 54.], [ 57.], [ 60.], [ 63.], [ 66.], 
          [ 69.], [ 72.], [75.], [ 78.], [ 81.], [ 84.], 
          [ 87.]]]], 
            dtype=np.float32),
        layer_activations[0])
    assert equal_without_nans(np.array(
        [[[[  6.], [ 12.], [ 18.], [ 24.], [ 30.], [ 36.], [ 42.]]],
       [[[ 51.], [ 57.], [ 63.], [ 69.], [ 75.], [ 81.], [ 87.]]],
       [[[  9.], [ 15.], [ 21.], [ 27.], [ 33.], [ 39.], [ np.nan]]],
       [[[ 54.], [ 60.], [ 66.], [ 72.], [ 78.], [ 84.], [ np.nan]]]],
       dtype=np.float32),
       layer_activations[1])
    
    assert equal_without_nans(np.array([[[[  18.], [  30.], [  42.], [  54.], [  66.], [  78.]]],
       [[[ 108.], [ 120.], [ 132.], [ 144.], [ 156.], [ 168.]]],
       [[[  24.], [  36.], [  48.], [  60.], [  72.], [  np.nan]]],
       [[[ 114.], [ 126.], [ 138.], [ 150.], [ 162.], [  np.nan]]]],
       dtype=np.float32),
       layer_activations[2])
    
    assert equal_without_nans(np.array([[[[  18.], [  42.], [  66.]]],
        [[[ 108.], [ 132.], [ 156.]]],
        [[[  24.], [  48.], [  72.]]],
        [[[ 114.], [ 138.], [ 162.]]],
        [[[  30.], [  54.], [  78.]]],
        [[[ 120.], [ 144.], [ 168.]]],
        [[[  36.], [  60.], [  np.nan]]],
        [[[ 126.], [ 150.], [  np.nan]]]],
        dtype=np.float32),
        layer_activations[3])
    
    assert allclose_without_nans(np.array(
        [[[[  60.        ], [ 108.        ]], [[ -60.        ], [-108.        ]],
        [[   6.00000048], [  10.80000019]], [[  -6.00000048], [ -10.80000019]]],
        [[[ 240.        ], [ 288.        ]], [[-240.        ], [-288.        ]],
        [[  24.        ], [  28.80000114]], [[ -24.        ], [ -28.80000114]]],
        [[[  72.        ], [ 120.        ]], [[ -72.        ], [-120.        ]],
        [[   7.20000029], [  12.        ]], [[  -7.20000029], [ -12.        ]]],
        [[[ 252.        ], [ 300.        ]], [[-252.        ], [-300.        ]],
        [[  25.20000076], [  30.00000191]], [[ -25.20000076], [ -30.00000191]]],
        [[[  84.        ], [ 132.        ]], [[ -84.        ], [-132.        ]],
        [[   8.40000057], [  13.19999981]], [[  -8.40000057], [ -13.19999981]]],
        [[[ 264.        ], [ 312.        ]], [[-264.        ], [-312.        ]],
        [[  26.40000153], [  31.20000076]], [[ -26.40000153], [ -31.20000076]]],
        [[[  96.        ], [          np.nan]], [[ -96.        ], [          np.nan]],
        [[   9.60000038], [          np.nan]], [[  -9.60000038], [          np.nan]]],
        [[[ 276.        ], [          np.nan]], [[-276.        ], [          np.nan]],
        [[  27.60000038], [          np.nan]], [[ -27.60000038], [          np.nan]]]],
        dtype=np.float32),
        layer_activations[4])
    
    assert allclose_without_nans(np.array(
        [[  60.        ,  -60.        ,    6.0,   -6.],
        [  72.        ,  -72.        ,    7.2,   -7.2],
        [  84.        ,  -84.        ,    8.4,   -8.4],
        [  96.        ,  -96.        ,    9.6,   -9.6],
        [ 108.        , -108.        ,   10.8,  -10.8],
        [ 120.        , -120.        ,   12. ,  -12.        ],
        [ 132.        , -132.        ,   13.2,  -13.2],
        [          np.nan,           np.nan,           np.nan,           np.nan],
        [ 240.        , -240.        ,   24.        ,  -24.        ],
        [ 252.        , -252.        ,   25.2,  -25.2],
        [ 264.        , -264.        ,   26.4,  -26.4],
        [ 276.        , -276.        ,   27.6,  -27.6],
        [ 288.        , -288.        ,   28.8,  -28.8],
        [ 300.        , -300.        ,   30.0,  -30.0],
        [ 312.        , -312.        ,   31.2,  -31.2],
        [          np.nan,           np.nan,           np.nan,           np.nan]],
        dtype=np.float32),
        layer_activations[5])
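For reference, this is what StrideReshapeLayer(n_stride=2) appears to compute, judging from the activations asserted above (a numpy reading of the tests, not the library implementation): each of the n_stride interleaved time courses is moved into the batch axis, and shorter slices are padded with the invalid_fill_value.

import numpy as np

def stride_reshape(x, n_stride, fill_value=np.nan):
    # x: (batch, channels, time, width)
    n_out_time = int(np.ceil(x.shape[2] / float(n_stride)))
    slices = []
    for offset in range(n_stride):
        sl = x[:, :, offset::n_stride, :]
        if sl.shape[2] < n_out_time:  # pad shorter slices at the end
            pad_shape = sl.shape[:2] + (n_out_time - sl.shape[2],) + sl.shape[3:]
            sl = np.concatenate(
                [sl, np.full(pad_shape, fill_value, dtype=x.dtype)], axis=2)
        slices.append(sl)
    # stack offset-major: all trials at offset 0, then all trials at offset 1, ...
    return np.concatenate(slices, axis=0)

Applied to layer_activations[0] above, this reproduces layer_activations[1].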
Example #9
def test_stride_reshape_layer_with_nonempty_3rd_dim():
    input_var = T.tensor4('inputs').astype(theano.config.floatX)
    network = lasagne.layers.InputLayer(shape=[None,1,15,2], input_var=input_var)
    network = lasagne.layers.Conv2DLayer(network, num_filters=1,filter_size=[3, 1],
                                         W=lasagne.init.Constant(1), stride=(1,1))
    network = StrideReshapeLayer(network, n_stride=2, invalid_fill_value=np.nan)
    network = lasagne.layers.Conv2DLayer(network, num_filters=1,filter_size=[2, 1],
                                         W=lasagne.init.Constant(1), stride=(1,1))
    network = StrideReshapeLayer(network, n_stride=2, invalid_fill_value=np.nan)
    network = lasagne.layers.Conv2DLayer(network, num_filters=4, filter_size=[2, 1],
                                         W=to_4d_time_array([[1,1], [-1,-1], [0.1,0.1], [-0.1,-0.1]]), stride=(1,1),
                                        nonlinearity=lasagne.nonlinearities.identity)
    network = lasagne.layers.Conv2DLayer(network, num_filters=3, filter_size=[1, 2],
                                         W=np.array([[[[1,1]],[[0.5,0.5]], [[1,1]], [[0.5,0.5]]],
                                                      [[[-1,-1]],[[-0.5,-0.5]], [[-1,-1]], [[0,0]]],
                                                     [[[0,0]],[[0,0]], [[-1,1]], [[0,0]]]], dtype=np.float32), stride=(1,1),
                                        nonlinearity=lasagne.nonlinearities.identity)
    network = FinalReshapeLayer(network, remove_invalids=False)
    
    preds_cnt = lasagne.layers.get_output(lasagne.layers.get_all_layers(network)[1:])
    pred_cnt_func = theano.function([input_var], preds_cnt)
    inputs = np.array([[range(1,16), range(101,116)],
                     [range(16,31), range(116,131)]]).astype(np.float32)
    inputs = inputs.swapaxes(1,2)[:,np.newaxis,:,:]
    layer_activations = pred_cnt_func(inputs)
    assert equal_without_nans(np.array([[[[  6., 306], [  9., 309], [ 12., 312], [ 15.,315], [ 18.,318], [ 21.,321], 
                           [ 24.,324], [ 27.,327], [ 30.,330], [ 33.,333], [ 36.,336], [ 39.,339], 
                           [ 42.,342]]],
        [[[ 51.,351], [ 54.,354], [ 57.,357], [ 60.,360], [ 63.,363], [ 66.,366], 
          [ 69.,369], [ 72.,372], [75.,375], [ 78.,378], [ 81.,381], [ 84.,384], 
          [ 87.,387]]]], 
            dtype=np.float32),
        layer_activations[0])
    
    assert equal_without_nans(np.array(
        [[[[  6., 306], [ 12.,312], [ 18.,318], [ 24.,324], [ 30.,330], [ 36.,336], [ 42.,342]]],
       [[[ 51., 351], [ 57., 357], [ 63., 363], [ 69., 369], [ 75., 375], [ 81., 381], [ 87., 387]]],
       [[[  9., 309], [ 15., 315], [ 21., 321], [ 27., 327], [ 33., 333], [ 39., 339], [ np.nan, np.nan]]],
       [[[ 54., 354], [ 60., 360], [ 66., 366], [ 72., 372], [ 78., 378], [ 84., 384], [ np.nan, np.nan]]]],
       dtype=np.float32),
       layer_activations[1])
    assert equal_without_nans(np.array([[[[  18.,618.], [  30., 630], [  42., 642], [  54., 654], [  66., 666], [  78., 678]]],
       [[[ 108., 708], [ 120., 720], [ 132., 732], [ 144., 744], [ 156., 756], [ 168., 768]]],
       [[[  24., 624], [  36., 636], [  48., 648], [  60., 660], [  72., 672], [  np.nan, np.nan]]],
       [[[ 114., 714], [ 126., 726], [ 138., 738], [ 150., 750], [ 162., 762], [  np.nan, np.nan]]]],
       dtype=np.float32),
       layer_activations[2])
    assert equal_without_nans(np.array([[[[  18., 618], [  42.,642], [  66.,666]]],
        [[[ 108.,708], [ 132.,732], [ 156.,756]]],
        [[[  24.,624], [  48.,648], [  72.,672]]],
        [[[ 114.,714], [ 138.,738], [ 162.,762]]],
        [[[  30.,630], [  54.,654], [  78.,678]]],
        [[[ 120.,720], [ 144.,744], [ 168.,768]]],
        [[[  36.,636], [  60.,660], [  np.nan, np.nan]]],
        [[[ 126., 726], [ 150., 750], [  np.nan, np.nan]]]],
        dtype=np.float32),
        layer_activations[3])
    assert allclose_without_nans(np.array(
        [[[[  60.        ,1260], [ 108.        , 1308]], [[ -60.        , -1260], [-108.        ,-1308]],
        [[   6.00000048, 126], [  10.80000019, 130.80000305]], [[  -6.00000048, -126], [ -10.80000019, -130.80000305]]],
          [[[  240.        ,  1440.        ], [  288.        ,  1488.        ]],   [[ -240.        , -1440.        ],
             [ -288.        , -1488.        ]], [[   24.        ,   144.        ],    [   28.79999924,   148.80000305]],
            [[  -24.        ,  -144.        ],  [  -28.79999924,  -148.80000305]]],
         [[[   72.        ,  1272.        ],  [  120.        ,  1320.        ]],  [[  -72.        , -1272.        ],
             [ -120.        , -1320.        ]], [[    7.20000029,   127.20000458],  [   12.        ,   132.        ]],
            [[   -7.20000029,  -127.20000458],   [  -12.        ,  -132.        ]]],
           [[[  252.        ,  1452.        ], [  300.        ,  1500.        ]], [[ -252.        , -1452.        ],
             [ -300.        , -1500.        ]],   [[   25.20000076,   145.19999695], [   30.        ,   150.        ]],
            [[  -25.20000076,  -145.19999695],  [  -30.        ,  -150.        ]]],
           [[[   84.        ,  1284.        ], [  132.        ,  1332.        ]],  [[  -84.        , -1284.        ],
             [ -132.        , -1332.        ]], [[    8.39999962,   128.3999939 ], [   13.19999981,   133.19999695]],
            [[   -8.39999962,  -128.3999939 ], [  -13.19999981,  -133.19999695]]],
           [[[  264.        ,  1464.        ], [  312.        ,  1512.        ]], [[ -264.        , -1464.        ],
             [ -312.        , -1512.        ]], [[   26.39999962,   146.3999939 ], [   31.20000076,   151.19999695]],
            [[  -26.39999962,  -146.3999939 ],  [  -31.20000076,  -151.19999695]]],
           [[[   96.        ,  1296.        ],  [np.nan, np.nan]], [[  -96.        , -1296.        ],
             [np.nan,  np.nan]],
            [[    9.60000038,   129.6000061 ], [np.nan, np.nan]], [[   -9.60000038,  -129.6000061 ],
             [np.nan, np.nan]]],
           [[[  276.        ,  1476.        ], [np.nan, np.nan]],
            [[ -276.        , -1476.        ], [np.nan, np.nan]],
            [[   27.60000038,   147.6000061 ], [np.nan, np.nan]],
            [[  -27.60000038,  -147.6000061 ], [np.nan, np.nan]]]], dtype=np.float32),
        layer_activations[4])
    expected_5 = [np.sum(layer_activations[4][:,[0,2]] * 1 + layer_activations[4][:,[1,3]] * 0.5, axis=(1,3)),
             np.sum(layer_activations[4][:,[0,2]] * (-1), axis=(1,3)) + 
                  np.sum(layer_activations[4][:,[1]] * (-0.5), axis=(1,3)),
             layer_activations[4][:,2,:,0] - layer_activations[4][:,2,:,1]]
    expected_5 = np.array(expected_5).swapaxes(0,1)[:,:,:,np.newaxis]
    assert allclose_without_nans(expected_5, layer_activations[5])
    assert allclose_without_nans(layer_activations[6][:,0],
    np.concatenate((np.linspace(726,805.2, 7), [np.nan], np.linspace(924, 1003.2, 7), [np.nan])))
    assert allclose_without_nans(layer_activations[6][:,1],
        np.concatenate((np.linspace(-792,-878.4, 7), [np.nan], np.linspace(-1008, -1094.4, 7), [np.nan])))
    assert allclose_without_nans(layer_activations[6][:,2], 
                                 np.concatenate(([-120] * 7, [np.nan], [-120] * 7, [np.nan],)))
    
    for elem in layer_activations[6].flatten():
        assert np.isnan(elem) or elem in layer_activations[5]
    assert np.sum(np.isnan(layer_activations[5])) == np.sum(np.isnan(layer_activations[6]))