Example #1
    def test_specified_rng(self, input_layer):
        from lasagne.layers.noise import GaussianNoiseLayer
        from lasagne.random import set_rng
        from numpy.random import RandomState
        import numpy
        import theano
        input = theano.shared(numpy.ones((100, 100)))
        seed = 123456789

        set_rng(RandomState(seed))
        result = GaussianNoiseLayer(input_layer).get_output_for(input)
        result_eval1 = result.eval()

        set_rng(RandomState(seed))
        result = GaussianNoiseLayer(input_layer).get_output_for(input)
        result_eval2 = result.eval()
        assert numpy.allclose(result_eval1, result_eval2)
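The deterministic counterpart of this behaviour is also worth noting: passing deterministic=True to get_output bypasses GaussianNoiseLayer entirely. A minimal standalone sketch (not part of the test suite above):

import numpy as np
import theano
import theano.tensor as T
from lasagne.layers import InputLayer, GaussianNoiseLayer, get_output

x = T.matrix('x')
l_noise = GaussianNoiseLayer(InputLayer((None, 100), input_var=x), sigma=0.1)

# deterministic=True disables the noise, so the eval path is the identity
f_eval = theano.function([x], get_output(l_noise, deterministic=True))

data = np.ones((4, 100), dtype=theano.config.floatX)
assert np.allclose(f_eval(data), data)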
Example #2
def build_model(encoder_specs,
                decoder_specs,
                p_drop_input,
                p_drop_hidden,
                input_shape,
                batch_size=None,
                activation=rectify,
                combinator_type='MILAudem'):
    net = OrderedDict()
    net['input'] = InputLayer((batch_size, ) + tuple(input_shape),
                              name='input')
    # corrupted input
    net['input_corr'] = GaussianNoiseLayer(net['input'],
                                           sigma=p_drop_input,
                                           name='input_corr')

    # dirty encoder
    train_output_l, dirty_encoder = build_encoder(net, encoder_specs,
                                                  activation, 'dirty',
                                                  p_drop_hidden, None)

    # clean encoder
    clean_encoder = OrderedDict(list(net.items())[:1])  # keep only the input layer
    eval_output_l, clean_net = build_encoder(clean_encoder, encoder_specs,
                                             activation, 'clean', 0.,
                                             dirty_encoder)

    # dirty decoder
    dirty_decoder = build_decoder(dirty_encoder, clean_net, 'dirty',
                                  decoder_specs, combinator_type)

    return (train_output_l, eval_output_l, dirty_encoder, dirty_decoder,
            clean_encoder)
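build_model runs a noisy ("dirty") and a noise-free ("clean") encoder over the same input, with the clean path reusing the dirty path's parameters (see build_enc_layer in Example #7). A minimal sketch of that sharing mechanism, with illustrative layer names:

from lasagne.layers import InputLayer, DenseLayer

l_in = InputLayer((None, 10))
dirty = DenseLayer(l_in, num_units=20, name='dirty')

# get_params() returns [W, b] for a DenseLayer; passing the shared
# variable W to a second layer ties the weights of both paths
W_shared = dirty.get_params()[0]
clean = DenseLayer(l_in, num_units=20, W=W_shared, name='clean')
assert clean.W is dirty.W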
Example #3
import lasagne
from lasagne.init import Constant, GlorotUniform
from lasagne.layers import (DenseLayer, GaussianNoiseLayer, RecurrentLayer,
                            ReshapeLayer)
from lasagne.nonlinearities import softmax


def create_rnn(input_vars, num_inputs, depth, hidden_layer_size, num_outputs):
    # network = InputLayer((None, None, num_inputs), input_vars)
    network = lasagne.layers.InputLayer(shape=(None, 1, 1, num_inputs),
                                        input_var=input_vars)
    batch_size_theano, _, _, seqlen = network.input_var.shape

    # light Gaussian input noise acts as a regularizer
    network = GaussianNoiseLayer(network, sigma=0.05)
    # stack `depth` vanilla recurrent layers
    for i in range(depth):
        network = RecurrentLayer(network,
                                 hidden_layer_size,
                                 W_hid_to_hid=GlorotUniform(),
                                 W_in_to_hid=GlorotUniform(),
                                 b=Constant(1.0),
                                 nonlinearity=lasagne.nonlinearities.tanh,
                                 learn_init=True)
    network = ReshapeLayer(network, (-1, hidden_layer_size))
    network = DenseLayer(network, num_outputs, nonlinearity=softmax)

    return network
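A usage sketch, assuming create_rnn and its imports are in scope; the sizes below are illustrative:

import theano
import theano.tensor as T
from lasagne.layers import get_output

x = T.tensor4('x')  # matches the (batch, 1, 1, num_inputs) input shape
net = create_rnn(x, num_inputs=20, depth=2, hidden_layer_size=64,
                 num_outputs=10)

# deterministic=True switches the GaussianNoiseLayer off at prediction time
predict = theano.function([x], get_output(net, deterministic=True))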
Example #4
import lasagne
from lasagne.layers import (DenseLayer, ElemwiseSumLayer, GaussianNoiseLayer,
                            InputLayer, LSTMLayer, ReshapeLayer)
from lasagne.nonlinearities import softmax


def create_blstm(input_vars, mask_vars, num_inputs, depth, hidden_layer_size,
                 num_outputs):
    network = lasagne.layers.InputLayer(shape=(None, 1, 1, num_inputs),
                                        input_var=input_vars)
    mask = InputLayer((None, None), mask_vars)
    network = GaussianNoiseLayer(network, sigma=0.01)
    for i in range(depth):
        forward = LSTMLayer(network,
                            hidden_layer_size,
                            mask_input=mask,
                            learn_init=True)
        backward = LSTMLayer(network,
                             hidden_layer_size,
                             mask_input=mask,
                             learn_init=True,
                             backwards=True)
        # merge the two directions by elementwise sum, keeping the width
        network = ElemwiseSumLayer([forward, backward])
    network = ReshapeLayer(network, (-1, hidden_layer_size))
    network = DenseLayer(network, num_outputs, nonlinearity=softmax)
    return network
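Design note: with backwards=True the backward LSTM's output sequence is returned flipped back to the input order, so the elementwise sum aligns timesteps; summing also keeps the feature width at hidden_layer_size, whereas the common alternative, concatenation (ConcatLayer), doubles it. A usage sketch with illustrative sizes, assuming create_blstm is in scope:

import theano
import theano.tensor as T
from lasagne.layers import get_output

x = T.tensor4('x')    # (batch, 1, 1, num_inputs)
m = T.matrix('mask')  # (batch, seqlen); 1.0 marks valid timesteps

net = create_blstm(x, m, num_inputs=20, depth=1,
                   hidden_layer_size=64, num_outputs=10)
predict = theano.function([x, m], get_output(net, deterministic=True))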
Example #5
    def test_specified_rng(self, input_layer):
        from lasagne.layers.noise import GaussianNoiseLayer
        from lasagne.random import get_rng, set_rng
        from numpy.random import RandomState
        import numpy
        import theano
        input = theano.shared(numpy.ones((100, 100)))
        seed = 123456789
        rng = get_rng()  # remember the global RNG so it can be restored

        set_rng(RandomState(seed))
        result = GaussianNoiseLayer(input_layer).get_output_for(input)
        result_eval1 = result.eval()

        set_rng(RandomState(seed))
        result = GaussianNoiseLayer(input_layer).get_output_for(input)
        result_eval2 = result.eval()

        set_rng(rng)  # reset to original RNG for other tests
        assert numpy.allclose(result_eval1, result_eval2)
Example #6
    def layer(self):
        from lasagne.layers.noise import GaussianNoiseLayer
        from mock import Mock
        return GaussianNoiseLayer(Mock(output_shape=(None, )))
Example #7
def build_enc_layer(incoming, name, transform, specs, activation, i,
                    p_drop_hidden, shared_net):
    net = OrderedDict()
    lname = 'enc_{}_{}'.format(i,
                               transform if 'pool' in transform else 'affine')
    nbatchn_lname = 'enc_batchn_{}_norm'.format(i)
    noise_lname = 'enc_noise_{}'.format(i)
    lbatchn_lname = 'enc_batchn_{}_learn'.format(i)

    if shared_net is None:
        # affine pars
        W = lasagne.init.GlorotUniform()
        # batchnorm pars
        beta = lasagne.init.Constant(0)
        gamma = None if activation == rectify else lasagne.init.Constant(1)
    else:
        # batchnorm pars
        beta = shared_net[lbatchn_lname + '_beta'].get_params()[0]
        gamma = None if activation == rectify else \
            shared_net[lbatchn_lname + '_gamma'].get_params()[0]
        if not isinstance(shared_net[lname], (pool, unpool)):
            # affine weights
            W = shared_net[lname].get_params()[0]
        else:
            W = None

    # affine (conv/dense/deconv) or (un)pooling transformation: $W \hat{h}$
    net[lname] = get_transform_layer(incoming, name + '_' + lname, transform,
                                     specs, W)

    # 1. batchnormalize without learning -> goes to combinator layer
    layer2bn = list(net.values())[-1]  # most recently added layer
    l_name = '{}_{}'.format(name, nbatchn_lname)
    # if the channel dimension is broadcastable, normalize over all axes
    bn_broadcast_cond = layer2bn.output_shape[1] == 1
    if len(layer2bn.output_shape) == 4 and bn_broadcast_cond:
        ax = (0, 1, 2, 3)
    elif len(layer2bn.output_shape) == 2 and bn_broadcast_cond:
        ax = (0, 1)
    else:
        ax = 'auto'
    net[nbatchn_lname] = BatchNormLayer(layer2bn,
                                        axes=ax,
                                        alpha=0.1,
                                        beta=None,
                                        gamma=None,
                                        name=l_name)
    if shared_net is None:
        # for dirty encoder -> add noise
        net[noise_lname] = GaussianNoiseLayer(list(net.values())[-1],
                                              sigma=p_drop_hidden,
                                              name='{}_{}'.format(
                                                  name, noise_lname))

    # 2. scaling & offsetting batchnormalization + noise
    l_name = '{}_{}'.format(name, lbatchn_lname)
    # offset by beta
    net[lbatchn_lname + '_beta'] = BiasLayer(list(net.values())[-1],
                                             b=beta,
                                             name=l_name + '_beta')
    if gamma is not None:
        # if not rectify, scale by gamma
        net[lbatchn_lname + '_gamma'] = ScaleLayer(list(net.values())[-1],
                                                   scales=gamma,
                                                   name=l_name + '_gamma')

    return net
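The batch normalization here is deliberately split in two: a parameter-free BatchNormLayer (beta=None, gamma=None) whose output can be tapped for the ladder combinator before any learned transformation, followed by a separate learned offset (BiasLayer) and, unless the activation is rectify, scale (ScaleLayer). A minimal standalone sketch of that split, independent of the project helpers above:

import lasagne.init
from lasagne.layers import (BatchNormLayer, BiasLayer, DenseLayer,
                            InputLayer, ScaleLayer)

l = InputLayer((None, 30))
l = DenseLayer(l, num_units=50, b=None)  # bias is redundant before BN

# normalize only: beta=None and gamma=None drop the learned parameters
l_norm = BatchNormLayer(l, beta=None, gamma=None, alpha=0.1)

# learned offset and scale, applied separately after the tap point
l_beta = BiasLayer(l_norm, b=lasagne.init.Constant(0))
l_out = ScaleLayer(l_beta, scales=lasagne.init.Constant(1))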
Example #8
    def layer(self):
        from lasagne.layers.noise import GaussianNoiseLayer
        from mock import Mock
        return GaussianNoiseLayer(Mock())