Example No. 1
 def test_undefined_shape(self, ScaleLayer):
     # should work:
     ScaleLayer((64, None, 3), shared_axes=(1, 2))
     # should not work:
     with pytest.raises(ValueError) as exc:
         ScaleLayer((64, None, 3), shared_axes=(0, 2))
     assert 'needs specified input sizes' in exc.value.args[0]
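The tests in this listing rely on two pytest fixtures, ScaleLayer and init_scales, whose definitions are not shown. A minimal sketch of what they might look like, assuming Lasagne's ScaleLayer class and an initializer callable that fills any requested shape with distinct values (the actual conftest may differ):

import numpy as np
import pytest
import theano


@pytest.fixture
def ScaleLayer():
    # the layer class under test
    from lasagne.layers import ScaleLayer
    return ScaleLayer


@pytest.fixture
def init_scales():
    # hypothetical initializer: a callable mapping a shape to an array of
    # distinct values, so the allclose() checks are sensitive to the shape
    # ScaleLayer actually allocates for its scales parameter
    def init(shape):
        size = int(np.prod(shape))
        return np.arange(1, size + 1,
                         dtype=theano.config.floatX).reshape(shape)
    return init
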
Example No. 2
 def test_scales_init(self, ScaleLayer, init_scales):
     input_shape = (2, 3, 4)
     # default: share scales over all but second axis
     b = ScaleLayer(input_shape, scales=init_scales).scales
     assert np.allclose(b.get_value(), init_scales((3, )))
     # share over first axis only
     b = ScaleLayer(input_shape, scales=init_scales, shared_axes=0).scales
     assert np.allclose(b.get_value(), init_scales((3, 4)))
     # share over second and third axis
     b = ScaleLayer(input_shape, scales=init_scales,
                    shared_axes=(1, 2)).scales
     assert np.allclose(b.get_value(), init_scales((2, )))
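The shape rule these assertions encode: every axis listed in shared_axes is removed from the shape of the learned scales parameter, and the default is to share over all axes except the second. A small standalone sketch of that rule (a hypothetical helper, not part of Lasagne):

def expected_scales_shape(input_shape, shared_axes):
    # hypothetical helper: shared axes carry a single scale each,
    # so they are dropped from the parameter shape
    if isinstance(shared_axes, int):
        shared_axes = (shared_axes,)
    return tuple(size for axis, size in enumerate(input_shape)
                 if axis not in shared_axes)


assert expected_scales_shape((2, 3, 4), (0, 2)) == (3,)   # the default
assert expected_scales_shape((2, 3, 4), 0) == (3, 4)
assert expected_scales_shape((2, 3, 4), (1, 2)) == (2,)
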
Example No. 3
 def test_get_output_for(self, ScaleLayer, init_scales):
     input_shape = (2, 3, 4)
     # random input tensor
     input = np.random.randn(*input_shape).astype(theano.config.floatX)
     # default: share scales over all but second axis
     layer = ScaleLayer(input_shape, scales=init_scales)
     assert np.allclose(
         layer.get_output_for(input).eval(), input * init_scales((1, 3, 1)))
     # share over first axis only
     layer = ScaleLayer(input_shape, scales=init_scales, shared_axes=0)
     assert np.allclose(
         layer.get_output_for(input).eval(), input * init_scales((1, 3, 4)))
     # share over second and third axis
     layer = ScaleLayer(input_shape, scales=init_scales, shared_axes=(1, 2))
     assert np.allclose(
         layer.get_output_for(input).eval(), input * init_scales((2, 1, 1)))
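The expected values here spell out the forward pass: the scales are broadcast back to the input's dimensionality by inserting singleton dimensions on every shared axis, then multiplied elementwise with the input. A numpy-only sketch of that broadcast (illustrative; the layer itself does this with Theano ops):

import numpy as np

x = np.random.randn(2, 3, 4).astype('float32')
scales = np.array([0.5, 1.0, 2.0], dtype='float32')  # one scale per axis-1 slice

# default sharing over axes (0, 2): singleton dimensions on the shared axes
out = x * scales.reshape((1, 3, 1))
assert out.shape == x.shape
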
Example No. 4
 def test_scales_init(self, ScaleLayer, init_scales):
     input_shape = (2, 3, 4)
     # default: share scales over all but second axis
     b = ScaleLayer(input_shape, scales=init_scales).scales
     assert np.allclose(b.get_value(), init_scales((3,)))
     # share over first axis only
     b = ScaleLayer(input_shape, scales=init_scales, shared_axes=0).scales
     assert np.allclose(b.get_value(), init_scales((3, 4)))
     # share over second and third axis
     b = ScaleLayer(
         input_shape, scales=init_scales, shared_axes=(1, 2)).scales
     assert np.allclose(b.get_value(), init_scales((2,)))
Example No. 5
 def test_get_output_for(self, ScaleLayer, init_scales):
     input_shape = (2, 3, 4)
     # random input tensor
     input = np.random.randn(*input_shape).astype(theano.config.floatX)
     # default: share scales over all but second axis
     layer = ScaleLayer(input_shape, scales=init_scales)
     assert np.allclose(layer.get_output_for(input).eval(),
                        input * init_scales((1, 3, 1)))
     # share over first axis only
     layer = ScaleLayer(input_shape, scales=init_scales, shared_axes=0)
     assert np.allclose(layer.get_output_for(input).eval(),
                        input * init_scales((1, 3, 4)))
     # share over second and third axis
     layer = ScaleLayer(input_shape, scales=init_scales, shared_axes=(1, 2))
     assert np.allclose(layer.get_output_for(input).eval(),
                        input * init_scales((2, 1, 1)))
Example No. 6
def build_enc_layer(incoming, name, transform, specs, activation, i,
                    p_drop_hidden, shared_net):
    net = OrderedDict()
    lname = 'enc_{}_{}'.format(i,
                               transform if 'pool' in transform else 'affine')
    nbatchn_lname = 'enc_batchn_{}_norm'.format(i)
    noise_lname = 'enc_noise_{}'.format(i)
    lbatchn_lname = 'enc_batchn_{}_learn'.format(i)

    if shared_net is None:
        # affine pars
        W = lasagne.init.GlorotUniform()
        # batchnorm pars
        beta = lasagne.init.Constant(0)
        gamma = None if activation == rectify else lasagne.init.Constant(1)
    else:
        # batchnorm pars
        beta = shared_net[lbatchn_lname + '_beta'].get_params()[0]
        gamma = None if activation == rectify else \
            shared_net[lbatchn_lname + '_gamma'].get_params()[0]
        if not isinstance(shared_net[lname], (pool, unpool)):
            # affine weights
            W = shared_net[lname].get_params()[0]
        else:
            W = None

    # affine (conv/dense/deconv) or (un)pooling transformation: $W \hat{h}$
    net[lname] = get_transform_layer(incoming, name + '_' + lname, transform,
                                     specs, W)

    # 1. batchnormalize without learning -> goes to combinator layer
    layer2bn = list(net.values())[-1]  # most recently added layer
    l_name = '{}_{}'.format(name, nbatchn_lname)
    bn_broadcast_cond = layer2bn.output_shape[1] == 1
    if len(layer2bn.output_shape) == 4 and bn_broadcast_cond:
        ax = (0, 1, 2, 3)
    elif len(layer2bn.output_shape) == 2 and bn_broadcast_cond:
        ax = (0, 1)
    else:
        ax = 'auto'
    net[nbatchn_lname] = BatchNormLayer(layer2bn,
                                        axes=ax,
                                        alpha=0.1,
                                        beta=None,
                                        gamma=None,
                                        name=l_name)
    if shared_net is None:
        # for dirty encoder -> add noise
        net[noise_lname] = GaussianNoiseLayer(list(net.values())[-1],
                                              sigma=p_drop_hidden,
                                              name='{}_{}'.format(
                                                  name, noise_lname))

    # 2. scaling & offsetting batchnormalization + noise
    l_name = '{}_{}'.format(name, lbatchn_lname)
    # offset by beta
    net[lbatchn_lname + '_beta'] = BiasLayer(list(net.values())[-1],
                                             b=beta,
                                             name=l_name + '_beta')
    if gamma is not None:
        # if not rectify, scale by gamma
        net[lbatchn_lname + '_gamma'] = ScaleLayer(list(net.values())[-1],
                                                   scales=gamma,
                                                   name=l_name + '_gamma')

    return net
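
build_enc_layer splits batch normalization into two stages so the purely normalized activation can be tapped for the ladder combinator: a BatchNormLayer with beta=None and gamma=None only normalizes, Gaussian noise is added on the dirty path, and the learnable offset and scale are applied afterwards by a separate BiasLayer and ScaleLayer (the scale is skipped for rectify activations). A minimal standalone sketch of that split, with assumed layer sizes and noise level purely for illustration:

import lasagne
from lasagne.layers import (InputLayer, DenseLayer, BatchNormLayer,
                            GaussianNoiseLayer, BiasLayer, ScaleLayer)
from lasagne.nonlinearities import identity

l_in = InputLayer((None, 784))
l_aff = DenseLayer(l_in, num_units=256, b=None, nonlinearity=identity)

# stage 1: normalize only; no learnable offset/scale, so the clean
# normalized activation can be fed to the decoder's combinator
l_norm = BatchNormLayer(l_aff, alpha=0.1, beta=None, gamma=None)
l_noisy = GaussianNoiseLayer(l_norm, sigma=0.3)  # dirty encoder path only

# stage 2: learnable offset, then scale (skipped when the activation is rectify)
l_beta = BiasLayer(l_noisy, b=lasagne.init.Constant(0))
l_gamma = ScaleLayer(l_beta, scales=lasagne.init.Constant(1))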