Example #1
    def initNetwork2(self, input_dim, F1, F2, num_classes):
        dropout = self.dropout
        C,H,W = input_dim
        batch_norm_dropout = lambda x:lasagne.layers.DropoutLayer(batch_norm(x), p=dropout)        
        net = batch_norm(lasagne.layers.InputLayer(shape=(None, C, H, W), input_var=self.xvar))
        self.penaltyL1 = lasagne.regularization.regularize_network_params(net, lasagne.regularization.l2)*0
        if True:
            net = batch_norm_dropout(lasagne.layers.Conv2DLayer(net, num_filters=F1, filter_size=7, stride=3, pad=0, W=self.Wnorm()))
            self.penaltyL1 = lasagne.regularization.regularize_network_params(net, lasagne.regularization.l2)*self.regL1
            net = batch_norm_dropout(lasagne.layers.Conv2DLayer(net, num_filters=F2, filter_size=5, stride=1, pad=0, W=self.Wnorm()))
        elif False:
            net = batch_norm_dropout(lasagne.layers.Conv2DLayer(net, num_filters=F1, filter_size=7, stride=3, pad=0, W=self.Wnorm()))
            net = batch_norm_dropout(lasagne.layers.Conv2DLayer(net, num_filters=F2, filter_size=3, stride=1, pad=0, W=self.Wnorm()))
            net = batch_norm_dropout(lasagne.layers.Conv2DLayer(net, num_filters=F2, filter_size=3, stride=1, pad=0, W=self.Wnorm()))
        elif False:
            net = batch_norm_dropout(lasagne.layers.Conv2DLayer(net, num_filters=F1, filter_size=3, stride=3, pad=1, W=self.Wnorm()))
            for _ in xrange(3):
                net = batch_norm_dropout(lasagne.layers.Conv2DLayer(net, num_filters=F2, filter_size=3, stride=1, pad=0, W=self.Wnorm()))
        else:
            #net = batch_norm_dropout(lasagne.layers.Conv2DLayer(net, num_filters=F1, filter_size=3, stride=1, pad=0, W=self.Wnorm()))
            #net = batch_norm_dropout(lasagne.layers.Conv2DLayer(net, num_filters=F2, filter_size=3, stride=1, pad=0, W=self.Wnorm()))
            net = batch_norm_dropout(lasagne.layers.Conv2DLayer(net, num_filters=F1, filter_size=5, stride=1, pad=0, W=self.Wnorm()))
            net = batch_norm_dropout(lasagne.layers.Conv2DLayer(net, num_filters=F2, filter_size=3, stride=3, pad=0, W=self.Wnorm()))
            net = batch_norm_dropout(lasagne.layers.Conv2DLayer(net, num_filters=F2, filter_size=3, stride=1, pad=0, W=self.Wnorm()))
            net = batch_norm_dropout(lasagne.layers.Conv2DLayer(net, num_filters=F2, filter_size=3, stride=1, pad=0, W=self.Wnorm()))

        net = lasagne.layers.MaxPool2DLayer(net, pool_size=2, stride=2, pad=0)
        net = lasagne.layers.Conv2DLayer(net, num_filters=num_classes, filter_size=1, stride=1, pad=0, nonlinearity=None, W=self.Wnorm())
        net = lasagne.layers.DimshuffleLayer(net, (0,2,3,1))
        net = lasagne.layers.ReshapeLayer(net, (-1, num_classes))
        net = lasagne.layers.NonlinearityLayer(net, nonlinearity=lasagne.nonlinearities.softmax)

        return net
  def build_network(self, input_var = None, batch_size = None):


		print "build_network in VideoClassifier executed.."
		print "inputs are : " , self.sinputs

		if input_var is not None: self.sinputs = input_var
		if batch_size is not None:
			self.batch_size = batch_size


		# Concatenate (fuse) the incoming right/left feature layers
		self.network['ConcatLayer'] = lasagne.layers.ConcatLayer([self.right_network['FC_2'], self.left_network['FC_2']], axis=1, cropping=None)


		self.network['FC_3'] = batch_norm(lasagne.layers.DenseLayer(
							lasagne.layers.dropout(self.network['ConcatLayer'], p=self.dropout_rates[0]),
							num_units=84,
							nonlinearity=lasagne.nonlinearities.tanh))


		self.network['prob'] = batch_norm(lasagne.layers.DenseLayer(
							lasagne.layers.dropout(self.network['FC_3'], p=self.dropout_rates[2]),
							num_units=self.fc_layers[2],
							nonlinearity=lasagne.nonlinearities.softmax))



		return self.network
def lasagne_model():
    l_in = InputLayer(shape=(None, 1)+speechSize)

    l_conv1 = Conv2DLayer(l_in, num_filters = 128, filter_size=(3,3), nonlinearity=rectify)
    l_conv1b = Conv2DLayer(l_conv1, num_filters = 128, filter_size=(3,3), nonlinearity=rectify)
    l_conv1b = batch_norm(l_conv1b)
    l_pool1 = MaxPool2DLayer(l_conv1b, pool_size=(2,2))
    # l_pool1 = DropoutLayer(l_pool1, p=0.2)

    l_conv2 = Conv2DLayer(l_pool1, num_filters = 256, filter_size=(3,3), nonlinearity=rectify)
    l_conv2b = Conv2DLayer(l_conv2, num_filters = 256, filter_size=(3,3), nonlinearity=rectify)
    l_conv2b = batch_norm(l_conv2b)
    l_pool2 = MaxPool2DLayer(l_conv2b, pool_size=(2,2))
    # l_pool2 = DropoutLayer(l_pool2, p=0.2)

    # TODO: this tanh choice still needs to be investigated!
    l_hidden3 = DenseLayer(l_pool2, num_units = h_dimension, nonlinearity=tanh)
    l_hidden3 = batch_norm(l_hidden3)
    # l_hidden3 = DropoutLayer(l_hidden3, p=0.3)

    l_hidden4 = DenseLayer(l_hidden3, num_units = h_dimension, nonlinearity=tanh)
    # l_hidden4 = DropoutLayer(l_hidden4, p=0.5)

    l_out = DenseLayer(l_hidden4, num_units=num_labels_train, nonlinearity=softmax)

    return l_out,l_hidden4
  def build_network(self, input_var=None, batch_size = None):

		print "build_network() in SkeletonClassifier invoked"
		print self.sinputs

		if input_var is not None: self.sinputs = input_var
		if batch_size is not None:
			self.batch_size = batch_size

		self.network['input'] = lasagne.layers.InputLayer(shape=(self.batch_size,self.nframes,1,self.dlength), input_var=self.sinputs[0])

		self.network['FC_1'] = batch_norm(lasagne.layers.DenseLayer( lasagne.layers.dropout(self.network['input'], p=self.dropout_rates[1]),
					num_units=self.fc_layers[0],nonlinearity=lasagne.nonlinearities.tanh))

		self.network['FC_2'] = batch_norm(lasagne.layers.DenseLayer(
					lasagne.layers.dropout(self.network['FC_1'], p=self.dropout_rates[2]),
					num_units=self.fc_layers[1],
					nonlinearity=lasagne.nonlinearities.tanh))

		self.network['FC_3'] = batch_norm(lasagne.layers.DenseLayer(
					lasagne.layers.dropout(self.network['FC_2'], p=self.dropout_rates[3]),
					num_units=self.fc_layers[2],
					nonlinearity=lasagne.nonlinearities.tanh))

		self.network['prob'] = lasagne.layers.DenseLayer(
					lasagne.layers.dropout(self.network['FC_3'], p=.2),
					num_units=self.nclasses,
					nonlinearity=lasagne.nonlinearities.softmax)

		return self.network
Example #5
def test_batch_norm_macro():
    from lasagne.layers import (Layer, BatchNormLayer, batch_norm,
                                NonlinearityLayer)
    from lasagne.nonlinearities import identity
    input_shape = (2, 3)
    obj = object()

    # check if it steals the nonlinearity
    layer = Mock(Layer, output_shape=input_shape, nonlinearity=obj)
    bnstack = batch_norm(layer)
    assert isinstance(bnstack, NonlinearityLayer)
    assert isinstance(bnstack.input_layer, BatchNormLayer)
    assert layer.nonlinearity is identity
    assert bnstack.nonlinearity is obj

    # check if it removes the bias
    layer = Mock(Layer, output_shape=input_shape, b=obj, params={obj: set()})
    bnstack = batch_norm(layer)
    assert isinstance(bnstack, BatchNormLayer)
    assert layer.b is None
    assert obj not in layer.params

    # check if it can handle an unset bias
    layer = Mock(Layer, output_shape=input_shape, b=None, params={obj: set()})
    bnstack = batch_norm(layer)
    assert isinstance(bnstack, BatchNormLayer)
    assert layer.b is None

    # check if it passes on kwargs
    layer = Mock(Layer, output_shape=input_shape)
    bnstack = batch_norm(layer, name='foo')
    assert isinstance(bnstack, BatchNormLayer)
    assert bnstack.name == 'foo'
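
A minimal sketch (not part of the test above) of what the batch_norm() macro is asserted to do: it removes the wrapped layer's bias and nonlinearity, inserts a BatchNormLayer, and re-applies the original nonlinearity on top.

import lasagne
from lasagne.layers import InputLayer, DenseLayer, batch_norm

l_in = InputLayer((None, 10))
l_dense = DenseLayer(l_in, num_units=32,
                     nonlinearity=lasagne.nonlinearities.rectify)
l_bn = batch_norm(l_dense)   # NonlinearityLayer(BatchNormLayer(l_dense))

print(type(l_bn).__name__)               # NonlinearityLayer
print(type(l_bn.input_layer).__name__)   # BatchNormLayer
print(l_dense.b)                         # None -- bias removed, BN's beta replaces it
print(l_dense.nonlinearity is lasagne.nonlinearities.identity)   # True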
Example #6
def build_contract_level(incoming,
                         num_filters,
                         nonlin,
                         W_init=lasagne.init.GlorotUniform(),
                         b_init=lasagne.init.Constant(0.01),
                         filter_size=3):
    """Builds a Conv-Conv-Pool block of the U-Net encoder."""

    network = nn.Conv2DLayer(incoming,
                             num_filters,
                             filter_size,
                             pad='same',
                             W=W_init,
                             b=b_init,
                             nonlinearity=nonlin)
    network = nn.batch_norm(network)
    network = nn.Conv2DLayer(network,
                             num_filters,
                             filter_size,
                             pad='same',
                             W=W_init,
                             b=b_init,
                             nonlinearity=nonlin)
    network = nn.batch_norm(network)
    return network, nn.MaxPool2DLayer(network, 2)
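
A hedged usage sketch (the input shape and the nn/nl aliases are assumptions matching the snippet's style): each call returns the pre-pool feature map, kept for the later skip connection, plus the pooled tensor that feeds the next encoder level.

import lasagne
import lasagne.layers as nn
import lasagne.nonlinearities as nl

l_in = nn.InputLayer((None, 1, 256, 256))
enc1, down1 = build_contract_level(l_in, num_filters=32, nonlin=nl.rectify)
enc2, down2 = build_contract_level(down1, num_filters=64, nonlin=nl.rectify)
# enc1/enc2 would later be concatenated with the decoder path as skip connections.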
Example #7
def pons_cnn(params):
    """"""
    layers = L.InputLayer((None, 1, params['dur'], 128))
    print layers.output_shape

    sclr = joblib.load(params['scaler'])
    layers = L.standardize(layers,
                           sclr.mean_.astype(np.float32),
                           sclr.scale_.astype(np.float32),
                           shared_axes=(0, 1, 2))
    print layers.output_shape

    layers_timbre = L.GlobalPoolLayer(
        L.batch_norm(L.Conv2DLayer(layers, 64, (1, 96))))

    layers_rhythm = L.GlobalPoolLayer(
        L.batch_norm(L.Conv2DLayer(layers, 64, (params['dur'] - 10, 1))))

    layers = L.ConcatLayer([layers_rhythm, layers_timbre], axis=-1)

    layers = L.DenseLayer(layers, 64, nonlinearity=nl.rectify)
    print layers.output_shape

    layers = L.DenseLayer(layers, 16, nonlinearity=nl.softmax)
    print layers.output_shape

    return layers
def createDiscriminator2(input_var=None):

	_ = InputLayer(shape=(None, 3, 64, 64), input_var=input_var)
	_ = batch_norm(Conv2DDNNLayer(_, 64, 3, pad='same'))
	_ = batch_norm(Conv2DDNNLayer(_, 64, 3, pad='same'))
	_ = MaxPool2DDNNLayer(_, 2)
	_ = batch_norm(Conv2DDNNLayer(_, 64, 3, pad='same'))
	_ = MaxPool2DDNNLayer(_, 2)
	_ = batch_norm(Conv2DDNNLayer(_, 128, 3, pad='same'))
	_ = batch_norm(Conv2DDNNLayer(_, 128, 3, pad='same'))
	_ = FlattenLayer(_)
	_ = DenseLayer(_, num_units=1000, nonlinearity=lasagne.nonlinearities.rectify)
	l_discriminator = DenseLayer(_, num_units=1, nonlinearity=lasagne.nonlinearities.sigmoid)

	print('--------------------')
	print('Discriminator architecture: \n')

	#get all layers
	allLayers=lasagne.layers.get_all_layers(l_discriminator)
	#for each layer print its shape information
	for l in allLayers:
		print(lasagne.layers.get_output_shape(l))

	print ("Discriminator output:", l_discriminator.output_shape)
	return l_discriminator
def normal(ilayer,fmaps,activation,t='enc',ltype='normal'):
     if t == 'enc':
          x = batch_norm(lasagne.layers.Conv2DLayer(
               ilayer, num_filters=fmaps[0],filter_size=(3,3),
               nonlinearity=None,pad=1,
               W=initf
          ))
     else:
          x = batch_norm(lasagne.layers.Conv2DLayer(
               ilayer, num_filters=fmaps[0],filter_size=(3,3),
               nonlinearity=activation,pad=1,
               W=initf
          ))
     if ltype == 'normal':
          x = batch_norm(lasagne.layers.Conv2DLayer(
               x, num_filters=fmaps[1],filter_size=(3,3),
               nonlinearity=activation,pad=1,
               W=initf
          ))
     elif ltype == 'residual':
          x = batch_norm(lasagne.layers.Conv2DLayer(
               x, num_filters=fmaps[1],filter_size=(3,3),
               nonlinearity=None,pad=1,
               W=initf
          ))
          y = lasagne.layers.Conv2DLayer(
               ilayer, num_filters=fmaps[1],filter_size=(1,1),
               nonlinearity=None,pad='same', W=initf)
          x = ElemwiseSumLayer([x, y])
          x = NonlinearityLayer(x,nonlinearity=activation)
     return x
def build_discriminator(input_var=None, convs=0):
    from lasagne.layers import (InputLayer, DenseLayer, batch_norm)
    from lasagne.layers.dnn import Conv2DDNNLayer as Conv2DLayer  # override
    from lasagne.nonlinearities import LeakyRectify, sigmoid
    lrelu = LeakyRectify(0.2)
    if convs == 0:
        # input: (None, 1, 64, 64)
        layer = InputLayer(shape=(None, 1, 64, 64), input_var=input_var)
        # two convolutions
        layer = batch_norm(
            Conv2DLayer(layer, 64, 5, stride=2, pad=2, nonlinearity=lrelu))
        layer = batch_norm(
            Conv2DLayer(layer, 128, 5, stride=2, pad=2, nonlinearity=lrelu))
    else:
        # input: (None, 1, 128, 128)
        layer = InputLayer(shape=(None, 1, 128, 128), input_var=input_var)
        # two convolutions
        layer = batch_norm(
            Conv2DLayer(layer, 64, 5, stride=2, pad=2, nonlinearity=lrelu))
        layer = batch_norm(
            Conv2DLayer(layer, 128, 5, stride=2, pad=2, nonlinearity=lrelu))
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
    # output layer
    layer = DenseLayer(layer, 1, nonlinearity=sigmoid)
    print("Discriminator output:", layer.output_shape)
    return layer
Example #11
def get_model(input_var, target_var, multiply_var):

    # input layer with unspecified batch size
    layer     = InputLayer(shape=(None, 12, 64, 64), input_var=input_var) #InputLayer(shape=(None, 1, 30, 64, 64), input_var=input_var)
    layer     = DimshuffleLayer(layer, (0, 'x', 1, 2, 3))

    # Z-score?

    # Convolution, then batch normalisation, then activation
    layer         = batch_norm(Conv3DDNNLayer(incoming=layer, num_filters=16, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=rectify))
    layer         = batch_norm(Conv3DDNNLayer(incoming=layer, num_filters=16, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=rectify))
    layer         = Conv3DDNNLayer(incoming=layer, num_filters=1, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=sigmoid)
    layer_prediction  = layer

    # Loss
    prediction           = get_output(layer_prediction)
    loss                 = binary_crossentropy(prediction[:,0,:,:,:], target_var).mean()

    #Updates : Stochastic Gradient Descent (SGD) with Nesterov momentum
    params               = get_all_params(layer_prediction, trainable=True)

    # Create a loss expression for validation/testing. The crucial difference
    # here is that we do a deterministic forward pass through the network, disabling dropout layers.
    test_prediction      = get_output(layer_prediction, deterministic=True)
    test_loss            = binary_crossentropy(test_prediction[:,0,:,:,:], target_var).mean()

    return test_prediction, prediction, loss, params
Example #12
def discriminator_3D(input_var=None, num_units=512, seq_length=4):
    discriminator = []
    lrelu = lasagne.nonlinearities.LeakyRectify(0.2)

    discriminator.append(
        ll.InputLayer(shape=(None, seq_length, 3, 80, 160),
                      input_var=input_var))

    # the lasagne documentation requires the shape
    # (batch_size, num_input_channels, input_depth, input_rows, input_columns),
    # so we need to change the dimension ordering

    discriminator.append(ll.DimshuffleLayer(discriminator[-1],
                                            (0, 2, 1, 3, 4)))

    discriminator.append(
        ll.Conv3DLayer(discriminator[-1],
                       num_filters=num_units / 8,
                       filter_size=5,
                       stride=2,
                       pad=2,
                       nonlinearity=lrelu))

    discriminator.append(
        ll.batch_norm(
            ll.Conv3DLayer(discriminator[-1],
                           num_filters=num_units / 4,
                           filter_size=5,
                           stride=2,
                           pad=2,
                           nonlinearity=lrelu)))

    discriminator.append(
        ll.batch_norm(
            ll.Conv3DLayer(discriminator[-1],
                           num_filters=num_units / 2,
                           filter_size=5,
                           stride=2,
                           pad=2,
                           nonlinearity=lrelu)))

    discriminator.append(
        ll.batch_norm(
            ll.Conv3DLayer(discriminator[-1],
                           num_filters=num_units,
                           filter_size=5,
                           stride=2,
                           pad=2,
                           nonlinearity=lrelu)))

    discriminator.append(ll.FlattenLayer(discriminator[-1]))

    discriminator.append(
        ll.DenseLayer(discriminator[-1], num_units=1, nonlinearity=None))

    for layer in discriminator:
        print layer.output_shape
    print ""

    return discriminator
def build_discriminator_32(image=None, ndf=128):
    lrelu = LeakyRectify(0.2)
    # input: images
    InputImg = InputLayer(shape=(None, 3, 32, 32), input_var=image)
    print("Dis Img_input:", InputImg.output_shape)
    # Conv Layer
    dis1 = Conv2DLayer(InputImg,
                       ndf, (4, 4), (2, 2),
                       pad=1,
                       W=Normal(0.02),
                       nonlinearity=lrelu)
    print("Dis conv1:", dis1.output_shape)
    # Conv Layer
    dis2 = batch_norm(
        Conv2DLayer(dis1,
                    ndf * 2, (4, 4), (2, 2),
                    pad=1,
                    W=Normal(0.02),
                    nonlinearity=lrelu))
    print("Dis conv2:", dis2.output_shape)
    # Conv Layer
    dis3 = batch_norm(
        Conv2DLayer(dis2,
                    ndf * 4, (4, 4), (2, 2),
                    pad=1,
                    W=Normal(0.02),
                    nonlinearity=lrelu))
    print("Dis conv3:", dis3.output_shape)
    # Conv Layer
    dis4 = DenseLayer(dis3, 1, W=Normal(0.02), nonlinearity=sigmoid)
    print("Dis output:", dis4.output_shape)
    return dis4
Example #14
def build_discriminator(input_var=None, convs=0):
    from lasagne.layers import (InputLayer, DenseLayer, batch_norm)
    from lasagne.layers.dnn import Conv2DDNNLayer as Conv2DLayer  # override
    from lasagne.nonlinearities import LeakyRectify, sigmoid
    lrelu = LeakyRectify(0.2)
    if convs == 0:
        # input: (None, 1, 64, 64)
        layer = InputLayer(shape=(None, 1, 64, 64), input_var=input_var)
        # two convolutions
        layer = batch_norm(
            Conv2DLayer(layer, 64, 5, stride=2, pad=2, nonlinearity=lrelu))
        layer = batch_norm(
            Conv2DLayer(layer, 128, 5, stride=2, pad=2, nonlinearity=lrelu))
    else:
        # input: (None, 1, 128, 128)
        layer = InputLayer(shape=(None, 1, 128, 128), input_var=input_var)
        # two convolutions
        layer = batch_norm(
            Conv2DLayer(layer, 64, 5, stride=2, pad=2, nonlinearity=lrelu))
        layer = batch_norm(
            Conv2DLayer(layer, 128, 5, stride=2, pad=2, nonlinearity=lrelu))
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
    # output layer
    layer = DenseLayer(layer, 1, nonlinearity=sigmoid)
    print("Discriminator output:", layer.output_shape)
    return layer
Example #15
def build_model(nActions):
    net = OrderedDict()
    net['input'] = InputLayer((None, 4, 84, 84))
    net['conv1'] = batch_norm(
        ConvLayer(net['input'],
                  num_filters=32,
                  filter_size=8,
                  stride=4,
                  pad='valid',
                  nonlinearity=ReLU))
    net['conv2'] = batch_norm(
        ConvLayer(net['conv1'],
                  num_filters=64,
                  filter_size=4,
                  stride=2,
                  pad='valid',
                  nonlinearity=ReLU))
    net['conv3'] = batch_norm(
        ConvLayer(net['conv2'],
                  num_filters=64,
                  filter_size=3,
                  stride=1,
                  pad='valid',
                  nonlinearity=ReLU))
    net['fc4'] = batch_norm(
        DenseLayer(net['conv3'], num_units=512, nonlinearity=ReLU))
    net['fc5'] = DenseLayer(net['fc4'],
                            num_units=nActions,
                            nonlinearity=linear)
    return net
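
A minimal usage sketch (not from the original snippet; the state tensor and nActions value are placeholders) showing how the returned dict of layers could be compiled into a Q-value function:

import theano
import theano.tensor as T
import lasagne

states = T.tensor4('states')                 # (batch, 4, 84, 84) stacked frames
net = build_model(nActions=4)
q_values = lasagne.layers.get_output(net['fc5'], inputs=states,
                                     deterministic=True)
q_fn = theano.function([states], q_values)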
Example #16
 def build_nn(self, l_obs):
     dense_layer = batch_norm(lasagne.layers.DenseLayer(l_obs,
                                                        num_units=64, nonlinearity=lasagne.nonlinearities.tanh))
     dense_layer = DropoutLayer(dense_layer)
     output_layer = batch_norm(lasagne.layers.DenseLayer(dense_layer, num_units=self.y_test.shape[1],
                                                         nonlinearity=lasagne.nonlinearities.sigmoid))
     return output_layer
    def residual_block(l, increase_dim=False, projection=False):
        input_num_filters = l.output_shape[1]
        if increase_dim:
            first_stride = (2,2)
            out_num_filters = input_num_filters*2
        else:
            first_stride = (1,1)
            out_num_filters = input_num_filters

        stack_1 = batch_norm(ConvLayer(l, num_filters=out_num_filters, filter_size=(3,3), stride=first_stride, nonlinearity=rectify, pad='same', W=lasagne.init.HeNormal(gain='relu'), flip_filters=False))
        stack_2 = batch_norm(ConvLayer(stack_1, num_filters=out_num_filters, filter_size=(3,3), stride=(1,1), nonlinearity=None, pad='same', W=lasagne.init.HeNormal(gain='relu'), flip_filters=False))
        
        # add shortcut connections
        if increase_dim:
            if projection:
                # projection shortcut, as option B in paper
                projection = batch_norm(ConvLayer(l, num_filters=out_num_filters, filter_size=(1,1), stride=(2,2), nonlinearity=None, pad='same', b=None, flip_filters=False))
                block = NonlinearityLayer(ElemwiseSumLayer([stack_2, projection]),nonlinearity=rectify)
            else:
                # identity shortcut, as option A in paper
                identity = ExpressionLayer(l, lambda X: X[:, :, ::2, ::2], lambda s: (s[0], s[1], s[2]//2, s[3]//2))
                padding = PadLayer(identity, [out_num_filters//4,0,0], batch_ndim=1)
                block = NonlinearityLayer(ElemwiseSumLayer([stack_2, padding]),nonlinearity=rectify)
        else:
            block = NonlinearityLayer(ElemwiseSumLayer([stack_2, l]),nonlinearity=rectify)
        
        return block
Example #18
def encoder(z_dim=100, input_var=None, num_units=512, vae=True):
    encoder = []
    lrelu = lasagne.nonlinearities.LeakyRectify(0.2)

    encoder.append(ll.InputLayer(shape=(None, 3, 80, 160),
                                 input_var=input_var))

    encoder.append(
        ll.Conv2DLayer(encoder[-1],
                       num_filters=num_units / 8,
                       filter_size=(5, 5),
                       stride=2,
                       pad=2,
                       nonlinearity=lrelu))

    encoder.append(
        ll.batch_norm(
            ll.Conv2DLayer(encoder[-1],
                           num_filters=num_units / 4,
                           filter_size=(5, 5),
                           stride=2,
                           pad=2,
                           nonlinearity=lrelu)))

    encoder.append(
        ll.batch_norm(
            ll.Conv2DLayer(encoder[-1],
                           num_filters=num_units / 2,
                           filter_size=(5, 5),
                           stride=2,
                           pad=2,
                           nonlinearity=lrelu)))

    encoder.append(
        ll.batch_norm(
            ll.Conv2DLayer(encoder[-1],
                           num_filters=num_units,
                           filter_size=(5, 5),
                           stride=2,
                           pad=2,
                           nonlinearity=lrelu)))

    encoder.append(ll.FlattenLayer(encoder[-1]))

    if vae:
        enc_mu = ll.DenseLayer(encoder[-1], num_units=z_dim, nonlinearity=None)

        enc_logsigma = ll.DenseLayer(encoder[-1],
                                     num_units=z_dim,
                                     nonlinearity=None)

        l_z = GaussianSampleLayer(enc_mu, enc_logsigma, name='Z layer')

        encoder += [enc_mu, enc_logsigma, l_z]

    for layer in encoder:
        print layer.output_shape
    print ""

    return encoder
Example #19
def get_model(input_var, target_var, multiply_var):

    # input layer with unspecified batch size
    layer     = InputLayer(shape=(None, 30, 64, 64), input_var=input_var) #InputLayer(shape=(None, 1, 30, 64, 64), input_var=input_var)
    layer     = DimshuffleLayer(layer, (0, 'x', 1, 2, 3))

    # Z-score?

    # Convolution, then batch normalisation, then activation
    layer         = batch_norm(Conv3DDNNLayer(incoming=layer, num_filters=16, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=rectify))
    layer         = batch_norm(Conv3DDNNLayer(incoming=layer, num_filters=16, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=rectify))
    layer         = batch_norm(Conv3DDNNLayer(incoming=layer, num_filters=1, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=rectify))
    layer_prediction  = layer

    # Loss
    prediction           = get_output(layer_prediction)
    loss                 = categorical_crossentropy(prediction.flatten(), target_var.flatten())

    #Updates : Stochastic Gradient Descent (SGD) with Nesterov momentum
    params               = get_all_params(layer_prediction, trainable=True)

    # Create a loss expression for validation/testing. The crucial difference
    # here is that we do a deterministic forward pass through the network, disabling dropout layers.
    test_prediction      = get_output(layer_prediction, deterministic=True)
    test_loss            = categorical_crossentropy(test_prediction.flatten(), target_var.flatten())

    return test_prediction, prediction, loss, params
Example #20
def build_cnn(config, use_noise=True, use_bn=True):
    
    # NOTE: Neither Conv2DDNNLayer nor Conv2DMMLayer works with the T.Rop
    # operation, which is used for the Fisher-vector product.
    
    l_input = L.InputLayer((None, 1, config['height'], config['width']))

    l_out = L.Conv2DLayer(l_input,
        num_filters=config['cnn_f1'], filter_size=(6,6), stride=2,
        nonlinearity=relu, W=LI.HeUniform('relu'), b=LI.Constant(0.)
    )
    
    # https://arxiv.org/pdf/1602.01407v2.pdf
    # QUOTE: KFC-pre and BN can be combined synergistically.
    
    if use_bn: l_out = L.batch_norm(l_out, beta=None, gamma=None)

    l_out = L.Conv2DLayer(l_out,
        num_filters=config['cnn_f2'], filter_size=(4,4), stride=2,
        nonlinearity=relu, W=LI.HeUniform('relu'), b=LI.Constant(0.)
    )
    
    if use_bn: l_out = L.batch_norm(l_out, beta=None, gamma=None)
    if use_noise: l_out = L.dropout(l_out)
    
    l_out = L.Conv2DLayer(l_out,
        num_filters=config['cnn_f3'], filter_size=(4,4), stride=2,
        nonlinearity=relu, W=LI.HeUniform('relu'), b=LI.Constant(0.)
    )
    
    if use_bn: l_out = L.batch_norm(l_out, beta=None, gamma=None)
    if use_noise: l_out = L.dropout(l_out)
    
    return l_input, l_out
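
A hedged usage sketch; the config keys below mirror the ones read inside build_cnn, and the concrete values are placeholders.

import theano
import lasagne.layers as L

config = {'height': 84, 'width': 84, 'cnn_f1': 16, 'cnn_f2': 32, 'cnn_f3': 32}
l_input, l_out = build_cnn(config, use_noise=False, use_bn=True)

# The InputLayer was created without an input_var, so grab the auto-created one.
x = l_input.input_var
feature_fn = theano.function([x], L.get_output(l_out, deterministic=True))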
Example #21
    def residual_block(l, increase_dim=False, projection=False):
        input_num_filters = l.output_shape[1]
        if increase_dim:
            first_stride = (2,2)
            out_num_filters = input_num_filters*2
        else:
            first_stride = (1,1)
            out_num_filters = input_num_filters

        stack_1 = batch_norm(ConvLayer(l, num_filters=out_num_filters, filter_size=(3,3), stride=first_stride, nonlinearity=rectify, pad='same', W=lasagne.init.HeNormal(gain='relu'), flip_filters=False))
        stack_2 = batch_norm(ConvLayer(stack_1, num_filters=out_num_filters, filter_size=(3,3), stride=(1,1), nonlinearity=None, pad='same', W=lasagne.init.HeNormal(gain='relu'), flip_filters=False))

        # add shortcut connections
        if increase_dim:
            if projection:
                # projection shortcut, as option B in paper
                projection = batch_norm(ConvLayer(l, num_filters=out_num_filters, filter_size=(1,1), stride=(2,2), nonlinearity=None, pad='same', b=None, flip_filters=False))
                block = NonlinearityLayer(ElemwiseSumLayer([stack_2, projection]),nonlinearity=rectify)
            else:
                # identity shortcut, as option A in paper
                identity = ExpressionLayer(l, lambda X: X[:, :, ::2, ::2], lambda s: (s[0], s[1], s[2]//2, s[3]//2))
                padding = PadLayer(identity, [out_num_filters//4,0,0], batch_ndim=1)
                block = NonlinearityLayer(ElemwiseSumLayer([stack_2, padding]),nonlinearity=rectify)
        else:
            block = NonlinearityLayer(ElemwiseSumLayer([stack_2, l]),nonlinearity=rectify)

        return block
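
A hedged sketch (the l_in shape and n are placeholders) of how residual_block is typically stacked in the CIFAR-style ResNet recipe this helper comes from: an initial 3x3 convolution feeds n blocks per stage, with increase_dim=True at each stage boundary.

import lasagne
from lasagne.layers import Conv2DLayer as ConvLayer, InputLayer, batch_norm
from lasagne.nonlinearities import rectify

n = 5                                        # residual blocks per stage
l_in = InputLayer((None, 3, 32, 32))
l = batch_norm(ConvLayer(l_in, num_filters=16, filter_size=(3, 3),
                         stride=(1, 1), nonlinearity=rectify, pad='same',
                         W=lasagne.init.HeNormal(gain='relu'),
                         flip_filters=False))
for _ in range(n):                           # first stage: 16 filters, 32x32
    l = residual_block(l)
l = residual_block(l, increase_dim=True)     # second stage: 32 filters, 16x16
for _ in range(n - 1):
    l = residual_block(l)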
Example #22
    def __build_48_net__(self):
        network = layers.InputLayer((None, 3, 48, 48),
                                    input_var=self.__input_var__)

        network = layers.Conv2DLayer(network,
                                     num_filters=64,
                                     filter_size=(5, 5),
                                     stride=1,
                                     nonlinearity=relu)
        network = layers.MaxPool2DLayer(network, pool_size=(3, 3), stride=2)
        network = layers.batch_norm(network)

        network = layers.Conv2DLayer(network,
                                     num_filters=64,
                                     filter_size=(5, 5),
                                     stride=1,
                                     nonlinearity=relu)
        network = layers.batch_norm(network)
        network = layers.MaxPool2DLayer(network, pool_size=(3, 3), stride=2)

        network = layers.Conv2DLayer(network,
                                     num_filters=64,
                                     filter_size=(3, 3),
                                     stride=1,
                                     nonlinearity=relu)
        network = layers.batch_norm(network)
        network = layers.MaxPool2DLayer(network, pool_size=(3, 3), stride=2)

        network = layers.DenseLayer(network, num_units=256, nonlinearity=relu)
        network = layers.DenseLayer(network, num_units=2, nonlinearity=softmax)
        return network
Example #23
def define_net():
    net = {}
    # net['input_img'] = ll.InputLayer(shape=(None, 1, 28, 28), input_var=input_img)
    # net['input_noise'] = ll.InputLayer(shape=(None, 1, 28, 28), input_var=noise)
    # ll.ConcatLayer([net['input_img'], net['input_noise']], axis=1)
    net['input'] = ll.InputLayer(shape=(None, 2, 28, 28))

    net['conv_1'] = ll.batch_norm(
        ll.Conv2DLayer(net['input'],
                       num_filters=32,
                       stride=(2, 2),
                       filter_size=(5, 5),
                       pad='same'))
    net['conv_2'] = ll.batch_norm(
        ll.Conv2DLayer(net['conv_1'],
                       num_filters=64,
                       stride=(2, 2),
                       filter_size=(5, 5),
                       pad='same'))
    net['unconv_3'] = ll.batch_norm(
        ll.TransposedConv2DLayer(net['conv_2'],
                                 filter_size=(5, 5),
                                 num_filters=32,
                                 stride=(2, 2),
                                 crop=(2, 2)))
    net['out'] = ll.batch_norm(
        ll.TransposedConv2DLayer(net['unconv_3'],
                                 filter_size=(4, 4),
                                 num_filters=1,
                                 stride=(2, 2),
                                 crop=(0, 0),
                                 nonlinearity=lasagne.nonlinearities.sigmoid))
    return net
def lasagne_model():
    l_in = InputLayer(shape=(None, 1,PIXELS , PIXELS))

    # l_in =lasagne.layers.NonlinearityLayer(l_in,lasagne.nonlinearities.tanh)
    l_conv1 = Conv2DLayer(l_in, num_filters = 128, filter_size=(3,3), nonlinearity=rectify)
    l_conv1b = Conv2DLayer(l_conv1, num_filters = 128, filter_size=(3,3), nonlinearity=rectify)
    l_conv1b =batch_norm(l_conv1b)
    l_pool1 = MaxPool2DLayer(l_conv1b, pool_size=(2,2))
    # l_dropout1 = DropoutLayer(l_pool1, p=0.2)

    l_conv2 = Conv2DLayer(l_pool1, num_filters = 256, filter_size=(3,3), nonlinearity=rectify)
    l_conv2b = Conv2DLayer(l_conv2, num_filters = 256, filter_size=(3,3), nonlinearity=rectify)
    l_conv2b =batch_norm(l_conv2b)
    l_pool2 = MaxPool2DLayer(l_conv2b, pool_size=(2,2))
    # l_dropout2 = DropoutLayer(l_pool2, p=0.2)

    l_hidden3 = DenseLayer(l_pool2, num_units = h_dimension, nonlinearity=rectify)
    l_hidden3 =batch_norm(l_hidden3)
    # l_dropout3 = DropoutLayer(l_hidden3, p=0.5)

    l_hidden4 = DenseLayer(l_hidden3, num_units = h_dimension, nonlinearity=rectify)
    # l_dropout4 = DropoutLayer(l_hidden4, p=0.5)

    l_out = DenseLayer(l_hidden4, num_units=num_labels_train, nonlinearity=softmax)


    return l_out,l_hidden4
Example #25
    def __init__(self, dims, nonlinearities=None, dropouts=None,
                 update_fn=None, batch_norm=False,
                 loss_type='cosine_margin', margin=0.8):
        """Initialize a Siamese neural network

        Parameters:
        -----------
        update_fn: theano function with 2 arguments (loss, params)
            Update scheme, defaults to adadelta
        batch_norm: bool
            Whether to batch-normalise each hidden layer, defaults to False
        """
        assert len(dims) >= 3, 'Not enough dimensions'
        if dropouts is not None:
            dropouts = copy.copy(dropouts)
            assert len(dropouts) == len(dims) - 1
            dropouts.append(0)
        else:
            dropouts = [0] * len(dims)
        if nonlinearities is None:
            nonlinearities = [nl.sigmoid] * (len(dims) -1)
        else:
            assert len(nonlinearities) == len(dims) - 1
        if update_fn is None:
            update_fn = lasagne.updates.adadelta
        self.input_var1 = T.matrix('inputs1')
        self.input_var2 = T.matrix('inputs2')
        self.target_var = T.ivector('targets')
        # input layer
        network1 = layers.InputLayer((None, dims[0]), input_var=self.input_var1)
        network2 = layers.InputLayer((None, dims[0]), input_var=self.input_var2)
        if dropouts[0]:
            network1 = layers.DropoutLayer(network1, p=dropouts[0])
            network2 = layers.DropoutLayer(network2, p=dropouts[0])
        # hidden layers
        for dim, dropout, nonlin in zip(dims[1:], dropouts[1:], nonlinearities):
            network1 = layers.DenseLayer(network1, num_units=dim,
                                         W=lasagne.init.GlorotUniform(),
                                         nonlinearity=nonlin)
            network2 = layers.DenseLayer(network2, num_units=dim,
                                         W=network1.W, b=network1.b,
                                         nonlinearity=nonlin)
            if batch_norm:
                network1 = layers.batch_norm(network1)
                network2 = layers.batch_norm(network2)
            if dropout:
                network1 = layers.DropoutLayer(network1, p=dropout)
                network2 = layers.DropoutLayer(network2, p=dropout)
        self.network = [network1, network2]
        self.params = layers.get_all_params(network1, trainable=True)

        # util functions, completely stolen from Lasagne example
        self.prediction1 = layers.get_output(network1)
        self.prediction2 = layers.get_output(network2)
        # deterministic (test-time) predictions, with dropout disabled:
        self.test_prediction1 = layers.get_output(network1, deterministic=True)
        self.test_prediction2 = layers.get_output(network2, deterministic=True)

        self.change_loss(loss_type, margin)
        self.change_update(update_fn)
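
A hedged construction sketch; the enclosing class name is not shown in the snippet, so SiameseNetwork below is purely a placeholder, and the dimensions are arbitrary.

import lasagne.nonlinearities as nl

# dims = [input_dim, hidden_dim, embedding_dim]; one nonlinearity and one
# dropout rate per layer after the input.
net = SiameseNetwork(dims=[40, 200, 100],
                     nonlinearities=[nl.rectify, nl.tanh],
                     dropouts=[0.2, 0.5],
                     batch_norm=True,
                     loss_type='cosine_margin', margin=0.8)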
Example #26
def build_generator(parameter, input_var=None):
    from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer, batch_norm
    from lasagne.nonlinearities import tanh, sigmoid

    layer = InputLayer(shape=(None, 100), input_var=input_var)

    # fully-connected layer
    layer = DenseLayer(layer, 256 * 4 * 4 * 4)
    parameter['W1'] = layer.W
    parameter['b1'] = layer.b
    layer = batch_norm(layer, beta=parameter['beta1'], gamma=parameter['gamma1'],
                       mean=parameter['mean1'], inv_std=parameter['inv_std1'])
    layer = ReshapeLayer(layer, ([0], 256 * 4, 4, 4))
    layer = Deconv2DLayer(layer, 256 * 2, 5, stride=2, pad=2)
    parameter['W2'] = layer.W
    parameter['b2'] = layer.b
    layer = batch_norm(layer, beta=parameter['beta2'], gamma=parameter['gamma2'],
                       mean=parameter['mean2'], inv_std=parameter['inv_std2'])

    layer = Deconv2DLayer(layer, 256, 5, stride=2, pad=2)
    parameter['W3'] = layer.W
    parameter['b3'] = layer.b
    layer = batch_norm(layer, beta=parameter['beta3'], gamma=parameter['gamma3'],
                       mean=parameter['mean3'], inv_std=parameter['inv_std3'])
    
    layer = Deconv2DLayer(layer, 4, 5, stride=2, pad=2, nonlinearity=sigmoid)
    parameter['W4'] = layer.W
    parameter['b4'] = layer.b
    # shape=(batch,1,28,28)
    print("Generator output:", layer.output_shape)
    return layer
Example #27
    def __init__(self, dims, nonlinearities=None, dropouts=None,
                 update_fn=None, batch_norm=False,
                 loss_type='cosine_margin', margin=0.8):
        """Initialize a Siamese neural network

        Parameters:
        -----------
        update_fn: theano function with 2 arguments (loss, params)
            Update scheme, defaults to adadelta
        batch_norm: bool
            Whether to batch-normalise each hidden layer, defaults to False
        """
        assert len(dims) >= 3, 'Not enough dimensions'
        if dropouts is not None:
            dropouts = copy.copy(dropouts)
            assert len(dropouts) == len(dims) - 1
            dropouts.append(0)
        else:
            dropouts = [0] * len(dims)
        if nonlinearities is None:
            nonlinearities = [nl.sigmoid] * (len(dims) -1)
        else:
            assert len(nonlinearities) == len(dims) - 1
        if update_fn is None:
            update_fn = lasagne.updates.adadelta
        self.input_var1 = T.matrix('inputs1')
        self.input_var2 = T.matrix('inputs2')
        self.target_var = T.ivector('targets')
        # input layer
        network1 = layers.InputLayer((None, dims[0]), input_var=self.input_var1)
        network2 = layers.InputLayer((None, dims[0]), input_var=self.input_var2)
        if dropouts[0]:
            network1 = layers.DropoutLayer(network1, p=dropouts[0])
            network2 = layers.DropoutLayer(network2, p=dropouts[0])
        # hidden layers
        for dim, dropout, nonlin in zip(dims[1:], dropouts[1:], nonlinearities):
            network1 = layers.DenseLayer(network1, num_units=dim,
                                         W=lasagne.init.GlorotUniform(),
                                         nonlinearity=nonlin)
            network2 = layers.DenseLayer(network2, num_units=dim,
                                         W=network1.W, b=network1.b,
                                         nonlinearity=nonlin)
            if batch_norm:
                network1 = layers.batch_norm(network1)
                network2 = layers.batch_norm(network2)
            if dropout:
                network1 = layers.DropoutLayer(network1, p=dropout)
                network2 = layers.DropoutLayer(network2, p=dropout)
        self.network = [network1, network2]
        self.params = layers.get_all_params(network1, trainable=True)

        # util functions, completely stolen from Lasagne example
        self.prediction1 = layers.get_output(network1)
        self.prediction2 = layers.get_output(network2)
        # deterministic (test-time) predictions, with dropout disabled:
        self.test_prediction1 = layers.get_output(network1, deterministic=True)
        self.test_prediction2 = layers.get_output(network2, deterministic=True)

        self.change_loss(loss_type, margin)
        self.change_update(update_fn)
Example #28
def build_generator(input_var=None):
    #D_inp = T.tensor4('ds')
    G = l.InputLayer(shape=(None, 1, noise_H, noise_W), input_var=input_var)
    G = batch_norm(
        l.DenseLayer(G, num_units=(noise_H * noise_W * 256),
                     nonlinearity=reLU))
    G = l.ReshapeLayer(G, shape=([0], 256, noise_H, noise_W))  #4
    G = l.TransposedConv2DLayer(G,
                                1,
                                filter_size=(2, 2),
                                stride=(2, 2),
                                output_size=8)  #8
    G = batch_norm(l.Conv2DLayer(G, 40, (3, 3), nonlinearity=reLU,
                                 pad='full'))  #10
    G = l.TransposedConv2DLayer(G,
                                1,
                                filter_size=(2, 2),
                                stride=(2, 2),
                                output_size=20)  #20
    G = batch_norm(l.Conv2DLayer(G, 20, (3, 3), nonlinearity=reLU,
                                 pad='full'))  #22
    G = batch_norm(l.Conv2DLayer(G, 20, (5, 5), nonlinearity=reLU,
                                 pad='full'))  #26
    G = batch_norm(l.Conv2DLayer(G, 1, (3, 3), nonlinearity=reLU,
                                 pad='full'))  #28

    return G
def build_generator(input_noise=None, input_text=None):
    from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer, batch_norm, ConcatLayer
    from lasagne.nonlinearities import sigmoid
    # input: 100dim
    layer = InputLayer(shape=(None, noise_dim), input_var=input_noise)
    layer2 = InputLayer(shape=(None,1,300), input_var=input_text)
    layer2 = ReshapeLayer(layer2, ([0], 1*300))


    layer = ConcatLayer([layer, layer2], axis=1)

    # fully-connected layers, taken from fclayer_list in increasing order
    for i in range(len(fclayer_list)):
        layer = batch_norm(DenseLayer(layer, fclayer_list[i]))
    
    newPS = 28
    if stride!=1:
        newPS = 28/(2**len(layer_list))

    layer = batch_norm(DenseLayer(layer, layer_list[0]*newPS*newPS))
    layer = ReshapeLayer(layer, ([0], layer_list[0], newPS, newPS))
    
    for i in range(1,len(layer_list)):
        layer = batch_norm(Deconv2DLayer(layer, layer_list[i], filter_sz, stride=stride, pad=(filter_sz-1)/2))
    layer = Deconv2DLayer(layer, 1, filter_sz, stride=stride, pad=(filter_sz-1)/2,
                          nonlinearity=sigmoid)
    print ("Generator output:", layer.output_shape)
    return layer
Example #30
def discriminator(input_var=None, configs=None):

    lrelu = LeakyRectify(0.2)

    network = InputLayer(shape=(None, 1, configs['img_rows'],
                                configs['img_cols']),
                         input_var=input_var)
    network = batch_norm(
        Conv2DLayer(network,
                    num_filters=64,
                    filter_size=(5, 5),
                    stride=2,
                    nonlinearity=lrelu,
                    W=lasagne.init.GlorotUniform()))
    #	network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2,2))
    network = batch_norm(
        lasagne.layers.Conv2DLayer(network,
                                   num_filters=128,
                                   filter_size=5,
                                   stride=2,
                                   nonlinearity=lrelu))
    #	network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2,2))
    network = batch_norm(
        DenseLayer(incoming=lasagne.layers.dropout(network, p=0.25),
                   num_units=1024,
                   nonlinearity=lrelu))
    network = DenseLayer(
        incoming=network,
        num_units=1,
        nonlinearity=sigmoid,
    )

    network = lasagne.layers.ReshapeLayer(network, (-1, nb_classes))

    return network
Example #31
def classificationBranch(net, kernel_size):

    # Post Convolution
    branch = l.batch_norm(l.Conv2DLayer(net,
                        num_filters=int(FILTERS[-1] * RESNET_K),
                        filter_size=kernel_size,
                        nonlinearity=nl.rectify))

    #log.p(("\t\tPOST  CONV SHAPE:", l.get_output_shape(branch), "LAYER:", len(l.get_all_layers(branch)) - 1))

    # Dropout Layer
    branch = l.DropoutLayer(branch)
    
    # Dense Convolution
    branch = l.batch_norm(l.Conv2DLayer(branch,
                        num_filters=int(FILTERS[-1] * RESNET_K * 2),
                        filter_size=1,
                        nonlinearity=nl.rectify))

    #log.p(("\t\tDENSE CONV SHAPE:", l.get_output_shape(branch), "LAYER:", len(l.get_all_layers(branch)) - 1))
    
    # Dropout Layer
    branch = l.DropoutLayer(branch)
    
    # Class Convolution
    branch = l.Conv2DLayer(branch,
                        num_filters=len(cfg.CLASSES),
                        filter_size=1,
                        nonlinearity=None)
    return branch
def build_discriminator(input_img=None, input_text=None):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
                                DenseLayer, batch_norm, ConcatLayer)
    from lasagne.nonlinearities import LeakyRectify, sigmoid
    lrelu = LeakyRectify(0.1)
    # input: (None, 1, 28, 28)
    layer = InputLayer(shape=(None, 1, 28, 28), input_var=input_img)

    layer2 = InputLayer(shape=(None,1,300), input_var=input_text)
    layer2 = ReshapeLayer(layer2, ([0], 1*300))

    for i in reversed(range(len(layer_list))):
        layer = batch_norm(Conv2DLayer(layer, layer_list[i], filter_sz, stride=stride, pad=(filter_sz-1)/2, nonlinearity=lrelu)) 
       
    newPS = 28
    if stride!=1:
        newPS = 28/(2**len(layer_list))

    layer = ReshapeLayer(layer, ([0], layer_list[0]*newPS*newPS))
    layer = ConcatLayer([layer, layer2], axis=1)

    for i in reversed(range(len(fclayer_list))):
        layer = batch_norm(DenseLayer(layer, fclayer_list[i], nonlinearity=lrelu))
    
    layer = DenseLayer(layer, 1, nonlinearity=None, b=None)
    print ("Discriminator output:", layer.output_shape)
    return layer
    def build_network(self, input_var=None):
        if input_var is not None: self.sinputs = input_var

        self.network['input'] = lasagne.layers.InputLayer(shape=(self.batch_size, 1, self.input_size['audio'][0],self.input_size['audio'][1]),
                                                          input_var=self.sinputs[0])

        self.network['Conv2D_1'] = batch_norm(lasagne.layers.Conv2DLayer(
            lasagne.layers.dropout(self.network['input'], p=self.dropout_rates[0]) , num_filters=25, filter_size=(5, 5),
            nonlinearity=lasagne.nonlinearities.tanh,
            W=lasagne.init.GlorotUniform()))

        self.network['MaxPool2D_1'] = lasagne.layers.MaxPool2DLayer(self.network['Conv2D_1'], pool_size=(1, 1))

        self.network['FC_1'] = batch_norm(lasagne.layers.DenseLayer(
            lasagne.layers.dropout(self.network['MaxPool2D_1'], p=self.dropout_rates[1]),
            num_units=self.fc_layers[0],
            nonlinearity=lasagne.nonlinearities.tanh))

        self.network['FC_N'] = batch_norm(lasagne.layers.DenseLayer(lasagne.layers.dropout(self.network['FC_1'], p=self.dropout_rates[2]),
            num_units=self.fc_layers[1],
            nonlinearity=lasagne.nonlinearities.tanh))


        self.network['prob'] =  batch_norm(lasagne.layers.DenseLayer(
            lasagne.layers.dropout(self.network['FC_N'], p=self.dropout_rates[3]),
            num_units=self.nclasses,
            nonlinearity=lasagne.nonlinearities.softmax))

        return self.network
Example #34
def build_generator(input_var=None,
                    noise_size=100,
                    nfilters=[512, 256, 128, 64, 3]):

    ###############################
    # Build Network Configuration #
    ###############################

    print('... Building the generator')

    # Input of the network : shape = (batch_size, 100)
    network = layers.InputLayer(shape=(None, noise_size), input_var=input_var)

    # Reshape layer : shape = (batch_size, 100, 1, 1)
    network = layers.ReshapeLayer(network, (-1, noise_size, 1, 1))

    # Tranposed conv layer : shape = (batch_size, 512, 4, 4)
    network = layers.batch_norm(
        layers.TransposedConv2DLayer(network,
                                     num_filters=nfilters[0],
                                     filter_size=(4, 4),
                                     stride=(1, 1)))

    # Tranposed conv layer : shape = (batch_size, 256, 8, 8)
    network = layers.batch_norm(
        layers.TransposedConv2DLayer(network,
                                     num_filters=nfilters[1],
                                     filter_size=(5, 5),
                                     stride=(2, 2),
                                     crop=2,
                                     output_size=8))

    # Tranposed conv layer : shape = (batch_size, 128, 16, 16)
    network = layers.batch_norm(
        layers.TransposedConv2DLayer(network,
                                     num_filters=nfilters[2],
                                     filter_size=(5, 5),
                                     stride=(2, 2),
                                     crop=2,
                                     output_size=16))

    # Tranposed conv layer : shape = (batch_size, 64, 32, 32)
    network = layers.batch_norm(
        layers.TransposedConv2DLayer(network,
                                     num_filters=nfilters[3],
                                     filter_size=(5, 5),
                                     stride=(2, 2),
                                     crop=2,
                                     output_size=32))

    # Tranposed conv layer : shape = (batch_size, 3, 64, 64)
    network = layers.TransposedConv2DLayer(network,
                                           num_filters=nfilters[4],
                                           filter_size=5,
                                           stride=2,
                                           crop=2,
                                           output_size=64,
                                           nonlinearity=nonlinearities.sigmoid)

    return network
Example #35
def test_batch_norm_macro():
    from lasagne.layers import (Layer, BatchNormLayer, batch_norm,
                                NonlinearityLayer)
    from lasagne.nonlinearities import identity
    input_shape = (2, 3)
    obj = object()

    # check if it steals the nonlinearity
    layer = Mock(Layer, output_shape=input_shape, nonlinearity=obj)
    bnstack = batch_norm(layer)
    assert isinstance(bnstack, NonlinearityLayer)
    assert isinstance(bnstack.input_layer, BatchNormLayer)
    assert layer.nonlinearity is identity
    assert bnstack.nonlinearity is obj

    # check if it removes the bias
    layer = Mock(Layer, output_shape=input_shape, b=obj, params={obj: set()})
    bnstack = batch_norm(layer)
    assert isinstance(bnstack, BatchNormLayer)
    assert layer.b is None
    assert obj not in layer.params

    # check if it can handle an unset bias
    layer = Mock(Layer, output_shape=input_shape, b=None, params={obj: set()})
    bnstack = batch_norm(layer)
    assert isinstance(bnstack, BatchNormLayer)
    assert layer.b is None

    # check if it passes on kwargs
    layer = Mock(Layer, output_shape=input_shape)
    bnstack = batch_norm(layer, name='foo')
    assert isinstance(bnstack, BatchNormLayer)
    assert bnstack.name == 'foo'
def inc_dim_layer(l_in, num_filters):
    """
    Increase the dimension of filter number
    decrease image size
    Args:
        incoming:

    Returns:

    """
    l = batch_norm(
        ConvLayer(l_in,
                  num_filters=num_filters,
                  filter_size=(3, 3),
                  stride=(2, 2),
                  nonlinearity=rectify,
                  pad='same',
                  W=lasagne.init.HeNormal(gain='relu'))
    )  # 128 x 16 x 16 (1 highway block) (2 conv layers)

    l = batch_norm(
        ConvLayer(l,
                  num_filters=num_filters,
                  filter_size=(3, 3),
                  stride=(1, 1),
                  nonlinearity=rectify,
                  pad='same',
                  W=lasagne.init.HeNormal(gain='relu')))

    return l
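
A brief usage sketch (an assumption, not part of the snippet): chaining inc_dim_layer halves the spatial resolution at each call while switching to the requested filter count.

from lasagne.layers import InputLayer

l = InputLayer((None, 64, 32, 32))
l = inc_dim_layer(l, num_filters=128)   # -> (None, 128, 16, 16)
l = inc_dim_layer(l, num_filters=256)   # -> (None, 256, 8, 8)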
Example #37
def build_discriminator(input_var=None,
                        nfilters=[64, 128, 256, 512],
                        input_channels=3):

    ###############################
    # Build Network Configuration #
    ###############################

    print('... Building the discriminator')

    leaky = nonlinearities.LeakyRectify(0.2)

    # Input of the network : shape = (batch_size, 3, 64, 64)
    network = layers.InputLayer(shape=(None, input_channels, 64, 64),
                                input_var=input_var)

    # Conv layer : shape = (batch_size, 64, 32, 32)
    network = layers.Conv2DLayer(network,
                                 num_filters=nfilters[0],
                                 filter_size=(5, 5),
                                 stride=2,
                                 pad=2,
                                 nonlinearity=leaky)

    # Conv layer : shape = (batch_size, 128, 16, 16)
    network = layers.batch_norm(
        lasagne.layers.Conv2DLayer(network,
                                   num_filters=nfilters[1],
                                   filter_size=(5, 5),
                                   stride=2,
                                   pad=2,
                                   nonlinearity=leaky))

    # Conv layer : shape = (batch_size, 256, 8, 8)
    network = layers.batch_norm(
        lasagne.layers.Conv2DLayer(network,
                                   num_filters=nfilters[2],
                                   filter_size=(5, 5),
                                   stride=2,
                                   pad=2,
                                   nonlinearity=leaky))

    # Conv layer : shape = (batch_size, 512, 4, 4)
    network = layers.batch_norm(
        lasagne.layers.Conv2DLayer(network,
                                   num_filters=nfilters[3],
                                   filter_size=(5, 5),
                                   stride=2,
                                   pad=2,
                                   nonlinearity=leaky))

    # Flatten layer :shape = (batch_size, 8192)
    network = lasagne.layers.FlattenLayer(network)

    # Dense layer :shape = (batch_size, 1)
    network = lasagne.layers.DenseLayer(
        network, 1, nonlinearity=lasagne.nonlinearities.sigmoid)

    return network
    def init_discriminator(self, first_layer, input_var=None):
        """
        Initialize the DCGAN discriminator network using lasagne
        Returns the network
        """

        lrelu = nonlinearities.LeakyRectify(0.2)
        layers = []

        l_in = lyr.InputLayer((None, 3, 64, 64), input_var)
        layers.append(l_in)

        l_1 = lyr.Conv2DLayer(incoming=l_in,
                              num_filters=first_layer,
                              filter_size=5,
                              stride=2,
                              pad=2,
                              nonlinearity=lrelu)
        layers.append(l_1)

        l_2 = lyr.batch_norm(
            lyr.Conv2DLayer(incoming=l_1,
                            num_filters=first_layer * 2,
                            filter_size=5,
                            stride=2,
                            pad=2,
                            nonlinearity=lrelu))
        layers.append(l_2)

        l_3 = lyr.batch_norm(
            lyr.Conv2DLayer(incoming=l_2,
                            num_filters=first_layer * 4,
                            filter_size=5,
                            stride=2,
                            pad=2,
                            nonlinearity=lrelu))
        layers.append(l_3)

        l_4 = lyr.batch_norm(
            lyr.Conv2DLayer(incoming=l_3,
                            num_filters=first_layer * 8,
                            filter_size=5,
                            stride=2,
                            pad=2,
                            nonlinearity=lrelu))
        l_4 = lyr.FlattenLayer(l_4)
        layers.append(l_4)

        l_out = lyr.DenseLayer(incoming=l_4,
                               num_units=1,
                               nonlinearity=nonlinearities.sigmoid)
        layers.append(l_out)

        if self.verbose:
            for i, layer in enumerate(layers):
                print 'discriminator layer %s output shape:' % i, layer.output_shape

        return l_out
Example #39
def architecture_upconv_mp3(input_var, input_shape, n_conv_layers,
                            n_conv_filters):

    net = {}

    kwargs = dict(nonlinearity=lasagne.nonlinearities.elu,
                  W=lasagne.init.HeNormal())

    net['data'] = InputLayer(input_shape, input_var)
    print("\rLayer output shapes")
    print(net['data'].output_shape)

    # A stack of 3 x 3 convolution layers. Experimentally, placing the conv layers at the start rather than in the middle worked better, though the reason is unclear.
    i = 'data'
    j = 'c1'
    for idx in range(n_conv_layers):
        print("Conv layer index: %d" % (idx + 1))
        net[j] = batch_norm(
            Conv2DLayer(net[i],
                        num_filters=n_conv_filters,
                        filter_size=3,
                        stride=1,
                        pad=1,
                        **kwargs))
        print(net[j].output_shape)
        # renaming for next iteration
        i = j
        j = j[:-1] + str(idx + 2)

    # Bunch of transposed convolution layers
    net['uc1'] = batch_norm(
        TransposedConv2DLayer(net[i],
                              num_filters=n_conv_filters // 2,  # floor division keeps num_filters an int
                              filter_size=4,
                              stride=2,
                              crop=1,
                              **kwargs))
    print(net['uc1'].output_shape)

    net['uc2'] = batch_norm(
        TransposedConv2DLayer(net['uc1'],
                              num_filters=1,
                              filter_size=4,
                              stride=2,
                              crop=1,
                              **kwargs))
    print(net['uc2'].output_shape)

    # slicing the output to 115 x 80 size
    net['s1'] = lasagne.layers.SliceLayer(net['uc2'], slice(0, 115), axis=-2)
    print(net['s1'].output_shape)
    net['out'] = lasagne.layers.SliceLayer(net['s1'], slice(0, 80), axis=-1)
    print(net['out'].output_shape)

    print("Number of parameter to be learned: %d" %
          (lasagne.layers.count_params(net['out'])))

    return net['out']
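A minimal usage sketch of architecture_upconv_mp3 under an assumed input shape (the real spectrogram dimensions may differ): with pad=1 the 3 x 3 convolutions preserve the spatial size, and each transposed convolution (filter_size=4, stride=2, crop=1) doubles it, so a 29 x 20 input grows to 116 x 80 before the slice layers trim it to 115 x 80.

import theano.tensor as T
import lasagne

# assumed, illustrative input shape: (batch, 1 channel, 29, 20)
x = T.tensor4('x')
out = architecture_upconv_mp3(x, (None, 1, 29, 20),
                              n_conv_layers=3, n_conv_filters=64)
# each TransposedConv2DLayer maps L -> 2*L here, so 29x20 -> 58x40 -> 116x80,
# and the SliceLayers cut the result down to 115 x 80
print(lasagne.layers.get_output_shape(out))  # expected: (None, 1, 115, 80)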
 def __build_48_calib_net__(self):
     network = layers.InputLayer((None, 3, 48, 48), input_var=self.__input_var__)
     network = layers.Conv2DLayer(network,num_filters=64,filter_size=(5,5),stride=1,nonlinearity=relu)
     network = layers.batch_norm(layers.MaxPool2DLayer(network, pool_size = (3,3),stride = 2))
     network = layers.Conv2DLayer(network,num_filters=64,filter_size=(5,5),stride=1,nonlinearity=relu)
     network = layers.batch_norm(layers.MaxPool2DLayer(network, pool_size = (3,3),stride = 2))
     network = layers.DenseLayer(network,num_units = 256,nonlinearity = relu)
     network = layers.DenseLayer(network,num_units = 45, nonlinearity = softmax)
     return network
Example #41
def build_generator(input_var=None, dim_z=100):
    layer = InputLayer(shape=(None, dim_z), input_var=input_var)
    layer = batch_norm(DenseLayer(layer, 1024))
    layer = batch_norm(DenseLayer(layer, 128 * 7 * 7))
    layer = ReshapeLayer(layer, ([0], 128, 7, 7))
    layer = batch_norm(Deconv2DLayer(layer, 64, 5, stride=2, pad=2))
    layer = Deconv2DLayer(layer, 1, 5, stride=2, pad=2, nonlinearity=sigmoid)
    logger.debug('Generator output: {}'.format(layer.output_shape))
    return layer
Example #42
def build_synth(input_dist=None):
    from lasagne.layers import (InputLayer, DenseLayer, batch_norm, ReshapeLayer)
    from lasagne.nonlinearities import LeakyRectify, rectify
    lrelu = LeakyRectify(0.2)
    layer = InputLayer(shape=(None, 1, 28, 28), input_var=input_dist)
    layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
    layer = batch_norm(DenseLayer(layer, 1 * 28 * 28))#, nonlinearity=lrelu)
    layer = ReshapeLayer(layer, ([0], 1, 28, 28))
    return layer
Example #43
    def build_treatment_model(self, n_vars, **kwargs):

        input_vars = TT.matrix()
        instrument_vars = TT.matrix()
        targets = TT.vector()

        inputs = layers.InputLayer((None, n_vars), input_vars)
        inputs = layers.DropoutLayer(inputs, p=0.2)

        dense_layer = layers.DenseLayer(inputs, 2 * kwargs['dense_size'], nonlinearity=nonlinearities.rectify)
        dense_layer = layers.batch_norm(dense_layer)
        dense_layer = layers.DropoutLayer(dense_layer, p=0.2)

        for _ in xrange(kwargs['n_dense_layers'] - 1):
            dense_layer = layers.DenseLayer(dense_layer, kwargs['dense_size'], nonlinearity=nonlinearities.rectify)
            dense_layer = layers.batch_norm(dense_layer)

        self.treatment_output = layers.DenseLayer(dense_layer, 1, nonlinearity=nonlinearities.linear)
        init_params = layers.get_all_param_values(self.treatment_output)

        prediction = layers.get_output(self.treatment_output, deterministic=False)
        test_prediction = layers.get_output(self.treatment_output, deterministic=True)

        l2_cost = regularization.regularize_network_params(self.treatment_output, regularization.l2)
        loss = gmm_loss(prediction, targets, instrument_vars) + 1e-4 * l2_cost

        params = layers.get_all_params(self.treatment_output, trainable=True)
        param_updates = updates.adadelta(loss, params)

        self._train_fn = theano.function(
            [
                input_vars,
                targets,
                instrument_vars,
            ],
            loss,
            updates=param_updates
        )

        self._loss_fn = theano.function(
            [
                input_vars,
                targets,
                instrument_vars,
            ],
            loss,
        )

        self._output_fn = theano.function(
            [
                input_vars,
            ],
            test_prediction,
        )

        return init_params
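A hypothetical usage sketch for build_treatment_model; `model` stands for an instance of the enclosing (unshown) class, and the shapes and names below are illustrative, not from the original project.

import numpy as np
import theano

init_params = model.build_treatment_model(n_vars=10, dense_size=64, n_dense_layers=3)

# cast to the configured float type to match TT.matrix / TT.vector
X = np.random.randn(128, 10).astype(theano.config.floatX)  # treatment inputs (n_vars columns)
Z = np.random.randn(128, 5).astype(theano.config.floatX)   # instrument variables
y = np.random.randn(128).astype(theano.config.floatX)      # targets

batch_loss = model._train_fn(X, y, Z)   # one adadelta step on the GMM loss
held_out = model._loss_fn(X, y, Z)      # loss without updating parameters
fitted = model._output_fn(X)            # deterministic treatment predictions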
def build_net():
    net = OrderedDict()

    net['input'] = InputLayer((BATCH_SIZE, 1, 128, 128))

    net['conv_1_1'] = batch_norm(ConvLayer(net['input'], 12, 7, pad='same', stride=1, nonlinearity=lasagne.nonlinearities.elu))
    # net['conv_1_1_do'] = DropoutLayer(net['conv_1_1'], p=0.1)
    net['conv_1_2'] = batch_norm(ConvLayer(net['conv_1_1'], 12, 5, pad='same', stride=1, nonlinearity=lasagne.nonlinearities.elu))
    # net['conv_1_2_do'] = DropoutLayer(net['conv_1_2'], p=0.1)
    net['maxPool_1_1'] = Pool2DLayer(net['conv_1_2'], 2, mode='max')

    net['conv_2_1'] = batch_norm(ConvLayer(net['maxPool_1_1'], 24, 3, pad='same', stride=1, nonlinearity=lasagne.nonlinearities.elu))
    # net['conv_2_1_do'] = DropoutLayer(net['conv_2_1'], p=0.2)
    net['conv_2_2'] = batch_norm(ConvLayer(net['conv_2_1'], 24, 3, pad='same', stride=1, nonlinearity=lasagne.nonlinearities.elu))
    # net['conv_2_2_do'] = DropoutLayer(net['conv_2_2'], p=0.2)
    net['maxPool_2_1'] = Pool2DLayer(net['conv_2_2'], 2, mode='max')

    net['conv_3_1'] = batch_norm(ConvLayer(net['maxPool_2_1'], 48, 3, pad=1, stride=1, nonlinearity=lasagne.nonlinearities.elu))
    # net['conv_3_1_do'] = DropoutLayer(net['conv_3_1'], p=0.3)
    net['maxPool_3_1'] = Pool2DLayer(net['conv_3_1'], 2, mode='max')
    net['conv_3_2'] = batch_norm(ConvLayer(net['maxPool_3_1'], 48, 3, pad=1, stride=1, nonlinearity=lasagne.nonlinearities.elu))
    # net['conv_3_2_do'] = DropoutLayer(net['conv_3_2'], p=0.3)
    net['maxPool_3_2'] = Pool2DLayer(net['conv_3_2'], 2, mode='max')
    net['conv_3_3'] = batch_norm(ConvLayer(net['maxPool_3_2'], 48, 3, pad=1, stride=1, nonlinearity=lasagne.nonlinearities.elu))
    # net['conv_3_3_do'] = DropoutLayer(net['conv_3_3'], p=0.3)
    net['maxPool_3_3'] = Pool2DLayer(net['conv_3_3'], 2, mode='max')

    net['fc_4'] = batch_norm(DenseLayer(net['maxPool_3_3'], 200, nonlinearity=lasagne.nonlinearities.elu))
    # net['fc_4_dropOut'] = DropoutLayer(net['fc_4'], p=0.5)

    net['prob'] = batch_norm(DenseLayer(net['fc_4'], 2, nonlinearity=lasagne.nonlinearities.softmax))

    return net
Example #45
def mlp_network(dim=784,num_hidden_layers=1,num_hidden_nodes_per_layer=10,dropout=0.5):
  net={}
  net['l_in']=lasagne.layers.InputLayer((None,dim))
  net['l_d']=lasagne.layers.DropoutLayer(net['l_in'],p=.1)
  for i in range(num_hidden_layers):
    # even keys hold the dense layers, odd keys the dropout layers that follow them
    if i==0:
      net[2*i]=batch_norm(lasagne.layers.DenseLayer(net['l_d'],num_units=num_hidden_nodes_per_layer,nonlinearity=lasagne.nonlinearities.rectify))
    else:
      net[2*i]=batch_norm(lasagne.layers.DenseLayer(net[2*i-1],num_units=num_hidden_nodes_per_layer,nonlinearity=lasagne.nonlinearities.rectify))
    net[2*i+1]=lasagne.layers.DropoutLayer(net[2*i],p=dropout)
  net['l_out']=lasagne.layers.DenseLayer(net[2*num_hidden_layers-1],num_units=10,nonlinearity=lasagne.nonlinearities.softmax)
  return net
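A hypothetical usage of mlp_network with the even/odd key layout above (dense layers at even indices, dropout at odd ones); the variable names are illustrative.

import theano
import theano.tensor as T
import lasagne

net = mlp_network(dim=784, num_hidden_layers=2, num_hidden_nodes_per_layer=256)
x = T.matrix('x')
# deterministic=True disables the dropout layers for prediction
probs = lasagne.layers.get_output(net['l_out'], {net['l_in']: x}, deterministic=True)
predict = theano.function([x], probs)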
Example #46
 def setup_discriminator(self):
     c = args.discriminator_size
     self.make_layer('disc1.1', batch_norm(self.network['conv1_2']), 1*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
     self.make_layer('disc1.2', self.last_layer(), 1*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
     self.make_layer('disc2', batch_norm(self.network['conv2_2']), 2*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
     self.make_layer('disc3', batch_norm(self.network['conv3_2']), 3*c, filter_size=(3,3), stride=(1,1), pad=(1,1))
     hypercolumn = ConcatLayer([self.network['disc1.2>'], self.network['disc2>'], self.network['disc3>']])
     self.make_layer('disc4', hypercolumn, 4*c, filter_size=(1,1), stride=(1,1), pad=(0,0))
     self.make_layer('disc5', self.last_layer(), 3*c, filter_size=(3,3), stride=(2,2))
     self.make_layer('disc6', self.last_layer(), 2*c, filter_size=(1,1), stride=(1,1), pad=(0,0))
     self.network['disc'] = batch_norm(ConvLayer(self.last_layer(), 1, filter_size=(1,1),
                                                 nonlinearity=lasagne.nonlinearities.linear))
 def __build_24_net__(self):
    
     network = layers.InputLayer((None, 3, 24, 24), input_var=self.__input_var__)
     network = layers.dropout(network, p=0.1)
     network = layers.Conv2DLayer(network,num_filters=64,filter_size=(5,5),stride=1,nonlinearity=relu)
     network = layers.batch_norm(network)
     network = layers.MaxPool2DLayer(network, pool_size = (3,3),stride = 2)
     network = layers.DropoutLayer(network,p=0.5)
     network = layers.batch_norm(network)
     network = layers.DenseLayer(network,num_units = 64,nonlinearity = relu)
     network = layers.DropoutLayer(network,p=0.5)
     network = layers.DenseLayer(network,num_units = 2, nonlinearity = softmax)
     return network
def resfuse_block(l, residual=1, projection=True):
    """
    residual: a hyperparameter of how much to leak in (should be small,
    or loss will explode: when it = 1, training loss: 2217594.131540)
    (such effect will be even higher with deeper layers)

    If we don't do projection, then the loss will just explode

    Every resfuse block is made of 2 resblock
    and top connect to bottom
    We simply won't allow dimension increase in a simple
    resfuse_block, dimension increase should be performed
    by a single resnet block layer!
    Args:
        increase_dim: only affect the first resnet block
        excessive: whether we try to connect more, or less
    """
    input_num_filters = l.output_shape[1]

    stack1 = residual_block(l)
    stack2 = residual_block(stack1)

    block = None

    if projection:
        block = batch_norm(
            ConvLayer(l, num_filters=input_num_filters, filter_size=(1, 1), stride=(1, 1), nonlinearity=None,
                      pad='same', b=None, name="resfuse_projection"))
        assert block.output_shape == stack2.output_shape
        block = NonlinearityLayer(ElemwiseSumLayer([block, stack2]), nonlinearity=None)
    else:
        # block = NonlinearityLayer(ElemwiseSumLayer([stack2, l], coeffs=residual), nonlinearity=None)
        block = stack2  # no summation, just regular resnet block

    return block
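The commented-out line above is the "leak" variant the docstring refers to; below is a hedged sketch of how it could be expressed so that only the raw input is scaled by the residual coefficient (the coefficient order follows the incomings list). This is an illustration, not the original code.

from lasagne.layers import ElemwiseSumLayer, NonlinearityLayer

def resfuse_block_leaky(l, residual=0.1):
    # stack two residual blocks, then leak a small fraction of the raw input in
    stack1 = residual_block(l)
    stack2 = residual_block(stack1)
    return NonlinearityLayer(
        ElemwiseSumLayer([stack2, l], coeffs=[1.0, residual]),
        nonlinearity=None)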
Example #49
def get_convolutional_block(
        incoming,
        convo_size=3,
        num_filters=32,
        pool_size=2,
        drop=0.5,
        padding='valid',
        counter=itertools.count(),
        sufix=''
):
    index = counter.next()
    convolution = Conv3DLayer(
        incoming=incoming,
        name='\033[34mconv_%s%d\033[0m' % (sufix, index),
        num_filters=num_filters,
        filter_size=convo_size,
        pad=padding
    )
    normalisation = batch_norm(
        layer=convolution,
        name='norm_%s%d' % (sufix, index)
    )
    dropout = DropoutLayer(
        incoming=normalisation,
        name='drop_%s%d' % (sufix, index),
        p=drop
    )
    pool = Pool3DLayer(
        incoming=dropout,
        name='\033[31mavg_pool_%s%d\033[0m' % (sufix, index),
        pool_size=pool_size,
        mode='average_inc_pad'
    )

    return pool
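A hypothetical usage note for get_convolutional_block: because the default counter is a single itertools.count() created when the function is defined, successive calls share it, so blocks are numbered consecutively without passing an index (Python 2, matching counter.next() above). The layer names in the comments are what the format strings produce, ignoring the ANSI colour codes.

from lasagne.layers import InputLayer

net = InputLayer((None, 1, 32, 32, 32))                        # 5D input for Conv3DLayer
net = get_convolutional_block(net, num_filters=16, sufix='a')  # conv_a0, norm_a0, drop_a0, avg_pool_a0
net = get_convolutional_block(net, num_filters=32, sufix='a')  # conv_a1, norm_a1, ...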
Example #50
def build_resfuse_net(input_var=None, n=5, execessive=False):
    # Building the network
    l_in = InputLayer(shape=(None, 3, 64, 64), input_var=input_var)

    # first layer, output is 16 x 64 x 64
    l = batch_norm(ConvLayer(l_in, num_filters=16, filter_size=(3, 3), stride=(1, 1), nonlinearity=rectify, pad='same',
                             W=lasagne.init.HeNormal(gain='relu')))

    # first stack of residual blocks, output is 16 x 64 x 64
    l = resfuse_block(l)
    # 2 resfuse blocks
    l = resfuse_super_block(l, excessive=execessive)

    # second stack of residual blocks, output is 32 x 32 x 32
    l = residual_block(l, increase_dim=True)
    l = resfuse_super_block(l, excessive=execessive)  # 4 res-blocks

    # third stack of residual blocks, output is 64 x 16 x 16
    l = residual_block(l, increase_dim=True)
    l = resfuse_super_block(l, excessive=execessive)  # 4 res-blocks

    # average pooling
    l = GlobalPoolLayer(l)

    # fully connected layer
    network = DenseLayer(
        l, num_units=100,
        W=lasagne.init.HeNormal(),
        nonlinearity=softmax)

    return network
Example #51
def build_CNN_nopool(in_shape,
                     num_filter,
                     fil_size,
                     strides,
                     num_out,
                     nlin_func=rectify,
                     in_var=None):

    # build a CNN
    net = InputLayer(input_var=in_var,
                     shape=in_shape)

    for i in xrange(len(fil_size)):
        net = batch_norm(ConvLayer(net,
                                   num_filters=num_filter[i],
                                   filter_size=fil_size[i],
                                   stride=strides[i],
                                   pad=1,
                                   nonlinearity=nlin_func,
                                   flip_filters=False))

    net = DenseLayer(incoming=net,
                     num_units=num_out,
                     nonlinearity=identity)

    return net
    def residual_block(l, increase_dim=False, projection=True, first=False):
        input_num_filters = l.output_shape[1]
        if increase_dim:
            first_stride = (2,2)
            out_num_filters = input_num_filters*2
        else:
            first_stride = (1,1)
            out_num_filters = input_num_filters

        if first:
            # hacky solution to keep layers correct
            bn_pre_relu = l
        else:
            # contains the BN -> ReLU portion, steps 1 to 2
            bn_pre_conv = BatchNormLayer(l)
            bn_pre_relu = NonlinearityLayer(bn_pre_conv, rectify)

        # contains the weight -> BN -> ReLU portion, steps 3 to 5
        conv_1 = batch_norm(ConvLayer(bn_pre_relu, num_filters=out_num_filters, filter_size=(3,3), stride=first_stride, nonlinearity=rectify, pad='same', W=he_norm))

        # contains the last weight portion, step 6
        conv_2 = ConvLayer(conv_1, num_filters=out_num_filters, filter_size=(3,3), stride=(1,1), nonlinearity=None, pad='same', W=he_norm)

        # add shortcut connections
        if increase_dim:
            # projection shortcut, as option B in paper
            projection = ConvLayer(l, num_filters=out_num_filters, filter_size=(1,1), stride=(2,2), nonlinearity=None, pad='same', b=None)
            block = ElemwiseSumLayer([conv_2, projection])
        else:
            block = ElemwiseSumLayer([conv_2, l])

        return block
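residual_block above relies on a module-level he_norm initializer that is not shown in this listing; a plausible definition, assuming it matches the HeNormal(gain='relu') initialization used elsewhere in these examples:

import lasagne

he_norm = lasagne.init.HeNormal(gain='relu')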
Example #53
def build_residual_layer(network, num_filters, num_elements):
    original = network
    #like an 8x8 but with about half the parameters... maybe...
    for i in xrange(num_elements):
        network = Conv2DLayer(network, num_filters, 7, stride = 1, pad='same')
        network = batch_norm(network)
    return ElemwiseSumLayer([original, network])
def build_cnn(input_var=None, n=5):
    # Building the network
    l_in = InputLayer(shape=(None, 3, 64, 64), input_var=input_var)

    # first layer, output is 16 x 64 x 64
    l = batch_norm(ConvLayer(l_in, num_filters=16, filter_size=(3, 3), stride=(1, 1), nonlinearity=rectify, pad='same',
                             W=lasagne.init.HeNormal(gain='relu')))

    # we could pool aggressively here, but we don't have to
    # CIFAR-10 doesn't aggressively pool, and ImageNet 128x128 aggressively pools

    # first stack of residual blocks, output is 16 x 64 x 64
    for _ in range(n):
        l = residual_block(l)

    # second stack of residual blocks, output is 32 x 32 x 32
    l = residual_block(l, increase_dim=True)
    for _ in range(1, n):
        l = residual_block(l)

    # third stack of residual blocks, output is 64 x 16 x 16
    l = residual_block(l, increase_dim=True)
    for _ in range(1, n):
        l = residual_block(l)

    # average pooling
    l = GlobalPoolLayer(l)

    # fully connected layer
    network = DenseLayer(
        l, num_units=100,
        W=lasagne.init.HeNormal(),
        nonlinearity=softmax)

    return network
 def processLayer(self, network, layer_definition):
     '''
     Create a lasagne layer corresponding to the "layer definition"
     '''
     if (layer_definition["type"] == "Input"):
         if self.network_type == 'CAE':
             network = lasagne.layers.InputLayer(shape=tuple([None] + layer_definition['output_shape']), input_var=self.t_input)
         elif self.network_type == 'AE':
             network = lasagne.layers.InputLayer(shape=(None, layer_definition['output_shape'][2]), input_var=self.t_input)
     elif (layer_definition['type'] == 'Dense'):
         network = lasagne.layers.DenseLayer(network, num_units=layer_definition['num_units'], nonlinearity=self.getNonLinearity(layer_definition['non_linearity']), name=self.getLayerName(layer_definition),W=self.getInitializationFct())
     elif (layer_definition['type'] == 'Conv2D'):
         network = lasagne.layers.Conv2DLayer(network, num_filters=layer_definition['num_filters'], filter_size=tuple(layer_definition["filter_size"]), pad=layer_definition['conv_mode'], nonlinearity=self.getNonLinearity(layer_definition['non_linearity']), name=self.getLayerName(layer_definition),W=self.getInitializationFct())
     elif (layer_definition['type'] == 'MaxPool2D' or layer_definition['type'] == 'MaxPool2D*'):
         network = lasagne.layers.MaxPool2DLayer(network, pool_size=tuple(layer_definition["filter_size"]), name=self.getLayerName(layer_definition))
     elif (layer_definition['type'] == 'InverseMaxPool2D'):
         network = lasagne.layers.InverseLayer(network, self.layer_list[layer_definition['layer_index']], name=self.getLayerName(layer_definition))
     elif (layer_definition['type'] == 'Unpool2D'):
         network = Unpool2DLayer(network, tuple(layer_definition['filter_size']), name=self.getLayerName(layer_definition))
     elif (layer_definition['type'] == 'Reshape'):
         network = lasagne.layers.ReshapeLayer(network, shape=tuple([-1] + layer_definition["output_shape"]), name=self.getLayerName(layer_definition))
     elif (layer_definition['type'] == 'Deconv2D'):
         network = lasagne.layers.Deconv2DLayer(network, num_filters=layer_definition['num_filters'], filter_size=tuple(layer_definition['filter_size']), crop=layer_definition['conv_mode'], nonlinearity=self.getNonLinearity(layer_definition['non_linearity']), name=self.getLayerName(layer_definition))
     self.layer_list.append(network)
     # Batch normalization on all convolutional layers except if at output
     if (self.batch_norm and (not layer_definition["is_output"]) and layer_definition['type'] in ("Conv2D", "Deconv2D")):
         network = batch_norm(network)
     # Save the encode layer separately
     if (layer_definition['is_encode']):
         self.encode_layer = lasagne.layers.flatten(network, name='fl')
         self.encode_size = layer_definition['output_shape'][0] * layer_definition['output_shape'][1] * layer_definition['output_shape'][2]
     return network
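Hypothetical layer_definition dictionaries that processLayer could consume; the field values are illustrative, but the keys are the ones the method actually reads ('type', 'num_filters', 'filter_size', 'conv_mode', 'non_linearity', 'is_output', 'is_encode').

# each dict would be passed as layer_definition to processLayer
conv_def = {
    "type": "Conv2D",
    "num_filters": 32,
    "filter_size": [3, 3],
    "conv_mode": "same",       # forwarded to pad=
    "non_linearity": "relu",   # resolved by self.getNonLinearity
    "is_output": False,        # batch norm is skipped on output layers
    "is_encode": False,
}
pool_def = {
    "type": "MaxPool2D",
    "filter_size": [2, 2],
    "is_output": False,
    "is_encode": False,
}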
Example #56
def cnn_network(dim=784,num_filters=3,filter_size=3):
  #Defining a simple one layer CNN network
  net={}
  net['l_in']=lasagne.layers.InputLayer((None,dim))
  net[0]=lasagne.layers.ReshapeLayer(net['l_in'],(-1,1,28,28))
  net[1]=batch_norm(lasagne.layers.Conv2DLayer(net[0],num_filters,filter_size,pad=1,nonlinearity=lasagne.nonlinearities.rectify))
  net['l_out']=lasagne.layers.DenseLayer(net[1],num_units=10,nonlinearity=lasagne.nonlinearities.softmax)
  return net
Example #57
def get_model(input_var, target_var, multiply_var):

    # input layer with unspecified batch size
    layer_input     = InputLayer(shape=(None, 30, 80, 80), input_var=input_var) #InputLayer(shape=(None, 1, 30, 64, 64), input_var=input_var)
    layer_0         = DimshuffleLayer(layer_input, (0, 'x', 1, 2, 3))

    # Z-score?

    # Convolution, batch normalisation and activation, followed by padded max pooling and a dropout layer
    layer_1         = batch_norm(Conv3DDNNLayer(incoming=layer_0, num_filters=64, filter_size=(3,3,3), stride=(1,3,3), pad='same', nonlinearity=leaky_rectify))
    layer_2         = MaxPool3DDNNLayer(layer_1, pool_size=(1, 2, 2), stride=(1, 2, 2), pad=(0, 1, 1))
    layer_3         = DropoutLayer(layer_2, p=0.25)

    # Convolution, batch normalisation and activation, followed by padded max pooling and a dropout layer
    layer_4         = batch_norm(Conv3DDNNLayer(incoming=layer_3, num_filters=128, filter_size=(3,3,3), stride=(1,3,3), pad='same', nonlinearity=leaky_rectify))
    layer_5         = MaxPool3DDNNLayer(layer_4, pool_size=(1, 2, 2), stride=(1, 2, 2), pad=(0, 1, 1))
    layer_6         = DropoutLayer(layer_5, p=0.25)

    # Recurrent layer
    layer_7         = DimshuffleLayer(layer_6, (0,2,1,3,4))
    layer_8         = LSTMLayer(layer_7, num_units=612, only_return_final=True)
    layer_9         = DropoutLayer(layer_8, p=0.25)

    # Output Layer
    layer_hidden         = DenseLayer(layer_9, 500, nonlinearity=sigmoid)
    layer_prediction     = DenseLayer(layer_hidden, 2, nonlinearity=linear)

    # Loss
    prediction           = get_output(layer_prediction) / multiply_var
    loss                 = squared_error(prediction, target_var)
    loss                 = loss.mean()

    # Gather the trainable parameters; the caller applies the update rule (SGD with Nesterov momentum)
    params               = get_all_params(layer_prediction, trainable=True)

    # Create a loss expression for validation/testing. The crucial difference
    # here is that we do a deterministic forward pass through the network, disabling dropout layers.
    test_prediction      = get_output(layer_prediction, deterministic=True) / multiply_var
    test_loss            = squared_error(test_prediction, target_var)
    test_loss            = test_loss.mean()

    # crps estimate
    crps                 = T.abs_(test_prediction - target_var).mean()/600

    return test_prediction, crps, loss, params
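A minimal sketch of wiring up the values returned by get_model, following the Nesterov-momentum comment inside it; it assumes input_var, target_var and multiply_var are the Theano variables passed to get_model, and the learning rate and momentum are assumptions.

import theano
import lasagne

test_prediction, crps, loss, params = get_model(input_var, target_var, multiply_var)
updates = lasagne.updates.nesterov_momentum(loss, params,
                                            learning_rate=0.001, momentum=0.9)
train_fn = theano.function([input_var, target_var, multiply_var], loss, updates=updates)
val_fn = theano.function([input_var, target_var, multiply_var], [test_prediction, crps])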
Example #58
File: adt.py Project: zenna/ig
def pop(input_stack, nlayers = 5):
  pop_net = {}
  prev_layer = input_stack
  for i in range(nlayers):
    next_layer = "l%i" % i
    prev_layer = pop_net[next_layer] = batch_norm(DenseLayer(prev_layer, nstack_reals+nitem_reals,
      nonlinearity = lasagne.nonlinearities.rectify, W=lasagne.init.HeNormal(gain='relu')))

  return pop_net, prev_layer
def highway_layer(incoming, filter_size=(3, 3), increase_dim=False, **kwargs):
    num_filters = incoming.output_shape[1]

    # regular layer
    l_h = batch_norm(lasagne.layers.Conv2DLayer(incoming, num_filters=num_filters,
                                                filter_size=filter_size,
                                                pad='same', stride=(1, 1),
                                                W=lasagne.init.HeNormal(gain='relu'),
                                                nonlinearity=rectify))

    # gate layer
    l_t = batch_norm(lasagne.layers.Conv2DLayer(incoming, num_filters=num_filters,
                                                filter_size=filter_size,
                                                pad='same', stride=(1, 1),
                                                W=lasagne.init.HeNormal(),
                                                nonlinearity=T.nnet.sigmoid))

    return MultiplicativeGatingLayer(gate=l_t, input1=l_h, input2=incoming)
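highway_layer relies on a MultiplicativeGatingLayer that is not shown in this listing. A minimal sketch, assuming it implements the standard highway gating y = t * h + (1 - t) * x; the original layer may differ in detail.

import lasagne

class MultiplicativeGatingLayer(lasagne.layers.MergeLayer):
    """Combine gate t, transformed input h and raw input x as t*h + (1-t)*x."""
    def __init__(self, gate, input1, input2, **kwargs):
        incomings = [gate, input1, input2]
        super(MultiplicativeGatingLayer, self).__init__(incomings, **kwargs)

    def get_output_shape_for(self, input_shapes):
        # all three inputs are expected to have identical shapes
        return input_shapes[0]

    def get_output_for(self, inputs, **kwargs):
        gate, input1, input2 = inputs
        return gate * input1 + (1 - gate) * input2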