Example #1
def highway_conv3(incoming, nonlinearity=nn.nonlinearities.rectify, **kwargs):
    wh = nn.init.Orthogonal('relu')
    bh = nn.init.Constant(0.0)
    wt = nn.init.Orthogonal('relu')
    bt = nn.init.Constant(-2.)
    num_filters = incoming.output_shape[1]

    # H
    l_h = Conv2DDNNLayer(incoming,
                         num_filters=num_filters,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         W=wh,
                         b=bh,
                         nonlinearity=nonlinearity)
    # T
    l_t = Conv2DDNNLayer(incoming,
                         num_filters=num_filters,
                         filter_size=(3, 3),
                         stride=(1, 1),
                         pad='same',
                         W=wt,
                         b=bt,
                         nonlinearity=T.nnet.sigmoid)

    return HighwayLayer(gate=l_t, input1=l_h, input2=incoming)
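
HighwayLayer is this project's own gating layer rather than stock Lasagne, so the following is only a usage sketch (assuming `import lasagne as nn` as in the snippet, and an existing 4D layer `l_in`; names are illustrative):

l = Conv2DDNNLayer(l_in, num_filters=32, filter_size=(3, 3), pad='same')
for _ in range(4):
    # each block preserves the channel count, so blocks chain freely
    l = highway_conv3(l)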
Example #2
def createGenerator2(input_var=None):

	_ = InputLayer(shape=(None, 64), input_var=input_var)
	_ = batch_norm(DenseLayer(_, num_units=1000, nonlinearity=lasagne.nonlinearities.rectify))
	_ = batch_norm(DenseLayer(_, num_units=64*16*16, nonlinearity=lasagne.nonlinearities.rectify))
	_ = ReshapeLayer(_, ([0], 64, 16, 16))
	_ = batch_norm(Conv2DDNNLayer(_, 128, 3, pad='same'))
	_ = Upscale2DLayer(_, 2)
	_ = batch_norm(Conv2DDNNLayer(_, 128, 3, pad='same'))
	_ = Upscale2DLayer(_, 2)
	_ = batch_norm(Conv2DDNNLayer(_, 256, 3, pad='same'))
	_ = batch_norm(Conv2DDNNLayer(_, 256, 3, pad='same'))
	l_generator = batch_norm(Conv2DDNNLayer(_, 3, 3, pad='same', nonlinearity=lasagne.nonlinearities.sigmoid))


	print('--------------------')
	print('Generator architecture: \n')

	#get all layers
	allLayers=lasagne.layers.get_all_layers(l_generator)
	#for each layer print its shape information
	for l in allLayers:
		print(lasagne.layers.get_output_shape(l))

	print ("Generator output:", l_generator.output_shape)
	return l_generator
Example #3
def createDiscriminator2(input_var=None):

	_ = InputLayer(shape=(None, 3, 64, 64), input_var=input_var)
	_ = batch_norm(Conv2DDNNLayer(_, 64, 3, pad='same'))
	_ = batch_norm(Conv2DDNNLayer(_, 64, 3, pad='same'))
	_ = MaxPool2DDNNLayer(_, 2)
	_ = batch_norm(Conv2DDNNLayer(_, 64, 3, pad='same'))
	_ = MaxPool2DDNNLayer(_, 2)
	_ = batch_norm(Conv2DDNNLayer(_, 128, 3, pad='same'))
	_ = batch_norm(Conv2DDNNLayer(_, 128, 3, pad='same'))
	_ = FlattenLayer(_)
	_ = DenseLayer(_, num_units=1000, nonlinearity=lasagne.nonlinearities.rectify)
	l_discriminator = DenseLayer(_, num_units=1, nonlinearity=lasagne.nonlinearities.sigmoid)

	print('--------------------')
	print('Discriminator architecture: \n')

	#get all layers
	allLayers=lasagne.layers.get_all_layers(l_discriminator)
	#for each layer print its shape information
	for l in allLayers:
		print(lasagne.layers.get_output_shape(l))

	print ("Discriminator output:", l_discriminator.output_shape)
	return l_discriminator
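
These two functions are typically wired together with the standard GAN objective. Below is a minimal hedged sketch in the spirit of the stock Lasagne GAN example; variable names and hyperparameters are illustrative, not from this repository:

import theano
import theano.tensor as T
import lasagne

noise_var = T.matrix('noise')    # illustrative variable names
image_var = T.tensor4('images')

generator = createGenerator2(noise_var)
discriminator = createDiscriminator2(image_var)

real_out = lasagne.layers.get_output(discriminator)
fake_out = lasagne.layers.get_output(discriminator,
                                     lasagne.layers.get_output(generator))

# standard (non-saturating) GAN losses
g_loss = lasagne.objectives.binary_crossentropy(fake_out, 1).mean()
d_loss = (lasagne.objectives.binary_crossentropy(real_out, 1) +
          lasagne.objectives.binary_crossentropy(fake_out, 0)).mean()

g_updates = lasagne.updates.adam(
    g_loss, lasagne.layers.get_all_params(generator, trainable=True),
    learning_rate=2e-4, beta1=0.5)
d_updates = lasagne.updates.adam(
    d_loss, lasagne.layers.get_all_params(discriminator, trainable=True),
    learning_rate=2e-4, beta1=0.5)

train_g = theano.function([noise_var], g_loss, updates=g_updates)
train_d = theano.function([noise_var, image_var], d_loss, updates=d_updates)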
Example #4
def nin(layer, conv_filters, filter_size, pad, cccp1_filters, cccp2_filters):
    layer = Conv2DDNNLayer(layer,
                           num_filters=conv_filters,
                           filter_size=filter_size,
                           pad=pad,
                           nonlinearity=nonlinearities.rectify,
                           flip_filters=False)
    layer = Conv2DDNNLayer(layer,
                           num_filters=cccp1_filters,
                           filter_size=(1, 1),
                           nonlinearity=nonlinearities.rectify,
                           flip_filters=False)
    layer = Conv2DDNNLayer(layer,
                           num_filters=cccp2_filters,
                           filter_size=(1, 1),
                           nonlinearity=nonlinearities.rectify,
                           flip_filters=False)
    return layer
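
A hedged usage sketch of the block above; the filter counts echo the first block of the Network in Network paper, and the input layer is illustrative:

l = InputLayer((None, 3, 32, 32))
l = nin(l, conv_filters=192, filter_size=(5, 5), pad=2,
        cccp1_filters=160, cccp2_filters=96)
l = MaxPool2DDNNLayer(l, pool_size=(3, 3), stride=2)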
Example #5
    def test_pad(self, DummyInputLayer):
        try:
            from lasagne.layers.dnn import Conv2DDNNLayer
        except ImportError:
            pytest.skip("dnn not available")

        input_layer = DummyInputLayer((1, 2, 3, 3))

        layer = Conv2DDNNLayer(input_layer, num_filters=4, filter_size=(3, 3),
                               pad=(3, 3))
        assert layer.output_shape == (1, 4, 7, 7)
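
For reference, the expected shape follows the usual convolution arithmetic: out = (in + 2*pad - filter) // stride + 1 = (3 + 2*3 - 3) // 1 + 1 = 7 along each spatial axis, hence (1, 4, 7, 7).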
Example #6
    def test_pad(self, DummyInputLayer):
        try:
            from lasagne.layers.dnn import Conv2DDNNLayer
        except ImportError:
            pytest.skip("dnn not available")

        input_layer = DummyInputLayer((1, 2, 3, 3))
        with pytest.raises(RuntimeError) as exc:
            layer = Conv2DDNNLayer(input_layer,
                                   num_filters=1,
                                   filter_size=(3, 3),
                                   border_mode='valid',
                                   pad=(1, 1))
        assert ("You cannot specify both 'border_mode' and 'pad'"
                in exc.value.args[0])

        layer = Conv2DDNNLayer(input_layer,
                               num_filters=4,
                               filter_size=(3, 3),
                               pad=(3, 3))
        assert layer.output_shape == (1, 4, 7, 7)
Example #7
def build_model(input_var=None):
    # Input layer
    ''' 
    out: b x 3 x 227 x 227 
    '''
    lin = InputLayer(shape=(None, 3, 227, 227), input_var=input_var)

    # ConvPool1
    ''' 
    out: b x 96 x 27 x 27 
    out.W: 96 x 3 x 11 x 11
    '''
    """ input was b01c, need to be bc01"""
    l1 = Conv2DDNNLayer(
        lin,
        #lasagne.layers.dimshuffle(lin, (0,3,1,2)),
        num_filters=96,
        filter_size=11,
        stride=4,
        W=lasagne.init.Constant(0.),  #W = Ws['W_0'], b = bs['b_0'],
        nonlinearity=lasagne.nonlinearities.rectify)
    l1 = MaxPool2DDNNLayer(l1, pool_size=3, stride=2)

    # ConvPool2: 2 groups
    ''' 
    out: b x 256 x 13 x 13
    out.W0/1: 128 x 48 x 5 x 5
    '''
    l1_0 = SliceLayer(l1, indices=slice(None, 48), axis=1)
    l2_0 = Conv2DDNNLayer(
        l1_0,
        num_filters=128,
        filter_size=5,
        stride=1,
        pad=2,
        W=lasagne.init.Constant(0.),  #W = Ws['W0_1'], b = bs['b0_1'],
        nonlinearity=lasagne.nonlinearities.rectify)
    l2_0p = MaxPool2DDNNLayer(l2_0, pool_size=3, stride=2)

    l1_1 = SliceLayer(l1, indices=slice(48, None), axis=1)
    l2_1 = Conv2DDNNLayer(
        l1_1,
        num_filters=128,
        filter_size=5,
        stride=1,
        pad=2,
        W=lasagne.init.Constant(0.),  #W = Ws['W1_1'], b = bs['b1_1'],
        nonlinearity=lasagne.nonlinearities.rectify)
    l2_1p = MaxPool2DDNNLayer(l2_1, pool_size=3, stride=2)

    l2 = ConcatLayer([l2_0p, l2_1p], axis=1)

    # Conv3
    ''' 
    out: b x 384 x 13 x 13
    out.W: 384 x 256 x 3 x 3
    '''
    l3 = Conv2DDNNLayer(
        l2,
        num_filters=384,
        filter_size=3,
        stride=1,
        pad='same',
        W=lasagne.init.Constant(0.),  #W = Ws['W_2'], b = bs['b_2'],
        nonlinearity=lasagne.nonlinearities.rectify)

    # Conv4: 2 groups
    ''' 
    out: b x 384 x 13 x 13
    out.W0/1: 192 x 192 x 3 x 3
    '''
    l3_0 = SliceLayer(l3, indices=slice(None, 192), axis=1)
    l4_0 = Conv2DDNNLayer(
        l3_0,
        num_filters=192,
        filter_size=3,
        stride=1,
        pad='same',
        W=lasagne.init.Constant(0.),  #W = Ws['W0_3'], b = bs['b0_3'],
        nonlinearity=lasagne.nonlinearities.rectify)

    l3_1 = SliceLayer(l3, indices=slice(192, None), axis=1)
    l4_1 = Conv2DDNNLayer(
        l3_1,
        num_filters=192,
        filter_size=3,
        stride=1,
        pad='same',
        W=lasagne.init.Constant(0.),  #W = Ws['W1_3'], b = bs['b1_3'],
        nonlinearity=lasagne.nonlinearities.rectify)

    # ConvPool5: 2 groups
    ''' 
    out: b x 256 x 6 x 6
    out.W0/1: 128 x 192 x 3 x 3
    '''
    l5_0 = Conv2DDNNLayer(
        l4_0,
        num_filters=128,
        filter_size=3,
        stride=1,
        pad='same',
        W=lasagne.init.Constant(0.),  #W = Ws['W0_4'], b = bs['b0_4'],
        nonlinearity=lasagne.nonlinearities.rectify)
    l5_0p = MaxPool2DDNNLayer(l5_0, pool_size=3, stride=2)

    l5_1 = Conv2DDNNLayer(
        l4_1,
        num_filters=128,
        filter_size=3,
        stride=1,
        pad='same',
        W=lasagne.init.Constant(0.),  #W = Ws['W1_4'], b = bs['b1_4'],
        nonlinearity=lasagne.nonlinearities.rectify)
    l5_1p = MaxPool2DDNNLayer(l5_1, pool_size=3, stride=2)

    l5 = ConcatLayer([l5_0p, l5_1p], axis=1)

    # FC6
    ''' 
    out: b x 4096 (x 1 x 1)
    out.W: 9216 x 4096
    '''
    l6 = DenseLayer(
        l5,
        #lasagne.layers.dropout(l5, p=.0),
        num_units=4096,
        W=lasagne.init.Constant(0.),  #W = Ws['W_5'], b = bs['b_5'],
        nonlinearity=lasagne.nonlinearities.rectify)

    # FC7
    ''' 
    out: b x 4096 (x 1 x 1)
    out.W: 4096 x 4096
    '''
    l7 = DenseLayer(
        l6,
        #lasagne.layers.dropout(l6, p=.5),
        num_units=4096,
        W=lasagne.init.Constant(0.),  #W = Ws['W_6'], b = bs['b_6'],
        nonlinearity=lasagne.nonlinearities.rectify)

    # FC8: replace last layer in AlexNet
    ''' 
    out: b x 22
    out.W: 4096 x 22
    '''
    l8 = DenseLayer(l7,
                    num_units=22,
                    nonlinearity=lasagne.nonlinearities.softmax)
    return l8
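
The Slice/Concat pairs above emulate AlexNet's two-group convolutions. A hedged helper capturing the pattern (the name and interface are illustrative, not part of the original code):

def grouped_conv(incoming, num_filters, **conv_kwargs):
    # split the channel axis in half, convolve each half independently,
    # then concatenate: equivalent to a convolution with two groups
    half = incoming.output_shape[1] // 2
    top = SliceLayer(incoming, indices=slice(None, half), axis=1)
    bottom = SliceLayer(incoming, indices=slice(half, None), axis=1)
    top = Conv2DDNNLayer(top, num_filters // 2, **conv_kwargs)
    bottom = Conv2DDNNLayer(bottom, num_filters // 2, **conv_kwargs)
    return ConcatLayer([top, bottom], axis=1)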
Example #8
def build_model():

    #################
    # Regular model #
    #################
    input_size = data_sizes["sliced:data:singleslice"]

    l0 = InputLayer(input_size)
    # add channel layer
    #l0r = reshape(l0, (-1, 1, ) + input_size[1:])

    # (batch, channel, time, x, y)
    l = Conv2DDNNLayer(l0, num_filters=64, filter_size=(3, 3),
                       W=lasagne.init.Orthogonal('relu'),
                       b=lasagne.init.Constant(0.1),
                       pad='same')
    l = Conv2DDNNLayer(l, num_filters=64, filter_size=(3, 3),
                       W=lasagne.init.Orthogonal("relu"),
                       b=lasagne.init.Constant(0.1),
                       pad="valid")

    l = lasagne.layers.PadLayer(l, width=(1, 1))
    l = MaxPool2DDNNLayer(l, pool_size=(2, 2), stride=(2, 2))
    #l = lasagne.layers.DropoutLayer(l, p=0.25)

    # ---------------------------------------------------------------
    l = Conv2DDNNLayer(l, num_filters=96, filter_size=(3, 3),
                       W=lasagne.init.Orthogonal("relu"),
                       b=lasagne.init.Constant(0.1),
                       pad="same")
    l = Conv2DDNNLayer(l, num_filters=96, filter_size=(3, 3),
                       W=lasagne.init.Orthogonal("relu"),
                       b=lasagne.init.Constant(0.1),
                       pad="valid")

    l = lasagne.layers.PadLayer(l, width=(1, 1))
    l = MaxPool2DDNNLayer(l, pool_size=(2, 2), stride=(2, 2))
    #l = lasagne.layers.DropoutLayer(l, p=0.25)

    # ---------------------------------------------------------------
    l = Conv2DDNNLayer(l, num_filters=128, filter_size=(2, 2),
                       W=lasagne.init.Orthogonal("relu"),
                       b=lasagne.init.Constant(0.1))
    l = Conv2DDNNLayer(l, num_filters=128, filter_size=(2, 2),
                       W=lasagne.init.Orthogonal("relu"),
                       b=lasagne.init.Constant(0.1))
    l = MaxPool2DDNNLayer(l, pool_size=(2, 2), stride=(2, 2))
    l = lasagne.layers.DropoutLayer(l, p=0.25)

    # --------------------------------------------------------------

    l = lasagne.layers.FlattenLayer(l)
    l_d1 = lasagne.layers.DenseLayer(l, num_units=1024, W=lasagne.init.Orthogonal('relu'), b=lasagne.init.Constant(0.1))
    l_systole = lasagne.layers.DenseLayer(lasagne.layers.dropout(l_d1, p=0.5), num_units=1, W=lasagne.init.Orthogonal('relu'),
                                      b=lasagne.init.Constant(0.1), nonlinearity=lasagne.nonlinearities.identity)

    # --------------------------------------------------------------
    # --------------------------------------------------------------
    # --------------------------------------------------------------


    l = Conv2DDNNLayer(l0, num_filters=64, filter_size=(3, 3),
                       W=lasagne.init.Orthogonal('relu'),
                       b=lasagne.init.Constant(0.1),
                       pad='same')
    l = Conv2DDNNLayer(l, num_filters=64, filter_size=(3, 3),
                       W=lasagne.init.Orthogonal("relu"),
                       b=lasagne.init.Constant(0.1),
                       pad="valid")

    l = lasagne.layers.PadLayer(l, width=(1, 1))
    l = MaxPool2DDNNLayer(l, pool_size=(2, 2), stride=(2, 2))
    #l = lasagne.layers.DropoutLayer(l, p=0.25)

    # ---------------------------------------------------------------
    l = Conv2DDNNLayer(l, num_filters=96, filter_size=(3, 3),
                       W=lasagne.init.Orthogonal("relu"),
                       b=lasagne.init.Constant(0.1),
                       pad="same")
    l = Conv2DDNNLayer(l, num_filters=96, filter_size=(3, 3),
                       W=lasagne.init.Orthogonal("relu"),
                       b=lasagne.init.Constant(0.1),
                       pad="valid")

    l = lasagne.layers.PadLayer(l, width=(1, 1))
    l = MaxPool2DDNNLayer(l, pool_size=(2, 2), stride=(2, 2))
    #l = lasagne.layers.DropoutLayer(l, p=0.25)

    # ---------------------------------------------------------------
    l = Conv2DDNNLayer(l, num_filters=128, filter_size=(2, 2),
                       W=lasagne.init.Orthogonal("relu"),
                       b=lasagne.init.Constant(0.1))
    l = Conv2DDNNLayer(l, num_filters=128, filter_size=(2, 2),
                       W=lasagne.init.Orthogonal("relu"),
                       b=lasagne.init.Constant(0.1))
    l = MaxPool2DDNNLayer(l, pool_size=(2, 2), stride=(2, 2))
    l = lasagne.layers.DropoutLayer(l, p=0.25)

    # --------------------------------------------------------------

    l = lasagne.layers.FlattenLayer(l)
    l_d2 = lasagne.layers.DenseLayer(l, num_units=1024, W=lasagne.init.Orthogonal('relu'), b=lasagne.init.Constant(0.1))
    l_diastole = lasagne.layers.DenseLayer(lasagne.layers.dropout(l_d2, p=0.5), num_units=1, W=lasagne.init.Orthogonal('relu'),
                                      b=lasagne.init.Constant(0.1), nonlinearity=lasagne.nonlinearities.identity)

    return {
        "inputs":{
            "sliced:data:singleslice": l0
        },
        "outputs": {
            "systole:value": l_systole,
            "diastole:value": l_diastole,
        },
        "regularizable": {
            l_d1: 1e-3,
            l_d2: 1e-3,
        }
    }
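
A hedged sketch of consuming the returned dictionary to compile a prediction function (variable names are illustrative):

import theano
import lasagne

model = build_model()
systole_out, diastole_out = lasagne.layers.get_output(
    [model["outputs"]["systole:value"], model["outputs"]["diastole:value"]],
    deterministic=True)
input_var = model["inputs"]["sliced:data:singleslice"].input_var
predict_fn = theano.function([input_var], [systole_out, diastole_out])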
Example #9
    def d_architecture(self):
        ### Input
        lrelu = lasagne.nonlinearities.LeakyRectify(0.2)
        input_shape = self.size_input  # 50000*1*28*28 (channels=1, height=28, width=28)
        discriminator_layers = [
            lasagne.layers.InputLayer(shape=(None, input_shape[1],
                                             input_shape[2], input_shape[3]),
                                      input_var=self.input_var)
        ]
        discriminator_layers.append(
            nn.weight_norm(
                Conv2DDNNLayer(discriminator_layers[-1],
                               64, (5, 5),
                               pad=2,
                               stride=2,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))
        discriminator_layers.append(
            nn.weight_norm(
                Conv2DDNNLayer(discriminator_layers[-1],
                               128, (5, 5),
                               pad=2,
                               stride=2,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))
        discriminator_layers.append(
            nn.weight_norm(
                Conv2DDNNLayer(discriminator_layers[-1],
                               256, (5, 5),
                               pad=2,
                               stride=2,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))
        discriminator_layers.append(
            nn.weight_norm(
                Conv2DDNNLayer(discriminator_layers[-1],
                               512, (5, 5),
                               pad=2,
                               stride=2,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))
        discriminator_layers.append(
            nn.weight_norm(
                Conv2DDNNLayer(discriminator_layers[-1],
                               1024, (5, 5),
                               pad=2,
                               stride=2,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))
        discriminator_layers.append(
            lasagne.layers.DropoutLayer(discriminator_layers[-1], p=0.2))
        discriminator_layers.append(
            nn.weight_norm(
                lasagne.layers.NINLayer(discriminator_layers[-1],
                                        num_units=1024,
                                        W=lasagne.init.Normal(0.05),
                                        nonlinearity=lrelu)))
        discriminator_layers.append(
            lasagne.layers.GlobalPoolLayer(discriminator_layers[-1]))
        #discriminator_layers.append(lasagne.layers.batch_norm(lasagne.layers.DenseLayer(discriminator_layers[-1] , num_units=1, W=lasagne.init.Normal(0.05), nonlinearity=lasagne.nonlinearities.sigmoid)))
        discriminator_layers.append(
            nn.weight_norm(lasagne.layers.DenseLayer(
                discriminator_layers[-1],
                num_units=1,
                W=lasagne.init.Normal(0.05),
                nonlinearity=lasagne.nonlinearities.sigmoid),
                           train_g=True,
                           init_stdv=0.1))
        self.architecture = discriminator_layers
Example #10
    def d_architecture2(self):
        ### Input
        input_shape = self.size_input  # 50000*1*28*28 (channels=1, height=28, width=28)
        print(input_shape)
        discriminator_layers = [
            lasagne.layers.InputLayer(shape=(None, input_shape[1],
                                             input_shape[2], input_shape[3]),
                                      input_var=self.input_var)
        ]
        # go from 3*32*32 to 96*32*32, since there are 96 filters
        # W shape is (num_filters, num_input_channels, rows, cols)
        Tempw = lasagne.utils.create_param(lasagne.init.Normal(0.05),
                                           (96, input_shape[1], 5, 5),
                                           "W_D_conv1")
        Tempb = lasagne.utils.create_param(lasagne.init.Constant(0.0), (96, ),
                                           "b_D_conv1")
        discriminator_layers.append(
            lasagne.layers.Conv2DLayer(discriminator_layers[-1],
                                       num_filters=96,
                                       filter_size=(5, 5),
                                       nonlinearity=self.nonlinear,
                                       W=Tempw,
                                       b=Tempb))
        # check the convolution type (full, same, valid)
        #########################
        # go from 96*32*32 to 96*16*16 with 2*2 pooling
        #discriminator_layers.append(lasagne.layers.MaxPool2DLayer(discriminator_layers[-1], pool_size=(2,2)))
        # go from 96*32*32 to 192*32*32
        Tempw = lasagne.utils.create_param(lasagne.init.Normal(0.05),
                                           (192, 96, 5, 5), "W_D_conv2")
        Tempb = lasagne.utils.create_param(lasagne.init.Constant(0.0), (192, ),
                                           "b_D_conv2")
        #Tempw = theano.shared(self.fonction_ini(0.05,1,(1,192,32,32)).astype('float32'))
        #Tempb = theano.shared(self.fonction_ini(0.05,1,(192,)).astype('float32'))
        discriminator_layers.append(
            lasagne.layers.Conv2DLayer(discriminator_layers[-1],
                                       num_filters=192,
                                       filter_size=(5, 5),
                                       nonlinearity=self.nonlinear,
                                       W=Tempw,
                                       b=Tempb))
        # go from 192*32*32 down to 192
        Tempw = lasagne.utils.create_param(lasagne.init.Normal(0.05),
                                           (192, 192, 5, 5), "W_D_conv3")
        Tempb = lasagne.utils.create_param(lasagne.init.Constant(0.0), (192, ),
                                           "b_D_conv3")
        discriminator_layers.append(
            lasagne.layers.DropoutLayer(discriminator_layers[-1], p=.5))
        discriminator_layers.append(
            lasagne.layers.Conv2DLayer(discriminator_layers[-1],
                                       num_filters=192,
                                       filter_size=(5, 5),
                                       nonlinearity=self.nonlinear,
                                       W=Tempw,
                                       b=Tempb))
        # go from 192 to 192
        # NINLayer W shape is (num_inputs, num_units)
        Tempw = lasagne.utils.create_param(lasagne.init.Normal(0.05), (192, 192),
                                           "W_D_conv4")
        Tempb = lasagne.utils.create_param(lasagne.init.Constant(0.0), (192, ),
                                           "b_D_conv4")
        discriminator_layers.append(
            lasagne.layers.NINLayer(discriminator_layers[-1],
                                    num_units=192,
                                    nonlinearity=self.nonlinear,
                                    W=Tempw,
                                    b=Tempb))
        discriminator_layers.append(
            lasagne.layers.GlobalPoolLayer(discriminator_layers[-1]))
        # go from 192 down to 2
        Tempw = lasagne.utils.create_param(lasagne.init.Normal(0.05), (192, 2),
                                           "W_D_conv5")
        Tempb = lasagne.utils.create_param(lasagne.init.Constant(0.0), (2, ),
                                           "b_D_conv5")
        discriminator_layers.append(
            lasagne.layers.DenseLayer(discriminator_layers[-1],
                                      num_units=2,
                                      W=Tempw,
                                      b=Tempb,
                                      nonlinearity=None))
        print(input_shape)
        self.parameters = lasagne.layers.get_all_params(discriminator_layers,
                                                        trainable=True)
        self.last_layer = discriminator_layers[-1]
        ##########################
        self.architecture = discriminator_layers

        input_shape = self.size_input  # 50000*1*28*28 (channels=1, height=28, width=28)

        discriminator_layers = [
            lasagne.layers.InputLayer(shape=(None, input_shape[1],
                                             input_shape[2], input_shape[3]),
                                      input_var=self.input_var)
        ]
        # go from 3*32*32 to 96*32*32, since there are 96 filters
        discriminator_layers.append(
            lasagne.layers.DropoutLayer(discriminator_layers[-1], p=0.5))
        discriminator_layers.append(
            nn.weight_norm(
                Conv2DDNNLayer(
                    discriminator_layers[-1],
                    192, (3, 3),
                    pad=1,
                    W=lasagne.init.Normal(0.05),
                    nonlinearity=lasagne.nonlinearities.leaky_rectify)))
        discriminator_layers.append(
            nn.weight_norm(
                Conv2DDNNLayer(
                    discriminator_layers[-1],
                    192, (3, 3),
                    pad=1,
                    W=lasagne.init.Normal(0.05),
                    nonlinearity=lasagne.nonlinearities.leaky_rectify)))
        discriminator_layers.append(
            nn.weight_norm(
                Conv2DDNNLayer(
                    discriminator_layers[-1],
                    192, (3, 3),
                    pad=1,
                    stride=2,
                    W=lasagne.init.Normal(0.05),
                    nonlinearity=lasagne.nonlinearities.leaky_rectify)))

        discriminator_layers.append(
            nn.weight_norm(
                Conv2DDNNLayer(discriminator_layers[-1],
                               256, (3, 3),
                               pad=1,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lasagne.nonlinearities.rectify)))
        discriminator_layers.append(
            nn.weight_norm(
                Conv2DDNNLayer(discriminator_layers[-1],
                               256, (3, 3),
                               pad=1,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lasagne.nonlinearities.rectify)))
        discriminator_layers.append(
            nn.weight_norm(
                Conv2DDNNLayer(discriminator_layers[-1],
                               256, (3, 3),
                               pad=1,
                               stride=2,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lasagne.nonlinearities.rectify)))
        discriminator_layers.append(
            lasagne.layers.DropoutLayer(discriminator_layers[-1], p=0.5))

        discriminator_layers.append(
            nn.weight_norm(
                Conv2DDNNLayer(discriminator_layers[-1],
                               256, (3, 3),
                               pad=0,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lasagne.nonlinearities.rectify)))
        discriminator_layers.append(
            nn.weight_norm(
                lasagne.layers.NINLayer(
                    discriminator_layers[-1],
                    num_units=256,
                    W=lasagne.init.Normal(0.05),
                    nonlinearity=lasagne.nonlinearities.rectify)))
        discriminator_layers.append(
            nn.weight_norm(
                lasagne.layers.NINLayer(
                    discriminator_layers[-1],
                    num_units=256,
                    W=lasagne.init.Normal(0.05),
                    nonlinearity=lasagne.nonlinearities.rectify)))
        discriminator_layers.append(
            lasagne.layers.GlobalPoolLayer(discriminator_layers[-1]))

        self.architecture = discriminator_layers

        return
Example #11
def build_cnn():
    """
	VGG-19 CNN Network 
	Paper Name: Very Deep Convolutional Networks for Large-Scale Image Recognition
	"""
    flip = argv.filters
    net = {}
    net['input'] = layers.InputLayer((1, 3, IMAGE_DIM, IMAGE_DIM))
    net['conv1_1'] = Conv2DDNNLayer(net['input'],
                                    64,
                                    3,
                                    pad=1,
                                    flip_filters=flip)
    net['conv1_2'] = Conv2DDNNLayer(net['conv1_1'],
                                    64,
                                    3,
                                    pad=1,
                                    flip_filters=flip)
    net['pool1'] = layers.Pool2DLayer(net['conv1_2'],
                                      2,
                                      mode='average_exc_pad')
    net['conv2_1'] = Conv2DDNNLayer(net['pool1'],
                                    128,
                                    3,
                                    pad=1,
                                    flip_filters=flip)
    net['conv2_2'] = Conv2DDNNLayer(net['conv2_1'],
                                    128,
                                    3,
                                    pad=1,
                                    flip_filters=flip)
    net['pool2'] = layers.Pool2DLayer(net['conv2_2'],
                                      2,
                                      mode='average_exc_pad')
    net['conv3_1'] = Conv2DDNNLayer(net['pool2'],
                                    256,
                                    3,
                                    pad=1,
                                    flip_filters=flip)
    net['conv3_2'] = Conv2DDNNLayer(net['conv3_1'],
                                    256,
                                    3,
                                    pad=1,
                                    flip_filters=flip)
    net['conv3_3'] = Conv2DDNNLayer(net['conv3_2'],
                                    256,
                                    3,
                                    pad=1,
                                    flip_filters=flip)
    net['conv3_4'] = Conv2DDNNLayer(net['conv3_3'],
                                    256,
                                    3,
                                    pad=1,
                                    flip_filters=flip)
    net['pool3'] = layers.Pool2DLayer(net['conv3_4'],
                                      2,
                                      mode='average_exc_pad')
    net['conv4_1'] = Conv2DDNNLayer(net['pool3'],
                                    512,
                                    3,
                                    pad=1,
                                    flip_filters=flip)
    net['conv4_2'] = Conv2DDNNLayer(net['conv4_1'],
                                    512,
                                    3,
                                    pad=1,
                                    flip_filters=flip)
    net['conv4_3'] = Conv2DDNNLayer(net['conv4_2'],
                                    512,
                                    3,
                                    pad=1,
                                    flip_filters=flip)
    net['conv4_4'] = Conv2DDNNLayer(net['conv4_3'],
                                    512,
                                    3,
                                    pad=1,
                                    flip_filters=flip)
    net['pool4'] = layers.Pool2DLayer(net['conv4_4'],
                                      2,
                                      mode='average_exc_pad')
    net['conv5_1'] = Conv2DDNNLayer(net['pool4'],
                                    512,
                                    3,
                                    pad=1,
                                    flip_filters=flip)
    net['conv5_2'] = Conv2DDNNLayer(net['conv5_1'],
                                    512,
                                    3,
                                    pad=1,
                                    flip_filters=flip)
    net['conv5_3'] = Conv2DDNNLayer(net['conv5_2'],
                                    512,
                                    3,
                                    pad=1,
                                    flip_filters=flip)
    net['conv5_4'] = Conv2DDNNLayer(net['conv5_3'],
                                    512,
                                    3,
                                    pad=1,
                                    flip_filters=flip)
    net['pool5'] = layers.Pool2DLayer(net['conv5_4'],
                                      2,
                                      mode='average_exc_pad')

    return net
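
Average pooling in place of max pooling is the VGG variant commonly used for neural style transfer, where intermediate activations act as content and style features. A hedged sketch of compiling a feature extractor from the dictionary returned above (the layer choice is illustrative):

import theano
import theano.tensor as T
import lasagne

net = build_cnn()
x = T.tensor4('x')
# pull content features from a deep layer, style features from a shallower one
content, style = lasagne.layers.get_output([net['conv4_2'], net['conv3_1']], x)
features_fn = theano.function([x], [content, style])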
Example #12
    def _build(self):
        layer = layers.InputLayer(shape=(None, 3, 224, 224), input_var=self.X)
        layer = Conv2DDNNLayer(layer, num_filters=64, filter_size=(3, 3), pad=1, flip_filters=False)
        layer = Conv2DDNNLayer(layer, num_filters=64, filter_size=(3, 3), pad=1, flip_filters=False)
        layer = layers.Pool2DLayer(layer, pool_size=(2, 2), mode='max')
        layer = Conv2DDNNLayer(layer, num_filters=128, filter_size=(3, 3), pad=1, flip_filters=False)
        layer = Conv2DDNNLayer(layer, num_filters=128, filter_size=(3, 3), pad=1, flip_filters=False)
        layer = layers.Pool2DLayer(layer, pool_size=(2, 2), mode='max')
        layer = Conv2DDNNLayer(layer, num_filters=256, filter_size=(3, 3), pad=1, flip_filters=False)
        layer = Conv2DDNNLayer(layer, num_filters=256, filter_size=(3, 3), pad=1, flip_filters=False)
        layer = Conv2DDNNLayer(layer, num_filters=256, filter_size=(3, 3), pad=1, flip_filters=False)
        layer = layers.Pool2DLayer(layer, pool_size=(2, 2), mode='max')
        layer = Conv2DDNNLayer(layer, num_filters=512, filter_size=(3, 3), pad=1, flip_filters=False)
        layer = Conv2DDNNLayer(layer, num_filters=512, filter_size=(3, 3), pad=1, flip_filters=False)
        layer = Conv2DDNNLayer(layer, num_filters=512, filter_size=(3, 3), pad=1, flip_filters=False)
        layer = layers.Pool2DLayer(layer, pool_size=(2, 2), mode='max')
        layer = Conv2DDNNLayer(layer, num_filters=512, filter_size=(3, 3), pad=1, flip_filters=False)
        layer = Conv2DDNNLayer(layer, num_filters=512, filter_size=(3, 3), pad=1, flip_filters=False)
        layer = Conv2DDNNLayer(layer, num_filters=512, filter_size=(3, 3), pad=1, flip_filters=False)
        layer = layers.Pool2DLayer(layer, pool_size=(2, 2), mode='max')
        layer = layers.DenseLayer(layer, num_units=4096)
        layer = layers.DropoutLayer(layer, p=0.5)
        layer = layers.DenseLayer(layer, num_units=4096)
        layer = layers.DropoutLayer(layer, p=0.5)
        layer = layers.DenseLayer(layer, num_units=1000)
        layer = layers.NonlinearityLayer(layer, nonlinearity=nonlinearities.softmax)
        return layer
Example #13
    def __init__(self, input_var=None, w_path='data/vgg16_rm.pkl'):
        super(Vgg16Base, self).__init__(input_var, w_path)

        net = {}
        net['input'] = InputLayer((None, 3, 224, 224), input_var=input_var)
        net['conv1_1'] = Conv2DDNNLayer(net['input'],
                                        64,
                                        3,
                                        pad=1,
                                        flip_filters=False)
        net['conv1_2'] = Conv2DDNNLayer(net['conv1_1'],
                                        64,
                                        3,
                                        pad=1,
                                        flip_filters=False)
        net['pool1'] = MaxPool2DLayer(net['conv1_2'], 2)
        net['conv2_1'] = Conv2DDNNLayer(net['pool1'],
                                        128,
                                        3,
                                        pad=1,
                                        flip_filters=False)
        net['conv2_2'] = Conv2DDNNLayer(net['conv2_1'],
                                        128,
                                        3,
                                        pad=1,
                                        flip_filters=False)
        net['pool2'] = MaxPool2DLayer(net['conv2_2'], 2)
        net['conv3_1'] = Conv2DDNNLayer(net['pool2'],
                                        256,
                                        3,
                                        pad=1,
                                        flip_filters=False)
        net['conv3_2'] = Conv2DDNNLayer(net['conv3_1'],
                                        256,
                                        3,
                                        pad=1,
                                        flip_filters=False)
        net['conv3_3'] = Conv2DDNNLayer(net['conv3_2'],
                                        256,
                                        3,
                                        pad=1,
                                        flip_filters=False)
        net['pool3'] = MaxPool2DLayer(net['conv3_3'], 2)
        net['conv4_1'] = Conv2DDNNLayer(net['pool3'],
                                        512,
                                        3,
                                        pad=1,
                                        flip_filters=False)
        net['conv4_2'] = Conv2DDNNLayer(net['conv4_1'],
                                        512,
                                        3,
                                        pad=1,
                                        flip_filters=False)
        net['conv4_3'] = Conv2DDNNLayer(net['conv4_2'],
                                        512,
                                        3,
                                        pad=1,
                                        flip_filters=False)
        net['pool4'] = MaxPool2DLayer(net['conv4_3'], 2)
        net['conv5_1'] = Conv2DDNNLayer(net['pool4'],
                                        512,
                                        3,
                                        pad=1,
                                        flip_filters=False)
        net['conv5_2'] = Conv2DDNNLayer(net['conv5_1'],
                                        512,
                                        3,
                                        pad=1,
                                        flip_filters=False)
        net['conv5_3'] = Conv2DDNNLayer(net['conv5_2'],
                                        512,
                                        3,
                                        pad=1,
                                        flip_filters=False)
        net['pool5'] = MaxPool2DLayer(net['conv5_3'], 2)
        net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
        net['drop6'] = DropoutLayer(net['fc6'], p=0.5)
        net['fc7'] = DenseLayer(net['drop6'], num_units=4096)
        net['drop7'] = DropoutLayer(net['fc7'], p=0.5)
        net['fc8'] = DenseLayer(net['drop7'],
                                num_units=1000,
                                nonlinearity=None)
        net['prob'] = NonlinearityLayer(net['fc8'], softmax)

        self.net = net
        if w_path:
            self.load_weights('prob')
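
`load_weights` comes from the base class and is not shown here. A plausible implementation, sketched under the assumption that the pickle follows the Lasagne model-zoo layout with a 'param values' key (an assumption, not this project's actual code):

import pickle
import lasagne

def load_weights(self, output_key):  # would live on the base class
    with open(self.w_path, 'rb') as f:
        # model-zoo pickles were written by Python 2, hence the encoding
        data = pickle.load(f, encoding='latin-1')
    lasagne.layers.set_all_param_values(self.net[output_key],
                                        data['param values'])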
Example #14

def Winit(shape):
    rtn = np.random.normal(size=shape).astype(floatX)
    rtn[np.random.uniform(size=shape) < 0.9] *= 0.01
    return rtn


input_var = T.tensor4()
target_var = T.vector()
N_FILTERS = 512
N_FILTERS2 = 4096

_ = InputLayer(shape=(None, 16, 4, 4), input_var=input_var)

conv_a = Conv2DDNNLayer(_, N_FILTERS, (2, 1),
                        pad='valid')  # , W=Winit((N_FILTERS, 16, 2, 1)))
conv_b = Conv2DDNNLayer(_, N_FILTERS, (1, 2),
                        pad='valid')  # , W=Winit((N_FILTERS, 16, 1, 2)))

conv_aa = Conv2DDNNLayer(
    conv_a, N_FILTERS2, (2, 1),
    pad='valid')  # , W=Winit((N_FILTERS2, N_FILTERS, 2, 1)))
conv_ab = Conv2DDNNLayer(
    conv_a, N_FILTERS2, (1, 2),
    pad='valid')  # , W=Winit((N_FILTERS2, N_FILTERS, 1, 2)))

conv_ba = Conv2DDNNLayer(
    conv_b, N_FILTERS2, (2, 1),
    pad='valid')  # , W=Winit((N_FILTERS2, N_FILTERS, 2, 1)))
conv_bb = Conv2DDNNLayer(
    conv_b, N_FILTERS2, (1, 2),
    pad='valid')  # , W=Winit((N_FILTERS2, N_FILTERS, 1, 2)))
Example #15
    def build_network(self, X, Y):
        # Define the layers

        lrelu = lasagne.nonlinearities.LeakyRectify(0.1)
        input_shape = self.Size_Input
        #temp = input_shape[1]*input_shape[2]*input_shape[3]
        Auto_Enc_Layer = [
            lasagne.layers.InputLayer(shape=(None, input_shape[1],
                                             input_shape[2], input_shape[3]),
                                      input_var=X)
        ]
        # Encoder (an alternative nonlinearity would be lasagne.nonlinearities.rectify)
        Auto_Enc_Layer.append(
            nn.weight_norm(
                Conv2DDNNLayer(Auto_Enc_Layer[-1],
                               64, (3, 3),
                               pad=1,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))
        Auto_Enc_Layer.append(
            nn.weight_norm(
                Conv2DDNNLayer(Auto_Enc_Layer[-2],
                               64, (3, 3),
                               pad=1,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))

        Auto_Enc_Layer_Global0 = nn.weight_norm(
            lasagne.layers.NINLayer(Auto_Enc_Layer[-1],
                                    num_units=32,
                                    W=lasagne.init.Normal(0.05),
                                    nonlinearity=lrelu))
        Auto_Enc_Layer_Global = lasagne.layers.GlobalPoolLayer(
            Auto_Enc_Layer_Global0)

        Auto_Enc_Layer.append(
            nn.weight_norm(
                Conv2DDNNLayer(Auto_Enc_Layer[-1],
                               64, (3, 3),
                               pad=1,
                               stride=2,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))
        Auto_Enc_Layer.append(
            lasagne.layers.DropoutLayer(Auto_Enc_Layer[-1], p=0.5))
        Auto_Enc_Layer.append(
            nn.weight_norm(
                Conv2DDNNLayer(Auto_Enc_Layer[-1],
                               128, (3, 3),
                               pad=1,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))
        Auto_Enc_Layer.append(
            nn.weight_norm(
                Conv2DDNNLayer(Auto_Enc_Layer[-1],
                               128, (3, 3),
                               pad=1,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))
        Auto_Enc_Layer.append(
            nn.weight_norm(
                Conv2DDNNLayer(Auto_Enc_Layer[-1],
                               128, (3, 3),
                               pad=1,
                               stride=2,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))

        Auto_Enc_Layer_Local0 = nn.weight_norm(
            lasagne.layers.NINLayer(Auto_Enc_Layer[-1],
                                    num_units=128,
                                    W=lasagne.init.Normal(0.05),
                                    nonlinearity=lrelu))
        Auto_Enc_Layer_Local = lasagne.layers.GlobalPoolLayer(
            Auto_Enc_Layer_Local0)

        Auto_Enc_Layer.append(
            nn.weight_norm(
                lasagne.layers.NINLayer(Auto_Enc_Layer[-1],
                                        num_units=512,
                                        W=lasagne.init.Normal(0.05),
                                        nonlinearity=lrelu)))
        Auto_Enc_Layer.append(
            lasagne.layers.GlobalPoolLayer(Auto_Enc_Layer[-1]))

        Auto_Enc_Layer.append(
            nn.weight_norm(
                DenseLayer(Auto_Enc_Layer_Local,
                           num_units=32 * 16 * 16,
                           W=lasagne.init.Normal(0.05),
                           nonlinearity=lasagne.nonlinearities.tanh)))
        # Decode

        Auto_Dec_Layer = [
            (lasagne.layers.ReshapeLayer(Auto_Enc_Layer[-1],
                                         (self.Batch_Size, 32, 16, 16)))
        ]
        Auto_Dec_Layer.append(
            nn.weight_norm(
                nn.Deconv2DLayer(Auto_Dec_Layer[-1],
                                 (self.Batch_Size, 32, 32, 32), (3, 3),
                                 W=lasagne.init.Normal(0.05),
                                 nonlinearity=lrelu)))  # 16 -> 32
        Auto_Dec_Layer.append(
            nn.weight_norm(
                nn.Deconv2DLayer(
                    Auto_Dec_Layer[-1], (self.Batch_Size, 3, 64, 64), (3, 3),
                    W=lasagne.init.Normal(0.05),
                    nonlinearity=lasagne.nonlinearities.tanh)))  # 32 -> 64

        all_params = lasagne.layers.get_all_params(Auto_Dec_Layer,
                                                   trainable=True)
        network_output = lasagne.layers.get_output(Auto_Dec_Layer[-1],
                                                   X,
                                                   deterministic=False)

        encoded_output = lasagne.layers.get_output(Auto_Enc_Layer_Local,
                                                   X,
                                                   deterministic=True)
        encoded_output1 = lasagne.layers.get_output(Auto_Enc_Layer_Global,
                                                    X,
                                                    deterministic=True)
        #encoded_output1 = Auto_Enc_Layer_Global
        network_output1 = lasagne.layers.get_output(Auto_Dec_Layer[-1],
                                                    X,
                                                    deterministic=True)

        loss_A = T.mean(
            lasagne.objectives.squared_error(
                network_output[:, 0, :, :], Y[:, 0, :, :])) + T.mean(
                    lasagne.objectives.squared_error(
                        network_output[:, 1, :, :], Y[:, 1, :, :])) + T.mean(
                            lasagne.objectives.squared_error(
                                network_output[:, 2, :, :], Y[:, 2, :, :]))

        #loss_A = T.mean(lasagne.objectives.squared_error(network_output,Y))

        loss = [loss_A, encoded_output, network_output]
        #Autoencodeur_params_updates = lasagne.updates.momentum(loss_A,all_params,learning_rate = 0.05,momentum = 0.5)
        Autoencodeur_params_updates = lasagne.updates.adam(loss_A,
                                                           all_params,
                                                           learning_rate=0.001,
                                                           beta1=0.9)
        # Some Theano functions

        self.generate_fn_X = theano.function([X], network_output)
        self.train = theano.function([X, Y],
                                     loss,
                                     updates=Autoencodeur_params_updates,
                                     allow_input_downcast=True)
        self.predict = theano.function([X],
                                       network_output1,
                                       allow_input_downcast=True)
        self.encode_L = theano.function([X],
                                        encoded_output,
                                        allow_input_downcast=True)
        self.encode_G = theano.function([X],
                                        encoded_output1,
                                        allow_input_downcast=True)
        #self.encode_G = encoded_output1
        self.network = Auto_Enc_Layer

        return
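
A minimal hedged usage sketch of the functions compiled above, assuming `ae` is an instance on which build_network(X, Y) has already been called; iterate_minibatches is a hypothetical batching helper, not defined here:

for x_batch in iterate_minibatches(X_train, ae.Batch_Size):  # hypothetical helper
    loss_value, codes, recon = ae.train(x_batch, x_batch)    # target is the input itself
reconstructions = ae.predict(x_batch)                        # deterministic pass
local_codes = ae.encode_L(x_batch)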
Example #16
    def __init__(self, board, worldfeedback, learningrate=0.01, discountfactor=0.6, epsilon=0.1, depsilon=0.0,
                 minepsilon=0.1, rho=0.99, rms_epsilon=1e-6, batchsize=32):
        super(DeepQLearner, self).__init__(board, worldfeedback, learningrate, discountfactor, epsilon)
        self.depsilon = depsilon
        self.minepsilon = minepsilon

        self.replaybuf = Replay.Replay(1000000)
        self.batchsize = batchsize

        last_state = T.tensor4('last_state')
        last_action = T.icol('last_action')
        state = T.tensor4('state')
        reward = T.col('reward')
        terminal = T.icol('terminal')

        self.state_shared = theano.shared(
            np.zeros((batchsize, 1, board.height, board.width), dtype=theano.config.floatX))
        self.last_state_shared = theano.shared(
            np.zeros((batchsize, 1, board.height, board.width), dtype=theano.config.floatX))
        self.last_action_shared = theano.shared(np.zeros((batchsize, 1), dtype='int32'), broadcastable=(False, True))
        self.reward_shared = theano.shared(np.zeros((batchsize, 1), dtype=theano.config.floatX),
                                           broadcastable=(False, True))
        self.terminal_shared = theano.shared(np.zeros((batchsize, 1), dtype='int32'), broadcastable=(False, True))

        model = lasagne.layers.InputLayer(shape=(batchsize, 1, board.height, board.width))
        model = Conv2DDNNLayer(model, 24, 3, pad=1, W=lasagne.init.HeUniform(), b=lasagne.init.Constant(.1))
        model = Conv2DDNNLayer(model, 48, 3, pad=1, W=lasagne.init.HeUniform(), b=lasagne.init.Constant(.1))
        model = Conv2DDNNLayer(model, 12, 3, pad=1, W=lasagne.init.HeUniform(), b=lasagne.init.Constant(.1))
        model = lasagne.layers.DenseLayer(model, 256, W=lasagne.init.HeUniform(), b=lasagne.init.Constant(.1))
        model = lasagne.layers.DenseLayer(model, len(self._moves), W=lasagne.init.HeUniform(),
                                          b=lasagne.init.Constant(.1),
                                          nonlinearity=lasagne.nonlinearities.identity)

        lastQvals = lasagne.layers.get_output(model, last_state)
        Qvals = lasagne.layers.get_output(model, state)
        Qvals = theano.gradient.disconnected_grad(Qvals)

        delta = reward + \
                terminal * self.gamma * T.max(Qvals, axis=1, keepdims=True) - \
                lastQvals[T.arange(batchsize), last_action.reshape((-1,))].reshape((-1, 1))

        loss = T.mean(0.5 * delta ** 2)

        params = lasagne.layers.get_all_params(model)
        givens = {
            state: self.state_shared,
            last_state: self.last_state_shared,
            last_action: self.last_action_shared,
            reward: self.reward_shared,
            terminal: self.terminal_shared,
        }
        updates = lasagne.updates.rmsprop(loss, params, learning_rate=self.lr, rho=rho, epsilon=rms_epsilon)

        self.model = model
        self.train_fn = theano.function([], [loss, Qvals], updates=updates, givens=givens)
        self.Qvals = theano.function([], Qvals, givens={state: self.state_shared})

        self.last_state = None
        self.action = None

        self.avgloss = []
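
A hedged sketch of one training step feeding the shared buffers that back the `givens` mapping above (the method name and batch layout are illustrative):

def train_step(self, last_state, last_action, reward, state, terminal):
    # copy the sampled minibatch into the shared variables read via `givens`
    self.last_state_shared.set_value(last_state)      # (batch, 1, h, w) floatX
    self.last_action_shared.set_value(last_action)    # (batch, 1) int32
    self.reward_shared.set_value(reward)              # (batch, 1) floatX
    self.state_shared.set_value(state)                # (batch, 1, h, w) floatX
    self.terminal_shared.set_value(terminal)          # (batch, 1) int32
    loss, _ = self.train_fn()
    self.avgloss.append(loss)
    return loss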