Code Example #1
import lasagne
from lasagne.layers import InputLayer, DenseLayer, FlattenLayer, batch_norm
from lasagne.layers.dnn import Conv2DDNNLayer, MaxPool2DDNNLayer


def createDiscriminator2(input_var=None):
    # Input: batches of 3-channel 64x64 images.
    _ = InputLayer(shape=(None, 3, 64, 64), input_var=input_var)
    _ = batch_norm(Conv2DDNNLayer(_, 64, 3, pad='same'))
    _ = batch_norm(Conv2DDNNLayer(_, 64, 3, pad='same'))
    _ = MaxPool2DDNNLayer(_, 2)
    _ = batch_norm(Conv2DDNNLayer(_, 64, 3, pad='same'))
    _ = MaxPool2DDNNLayer(_, 2)
    _ = batch_norm(Conv2DDNNLayer(_, 128, 3, pad='same'))
    _ = batch_norm(Conv2DDNNLayer(_, 128, 3, pad='same'))
    _ = FlattenLayer(_)
    _ = DenseLayer(_, num_units=1000, nonlinearity=lasagne.nonlinearities.rectify)
    l_discriminator = DenseLayer(_, num_units=1, nonlinearity=lasagne.nonlinearities.sigmoid)

    print('--------------------')
    print('Discriminator architecture:\n')

    # Print the output shape of every layer in the network.
    allLayers = lasagne.layers.get_all_layers(l_discriminator)
    for l in allLayers:
        print(lasagne.layers.get_output_shape(l))

    print("Discriminator output:", l_discriminator.output_shape)
    return l_discriminator
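
A minimal usage sketch for the builder above (assumptions: Theano, Lasagne, and cuDNN are installed; the variable names are illustrative, not part of the original project):

import theano
import theano.tensor as T
import lasagne

# Symbolic 4D input: (batch, 3, 64, 64) images.
input_var = T.tensor4('inputs')
l_disc = createDiscriminator2(input_var)

# Deterministic pass (batch norm in inference mode) for evaluation.
prediction = lasagne.layers.get_output(l_disc, deterministic=True)
predict_fn = theano.function([input_var], prediction)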
Code Example #2
import pytest


def test_fail_on_mismatching_dimensionality(self):
    try:
        from lasagne.layers.dnn import MaxPool2DDNNLayer
    except ImportError:
        pytest.skip("cuDNN not available")
    # The cuDNN pooling layer only accepts 4D (batch, channels, rows, cols) inputs.
    with pytest.raises(ValueError) as exc:
        MaxPool2DDNNLayer((10, 20, 30), 3, 2)
    assert "Expected 4 input dimensions" in exc.value.args[0]
    with pytest.raises(ValueError) as exc:
        MaxPool2DDNNLayer((10, 20, 30, 40, 50), 3, 2)
    assert "Expected 4 input dimensions" in exc.value.args[0]
Code Example #3
File: test_pool.py  Project: Python3pkg/Lasagne
import pytest


def test_not_implemented(self):
    try:
        from lasagne.layers.dnn import MaxPool2DDNNLayer
    except ImportError:
        pytest.skip("cuDNN not available")
    # ignore_border=False is not supported by the cuDNN pooling implementation.
    with pytest.raises(NotImplementedError) as exc:
        layer = MaxPool2DDNNLayer((1, 2, 3, 4), pool_size=2,
                                  ignore_border=False)
    assert ("Pool2DDNNLayer does not support ignore_border=False" in
            exc.value.args[0])
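
If ignore_border=False is actually needed, one workaround is the generic Theano-backed pooling layer rather than the cuDNN one; a sketch under that assumption:

from lasagne.layers import MaxPool2DLayer

# The non-cuDNN pooling layer supports ignore_border=False.
layer = MaxPool2DLayer((1, 2, 3, 4), pool_size=2, ignore_border=False)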
Code Example #4
    def layer(self, input_layer, pool_size, stride, pad):
        # Build the pooling layer under test; skip when cuDNN is unavailable.
        try:
            from lasagne.layers.dnn import MaxPool2DDNNLayer
        except ImportError:
            pytest.skip("cuDNN not available")

        return MaxPool2DDNNLayer(
            input_layer,
            pool_size=pool_size,
            stride=stride,
            pad=pad,
        )
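
A sketch of what this helper builds when called outside the test class (assuming cuDNN is available; the shapes are illustrative):

from lasagne.layers import InputLayer
from lasagne.layers.dnn import MaxPool2DDNNLayer

input_layer = InputLayer((2, 3, 32, 32))
pool = MaxPool2DDNNLayer(input_layer, pool_size=2, stride=2, pad=0)
print(pool.output_shape)  # (2, 3, 16, 16)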
Code Example #5
File: model_init0.py  Project: hoamle/multiLabel
import lasagne
from lasagne.layers import InputLayer, DenseLayer, SliceLayer, ConcatLayer
from lasagne.layers.dnn import Conv2DDNNLayer, MaxPool2DDNNLayer


def build_model(input_var=None):
    # Input layer
    ''' 
    out: b x 3 x 227 x 227 
    '''
    lin = InputLayer(shape=(None, 3, 227, 227), input_var=input_var)

    # ConvPool1
    ''' 
    out: b x 96 x 27 x 27 
    out.W: 96 x 3 x 11 x 11
    '''
    """ input was b01c, need to be bc01"""
    l1 = Conv2DDNNLayer(
        lin,
        #lasagne.layers.dimshuffle(lin, (0,3,1,2)),
        num_filters=96,
        filter_size=11,
        stride=4,
        W=lasagne.init.Constant(0.),  #W = Ws['W_0'], b = bs['b_0'],
        nonlinearity=lasagne.nonlinearities.rectify)
    l1 = MaxPool2DDNNLayer(l1, pool_size=3, stride=2)

    # ConvPool2: 2 groups
    ''' 
    out: b x 256 x 13 x 13
    out.W0/1: 128 x 48 x 5 x 5
    '''
    l1_0 = SliceLayer(l1, indices=slice(None, 48), axis=1)
    l2_0 = Conv2DDNNLayer(
        l1_0,
        num_filters=128,
        filter_size=5,
        stride=1,
        pad=2,
        W=lasagne.init.Constant(0.),  #W = Ws['W0_1'], b = bs['b0_1'],
        nonlinearity=lasagne.nonlinearities.rectify)
    l2_0p = MaxPool2DDNNLayer(l2_0, pool_size=3, stride=2)

    l1_1 = SliceLayer(l1, indices=slice(48, None), axis=1)
    l2_1 = Conv2DDNNLayer(
        l1_1,
        num_filters=128,
        filter_size=5,
        stride=1,
        pad=2,
        W=lasagne.init.Constant(0.),  #W = Ws['W1_1'], b = bs['b1_1'],
        nonlinearity=lasagne.nonlinearities.rectify)
    l2_1p = MaxPool2DDNNLayer(l2_1, pool_size=3, stride=2)

    l2 = ConcatLayer([l2_0p, l2_1p], axis=1)

    # Conv3
    ''' 
    out: b x 384 x 13 x 13
    out.W: 384 x 256 x 3 x 3
    '''
    l3 = Conv2DDNNLayer(
        l2,
        num_filters=384,
        filter_size=3,
        stride=1,
        pad='same',
        W=lasagne.init.Constant(0.),  #W = Ws['W_2'], b = bs['b_2'],
        nonlinearity=lasagne.nonlinearities.rectify)

    # Conv4: 2 groups
    ''' 
    out: b x 384 x 13 x 13
    out.W0/1: 192 x 192 x 3 x 3
    '''
    l3_0 = SliceLayer(l3, indices=slice(None, 192), axis=1)
    l4_0 = Conv2DDNNLayer(
        l3_0,
        num_filters=192,
        filter_size=3,
        stride=1,
        pad='same',
        W=lasagne.init.Constant(0.),  #W = Ws['W0_3'], b = bs['b0_3'],
        nonlinearity=lasagne.nonlinearities.rectify)

    l3_1 = SliceLayer(l3, indices=slice(192, None), axis=1)
    l4_1 = Conv2DDNNLayer(
        l3_1,
        num_filters=192,
        filter_size=3,
        stride=1,
        pad='same',
        W=lasagne.init.Constant(0.),  #W = Ws['W1_3'], b = bs['b1_3'],
        nonlinearity=lasagne.nonlinearities.rectify)

    # ConvPool5: 2 groups
    ''' 
    out: b x 256 x 6 x 6
    out.W0/1: 128 x 192 x 3 x 3
    '''
    l5_0 = Conv2DDNNLayer(
        l4_0,
        num_filters=128,
        filter_size=3,
        stride=1,
        pad='same',
        W=lasagne.init.Constant(0.),  #W = Ws['W0_4'], b = bs['b0_4'],
        nonlinearity=lasagne.nonlinearities.rectify)
    l5_0p = MaxPool2DDNNLayer(l5_0, pool_size=3, stride=2)

    l5_1 = Conv2DDNNLayer(
        l4_1,
        num_filters=128,
        filter_size=3,
        stride=1,
        pad='same',
        W=lasagne.init.Constant(0.),  #W = Ws['W1_4'], b = bs['b1_4'],
        nonlinearity=lasagne.nonlinearities.rectify)
    l5_1p = MaxPool2DDNNLayer(l5_1, pool_size=3, stride=2)

    l5 = ConcatLayer([l5_0p, l5_1p], axis=1)

    # FC6
    ''' 
    out: b x 4096 (x 1 x 1)
    out.W: 9216 x 4096
    '''
    l6 = DenseLayer(
        l5,
        #lasagne.layers.dropout(l5, p=.0),
        num_units=4096,
        W=lasagne.init.Constant(0.),  #W = Ws['W_5'], b = bs['b_5'],
        nonlinearity=lasagne.nonlinearities.rectify)

    # FC7
    ''' 
    out: b x 4096 (x 1 x 1)
    out.W: 4096 x 4096
    '''
    l7 = DenseLayer(
        l6,
        #lasagne.layers.dropout(l6, p=.5),
        num_units=4096,
        W=lasagne.init.Constant(0.),  #W = Ws['W_6'], b = bs['b_6'],
        nonlinearity=lasagne.nonlinearities.rectify)

    # FC8: replaces the final AlexNet classifier layer
    ''' 
    out: b x 22
    out.W: 4096 x 22
    '''
    l8 = DenseLayer(l7,
                    num_units=22,
                    nonlinearity=lasagne.nonlinearities.softmax)
    return l8
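
The zero-constant initializers above are placeholders (note the commented-out W = Ws[...] arguments). A hedged sketch of injecting saved weights, assuming a hypothetical model.npz written with numpy.savez in layer order:

import numpy as np
import lasagne

network = build_model()
with np.load('model.npz') as f:  # hypothetical file name
    param_values = [f['arr_%d' % i] for i in range(len(f.files))]
lasagne.layers.set_all_param_values(network, param_values)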
Code Example #6
File: j0_iranet5.py  Project: Keesiu/meta-kaggle
import lasagne
from lasagne.layers import InputLayer
from lasagne.layers.dnn import Conv2DDNNLayer, MaxPool2DDNNLayer


def build_model():

    #################
    # Regular model #
    #################
    # `data_sizes` is supplied by the surrounding project's configuration.
    input_size = data_sizes["sliced:data:singleslice"]

    l0 = InputLayer(input_size)
    # add channel layer
    #l0r = reshape(l0, (-1, 1, ) + input_size[1:])

    # (batch, channel, time, x, y)
    l = Conv2DDNNLayer(l0, num_filters=64, filter_size=(3, 3),
                       W=lasagne.init.Orthogonal('relu'),
                       b=lasagne.init.Constant(0.1),
                       pad='same')
    l = Conv2DDNNLayer(l, num_filters=64, filter_size=(3, 3),
                       W=lasagne.init.Orthogonal("relu"),
                       b=lasagne.init.Constant(0.1),
                       pad="valid")

    l = lasagne.layers.PadLayer(l, width=(1, 1))
    l = MaxPool2DDNNLayer(l, pool_size=(2, 2), stride=(2, 2))
    #l = lasagne.layers.DropoutLayer(l, p=0.25)

    # ---------------------------------------------------------------
    l = Conv2DDNNLayer(l, num_filters=96, filter_size=(3, 3),
                       W=lasagne.init.Orthogonal("relu"),
                       b=lasagne.init.Constant(0.1),
                       pad="same")
    l = Conv2DDNNLayer(l, num_filters=96, filter_size=(3, 3),
                       W=lasagne.init.Orthogonal("relu"),
                       b=lasagne.init.Constant(0.1),
                       pad="valid")

    l = lasagne.layers.PadLayer(l, width=(1, 1))
    l = MaxPool2DDNNLayer(l, pool_size=(2, 2), stride=(2, 2))
    #l = lasagne.layers.DropoutLayer(l, p=0.25)

    # ---------------------------------------------------------------
    l = Conv2DDNNLayer(l, num_filters=128, filter_size=(2, 2),
                       W=lasagne.init.Orthogonal("relu"),
                       b=lasagne.init.Constant(0.1))
    l = Conv2DDNNLayer(l, num_filters=128, filter_size=(2, 2),
                       W=lasagne.init.Orthogonal("relu"),
                       b=lasagne.init.Constant(0.1))
    l = MaxPool2DDNNLayer(l, pool_size=(2, 2), stride=(2, 2))
    l = lasagne.layers.DropoutLayer(l, p=0.25)

    # --------------------------------------------------------------

    l = lasagne.layers.FlattenLayer(l)
    l_d1 = lasagne.layers.DenseLayer(l, num_units=1024,
                                     W=lasagne.init.Orthogonal('relu'),
                                     b=lasagne.init.Constant(0.1))
    l_systole = lasagne.layers.DenseLayer(
        lasagne.layers.dropout(l_d1, p=0.5), num_units=1,
        W=lasagne.init.Orthogonal('relu'), b=lasagne.init.Constant(0.1),
        nonlinearity=lasagne.nonlinearities.identity)

    # --------------------------------------------------------------
    # Second tower: identical architecture with separate weights (diastole).

    l = Conv2DDNNLayer(l0, num_filters=64, filter_size=(3, 3),
                       W=lasagne.init.Orthogonal('relu'),
                       b=lasagne.init.Constant(0.1),
                       pad='same')
    l = Conv2DDNNLayer(l, num_filters=64, filter_size=(3, 3),
                       W=lasagne.init.Orthogonal("relu"),
                       b=lasagne.init.Constant(0.1),
                       pad="valid")

    l = lasagne.layers.PadLayer(l, width=(1, 1))
    l = MaxPool2DDNNLayer(l, pool_size=(2, 2), stride=(2, 2))
    #l = lasagne.layers.DropoutLayer(l, p=0.25)

    # ---------------------------------------------------------------
    l = Conv2DDNNLayer(l, num_filters=96, filter_size=(3, 3),
                       W=lasagne.init.Orthogonal("relu"),
                       b=lasagne.init.Constant(0.1),
                       pad="same")
    l = Conv2DDNNLayer(l, num_filters=96, filter_size=(3, 3),
                       W=lasagne.init.Orthogonal("relu"),
                       b=lasagne.init.Constant(0.1),
                       pad="valid")

    l = lasagne.layers.PadLayer(l, width=(1, 1))
    l = MaxPool2DDNNLayer(l, pool_size=(2, 2), stride=(2, 2))
    #l = lasagne.layers.DropoutLayer(l, p=0.25)

    # ---------------------------------------------------------------
    l = Conv2DDNNLayer(l, num_filters=128, filter_size=(2, 2),
                       W=lasagne.init.Orthogonal("relu"),
                       b=lasagne.init.Constant(0.1))
    l = Conv2DDNNLayer(l, num_filters=128, filter_size=(2, 2),
                       W=lasagne.init.Orthogonal("relu"),
                       b=lasagne.init.Constant(0.1))
    l = MaxPool2DDNNLayer(l, pool_size=(2, 2), stride=(2, 2))
    l = lasagne.layers.DropoutLayer(l, p=0.25)

    # --------------------------------------------------------------

    l = lasagne.layers.FlattenLayer(l)
    l_d2 = lasagne.layers.DenseLayer(l, num_units=1024,
                                     W=lasagne.init.Orthogonal('relu'),
                                     b=lasagne.init.Constant(0.1))
    l_diastole = lasagne.layers.DenseLayer(
        lasagne.layers.dropout(l_d2, p=0.5), num_units=1,
        W=lasagne.init.Orthogonal('relu'), b=lasagne.init.Constant(0.1),
        nonlinearity=lasagne.nonlinearities.identity)

    return {
        "inputs":{
            "sliced:data:singleslice": l0
        },
        "outputs": {
            "systole:value": l_systole,
            "diastole:value": l_diastole,
        },
        "regularizable": {
            l_d1: 1e-3,
            l_d2: 1e-3,
        }
    }
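
A sketch of consuming the returned dictionary (assumptions: data_sizes supplies a concrete 4D input shape and Theano is available):

import theano
import lasagne

model = build_model()
l_in = model["inputs"]["sliced:data:singleslice"]
# Compile one function that evaluates both named outputs deterministically.
outputs = lasagne.layers.get_output(
    [model["outputs"]["systole:value"], model["outputs"]["diastole:value"]],
    deterministic=True)
predict = theano.function([l_in.input_var], outputs)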