Example #1
def build_simple_block(incoming_layer,
                       names,
                       num_filters,
                       filter_size,
                       stride,
                       pad,
                       use_bias=False,
                       nonlin=rectify):
    net = []
    net.append((names[0],
                ConvLayer(incoming_layer,
                          num_filters,
                          filter_size,
                          stride,
                          pad,
                          flip_filters=False,
                          nonlinearity=None)
                if use_bias else ConvLayer(incoming_layer,
                                           num_filters,
                                           filter_size,
                                           stride,
                                           pad,
                                           b=None,
                                           flip_filters=False,
                                           nonlinearity=None)))

    net.append((names[1], BatchNormLayer(net[-1][1])))
    if nonlin is not None:
        net.append((names[2], NonlinearityLayer(net[-1][1],
                                                nonlinearity=nonlin)))

    return OrderedDict(net), net[-1][0]
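For context, here is a minimal usage sketch chaining two such blocks into one network dictionary (the imports, input shape, filter counts, and layer names are illustrative assumptions, not taken from the original project):

from collections import OrderedDict
from lasagne.layers import InputLayer

net = OrderedDict()
net['input'] = InputLayer((None, 3, 224, 224))
# first block: 7x7/2 conv -> batch norm -> ReLU
block, last_name = build_simple_block(net['input'], ['conv1', 'bn1', 'relu1'],
                                      num_filters=64, filter_size=7,
                                      stride=2, pad=3)
net.update(block)
# second block continues from wherever the first one ended
block, last_name = build_simple_block(net[last_name], ['conv2', 'bn2', 'relu2'],
                                      num_filters=64, filter_size=3,
                                      stride=1, pad=1)
net.update(block)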
Example #2
def build_cnn(input_var, pretrained_model):
    # pretrained layers from VGG16
    conv1_1 = pretrained_model['conv1_1']
    conv1_2 = pretrained_model['conv1_2']

    # new layers
    network = InputLayer(shape=(None, 3, 48, 48), input_var=input_var)

    network = ConvLayer(network,
                        64,
                        3,
                        pad=1,
                        flip_filters=False,
                        W=conv1_1.W.get_value(),
                        b=conv1_1.b.get_value())

    network = ConvLayer(network,
                        64,
                        3,
                        pad=1,
                        flip_filters=False,
                        W=conv1_2.W.get_value(),
                        b=conv1_2.b.get_value())

    network = MaxPoolLayer(network, pool_size=(2, 2))

    network = DenseLayer(dropout(network, p=.5),
                         num_units=256,
                         nonlinearity=lasagne.nonlinearities.rectify)

    network = DenseLayer(dropout(network, p=.5),
                         num_units=7,
                         nonlinearity=lasagne.nonlinearities.softmax)

    return network
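Two things are easy to miss here: passing W=conv1_1.W.get_value() copies the pretrained VGG16 values into fresh trainable parameters (the new layers are initialized from, not tied to, the originals), and only the first two conv layers are reused. A hypothetical sketch of how pretrained_model might be obtained, assuming the vgg16.pkl weights and the build_model() helper from the Lasagne Recipes model zoo:

import pickle
import lasagne
from vgg16 import build_model  # Lasagne Recipes modelzoo script (assumed available)

vgg = build_model()  # dict of layers keyed 'conv1_1', 'conv1_2', ...
with open('vgg16.pkl', 'rb') as f:
    params = pickle.load(f, encoding='latin-1')  # Python 2 pickle, hence the encoding
lasagne.layers.set_all_param_values(vgg['prob'], params['param values'])

network = build_cnn(input_var, vgg)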
Example #3
File: resnet.py Project: stes/deepml
def build_simple_block(incoming_layer,
                       names,
                       num_filters,
                       filter_size,
                       stride,
                       pad,
                       use_bias=False,
                       nonlin=rectify):
    """Creates stacked Lasagne layers ConvLayer -> BN -> (ReLu)
    Parameters:
    ----------
    incoming_layer : instance of Lasagne layer
        Parent layer
    names : list of string
        Names of the layers in block
    num_filters : int
        Number of filters in convolution layer
    filter_size : int
        Size of filters in convolution layer
    stride : int
        Stride of convolution layer
    pad : int
        Padding of convolution layer
    use_bias : bool
        Whether to use bias in the convolution layer
    nonlin : function
        Nonlinearity type of Nonlinearity layer
    Returns
    -------
    tuple: (net, last_layer_name)
        net : dict
            Dictionary with stacked layers
        last_layer_name : string
            Last layer name
    """
    net = []
    names = list(names)
    net.append((names[0],
                ConvLayer(incoming_layer,
                          num_filters,
                          filter_size,
                          stride,
                          pad,
                          flip_filters=False,
                          nonlinearity=None)
                if use_bias else ConvLayer(incoming_layer,
                                           num_filters,
                                           filter_size,
                                           stride,
                                           pad,
                                           b=None,
                                           flip_filters=False,
                                           nonlinearity=None)))

    net.append((names[1], BatchNormLayer(net[-1][1])))
    if nonlin is not None:
        net.append((names[2], NonlinearityLayer(net[-1][1],
                                                nonlinearity=nonlin)))

    return dict(net), net[-1][0]
Example #4
def Fully_Conv_4(num_of_classes, input_var=None):
    net = {}
    net['input'] = InputLayer(shape=(None, 3, 224, 224), input_var=input_var)
    net['conv1_1'] = ConvLayer(net['input'],
                               32,
                               4, (2, 2),
                               pad=0,
                               flip_filters=False)
    net['conv1_2'] = ConvLayer(net['conv1_1'],
                               64,
                               4,
                               pad=0,
                               flip_filters=False)
    net['conv2_1'] = ConvLayer(net['conv1_2'],
                               128,
                               6, (2, 2),
                               pad=0,
                               flip_filters=False)
    net['conv2_2'] = ConvLayer(net['conv2_1'],
                               256,
                               6, (2, 2),
                               pad=0,
                               flip_filters=False)
    net['fc3'] = DenseLayer(net['conv2_2'], num_units=1024)
    net['fc3_dropout'] = DropoutLayer(net['fc3'], p=0.5)
    net['fc4'] = DenseLayer(net['fc3_dropout'], num_units=512)
    net['fc4_dropout'] = DropoutLayer(net['fc4'], p=0.5)
    net['fc5'] = DenseLayer(net['fc4_dropout'],
                            num_units=num_of_classes,
                            nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc5'], softmax)

    return net
Example #5
def debug_net(input_var=None, depth=3):
    """
    Debug network which is small & fast
    :param input_var: Input variable
    :param depth: Depth of the net's core
    :return: lasagne.layer
    """
    # Input
    l_in = InputLayer(shape=(None, 3, 228, 304), input_var=input_var)
    l = l_in
    for _ in range(depth):
        l = batch_norm(
            ConvLayer(l,
                      num_filters=64,
                      filter_size=(3, 3),
                      stride=(1, 1),
                      nonlinearity=rectify,
                      pad="same",
                      W=lasagne.init.HeNormal(gain='relu'),
                      flip_filters=False))
    l = ConvLayer(l,
                  num_filters=1,
                  filter_size=(3, 3),
                  stride=(1, 1),
                  nonlinearity=rectify,
                  pad="same",
                  W=lasagne.init.HeNormal(gain='relu'),
                  flip_filters=False)
    return PoolLayer(l, pool_size=(2, 2))
Example #6
File: carc.py Project: 5l1v3r1/ARC-1
def residual_block(l, increase_dim=False, projection=True, first=False, filters=16):
	# note: the `projection` argument has no effect; it is shadowed by the
	# local `projection` layers built below
	if increase_dim:
		first_stride = (2, 2)
	else:
		first_stride = (1, 1)
	
	if first:
		bn_pre_relu = l
	else:
		bn_pre_conv = BatchNormLayer(l)
		bn_pre_relu = NonlinearityLayer(bn_pre_conv, rectify)
	
	conv_1 = batch_norm(ConvLayer(bn_pre_relu, num_filters=filters, filter_size=(3,3), stride=first_stride, nonlinearity=rectify, pad='same', W=HeNormal(gain='relu')))
	dropout = DropoutLayer(conv_1, p=0.3)
	conv_2 = ConvLayer(dropout, num_filters=filters, filter_size=(3,3), stride=(1,1), nonlinearity=None, pad='same', W=HeNormal(gain='relu'))
	
	if increase_dim:
		projection = ConvLayer(l, num_filters=filters, filter_size=(1,1), stride=(2,2), nonlinearity=None, pad='same', b=None)
		block = ElemwiseSumLayer([conv_2, projection])
	elif first:
		projection = ConvLayer(l, num_filters=filters, filter_size=(1,1), stride=(1,1), nonlinearity=None, pad='same', b=None)
		block = ElemwiseSumLayer([conv_2, projection])
	else:
		block = ElemwiseSumLayer([conv_2, l])
	
	return block
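A sketch of how this pre-activation block is typically stacked into a full network (the filter counts, stack depth n, and input layer l_in are illustrative; assumes the same imports as the block above):

# input conv with batch norm
l = batch_norm(ConvLayer(l_in, num_filters=16, filter_size=(3, 3), stride=(1, 1),
                         nonlinearity=rectify, pad='same', W=HeNormal(gain='relu')))

# first stack: spatial size and filter count unchanged
l = residual_block(l, first=True, filters=16)
for _ in range(n - 1):
    l = residual_block(l, filters=16)

# second stack: halve the resolution, double the filters
l = residual_block(l, increase_dim=True, filters=32)
for _ in range(n - 1):
    l = residual_block(l, filters=32)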
Example #7
    def make_res_block(self,
                       name,
                       input,
                       units,
                       filter_size=(3, 3),
                       stride=(1, 1),
                       pad=(1, 1),
                       alpha=0.25):
        conv1 = ConvLayer(input,
                          units,
                          filter_size,
                          stride=stride,
                          pad=pad,
                          nonlinearity=None)
        relu = rrelu(conv1)
        # the second conv consumes the rectified output of the first
        conv2 = ConvLayer(relu,
                          units,
                          filter_size,
                          stride=stride,
                          pad=pad,
                          nonlinearity=None)

        self.network[name + 'x1'] = conv1
        self.network[name + '~1'] = relu
        self.network[name + 'x2'] = conv2
        # note: no skip connection is added here; the block returns the
        # plain conv -> rrelu -> conv stack
        return conv2
Example #8
def build_model(height, width):
	net = OrderedDict()
	net['input'] = InputLayer((None, 3, height, width), name='input')
	net['conv1'] = ConvLayer(net['input'], num_filters=32, filter_size=7, pad='same', name='conv1')
	net['conv2'] = ConvLayer(net['conv1'], num_filters=32, filter_size=5, pad='same', name='conv2')
	net['conv3'] = ConvLayer(net['conv2'], num_filters=64, filter_size=3, pad='same', name='conv3')
	net['conv4'] = ConvLayer(net['conv3'], num_filters=64, filter_size=3, pad='same', name='conv4')

	net['pad5'] = PadLayer(net['conv4'], width=1, val=0, name='pad5')
	net['conv_dil5'] = DilatedConv2DLayer(net['pad5'], num_filters=64, filter_size=3, dilation=(1,1), name='conv_dil5')

	net['pad6'] = PadLayer(net['conv_dil5'], width=2, val=0, name='pad6')
	net['conv_dil6'] = DilatedConv2DLayer(net['pad6'], num_filters=64, filter_size=3, dilation=(2,2), name='conv_dil6')

	net['pad7'] = PadLayer(net['conv_dil6'], width=4, val=0, name='pad7')
	net['conv_dil7'] = DilatedConv2DLayer(net['pad7'], num_filters=64, filter_size=3, dilation=(4,4), name='conv_dil7')

	net['pad8'] = PadLayer(net['conv_dil7'], width=8, val=0, name='pad8')
	net['conv_dil8'] = DilatedConv2DLayer(net['pad8'], num_filters=64, filter_size=3, dilation=(8,8), name='conv_dil8')

	net['pad9'] = PadLayer(net['conv_dil8'], width=16, val=0, name='pad9')
	net['conv_dil9'] = DilatedConv2DLayer(net['pad9'], num_filters=64, filter_size=3, dilation=(16,16), name='conv_dil9')

	net['pad10'] = PadLayer(net['conv_dil9'], width=1, val=0, name='pad10')
	net['l_out'] = DilatedConv2DLayer(net['pad10'], num_filters=2, filter_size=3, dilation=(1,1), name='l_out')

	for layer in lasagne.layers.get_all_layers(net['l_out']):
		print(layer.name, layer.output_shape)
	print("output shape", net['l_out'].output_shape)

	net['l_in'] = net['input']
	return net
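One detail worth calling out: Lasagne's DilatedConv2DLayer supports only pad=0, which is why every dilated convolution above is preceded by an explicit PadLayer. For a 3x3 filter with dilation d, the effective filter extent is 2d + 1, so padding by d on each side keeps the spatial size unchanged. A quick standalone check (the 64x64 input is an arbitrary assumption):

from lasagne.layers import InputLayer, PadLayer, DilatedConv2DLayer

l = InputLayer((None, 64, 64, 64))
for d in (1, 2, 4):
    l = PadLayer(l, width=d, val=0)
    l = DilatedConv2DLayer(l, num_filters=64, filter_size=3, dilation=(d, d))
    print(l.output_shape)  # spatial dims stay (64, 64) at every step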
Example #9
    def residual_block(l, increase_dim=False, projection=False):
        input_num_filters = l.output_shape[1]
        if increase_dim:
            first_stride = (2,2)
            out_num_filters = input_num_filters*2
        else:
            first_stride = (1,1)
            out_num_filters = input_num_filters

        stack_1 = batch_norm(ConvLayer(l, num_filters=out_num_filters, filter_size=(3,3), stride=first_stride, nonlinearity=rectify, pad='same', W=lasagne.init.HeNormal(gain='relu'), flip_filters=False))
        stack_2 = batch_norm(ConvLayer(stack_1, num_filters=out_num_filters, filter_size=(3,3), stride=(1,1), nonlinearity=None, pad='same', W=lasagne.init.HeNormal(gain='relu'), flip_filters=False))
        
        # add shortcut connections
        if increase_dim:
            if projection:
                # projection shortcut, as option B in paper
                projection = batch_norm(ConvLayer(l, num_filters=out_num_filters, filter_size=(1,1), stride=(2,2), nonlinearity=None, pad='same', b=None, flip_filters=False))
                block = NonlinearityLayer(ElemwiseSumLayer([stack_2, projection]),nonlinearity=rectify)
            else:
                # identity shortcut, as option A in paper
                identity = ExpressionLayer(l, lambda X: X[:, :, ::2, ::2], lambda s: (s[0], s[1], s[2]//2, s[3]//2))
                padding = PadLayer(identity, [out_num_filters//4,0,0], batch_ndim=1)
                block = NonlinearityLayer(ElemwiseSumLayer([stack_2, padding]),nonlinearity=rectify)
        else:
            block = NonlinearityLayer(ElemwiseSumLayer([stack_2, l]),nonlinearity=rectify)
        
        return block
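The option A (identity) shortcut above is compact but dense; in shapes, for an input with F channels (so out_num_filters = 2F):

# identity: X[:, :, ::2, ::2] subsamples every other pixel:
#   (B, F, H, W) -> (B, F, H//2, W//2)
# padding: PadLayer(..., [2F//4, 0, 0], batch_ndim=1) adds F//2 zero channels
#   on each side of axis 1: (B, F, H//2, W//2) -> (B, 2F, H//2, W//2)
# which now matches stack_2 and can be summed elementwise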
Example #10
def build_model():
    net = {}
    net['input'] = InputLayer((None, 3, None, None))
    net['conv1/7x7_s2'] = ConvLayer(net['input'], 64, 7, stride=2, pad=3)
    net['pool1/3x3_s2'] = PoolLayer(net['conv1/7x7_s2'],
                                    pool_size=3,
                                    stride=2,
                                    ignore_border=False)
    net['pool1/norm1'] = LRNLayer(net['pool1/3x3_s2'], alpha=0.00002, k=1)
    net['conv2/3x3_reduce'] = ConvLayer(net['pool1/norm1'], 64, 1)
    net['conv2/3x3'] = ConvLayer(net['conv2/3x3_reduce'], 192, 3, pad=1)
    net['conv2/norm2'] = LRNLayer(net['conv2/3x3'], alpha=0.00002, k=1)
    net['pool2/3x3_s2'] = PoolLayer(net['conv2/norm2'], pool_size=3, stride=2)

    net.update(
        build_inception_module('inception_3a', net['pool2/3x3_s2'],
                               [32, 64, 96, 128, 16, 32]))
    net.update(
        build_inception_module('inception_3b', net['inception_3a/output'],
                               [64, 128, 128, 192, 32, 96]))
    net['pool3/3x3_s2'] = PoolLayer(net['inception_3b/output'],
                                    pool_size=3,
                                    stride=2)

    net.update(
        build_inception_module('inception_4a', net['pool3/3x3_s2'],
                               [64, 192, 96, 208, 16, 48]))
    net.update(
        build_inception_module('inception_4b', net['inception_4a/output'],
                               [64, 160, 112, 224, 24, 64]))
    net.update(
        build_inception_module('inception_4c', net['inception_4b/output'],
                               [64, 128, 128, 256, 24, 64]))
    net.update(
        build_inception_module('inception_4d', net['inception_4c/output'],
                               [64, 112, 144, 288, 32, 64]))
    net.update(
        build_inception_module('inception_4e', net['inception_4d/output'],
                               [128, 256, 160, 320, 32, 128]))
    net['pool4/3x3_s2'] = PoolLayer(net['inception_4e/output'],
                                    pool_size=3,
                                    stride=2)

    net.update(
        build_inception_module('inception_5a', net['pool4/3x3_s2'],
                               [128, 256, 160, 320, 32, 128]))
    net.update(
        build_inception_module('inception_5b', net['inception_5a/output'],
                               [128, 384, 192, 384, 48, 128]))

    net['pool5/7x7_s1'] = GlobalPoolLayer(net['inception_5b/output'])
    net['loss3/classifier'] = DenseLayer(net['pool5/7x7_s1'],
                                         num_units=1000,
                                         nonlinearity=linear)
    net['prob'] = NonlinearityLayer(net['loss3/classifier'],
                                    nonlinearity=softmax)
    return net
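build_inception_module is not defined in this snippet. The sketch below is a reconstruction following the Lasagne Recipes GoogLeNet, where the six numbers are the branch filter counts in the order (pool_proj, 1x1, 3x3_reduce, 3x3, 5x5_reduce, 5x5); treat it as an assumption about the helper, not necessarily the author's exact code:

from lasagne.layers import ConcatLayer

def build_inception_module(name, input_layer, nfilters):
    # nfilters: (pool_proj, 1x1, 3x3_reduce, 3x3, 5x5_reduce, 5x5)
    net = {}
    net['pool'] = PoolLayer(input_layer, pool_size=3, stride=1, pad=1)
    net['pool_proj'] = ConvLayer(net['pool'], nfilters[0], 1)
    net['1x1'] = ConvLayer(input_layer, nfilters[1], 1)
    net['3x3_reduce'] = ConvLayer(input_layer, nfilters[2], 1)
    net['3x3'] = ConvLayer(net['3x3_reduce'], nfilters[3], 3, pad=1)
    net['5x5_reduce'] = ConvLayer(input_layer, nfilters[4], 1)
    net['5x5'] = ConvLayer(net['5x5_reduce'], nfilters[5], 5, pad=2)
    net['output'] = ConcatLayer([net['1x1'], net['3x3'],
                                 net['5x5'], net['pool_proj']])
    return {name + '/' + key: layer for key, layer in net.items()}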
Example #11
def build_model_small(input_shape, input_var):
    net = {}
    net['input'] = InputLayer(input_shape, input_var=input_var)
    net['input'].num_filters = input_shape[1]
    net['conv1'] = batch_norm(
        ConvLayer(net['input'],
                  num_filters=256,
                  filter_size=11,
                  nonlinearity=nonlinearities.leaky_rectify,
                  pad='same'))
    net['pool1'] = dropout(PoolLayer(net['conv1'], 2, mode='max'), 0.5)
    net['conv2'] = batch_norm(
        ConvLayer(net['pool1'],
                  num_filters=256,
                  filter_size=7,
                  nonlinearity=nonlinearities.leaky_rectify,
                  pad='same'))
    net['pool2'] = dropout(PoolLayer(net['conv2'], 2, mode='max'), 0.5)
    net['conv3'] = batch_norm(
        ConvLayer(net['pool2'],
                  num_filters=396,
                  filter_size=5,
                  nonlinearity=nonlinearities.leaky_rectify,
                  pad='same'))
    net['pool3'] = dropout(PoolLayer(net['conv3'], 2, mode='max'), 0.5)
    net['conv4'] = dropout(
        batch_norm(
            ConvLayer(net['pool3'],
                      num_filters=512,
                      filter_size=3,
                      nonlinearity=nonlinearities.leaky_rectify,
                      pad='same')), 0.5)
    net['conv5'] = dropout(
        batch_norm(
            ConvLayer(net['conv4'],
                      num_filters=1024,
                      filter_size=1,
                      nonlinearity=nonlinearities.leaky_rectify,
                      pad='same')), 0.5)
    net['dense1'] = dropout(
        batch_norm(
            DenseLayer(net['conv5'],
                       num_units=1024,
                       nonlinearity=nonlinearities.leaky_rectify)), 0.5)
    net['dense2'] = DenseLayer(net['dense1'],
                               num_units=11,
                               nonlinearity=nonlinearities.softmax)
    net['prob'] = net['dense2']
    for layer in get_all_layers(net['prob']):
        print(layer)
        print(layer.output_shape)
    return net
Example #12
def build_model():
    net = OrderedDict()

    net['input'] = InputLayer((None, 3, 224, 224))
    net['conv1'] = ConvLayer(net['input'],
                             num_filters=96,
                             filter_size=7,
                             stride=2,
                             flip_filters=False)
    # caffe has alpha = alpha * pool_size
    net['norm1'] = NormLayer(net['conv1'], alpha=0.0001)
    net['pool1'] = PoolLayer(net['norm1'],
                             pool_size=3,
                             stride=3,
                             ignore_border=False)
    net['conv2'] = ConvLayer(net['pool1'],
                             num_filters=256,
                             filter_size=5,
                             flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2'],
                             pool_size=2,
                             stride=2,
                             ignore_border=False)
    net['conv3'] = ConvLayer(net['pool2'],
                             num_filters=512,
                             filter_size=3,
                             pad=1,
                             flip_filters=False)
    net['conv4'] = ConvLayer(net['conv3'],
                             num_filters=512,
                             filter_size=3,
                             pad=1,
                             flip_filters=False)
    net['conv5'] = ConvLayer(net['conv4'],
                             num_filters=512,
                             filter_size=3,
                             pad=1,
                             flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5'],
                             pool_size=3,
                             stride=3,
                             ignore_border=False)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['drop6'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['drop6'], num_units=4096)
    net['drop7'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(net['drop7'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)

    return net
Example #13
    def build_model(self):
        '''
        Build Acoustic Event Net model
        :return:
        '''

        # Architecture A, 41 classes
        nonlin = lasagne.nonlinearities.rectify
        net = {}
        # (channel, time, frequency)
        net['input'] = InputLayer(
            (None, feat_shape[0], feat_shape[1], feat_shape[2]))
        # ----------- 1st layer group ---------------
        net['conv1a'] = ConvLayer(net['input'],
                                  num_filters=64,
                                  filter_size=(3, 3),
                                  stride=1,
                                  nonlinearity=nonlin)
        net['conv1b'] = ConvLayer(net['conv1a'],
                                  num_filters=64,
                                  filter_size=(3, 3),
                                  stride=1,
                                  nonlinearity=nonlin)
        net['pool1'] = MaxPool2DLayer(net['conv1b'],
                                      pool_size=(1, 2))  # (time, freq)
        # ----------- 2nd layer group ---------------
        net['conv2a'] = ConvLayer(net['pool1'],
                                  num_filters=128,
                                  filter_size=(3, 3),
                                  stride=1,
                                  nonlinearity=nonlin)
        net['conv2b'] = ConvLayer(net['conv2a'],
                                  num_filters=128,
                                  filter_size=(3, 3),
                                  stride=1,
                                  nonlinearity=nonlin)
        net['pool2'] = MaxPool2DLayer(net['conv2b'],
                                      pool_size=(2, 2))  # (time, freq)
        # ----------- fully connected layer group ---------------
        net['fc5'] = DenseLayer(net['pool2'],
                                num_units=1024,
                                nonlinearity=nonlin)
        net['fc6'] = DenseLayer(net['fc5'],
                                num_units=1024,
                                nonlinearity=nonlin)
        net['prob'] = DenseLayer(net['fc6'],
                                 num_units=41,
                                 nonlinearity=lasagne.nonlinearities.softmax)

        return net
Example #14
def ZFTurboNet(input_var=None):
    l_in = InputLayer(shape=(None, 1, PIXELS, PIXELS), input_var=input_var)

    l_conv = ConvLayer(l_in, num_filters=8, filter_size=3, pad=1, nonlinearity=rectify)
    l_convb = ConvLayer(l_conv, num_filters=8, filter_size=3, pad=1, nonlinearity=rectify)
    l_pool = MaxPool2DLayer(l_convb, pool_size=2) # feature maps 12x12

    #l_dropout1 = DropoutLayer(l_pool, p=0.25)
    l_hidden = DenseLayer(l_pool, num_units=128, nonlinearity=rectify)
    #l_dropout2 = DropoutLayer(l_hidden, p=0.5)

    l_out = DenseLayer(l_hidden, num_units=10, nonlinearity=softmax)

    return l_out
Example #15
def build_shallow_cnn(input_var=None):
    # As a third model, we'll create a CNN of two convolutional stages
    # (batch-normalized, with dropout) and a fully-connected hidden layer
    # in front of the output layer.

    # Input layer, as usual:
    network = lasagne.layers.InputLayer(shape=(None, 1, 48, 48),
                                        input_var=input_var)
    # This time we do not apply input dropout, as it tends to work less well
    # for convolutional layers.

    # Convolutional layer with 32 kernels of size 5x5. Strided and padded
    # convolutions are supported as well; see the docstring.
    network = lasagne.layers.batch_norm(ConvLayer(
            network, num_filters=32, filter_size=(5, 5),
            nonlinearity=lasagne.nonlinearities.rectify,
            flip_filters=False,
            W=lasagne.init.GlorotUniform()))
    # Expert note: Lasagne provides alternative convolutional layers that
    # override Theano's choice of which implementation to use; for details
    # please see http://lasagne.readthedocs.org/en/latest/user/tutorial.html.

    # Add a dropout layer behind the batch-normalized convolution:

    network = lasagne.layers.dropout(network, p=0.5)
    # Max-pooling layer of factor 2 in both dimensions:
    #network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))


    # Another convolution, this time with 64 3x3 kernels, followed by
    # dropout and a 2x2 pooling:
    network = lasagne.layers.batch_norm(ConvLayer(
            network, num_filters=64, filter_size=(3, 3),
            nonlinearity=lasagne.nonlinearities.rectify,
            flip_filters=False))
    network = lasagne.layers.dropout(network, p=0.5)
    network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))

    # A fully-connected layer of 514 units:
    network = lasagne.layers.DenseLayer(
            network,
            num_units=514,
            nonlinearity=lasagne.nonlinearities.rectify)

    # And, finally, the 7-unit softmax output layer:
    network = lasagne.layers.DenseLayer(network,
            num_units=7,
            nonlinearity=lasagne.nonlinearities.softmax)

    return network
Example #16
def smooth_convolution(prediction, n_classes):
    import numpy as np
    import theano.tensor as T
    import lasagne
    from lasagne.layers import Conv1DLayer as ConvLayer
    from lasagne.layers import DimshuffleLayer, ReshapeLayer
    prediction = ReshapeLayer(prediction, (-1, 200, n_classes))
    # channels first
    prediction = DimshuffleLayer(prediction, (0, 2, 1))

    input_size = lasagne.layers.get_output(prediction).shape
    # reshape to put each channel in the batch dimensions, to filter each
    # channel independently
    prediction = ReshapeLayer(prediction,
                              (T.prod(input_size[0:2]), 1, input_size[2]))

    trans_filter = np.tile(np.array([0, -1., 1.]).astype('float32'), (1, 1, 1))
    convolved = ConvLayer(prediction,
                          num_filters=1,
                          filter_size=3,
                          stride=1,
                          b=None,
                          nonlinearity=None,
                          W=trans_filter,
                          pad='same')

    # reshape back
    convolved = ReshapeLayer(convolved, input_size)

    return convolved
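The fixed kernel [0, -1., 1.] with b=None and nonlinearity=None makes this a pure finite-difference operator along the time axis: up to sign and a one-step shift (which depend on flip_filters), each output is the change in the prediction between neighbouring steps, which is what a smoothness term needs. A standalone numpy sketch of the same computation in cross-correlation form:

import numpy as np

x = np.array([0., 1., 3., 6., 10.], dtype='float32')
w = np.array([0., -1., 1.], dtype='float32')
# 'same'-padded cross-correlation: y[t] = x[t+1] - x[t] (zero-padded at the ends)
print(np.correlate(x, w, mode='same'))  # -> [  1.   2.   3.   4. -10.]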
Example #17
 def make_layer(self,
                name,
                input,
                units,
                filter_size=(3, 3),
                stride=(1, 1),
                pad=(1, 1),
                alpha=0.25):
     '''
         name is the name of the layer
     '''
     # ConvLayer takes the incoming layer, units (the number of learnable
     # convolution filters) and the filter size
     conv = ConvLayer(input,
                      units,
                      filter_size,
                      stride=stride,
                      pad=pad,
                      nonlinearity=None)
     # PReLU: a rectifier with a learnable leak parameter, important for
     # neural-network image classification; see Delving Deep into
     # Rectifiers (Kaiming He et al., 2015)
     prelu = lasagne.layers.ParametricRectifierLayer(
         conv, alpha=lasagne.init.Constant(alpha))
     # add layer to neural network OrderedDict
     self.network[name + 'x'] = conv
     # add layer to neural network OrderedDict
     self.network[name + '>'] = prelu
     # return the parametric rectifier
     return prelu
Example #18
    def setup_generator(self, input, config):
        for k, v in config.items():
            setattr(args, k, v)
        units_iter = extend(args.generator_filters)
        units = next(units_iter)
        self.make_layer('iter.0-A',
                        input,
                        units,
                        filter_size=(5, 5),
                        pad=(2, 2))
        self.make_layer('iter.0-B',
                        self.last_layer(),
                        units,
                        filter_size=(5, 5),
                        pad=(2, 2))
        self.network['iter.0'] = self.last_layer()

        for i in range(0, args.generator_blocks):
            self.make_block('iter.%i' % (i + 1), self.last_layer(), units)

        for i in range(0, args.scales):
            u = next(units_iter)
            self.make_layer('scale%i.3' % i, self.last_layer(), u * 4)
            self.network['scale%i.2' % i] = SubpixelReshuffleLayer(
                self.last_layer(), u, 2)
            self.make_layer('scale%i.1' % i, self.last_layer(), u)

        self.network['out'] = ConvLayer(
            self.last_layer(),
            3,
            filter_size=(5, 5),
            stride=(1, 1),
            pad=(2, 2),
            nonlinearity=lasagne.nonlinearities.tanh)
Example #19
 def make_layer(self, name, input, units, filter_size=(3,3), stride=(1,1), pad=(1,1), alpha=0.25):
     reflected = ReflectLayer(input, pad=pad[0]) if pad[0] > 0 else input
     conv = ConvLayer(reflected, units, filter_size, stride=stride, pad=(0,0), nonlinearity=None)
     prelu = lasagne.layers.ParametricRectifierLayer(conv, alpha=lasagne.init.Constant(alpha))
     self.network[name+'x'] = conv
     self.network[name+'>'] = prelu
     return prelu
Example #20
    def setup_generator(self, input, config):
        for k, v in config.items():
            setattr(args, k, v)
        args.zoom = 2**(args.generator_upscale - args.generator_downscale)

        units_iter = extend(args.generator_filters)
        units = next(units_iter)
        self.make_layer('iter.0', input, units, filter_size=(7, 7), pad=(3, 3))

        for i in range(0, args.generator_downscale):
            self.make_layer('downscale%i' % i,
                            self.last_layer(),
                            next(units_iter),
                            filter_size=(4, 4),
                            stride=(2, 2))

        units = next(units_iter)
        for i in range(0, args.generator_blocks):
            self.make_block('iter.%i' % (i + 1), self.last_layer(), units)

        for i in range(0, args.generator_upscale):
            u = next(units_iter)
            self.make_layer('upscale%i.2' % i, self.last_layer(), u * 4)
            self.network['upscale%i.1' % i] = SubpixelReshuffleLayer(
                self.last_layer(), u, 2)

        self.network['out'] = ConvLayer(self.last_layer(),
                                        3,
                                        filter_size=(7, 7),
                                        pad=(3, 3),
                                        nonlinearity=None)
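SubpixelReshuffleLayer is project-specific (the structure here matches the neural-enhance generator), but the operation is the standard subpixel / depth-to-space shuffle from ESPCN: each group of r*r channels is rearranged into an r-times larger spatial grid, which is why every upscale step first quadruples the filters with make_layer(..., u * 4) and then reshuffles down to u channels. A numpy sketch of one common channel-ordering convention (the ordering inside the real layer may differ):

import numpy as np

def depth_to_space(x, r):
    # (b, c*r*r, h, w) -> (b, c, h*r, w*r)
    b, crr, h, w = x.shape
    c = crr // (r * r)
    x = x.reshape(b, c, r, r, h, w)
    x = x.transpose(0, 1, 4, 2, 5, 3)   # -> (b, c, h, r, w, r)
    return x.reshape(b, c, h * r, w * r)

x = np.arange(2 * 8 * 4 * 4, dtype='float32').reshape(2, 8, 4, 4)
print(depth_to_space(x, 2).shape)  # (2, 2, 8, 8)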
Example #21
def std_conv_layer(
        input,
        num_filters,
        filter_shape,
        pad='same',
        nonlinearity=lasagne.nonlinearities.rectify,
        W=None,
        #W = lasagne.init.Normal(std = 0.01, mean = 0.0),
        b=lasagne.init.Constant(0.),
        do_batch_norm=False):
    if W is None:
        if nonlinearity == lasagne.nonlinearities.rectify:
            print('convlayer: rectifier func')
            W = lasagne.init.HeNormal(gain='relu')
        else:
            print('convlayer: non-rectifier func')
            W = lasagne.init.HeNormal(1.0)
    else:
        print('convlayer: W not None')
    conv_layer = ConvLayer(input,
                           num_filters,
                           filter_shape,
                           pad=pad,
                           flip_filters=False,
                           W=W,
                           b=b,
                           nonlinearity=nonlinearity)
    if do_batch_norm:
        conv_layer = lasagne.layers.batch_norm(conv_layer)
    else:
        print('convlayer: No batch norm.')
    return conv_layer
Example #22
def output_path(net, incoming_layer, n_classes, filter_size, out_nonlin):
    '''
    Build the output path (including last conv layer to have n_classes
    feature maps). Dimshuffle layers to fit with softmax implementation

    Parameters
    ----------
    Same as above, plus:
    incoming_layer : string, name of the last layer from the bottleneck layers
    '''

    # Final convolution (n_classes feature maps) with filter_size=1
    net['final_conv'] = ConvLayer(net[incoming_layer], n_classes, 1)

    # The DimshuffleLayer/ReshapeLayer sequence below is needed to fit
    # Lasagne's softmax implementation, which expects 2D input. In training
    # we specify layer=['probs'] to get the right layer; the two last
    # reshape layers are only needed to visualize the data.
    net['final_dimshuffle'] = DimshuffleLayer(net['final_conv'], (0, 2, 1))

    laySize = lasagne.layers.get_output(net['final_dimshuffle']).shape
    net['final_reshape'] = ReshapeLayer(net['final_dimshuffle'],
                                        (T.prod(laySize[0:2]), laySize[2]))

    net['probs'] = NonlinearityLayer(net['final_reshape'],
                                     nonlinearity=out_nonlin)

    net['probs_reshape'] = ReshapeLayer(net['probs'],
                                        (laySize[0], laySize[1], n_classes))

    net['probs_dimshuffle'] = DimshuffleLayer(net['probs_reshape'], (0, 2, 1))

    return net
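To make the dimshuffle/reshape dance concrete, here is the shape flow for a batch of size B over sequences of length N (shapes inferred from the code above; Lasagne's softmax is applied row-wise to 2D input, hence the flattening):

# final_conv:        (B, n_classes, N)
# final_dimshuffle:  (B, N, n_classes)
# final_reshape:     (B*N, n_classes)   <- softmax is applied row-wise here
# probs_reshape:     (B, N, n_classes)
# probs_dimshuffle:  (B, n_classes, N)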
Example #23
def std_conv_layer(input, num_filters, filter_shape, pad='same'):
    return ConvLayer(input,
                     num_filters,
                     filter_shape,
                     pad=pad,
                     flip_filters=False,
                     W=lasagne.init.Normal(std=0.01, mean=0.0))
Example #24
    def make_recursive_block(self,
                             name,
                             input,
                             units=128,
                             filter_size=(3, 3),
                             stride=(1, 1),
                             pad=(1, 1),
                             res_blocks=9):
        residual = input
        input = ConvLayer(input,
                          units,
                          filter_size,
                          stride=stride,
                          pad=pad,
                          nonlinearity=None)
        out = input

        for _ in range(
                res_blocks):  # number of res units per one recursive block
            out = lasagne.layers.rrelu(out)
            out = ConvLayer(out,
                            units,
                            filter_size,
                            stride=stride,
                            pad=pad,
                            nonlinearity=None)
            out = lasagne.layers.rrelu(out)
            out = ConvLayer(out,
                            units,
                            filter_size,
                            stride=stride,
                            pad=pad,
                            nonlinearity=None)
            out = ElemwiseSumLayer([out, input])

        out = lasagne.layers.rrelu(out)
        out = ConvLayer(out,
                        units,
                        filter_size,
                        stride=stride,
                        pad=pad,
                        nonlinearity=None)
        out = ElemwiseSumLayer([out, residual])

        self.network[name + '&'] = out

        return out
Example #25
def build_small_model():
    net = {}
    net['input_layer'] = InputLayer((None,3,128,128))
    net['conv1_1'] = ConvLayer(
        net['input_layer'], 64, 3, pad=1)#, flip_filters=False)
    net['conv1_2'] = ConvLayer(
        net['conv1_1'], 64, 3, pad=1)#, flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(
        net['pool1'], 128, 3, pad=1)#, flip_filters=False)
    net['conv2_2'] = ConvLayer(
        net['conv2_1'], 128, 3, pad=1)  #, flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(
        net['pool2'], 256, 3, pad=1)  #, flip_filters=False)
    net['conv3_2'] = ConvLayer(
        net['conv3_1'], 256, 3, pad=1)#, flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_2'], 2)
    net['conv4_1'] = ConvLayer(
        net['pool3'], 512, 3, pad=1)#, flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_1'], 2)
    net['fc6'] = DenseLayer(net['pool4'], num_units=200)
    net['fc6_dropout'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['fc6_dropout'], num_units=100)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(
        net['fc7_dropout'], num_units=2, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)
    return net
Example #26
def conv_bn_relu(net, incoming_layer, depth, num_filters, filter_size, pad='same'):
    net['conv' + str(depth)] = ConvLayer(net[incoming_layer],
                                         num_filters=num_filters,
                                         filter_size=filter_size,
                                         pad=pad, nonlinearity=None)
    net['bn' + str(depth)] = BatchNormLayer(net['conv' + str(depth)])
    net['relu' + str(depth)] = NonlinearityLayer(net['bn' + str(depth)],
                                                 nonlinearity=rectify)
    incoming_layer = 'relu' + str(depth)

    return incoming_layer
Example #27
def build_model_multiscale(n_feature_maps, scales, nonlinearity=lasagne.nonlinearities.rectify):
    net = {}
    net['input'] = InputLayer((1, 3, IMAGE_W, IMAGE_W))

    multiple_scales = [ConvLayer(net['input'], n_feature_maps, filter_size, pad=filter_size//2, flip_filters=False,
                                 nonlinearity=nonlinearity)
                       for filter_size in scales]
    net['conv1_1'] = ConcatLayer(multiple_scales)
    return net
Example #28
def build_model_dense(input_shape, input_var):
    net = {}
    net['input'] = InputLayer(input_shape, input_var=input_var)
    net['input'].num_filters = input_shape[1]
    net['conv1'] = ConvLayer(net['input'], num_filters=256, filter_size=3, nonlinearity=nonlinearities.leaky_rectify, pad='same')
    net['conv2'] = ConvLayer(net['conv1'], num_filters=256, filter_size=3, nonlinearity=nonlinearities.leaky_rectify, pad='same')
    net['conv2/reshape'] = ReshapeLayer(net['conv2'], (-1, net['conv2'].output_shape[1] * net['conv2'].output_shape[2]))
    net['dense'] = dropout(DenseLayer(net['conv2/reshape'], num_units=1024, nonlinearity=nonlinearities.leaky_rectify), 0.5)

    net['dense/inverse'] = inverse_dense_layer(net['dense'], net['dense'], net['conv2'].output_shape)
    net['conv2/inverse'] = inverse_convolution_layer(net['dense/inverse'], net['conv2'])
    net['conv1/inverse'] = inverse_convolution_layer(net['conv2/inverse'], net['conv1'])
    net['conv0/inverse'] = ConvLayer(net['conv1/inverse'], num_filters=input_shape[1], filter_size=1,nonlinearity=nonlinearities.linear, pad='same')
    net['prob'] = net['conv0/inverse']
    for layer in get_all_layers(net['prob']):
        print(layer)
        print(layer.output_shape)
    return net
Example #29
def buildmodel(x):
    net = {}
    net['input'] = InputLayer((None, 3, 32, 32), input_var=x)
    net['conv1'] = ConvLayer(net['input'],
                             num_filters=192,
                             filter_size=5,
                             pad=2,
                             flip_filters=False)
    net['cccp1'] = ConvLayer(
        net['conv1'], num_filters=160, filter_size=1, flip_filters=False)
    net['cccp2'] = ConvLayer(
        net['cccp1'], num_filters=96, filter_size=1, flip_filters=False)
    net['pool1'] = PoolLayer(net['cccp2'],
                             pool_size=3,
                             stride=2,
                             mode='max',
                             ignore_border=False)
    net['drop3'] = DropoutLayer(net['pool1'], p=0.5)
    net['conv2'] = ConvLayer(net['drop3'],
                             num_filters=192,
                             filter_size=5,
                             pad=2,
                             flip_filters=False)
    net['cccp3'] = ConvLayer(
        net['conv2'], num_filters=192, filter_size=1, flip_filters=False)
    net['cccp4'] = ConvLayer(
        net['cccp3'], num_filters=192, filter_size=1, flip_filters=False)
    net['pool2'] = PoolLayer(net['cccp4'],
                             pool_size=3,
                             stride=2,
                             mode='average_exc_pad',
                             ignore_border=False)
    net['drop6'] = DropoutLayer(net['pool2'], p=0.5)
    net['conv3'] = ConvLayer(net['drop6'],
                             num_filters=192,
                             filter_size=3,
                             pad=1,
                             flip_filters=False)
    net['cccp5'] = ConvLayer(
        net['conv3'], num_filters=192, filter_size=1, flip_filters=False)
    net['cccp6'] = ConvLayer(
        net['cccp5'], num_filters=10, filter_size=1, flip_filters=False)
    net['pool3'] = PoolLayer(net['cccp6'],
                             pool_size=8,
                             mode='average_exc_pad',
                             ignore_border=False)
    net['out'] = NonlinearityLayer(FlattenLayer(net['pool3']), nonlinearity=nonlinearities.softmax)
    net['dense'] = layers.DenseLayer(net['cccp6'], 10, b=None, nonlinearity=nonlinearities.softmax)
    return net
Example #30
def build_neural_network(input_var, input_shape):
    net = {}

    net['input'] = InputLayer(input_shape, input_var)
    net['conv1'] = ConvLayer(net['input'],
                             num_filters=96,
                             filter_size=7,
                             stride=2)
    net['norm1'] = NormLayer(net['conv1'], alpha=0.0001)
    net['pool1'] = PoolLayer(net['norm1'],
                             pool_size=3,
                             stride=3,
                             ignore_border=False)
    net['conv2'] = ConvLayer(net['pool1'], num_filters=256, filter_size=5)
    net['pool2'] = PoolLayer(net['conv2'],
                             pool_size=2,
                             stride=2,
                             ignore_border=False)
    net['conv3'] = ConvLayer(net['pool2'],
                             num_filters=512,
                             filter_size=3,
                             pad=1)
    net['conv4'] = ConvLayer(net['conv3'],
                             num_filters=512,
                             filter_size=3,
                             pad=1)
    net['conv5'] = ConvLayer(net['conv4'],
                             num_filters=512,
                             filter_size=3,
                             pad=1)
    net['pool5'] = PoolLayer(net['conv5'],
                             pool_size=3,
                             stride=3,
                             ignore_border=False)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['drop6'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['drop6'], num_units=4096)
    net['drop7'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(net['drop7'], num_units=1, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], sigmoid)

    return net