def build_model(input_var, dro=0.5):
    # GoogLeNet variant with a single sigmoid output unit; `dro` is the
    # dropout rate passed to the inception_4* modules and the final classifier.
    net = {}
    net['input'] = InputLayer((None, 3, 299, 299), input_var=input_var)
    print(net['input'])
    net['conv1/7x7_s2'] = ConvLayer(
        net['input'], 64, 7, stride=2, pad=3, flip_filters=False)
    print(net['conv1/7x7_s2'])
    net['pool1/3x3_s2'] = PoolLayer(
        net['conv1/7x7_s2'], pool_size=3, stride=2, ignore_border=False)
    net['pool1/norm1'] = LRNLayer(net['pool1/3x3_s2'], alpha=0.00002, k=1)
    net['conv2/3x3_reduce'] = ConvLayer(
        net['pool1/norm1'], 64, 1, flip_filters=False)
    net['conv2/3x3'] = ConvLayer(
        net['conv2/3x3_reduce'], 192, 3, pad=1, flip_filters=False)
    net['conv2/norm2'] = LRNLayer(net['conv2/3x3'], alpha=0.00002, k=1)
    net['pool2/3x3_s2'] = PoolLayer(
        net['conv2/norm2'], pool_size=3, stride=2, ignore_border=False)
    net.update(build_inception_module('inception_3a', net['pool2/3x3_s2'],
                                      [32, 64, 96, 128, 16, 32]))
    net.update(build_inception_module('inception_3b', net['inception_3a/output'],
                                      [64, 128, 128, 192, 32, 96]))
    net['pool3/3x3_s2'] = PoolLayer(
        net['inception_3b/output'], pool_size=3, stride=2, ignore_border=False)
    net.update(build_inception_module('inception_4a', net['pool3/3x3_s2'],
                                      [64, 192, 96, 208, 16, 48], dro))
    net.update(build_inception_module('inception_4b', net['inception_4a/output'],
                                      [64, 160, 112, 224, 24, 64], dro))
    net.update(build_inception_module('inception_4c', net['inception_4b/output'],
                                      [64, 128, 128, 256, 24, 64], dro))
    net.update(build_inception_module('inception_4d', net['inception_4c/output'],
                                      [64, 112, 144, 288, 32, 64], dro))
    net.update(build_inception_module('inception_4e', net['inception_4d/output'],
                                      [128, 256, 160, 320, 32, 128], dro))
    net['pool4/3x3_s2'] = PoolLayer(
        net['inception_4e/output'], pool_size=3, stride=2, ignore_border=False)
    net.update(build_inception_module('inception_5a', net['pool4/3x3_s2'],
                                      [128, 256, 160, 320, 32, 128]))
    net.update(build_inception_module('inception_5b', net['inception_5a/output'],
                                      [128, 384, 192, 384, 48, 128]))
    net['pool5/7x7_s1'] = GlobalPoolLayer(net['inception_5b/output'])
    net['pool5/7x7_s1_dropout'] = DropoutLayer(net['pool5/7x7_s1'], p=dro)
    net['loss3/classifier'] = DenseLayer(net['pool5/7x7_s1_dropout'],
                                         num_units=1, nonlinearity=linear)
    net['prob'] = NonlinearityLayer(net['loss3/classifier'], nonlinearity=sigmoid)
    return net
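# Several builders in this file call build_inception_module without defining
# it; only the GoogLeNet snippet further below carries its own inline copy.
# The following is a minimal sketch, assuming the filter-count convention
# [pool_proj, 1x1, 3x3_reduce, 3x3, 5x5_reduce, 5x5] used throughout, plus an
# optional dropout rate matching the extra argument build_model above passes
# for its inception_4* modules; placing the dropout on the concatenated
# module output is an assumption, not taken from the original code.
from lasagne.layers import (ConcatLayer, DropoutLayer,
                            Conv2DLayer as ConvLayer,
                            MaxPool2DLayer as PoolLayer)

def build_inception_module(name, input_layer, nfilters, dro=None):
    # nfilters: [pool_proj, 1x1, 3x3_reduce, 3x3, 5x5_reduce, 5x5]
    net = {}
    net['pool'] = PoolLayer(input_layer, pool_size=3, stride=1, pad=1)
    net['pool_proj'] = ConvLayer(net['pool'], nfilters[0], 1, flip_filters=False)
    net['1x1'] = ConvLayer(input_layer, nfilters[1], 1, flip_filters=False)
    net['3x3_reduce'] = ConvLayer(input_layer, nfilters[2], 1, flip_filters=False)
    net['3x3'] = ConvLayer(net['3x3_reduce'], nfilters[3], 3, pad=1,
                           flip_filters=False)
    net['5x5_reduce'] = ConvLayer(input_layer, nfilters[4], 1, flip_filters=False)
    net['5x5'] = ConvLayer(net['5x5_reduce'], nfilters[5], 5, pad=2,
                           flip_filters=False)
    output = ConcatLayer([net['1x1'], net['3x3'], net['5x5'], net['pool_proj']])
    if dro is not None:
        # Assumed placement: dropout applied to the concatenated output.
        output = DropoutLayer(output, p=dro)
    net['output'] = output
    return {'{}/{}'.format(name, k): v for k, v in net.items()}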
def build_cnn(input_var=None):
    network = lasagne.layers.InputLayer(shape=(BATCHSIZE, 3, 256, 256),
                                        input_var=input_var)
    network = Conv2DLayer(network, num_filters=64, filter_size=7, stride=2, pad=3)
    network = lasagne.layers.MaxPool2DLayer(network, pool_size=3, stride=2,
                                            ignore_border=False)
    network = LRNLayer(network, alpha=0.00002, k=1)
    network = lasagne.layers.NINLayer(network, num_units=64,
                                      W=lasagne.init.Orthogonal(1),
                                      b=lasagne.init.Constant(0))
    network = Conv2DLayer(network, 192, 3, pad=1)
    network = LRNLayer(network, alpha=0.00002, k=1)
    network = lasagne.layers.MaxPool2DLayer(network, pool_size=3, stride=2)
    network = inception_module(network, pool_filters=32, num_1x1=64,
                               reduce_3x3=96, num_3x3=128,
                               reduce_5x5=16, num_5x5=32)
    network = inception_module(network, pool_filters=64, num_1x1=128,
                               reduce_3x3=128, num_3x3=192,
                               reduce_5x5=32, num_5x5=96)
    network = lasagne.layers.MaxPool2DLayer(network, pool_size=3, stride=2)
    network = inception_module(network, pool_filters=64, num_1x1=192,
                               reduce_3x3=96, num_3x3=208,
                               reduce_5x5=16, num_5x5=48)
    network = inception_module(network, pool_filters=64, num_1x1=160,
                               reduce_3x3=112, num_3x3=224,
                               reduce_5x5=24, num_5x5=64)
    network = inception_module(network, pool_filters=64, num_1x1=128,
                               reduce_3x3=128, num_3x3=256,
                               reduce_5x5=24, num_5x5=64)
    network = inception_module(network, pool_filters=64, num_1x1=112,
                               reduce_3x3=144, num_3x3=288,
                               reduce_5x5=32, num_5x5=64)
    network = inception_module(network, pool_filters=128, num_1x1=256,
                               reduce_3x3=160, num_3x3=320,
                               reduce_5x5=32, num_5x5=128)
    network = lasagne.layers.MaxPool2DLayer(network, pool_size=3, stride=2)
    network = inception_module(network, pool_filters=128, num_1x1=256,
                               reduce_3x3=160, num_3x3=320,
                               reduce_5x5=32, num_5x5=128)
    network = inception_module(network, pool_filters=128, num_1x1=384,
                               reduce_3x3=192, num_3x3=384,
                               reduce_5x5=48, num_5x5=128)
    network = GlobalPoolLayer(network)
    network = lasagne.layers.DenseLayer(
        lasagne.layers.dropout(network, p=.4),
        num_units=344, nonlinearity=lasagne.nonlinearities.linear)
    network = lasagne.layers.NonlinearityLayer(
        network, nonlinearity=lasagne.nonlinearities.softmax)
    return network
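# build_cnn relies on an inception_module helper that is not shown. A minimal
# sketch matching its keyword signature; the branch ordering in the concat
# and the default (rectify) nonlinearities are assumptions.
import lasagne
from lasagne.layers import Conv2DLayer

def inception_module(network, pool_filters, num_1x1, reduce_3x3, num_3x3,
                     reduce_5x5, num_5x5):
    # Four parallel branches over the same input, concatenated on the
    # channel axis, as in the GoogLeNet paper.
    pool = lasagne.layers.MaxPool2DLayer(network, pool_size=3, stride=1, pad=1)
    pool_proj = Conv2DLayer(pool, pool_filters, 1)
    conv_1x1 = Conv2DLayer(network, num_1x1, 1)
    conv_3x3 = Conv2DLayer(Conv2DLayer(network, reduce_3x3, 1), num_3x3, 3,
                           pad=1)
    conv_5x5 = Conv2DLayer(Conv2DLayer(network, reduce_5x5, 1), num_5x5, 5,
                           pad=2)
    return lasagne.layers.ConcatLayer([conv_1x1, conv_3x3, conv_5x5, pool_proj])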
def build_model_LeNet(input_var=None):
    # Despite the name, this builds a GoogLeNet-style network on
    # single-channel 224x224 input; num_categories is assumed to be a global.
    net = {}
    net['input'] = InputLayer(shape=(None, 1, 224, 224), input_var=input_var)
    net['conv1/7x7_s2'] = ConvLayer(net['input'], 64, 7, stride=2, pad=3)
    net['pool1/3x3_s2'] = PoolLayer(net['conv1/7x7_s2'], pool_size=3, stride=2,
                                    ignore_border=False)
    net['pool1/norm1'] = LRNLayer(net['pool1/3x3_s2'], alpha=0.00002, k=1)
    net['conv2/3x3_reduce'] = ConvLayer(net['pool1/norm1'], 64, 1)
    net['conv2/3x3'] = ConvLayer(net['conv2/3x3_reduce'], 192, 3, pad=1)
    net['conv2/norm2'] = LRNLayer(net['conv2/3x3'], alpha=0.00002, k=1)
    net['pool2/3x3_s2'] = PoolLayer(net['conv2/norm2'], pool_size=3, stride=2)
    net.update(build_inception_module('inception_3a', net['pool2/3x3_s2'],
                                      [32, 64, 96, 128, 16, 32]))
    net.update(build_inception_module('inception_3b', net['inception_3a/output'],
                                      [64, 128, 128, 192, 32, 96]))
    net['pool3/3x3_s2'] = PoolLayer(net['inception_3b/output'], pool_size=3,
                                    stride=2)
    net.update(build_inception_module('inception_4a', net['pool3/3x3_s2'],
                                      [64, 192, 96, 208, 16, 48]))
    net.update(build_inception_module('inception_4b', net['inception_4a/output'],
                                      [64, 160, 112, 224, 24, 64]))
    net.update(build_inception_module('inception_4c', net['inception_4b/output'],
                                      [64, 128, 128, 256, 24, 64]))
    net.update(build_inception_module('inception_4d', net['inception_4c/output'],
                                      [64, 112, 144, 288, 32, 64]))
    net.update(build_inception_module('inception_4e', net['inception_4d/output'],
                                      [128, 256, 160, 320, 32, 128]))
    net['pool4/3x3_s2'] = PoolLayer(net['inception_4e/output'], pool_size=3,
                                    stride=2)
    net.update(build_inception_module('inception_5a', net['pool4/3x3_s2'],
                                      [128, 256, 160, 320, 32, 128]))
    net.update(build_inception_module('inception_5b', net['inception_5a/output'],
                                      [128, 384, 192, 384, 48, 128]))
    net['pool5/7x7_s1'] = GlobalPoolLayer(net['inception_5b/output'])
    net['loss3/classifier'] = DenseLayer(net['pool5/7x7_s1'],
                                         num_units=num_categories,
                                         nonlinearity=linear)
    net['prob'] = NonlinearityLayer(net['loss3/classifier'], nonlinearity=softmax)
    # print(net)
    return net['prob']
def CNN_model():
    net = {}
    net['input'] = InputLayer((None, 3, 64, 64))
    # Strided conv replaces the (commented-out) pooling layer below.
    net['conv1/3x3_s1'] = ConvLayer(net['input'], 16, 3, stride=2, pad='same',
                                    W=lasagne.init.HeNormal(gain='relu'))  # 16*32*32
    # net['pool1/3x3_s2'] = PoolLayer(net['conv1/3x3_s1'], pool_size=3,
    #                                 stride=2, ignore_border=False)  # 16*32*32
    net['pool1/norm1'] = LRNLayer(net['conv1/3x3_s1'], alpha=0.00002, k=1)
    net['conv2/3x3_s1'] = ConvLayer(net['pool1/norm1'], 32, 3, stride=1,
                                    pad='same',
                                    W=lasagne.init.HeNormal(gain='relu'))  # 32*32*32
    net['pool2/3x3_s2'] = PoolLayer(net['conv2/3x3_s1'], pool_size=3, stride=2,
                                    ignore_border=False)  # 32*16*16
    net['pool2/norm1'] = LRNLayer(net['pool2/3x3_s2'], alpha=0.00002, k=1)
    net['conv3/3x3_s1'] = ConvLayer(net['pool2/norm1'], 64, 3, stride=1,
                                    pad='same',
                                    W=lasagne.init.HeNormal(gain='relu'))  # 64*16*16
    net['pool3/3x3_s2'] = PoolLayer(net['conv3/3x3_s1'], pool_size=3, stride=2,
                                    ignore_border=False)  # 64*8*8
    net['pool3/norm1'] = LRNLayer(net['pool3/3x3_s2'], alpha=0.00002, k=1)
    net['conv4/3x3_s1'] = ConvLayer(net['pool3/norm1'], 128, 3, stride=1,
                                    pad='same',
                                    W=lasagne.init.HeNormal(gain='relu'))  # 128*8*8
    net['pool4/3x3_s2'] = PoolLayer(net['conv4/3x3_s1'], pool_size=3, stride=2,
                                    ignore_border=False)  # 128*4*4
    net['pool4/norm1'] = LRNLayer(net['pool4/3x3_s2'], alpha=0.00002, k=1)
    net['conv5/3x3_s1'] = ConvLayer(net['pool4/norm1'], 128, 3, stride=1,
                                    pad='same',
                                    W=lasagne.init.HeNormal(gain='relu'))
    net['pool5/norm1'] = LRNLayer(net['conv5/3x3_s1'], alpha=0.00002, k=1)
    net['conv6/3x3_s1'] = ConvLayer(net['pool5/norm1'], 128, 3, stride=1,
                                    pad='same',
                                    W=lasagne.init.HeNormal(gain='relu'))
    net['pool6/norm1'] = LRNLayer(net['conv6/3x3_s1'], alpha=0.00002, k=1)
    net['conv7/3x3_s1'] = ConvLayer(net['pool6/norm1'], 128, 3, stride=1,
                                    pad='same',
                                    W=lasagne.init.HeNormal(gain='relu'))
    net['pool7/norm1'] = LRNLayer(net['conv7/3x3_s1'], alpha=0.00002, k=1)
    net['conv8/3x3_s1'] = ConvLayer(net['pool7/norm1'], 256, 3, stride=1,
                                    pad='same',
                                    W=lasagne.init.HeNormal(gain='relu'))  # 256*4*4
    net['pool8/4x4_s1'] = GlobalPoolLayer(net['conv8/3x3_s1'])  # 256
    return net
def build_model(input_shape):
    net = {}
    net['input'] = InputLayer(input_shape)
    net['conv1'] = ConvLayer(net['input'], num_filters=96, filter_size=7,
                             stride=2, flip_filters=False)
    net['norm1'] = LRNLayer(net['conv1'],
                            alpha=0.0001)  # caffe has alpha = alpha * pool_size
    net['pool1'] = PoolLayer(net['norm1'], pool_size=3, stride=3,
                             ignore_border=False)
    net['conv2'] = ConvLayer(net['pool1'], num_filters=256, filter_size=5,
                             flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2'], pool_size=2, stride=2,
                             ignore_border=False)
    net['conv3'] = ConvLayer(net['pool2'], num_filters=512, filter_size=3,
                             pad=1, flip_filters=False)
    net['conv4'] = ConvLayer(net['conv3'], num_filters=512, filter_size=3,
                             pad=1, flip_filters=False)
    net['conv5'] = ConvLayer(net['conv4'], num_filters=512, filter_size=3,
                             pad=1, flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5'], pool_size=3, stride=3,
                             ignore_border=False)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['drop6'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['drop6'], num_units=4096)
    net['drop7'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(net['drop7'], num_units=1000,
                            nonlinearity=lasagne.nonlinearities.softmax)
    for layer in net.values():
        print str(lasagne.layers.get_output_shape(layer))
    return net
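# A sketch of how such a dictionary-of-layers model is typically compiled for
# inference in Theano/Lasagne. The 224x224 input shape and variable names are
# assumptions; the deterministic flag disables the dropout layers at test time.
import theano
import theano.tensor as T
import lasagne

input_var = T.tensor4('inputs')
net = build_model((None, 3, 224, 224))
prediction = lasagne.layers.get_output(net['fc8'], input_var,
                                       deterministic=True)
predict_fn = theano.function([input_var], prediction)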
def build_model(input_var):
    from lasagne.layers import InputLayer
    from lasagne.layers import DenseLayer
    from lasagne.layers import ConcatLayer
    from lasagne.layers import NonlinearityLayer
    from lasagne.layers import GlobalPoolLayer
    from lasagne.layers.dnn import Conv2DDNNLayer as ConvLayer
    from lasagne.layers.dnn import MaxPool2DDNNLayer as PoolLayerDNN
    from lasagne.layers import MaxPool2DLayer as PoolLayer
    from lasagne.layers import LocalResponseNormalization2DLayer as LRNLayer
    from lasagne.nonlinearities import softmax, linear

    def build_inception_module(name, input_layer, nfilters):
        # nfilters: (pool_proj, 1x1, 3x3_reduce, 3x3, 5x5_reduce, 5x5)
        net = dict()
        net['pool'] = PoolLayerDNN(input_layer, pool_size=3, stride=1, pad=1)
        net['pool_proj'] = ConvLayer(net['pool'], nfilters[0], 1,
                                     flip_filters=False)
        net['1x1'] = ConvLayer(input_layer, nfilters[1], 1, flip_filters=False)
        net['3x3_reduce'] = ConvLayer(input_layer, nfilters[2], 1,
                                      flip_filters=False)
        net['3x3'] = ConvLayer(net['3x3_reduce'], nfilters[3], 3, pad=1,
                               flip_filters=False)
        net['5x5_reduce'] = ConvLayer(input_layer, nfilters[4], 1,
                                      flip_filters=False)
        net['5x5'] = ConvLayer(net['5x5_reduce'], nfilters[5], 5, pad=2,
                               flip_filters=False)
        net['output'] = ConcatLayer(
            [net['1x1'], net['3x3'], net['5x5'], net['pool_proj']])
        return {'{}/{}'.format(name, k): v for k, v in net.items()}

    net = dict()
    net['input'] = InputLayer((None, 3, None, None), input_var)
    net['conv1/7x7_s2'] = ConvLayer(net['input'], 64, 7, stride=2, pad=3,
                                    flip_filters=False)
    net['pool1/3x3_s2'] = PoolLayer(net['conv1/7x7_s2'], pool_size=3, stride=2,
                                    ignore_border=False)
    net['pool1/norm1'] = LRNLayer(net['pool1/3x3_s2'], alpha=0.00002, k=1)
    net['conv2/3x3_reduce'] = ConvLayer(net['pool1/norm1'], 64, 1,
                                        flip_filters=False)
    net['conv2/3x3'] = ConvLayer(net['conv2/3x3_reduce'], 192, 3, pad=1,
                                 flip_filters=False)
    net['conv2/norm2'] = LRNLayer(net['conv2/3x3'], alpha=0.00002, k=1)
    net['pool2/3x3_s2'] = PoolLayer(net['conv2/norm2'], pool_size=3, stride=2,
                                    ignore_border=False)
    net.update(build_inception_module('inception_3a', net['pool2/3x3_s2'],
                                      [32, 64, 96, 128, 16, 32]))
    net.update(build_inception_module('inception_3b', net['inception_3a/output'],
                                      [64, 128, 128, 192, 32, 96]))
    net['pool3/3x3_s2'] = PoolLayer(net['inception_3b/output'], pool_size=3,
                                    stride=2, ignore_border=False)
    net.update(build_inception_module('inception_4a', net['pool3/3x3_s2'],
                                      [64, 192, 96, 208, 16, 48]))
    net.update(build_inception_module('inception_4b', net['inception_4a/output'],
                                      [64, 160, 112, 224, 24, 64]))
    net.update(build_inception_module('inception_4c', net['inception_4b/output'],
                                      [64, 128, 128, 256, 24, 64]))
    net.update(build_inception_module('inception_4d', net['inception_4c/output'],
                                      [64, 112, 144, 288, 32, 64]))
    net.update(build_inception_module('inception_4e', net['inception_4d/output'],
                                      [128, 256, 160, 320, 32, 128]))
    net['pool4/3x3_s2'] = PoolLayer(net['inception_4e/output'], pool_size=3,
                                    stride=2, ignore_border=False)
    net.update(build_inception_module('inception_5a', net['pool4/3x3_s2'],
                                      [128, 256, 160, 320, 32, 128]))
    net.update(build_inception_module('inception_5b', net['inception_5a/output'],
                                      [128, 384, 192, 384, 48, 128]))
    net['pool5/7x7_s1'] = GlobalPoolLayer(net['inception_5b/output'])
    net['loss3/classifier'] = DenseLayer(net['pool5/7x7_s1'], num_units=1000,
                                         nonlinearity=linear)
    net['prob'] = NonlinearityLayer(net['loss3/classifier'], nonlinearity=softmax)
    return net
def build_model(self, incoming, out_size):
    # NOTE: out_size is not used below; the classifier is fixed at 1000 units.
    net = {}
    # net['input'] = InputLayer((batch_size, 3, 224, 224))
    net['conv1/7x7_s2'] = ConvLayer(incoming, 64, 7, stride=2, pad=3,
                                    flip_filters=False)
    net['pool1/3x3_s2'] = PoolLayer(net['conv1/7x7_s2'], pool_size=3, stride=2,
                                    ignore_border=False)
    net['pool1/norm1'] = LRNLayer(net['pool1/3x3_s2'], alpha=0.00002, k=1)
    net['conv2/3x3_reduce'] = ConvLayer(net['pool1/norm1'], 64, 1,
                                        flip_filters=False)
    net['conv2/3x3'] = ConvLayer(net['conv2/3x3_reduce'], 192, 3, pad=1,
                                 flip_filters=False)
    net['conv2/norm2'] = LRNLayer(net['conv2/3x3'], alpha=0.00002, k=1)
    net['pool2/3x3_s2'] = PoolLayer(net['conv2/norm2'], pool_size=3, stride=2,
                                    ignore_border=False)
    net.update(self.build_inception_module('inception_3a', net['pool2/3x3_s2'],
                                           [32, 64, 96, 128, 16, 32]))
    net.update(self.build_inception_module('inception_3b',
                                           net['inception_3a/output'],
                                           [64, 128, 128, 192, 32, 96]))
    net['pool3/3x3_s2'] = PoolLayer(net['inception_3b/output'], pool_size=3,
                                    stride=2, ignore_border=False)
    net.update(self.build_inception_module('inception_4a', net['pool3/3x3_s2'],
                                           [64, 192, 96, 208, 16, 48]))
    net.update(self.build_inception_module('inception_4b',
                                           net['inception_4a/output'],
                                           [64, 160, 112, 224, 24, 64]))
    net.update(self.build_inception_module('inception_4c',
                                           net['inception_4b/output'],
                                           [64, 128, 128, 256, 24, 64]))
    net.update(self.build_inception_module('inception_4d',
                                           net['inception_4c/output'],
                                           [64, 112, 144, 288, 32, 64]))
    net.update(self.build_inception_module('inception_4e',
                                           net['inception_4d/output'],
                                           [128, 256, 160, 320, 32, 128]))
    net['pool4/3x3_s2'] = PoolLayer(net['inception_4e/output'], pool_size=3,
                                    stride=2, ignore_border=False)
    net.update(self.build_inception_module('inception_5a', net['pool4/3x3_s2'],
                                           [128, 256, 160, 320, 32, 128]))
    net.update(self.build_inception_module('inception_5b',
                                           net['inception_5a/output'],
                                           [128, 384, 192, 384, 48, 128]))
    net['pool5/7x7_s1'] = GlobalPoolLayer(net['inception_5b/output'])
    net['loss3/classifier'] = DenseLayer(net['pool5/7x7_s1'], num_units=1000,
                                         nonlinearity=linear)
    net['prob'] = NonlinearityLayer(net['loss3/classifier'], nonlinearity=softmax)
    return net
def alexnet(library):
    if library == 'keras':
        model = Sequential()
        # First convolutional layer
        model.add(Convolution2D(96, 11, 11, border_mode='valid',
                                subsample=(4, 4), input_shape=(3, 224, 224)))
        model.add(Activation('relu'))
        # Stand-in for AlexNet's local response normalization, which plain
        # Keras 1 does not provide (the Lasagne branch uses LRNLayer).
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        # Second convolutional layer
        model.add(Convolution2D(256, 5, 5, border_mode='valid'))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        # Third convolutional layer
        model.add(Convolution2D(384, 3, 3, border_mode='valid'))
        model.add(Activation('relu'))
        # Fourth convolutional layer
        model.add(Convolution2D(384, 3, 3, border_mode='valid'))
        model.add(Activation('relu'))
        # Fifth convolutional layer
        model.add(Convolution2D(256, 3, 3, border_mode='valid'))
        model.add(Activation('relu'))
        # First fully connected layer
        model.add(Flatten())
        model.add(Dense(4096))
        model.add(Activation('relu'))
        # Second fully connected layer
        model.add(Dense(4096))
        model.add(Activation('relu'))
        # Softmax prediction layer
        model.add(Dense(1000))
        model.add(Activation('softmax'))
        return model
    else:
        net = {}
        net['input'] = InputLayer((None, 3, 224, 224))
        # First convolutional layer
        net['conv1'] = ConvLayer(net['input'], 96, 11, stride=(4, 4),
                                 flip_filters=False)
        net['norm1'] = LRNLayer(net['conv1'])
        net['pool1'] = PoolLayer(net['norm1'], 3, stride=2)
        # Second convolutional layer
        net['conv2'] = ConvLayer(net['pool1'], 256, 5, flip_filters=False)
        net['norm2'] = LRNLayer(net['conv2'])
        net['pool2'] = PoolLayer(net['norm2'], 3, stride=2)
        # Third convolutional layer
        net['conv3'] = ConvLayer(net['pool2'], 384, 3, flip_filters=False)
        # Fourth convolutional layer
        net['conv4'] = ConvLayer(net['conv3'], 384, 3, flip_filters=False)
        # Fifth convolutional layer
        net['conv5'] = ConvLayer(net['conv4'], 256, 3, flip_filters=False)
        # First fully connected layer
        net['fc1'] = DenseLayer(net['conv5'], num_units=4096)
        # Second fully connected layer
        net['fc2'] = DenseLayer(net['fc1'], num_units=4096)
        # Softmax prediction layer
        net['fc3'] = DenseLayer(net['fc2'], num_units=1000, nonlinearity=None)
        net['prob'] = NonlinearityLayer(net['fc3'], softmax)
        return net
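# A quick sanity check that the two branches define networks of comparable
# size. Both parameter-count helpers should exist in Keras 1 and Lasagne
# respectively; this usage snippet is illustrative, not from the original.
keras_model = alexnet('keras')
lasagne_net = alexnet('lasagne')
print(keras_model.count_params())
print(lasagne.layers.count_params(lasagne_net['prob']))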
def googlenet(library):
    if library == 'keras':
        model = Sequential()
        # First convolutional layer
        model.add(ZeroPadding2D(padding=(3, 3), input_shape=(3, 224, 224)))
        model.add(Convolution2D(64, 7, 7, subsample=(2, 2)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        # Stand-in for local response normalization, which plain Keras 1
        # does not provide (the Lasagne branch uses LRNLayer).
        model.add(BatchNormalization())
        # Second convolutional layer (1x1 reduce, then 3x3)
        model.add(Convolution2D(64, 1, 1))
        model.add(Activation('relu'))
        model.add(ZeroPadding2D(padding=(1, 1)))
        model.add(Convolution2D(192, 3, 3))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        # First set of inception modules
        create_inception_module_keras(model, [32, 64, 96, 128, 16, 32])
        create_inception_module_keras(model, [64, 128, 128, 192, 32, 96])
        model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        # Second set of inception modules
        create_inception_module_keras(model, [64, 192, 96, 208, 16, 48])
        create_inception_module_keras(model, [64, 160, 112, 224, 24, 64])
        create_inception_module_keras(model, [64, 128, 128, 256, 24, 64])
        create_inception_module_keras(model, [64, 112, 144, 288, 32, 64])
        create_inception_module_keras(model, [128, 256, 160, 320, 32, 128])
        model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        # Third set of inception modules
        create_inception_module_keras(model, [128, 256, 160, 320, 32, 128])
        create_inception_module_keras(model, [128, 384, 192, 384, 48, 128])
        # Average pooling layer
        model.add(AveragePooling2D())
        # Softmax prediction layer
        model.add(Flatten())
        model.add(Dense(1000))
        model.add(Activation('softmax'))
        return model
    else:
        net = {}
        net['input'] = InputLayer((None, 3, 224, 224))
        # First convolutional layer
        net['conv1/7x7_s2'] = ConvLayer(net['input'], 64, 7, stride=2, pad=3,
                                        flip_filters=False)
        net['pool1/3x3_s2'] = PoolLayer(net['conv1/7x7_s2'], pool_size=3,
                                        stride=2, ignore_border=False)
        net['pool1/norm1'] = LRNLayer(net['pool1/3x3_s2'], alpha=0.00002, k=1)
        # Second convolutional layer
        net['conv2/3x3_reduce'] = ConvLayer(net['pool1/norm1'], 64, 1,
                                            flip_filters=False)
        net['conv2/3x3'] = ConvLayer(net['conv2/3x3_reduce'], 192, 3, pad=1,
                                     flip_filters=False)
        net['conv2/norm2'] = LRNLayer(net['conv2/3x3'], alpha=0.00002, k=1)
        net['pool2/3x3_s2'] = PoolLayer(net['conv2/norm2'], pool_size=3,
                                        stride=2, ignore_border=False)
        # First set of inception modules
        net.update(create_inception_module_lasagne(
            'inception_3a', net['pool2/3x3_s2'], [32, 64, 96, 128, 16, 32]))
        net.update(create_inception_module_lasagne(
            'inception_3b', net['inception_3a/output'],
            [64, 128, 128, 192, 32, 96]))
        net['pool3/3x3_s2'] = PoolLayer(net['inception_3b/output'], pool_size=3,
                                        stride=2, ignore_border=False)
        # Second set of inception modules
        net.update(create_inception_module_lasagne(
            'inception_4a', net['pool3/3x3_s2'], [64, 192, 96, 208, 16, 48]))
        net.update(create_inception_module_lasagne(
            'inception_4b', net['inception_4a/output'],
            [64, 160, 112, 224, 24, 64]))
        net.update(create_inception_module_lasagne(
            'inception_4c', net['inception_4b/output'],
            [64, 128, 128, 256, 24, 64]))
        net.update(create_inception_module_lasagne(
            'inception_4d', net['inception_4c/output'],
            [64, 112, 144, 288, 32, 64]))
        net.update(create_inception_module_lasagne(
            'inception_4e', net['inception_4d/output'],
            [128, 256, 160, 320, 32, 128]))
        net['pool4/3x3_s2'] = PoolLayer(net['inception_4e/output'], pool_size=3,
                                        stride=2, ignore_border=False)
        # Third set of inception modules
        net.update(create_inception_module_lasagne(
            'inception_5a', net['pool4/3x3_s2'], [128, 256, 160, 320, 32, 128]))
        net.update(create_inception_module_lasagne(
            'inception_5b', net['inception_5a/output'],
            [128, 384, 192, 384, 48, 128]))
        net['pool5/7x7_s1'] = GlobalPoolLayer(net['inception_5b/output'])
        net['loss3/classifier'] = DenseLayer(net['pool5/7x7_s1'],
                                             num_units=1000, nonlinearity=None)
        net['prob'] = NonlinearityLayer(net['loss3/classifier'],
                                        nonlinearity=softmax)
        return net
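# create_inception_module_keras is used above with a Sequential model, but
# Sequential cannot express the four parallel inception branches; in Keras 1
# this needs the functional API. Below is a sketch of such a module operating
# on a tensor rather than a Sequential model (so the calls above would need
# rewriting in the functional style); the 'same'-padding choices are
# assumptions. The Lasagne helper create_inception_module_lasagne presumably
# mirrors the build_inception_module defined inline in the GoogLeNet snippet
# earlier in this file.
from keras.layers import Convolution2D, MaxPooling2D, merge

def create_inception_module_keras(x, nfilters):
    # nfilters: [pool_proj, 1x1, 3x3_reduce, 3x3, 5x5_reduce, 5x5]
    conv_1x1 = Convolution2D(nfilters[1], 1, 1, activation='relu',
                             border_mode='same')(x)
    conv_3x3 = Convolution2D(nfilters[2], 1, 1, activation='relu',
                             border_mode='same')(x)
    conv_3x3 = Convolution2D(nfilters[3], 3, 3, activation='relu',
                             border_mode='same')(conv_3x3)
    conv_5x5 = Convolution2D(nfilters[4], 1, 1, activation='relu',
                             border_mode='same')(x)
    conv_5x5 = Convolution2D(nfilters[5], 5, 5, activation='relu',
                             border_mode='same')(conv_5x5)
    pool = MaxPooling2D(pool_size=(3, 3), strides=(1, 1),
                        border_mode='same')(x)
    pool_proj = Convolution2D(nfilters[0], 1, 1, activation='relu',
                              border_mode='same')(pool)
    # Concatenate the four branches along the channel axis (Theano ordering).
    return merge([conv_1x1, conv_3x3, conv_5x5, pool_proj],
                 mode='concat', concat_axis=1)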
def build_model(self, input_var, forward, dropout):
    net = dict()
    net['input'] = InputLayer((None, 3, None, None), input_var=input_var)
    net['conv1/7x7_s2'] = ConvLayer(net['input'], 64, 7, stride=2, pad=3,
                                    flip_filters=False)
    net['pool1/3x3_s2'] = PoolLayer(net['conv1/7x7_s2'], pool_size=3, stride=2,
                                    ignore_border=False)
    net['pool1/norm1'] = LRNLayer(net['pool1/3x3_s2'], alpha=0.00002, k=1)
    net['conv2/3x3_reduce'] = ConvLayer(net['pool1/norm1'], 64, 1,
                                        flip_filters=False)
    net['conv2/3x3'] = ConvLayer(net['conv2/3x3_reduce'], 192, 3, pad=1,
                                 flip_filters=False)
    net['conv2/norm2'] = LRNLayer(net['conv2/3x3'], alpha=0.00002, k=1)
    net['pool2/3x3_s2'] = PoolLayerDNN(net['conv2/norm2'], pool_size=3, stride=2)
    net.update(self.build_inception_module('inception_3a', net['pool2/3x3_s2'],
                                           [32, 64, 96, 128, 16, 32]))
    net.update(self.build_inception_module('inception_3b',
                                           net['inception_3a/output'],
                                           [64, 128, 128, 192, 32, 96]))
    net['pool3/3x3_s2'] = PoolLayerDNN(net['inception_3b/output'], pool_size=3,
                                       stride=2)
    net.update(self.build_inception_module('inception_4a', net['pool3/3x3_s2'],
                                           [64, 192, 96, 208, 16, 48]))
    net.update(self.build_inception_module('inception_4b',
                                           net['inception_4a/output'],
                                           [64, 160, 112, 224, 24, 64]))
    net.update(self.build_inception_module('inception_4c',
                                           net['inception_4b/output'],
                                           [64, 128, 128, 256, 24, 64]))
    net.update(self.build_inception_module('inception_4d',
                                           net['inception_4c/output'],
                                           [64, 112, 144, 288, 32, 64]))
    net.update(self.build_inception_module('inception_4e',
                                           net['inception_4d/output'],
                                           [128, 256, 160, 320, 32, 128]))
    net['pool4/3x3_s2'] = PoolLayerDNN(net['inception_4e/output'], pool_size=3,
                                       stride=2)
    net.update(self.build_inception_module('inception_5a', net['pool4/3x3_s2'],
                                           [128, 256, 160, 320, 32, 128]))
    net.update(self.build_inception_module('inception_5b',
                                           net['inception_5a/output'],
                                           [128, 384, 192, 384, 48, 128]))
    net['pool5/7x7_s1'] = GlobalPoolLayer(net['inception_5b/output'])
    if forward:
        # net['fc6'] = DenseLayer(net['pool5/7x7_s1'], num_units=1000)
        net['prob'] = DenseLayer(net['pool5/7x7_s1'], num_units=4,
                                 nonlinearity=softmax)
    else:
        net['dropout1'] = DropoutLayer(net['pool5/7x7_s1'], p=dropout)
        # net['fc6'] = DenseLayer(net['dropout1'], num_units=1000)
        # net['dropout2'] = DropoutLayer(net['fc6'], p=dropout)
        net['prob'] = DenseLayer(net['dropout1'], num_units=4,
                                 nonlinearity=softmax)
    return net
def build_model(x):
    print 'build googlenet'
    net = {}
    net['input'] = InputLayer((None, 3, None, None), x)
    net['conv1/7x7_s2'] = ConvLayer(net['input'], 64, 7, stride=2, pad=3,
                                    flip_filters=False)
    net['pool1/3x3_s2'] = PoolLayer(net['conv1/7x7_s2'], pool_size=3, stride=2,
                                    ignore_border=False)
    net['pool1/norm1'] = LRNLayer(net['pool1/3x3_s2'], alpha=0.00002, k=1)
    net['conv2/3x3_reduce'] = ConvLayer(net['pool1/norm1'], 64, 1,
                                        flip_filters=False)
    net['conv2/3x3'] = ConvLayer(net['conv2/3x3_reduce'], 192, 3, pad=1,
                                 flip_filters=False)
    net['conv2/norm2'] = LRNLayer(net['conv2/3x3'], alpha=0.00002, k=1)
    net['pool2/3x3_s2'] = PoolLayer(net['conv2/norm2'], pool_size=3, stride=2)
    net.update(build_inception_module('inception_3a', net['pool2/3x3_s2'],
                                      [32, 64, 96, 128, 16, 32]))
    net.update(build_inception_module('inception_3b', net['inception_3a/output'],
                                      [64, 128, 128, 192, 32, 96]))
    net['pool3/3x3_s2'] = PoolLayer(net['inception_3b/output'], pool_size=3,
                                    stride=2)
    net.update(build_inception_module('inception_4a', net['pool3/3x3_s2'],
                                      [64, 192, 96, 208, 16, 48]))
    net.update(build_inception_module('inception_4b', net['inception_4a/output'],
                                      [64, 160, 112, 224, 24, 64]))
    net.update(build_inception_module('inception_4c', net['inception_4b/output'],
                                      [64, 128, 128, 256, 24, 64]))
    net.update(build_inception_module('inception_4d', net['inception_4c/output'],
                                      [64, 112, 144, 288, 32, 64]))
    net.update(build_inception_module('inception_4e', net['inception_4d/output'],
                                      [128, 256, 160, 320, 32, 128]))
    net['pool4/3x3_s2'] = PoolLayer(net['inception_4e/output'], pool_size=3,
                                    stride=2)
    net.update(build_inception_module('inception_5a', net['pool4/3x3_s2'],
                                      [128, 256, 160, 320, 32, 128]))
    net.update(build_inception_module('inception_5b', net['inception_5a/output'],
                                      [128, 384, 192, 384, 48, 128]))
    net['pool5/7x7_s1'] = GlobalPoolLayer(net['inception_5b/output'])
    net['loss3/classifier'] = DenseLayer(net['pool5/7x7_s1'], num_units=1000,
                                         nonlinearity=linear)
    net['prob'] = NonlinearityLayer(net['loss3/classifier'], nonlinearity=softmax)
    # Pretrained weights: download from
    # https://s3.amazonaws.com/lasagne/recipes/pretrained/imagenet/blvc_googlenet.pkl
    with open(googlenet_file, 'rb') as f:
        vals = pickle.load(f)
    values = vals['param values']
    lasagne.layers.set_all_param_values(
        net['prob'], [v.astype(np.float32) for v in values])
    return net, vals['synset words']
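# Example use of the pretrained network for classification. The preprocessing
# contract (float32 BGR input with the Caffe channel means subtracted) follows
# the Lasagne recipe for these weights and is assumed here; variable names
# are illustrative.
import numpy as np
import theano
import theano.tensor as T

x = T.tensor4('x')
net, synset_words = build_model(x)
# Deterministic/test-time output of the softmax layer.
prob = lasagne.layers.get_output(net['prob'], deterministic=True)
predict = theano.function([x], prob)

def classify(im):
    # im: float32 array of shape (1, 3, H, W), preprocessed as above.
    p = predict(im)[0]
    return synset_words[p.argmax()]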
def build_model(data_size, num_classes, batch_norm=True):
    net = {}
    net['input'] = InputLayer(data_size)
    net['conv1/7x7_s2'] = ConvLayer(net['input'], 64, 7, stride=2, pad=3,
                                    flip_filters=False)
    if batch_norm:
        net['conv1/7x7_s2'] = normalization.batch_norm(net['conv1/7x7_s2'])
    net['pool1/3x3_s2'] = PoolLayer(net['conv1/7x7_s2'], pool_size=3, stride=2,
                                    ignore_border=False)
    net['pool1/norm1'] = LRNLayer(net['pool1/3x3_s2'], alpha=0.00002, k=1)
    net['conv2/3x3_reduce'] = ConvLayer(net['pool1/norm1'], 64, 1,
                                        flip_filters=False)
    if batch_norm:
        net['conv2/3x3_reduce'] = normalization.batch_norm(
            net['conv2/3x3_reduce'])
    net['conv2/3x3'] = ConvLayer(net['conv2/3x3_reduce'], 192, 3, pad=1,
                                 flip_filters=False)
    if batch_norm:
        net['conv2/3x3'] = normalization.batch_norm(net['conv2/3x3'])
    net['conv2/norm2'] = LRNLayer(net['conv2/3x3'], alpha=0.00002, k=1)
    net['pool2/3x3_s2'] = PoolLayer(net['conv2/norm2'], pool_size=3, stride=2,
                                    ignore_border=False)
    net.update(build_inception_module('inception_3a', net['pool2/3x3_s2'],
                                      [32, 64, 96, 128, 16, 32], batch_norm))
    net.update(build_inception_module('inception_3b', net['inception_3a/output'],
                                      [64, 128, 128, 192, 32, 96], batch_norm))
    net['pool3/3x3_s2'] = PoolLayer(net['inception_3b/output'], pool_size=3,
                                    stride=2, ignore_border=False)
    net.update(build_inception_module('inception_4a', net['pool3/3x3_s2'],
                                      [64, 192, 96, 208, 16, 48], batch_norm))
    net.update(build_inception_module('inception_4b', net['inception_4a/output'],
                                      [64, 160, 112, 224, 24, 64], batch_norm))
    net.update(build_inception_module('inception_4c', net['inception_4b/output'],
                                      [64, 128, 128, 256, 24, 64], batch_norm))
    net.update(build_inception_module('inception_4d', net['inception_4c/output'],
                                      [64, 112, 144, 288, 32, 64], batch_norm))
    net.update(build_inception_module('inception_4e', net['inception_4d/output'],
                                      [128, 256, 160, 320, 32, 128], batch_norm))
    net['pool4/3x3_s2'] = PoolLayer(net['inception_4e/output'], pool_size=3,
                                    stride=2, ignore_border=False)
    net.update(build_inception_module('inception_5a', net['pool4/3x3_s2'],
                                      [128, 256, 160, 320, 32, 128], batch_norm))
    net.update(build_inception_module('inception_5b', net['inception_5a/output'],
                                      [128, 384, 192, 384, 48, 128], batch_norm))
    net['pool5/7x7_s1'] = GlobalPoolLayer(net['inception_5b/output'])
    net['dropout'] = DropoutLayer(net['pool5/7x7_s1'], p=0.4)
    net['loss3/classifier'] = DenseLayer(net['dropout'], num_units=1000,
                                         nonlinearity=linear)
    if batch_norm:
        net['loss3/classifier'] = normalization.batch_norm(
            net['loss3/classifier'])
    net['output'] = DenseLayer(net['loss3/classifier'], num_units=num_classes,
                               nonlinearity=softmax)
    if batch_norm:
        net['output'] = normalization.batch_norm(net['output'])
    return net
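# This variant threads a batch_norm flag into build_inception_module. A
# minimal sketch of such a helper, assuming each convolution is wrapped with
# lasagne.layers.normalization.batch_norm when the flag is set; the exact
# placement is an assumption, not taken from the original code.
from lasagne.layers import normalization

def build_inception_module(name, input_layer, nfilters, batch_norm=False):
    # nfilters: [pool_proj, 1x1, 3x3_reduce, 3x3, 5x5_reduce, 5x5]
    def conv(incoming, num_filters, filter_size, pad=0):
        layer = ConvLayer(incoming, num_filters, filter_size, pad=pad,
                          flip_filters=False)
        return normalization.batch_norm(layer) if batch_norm else layer

    net = {}
    net['pool'] = PoolLayer(input_layer, pool_size=3, stride=1, pad=1)
    net['pool_proj'] = conv(net['pool'], nfilters[0], 1)
    net['1x1'] = conv(input_layer, nfilters[1], 1)
    net['3x3_reduce'] = conv(input_layer, nfilters[2], 1)
    net['3x3'] = conv(net['3x3_reduce'], nfilters[3], 3, pad=1)
    net['5x5_reduce'] = conv(input_layer, nfilters[4], 1)
    net['5x5'] = conv(net['5x5_reduce'], nfilters[5], 5, pad=2)
    net['output'] = ConcatLayer(
        [net['1x1'], net['3x3'], net['5x5'], net['pool_proj']])
    return {'{}/{}'.format(name, k): v for k, v in net.items()}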
def CNN_model():
    net = {}
    net['input'] = InputLayer((None, 3, 64, 64))
    net['conv0/3x3_s1'] = ConvLayer(
        net['input'], 16, 3, stride=1, pad='same',
        W=lasagne.init.HeNormal(gain='relu'))  # 16*64*64
    net['conv0/norm1'] = LRNLayer(net['conv0/3x3_s1'], alpha=0.00002, k=1)
    net['conv1/3x3_s1'] = ConvLayer(
        net['conv0/norm1'], 32, 3, stride=2, pad='same',
        W=lasagne.init.HeNormal(gain='relu'))  # 32*32*32
    net['conv1/norm1'] = LRNLayer(net['conv1/3x3_s1'], alpha=0.00002, k=1)
    net['conv2/3x3_s1'] = ConvLayer(
        net['conv1/norm1'], 32, 3, stride=1, pad='same',
        W=lasagne.init.HeNormal(gain='relu'))  # 32*32*32
    net['conv2/norm1'] = LRNLayer(net['conv2/3x3_s1'], alpha=0.00002, k=1)
    net['conv3/3x3_s1'] = ConvLayer(
        net['conv2/norm1'], 64, 3, stride=2, pad='same',
        W=lasagne.init.HeNormal(gain='relu'))  # 64*16*16
    net['conv3/norm1'] = LRNLayer(net['conv3/3x3_s1'], alpha=0.00002, k=1)
    net['conv4/3x3_s1'] = ConvLayer(
        net['conv3/norm1'], 64, 3, stride=1, pad='same',
        W=lasagne.init.HeNormal(gain='relu'))  # 64*16*16
    net['conv4/norm1'] = LRNLayer(net['conv4/3x3_s1'], alpha=0.00002, k=1)
    net['conv5/3x3_s1'] = ConvLayer(
        net['conv4/norm1'], 128, 3, stride=2, pad='same',
        W=lasagne.init.HeNormal(gain='relu'))  # 128*8*8
    net['conv5/norm1'] = LRNLayer(net['conv5/3x3_s1'], alpha=0.00002, k=1)
    net['conv6/3x3_s1'] = ConvLayer(
        net['conv5/norm1'], 128, 3, stride=1, pad='same',
        W=lasagne.init.HeNormal(gain='relu'))  # 128*8*8
    net['conv6/norm1'] = LRNLayer(net['conv6/3x3_s1'], alpha=0.00002, k=1)
    net['conv7/3x3_s1'] = ConvLayer(
        net['conv6/norm1'], 256, 3, stride=2, pad='same',
        W=lasagne.init.HeNormal(gain='relu'))  # 256*4*4
    net['conv7/norm1'] = LRNLayer(net['conv7/3x3_s1'], alpha=0.00002, k=1)
    net['conv8/3x3_s1'] = ConvLayer(
        net['conv7/norm1'], 256, 3, stride=1, pad='same',
        W=lasagne.init.HeNormal(gain='relu'))  # 256*4*4
    net['conv8/norm1'] = LRNLayer(net['conv8/3x3_s1'], alpha=0.00002, k=1)
    net['conv9/3x3_s1'] = ConvLayer(
        net['conv8/norm1'], 512, 3, stride=2, pad='same',
        W=lasagne.init.HeNormal(gain='relu'))  # 512*2*2
    net['conv9/norm1'] = LRNLayer(net['conv9/3x3_s1'], alpha=0.00002, k=1)
    net['conv10/3x3_s1'] = ConvLayer(
        net['conv9/norm1'], 512, 3, stride=1, pad='same',
        W=lasagne.init.HeNormal(gain='relu'))  # 512*2*2
    net['conv10/norm1'] = LRNLayer(net['conv10/3x3_s1'], alpha=0.00002, k=1)
    net['conv11/3x3_s1'] = ConvLayer(
        net['conv10/norm1'], 1024, 3, stride=2, pad='same',
        W=lasagne.init.HeNormal(gain='relu'))  # 1024*1*1
    # net['conv11/norm1'] = LRNLayer(net['conv11/3x3_s1'], alpha=0.00002, k=1)
    return net
def build_model_vgg_cnn_s(input_shape, verbose):
    '''
    See Lasagne Modelzoo:
    https://github.com/Lasagne/Recipes/blob/master/modelzoo/vgg_cnn_s.py
    '''
    if verbose:
        print 'VGG_cnn_s (from lasagne model zoo)'
    net = {}
    net['input'] = InputLayer(input_shape)
    net['conv1'] = ConvLayer(net['input'], num_filters=96, filter_size=7,
                             stride=2, flip_filters=False)
    net['norm1'] = LRNLayer(net['conv1'],
                            alpha=0.0001)  # caffe has alpha = alpha * pool_size
    net['pool1'] = PoolLayer(net['norm1'], pool_size=3, stride=3,
                             ignore_border=False)
    net['conv2'] = ConvLayer(net['pool1'], num_filters=256, filter_size=5,
                             flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2'], pool_size=2, stride=2,
                             ignore_border=False)
    net['conv3'] = ConvLayer(net['pool2'], num_filters=512, filter_size=3,
                             pad=1, flip_filters=False)
    net['conv4'] = ConvLayer(net['conv3'], num_filters=512, filter_size=3,
                             pad=1, flip_filters=False)
    net['conv5'] = ConvLayer(net['conv4'], num_filters=512, filter_size=3,
                             pad=1, flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5'], pool_size=3, stride=3,
                             ignore_border=False)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['drop6'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['drop6'], num_units=4096)
    net['drop7'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(net['drop7'], num_units=1000,
                            nonlinearity=lasagne.nonlinearities.softmax)
    if verbose:
        for layer in net.values():
            print str(lasagne.layers.get_output_shape(layer))
    return net
def build_model(input, prefix, lastClassNum=67, dropoutratio=0.4,
                classificationFlag=False):
    net = {}
    net[prefix + 'input'] = input
    net[prefix + 'conv1/7x7_s2'] = ConvLayer(
        net[prefix + 'input'], 64, 7, stride=2, pad=3, flip_filters=False,
        name=prefix + 'conv1/7x7_s2')
    net[prefix + 'pool1/3x3_s2'] = PoolLayer(
        net[prefix + 'conv1/7x7_s2'], pool_size=3, stride=2,
        ignore_border=False, name=prefix + 'pool1/3x3_s2')
    net[prefix + 'pool1/norm1'] = LRNLayer(
        net[prefix + 'pool1/3x3_s2'], alpha=0.00002, k=1,
        name=prefix + 'pool1/norm1')
    net[prefix + 'conv2/3x3_reduce'] = ConvLayer(
        net[prefix + 'pool1/norm1'], 64, 1, flip_filters=False,
        name=prefix + 'conv2/3x3_reduce')
    net[prefix + 'conv2/3x3'] = ConvLayer(
        net[prefix + 'conv2/3x3_reduce'], 192, 3, pad=1, flip_filters=False,
        name=prefix + 'conv2/3x3')
    net[prefix + 'conv2/norm2'] = LRNLayer(
        net[prefix + 'conv2/3x3'], alpha=0.00002, k=1,
        name=prefix + 'conv2/norm2')
    net[prefix + 'pool2/3x3_s2'] = PoolLayer(
        net[prefix + 'conv2/norm2'], pool_size=3, stride=2,
        ignore_border=False, name=prefix + 'pool2/3x3_s2')
    net.update(build_inception_module('inception_3a', prefix,
                                      net[prefix + 'pool2/3x3_s2'],
                                      [32, 64, 96, 128, 16, 32]))
    net.update(build_inception_module('inception_3b', prefix,
                                      net[prefix + 'inception_3a/output'],
                                      [64, 128, 128, 192, 32, 96]))
    net[prefix + 'pool3/3x3_s2'] = PoolLayer(
        net[prefix + 'inception_3b/output'], pool_size=3, stride=2,
        ignore_border=False, name=prefix + 'pool3/3x3_s2')
    net.update(build_inception_module('inception_4a', prefix,
                                      net[prefix + 'pool3/3x3_s2'],
                                      [64, 192, 96, 208, 16, 48]))
    net.update(build_inception_module('inception_4b', prefix,
                                      net[prefix + 'inception_4a/output'],
                                      [64, 160, 112, 224, 24, 64]))
    net.update(build_inception_module('inception_4c', prefix,
                                      net[prefix + 'inception_4b/output'],
                                      [64, 128, 128, 256, 24, 64]))
    net.update(build_inception_module('inception_4d', prefix,
                                      net[prefix + 'inception_4c/output'],
                                      [64, 112, 144, 288, 32, 64]))
    net.update(build_inception_module('inception_4e', prefix,
                                      net[prefix + 'inception_4d/output'],
                                      [128, 256, 160, 320, 32, 128]))
    net[prefix + 'pool4/3x3_s2'] = PoolLayer(
        net[prefix + 'inception_4e/output'], pool_size=3, stride=2,
        ignore_border=False, name=prefix + 'pool4/3x3_s2')
    net.update(build_inception_module('inception_5a', prefix,
                                      net[prefix + 'pool4/3x3_s2'],
                                      [128, 256, 160, 320, 32, 128]))
    net.update(build_inception_module('inception_5b', prefix,
                                      net[prefix + 'inception_5a/output'],
                                      [128, 384, 192, 384, 48, 128]))
    net[prefix + 'pool5/7x7_s1'] = GlobalPoolLayer(
        net[prefix + 'inception_5b/output'], name=prefix + 'pool5/7x7_s1')
    net[prefix + 'dropout'] = lasagne.layers.DropoutLayer(
        net[prefix + 'pool5/7x7_s1'], p=dropoutratio, name=prefix + 'dropout')
    if classificationFlag:
        net[prefix + 'loss3/classifier'] = DenseLayer(
            net[prefix + 'dropout'], num_units=lastClassNum, nonlinearity=None,
            name=prefix + 'loss3/classifier')
        net[prefix + 'prob'] = NonlinearityLayer(
            net[prefix + 'loss3/classifier'], nonlinearity=softmax,
            name=prefix + 'prob')
    return net
def build_model():
    net = {}
    net['input'] = InputLayer((None, 3, image_size, image_size))
    net['conv1/7x7_s2'] = ConvLayer(net['input'], 64, 7, stride=2, pad=3,
                                    flip_filters=False)
    net['pool1/3x3_s2'] = PoolLayer(net['conv1/7x7_s2'], pool_size=3, stride=2,
                                    ignore_border=False)
    net['pool1/norm1'] = LRNLayer(net['pool1/3x3_s2'], alpha=0.00002, k=1)
    net['conv2/3x3_reduce'] = ConvLayer(net['pool1/norm1'], 64, 1,
                                        flip_filters=False)
    net['conv2/3x3'] = ConvLayer(net['conv2/3x3_reduce'], 192, 3, pad=1,
                                 flip_filters=False)
    net['conv2/norm2'] = LRNLayer(net['conv2/3x3'], alpha=0.00002, k=1)
    net['pool2/3x3_s2'] = PoolLayer(net['conv2/norm2'], pool_size=3, stride=2)
    net.update(build_inception_module('inception_3a', net['pool2/3x3_s2'],
                                      [32, 64, 96, 128, 16, 32]))
    net.update(build_inception_module('inception_3b', net['inception_3a/output'],
                                      [64, 128, 128, 192, 32, 96]))
    net['pool3/3x3_s2'] = PoolLayer(net['inception_3b/output'], pool_size=3,
                                    stride=2)
    net.update(build_inception_module('inception_4a', net['pool3/3x3_s2'],
                                      [64, 192, 96, 208, 16, 48]))
    net.update(build_inception_module('inception_4b', net['inception_4a/output'],
                                      [64, 160, 112, 224, 24, 64]))
    net.update(build_inception_module('inception_4c', net['inception_4b/output'],
                                      [64, 128, 128, 256, 24, 64]))
    net.update(build_inception_module('inception_4d', net['inception_4c/output'],
                                      [64, 112, 144, 288, 32, 64]))
    net.update(build_inception_module('inception_4e', net['inception_4d/output'],
                                      [128, 256, 160, 320, 32, 128]))
    net['pool4/3x3_s2'] = PoolLayer(net['inception_4e/output'], pool_size=3,
                                    stride=2)
    net.update(build_inception_module('inception_5a', net['pool4/3x3_s2'],
                                      [128, 256, 160, 320, 32, 128]))
    net.update(build_inception_module('inception_5b', net['inception_5a/output'],
                                      [128, 384, 192, 384, 48, 128]))
    net['pool5/7x7_s1'] = GlobalPoolLayer(net['inception_5b/output'])

    # Load pretrained GoogLeNet weights for everything up to the global pool
    # (the first 114 parameter arrays); the new 196-way classifier is trained
    # from scratch.
    import pickle
    model = pickle.load(open(root + 'models/blvc_googlenet.pkl', 'rb'))
    set_all_param_values(net['pool5/7x7_s1'], model['param values'][:114])

    net['loss3/classifier'] = DenseLayer(net['pool5/7x7_s1'], num_units=196,
                                         nonlinearity=linear)
    net['prob'] = NonlinearityLayer(net['loss3/classifier'], nonlinearity=softmax)

    model = NeuralNet(
        layers=net['prob'],
        # use_label_encoder=False,
        # objective_l2=1e-4,  # 1e-3
        # update=lasagne.updates.adam,
        # update_learning_rate=1e-4,
        update=lasagne.updates.nesterov_momentum,
        update_momentum=0.9,
        update_learning_rate=theano.shared(float32(0.03)),  # 1e-4
        train_split=TrainSplit(0.1, random_state=42, stratify=False),
        # batch_iterator_train=train_iterator,
        # batch_iterator_test=test_iterator,
        on_epoch_finished=[
            save_weights,
            save_training_history,
            plot_training_history,
            early_stopping,
            # StepDecay('update_learning_rate', start=1e-2, stop=1e-3)
        ],
        verbose=1,
        max_epochs=200,
        # custom_score=('CRPS', CRPS)
    )
    return model
def build_model():
    net = {}
    net['input'] = InputLayer((None, 3, None, None))
    net['conv1/7x7_s2'] = ConvLayer(net['input'], 64, 7, stride=2, pad=3,
                                    flip_filters=False)
    net['pool1/3x3_s2'] = PoolLayer(net['conv1/7x7_s2'], pool_size=3, stride=2,
                                    ignore_border=False)
    net['pool1/norm1'] = LRNLayer(net['pool1/3x3_s2'], alpha=0.00002, k=1)
    net['conv2/3x3_reduce'] = ConvLayer(net['pool1/norm1'], 64, 1,
                                        flip_filters=False)
    net['conv2/3x3'] = ConvLayer(net['conv2/3x3_reduce'], 192, 3, pad=1,
                                 flip_filters=False)
    net['conv2/norm2'] = LRNLayer(net['conv2/3x3'], alpha=0.00002, k=1)
    net['pool2/3x3_s2'] = PoolLayer(net['conv2/norm2'], pool_size=3, stride=2,
                                    ignore_border=False)
    net.update(build_inception_module('inception_3a', net['pool2/3x3_s2'],
                                      [32, 64, 96, 128, 16, 32]))
    net.update(build_inception_module('inception_3b', net['inception_3a/output'],
                                      [64, 128, 128, 192, 32, 96]))
    net['pool3/3x3_s2'] = PoolLayer(net['inception_3b/output'], pool_size=3,
                                    stride=2, ignore_border=False)
    net.update(build_inception_module('inception_4a', net['pool3/3x3_s2'],
                                      [64, 192, 96, 208, 16, 48]))
    net.update(build_inception_module('inception_4b', net['inception_4a/output'],
                                      [64, 160, 112, 224, 24, 64]))
    net.update(build_inception_module('inception_4c', net['inception_4b/output'],
                                      [64, 128, 128, 256, 24, 64]))
    net.update(build_inception_module('inception_4d', net['inception_4c/output'],
                                      [64, 112, 144, 288, 32, 64]))
    net.update(build_inception_module('inception_4e', net['inception_4d/output'],
                                      [128, 256, 160, 320, 32, 128]))
    net['pool4/3x3_s2'] = PoolLayer(net['inception_4e/output'], pool_size=3,
                                    stride=2, ignore_border=False)
    net.update(build_inception_module('inception_5a', net['pool4/3x3_s2'],
                                      [128, 256, 160, 320, 32, 128]))
    net.update(build_inception_module('inception_5b', net['inception_5a/output'],
                                      [128, 384, 192, 384, 48, 128]))
    net['pool5/7x7_s1'] = GlobalPoolLayer(net['inception_5b/output'])
    # added a dropout layer (mentioned in the publication, missing in the
    # lasagne-code)
    net['pool5_drop_7x7_s1'] = DropoutLayer(net['pool5/7x7_s1'], p=0.5)
    net['loss3/classifier'] = DenseLayer(net['pool5_drop_7x7_s1'],
                                         num_units=1000, nonlinearity=linear)
    net['prob'] = NonlinearityLayer(net['loss3/classifier'], nonlinearity=softmax)
    return net
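# Why the added dropout layer matters: the training-time pass samples dropout
# masks, while the deterministic pass rescales activations instead, so the
# train and eval graphs must be compiled separately. A sketch under assumed
# variable names:
import theano.tensor as T
import lasagne

net = build_model()
input_var = T.tensor4('inputs')
target_var = T.ivector('targets')

# Training output: dropout active.
train_out = lasagne.layers.get_output(net['prob'], input_var)
loss = lasagne.objectives.categorical_crossentropy(train_out,
                                                   target_var).mean()
# Evaluation output: dropout disabled.
test_out = lasagne.layers.get_output(net['prob'], input_var,
                                     deterministic=True)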
def build_GoogLeNet(width, height):
    # Download pretrained weights from:
    # https://s3.amazonaws.com/lasagne/recipes/pretrained/imagenet/blvc_googlenet.pkl
    net = {}
    net['input'] = InputLayer((None, 3, width, height))
    net['conv1/7x7_s2'] = ConvLayer(net['input'], 64, 7, stride=2, pad=3,
                                    nonlinearity=elu, flip_filters=False)
    net['pool1/3x3_s2'] = PoolLayer(net['conv1/7x7_s2'], pool_size=3, stride=2)
    net['pool1/norm1'] = LRNLayer(net['pool1/3x3_s2'], alpha=0.00002, k=1)
    net['conv2/3x3_reduce'] = DropoutLayer(
        ConvLayer(net['pool1/norm1'], 64, 1, nonlinearity=elu,
                  flip_filters=False), p=0.2)
    net['conv2/3x3'] = ConvLayer(net['conv2/3x3_reduce'], 192, 3, pad=1,
                                 nonlinearity=elu, flip_filters=False)
    net['conv2/norm2'] = LRNLayer(net['conv2/3x3'], alpha=0.00002, k=1)
    net['pool2/3x3_s2'] = PoolLayer(net['conv2/norm2'], pool_size=3, stride=2)
    net.update(build_inception_module('inception_3a', net['pool2/3x3_s2'],
                                      [32, 64, 96, 128, 16, 32]))
    net.update(build_inception_module('inception_3b', net['inception_3a/output'],
                                      [64, 128, 128, 192, 32, 96]))
    net['pool3/3x3_s2'] = PoolLayer(net['inception_3b/output'], pool_size=3,
                                    stride=2)
    net.update(build_inception_module('inception_4a', net['pool3/3x3_s2'],
                                      [64, 192, 96, 208, 16, 48]))
    net.update(build_inception_module('inception_4b', net['inception_4a/output'],
                                      [64, 160, 112, 224, 24, 64]))
    net.update(build_inception_module('inception_4c', net['inception_4b/output'],
                                      [64, 128, 128, 256, 24, 64]))
    net.update(build_inception_module('inception_4d', net['inception_4c/output'],
                                      [64, 112, 144, 288, 32, 64]))
    net.update(build_inception_module('inception_4e', net['inception_4d/output'],
                                      [128, 256, 160, 320, 32, 128]))
    net['pool4/3x3_s2'] = PoolLayer(net['inception_4e/output'], pool_size=3,
                                    stride=2)
    net.update(build_inception_module('inception_5a', net['pool4/3x3_s2'],
                                      [128, 256, 160, 320, 32, 128]))
    net.update(build_inception_module('inception_5b', net['inception_5a/output'],
                                      [128, 384, 192, 384, 48, 128]))
    net['pool5/7x7_s1'] = GlobalPoolLayer(net['inception_5b/output'])
    net['loss3/classifier'] = DenseLayer(net['pool5/7x7_s1'], num_units=2,
                                         nonlinearity=linear)
    net['softmax'] = NonlinearityLayer(net['loss3/classifier'],
                                       nonlinearity=softmax)
    return net