def __init__(self, weights=None, augmentation=False):
    """Build the GoogLeNet (Inception v1) feature network.

    The final classifier layers (loss3/classifier and prob) are omitted;
    the network ends at a global-pool + dropout layer, so pre-trained
    weight lists are truncated by two entries before loading.

    Args:
        weights: optional path/handle understood by `_get_weights_from_file`;
            if given, pre-trained parameters are loaded.
        augmentation: forwarded to the superclass constructor.
    """
    super(GoogLeNet, self).__init__(weights, augmentation)

    def build_inception_module(name, input_layer, nfilters):
        # nfilters: (pool_proj, 1x1, 3x3_reduce, 3x3, 5x5_reduce, 5x5)
        # Builds the four parallel inception branches and concatenates
        # them along the channel axis.
        net = {}
        net['pool'] = PoolLayerDNN(input_layer, pool_size=3, stride=1, pad=1)
        net['pool_proj'] = ConvLayer(net['pool'], nfilters[0], 1, flip_filters=False)
        net['1x1'] = ConvLayer(input_layer, nfilters[1], 1, flip_filters=False)
        net['3x3_reduce'] = ConvLayer(input_layer, nfilters[2], 1, flip_filters=False)
        net['3x3'] = ConvLayer(net['3x3_reduce'], nfilters[3], 3, pad=1, flip_filters=False)
        net['5x5_reduce'] = ConvLayer(input_layer, nfilters[4], 1, flip_filters=False)
        net['5x5'] = ConvLayer(net['5x5_reduce'], nfilters[5], 5, pad=2, flip_filters=False)
        net['output'] = lasagne.layers.ConcatLayer([
            net['1x1'],
            net['3x3'],
            net['5x5'],
            net['pool_proj']])
        # Prefix every key with the module name, e.g. 'inception_3a/output'.
        return {'{}/{}'.format(name, k): v for k, v in net.items()}

    net = {}
    net['input'] = lasagne.layers.InputLayer((None, 3, 224, 224))
    # Stem: 7x7/2 conv -> pool -> LRN -> 1x1 reduce -> 3x3 conv -> LRN -> pool
    net['conv1/7x7_s2'] = ConvLayer(net['input'], 64, 7, stride=2, pad=3, flip_filters=False)
    net['pool1/3x3_s2'] = PoolLayer(net['conv1/7x7_s2'], pool_size=3, stride=2, ignore_border=False)
    net['pool1/norm1'] = lasagne.layers.LocalResponseNormalization2DLayer(net['pool1/3x3_s2'], alpha=0.00002, k=1)
    net['conv2/3x3_reduce'] = ConvLayer(net['pool1/norm1'], 64, 1, flip_filters=False)
    net['conv2/3x3'] = ConvLayer(net['conv2/3x3_reduce'], 192, 3, pad=1, flip_filters=False)
    net['conv2/norm2'] = lasagne.layers.LocalResponseNormalization2DLayer(net['conv2/3x3'], alpha=0.00002, k=1)
    net['pool2/3x3_s2'] = PoolLayer(net['conv2/norm2'], pool_size=3, stride=2, ignore_border=False)
    # Inception stages 3, 4 and 5 with the filter counts from the paper.
    net.update(build_inception_module('inception_3a', net['pool2/3x3_s2'], [32, 64, 96, 128, 16, 32]))
    net.update(build_inception_module('inception_3b', net['inception_3a/output'], [64, 128, 128, 192, 32, 96]))
    net['pool3/3x3_s2'] = PoolLayer(net['inception_3b/output'], pool_size=3, stride=2, ignore_border=False)
    net.update(build_inception_module('inception_4a', net['pool3/3x3_s2'], [64, 192, 96, 208, 16, 48]))
    net.update(build_inception_module('inception_4b', net['inception_4a/output'], [64, 160, 112, 224, 24, 64]))
    net.update(build_inception_module('inception_4c', net['inception_4b/output'], [64, 128, 128, 256, 24, 64]))
    net.update(build_inception_module('inception_4d', net['inception_4c/output'], [64, 112, 144, 288, 32, 64]))
    net.update(build_inception_module('inception_4e', net['inception_4d/output'], [128, 256, 160, 320, 32, 128]))
    net['pool4/3x3_s2'] = PoolLayer(net['inception_4e/output'], pool_size=3, stride=2, ignore_border=False)
    net.update(build_inception_module('inception_5a', net['pool4/3x3_s2'], [128, 256, 160, 320, 32, 128]))
    net.update(build_inception_module('inception_5b', net['inception_5a/output'], [128, 384, 192, 384, 48, 128]))
    net['pool5/7x7_s1'] = lasagne.layers.GlobalPoolLayer(net['inception_5b/output'])
    net['dropout5'] = lasagne.layers.DropoutLayer(net['pool5/7x7_s1'], p=0.4)
    self.net = net
    self.out_layer = net['dropout5']
    if self.weights is not None:
        init_weights = self._get_weights_from_file(self.weights, 'param values')
        init_weights = init_weights[:-2]  # since we have chopped off the last two layers of the network (loss3/classifier and prob), we won't need those
        lasagne.layers.set_all_param_values(self.out_layer, init_weights)
def __init__(self, weights=None, augmentation=False):
    """Build the VGG-16 feature network (conv stacks + fc6/fc7).

    The classifier head (fc8 and softmax) is omitted; the network ends at
    the fc7 dropout layer, so pre-trained weight lists are truncated by
    two entries (fc8 W and b) before loading.

    Args:
        weights: optional path/handle understood by `_get_weights_from_file`;
            if given, pre-trained parameters are loaded.
        augmentation: forwarded to the superclass constructor.
    """
    super(VGG16, self).__init__(weights, augmentation)
    net = {}
    net['input'] = lasagne.layers.InputLayer((None, 3, 224, 224))
    # Five conv stages (2-2-3-3-3 conv layers), each followed by 2x2 pooling.
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1, flip_filters=False)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1, flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1, flip_filters=False)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1, flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1, flip_filters=False)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1, flip_filters=False)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1, flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1, flip_filters=False)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1, flip_filters=False)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1, flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1, flip_filters=False)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1, flip_filters=False)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1, flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_3'], 2)
    # Fully-connected feature layers with dropout.
    net['fc6'] = lasagne.layers.DenseLayer(net['pool5'], num_units=4096)
    net['fc6_dropout'] = lasagne.layers.DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = lasagne.layers.DenseLayer(net['fc6_dropout'], num_units=4096)
    net['fc7_dropout'] = lasagne.layers.DropoutLayer(net['fc7'], p=0.5)
    self.net = net
    self.out_layer = net['fc7_dropout']
    if self.weights is not None:
        init_weights = self._get_weights_from_file(self.weights, 'param values')
        init_weights = init_weights[:-2]  # since we have chopped off the last two layers of the network, we won't need those
        lasagne.layers.set_all_param_values(self.out_layer, init_weights)
def conv_pool_cnn_c():
    """Build the Conv-Pool-CNN-C architecture for 32x32 RGB input (CIFAR-style).

    Returns:
        dict mapping layer names to lasagne layers; 'output' is the
        10-way softmax prediction layer.
    """
    net = {}
    last = net['input'] = InputLayer((None, 3, 32, 32))
    last = net['drop_in'] = DropoutLayer(last, p=0.2)
    # Stage 1: three 96-filter 3x3 convolutions.
    last = net['conv1_1'] = ConvLayer(last, num_filters=96, filter_size=3, pad=1, flip_filters=False)
    last = net['conv1_2'] = ConvLayer(last, num_filters=96, filter_size=3, pad=1, flip_filters=False)
    last = net['conv1_3'] = ConvLayer(last, num_filters=96, filter_size=3, pad=1, flip_filters=False)
    # NOTE: despite the 'conv' prefix this key holds a pooling layer;
    # the name is kept for compatibility with existing callers.
    last = net['conv2_1'] = PoolLayer(last, pool_size=3, stride=2)
    last = net['drop2_1'] = DropoutLayer(last, p=0.5)
    # Stage 2: three 192-filter 3x3 convolutions.
    last = net['conv3_1'] = ConvLayer(last, num_filters=192, filter_size=3, pad=1, flip_filters=False)
    last = net['conv3_2'] = ConvLayer(last, num_filters=192, filter_size=3, pad=1, flip_filters=False)
    last = net['conv3_3'] = ConvLayer(last, num_filters=192, filter_size=3, pad=1, flip_filters=False)
    # Same naming quirk: 'conv4_1' is actually a pooling layer.
    last = net['conv4_1'] = PoolLayer(last, pool_size=3, stride=2)
    last = net['drop4_1'] = DropoutLayer(last, p=0.5)
    # Head: 3x3 conv, then two 1x1 convolutions ending in 10 class maps.
    last = net['conv5_1'] = ConvLayer(last, num_filters=192, filter_size=3, pad=1, flip_filters=False)
    last = net['conv6_1'] = ConvLayer(last, num_filters=192, filter_size=1, flip_filters=False)
    last = net['conv7_1'] = ConvLayer(last, num_filters=10, filter_size=1, flip_filters=False)
    last = net['global_avg'] = GlobalPoolLayer(last)
    net['output'] = NonlinearityLayer(last, softmax)
    return net
def initialize_network(width):
    """Build a VGG19-style convolution/pooling stack for square input images.

    Args:
        width: spatial size of the (single) 3-channel input image.

    Returns:
        dict mapping layer names ('input', 'convX_Y', 'poolX') to layers;
        pooling uses average mode as is common for style-transfer uses.
    """
    net = {}
    net['input'] = InputLayer((1, 3, width, width))
    prev = 'input'
    # (stage, number of 3x3 convs, filter count) for the five VGG19 stages.
    for stage, n_convs, n_filters in [(1, 2, 64), (2, 2, 128),
                                      (3, 4, 256), (4, 4, 512), (5, 4, 512)]:
        for i in range(1, n_convs + 1):
            name = 'conv{}_{}'.format(stage, i)
            net[name] = ConvLayer(net[prev], n_filters, 3, pad=1)
            prev = name
        name = 'pool{}'.format(stage)
        net[name] = PoolLayer(net[prev], 2, mode='average_exc_pad')
        prev = name
    return net
def define_vgg19_base(input_var=None, nonlinearity=rectify):
    """Build the VGG19 convolutional base (no fully-connected layers).

    Fix: the original assigned net['conv5_4'] twice with identical
    arguments; the second call built a duplicate ConvLayer whose
    parameters were immediately orphaned. The duplicate is removed.

    Args:
        input_var: optional Theano variable for the input layer.
        nonlinearity: kept for interface compatibility (currently unused
            by this builder — note the ConvLayers use their default).

    Returns:
        (net, layersstr): dict of layers keyed by name, and the ordered
        list of conv-layer names.
    """
    net = {}
    net['input'] = InputLayer((None, 3, None, None), input_var=input_var)
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1)
    net['conv3_4'] = ConvLayer(net['conv3_3'], 256, 3, pad=1)
    net['pool3'] = PoolLayer(net['conv3_4'], 2)
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1)
    net['conv4_4'] = ConvLayer(net['conv4_3'], 512, 3, pad=1)
    net['pool4'] = PoolLayer(net['conv4_4'], 2)
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1)
    net['conv5_4'] = ConvLayer(net['conv5_3'], 512, 3, pad=1)
    layersstr = ['conv1_1', 'conv1_2',
                 'conv2_1', 'conv2_2',
                 'conv3_1', 'conv3_2', 'conv3_3', 'conv3_4',
                 'conv4_1', 'conv4_2', 'conv4_3', 'conv4_4',
                 'conv5_1', 'conv5_2', 'conv5_3', 'conv5_4']
    return net, layersstr
def build_model(self, numclasses):
    """Build a VGG-16 network with a `numclasses`-way softmax head.

    Note: unlike the stock VGG-16, no dropout layers are inserted
    between fc6/fc7/fc8.

    Args:
        numclasses: number of output classes for the final dense layer.

    Returns:
        dict mapping layer names to lasagne layers; 'prob' is the
        softmax output layer.
    """
    # NOTE: perhaps we should save the network architecture along with the weights, since they could be different
    logger.info("Building network")
    net = {}
    net['input'] = InputLayer((None, 3, 224, 224))
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1)
    net['pool5'] = PoolLayer(net['conv5_3'], 2)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['fc7'] = DenseLayer(net['fc6'], num_units=4096)
    # Linear logits; softmax is applied as a separate layer below.
    net['fc8'] = DenseLayer(net['fc7'], num_units=numclasses, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)
    logger.info("Network built")
    return net
def build_small_model():
    """Build a small VGG-like binary classifier for 128x128 RGB input.

    Returns:
        dict of lasagne layers; 'prob' is the 2-way softmax output.
    """
    net = {}
    last = net['input_layer'] = InputLayer((None, 3, 128, 128))
    # Four downsampling stages of 3x3 convolutions and 2x2 pooling.
    last = net['conv1_1'] = ConvLayer(last, 64, 3, pad=1)
    last = net['conv1_2'] = ConvLayer(last, 64, 3, pad=1)
    last = net['pool1'] = PoolLayer(last, 2)
    last = net['conv2_1'] = ConvLayer(last, 128, 3, pad=1)
    last = net['conv2_2'] = ConvLayer(last, 128, 3, pad=1)
    last = net['pool2'] = PoolLayer(last, 2)
    last = net['conv3_1'] = ConvLayer(last, 256, 3, pad=1)
    last = net['conv3_2'] = ConvLayer(last, 256, 3, pad=1)
    last = net['pool3'] = PoolLayer(last, 2)
    last = net['conv4_1'] = ConvLayer(last, 512, 3, pad=1)
    last = net['pool4'] = PoolLayer(last, 2)
    # Small dense head with dropout, ending in a 2-way softmax.
    last = net['fc6'] = DenseLayer(last, num_units=200)
    last = net['fc6_dropout'] = DropoutLayer(last, p=0.5)
    last = net['fc7'] = DenseLayer(last, num_units=100)
    last = net['fc7_dropout'] = DropoutLayer(last, p=0.5)
    last = net['fc8'] = DenseLayer(last, num_units=2, nonlinearity=None)
    net['prob'] = NonlinearityLayer(last, softmax)
    return net
def build_model(input_var, dro=0.5):
    """Build a GoogLeNet-style network for 299x299 input with a single
    sigmoid output (binary prediction).

    Args:
        input_var: Theano variable bound to the input layer.
        dro: dropout probability; applied after global pooling and passed
            to some inception modules.

    Returns:
        dict of lasagne layers; 'prob' is the sigmoid output layer.

    NOTE(review): `build_inception_module` is defined elsewhere; the
    inception_4* calls pass `dro` while the 3* and 5* calls do not —
    presumably the helper has a default for it, but verify, since the
    asymmetry may be unintentional.
    """
    net = {}
    net['input'] = InputLayer((None, 3, 299, 299), input_var=input_var)
    print(net['input'])  # debug: show input layer
    net['conv1/7x7_s2'] = ConvLayer(
        net['input'], 64, 7, stride=2, pad=3, flip_filters=False)
    print(net['conv1/7x7_s2'])  # debug: show stem conv layer
    net['pool1/3x3_s2'] = PoolLayer(
        net['conv1/7x7_s2'], pool_size=3, stride=2, ignore_border=False)
    net['pool1/norm1'] = LRNLayer(net['pool1/3x3_s2'], alpha=0.00002, k=1)
    net['conv2/3x3_reduce'] = ConvLayer(
        net['pool1/norm1'], 64, 1, flip_filters=False)
    net['conv2/3x3'] = ConvLayer(
        net['conv2/3x3_reduce'], 192, 3, pad=1, flip_filters=False)
    net['conv2/norm2'] = LRNLayer(net['conv2/3x3'], alpha=0.00002, k=1)
    net['pool2/3x3_s2'] = PoolLayer(
        net['conv2/norm2'], pool_size=3, stride=2, ignore_border=False)
    # Inception stages with the filter counts of GoogLeNet (Inception v1).
    net.update(build_inception_module('inception_3a',
                                      net['pool2/3x3_s2'],
                                      [32, 64, 96, 128, 16, 32]))
    net.update(build_inception_module('inception_3b',
                                      net['inception_3a/output'],
                                      [64, 128, 128, 192, 32, 96]))
    net['pool3/3x3_s2'] = PoolLayer(
        net['inception_3b/output'], pool_size=3, stride=2, ignore_border=False)
    net.update(build_inception_module('inception_4a',
                                      net['pool3/3x3_s2'],
                                      [64, 192, 96, 208, 16, 48], dro))
    net.update(build_inception_module('inception_4b',
                                      net['inception_4a/output'],
                                      [64, 160, 112, 224, 24, 64], dro))
    net.update(build_inception_module('inception_4c',
                                      net['inception_4b/output'],
                                      [64, 128, 128, 256, 24, 64], dro))
    net.update(build_inception_module('inception_4d',
                                      net['inception_4c/output'],
                                      [64, 112, 144, 288, 32, 64], dro))
    net.update(build_inception_module('inception_4e',
                                      net['inception_4d/output'],
                                      [128, 256, 160, 320, 32, 128], dro))
    net['pool4/3x3_s2'] = PoolLayer(
        net['inception_4e/output'], pool_size=3, stride=2, ignore_border=False)
    net.update(build_inception_module('inception_5a',
                                      net['pool4/3x3_s2'],
                                      [128, 256, 160, 320, 32, 128]))
    net.update(build_inception_module('inception_5b',
                                      net['inception_5a/output'],
                                      [128, 384, 192, 384, 48, 128]))
    net['pool5/7x7_s1'] = GlobalPoolLayer(net['inception_5b/output'])
    net['pool5/7x7_s1_dropout'] = DropoutLayer(net['pool5/7x7_s1'], p=dro)
    # Single linear unit + sigmoid: binary probability output.
    net['loss3/classifier'] = DenseLayer(net['pool5/7x7_s1_dropout'],
                                         num_units=1,
                                         nonlinearity=linear)
    net['prob'] = NonlinearityLayer(net['loss3/classifier'],
                                    nonlinearity=sigmoid)
    return net
def load_vgg19(pkl_filename="vgg19_normalized.pkl"):
    """Build the VGG19 conv/pool base (average pooling) and load
    pre-trained normalized weights from a pickle file.

    Fix: the pickle file handle was previously opened without being
    closed; it is now managed with a `with` block.

    Args:
        pkl_filename: path to a pickle containing a 'param values' list.

    Returns:
        dict of lasagne layers keyed by name, up to 'pool5'.
    """
    net = {}
    net['input'] = InputLayer((1, 3, None, None))
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1)
    net['pool1'] = PoolLayer(net['conv1_2'], 2, mode='average_exc_pad')
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1)
    net['pool2'] = PoolLayer(net['conv2_2'], 2, mode='average_exc_pad')
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1)
    net['conv3_4'] = ConvLayer(net['conv3_3'], 256, 3, pad=1)
    net['pool3'] = PoolLayer(net['conv3_4'], 2, mode='average_exc_pad')
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1)
    net['conv4_4'] = ConvLayer(net['conv4_3'], 512, 3, pad=1)
    net['pool4'] = PoolLayer(net['conv4_4'], 2, mode='average_exc_pad')
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1)
    net['conv5_4'] = ConvLayer(net['conv5_3'], 512, 3, pad=1)
    net['pool5'] = PoolLayer(net['conv5_4'], 2, mode='average_exc_pad')
    with open(pkl_filename, 'rb') as f:
        if bytes == str:  # Python 2
            values = pickle.load(f)['param values']
        else:  # Python 3: the weights were pickled under Python 2
            values = pickle.load(f, encoding='latin1')['param values']
    # 16 conv layers * (W, b) = 32 parameter arrays; extra entries (fc
    # weights, if present) are ignored.
    set_all_param_values(net['pool5'], values[:32])
    return net
def build_model():
    """Build the standard VGG-16 ImageNet classifier.

    Returns:
        dict of lasagne layers; 'prob' is the softmax output over the
        1000 ImageNet classes.
    """
    net = {}
    net['input'] = InputLayer((None, 3, 224, 224))
    prev = 'input'
    # (stage, number of 3x3 convs, filter count) for the five VGG-16 stages.
    for stage, n_convs, n_filters in [(1, 2, 64), (2, 2, 128),
                                      (3, 3, 256), (4, 3, 512), (5, 3, 512)]:
        for i in range(1, n_convs + 1):
            name = 'conv{}_{}'.format(stage, i)
            net[name] = ConvLayer(net[prev], n_filters, 3, pad=1)
            prev = name
        name = 'pool{}'.format(stage)
        net[name] = PoolLayer(net[prev], 2)
        prev = name
    # Classifier head: two dropout-regularized fc layers plus softmax.
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['drop6'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['drop6'], num_units=4096)
    net['drop7'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(net['drop7'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)
    return net
def build_model():
    """Build the VGG19 conv/pool base with average pooling.

    Input is a single 3-channel image of arbitrary spatial size.

    Returns:
        dict of lasagne layers keyed by name, up to 'pool5'.
    """
    net = {}
    net['input'] = InputLayer((1, 3, None, None))
    prev = 'input'
    # (stage, number of 3x3 convs, filter count) for the five VGG19 stages.
    for stage, n_convs, n_filters in [(1, 2, 64), (2, 2, 128),
                                      (3, 4, 256), (4, 4, 512), (5, 4, 512)]:
        for i in range(1, n_convs + 1):
            name = 'conv{}_{}'.format(stage, i)
            net[name] = ConvLayer(net[prev], n_filters, 3, pad=1)
            prev = name
        name = 'pool{}'.format(stage)
        net[name] = PoolLayer(net[prev], 2, mode='average_exc_pad')
        prev = name
    return net
def CNN_model():
    """Build a small 8-conv CNN with LRN after each stage for 64x64 RGB input.

    Fix: the first LRN layer referenced net['conv1/3x3_s2'], a key that
    was never created (the pooling layer that would have produced it is
    commented out below), which raised KeyError. It now takes
    net['conv1/3x3_s1'] as input.

    Returns:
        dict of lasagne layers; 'pool8/4x4_s1' is the global-pooled
        256-dim feature output.
    """
    net = {}
    net['input'] = InputLayer((None, 3, 64, 64))
    net['conv1/3x3_s1'] = ConvLayer(net['input'], 16, 3, stride=2, pad='same',
                                    W=lasagne.init.HeNormal(gain='relu'))  # 16*64*64
    # net['pool1/3x3_s2'] = PoolLayer(net['conv1/3x3_s1'], pool_size=3, stride=2, ignore_border=False)  # 16*32*32
    net['pool1/norm1'] = LRNLayer(net['conv1/3x3_s1'], alpha=0.00002, k=1)
    net['conv2/3x3_s1'] = ConvLayer(net['pool1/norm1'], 32, 3, stride=1, pad='same',
                                    W=lasagne.init.HeNormal(gain='relu'))  # 32*32*32
    net['pool2/3x3_s2'] = PoolLayer(net['conv2/3x3_s1'], pool_size=3, stride=2, ignore_border=False)  # 32*16*16
    net['pool2/norm1'] = LRNLayer(net['pool2/3x3_s2'], alpha=0.00002, k=1)
    net['conv3/3x3_s1'] = ConvLayer(net['pool2/norm1'], 64, 3, stride=1, pad='same',
                                    W=lasagne.init.HeNormal(gain='relu'))  # 64*16*16
    net['pool3/3x3_s2'] = PoolLayer(net['conv3/3x3_s1'], pool_size=3, stride=2, ignore_border=False)  # 64*8*8
    net['pool3/norm1'] = LRNLayer(net['pool3/3x3_s2'], alpha=0.00002, k=1)
    net['conv4/3x3_s1'] = ConvLayer(net['pool3/norm1'], 128, 3, stride=1, pad='same',
                                    W=lasagne.init.HeNormal(gain='relu'))  # 128*8*8
    net['pool4/3x3_s2'] = PoolLayer(net['conv4/3x3_s1'], pool_size=3, stride=2, ignore_border=False)  # 128*4*4
    net['pool4/norm1'] = LRNLayer(net['pool4/3x3_s2'], alpha=0.00002, k=1)
    # Three more 128-filter convs at constant resolution, each with LRN.
    net['conv5/3x3_s1'] = ConvLayer(net['pool4/norm1'], 128, 3, stride=1, pad='same',
                                    W=lasagne.init.HeNormal(gain='relu'))
    net['pool5/norm1'] = LRNLayer(net['conv5/3x3_s1'], alpha=0.00002, k=1)
    net['conv6/3x3_s1'] = ConvLayer(net['pool5/norm1'], 128, 3, stride=1, pad='same',
                                    W=lasagne.init.HeNormal(gain='relu'))
    net['pool6/norm1'] = LRNLayer(net['conv6/3x3_s1'], alpha=0.00002, k=1)
    net['conv7/3x3_s1'] = ConvLayer(net['pool6/norm1'], 128, 3, stride=1, pad='same',
                                    W=lasagne.init.HeNormal(gain='relu'))
    net['pool7/norm1'] = LRNLayer(net['conv7/3x3_s1'], alpha=0.00002, k=1)
    net['conv8/3x3_s1'] = ConvLayer(net['pool7/norm1'], 256, 3, stride=1, pad='same',
                                    W=lasagne.init.HeNormal(gain='relu'))  # 256*4*4
    net['pool8/4x4_s1'] = GlobalPoolLayer(net['conv8/3x3_s1'])  # 256
    return net
def build_vgg(shape, input_var):
    """Build the VGG19 conv/pool base (average pooling) over a given input.

    Args:
        shape: (width, height) of the input image.
        input_var: Theano variable bound to the input layer.

    Returns:
        dict of lasagne layers keyed by name, up to 'pool5'.
    """
    net = {}
    w, h = shape
    net['input'] = InputLayer((None, 3, w, h), input_var=input_var)
    prev = 'input'
    # (stage, number of 3x3 convs, filter count) for the five VGG19 stages.
    for stage, n_convs, n_filters in [(1, 2, 64), (2, 2, 128),
                                      (3, 4, 256), (4, 4, 512), (5, 4, 512)]:
        for i in range(1, n_convs + 1):
            name = 'conv{}_{}'.format(stage, i)
            net[name] = ConvLayer(net[prev], n_filters, 3, pad=1)
            prev = name
        name = 'pool{}'.format(stage)
        net[name] = PoolLayer(net[prev], 2, mode='average_exc_pad')
        prev = name
    return net
def build_model(timesteps, pX, pY):
    """Build a small batch-normalized CNN binary classifier.

    The input is treated as a stack of `timesteps` single-channel frames
    of size pX x pY (timesteps acts as the channel axis).

    Args:
        timesteps: number of input channels/frames.
        pX, pY: spatial dimensions of each frame.

    Returns:
        OrderedDict of lasagne layers in construction order; 'output'
        is a 2-way softmax layer.
    """
    net = OrderedDict()
    net['input'] = InputLayer(shape=(None, timesteps, pX, pY))
    # Three conv stages (32 -> 64 -> 128 filters); every conv is
    # batch-normalized with ReLU and followed by 2x2 max pooling.
    net['conv1_1'] = batch_norm(
        ConvLayer(net['input'], num_filters=32, filter_size=(3, 3),
                  nonlinearity=ReLU, pad='same'))
    net['conv1_2'] = batch_norm(
        ConvLayer(net['conv1_1'], num_filters=32, filter_size=(3, 3),
                  nonlinearity=ReLU, pad='same'))
    net['pool1'] = PoolLayer(net['conv1_2'], pool_size=(2, 2))
    net['conv2_1'] = batch_norm(
        ConvLayer(net['pool1'], num_filters=64, filter_size=(3, 3),
                  nonlinearity=ReLU, pad='same'))
    net['conv2_2'] = batch_norm(
        ConvLayer(net['conv2_1'], num_filters=64, filter_size=(3, 3),
                  nonlinearity=ReLU, pad='same'))
    net['pool2'] = PoolLayer(net['conv2_2'], pool_size=(2, 2))
    net['conv3_1'] = batch_norm(
        ConvLayer(net['pool2'], num_filters=128, filter_size=(3, 3),
                  nonlinearity=ReLU, pad='same'))
    net['conv3_2'] = batch_norm(
        ConvLayer(net['conv3_1'], num_filters=128, filter_size=(3, 3),
                  nonlinearity=ReLU, pad='same'))
    net['pool3'] = PoolLayer(net['conv3_2'], pool_size=(2, 2))
    # Dense head: one hidden layer, dropout before each dense layer.
    net['fcLayer1'] = batch_norm(
        DenseLayer(dropout(net['pool3'], p=0.5), num_units=512,
                   nonlinearity=ReLU))
    net['output'] = DenseLayer(dropout(net['fcLayer1'], p=0.5), num_units=2,
                               nonlinearity=softmax)
    return net
def setup_perceptual(self, input):
    """Use lasagne to create a network of convolution layers using
    pre-trained VGG19 weights.

    Prepends a preprocessing layer that rescales inputs from [-0.5, 0.5]
    to [0, 255] and subtracts the BGR ImageNet channel means, then stacks
    the full VGG19 conv/pool base onto it. All layers are registered in
    self.network; 'mse' aliases the preprocessed input for loss use.
    """
    # BGR channel means used by the Caffe-trained VGG weights.
    offset = np.array([103.939, 116.779, 123.680], dtype=np.float32).reshape((1, 3, 1, 1))
    # Map network-range pixels back to raw 0-255 values, mean-subtracted.
    self.network['percept'] = lasagne.layers.NonlinearityLayer(input, lambda x: ((x + 0.5) * 255.0) - offset)
    self.network['mse'] = self.network['percept']
    self.network['conv1_1'] = ConvLayer(self.network['percept'], 64, 3, pad=1)
    self.network['conv1_2'] = ConvLayer(self.network['conv1_1'], 64, 3, pad=1)
    self.network['pool1'] = PoolLayer(self.network['conv1_2'], 2, mode='max')
    self.network['conv2_1'] = ConvLayer(self.network['pool1'], 128, 3, pad=1)
    self.network['conv2_2'] = ConvLayer(self.network['conv2_1'], 128, 3, pad=1)
    self.network['pool2'] = PoolLayer(self.network['conv2_2'], 2, mode='max')
    self.network['conv3_1'] = ConvLayer(self.network['pool2'], 256, 3, pad=1)
    self.network['conv3_2'] = ConvLayer(self.network['conv3_1'], 256, 3, pad=1)
    self.network['conv3_3'] = ConvLayer(self.network['conv3_2'], 256, 3, pad=1)
    self.network['conv3_4'] = ConvLayer(self.network['conv3_3'], 256, 3, pad=1)
    self.network['pool3'] = PoolLayer(self.network['conv3_4'], 2, mode='max')
    self.network['conv4_1'] = ConvLayer(self.network['pool3'], 512, 3, pad=1)
    self.network['conv4_2'] = ConvLayer(self.network['conv4_1'], 512, 3, pad=1)
    self.network['conv4_3'] = ConvLayer(self.network['conv4_2'], 512, 3, pad=1)
    self.network['conv4_4'] = ConvLayer(self.network['conv4_3'], 512, 3, pad=1)
    self.network['pool4'] = PoolLayer(self.network['conv4_4'], 2, mode='max')
    self.network['conv5_1'] = ConvLayer(self.network['pool4'], 512, 3, pad=1)
    self.network['conv5_2'] = ConvLayer(self.network['conv5_1'], 512, 3, pad=1)
    self.network['conv5_3'] = ConvLayer(self.network['conv5_2'], 512, 3, pad=1)
    self.network['conv5_4'] = ConvLayer(self.network['conv5_3'], 512, 3, pad=1)
def get_net(self):
    """Build the standard VGG-16 ImageNet classifier.

    Fix: removed a dead local (`output_layer`) that aliased net['prob']
    and was never used.

    Returns:
        (net, prob_layer): dict of all layers keyed by name, and the
        softmax 'prob' layer as the network output.
    """
    net = {}
    net['input'] = InputLayer((None, 3, 224, 224))
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1)
    net['pool5'] = PoolLayer(net['conv5_3'], 2)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['fc6_dropout'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['fc6_dropout'], num_units=4096)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(net['fc7_dropout'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)
    return net, net['prob']  # the whole net and the output layer
def build_model():
    """Build the VGG19 conv/pool base (average pooling) and load
    pre-trained normalized weights from 'vgg19_normalized.pkl'.

    Fix: the pickle file was opened in text mode and never closed;
    pickle requires binary mode on Python 3 and the handle leaked. The
    file is now opened with 'rb' inside a `with` block, with the usual
    latin1 decoding on Python 3 for Python-2-era pickles.

    Returns:
        dict of lasagne layers keyed by name, up to 'pool5', with
        weights set.
    """
    print("building model")
    net = {}
    net['input'] = InputLayer((1, 3, IMAGE_W, IMAGE_W))
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1)
    net['pool1'] = PoolLayer(net['conv1_2'], 2, mode='average_exc_pad')
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1)
    net['pool2'] = PoolLayer(net['conv2_2'], 2, mode='average_exc_pad')
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1)
    net['conv3_4'] = ConvLayer(net['conv3_3'], 256, 3, pad=1)
    net['pool3'] = PoolLayer(net['conv3_4'], 2, mode='average_exc_pad')
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1)
    net['conv4_4'] = ConvLayer(net['conv4_3'], 512, 3, pad=1)
    net['pool4'] = PoolLayer(net['conv4_4'], 2, mode='average_exc_pad')
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1)
    net['conv5_4'] = ConvLayer(net['conv5_3'], 512, 3, pad=1)
    net['pool5'] = PoolLayer(net['conv5_4'], 2, mode='average_exc_pad')
    print("model built.")
    print()
    print("Loading weights.")
    with open('vgg19_normalized.pkl', 'rb') as f:
        if bytes == str:  # Python 2
            values = pickle.load(f)['param values']
        else:  # Python 3: weights were pickled under Python 2
            values = pickle.load(f, encoding='latin1')['param values']
    print()
    print("Loaded. Setting the weights.")
    lasagne.layers.set_all_param_values(net['pool5'], values)
    print()
    print("Set.")
    return net
def build_vgg(self):
    '''Builds the VGG-16, 16-layer model as given in the paper:
    "Very Deep Convolutional Networks for Large-Scale Image Recognition"
    and assigns optimal weights from the ImageNet classification
    challenge dataset.

    Original source: https://gist.github.com/ksimonyan/211839e770f7b538e2d8

    Fix: replaced the Python-2-only `print` statement with the print()
    call form used everywhere else in this file (works on both 2 and 3).

    Returns:
        dict of lasagne layers with pre-trained weights set; 'prob' is
        the 1000-way softmax output.
    '''
    net = {}
    net['input'] = InputLayer((None, 3, 224, 224))
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1)
    net['pool5'] = PoolLayer(net['conv5_3'], 2)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['fc7'] = DenseLayer(net['fc6'], num_units=4096)
    net['fc8'] = DenseLayer(net['fc7'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)
    lasagne.layers.set_all_param_values(net['prob'], self.vgg16_params['param values'])
    print("Built VGG-16 model")
    return net
def build_vgg_model():
    """Build the VGG19 conv/pool base with average pooling and
    Caffe-style unflipped filters.

    Input is a single 3-channel image of size IMAGE_W x IMAGE_W
    (module-level constant).

    Returns:
        dict of lasagne layers keyed by name, up to 'pool5'.
    """
    net = {}
    net['input'] = InputLayer((1, 3, IMAGE_W, IMAGE_W))
    prev = 'input'
    # (stage, number of 3x3 convs, filter count) for the five VGG19 stages.
    for stage, n_convs, n_filters in [(1, 2, 64), (2, 2, 128),
                                      (3, 4, 256), (4, 4, 512), (5, 4, 512)]:
        for i in range(1, n_convs + 1):
            name = 'conv{}_{}'.format(stage, i)
            net[name] = ConvLayer(net[prev], n_filters, 3, pad=1, flip_filters=False)
            prev = name
        name = 'pool{}'.format(stage)
        net[name] = PoolLayer(net[prev], 2, mode='average_exc_pad')
        prev = name
    return net
def makeNeuralNet(input_var=None):
    """Build a small AlexNet-like CNN with a single sigmoid output.

    Args:
        input_var: optional Theano variable bound to the input layer.

    Returns:
        (output_layer, net): the final sigmoid DenseLayer and the dict
        of all layers keyed by name.
    """
    net = {}
    net['input'] = InputLayer(shape=(None, 3, 224, 224), input_var=input_var)
    # Batch-normalize the raw input before the first convolution.
    net['bnorm'] = BatchNormLayer(net['input'])
    net['conv1'] = ConvLayer(net['bnorm'], num_filters=96, filter_size=5, stride=2)  # 96*112*112
    net['norm1'] = NormLayer(
        net['conv1'], alpha=0.0001)  # caffe has alpha = alpha * pool_size
    net['pool1'] = PoolLayer(net['norm1'], pool_size=3, stride=3, ignore_border=False)  # 96*37...approx
    net['conv2'] = ConvLayer(net['pool1'], num_filters=256, filter_size=5, pad=1)
    net['pool2'] = PoolLayer(net['conv2'], pool_size=2, stride=2, ignore_border=False)
    # Dense head with light dropout; final single sigmoid unit.
    net['fc6'] = DenseLayer(net['pool2'], num_units=1024)
    net['drop6'] = DropoutLayer(net['fc6'], p=0.2)
    net['_fc7'] = DenseLayer(net['drop6'], num_units=256)
    net['_drop7'] = DropoutLayer(net['_fc7'], p=0.2)
    net['_fc8out'] = DenseLayer(net['_drop7'], num_units=1, nonlinearity=lasagne.nonlinearities.sigmoid)
    output_layer_driver = net['_fc8out']
    return output_layer_driver, net
def build_model_vgg16_3channels(input_var=None):
    """Build a VGG-16 classifier over 3-channel 224x224 input.

    The final dense layer has `num_categories` units (module-level name).
    Returns only the softmax probability layer.
    """
    layers = {'input': InputLayer((None, 3, 224, 224), input_var=input_var)}
    # (layer name, num_filters); None marks a 2x2 pooling layer.
    spec = [
        ('conv1_1', 64), ('conv1_2', 64), ('pool1', None),
        ('conv2_1', 128), ('conv2_2', 128), ('pool2', None),
        ('conv3_1', 256), ('conv3_2', 256), ('conv3_3', 256), ('pool3', None),
        ('conv4_1', 512), ('conv4_2', 512), ('conv4_3', 512), ('pool4', None),
        ('conv5_1', 512), ('conv5_2', 512), ('conv5_3', 512), ('pool5', None),
    ]
    prev = 'input'
    for name, num_filters in spec:
        if num_filters is None:
            layers[name] = PoolLayer(layers[prev], 2)
        else:
            layers[name] = ConvLayer(layers[prev], num_filters, 3, pad=1)
        prev = name
    # Classifier head: two 4096-unit dense layers, then softmax over classes.
    layers['fc6'] = DenseLayer(layers['pool5'], num_units=4096)
    layers['fc7'] = DenseLayer(layers['fc6'], num_units=4096)
    layers['fc8'] = DenseLayer(layers['fc7'], num_units=num_categories, nonlinearity=None)
    layers['prob'] = NonlinearityLayer(layers['fc8'], softmax)
    return layers['prob']
def build_model(input_var):
    """Build VGG-16 with the standard 1000-class ImageNet head.

    flip_filters=False on every convolution matches Caffe-converted
    pretrained weights.  Returns the full name->layer dict.
    """
    layers = {'input': InputLayer((None, 3, 224, 224), input_var=input_var)}
    # (layer name, num_filters); None marks a 2x2 pooling layer.
    spec = [
        ('conv1_1', 64), ('conv1_2', 64), ('pool1', None),
        ('conv2_1', 128), ('conv2_2', 128), ('pool2', None),
        ('conv3_1', 256), ('conv3_2', 256), ('conv3_3', 256), ('pool3', None),
        ('conv4_1', 512), ('conv4_2', 512), ('conv4_3', 512), ('pool4', None),
        ('conv5_1', 512), ('conv5_2', 512), ('conv5_3', 512), ('pool5', None),
    ]
    prev = 'input'
    for name, num_filters in spec:
        if num_filters is None:
            layers[name] = PoolLayer(layers[prev], 2)
        else:
            layers[name] = ConvLayer(layers[prev], num_filters, 3, pad=1, flip_filters=False)
        prev = name
    # Classifier head.
    layers['fc6'] = DenseLayer(layers['pool5'], num_units=4096)
    layers['fc7'] = DenseLayer(layers['fc6'], num_units=4096)
    layers['fc8'] = DenseLayer(layers['fc7'], num_units=1000, nonlinearity=None)
    layers['prob'] = NonlinearityLayer(layers['fc8'], softmax)
    return layers
def build_model_LeNet(input_var=None):
    """Build a GoogLeNet-style network over single-channel 224x224 input.

    Relies on the module-level `build_inception_module` helper and the
    `num_categories` name for the classifier width.  Returns the softmax
    probability layer.
    """
    net = {}

    def add_inception(name, parent_key, nfilters):
        # Splice one inception module into the graph, keyed as 'name/...'.
        net.update(build_inception_module(name, net[parent_key], nfilters))

    # Stem: strided 7x7 conv, pool + LRN, 3x3 conv pair, LRN, pool.
    net['input'] = InputLayer(shape=(None, 1, 224, 224), input_var=input_var)
    net['conv1/7x7_s2'] = ConvLayer(net['input'], 64, 7, stride=2, pad=3)
    net['pool1/3x3_s2'] = PoolLayer(net['conv1/7x7_s2'], pool_size=3, stride=2,
                                    ignore_border=False)
    net['pool1/norm1'] = LRNLayer(net['pool1/3x3_s2'], alpha=0.00002, k=1)
    net['conv2/3x3_reduce'] = ConvLayer(net['pool1/norm1'], 64, 1)
    net['conv2/3x3'] = ConvLayer(net['conv2/3x3_reduce'], 192, 3, pad=1)
    net['conv2/norm2'] = LRNLayer(net['conv2/3x3'], alpha=0.00002, k=1)
    net['pool2/3x3_s2'] = PoolLayer(net['conv2/norm2'], pool_size=3, stride=2)

    # Inception stage 3.
    add_inception('inception_3a', 'pool2/3x3_s2', [32, 64, 96, 128, 16, 32])
    add_inception('inception_3b', 'inception_3a/output', [64, 128, 128, 192, 32, 96])
    net['pool3/3x3_s2'] = PoolLayer(net['inception_3b/output'], pool_size=3, stride=2)

    # Inception stage 4.
    add_inception('inception_4a', 'pool3/3x3_s2', [64, 192, 96, 208, 16, 48])
    add_inception('inception_4b', 'inception_4a/output', [64, 160, 112, 224, 24, 64])
    add_inception('inception_4c', 'inception_4b/output', [64, 128, 128, 256, 24, 64])
    add_inception('inception_4d', 'inception_4c/output', [64, 112, 144, 288, 32, 64])
    add_inception('inception_4e', 'inception_4d/output', [128, 256, 160, 320, 32, 128])
    net['pool4/3x3_s2'] = PoolLayer(net['inception_4e/output'], pool_size=3, stride=2)

    # Inception stage 5, global pooling, and the classifier head.
    add_inception('inception_5a', 'pool4/3x3_s2', [128, 256, 160, 320, 32, 128])
    add_inception('inception_5b', 'inception_5a/output', [128, 384, 192, 384, 48, 128])
    net['pool5/7x7_s1'] = GlobalPoolLayer(net['inception_5b/output'])
    net['loss3/classifier'] = DenseLayer(net['pool5/7x7_s1'],
                                         num_units=num_categories,
                                         nonlinearity=linear)
    net['prob'] = NonlinearityLayer(net['loss3/classifier'], nonlinearity=softmax)
    return net['prob']
def build_resnet():
    """Build a ResNet-50 graph via the module-level residual-block helpers.

    Returns the full name->layer dict, ending in a 1000-way softmax.
    """
    net = {}
    net['input'] = InputLayer((None, 3, 224, 224))

    # Stem: 7x7/2 convolution (with bias) + BN + ReLU, then 3x3/2 max pool.
    sub_net, parent_layer_name = build_simple_block(
        net['input'], ['conv1', 'bn_conv1', 'conv1_relu'],
        64, 7, 2, 3, use_bias=True)
    net.update(sub_net)
    net['pool1'] = PoolLayer(net[parent_layer_name], pool_size=3, stride=2,
                             pad=0, mode='max', ignore_border=False)
    parent_layer_name = 'pool1'

    # Residual stages: (stage index, block letters) -> blocks '2a'..'5c'.
    for stage, letters in (('2', 'abc'), ('3', 'abcd'),
                           ('4', 'abcdef'), ('5', 'abc')):
        for letter in letters:
            first = (letter == 'a')
            if first:
                # Stage 2's first block keeps spatial resolution (ratios 1, 1);
                # later stages halve it in their first block.
                ratio_n, ratio_size = (1, 1) if stage == '2' else (1.0 / 2, 1.0 / 2)
            else:
                ratio_n, ratio_size = 1.0 / 4, 1
            sub_net, parent_layer_name = build_residual_block(
                net[parent_layer_name], ratio_n, ratio_size, first, 4,
                ix='%s%s' % (stage, letter))
            net.update(sub_net)

    # Head: 7x7 average pooling, 1000-way dense layer, softmax.
    net['pool5'] = PoolLayer(net[parent_layer_name], pool_size=7, stride=1,
                             pad=0, mode='average_exc_pad', ignore_border=False)
    net['fc1000'] = DenseLayer(net['pool5'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc1000'], nonlinearity=softmax)
    return net
def setup_model(self, input=None):
    """Use lasagne to create a network of convolution layers, first using
    VGG19 as the framework and then adding augmentations for Semantic
    Style Transfer.

    Stores the name->layer dict on self.network and per-layer filter
    counts on self.channels.
    """
    net, self.channels = {}, {}

    # Primary network for the main image: VGG19 convolutions only,
    # stopping at conv5_4 (the dense layers are unused).
    net['img'] = input or InputLayer((None, 3, None, None))
    # (layer name, num_filters); None marks a 2x2 average-pooling layer.
    vgg_spec = [
        ('conv1_1', 64), ('conv1_2', 64), ('pool1', None),
        ('conv2_1', 128), ('conv2_2', 128), ('pool2', None),
        ('conv3_1', 256), ('conv3_2', 256), ('conv3_3', 256), ('conv3_4', 256), ('pool3', None),
        ('conv4_1', 512), ('conv4_2', 512), ('conv4_3', 512), ('conv4_4', 512), ('pool4', None),
        ('conv5_1', 512), ('conv5_2', 512), ('conv5_3', 512), ('conv5_4', 512),
    ]
    prev = 'img'
    for name, num_filters in vgg_spec:
        if num_filters is None:
            net[name] = PoolLayer(net[prev], 2, mode='average_exc_pad')
        else:
            net[name] = ConvLayer(net[prev], num_filters, 3, pad=1)
        prev = name
    net['main'] = net['conv5_4']

    # Auxiliary network for the semantic layers, and the nearest neighbors
    # calculations.
    net['map'] = InputLayer((1, 1, None, None))
    for block in range(5):
        for unit in range(4):
            if block < 2 and unit > 1:
                continue  # the first two VGG blocks only have two convs
            suffix = '%i_%i' % (block + 1, unit + 1)
            map_key = 'map%i' % (block + 1)
            if unit == 0:
                # Downsample the semantic map to this block's resolution.
                net[map_key] = PoolLayer(net['map'], 2 ** block,
                                         mode='average_exc_pad')
            self.channels[suffix] = net['conv' + suffix].num_filters
            if args.semantic_weight > 0.0:
                # Concatenate the semantic map onto the feature channels.
                net['sem' + suffix] = ConcatLayer(
                    [net['conv' + suffix], net[map_key]])
            else:
                net['sem' + suffix] = net['conv' + suffix]
            # 'dup' feeds precomputed features back in; 'nn' does the 3x3
            # nearest-neighbour matching convolution (no bias).
            net['dup' + suffix] = InputLayer(net['sem' + suffix].output_shape)
            net['nn' + suffix] = ConvLayer(net['dup' + suffix], 1, 3, b=None,
                                           pad=0, flip_filters=False)
    self.network = net
def lasagne_layers_method(self):
    '''
    INPUT: None
    OUTPUT: Dict

    Creates dictionary of vgg_cnn_s model Lasagne layer objects.  Here the
    original output layer (softmax, 1000 classes) has been removed and the
    final layer ('fc7') returns a vector of shape (1, 4096).
    '''
    self.lasagne_layers = {}
    layers = self.lasagne_layers  # local alias; mutations land on self
    layers['input'] = InputLayer((None, 3, 224, 224))
    layers['conv1'] = ConvLayer(layers['input'], num_filters=96,
                                filter_size=7, stride=2, flip_filters=False)
    layers['norm1'] = NormLayer(layers['conv1'], alpha=0.0001)
    layers['pool1'] = PoolLayer(layers['norm1'], pool_size=3, stride=3,
                                ignore_border=False)
    layers['conv2'] = ConvLayer(layers['pool1'], num_filters=256,
                                filter_size=5, flip_filters=False)
    layers['pool2'] = PoolLayer(layers['conv2'], pool_size=2, stride=2,
                                ignore_border=False)
    layers['conv3'] = ConvLayer(layers['pool2'], num_filters=512,
                                filter_size=3, pad=1, flip_filters=False)
    layers['conv4'] = ConvLayer(layers['conv3'], num_filters=512,
                                filter_size=3, pad=1, flip_filters=False)
    layers['conv5'] = ConvLayer(layers['conv4'], num_filters=512,
                                filter_size=3, pad=1, flip_filters=False)
    layers['pool5'] = PoolLayer(layers['conv5'], pool_size=3, stride=3,
                                ignore_border=False)
    layers['fc6'] = DenseLayer(layers['pool5'], num_units=4096)
    layers['drop6'] = DropoutLayer(layers['fc6'], p=0.5)
    layers['fc7'] = DenseLayer(layers['drop6'], num_units=4096)
def Deconv_Tiny_VGG(params, input_var=None):
    # Tied-weight convolutional encoder/decoder ("Tiny VGG" autoencoder).
    #
    # params: flat sequence of pretrained tensors in encoder order:
    #   [conv1_1.W, conv1_1.b, conv1_2.W, conv1_2.b, conv2_1.W, conv2_1.b,
    #    conv2_2.W, conv2_2.b, conv3_1.W, conv3_1.b, conv3_2.W, conv3_2.b]
    # The decoder reuses the encoder's W tensors (with flip_filters=True to
    # undo the encoder's flip_filters=False correlation).
    #
    # NOTE(review): each deconv takes the bias of the encoder layer *below*
    # the one whose W it reuses (e.g. deconv3_2 uses W=params[10] with
    # b=params[9]), and deconv1_1 has no bias at all — presumably a
    # deliberate tied-bias scheme so the bias matches the reconstructed
    # channel count; confirm against the training code.
    net = {}
    net['input'] = InputLayer(shape=(None, 3, 224, 224), input_var=input_var)
    # --- Encoder: three conv blocks of 32 filters, each followed by 2x2 pool.
    net['conv1_1'] = ConvLayer(
        net['input'], 32, 3, pad=1, W=params[0], b=params[1],
        flip_filters=False)
    net['conv1_2'] = ConvLayer(
        net['conv1_1'], 32, 3, pad=1, W=params[2], b=params[3],
        flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(
        net['pool1'], 32, 3, pad=1, W=params[4], b=params[5],
        flip_filters=False)
    net['conv2_2'] = ConvLayer(
        net['conv2_1'], 32, 3, pad=1, W=params[6], b=params[7],
        flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(
        net['pool2'], 32, 3, pad=1, W=params[8], b=params[9],
        flip_filters=False)
    net['conv3_2'] = ConvLayer(
        net['conv3_1'], 32, 3, pad=1, W=params[10], b=params[11],
        flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_2'], 2)
    # deconvolution starts here
    # Each unpool inverts the corresponding pooling layer; each deconv
    # mirrors a conv layer's geometry (filter_size/stride/pad) exactly.
    net['unpool3'] = InverseLayer(net['pool3'], net['pool3'])
    net['deconv3_2'] = DeconvLayer(net['unpool3'], num_filters=32,
                                   filter_size=net['conv3_2'].filter_size,
                                   stride=net['conv3_2'].stride,
                                   crop=net['conv3_2'].pad,
                                   W=params[10], b=params[9],
                                   flip_filters=True)
    net['deconv3_1'] = DeconvLayer(net['deconv3_2'], num_filters=32,
                                   filter_size=net['conv3_1'].filter_size,
                                   stride=net['conv3_1'].stride,
                                   crop=net['conv3_1'].pad,
                                   W=params[8], b=params[7],
                                   flip_filters=True)
    net['unpool2'] = InverseLayer(net['deconv3_1'], net['pool2'])
    net['deconv2_2'] = DeconvLayer(net['unpool2'], num_filters=32,
                                   filter_size=net['conv2_2'].filter_size,
                                   stride=net['conv2_2'].stride,
                                   crop=net['conv2_2'].pad,
                                   W=params[6], b=params[5],
                                   flip_filters=True)
    net['deconv2_1'] = DeconvLayer(net['deconv2_2'], num_filters=32,
                                   filter_size=net['conv2_1'].filter_size,
                                   stride=net['conv2_1'].stride,
                                   crop=net['conv2_1'].pad,
                                   W=params[4], b=params[3],
                                   flip_filters=True)
    net['unpool1'] = InverseLayer(net['deconv2_1'], net['pool1'])
    net['deconv1_2'] = DeconvLayer(net['unpool1'], num_filters=32,
                                   filter_size=net['conv1_2'].filter_size,
                                   stride=net['conv1_2'].stride,
                                   crop=net['conv1_2'].pad,
                                   W=params[2], b=params[1],
                                   flip_filters=True)
    # Final deconv maps back to 3 image channels; no bias term.
    net['deconv1_1'] = DeconvLayer(net['deconv1_2'], num_filters=3,
                                   filter_size=net['conv1_1'].filter_size,
                                   stride=net['conv1_1'].stride,
                                   crop=net['conv1_1'].pad,
                                   W=params[0],
                                   flip_filters=True)
    return net
def network_sm(ip_size, input_var):
    """Build a small two-conv CNN for 2-class classification.

    Parameters
    ----------
    ip_size : (height, width) of the single-channel input.
    input_var : theano variable bound to the InputLayer.

    Returns the name->layer dict; 'prob' is the softmax output layer.
    """
    model = {}
    model['input'] = lasagne.layers.InputLayer(
        shape=(None, 1, ip_size[0], ip_size[1]), input_var=input_var)
    model['conv1'] = ConvLayer(model['input'], 16, 3, pad=0)
    model['pool1'] = PoolLayer(model['conv1'], 2)
    model['conv2'] = ConvLayer(model['pool1'], 16, 3, pad=0)
    model['pool2'] = PoolLayer(model['conv2'], 2)
    # 50% dropout feeds each dense layer (active at training time only).
    model['fc1'] = DenseLayer(lasagne.layers.dropout(model['pool2'], p=0.5),
                              num_units=64,
                              nonlinearity=lasagne.nonlinearities.rectify)
    model['prob'] = DenseLayer(lasagne.layers.dropout(model['fc1'], p=0.5),
                               num_units=2,
                               nonlinearity=lasagne.nonlinearities.softmax)
    return model
def build_model(input_shape):
    """Build a VGG_CNN_S-style 1000-class classifier.

    Parameters
    ----------
    input_shape : shape tuple passed straight to the InputLayer.

    Side effects: prints the output shape of every layer.  Returns the
    name->layer dict; 'fc8' is the softmax output.
    """
    net = {}
    net['input'] = InputLayer(input_shape)
    net['conv1'] = ConvLayer(net['input'], num_filters=96, filter_size=7,
                             stride=2, flip_filters=False)
    # caffe has alpha = alpha * pool_size
    net['norm1'] = LRNLayer(net['conv1'], alpha=0.0001)
    net['pool1'] = PoolLayer(net['norm1'], pool_size=3, stride=3,
                             ignore_border=False)
    net['conv2'] = ConvLayer(net['pool1'], num_filters=256, filter_size=5,
                             flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2'], pool_size=2, stride=2,
                             ignore_border=False)
    net['conv3'] = ConvLayer(net['pool2'], num_filters=512, filter_size=3,
                             pad=1, flip_filters=False)
    net['conv4'] = ConvLayer(net['conv3'], num_filters=512, filter_size=3,
                             pad=1, flip_filters=False)
    net['conv5'] = ConvLayer(net['conv4'], num_filters=512, filter_size=3,
                             pad=1, flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5'], pool_size=3, stride=3,
                             ignore_border=False)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['drop6'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['drop6'], num_units=4096)
    net['drop7'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(net['drop7'], num_units=1000,
                            nonlinearity=lasagne.nonlinearities.softmax)
    # Fix: the original `print str(...)` statement is a SyntaxError under
    # Python 3; the parenthesised form prints identically on Python 2 and 3.
    for layer in net.values():
        print(str(lasagne.layers.get_output_shape(layer)))
    return net
def setup_model(self):
    """Use lasagne to create a network of convolution layers, first using
    VGG19 as the framework and then adding augmentations for Semantic
    Style Transfer.

    Stores the name->layer dict on self.network.
    """
    net = {}

    # Primary network for the main image: VGG19 convolutions only,
    # stopping at conv5_4 (the dense layers are unused).
    net['img'] = InputLayer((1, 3, None, None))
    # (layer name, num_filters); None marks a 2x2 average-pooling layer.
    vgg_spec = [
        ('conv1_1', 64), ('conv1_2', 64), ('pool1', None),
        ('conv2_1', 128), ('conv2_2', 128), ('pool2', None),
        ('conv3_1', 256), ('conv3_2', 256), ('conv3_3', 256), ('conv3_4', 256), ('pool3', None),
        ('conv4_1', 512), ('conv4_2', 512), ('conv4_3', 512), ('conv4_4', 512), ('pool4', None),
        ('conv5_1', 512), ('conv5_2', 512), ('conv5_3', 512), ('conv5_4', 512),
    ]
    prev = 'img'
    for name, num_filters in vgg_spec:
        if num_filters is None:
            net[name] = PoolLayer(net[prev], 2, mode='average_exc_pad')
        else:
            net[name] = ConvLayer(net[prev], num_filters, 3, pad=1)
        prev = name
    net['main'] = net['conv5_4']

    # Auxiliary network for the semantic layers, and the nearest neighbors
    # calculations.
    # NOTE(review): this map input has 3 channels, unlike the single-channel
    # map used elsewhere in this file — confirm against how semantic maps
    # are loaded.
    net['map'] = InputLayer((1, 3, None, None))
    for block in range(5):
        for unit in range(4):
            if block < 2 and unit > 1:
                continue  # the first two VGG blocks only have two convs
            suffix = '%i_%i' % (block + 1, unit + 1)
            # Downsample the map to this block's resolution, then build
            # bias-free 3x3 matching convolutions over features and map.
            net['map' + suffix] = PoolLayer(net['map'], 2 ** block,
                                            mode='average_exc_pad')
            net['nn' + suffix] = ConvLayer(net['conv' + suffix], 1, 3,
                                           b=None, pad=0)
            net['mm' + suffix] = ConvLayer(net['map' + suffix], 1, 3,
                                           b=None, pad=0)
    self.network = net