def _build(self):
    # VGG-16: thirteen 3x3 convolutions in five blocks, then three
    # fully-connected layers and a softmax over the 1000 ImageNet classes.
    layer = layers.InputLayer(shape=(None, 3, 224, 224), input_var=self.X)
    layer = Conv2DDNNLayer(layer, num_filters=64, filter_size=(3, 3), pad=1, flip_filters=False)
    layer = Conv2DDNNLayer(layer, num_filters=64, filter_size=(3, 3), pad=1, flip_filters=False)
    layer = layers.Pool2DLayer(layer, pool_size=(2, 2), mode='max')
    layer = Conv2DDNNLayer(layer, num_filters=128, filter_size=(3, 3), pad=1, flip_filters=False)
    layer = Conv2DDNNLayer(layer, num_filters=128, filter_size=(3, 3), pad=1, flip_filters=False)
    layer = layers.Pool2DLayer(layer, pool_size=(2, 2), mode='max')
    layer = Conv2DDNNLayer(layer, num_filters=256, filter_size=(3, 3), pad=1, flip_filters=False)
    layer = Conv2DDNNLayer(layer, num_filters=256, filter_size=(3, 3), pad=1, flip_filters=False)
    layer = Conv2DDNNLayer(layer, num_filters=256, filter_size=(3, 3), pad=1, flip_filters=False)
    layer = layers.Pool2DLayer(layer, pool_size=(2, 2), mode='max')
    layer = Conv2DDNNLayer(layer, num_filters=512, filter_size=(3, 3), pad=1, flip_filters=False)
    layer = Conv2DDNNLayer(layer, num_filters=512, filter_size=(3, 3), pad=1, flip_filters=False)
    layer = Conv2DDNNLayer(layer, num_filters=512, filter_size=(3, 3), pad=1, flip_filters=False)
    layer = layers.Pool2DLayer(layer, pool_size=(2, 2), mode='max')
    layer = Conv2DDNNLayer(layer, num_filters=512, filter_size=(3, 3), pad=1, flip_filters=False)
    layer = Conv2DDNNLayer(layer, num_filters=512, filter_size=(3, 3), pad=1, flip_filters=False)
    layer = Conv2DDNNLayer(layer, num_filters=512, filter_size=(3, 3), pad=1, flip_filters=False)
    layer = layers.Pool2DLayer(layer, pool_size=(2, 2), mode='max')
    layer = layers.DenseLayer(layer, num_units=4096)
    layer = layers.DropoutLayer(layer, p=0.5)
    layer = layers.DenseLayer(layer, num_units=4096)
    layer = layers.DropoutLayer(layer, p=0.5)
    layer = layers.DenseLayer(layer, num_units=1000)
    layer = layers.NonlinearityLayer(layer, nonlinearity=nonlinearities.softmax)
    return layer
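def compile_vgg16_predict(model):
    """Usage sketch, not part of the original source: compile a deterministic
    forward pass for the VGG-16 graph built by ``model._build()`` above.
    Assumes ``model.X`` is the ``theano.tensor.tensor4`` fed to the InputLayer
    (the ``model`` object itself is hypothetical here)."""
    import theano
    from lasagne import layers
    network = model._build()
    # deterministic=True disables the dropout layers at inference time.
    probs = layers.get_output(network, deterministic=True)
    # The compiled function expects float32 input of shape (batch, 3, 224, 224).
    return theano.function([model.X], probs)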
def build_fft_scale(x, y, size):
    W = []
    # Shared feature extractor; x and y are fed through the same graph
    # (and hence the same weights) via get_output below.
    pnet = ll.InputLayer((None, 3, 101, 101), input_var=None)
    pnet = ll.Conv2DLayer(pnet, 64, (3, 3), pad='same', nonlinearity=None)
    pnet = ll.NonlinearityLayer(ll.BatchNormLayer(pnet))  # default nonlinearity (rectify)
    pnet = ll.Pool2DLayer(pnet, (3, 3), (2, 2))
    pnet = ll.Conv2DLayer(pnet, 64, (3, 3), pad='same', nonlinearity=None)
    pnet = ll.NonlinearityLayer(ll.BatchNormLayer(pnet),
                                nonlinearity=l.nonlinearities.LeakyRectify(0.1))
    pnet = ll.Conv2DLayer(pnet, 32, (3, 3), pad='same', nonlinearity=None)
    pnet = ll.BatchNormLayer(pnet)
    x_p, y_p = ll.get_output(pnet, x), ll.get_output(pnet, y)
    # Customfftlayer (defined elsewhere) correlates the two feature maps in
    # the Fourier domain.
    z_p = Customfftlayer(x_p, y_p)

    net = ll.InputLayer((None, 64, 50, 50), input_var=z_p)
    net = ll.BatchNormLayer(net)
    net = ll.NonlinearityLayer(
        ll.BatchNormLayer(
            ll.Conv2DLayer(net, 64, (5, 5), pad='same', nonlinearity=None)))
    net = ll.Pool2DLayer(net, (2, 2), mode='average_inc_pad')
    net = ll.NonlinearityLayer(
        ll.BatchNormLayer(
            ll.Conv2DLayer(net, 64, (5, 5), pad='same', nonlinearity=None)))
    net = ll.BatchNormLayer(ll.Conv2DLayer(net, 10, (1, 1), nonlinearity=None))

    # Scale head: returns the relative scale change, x_new / x_old - 1.
    p_scale = ll.get_output(net)
    #p_scale = theano.gradient.disconnected_grad(p_scale)
    net_scale = ll.InputLayer((None, 10, 25, 25), p_scale)
    net_scale = ll.DenseLayer(net_scale, 100, b=None,
                              nonlinearity=l.nonlinearities.tanh)
    W.append(net_scale.get_params(regularizable=True)[0])
    net_scale = ll.DenseLayer(net_scale, 2, b=None, nonlinearity=None)

    # Heatmap head: returns a heatmap at 2x upsampling of `size`.
    net_heat = ll.DenseLayer(net, 500, b=None,
                             nonlinearity=l.nonlinearities.tanh)
    W.append(net_heat.get_params(regularizable=True)[0])
    net_heat = ll.DenseLayer(net_heat, size**2, b=None, nonlinearity=None)
    W.append(net_heat.get_params(regularizable=True)[0])
    net_heat = ll.BatchNormLayer(net_heat)
    net_heat = ll.ReshapeLayer(net_heat, ([0], 1, size, size))
    net_heat = ll.Deconv2DLayer(net_heat, 64, (5, 5), (2, 2), b=None,
                                crop='same', nonlinearity=None)
    net_heat = ll.BatchNormLayer(net_heat)
    net_heat = ll.Conv2DLayer(net_heat, 1, (3, 3), b=None, pad='same',
                              nonlinearity=None)
    W.append(net_heat.get_params(regularizable=True)[0])
    return pnet, net_scale, net_heat, W
def build_autoencoder_network():
    input_var = T.tensor4('input_var')

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(5, 5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 120, filter_size=(5, 5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer, pool_size=(2, 2), stride=2, mode='average_inc_pad')
    layer = batch_norm(layers.Conv2DLayer(layer, 240, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 320, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer, pool_size=(2, 2), stride=2, mode='average_inc_pad')
    layer = batch_norm(layers.Conv2DLayer(layer, 640, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    prely = batch_norm(layers.Conv2DLayer(layer, 1024, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))

    featm = batch_norm(layers.Conv2DLayer(prely, 640, filter_size=(1, 1), nonlinearity=leaky_rectify))
    feat_map = batch_norm(layers.Conv2DLayer(featm, 100, filter_size=(1, 1), nonlinearity=rectify, name="feat_map"))
    maskm = batch_norm(layers.Conv2DLayer(prely, 100, filter_size=(1, 1), nonlinearity=leaky_rectify))
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1, filter_size=(1, 1), nonlinearity=None), beta=None, gamma=None)
    mask_map = SoftThresPerc(mask_rep, perc=98.4, alpha=0.1, beta=init.Constant(0.5), tight=100.0, name="mask_map")
    layer = ChInnerProdMerge(feat_map, mask_map, name="encoder")

    layer = batch_norm(layers.Deconv2DLayer(layer, 1024, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 240, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 120, filter_size=(5, 5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(5, 5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1, 1), stride=1, crop='same', nonlinearity=identity)

    glblf = batch_norm(layers.Conv2DLayer(prely, 128, filter_size=(1, 1), nonlinearity=leaky_rectify))
    glblf = layers.Pool2DLayer(glblf, pool_size=(5, 5), stride=5, mode='average_inc_pad')
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Conv2DLayer(glblf, 5, filter_size=(1, 1), nonlinearity=rectify), name="global_feature")
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 256, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(9, 9), stride=5, crop=(2, 2), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1, 1), stride=1, crop='same', nonlinearity=identity)

    layer = layers.ElemwiseSumLayer([layer, glblf])
    network = ReshapeLayer(layer, ([0], -1))
    mask_var = lasagne.layers.get_output(mask_map)
    output_var = lasagne.layers.get_output(network)
    return network, input_var, mask_var, output_var
def build_autoencoder_network():
    input_var = T.tensor4('input_var')

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(5, 5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 120, filter_size=(5, 5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer, pool_size=(2, 2), stride=2, mode='average_inc_pad')
    layer = batch_norm(layers.Conv2DLayer(layer, 240, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 320, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer, pool_size=(2, 2), stride=2, mode='average_inc_pad')
    layer = batch_norm(layers.Conv2DLayer(layer, 640, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    prely = batch_norm(layers.Conv2DLayer(layer, 1024, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))

    featm = batch_norm(layers.Conv2DLayer(prely, 640, filter_size=(1, 1), nonlinearity=leaky_rectify))
    feat_map = batch_norm(layers.Conv2DLayer(featm, 100, filter_size=(1, 1), nonlinearity=rectify, name="feat_map"))
    layer = feat_map

    layer = batch_norm(layers.Deconv2DLayer(layer, 1024, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 240, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 120, filter_size=(5, 5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(5, 5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1, 1), stride=1, crop='same', nonlinearity=identity)

    glblf = batch_norm(layers.Conv2DLayer(prely, 128, filter_size=(1, 1), nonlinearity=leaky_rectify))
    glblf = layers.Pool2DLayer(glblf, pool_size=(5, 5), stride=5, mode='average_inc_pad')
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Conv2DLayer(glblf, 5, filter_size=(1, 1), nonlinearity=rectify), name="global_feature")
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 256, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(9, 9), stride=5, crop=(2, 2), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1, 1), stride=1, crop='same', nonlinearity=identity)

    layer = layers.ElemwiseSumLayer([layer, glblf])
    network = ReshapeLayer(layer, ([0], -1))
    output_var = lasagne.layers.get_output(network)
    return network, input_var, output_var
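def compile_autoencoder_trainer(learning_rate=1e-3):
    """Usage sketch, not part of the original source: compile one Adam training
    step for ``build_autoencoder_network`` above, using mean squared
    reconstruction error. The learning rate is an assumed placeholder."""
    import theano
    import lasagne
    network, input_var, output_var = build_autoencoder_network()
    # The network output is the reconstruction flattened to (batch, 3*PS*PS),
    # so flatten the input the same way for a pixel-wise loss.
    target = input_var.flatten(2)
    loss = lasagne.objectives.squared_error(output_var, target).mean()
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.adam(loss, params, learning_rate=learning_rate)
    return theano.function([input_var], loss, updates=updates)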
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var')
    target_var = T.imatrix('targets')
    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    layer = layers.Conv2DLayer(layer, 100, filter_size=(5, 5), stride=1, nonlinearity=leaky_rectify)
    layer = layers.Conv2DLayer(layer, 100, filter_size=(5, 5), stride=1, nonlinearity=leaky_rectify)
    layer = layers.Conv2DLayer(layer, 120, filter_size=(4, 4), stride=1, nonlinearity=leaky_rectify)
    layer = layers.MaxPool2DLayer(layer, pool_size=(3, 3), stride=2)
    layer = layers.Conv2DLayer(layer, 240, filter_size=(3, 3), stride=1, nonlinearity=leaky_rectify)
    layer = layers.Conv2DLayer(layer, 320, filter_size=(3, 3), stride=1, nonlinearity=leaky_rectify)
    layer = layers.Conv2DLayer(layer, 320, filter_size=(3, 3), stride=1, nonlinearity=leaky_rectify)
    layer = layers.Conv2DLayer(layer, 320, filter_size=(3, 3), stride=1, nonlinearity=leaky_rectify)
    layer = layers.Conv2DLayer(layer, 320, filter_size=(3, 3), stride=1, nonlinearity=leaky_rectify)
    layer = layers.Conv2DLayer(layer, 320, filter_size=(3, 3), stride=1, nonlinearity=leaky_rectify)
    layer = layers.Conv2DLayer(layer, 320, filter_size=(3, 3), stride=1, nonlinearity=leaky_rectify)
    layer = layers.Conv2DLayer(layer, 320, filter_size=(3, 3), stride=1, nonlinearity=leaky_rectify)
    layer = layers.Conv2DLayer(layer, 480, filter_size=(3, 3), stride=1, nonlinearity=leaky_rectify)
    layer = layers.Conv2DLayer(layer, 480, filter_size=(3, 3), stride=1, nonlinearity=leaky_rectify)
    layer = layers.Conv2DLayer(layer, 480, filter_size=(3, 3), stride=1, nonlinearity=leaky_rectify)
    layer = layers.Conv2DLayer(layer, 480, filter_size=(3, 3), stride=1, nonlinearity=leaky_rectify)
    layer = layers.Pool2DLayer(layer, pool_size=(20, 20), stride=20, mode='average_inc_pad')
    network = layers.DenseLayer(layer, classn, nonlinearity=sigmoid)
    return network, input_var, target_var
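def compile_classifier_trainer(classn, learning_rate=5e-4):
    """Usage sketch, not part of the original source: compile training and
    inference functions for the classifier above. The sigmoid output with
    ``T.imatrix`` targets suggests multi-label classification, so binary
    cross-entropy is used here; the optimizer and learning rate are assumptions."""
    import theano
    import lasagne
    network, input_var, target_var = build_network_from_ae(classn)
    output = lasagne.layers.get_output(network)
    loss = lasagne.objectives.binary_crossentropy(output, target_var).mean()
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        loss, params, learning_rate=learning_rate, momentum=0.9)
    train_fn = theano.function([input_var, target_var], loss, updates=updates)
    predict_fn = theano.function(
        [input_var], lasagne.layers.get_output(network, deterministic=True))
    return train_fn, predict_fn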
def build_TOY(x, y):
    z_p = T.concatenate((x, y), axis=1)
    net = ll.InputLayer((None, 2, 100, 100), input_var=z_p)
    net = ll.BatchNormLayer(net)
    net = ll.NonlinearityLayer(
        ll.BatchNormLayer(
            ll.Conv2DLayer(net, 64, (5, 5), pad='same', nonlinearity=None)))
    net = ll.Pool2DLayer(net, (2, 2), mode='average_inc_pad')
    net = ll.NonlinearityLayer(
        ll.BatchNormLayer(
            ll.Conv2DLayer(net, 64, (5, 5), pad='same', nonlinearity=None)))
    net = ll.Pool2DLayer(net, (2, 2), mode='average_inc_pad')
    net = ll.BatchNormLayer(ll.Conv2DLayer(net, 10, (1, 1), nonlinearity=None))
    net = ll.DenseLayer(net, 625, b=None, nonlinearity=None)
    net = ll.ReshapeLayer(net, ([0], 1, 25, 25))
    return net
def build_correlation_fft(x, y, size):
    # Shared feature extractor applied to both inputs.
    pnet = ll.InputLayer((None, 3, 101, 101), input_var=None)
    pnet = ll.BatchNormLayer(pnet)
    pnet = ll.Conv2DLayer(pnet, 64, (3, 3), pad='same', nonlinearity=None)
    pnet = ll.NonlinearityLayer(ll.BatchNormLayer(pnet),
                                nonlinearity=l.nonlinearities.LeakyRectify(0.1))
    pnet = ll.Pool2DLayer(pnet, (3, 3), stride=(2, 2))
    pnet = ll.Conv2DLayer(pnet, 64, (3, 3), pad='same', nonlinearity=None)
    pnet = ll.NonlinearityLayer(ll.BatchNormLayer(pnet),
                                nonlinearity=l.nonlinearities.LeakyRectify(0.1))
    pnet = ll.Conv2DLayer(pnet, 32, (3, 3), pad='same', nonlinearity=None)
    pnet = ll.BatchNormLayer(pnet)
    x_p, y_p = ll.get_output(pnet, x), ll.get_output(pnet, y)

    # Correlate in the Fourier domain: XX = conj(X)*X and XY = conj(X)*Y,
    # assembled from the real/imaginary parts in the last axis of rfft.
    x_p, y_p = fft.rfft(x_p, 'ortho'), fft.rfft(y_p, 'ortho')
    XX, XY = T.zeros_like(x_p), T.zeros_like(y_p)
    XX = T.set_subtensor(
        XX[:, :, :, :, 0],
        x_p[:, :, :, :, 0] * x_p[:, :, :, :, 0] +
        x_p[:, :, :, :, 1] * x_p[:, :, :, :, 1])
    XY = T.set_subtensor(
        XY[:, :, :, :, 0],
        x_p[:, :, :, :, 0] * y_p[:, :, :, :, 0] +
        x_p[:, :, :, :, 1] * y_p[:, :, :, :, 1])
    XY = T.set_subtensor(
        XY[:, :, :, :, 1],
        x_p[:, :, :, :, 0] * y_p[:, :, :, :, 1] -
        x_p[:, :, :, :, 1] * y_p[:, :, :, :, 0])
    xx = fft.irfft(XX, 'ortho')  # auto-correlation of x
    xy = fft.irfft(XY, 'ortho')  # cross-correlation of x and y
    z_p = T.concatenate((xx, xy), axis=1)
    z_p *= T.constant(hanningwindow(50))  # window to suppress boundary effects

    net = ll.InputLayer((None, 64, 50, 50), input_var=z_p)
    net = ll.BatchNormLayer(net)
    net = ll.NonlinearityLayer(
        ll.BatchNormLayer(
            ll.Conv2DLayer(net, 64, (5, 5), pad='same', nonlinearity=None)))
    net = ll.Pool2DLayer(net, (2, 2), mode='average_inc_pad')
    net = ll.NonlinearityLayer(
        ll.BatchNormLayer(
            ll.Conv2DLayer(net, 64, (5, 5), pad='same', nonlinearity=None)))
    net = ll.BatchNormLayer(ll.Conv2DLayer(net, 10, (1, 1), nonlinearity=None))
    net = ll.DenseLayer(net, size**2, b=None, nonlinearity=None)
    net = ll.ReshapeLayer(net, ([0], 1, size, size))
    return pnet, net
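def _check_fft_correlation():
    """Sanity check, not part of the original source: the XX/XY spectra above
    are conj(X)*X and conj(X)*Y, whose inverse FFT is the circular
    (auto/cross-)correlation. Verified here against a direct computation."""
    import numpy as np
    rng = np.random.RandomState(0)
    a = rng.randn(8, 8)
    b = rng.randn(8, 8)
    A, B = np.fft.fft2(a), np.fft.fft2(b)
    corr_fft = np.real(np.fft.ifft2(np.conj(A) * B))
    # Direct circular cross-correlation: c[d] = sum_n a[n] * b[(n + d) mod N].
    corr_direct = np.zeros_like(a)
    for dy in range(8):
        for dx in range(8):
            corr_direct[dy, dx] = np.sum(a * np.roll(np.roll(b, -dy, 0), -dx, 1))
    assert np.allclose(corr_fft, corr_direct)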
def _build(self):
    # Network-in-Network on 32x32 inputs (CIFAR-style); `nin` (defined
    # elsewhere) is a convolution followed by two 1x1 "cccp" convolutions.
    layer = layers.InputLayer(shape=(None, 3, 32, 32), input_var=self.X)
    layer = nin(layer, conv_filters=192, filter_size=(5, 5), pad=2,
                cccp1_filters=160, cccp2_filters=96)
    layer = layers.Pool2DLayer(layer, pool_size=(3, 3), stride=2, pad=(0, 0),
                               ignore_border=False, mode='max')
    layer = layers.DropoutLayer(layer, p=0.5)
    layer = nin(layer, conv_filters=192, filter_size=(5, 5), pad=2,
                cccp1_filters=192, cccp2_filters=192)
    layer = layers.Pool2DLayer(layer, pool_size=(3, 3), stride=2,
                               ignore_border=False, mode='average_exc_pad')
    layer = layers.DropoutLayer(layer, p=0.5)
    layer = nin(layer, conv_filters=192, filter_size=(3, 3), pad=1,
                cccp1_filters=192, cccp2_filters=10)
    layer = layers.Pool2DLayer(layer, pool_size=(8, 8), stride=1,
                               ignore_border=False, mode='average_exc_pad')
    layer = layers.flatten(layer, outdim=2)
    layer = layers.NonlinearityLayer(layer, nonlinearity=nonlinearities.softmax)
    return layer
def _build(self):
    layer = layers.InputLayer(shape=(None, 1, 28, 28), input_var=self.X)
    layer = layers.Conv2DLayer(layer, num_filters=128, filter_size=(1, 1), stride=(1, 1), pad='same',
                               untie_biases=False, W=init.GlorotUniform(), b=init.Constant(0.),
                               nonlinearity=nonlinearities.rectify)
    layer = layers.Conv2DLayer(layer, num_filters=128, filter_size=(1, 1), stride=(1, 1), pad='same',
                               untie_biases=False, W=init.GlorotUniform(), b=init.Constant(0.),
                               nonlinearity=nonlinearities.rectify)
    layer = layers.Pool2DLayer(layer, pool_size=(2, 2), stride=None, pad=(0, 0),
                               ignore_border=False, mode='average_exc_pad')
    layer = layers.Conv2DLayer(layer, num_filters=512, filter_size=(1, 1), stride=(1, 1), pad='same',
                               untie_biases=False, W=init.GlorotUniform(), b=init.Constant(0.),
                               nonlinearity=nonlinearities.rectify)
    layer = layers.Conv2DLayer(layer, num_filters=512, filter_size=(1, 1), stride=(1, 1), pad='same',
                               untie_biases=False, W=init.GlorotUniform(), b=init.Constant(0.),
                               nonlinearity=nonlinearities.rectify)
    layer = layers.Pool2DLayer(layer, pool_size=(2, 2), stride=None, pad=(0, 0),
                               ignore_border=False, mode='average_exc_pad')
    layer = layers.Conv2DLayer(layer, num_filters=2048, filter_size=(1, 1), stride=(1, 1), pad='same',
                               untie_biases=False, W=init.GlorotUniform(), b=init.Constant(0.),
                               nonlinearity=nonlinearities.rectify)
    layer = layers.Conv2DLayer(layer, num_filters=2048, filter_size=(1, 1), stride=(1, 1), pad='same',
                               untie_biases=False, W=init.GlorotUniform(), b=init.Constant(0.),
                               nonlinearity=nonlinearities.rectify)
    layer = layers.Pool2DLayer(layer, pool_size=(2, 2), stride=None, pad=(0, 0),
                               ignore_border=False, mode='max')
    # The flatten layer is optional here; DenseLayer flattens its input automatically.
    layer = layers.flatten(layer, outdim=2)
    layer = layers.DropoutLayer(layer, p=0.5)
    layer = layers.DenseLayer(layer, num_units=256, W=init.GlorotUniform(), b=init.Constant(0.),
                              nonlinearity=nonlinearities.rectify)
    layer = layers.DropoutLayer(layer, p=0.5)
    layer = layers.DenseLayer(layer, num_units=10, W=init.GlorotUniform(), b=init.Constant(0.),
                              nonlinearity=nonlinearities.softmax)
    return layer
def build_mlp(input_var=None):
    # The simplest network from the tutorial; add layers once you have
    # observed the accuracy on your data.
    l_in = L.InputLayer((None, 784), input_var=input_var)
    # 784 = 28 * 28; for your own images, replace it with the product of
    # their dimensions (e.g. 22500 for 150x150).
    l_shape = L.ReshapeLayer(l_in, (-1, 1, 28, 28))
    l_conv = L.Conv2DLayer(l_shape, num_filters=6, filter_size=(2, 2),
                           stride=(1, 1), pad=0,
                           nonlinearity=l.nonlinearities.rectify)
    pool = L.Pool2DLayer(l_conv, 2)
    l_output = L.DenseLayer(pool, num_units=10,
                            nonlinearity=l.nonlinearities.softmax)
    return l_output
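def train_mlp_example():
    """Usage sketch, not part of the original source: compile ``build_mlp``
    above and run one SGD step on randomly generated MNIST-shaped data."""
    import numpy as np
    import theano
    import theano.tensor as T
    import lasagne
    x, y = T.matrix('x'), T.ivector('y')
    network = build_mlp(x)
    prediction = lasagne.layers.get_output(network)
    loss = lasagne.objectives.categorical_crossentropy(prediction, y).mean()
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.sgd(loss, params, learning_rate=0.1)
    train_fn = theano.function([x, y], loss, updates=updates)
    images = np.random.rand(32, 784).astype('float32')
    labels = np.random.randint(0, 10, size=32).astype('int32')
    return train_fn(images, labels)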
def __init__(self, input, poolsize=(2, 2), stride=None, padding=(0, 0), mode='max'):
    """Allocate a PoolLayer."""
    self.input = input
    self.output = layers.Pool2DLayer(self.input, poolsize, stride=stride,
                                     pad=padding, ignore_border=True, mode=mode)
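def _pool_layer_example():
    """Usage sketch, not part of the original source; it assumes the enclosing
    class of the ``__init__`` above is named ``PoolLayer`` (the class name is
    not shown). 2x2 max pooling halves each spatial dimension."""
    from lasagne import layers
    incoming = layers.InputLayer((None, 16, 28, 28))
    pool = PoolLayer(incoming, poolsize=(2, 2))
    # With stride=None the stride defaults to the pool size, so the
    # output shape is (None, 16, 14, 14).
    return layers.get_output_shape(pool.output)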
def build_transition_down(incoming, reduction, p=0.1,
                          W_init=lasagne.init.GlorotUniform(), b_init=None):
    """Builds a transition-down block for the DenseNet model.

    A transition is the sequence: batch normalization, 1x1 convolution,
    2x2 pooling with stride 2. The channels are compressed by the factor
    ``reduction``, with 0 < reduction <= 1, so that
    ``num_filters = channels * reduction``.
    """
    num_filters = int(incoming.output_shape[1] * reduction)
    network = nn.BatchNormLayer(incoming)
    network = nn.NonlinearityLayer(network, lasagne.nonlinearities.rectify)
    network = nn.Conv2DLayer(network, num_filters, 1, W=W_init, b=b_init)
    if p > 0:
        network = nn.DropoutLayer(network, p=p)
    # Note: the DenseNet paper uses average pooling here; this version uses max.
    return nn.Pool2DLayer(network, 2, 2, mode='max')
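def _transition_example():
    """Shape sketch, not part of the original source: with ``reduction=0.5``
    the transition halves both the channel count and the spatial resolution."""
    import lasagne.layers as nn
    incoming = nn.InputLayer((None, 128, 32, 32))
    network = build_transition_down(incoming, reduction=0.5)
    # 128 * 0.5 = 64 channels; 2x2 pooling with stride 2 gives 16x16.
    return nn.get_output_shape(network)  # (None, 64, 16, 16)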
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.signal.pool import pool_2d
import lasagne as l
import lasagne.layers as ll


def correlate(X, Y, label, dx, dy):
    # Inferred tail of a data-preparation routine: X and Y are FFT'd batches.
    # The original function header was not preserved; this signature is a
    # guess from the variables used below.
    X_conj = np.conj(X)
    #Y_conj = np.conj(Y)
    #print X_conj is X
    #X_conj = copy.deepcopy(X)
    #Y_conj = copy.deepcopy(Y)
    XX = np.array(np.real(np.fft.ifftn(X_conj * X, axes=(2, 3), norm='ortho')), dtype='float32')
    XY = np.array(np.real(np.fft.ifftn(X_conj * Y, axes=(2, 3), norm='ortho')), dtype='float32')
    XX = np.fft.ifftshift(XX, axes=(2, 3))
    XY = np.fft.ifftshift(XY, axes=(2, 3))
    return XX, XY, label, dx, dy


frame, targets = T.tensor4(), T.tensor4()
net = ll.InputLayer((None, 2, 100, 100), input_var=frame)
net = ll.Conv2DLayer(net, 32, (5, 5), b=None, pad='same')
net = ll.Pool2DLayer(net, (2, 2), mode='average_inc_pad')
net = ll.Conv2DLayer(net, 8, (3, 3), b=None, pad='same',
                     nonlinearity=l.nonlinearities.LeakyRectify(0.1))
net = ll.Pool2DLayer(net, (2, 2), mode='average_inc_pad')
net = ll.DenseLayer(net, 625, b=None, nonlinearity=None)
net = ll.ReshapeLayer(net, ([0], 1, 25, 25))
predict = ll.get_output(net)
# Pool the targets down to the 25x25 prediction resolution.
targets_pool = pool_2d(targets, ds=(4, 4), mode='average_inc_pad')
loss = T.mean((predict - targets_pool) ** 2)
params = ll.get_all_params(net, trainable=True)
updates = l.updates.adam(loss, params, 0.01)
train_f = theano.function([frame, targets], [loss, predict], updates=updates)
data = premnist()
errlist = []
def resblock(net_in, filters, kernel_size, stride=1, preactivated=True, block_id=1, name=''):
    # Show input shape
    #log.p(("\t\t" + name + " IN SHAPE:", l.get_output_shape(net_in)), new_line=False)

    # Pre-activation (the first block operates on the raw input)
    if block_id > 1:
        net_pre = l.NonlinearityLayer(net_in, nonlinearity=nl.rectify)
    else:
        net_pre = net_in

    # Pre-activated shortcut?
    if preactivated:
        net_in = net_pre

    # Bottleneck convolution
    if stride > 1:
        net_pre = l.batch_norm(
            l.Conv2DLayer(net_pre, num_filters=l.get_output_shape(net_pre)[1],
                          filter_size=1, pad='same', stride=1, nonlinearity=nl.rectify))

    # First convolution
    net = l.batch_norm(
        l.Conv2DLayer(net_pre, num_filters=l.get_output_shape(net_pre)[1],
                      filter_size=kernel_size, pad='same', stride=1, nonlinearity=nl.rectify))

    # Pooling layer (downsampling)
    if stride > 1:
        net = l.MaxPool2DLayer(net, pool_size=(stride, stride))

    # Dropout layer
    net = l.DropoutLayer(net)

    # Second convolution (linear; the sum is activated by the next block)
    net = l.batch_norm(
        l.Conv2DLayer(net, num_filters=filters, filter_size=kernel_size,
                      pad='same', stride=1, nonlinearity=None))

    # Shortcut layer
    if not l.get_output_shape(net) == l.get_output_shape(net_in):
        # Average pooling
        shortcut = l.Pool2DLayer(net_in, pool_size=(stride, stride),
                                 stride=stride, mode='average_exc_pad')
        # Shortcut convolution
        shortcut = l.batch_norm(
            l.Conv2DLayer(shortcut, num_filters=filters, filter_size=1,
                          pad='same', stride=1, nonlinearity=None))
    else:
        # Shortcut = input
        shortcut = net_in

    # Merge layer
    out = l.ElemwiseSumLayer([net, shortcut])

    # Show output shape
    #log.p(("OUT SHAPE:", l.get_output_shape(out), "LAYER:", len(l.get_all_layers(out)) - 1))
    return out
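def build_resnet_stage_example():
    """Usage sketch, not part of the original source: stack ``resblock`` into
    one small pre-activation residual stage (filter counts are illustrative)."""
    import lasagne.layers as l
    net = l.InputLayer((None, 3, 64, 64))
    net = l.batch_norm(l.Conv2DLayer(net, 32, 3, pad='same'))
    # The first block of a stage downsamples (stride=2) and widens to 64
    # filters via the projection shortcut; later blocks keep the shape.
    net = resblock(net, filters=64, kernel_size=3, stride=2, block_id=1)
    net = resblock(net, filters=64, kernel_size=3, stride=1, block_id=2)
    return net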
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var')

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(5, 5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 120, filter_size=(5, 5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer, pool_size=(2, 2), stride=2, mode='average_inc_pad')
    layer = batch_norm(layers.Conv2DLayer(layer, 240, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 320, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer, pool_size=(2, 2), stride=2, mode='average_inc_pad')
    layer = batch_norm(layers.Conv2DLayer(layer, 640, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    prely = batch_norm(layers.Conv2DLayer(layer, 1024, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))

    featm = batch_norm(layers.Conv2DLayer(prely, 640, filter_size=(1, 1), nonlinearity=leaky_rectify))
    feat_map = batch_norm(layers.Conv2DLayer(featm, 100, filter_size=(1, 1), nonlinearity=rectify, name="feat_map"))
    mask_map = feat_map
    enlyr = feat_map

    layer = batch_norm(layers.Deconv2DLayer(enlyr, 1024, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 240, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 120, filter_size=(5, 5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(5, 5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1, 1), stride=1, crop='same', nonlinearity=identity)

    glblf = batch_norm(layers.Conv2DLayer(prely, 128, filter_size=(1, 1), nonlinearity=leaky_rectify))
    glblf = layers.Pool2DLayer(glblf, pool_size=(5, 5), stride=5, mode='average_inc_pad')
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    gllyr = batch_norm(layers.Conv2DLayer(glblf, 5, filter_size=(1, 1), nonlinearity=rectify), name="global_feature")
    glblf = batch_norm(layers.Deconv2DLayer(gllyr, 256, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(9, 9), stride=5, crop=(2, 2), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1, 1), stride=1, crop='same', nonlinearity=identity)

    layer = layers.ElemwiseSumLayer([layer, glblf])
    network = ReshapeLayer(layer, ([0], -1))
    layers.set_all_param_values(network, pickle.load(open(filename_model_ae, 'rb')))
    old_params = layers.get_all_params(network, trainable=True)

    # Adding more layers
    aug_var = T.matrix('aug_var')
    target_var = T.imatrix('targets')
    add_a = batch_norm(layers.Conv2DLayer(enlyr, 320, filter_size=(1, 1), nonlinearity=leaky_rectify))
    add_b = batch_norm(layers.Conv2DLayer(add_a, 320, filter_size=(1, 1), nonlinearity=leaky_rectify))
    add_c = batch_norm(layers.Conv2DLayer(add_b, 320, filter_size=(1, 1), nonlinearity=leaky_rectify))
    add_d = batch_norm(layers.Conv2DLayer(add_c, 320, filter_size=(1, 1), nonlinearity=leaky_rectify))
    add_0 = layers.Pool2DLayer(add_d, pool_size=(15, 15), stride=15, mode='average_inc_pad')
    add_1 = batch_norm(layers.DenseLayer(add_0, 100, nonlinearity=leaky_rectify))
    add_2 = batch_norm(layers.DenseLayer(gllyr, 320, nonlinearity=leaky_rectify))
    add_3 = batch_norm(layers.DenseLayer(add_2, 320, nonlinearity=leaky_rectify))
    add_4 = batch_norm(layers.DenseLayer(add_3, 100, nonlinearity=leaky_rectify))
    aug_layer = layers.InputLayer(shape=(None, aug_fea_n), input_var=aug_var)
    cat_layer = lasagne.layers.ConcatLayer([add_1, add_4, aug_layer], axis=1)
    hidden_layer = layers.DenseLayer(cat_layer, 80, nonlinearity=leaky_rectify)
    network = layers.DenseLayer(hidden_layer, classn, nonlinearity=sigmoid)

    all_params = layers.get_all_params(network, trainable=True)
    new_params = [x for x in all_params if x not in old_params]
    return network, new_params, input_var, aug_var, target_var
def build_autoencoder_network():
    input_var = T.tensor4('input_var')

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5, 5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5, 5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5, 5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5, 5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    prely = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))

    featm = batch_norm(layers.Conv2DLayer(prely, 180, filter_size=(1, 1), nonlinearity=leaky_rectify))
    feat_map = batch_norm(layers.Conv2DLayer(featm, 120, filter_size=(1, 1), nonlinearity=rectify, name="feat_map"))
    maskm = batch_norm(layers.Conv2DLayer(prely, 120, filter_size=(1, 1), nonlinearity=leaky_rectify))
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1, filter_size=(1, 1), nonlinearity=None), beta=None, gamma=None)
    mask_map = SoftThresPerc(mask_rep, perc=99.9, alpha=0.5, beta=init.Constant(0.5), tight=100.0, name="mask_map")
    layer = ChInnerProdMerge(feat_map, mask_map, name="encoder")

    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5, 5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5, 5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5, 5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5, 5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1, 1), stride=1, crop='same', nonlinearity=identity)

    glblf = batch_norm(layers.Conv2DLayer(prely, 100, filter_size=(1, 1), nonlinearity=leaky_rectify))
    glblf = layers.Pool2DLayer(glblf, pool_size=(5, 5), stride=5, mode='average_inc_pad')
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Conv2DLayer(glblf, 3, filter_size=(1, 1), nonlinearity=rectify), name="global_feature")
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(9, 9), stride=5, crop=(2, 2), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 48, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 48, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 48, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1, 1), stride=1, crop='same', nonlinearity=identity)

    layer = layers.ElemwiseSumLayer([layer, glblf])
    network = ReshapeLayer(layer, ([0], -1))
    layers.set_all_param_values(network, pickle.load(open(filename_model_ae, 'rb')))

    feat_var = lasagne.layers.get_output(feat_map, deterministic=True)
    mask_var = lasagne.layers.get_output(mask_map, deterministic=True)
    outp_var = lasagne.layers.get_output(network, deterministic=True)
    return network, input_var, feat_var, mask_var, outp_var
def build_autoencoder_network():
    input_var = T.tensor4('input_var')

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5, 5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5, 5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5, 5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5, 5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    prely = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))

    featm = batch_norm(layers.Conv2DLayer(prely, 180, filter_size=(1, 1), nonlinearity=leaky_rectify))
    feat_map = batch_norm(layers.Conv2DLayer(featm, 120, filter_size=(1, 1), nonlinearity=rectify, name="feat_map"))
    maskm = batch_norm(layers.Conv2DLayer(prely, 100, filter_size=(1, 1), nonlinearity=leaky_rectify))
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1, filter_size=(1, 1), nonlinearity=None), beta=None, gamma=None)
    mask_map = SoftThresPerc(mask_rep, perc=90.0, alpha=0.5, beta=init.Constant(0.1), tight=100.0, name="mask_map")
    layer = ChInnerProdMerge(feat_map, mask_map, name="encoder")

    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5, 5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5, 5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5, 5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5, 5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1, 1), stride=1, crop='same', nonlinearity=identity)

    glblf = batch_norm(layers.Conv2DLayer(prely, 100, filter_size=(1, 1), nonlinearity=leaky_rectify))
    glblf = layers.Pool2DLayer(glblf, pool_size=(20, 20), stride=20, mode='average_inc_pad')
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Conv2DLayer(glblf, 3, filter_size=(1, 1), nonlinearity=rectify), name="global_feature")
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = layers.Upscale2DLayer(glblf, scale_factor=20)
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 48, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 48, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 48, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1, 1), stride=1, crop='same', nonlinearity=identity)

    layer = layers.ElemwiseSumLayer([layer, glblf])
    network = ReshapeLayer(layer, ([0], -1))
    mask_var = lasagne.layers.get_output(mask_map)
    output_var = lasagne.layers.get_output(network)
    return network, input_var, mask_var, output_var
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var')

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(5, 5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 120, filter_size=(5, 5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer, pool_size=(2, 2), stride=2, mode='average_inc_pad')
    layer = batch_norm(layers.Conv2DLayer(layer, 240, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 320, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer, pool_size=(2, 2), stride=2, mode='average_inc_pad')
    layer = batch_norm(layers.Conv2DLayer(layer, 640, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    prely = batch_norm(layers.Conv2DLayer(layer, 1024, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))

    featm = batch_norm(layers.Conv2DLayer(prely, 640, filter_size=(1, 1), nonlinearity=leaky_rectify))
    feat_map = batch_norm(layers.Conv2DLayer(featm, 100, filter_size=(1, 1), nonlinearity=rectify, name="feat_map"))
    maskm = batch_norm(layers.Conv2DLayer(prely, 100, filter_size=(1, 1), nonlinearity=leaky_rectify))
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1, filter_size=(1, 1), nonlinearity=None), beta=None, gamma=None)
    mask_map = SoftThresPerc(mask_rep, perc=0.0, alpha=0.1, beta=init.Constant(0.5), tight=100.0, bias=-10, name="mask_map")
    enlyr = ChInnerProdMerge(feat_map, mask_map, name="encoder")

    layer = batch_norm(layers.Deconv2DLayer(enlyr, 1024, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 240, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 120, filter_size=(5, 5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(5, 5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1, 1), stride=1, crop='same', nonlinearity=identity)

    glblf = batch_norm(layers.Conv2DLayer(prely, 128, filter_size=(1, 1), nonlinearity=leaky_rectify))
    glblf = layers.Pool2DLayer(glblf, pool_size=(5, 5), stride=5, mode='average_inc_pad')
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    gllyr = batch_norm(layers.Conv2DLayer(glblf, 5, filter_size=(1, 1), nonlinearity=rectify), name="global_feature")
    glblf = batch_norm(layers.Deconv2DLayer(gllyr, 256, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(9, 9), stride=5, crop=(2, 2), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1, 1), stride=1, crop='same', nonlinearity=identity)

    layer = layers.ElemwiseSumLayer([layer, glblf])
    network = ReshapeLayer(layer, ([0], -1))
    layers.set_all_param_values(network, pickle.load(open(filename_model_ae, 'rb')))
    mask_map.beta.set_value(np.float32(-10.0 * mask_map.beta.get_value()))

    # Adding more layers
    aug_var = T.matrix('aug_var')
    target_var = T.imatrix('targets')
    add_a = layers.Conv2DLayer(enlyr, 320, filter_size=(1, 1), nonlinearity=leaky_rectify)
    add_b = layers.Conv2DLayer(add_a, 320, filter_size=(1, 1), nonlinearity=leaky_rectify)
    add_c = layers.Conv2DLayer(add_b, 320, filter_size=(1, 1), nonlinearity=leaky_rectify)
    add_d = layers.Conv2DLayer(add_c, 320, filter_size=(1, 1), nonlinearity=leaky_rectify)
    add_0 = layers.Pool2DLayer(add_d, pool_size=(15, 15), stride=15, mode='average_inc_pad')
    add_1 = layers.DenseLayer(add_0, 100, nonlinearity=leaky_rectify)
    add_2 = layers.DenseLayer(gllyr, 320, nonlinearity=leaky_rectify)
    add_3 = layers.DenseLayer(add_2, 320, nonlinearity=leaky_rectify)
    add_4 = layers.DenseLayer(add_3, 100, nonlinearity=leaky_rectify)
    aug_layer = layers.InputLayer(shape=(None, aug_fea_n), input_var=aug_var)
    cat_layer = lasagne.layers.ConcatLayer([add_1, add_4, aug_layer], axis=1)
    hidden_layer = layers.DenseLayer(cat_layer, 80, nonlinearity=leaky_rectify)
    network = layers.DenseLayer(hidden_layer, classn, nonlinearity=sigmoid)

    new_params = [add_a.W, add_a.b, add_b.W, add_b.b, add_c.W, add_c.b, add_d.W, add_d.b,
                  add_1.W, add_1.b, add_2.W, add_2.b, add_3.W, add_3.b, add_4.W, add_4.b,
                  hidden_layer.W, hidden_layer.b, network.W, network.b]
    return network, new_params, input_var, aug_var, target_var
def build_network_from_ae(classn):
    input_var = theano.tensor.tensor4('input_var')
    net = {}
    net['input'] = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)

    # Encoding part
    net['conv1_1'] = batch_norm(layers.Conv2DLayer(net['input'], 64, filter_size=(3, 3), stride=1, pad=1, nonlinearity=leaky_rectify))
    net['conv1_2'] = batch_norm(layers.Conv2DLayer(net['conv1_1'], 64, filter_size=(3, 3), stride=1, pad=1, nonlinearity=leaky_rectify))
    net['pool1'] = layers.Pool2DLayer(net['conv1_2'], pool_size=(2, 2), stride=2, mode='max')
    net['conv2_1'] = batch_norm(layers.Conv2DLayer(net['pool1'], 128, filter_size=(3, 3), stride=1, pad=1, nonlinearity=leaky_rectify))
    net['conv2_2'] = batch_norm(layers.Conv2DLayer(net['conv2_1'], 128, filter_size=(3, 3), stride=1, pad=1, nonlinearity=leaky_rectify))
    net['pool2'] = layers.Pool2DLayer(net['conv2_2'], pool_size=(2, 2), stride=2, mode='max')
    net['conv3_1'] = batch_norm(layers.Conv2DLayer(net['pool2'], 256, filter_size=(3, 3), stride=1, pad=1, nonlinearity=leaky_rectify))
    net['conv3_2'] = batch_norm(layers.Conv2DLayer(net['conv3_1'], 256, filter_size=(3, 3), stride=1, pad=1, nonlinearity=leaky_rectify))
    net['conv3_3'] = batch_norm(layers.Conv2DLayer(net['conv3_2'], 256, filter_size=(3, 3), stride=1, pad=1, nonlinearity=leaky_rectify))
    net['pool3'] = layers.Pool2DLayer(net['conv3_3'], pool_size=(2, 2), stride=2, mode='max')
    net['conv4_1'] = batch_norm(layers.Conv2DLayer(net['pool3'], 512, filter_size=(3, 3), stride=1, pad=1, nonlinearity=leaky_rectify))
    net['conv4_2'] = batch_norm(layers.Conv2DLayer(net['conv4_1'], 512, filter_size=(3, 3), stride=1, pad=1, nonlinearity=leaky_rectify))
    net['conv4_3'] = batch_norm(layers.Conv2DLayer(net['conv4_2'], 512, filter_size=(3, 3), stride=1, pad=1, nonlinearity=leaky_rectify))
    net['pool4'] = layers.Pool2DLayer(net['conv4_3'], pool_size=(2, 2), stride=2, mode='max')
    net['conv5_1'] = batch_norm(layers.Conv2DLayer(net['pool4'], 512, filter_size=(3, 3), stride=1, pad=1, nonlinearity=leaky_rectify))
    net['conv5_2'] = batch_norm(layers.Conv2DLayer(net['conv5_1'], 512, filter_size=(3, 3), stride=1, pad=1, nonlinearity=leaky_rectify))
    net['conv5_3'] = batch_norm(layers.Conv2DLayer(net['conv5_2'], 512, filter_size=(3, 3), stride=1, pad=1, nonlinearity=leaky_rectify))
    net['pool5'] = layers.Pool2DLayer(net['conv5_3'], pool_size=(2, 2), stride=2, mode='max')
    net['fc6'] = batch_norm(layers.Conv2DLayer(net['pool5'], 4096, filter_size=(7, 7), stride=1, pad='same', nonlinearity=leaky_rectify))
    # fc7 is the encoding layer
    net['fc7'] = batch_norm(layers.Conv2DLayer(net['fc6'], 4096, filter_size=(1, 1), stride=1, pad='same', nonlinearity=leaky_rectify))

    # Decoding part
    net['fc6_deconv'] = batch_norm(layers.Deconv2DLayer(net['fc7'], 512, filter_size=(7, 7), stride=1, crop='same', nonlinearity=leaky_rectify))
    net['unpool5'] = layers.InverseLayer(net['fc6_deconv'], net['pool5'])
    net['deconv5_1'] = batch_norm(layers.Deconv2DLayer(net['unpool5'], 512, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    net['deconv5_2'] = batch_norm(layers.Deconv2DLayer(net['deconv5_1'], 512, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    net['deconv5_3'] = batch_norm(layers.Deconv2DLayer(net['deconv5_2'], 512, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    net['unpool4'] = layers.InverseLayer(net['deconv5_3'], net['pool4'])
    net['deconv4_1'] = batch_norm(layers.Deconv2DLayer(net['unpool4'], 512, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    net['deconv4_2'] = batch_norm(layers.Deconv2DLayer(net['deconv4_1'], 512, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    net['deconv4_3'] = batch_norm(layers.Deconv2DLayer(net['deconv4_2'], 256, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    net['unpool3'] = layers.InverseLayer(net['deconv4_3'], net['pool3'])
    net['deconv3_1'] = batch_norm(layers.Deconv2DLayer(net['unpool3'], 256, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    net['deconv3_2'] = batch_norm(layers.Deconv2DLayer(net['deconv3_1'], 256, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    net['deconv3_3'] = batch_norm(layers.Deconv2DLayer(net['deconv3_2'], 128, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    net['unpool2'] = layers.InverseLayer(net['deconv3_3'], net['pool2'])
    net['deconv2_1'] = batch_norm(layers.Deconv2DLayer(net['unpool2'], 128, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    net['deconv2_2'] = batch_norm(layers.Deconv2DLayer(net['deconv2_1'], 64, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    net['unpool1'] = layers.InverseLayer(net['deconv2_2'], net['pool1'])
    net['deconv1_1'] = batch_norm(layers.Deconv2DLayer(net['unpool1'], 64, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    net['deconv1_2'] = batch_norm(layers.Deconv2DLayer(net['deconv1_1'], 64, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))

    # Segmentation layer
    net['seg_score'] = layers.Deconv2DLayer(net['deconv1_2'], 1, filter_size=(1, 1), stride=1, crop='same', nonlinearity=lasagne.nonlinearities.sigmoid)
    net['score_flat'] = ReshapeLayer(net['seg_score'], ([0], -1))

    output_var = lasagne.layers.get_output(net['score_flat'])
    all_param = lasagne.layers.get_all_params(net['score_flat'], trainable=True)
    target_var = T.fmatrix('targets')
    #return network, input_var, output_var, all_param
    return net, all_param, input_var, target_var
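def compile_segmentation_trainer(classn, learning_rate=1e-4):
    """Usage sketch, not part of the original source: compile one Adam step
    for the segmentation builder above, using pixel-wise binary cross-entropy
    on the flattened sigmoid scores. The learning rate is an assumption."""
    import theano
    import lasagne
    net, all_param, input_var, target_var = build_network_from_ae(classn)
    output = lasagne.layers.get_output(net['score_flat'])
    loss = lasagne.objectives.binary_crossentropy(output, target_var).mean()
    updates = lasagne.updates.adam(loss, all_param, learning_rate=learning_rate)
    return theano.function([input_var, target_var], loss, updates=updates)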
def build_cnn():
    """VGG-19 CNN network.

    Paper: Very Deep Convolutional Networks for Large-Scale Image Recognition.
    """
    flip = argv.filters
    net = {}
    net['input'] = layers.InputLayer((1, 3, IMAGE_DIM, IMAGE_DIM))
    net['conv1_1'] = Conv2DDNNLayer(net['input'], 64, 3, pad=1, flip_filters=flip)
    net['conv1_2'] = Conv2DDNNLayer(net['conv1_1'], 64, 3, pad=1, flip_filters=flip)
    net['pool1'] = layers.Pool2DLayer(net['conv1_2'], 2, mode='average_exc_pad')
    net['conv2_1'] = Conv2DDNNLayer(net['pool1'], 128, 3, pad=1, flip_filters=flip)
    net['conv2_2'] = Conv2DDNNLayer(net['conv2_1'], 128, 3, pad=1, flip_filters=flip)
    net['pool2'] = layers.Pool2DLayer(net['conv2_2'], 2, mode='average_exc_pad')
    net['conv3_1'] = Conv2DDNNLayer(net['pool2'], 256, 3, pad=1, flip_filters=flip)
    net['conv3_2'] = Conv2DDNNLayer(net['conv3_1'], 256, 3, pad=1, flip_filters=flip)
    net['conv3_3'] = Conv2DDNNLayer(net['conv3_2'], 256, 3, pad=1, flip_filters=flip)
    net['conv3_4'] = Conv2DDNNLayer(net['conv3_3'], 256, 3, pad=1, flip_filters=flip)
    net['pool3'] = layers.Pool2DLayer(net['conv3_4'], 2, mode='average_exc_pad')
    net['conv4_1'] = Conv2DDNNLayer(net['pool3'], 512, 3, pad=1, flip_filters=flip)
    net['conv4_2'] = Conv2DDNNLayer(net['conv4_1'], 512, 3, pad=1, flip_filters=flip)
    net['conv4_3'] = Conv2DDNNLayer(net['conv4_2'], 512, 3, pad=1, flip_filters=flip)
    net['conv4_4'] = Conv2DDNNLayer(net['conv4_3'], 512, 3, pad=1, flip_filters=flip)
    net['pool4'] = layers.Pool2DLayer(net['conv4_4'], 2, mode='average_exc_pad')
    net['conv5_1'] = Conv2DDNNLayer(net['pool4'], 512, 3, pad=1, flip_filters=flip)
    net['conv5_2'] = Conv2DDNNLayer(net['conv5_1'], 512, 3, pad=1, flip_filters=flip)
    net['conv5_3'] = Conv2DDNNLayer(net['conv5_2'], 512, 3, pad=1, flip_filters=flip)
    net['conv5_4'] = Conv2DDNNLayer(net['conv5_3'], 512, 3, pad=1, flip_filters=flip)
    net['pool5'] = layers.Pool2DLayer(net['conv5_4'], 2, mode='average_exc_pad')
    return net
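def compile_feature_extractor(layer_name='conv4_2'):
    """Usage sketch, not part of the original source: compile a function that
    extracts activations from one VGG-19 layer, e.g. as a content
    representation for style transfer. Pretrained weights would normally be
    loaded into ``net`` before use; the layer name is an assumed example."""
    import theano
    import theano.tensor as T
    import lasagne
    net = build_cnn()
    input_var = T.tensor4('input')
    # Passing `inputs` maps the variable onto the network's InputLayer.
    features = lasagne.layers.get_output(net[layer_name], inputs=input_var)
    return theano.function([input_var], features)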
def build_uunet_network(network, input_var, num_classes, num_filters=64,
                        nonlin=lasagne.nonlinearities.rectify,  # Would leaky_rectify improve the network?
                        noise=0.0,
                        W_init=lasagne.init.GlorotUniform(),
                        b_init=lasagne.init.Constant(0.01),
                        **kwargs):
    """Builds a fully-convolutional U-Net model."""
    crop_mode = [None, None, 'center', 'center']
    image_mean = np.array([103.939, 116.779, 123.68]).reshape(1, 3, 1, 1)

    # To use VGG weights, change RGB to BGR and subtract the mean BGR values.
    sym_var = input_var[:, ::-1] - image_mean.astype(theano.config.floatX)
    input_layer = nn.InputLayer((None, 3, None, None), sym_var)

    # Encoder: VGG-16 convolutional blocks; each levelN holds a block's
    # output for the skip connections below.
    network = nn.Conv2DLayer(input_layer, 64, 3, pad=1, flip_filters=False)
    level1 = nn.Conv2DLayer(network, 64, 3, pad=1, flip_filters=False)
    network = nn.Pool2DLayer(level1, 2)
    network = nn.Conv2DLayer(network, 128, 3, pad=1, flip_filters=False)
    level2 = nn.Conv2DLayer(network, 128, 3, pad=1, flip_filters=False)
    network = nn.Pool2DLayer(level2, 2)
    network = nn.Conv2DLayer(network, 256, 3, pad=1, flip_filters=False)
    network = nn.Conv2DLayer(network, 256, 3, pad=1, flip_filters=False)
    level3 = nn.Conv2DLayer(network, 256, 3, pad=1, flip_filters=False)
    network = nn.Pool2DLayer(level3, 2)
    network = nn.Conv2DLayer(network, 512, 3, pad=1, flip_filters=False)
    network = nn.Conv2DLayer(network, 512, 3, pad=1, flip_filters=False)
    level4 = nn.Conv2DLayer(network, 512, 3, pad=1, flip_filters=False)
    network = nn.Pool2DLayer(level4, 2)
    network = nn.Conv2DLayer(network, 512, 3, pad=1, flip_filters=False)
    network = nn.Conv2DLayer(network, 512, 3, pad=1, flip_filters=False)
    level5 = nn.Conv2DLayer(network, 512, 3, pad=1, flip_filters=False)
    network = nn.Pool2DLayer(level5, 2)

    # Keep a handle to the VGG portion so pretrained weights can be set on it.
    vgg_layers = network

    # Decoder phase; all of these weights will be learned.
    network = nn.batch_norm(nn.Conv2DLayer(network, 1024, 1, pad='same'))
    network = nn.batch_norm(nn.Conv2DLayer(network, 1024, 1, pad='same'))
    network = nn.TransposedConv2DLayer(network, 512, 3, stride=2, crop='valid')
    network = nn.batch_norm(network)
    network = nn.ConcatLayer([network, level5], cropping=crop_mode)
    network = nn.batch_norm(nn.Conv2DLayer(network, 512, 3, pad='same'))
    network = nn.batch_norm(nn.Conv2DLayer(network, 512, 3, pad='same'))
    network = nn.TransposedConv2DLayer(network, 512, 3, stride=2, crop='valid')
    network = nn.batch_norm(network)
    network = nn.ConcatLayer([network, level4], cropping=crop_mode)
    # Two convolutions of the same size with a pooling step in the middle.
    # How is this useful?
    network = nn.batch_norm(nn.Conv2DLayer(network, 512, 3, pad='same'))
    network = nn.batch_norm(nn.Conv2DLayer(network, 512, 3, pad='same'))
    network = nn.TransposedConv2DLayer(network, 256, 3, stride=2, crop='valid')
    network = nn.batch_norm(network)
    network = nn.ConcatLayer([network, level3], cropping=crop_mode)
    network = nn.batch_norm(nn.Conv2DLayer(network, 256, 3, pad='same'))
    network = nn.batch_norm(nn.Conv2DLayer(network, 256, 3, pad='same'))
    network = nn.TransposedConv2DLayer(network, 128, 3, stride=2, crop='valid')
    network = nn.batch_norm(network)
    network = nn.ConcatLayer([network, level2], cropping=crop_mode)
    network = nn.batch_norm(nn.Conv2DLayer(network, 128, 3, pad='same'))
    network = nn.batch_norm(nn.Conv2DLayer(network, 128, 3, pad='same'))
    network = nn.TransposedConv2DLayer(network, 64, 3, stride=2, crop='valid')
    network = nn.batch_norm(network)
    network = nn.ConcatLayer([network, level1], cropping=crop_mode)

    # Final few convolutions to output class predictions.
    network = nn.batch_norm(nn.Conv2DLayer(network, 64, 3, pad='same'))
    network = nn.Conv2DLayer(network, num_classes, 1, pad='same', nonlinearity=None)
    softmax = SpatialNonlinearityLayer(network, lasagne.nonlinearities.softmax)

    # Reshape the previous layer from [batch_size, num_channels * rows * cols]
    # back to [batch_size, num_channels, rows, cols].
    target_shape = (sym_var.shape[0], num_classes, sym_var.shape[2], sym_var.shape[3])
    output = SpatialReshapeLayer(softmax, target_shape)

    # Apply a CRF to the output predictions to clean everything up.
    output_crf = CRFasRNNLayer(output, input_layer, normalize_final_iter=True)
    return softmax, output, output_crf, vgg_layers
def build_network_from_ae_old(classn):
    input_var = T.tensor4('input_var')
    net = {}
    net['input'] = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    # Encoder: VGG-16-style convolution/pooling stack with batch norm
    net['conv1_1'] = batch_norm(layers.Conv2DLayer(net['input'], 64, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    net['conv1_2'] = batch_norm(layers.Conv2DLayer(net['conv1_1'], 64, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    net['pool1'] = layers.Pool2DLayer(net['conv1_2'], pool_size=(2, 2), stride=2, mode='max')
    net['conv2_1'] = batch_norm(layers.Conv2DLayer(net['pool1'], 128, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    net['conv2_2'] = batch_norm(layers.Conv2DLayer(net['conv2_1'], 128, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    net['pool2'] = layers.Pool2DLayer(net['conv2_2'], pool_size=(2, 2), stride=2, mode='max')
    net['conv3_1'] = batch_norm(layers.Conv2DLayer(net['pool2'], 256, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    net['conv3_2'] = batch_norm(layers.Conv2DLayer(net['conv3_1'], 256, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    net['conv3_3'] = batch_norm(layers.Conv2DLayer(net['conv3_2'], 256, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    net['pool3'] = layers.Pool2DLayer(net['conv3_3'], pool_size=(2, 2), stride=2, mode='max')
    net['conv4_1'] = batch_norm(layers.Conv2DLayer(net['pool3'], 512, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    net['conv4_2'] = batch_norm(layers.Conv2DLayer(net['conv4_1'], 512, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    net['conv4_3'] = batch_norm(layers.Conv2DLayer(net['conv4_2'], 512, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    net['pool4'] = layers.Pool2DLayer(net['conv4_3'], pool_size=(2, 2), stride=2, mode='max')
    net['conv5_1'] = batch_norm(layers.Conv2DLayer(net['pool4'], 512, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    net['conv5_2'] = batch_norm(layers.Conv2DLayer(net['conv5_1'], 512, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    net['conv5_3'] = batch_norm(layers.Conv2DLayer(net['conv5_2'], 512, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    net['pool5'] = layers.Pool2DLayer(net['conv5_3'], pool_size=(2, 2), stride=2, mode='max')
    # Fully-convolutional replacements for the fc layers
    net['fc6'] = batch_norm(layers.Conv2DLayer(net['pool5'], 4096, filter_size=(7, 7), stride=1, pad='valid', nonlinearity=leaky_rectify))
    net['fc7'] = batch_norm(layers.Conv2DLayer(net['fc6'], 4096, filter_size=(1, 1), stride=1, pad='valid', nonlinearity=leaky_rectify))
    # net['fc6'] = batch_norm(layers.DenseLayer(net['pool5'], 4096, nonlinearity=leaky_rectify))
    # net['fc7'] = batch_norm(layers.DenseLayer(net['fc6'], 4096, nonlinearity=leaky_rectify))
    # Decoder: deconvolutions with unpooling via InverseLayer
    net['fc6_deconv'] = batch_norm(layers.Deconv2DLayer(net['fc7'], 512, filter_size=(7, 7), stride=1, crop='valid', nonlinearity=leaky_rectify))
    net['unpool5'] = layers.InverseLayer(net['fc6_deconv'], net['pool5'])
    net['deconv5_1'] = batch_norm(layers.Deconv2DLayer(net['unpool5'], 512, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    net['deconv5_2'] = batch_norm(layers.Deconv2DLayer(net['deconv5_1'], 512, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    net['deconv5_3'] = batch_norm(layers.Deconv2DLayer(net['deconv5_2'], 512, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    net['unpool4'] = layers.InverseLayer(net['deconv5_3'], net['pool4'])
    net['deconv4_1'] = batch_norm(layers.Deconv2DLayer(net['unpool4'], 512, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    net['deconv4_2'] = batch_norm(layers.Deconv2DLayer(net['deconv4_1'], 512, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    net['deconv4_3'] = batch_norm(layers.Deconv2DLayer(net['deconv4_2'], 256, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    net['unpool3'] = layers.InverseLayer(net['deconv4_3'], net['pool3'])
    net['deconv3_1'] = batch_norm(layers.Deconv2DLayer(net['unpool3'], 256, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    net['deconv3_2'] = batch_norm(layers.Deconv2DLayer(net['deconv3_1'], 256, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    net['deconv3_3'] = batch_norm(layers.Deconv2DLayer(net['deconv3_2'], 128, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    net['unpool2'] = layers.InverseLayer(net['deconv3_3'], net['pool2'])
    net['deconv2_1'] = batch_norm(layers.Deconv2DLayer(net['unpool2'], 128, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    net['deconv2_2'] = batch_norm(layers.Deconv2DLayer(net['deconv2_1'], 64, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    net['unpool1'] = layers.InverseLayer(net['deconv2_2'], net['pool1'])
    net['deconv1_1'] = batch_norm(layers.Deconv2DLayer(net['unpool1'], 64, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    net['deconv1_2'] = batch_norm(layers.Deconv2DLayer(net['deconv1_1'], 64, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    # Per-pixel sigmoid score, flattened for training against a float matrix
    net['score'] = layers.Deconv2DLayer(net['deconv1_2'], 1, filter_size=(1, 1), stride=1, crop='same', nonlinearity=sigmoid)
    net['score_flat'] = ReshapeLayer(net['score'], ([0], -1))
    all_params = layers.get_all_params(net['score_flat'], trainable=True)
    target_var = T.fmatrix('targets')
    return net, all_params, input_var, target_var
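# Hedged usage sketch (not from the original file): compiling training and
# validation functions for build_network_from_ae_old. Binary cross-entropy
# matches the sigmoid 'score_flat' output; PS and the builder's imports
# (theano, T, lasagne, layers) are assumed to be in scope.
net, all_params, input_var, target_var = build_network_from_ae_old(classn=1)
prediction = layers.get_output(net['score_flat'])
loss = lasagne.objectives.binary_crossentropy(prediction, target_var).mean()
updates = lasagne.updates.nesterov_momentum(
    loss, all_params, learning_rate=1e-3, momentum=0.9)
train_fn = theano.function([input_var, target_var], loss, updates=updates)
# Deterministic pass for validation (batch norm uses collected statistics)
test_pred = layers.get_output(net['score_flat'], deterministic=True)
test_loss = lasagne.objectives.binary_crossentropy(test_pred, target_var).mean()
val_fn = theano.function([input_var, target_var], test_loss)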
def build_autoencoder_network():
    input_var = T.tensor4('input_var')
    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    # Encoder: widening convolution stack with average pooling
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(5, 5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 120, filter_size=(5, 5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer, pool_size=(2, 2), stride=2, mode='average_inc_pad')
    layer = batch_norm(layers.Conv2DLayer(layer, 240, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 240, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer, pool_size=(2, 2), stride=2, mode='average_inc_pad')
    layer = batch_norm(layers.Conv2DLayer(layer, 320, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 320, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer, pool_size=(3, 3), stride=2, mode='average_inc_pad')
    layer = batch_norm(layers.Conv2DLayer(layer, 480, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 480, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer, pool_size=(2, 2), stride=2, mode='average_inc_pad')
    layer = batch_norm(layers.Conv2DLayer(layer, 640, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 640, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer, pool_size=(2, 2), stride=2, mode='average_inc_pad')
    mask_map = layer  # bottleneck feature map, exposed as the mask output
    # Bottleneck
    layer = batch_norm(layers.Conv2DLayer(layer, 300, filter_size=(1, 1), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 1000, filter_size=(3, 3), stride=1, nonlinearity=leaky_rectify))
    # Decoder: transposed convolutions mirroring the encoder
    layer = batch_norm(layers.Deconv2DLayer(layer, 300, filter_size=(3, 3), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 480, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 480, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 480, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3, 3), stride=2, crop=(0, 0), nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 240, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 240, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 240, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 120, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 120, filter_size=(5, 5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(5, 5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1, 1), stride=1, crop='same', nonlinearity=identity)
    network = ReshapeLayer(layer, ([0], -1))
    mask_var = lasagne.layers.get_output(mask_map)
    output_var = lasagne.layers.get_output(network)
    return network, input_var, mask_var, output_var
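# Hedged sketch (not in the original): training the autoencoder with a
# pixelwise squared-error reconstruction loss. It assumes PS is chosen so
# the decoder output matches the input resolution; names are illustrative.
network, input_var, mask_var, output_var = build_autoencoder_network()
target_flat = input_var.flatten(2)  # [batch, 3*PS*PS], same layout as output
recon_loss = lasagne.objectives.squared_error(output_var, target_flat).mean()
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.adam(recon_loss, params, learning_rate=1e-4)
train_fn = theano.function([input_var], recon_loss, updates=updates)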
def build_deconv_network():
    input_var = theano.tensor.tensor4('input_var')
    net = {}
    net['input'] = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    # Encoding part
    net['conv1_1'] = layers.NonlinearityLayer(batch_norm(layers.Conv2DLayer(net['input'], 64, filter_size=(3, 3), stride=1, pad=1)))
    net['conv1_2'] = layers.NonlinearityLayer(batch_norm(layers.Conv2DLayer(net['conv1_1'], 64, filter_size=(3, 3), stride=1, pad=1)))
    net['pool1'] = layers.Pool2DLayer(net['conv1_2'], pool_size=(2, 2), stride=2, mode='max')
    net['conv2_1'] = layers.NonlinearityLayer(batch_norm(layers.Conv2DLayer(net['pool1'], 128, filter_size=(3, 3), stride=1, pad=1)))
    net['conv2_2'] = layers.NonlinearityLayer(batch_norm(layers.Conv2DLayer(net['conv2_1'], 128, filter_size=(3, 3), stride=1, pad=1)))
    net['pool2'] = layers.Pool2DLayer(net['conv2_2'], pool_size=(2, 2), stride=2, mode='max')
    net['conv3_1'] = layers.NonlinearityLayer(batch_norm(layers.Conv2DLayer(net['pool2'], 256, filter_size=(3, 3), stride=1, pad=1)))
    net['conv3_2'] = layers.NonlinearityLayer(batch_norm(layers.Conv2DLayer(net['conv3_1'], 256, filter_size=(3, 3), stride=1, pad=1)))
    net['conv3_3'] = layers.NonlinearityLayer(batch_norm(layers.Conv2DLayer(net['conv3_2'], 256, filter_size=(3, 3), stride=1, pad=1)))
    net['pool3'] = layers.Pool2DLayer(net['conv3_3'], pool_size=(2, 2), stride=2, mode='max')
    net['conv4_1'] = layers.NonlinearityLayer(batch_norm(layers.Conv2DLayer(net['pool3'], 512, filter_size=(3, 3), stride=1, pad=1)))
    net['conv4_2'] = layers.NonlinearityLayer(batch_norm(layers.Conv2DLayer(net['conv4_1'], 512, filter_size=(3, 3), stride=1, pad=1)))
    net['conv4_3'] = layers.NonlinearityLayer(batch_norm(layers.Conv2DLayer(net['conv4_2'], 512, filter_size=(3, 3), stride=1, pad=1)))
    net['pool4'] = layers.Pool2DLayer(net['conv4_3'], pool_size=(2, 2), stride=2, mode='max')
    net['conv5_1'] = layers.NonlinearityLayer(batch_norm(layers.Conv2DLayer(net['pool4'], 512, filter_size=(3, 3), stride=1, pad=1)))
    net['conv5_2'] = layers.NonlinearityLayer(batch_norm(layers.Conv2DLayer(net['conv5_1'], 512, filter_size=(3, 3), stride=1, pad=1)))
    net['conv5_3'] = layers.NonlinearityLayer(batch_norm(layers.Conv2DLayer(net['conv5_2'], 512, filter_size=(3, 3), stride=1, pad=1)))
    net['pool5'] = layers.Pool2DLayer(net['conv5_3'], pool_size=(2, 2), stride=2, mode='max')
    net['fc6'] = layers.NonlinearityLayer(batch_norm(layers.Conv2DLayer(net['pool5'], 4096, filter_size=(7, 7), stride=1, pad='same')))
    # fc7 is the encoding layer
    net['fc7'] = layers.NonlinearityLayer(batch_norm(layers.Conv2DLayer(net['fc6'], 4096, filter_size=(1, 1), stride=1, pad='same')))
    # Decoding part
    net['fc6_deconv'] = layers.NonlinearityLayer(batch_norm(layers.Deconv2DLayer(net['fc7'], 512, filter_size=(7, 7), stride=1, crop='same')))
    net['unpool5'] = layers.InverseLayer(net['fc6_deconv'], net['pool5'])
    net['deconv5_1'] = layers.NonlinearityLayer(batch_norm(layers.Deconv2DLayer(net['unpool5'], 512, filter_size=(3, 3), stride=1, crop='same')))
    net['deconv5_2'] = layers.NonlinearityLayer(batch_norm(layers.Deconv2DLayer(net['deconv5_1'], 512, filter_size=(3, 3), stride=1, crop='same')))
    net['deconv5_3'] = layers.NonlinearityLayer(batch_norm(layers.Deconv2DLayer(net['deconv5_2'], 512, filter_size=(3, 3), stride=1, crop='same')))
    net['unpool4'] = layers.InverseLayer(net['deconv5_3'], net['pool4'])
    net['deconv4_1'] = layers.NonlinearityLayer(batch_norm(layers.Deconv2DLayer(net['unpool4'], 512, filter_size=(3, 3), stride=1, crop='same')))
    net['deconv4_2'] = layers.NonlinearityLayer(batch_norm(layers.Deconv2DLayer(net['deconv4_1'], 512, filter_size=(3, 3), stride=1, crop='same')))
    net['deconv4_3'] = layers.NonlinearityLayer(batch_norm(layers.Deconv2DLayer(net['deconv4_2'], 256, filter_size=(3, 3), stride=1, crop='same')))
    net['unpool3'] = layers.InverseLayer(net['deconv4_3'], net['pool3'])
    net['deconv3_1'] = layers.NonlinearityLayer(batch_norm(layers.Deconv2DLayer(net['unpool3'], 256, filter_size=(3, 3), stride=1, crop='same')))
    net['deconv3_2'] = layers.NonlinearityLayer(batch_norm(layers.Deconv2DLayer(net['deconv3_1'], 256, filter_size=(3, 3), stride=1, crop='same')))
    net['deconv3_3'] = layers.NonlinearityLayer(batch_norm(layers.Deconv2DLayer(net['deconv3_2'], 128, filter_size=(3, 3), stride=1, crop='same')))
    net['unpool2'] = layers.InverseLayer(net['deconv3_3'], net['pool2'])
    net['deconv2_1'] = layers.NonlinearityLayer(batch_norm(layers.Deconv2DLayer(net['unpool2'], 128, filter_size=(3, 3), stride=1, crop='same')))
    net['deconv2_2'] = layers.NonlinearityLayer(batch_norm(layers.Deconv2DLayer(net['deconv2_1'], 64, filter_size=(3, 3), stride=1, crop='same')))
    net['unpool1'] = layers.InverseLayer(net['deconv2_2'], net['pool1'])
    net['deconv1_1'] = layers.NonlinearityLayer(batch_norm(layers.Deconv2DLayer(net['unpool1'], 64, filter_size=(3, 3), stride=1, crop='same')))
    net['deconv1_2'] = layers.NonlinearityLayer(batch_norm(layers.Deconv2DLayer(net['deconv1_1'], 64, filter_size=(3, 3), stride=1, crop='same')))
    # Segmentation layer
    net['seg_score'] = layers.Deconv2DLayer(net['deconv1_2'], 1, filter_size=(1, 1), stride=1, crop='same', nonlinearity=lasagne.nonlinearities.sigmoid)
    network = ReshapeLayer(net['seg_score'], ([0], -1))
    output_var = lasagne.layers.get_output(network)
    all_param = lasagne.layers.get_all_params(network, trainable=True)
    return network, input_var, output_var, all_param
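# Hedged inference sketch for build_deconv_network (not in the original):
# compile a deterministic forward pass (batch norm in inference mode) that
# returns per-pixel foreground probabilities for a batch of images.
network, input_var, output_var, all_param = build_deconv_network()
det_out = lasagne.layers.get_output(network, deterministic=True)
predict_fn = theano.function([input_var], det_out)
# probs = predict_fn(batch).reshape(len(batch), PS, PS)  # back to image layout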
def build_lasagne_model(feadim, input_length=None, pool_size=(2, 2)):
    input0 = lasa_layer.InputLayer(shape=(None, 1, feadim, input_length), name='input0')
    pool0 = lasa_layer.Pool2DLayer(input0, pool_size=pool_size)
    return pool0
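# Quick shape sanity check (illustrative, not in the original): with the
# default (2, 2) max pool, both the feature and time axes are halved.
pool0 = build_lasagne_model(feadim=40, input_length=100)
print(lasa_layer.get_output_shape(pool0))  # -> (None, 1, 20, 50)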
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var')
    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(5, 5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 120, filter_size=(5, 5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer, pool_size=(2, 2), stride=2, mode='average_inc_pad')
    layer = batch_norm(layers.Conv2DLayer(layer, 240, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 320, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer, pool_size=(2, 2), stride=2, mode='average_inc_pad')
    layer = batch_norm(layers.Conv2DLayer(layer, 640, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    prely = batch_norm(layers.Conv2DLayer(layer, 1024, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    # Feature map and soft attention mask, merged into the encoding
    featm = batch_norm(layers.Conv2DLayer(prely, 640, filter_size=(1, 1), nonlinearity=leaky_rectify))
    feat_map = batch_norm(layers.Conv2DLayer(featm, 100, filter_size=(1, 1), nonlinearity=rectify, name="feat_map"))
    maskm = batch_norm(layers.Conv2DLayer(prely, 100, filter_size=(1, 1), nonlinearity=leaky_rectify))
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1, filter_size=(1, 1), nonlinearity=None), beta=None, gamma=None)
    mask_map = SoftThresPerc(mask_rep, perc=98.4, alpha=0.1, beta=init.Constant(0.5), tight=100.0, name="mask_map")
    enlyr = ChInnerProdMerge(feat_map, mask_map, name="encoder")
    # Local decoder branch
    layer = batch_norm(layers.Deconv2DLayer(enlyr, 1024, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 240, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 120, filter_size=(5, 5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(5, 5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1, 1), stride=1, crop='same', nonlinearity=identity)
    # Global feature branch
    glblf = batch_norm(layers.Conv2DLayer(prely, 128, filter_size=(1, 1), nonlinearity=leaky_rectify))
    glblf = layers.Pool2DLayer(glblf, pool_size=(5, 5), stride=5, mode='average_inc_pad')
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    gllyr = batch_norm(layers.Conv2DLayer(glblf, 5, filter_size=(1, 1), nonlinearity=rectify), name="global_feature")
    glblf = batch_norm(layers.Deconv2DLayer(gllyr, 256, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(9, 9), stride=5, crop=(2, 2), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1, 1), stride=1, crop='same', nonlinearity=identity)
    layer = layers.ElemwiseSumLayer([layer, glblf])
    network = ReshapeLayer(layer, ([0], -1))
    # Load the pre-trained autoencoder weights and remember them as "old"
    layers.set_all_param_values(network, pickle.load(open(filename_model_ae, 'rb')))
    old_params = layers.get_all_params(network, trainable=True)
    # Adding more layers
    aug_var = T.matrix('aug_var')
    target_var = T.imatrix('targets')
    add_a = batch_norm(layers.Conv2DLayer(enlyr, 320, filter_size=(1, 1), nonlinearity=leaky_rectify))
    add_b = batch_norm(layers.Conv2DLayer(add_a, 320, filter_size=(1, 1), nonlinearity=leaky_rectify))
    add_c = batch_norm(layers.Conv2DLayer(add_b, 320, filter_size=(1, 1), nonlinearity=leaky_rectify))
    add_d = batch_norm(layers.Conv2DLayer(add_c, 320, filter_size=(1, 1), nonlinearity=leaky_rectify))
    add_0 = layers.Pool2DLayer(add_d, pool_size=(25, 25), stride=25, mode='average_inc_pad')
    add_1 = batch_norm(layers.DenseLayer(add_0, 100, nonlinearity=leaky_rectify))
    add_2 = batch_norm(layers.DenseLayer(gllyr, 320, nonlinearity=leaky_rectify))
    add_3 = batch_norm(layers.DenseLayer(add_2, 320, nonlinearity=leaky_rectify))
    add_4 = batch_norm(layers.DenseLayer(add_3, 100, nonlinearity=leaky_rectify))
    aug_layer = layers.InputLayer(shape=(None, aug_fea_n), input_var=aug_var)
    cat_layer = lasagne.layers.ConcatLayer([add_1, add_4, aug_layer], axis=1)
    hidden_layer = layers.DenseLayer(cat_layer, 80, nonlinearity=leaky_rectify)
    network = layers.DenseLayer(hidden_layer, classn, nonlinearity=sigmoid)
    layers.set_all_param_values(network, pickle.load(open('model_vals/deep_conv_classification_alt48_luad10_skcm10_lr0.py_e32_cv0.pkl', 'rb')))
    all_params = layers.get_all_params(network, trainable=True)
    new_params = [x for x in all_params if x not in old_params]
    return network, new_params, input_var, aug_var, target_var
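# Hedged fine-tuning sketch (not in the original): only the newly added
# classification layers (new_params) are updated, keeping the pre-trained
# autoencoder weights frozen. Assumes classn, PS and aug_fea_n are defined
# as above; the hyperparameters here are illustrative.
network, new_params, input_var, aug_var, target_var = build_network_from_ae(classn)
pred = layers.get_output(network)
loss = lasagne.objectives.binary_crossentropy(pred, target_var).mean()
updates = lasagne.updates.nesterov_momentum(
    loss, new_params, learning_rate=5e-3, momentum=0.9)
train_fn = theano.function([input_var, aug_var, target_var], loss, updates=updates)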
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var')
    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(5, 5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 120, filter_size=(5, 5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer, pool_size=(2, 2), stride=2, mode='average_inc_pad')
    layer = batch_norm(layers.Conv2DLayer(layer, 240, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 320, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer, pool_size=(2, 2), stride=2, mode='average_inc_pad')
    layer = batch_norm(layers.Conv2DLayer(layer, 640, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    prely = batch_norm(layers.Conv2DLayer(layer, 1024, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    # Feature map and soft attention mask, merged into the encoding
    featm = batch_norm(layers.Conv2DLayer(prely, 640, filter_size=(1, 1), nonlinearity=leaky_rectify))
    feat_map = batch_norm(layers.Conv2DLayer(featm, 100, filter_size=(1, 1), nonlinearity=rectify, name="feat_map"))
    maskm = batch_norm(layers.Conv2DLayer(prely, 100, filter_size=(1, 1), nonlinearity=leaky_rectify))
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1, filter_size=(1, 1), nonlinearity=None), beta=None, gamma=None)
    mask_map = SoftThresPerc(mask_rep, perc=97.0, alpha=0.1, beta=init.Constant(0.5), tight=100.0, name="mask_map")
    enlyr = ChInnerProdMerge(feat_map, mask_map, name="encoder")
    # Local decoder branch
    layer = batch_norm(layers.Deconv2DLayer(enlyr, 1024, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 240, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 120, filter_size=(5, 5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(5, 5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1, 1), stride=1, crop='same', nonlinearity=identity)
    # Global feature branch
    glblf = batch_norm(layers.Conv2DLayer(prely, 128, filter_size=(1, 1), nonlinearity=leaky_rectify))
    glblf = layers.Pool2DLayer(glblf, pool_size=(5, 5), stride=5, mode='average_inc_pad')
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64, filter_size=(3, 3), stride=1, pad='same', nonlinearity=leaky_rectify))
    gllyr = batch_norm(layers.Conv2DLayer(glblf, 5, filter_size=(1, 1), nonlinearity=rectify), name="global_feature")
    glblf = batch_norm(layers.Deconv2DLayer(gllyr, 256, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(9, 9), stride=5, crop=(2, 2), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(4, 4), stride=2, crop=(1, 1), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3, 3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1, 1), stride=1, crop='same', nonlinearity=identity)
    layer = layers.ElemwiseSumLayer([layer, glblf])
    network = ReshapeLayer(layer, ([0], -1))
    # Tighten the mask threshold before adding the classification head
    mask_map.beta.set_value(np.float32(0.9 * mask_map.beta.get_value()))
    old_params = layers.get_all_params(network, trainable=True)
    # Adding more layers
    aug_var = T.matrix('aug_var')
    target_var = T.imatrix('targets')
    add_a = batch_norm(layers.Conv2DLayer(enlyr, 320, filter_size=(1, 1), nonlinearity=leaky_rectify))
    add_b = batch_norm(layers.Conv2DLayer(add_a, 320, filter_size=(1, 1), nonlinearity=leaky_rectify))
    add_c = batch_norm(layers.Conv2DLayer(add_b, 320, filter_size=(1, 1), nonlinearity=leaky_rectify))
    add_d = batch_norm(layers.Conv2DLayer(add_c, 320, filter_size=(1, 1), nonlinearity=leaky_rectify))
    add_0 = layers.Pool2DLayer(add_d, pool_size=(25, 25), stride=25, mode='average_inc_pad')
    add_1 = batch_norm(layers.DenseLayer(add_0, 100, nonlinearity=leaky_rectify))
    add_2 = batch_norm(layers.DenseLayer(gllyr, 320, nonlinearity=leaky_rectify))
    add_3 = batch_norm(layers.DenseLayer(add_2, 320, nonlinearity=leaky_rectify))
    add_4 = batch_norm(layers.DenseLayer(add_3, 100, nonlinearity=leaky_rectify))
    aug_layer = layers.InputLayer(shape=(None, aug_fea_n), input_var=aug_var)
    cat_layer = lasagne.layers.ConcatLayer([add_1, add_4, aug_layer], axis=1)
    hidden_layer = layers.DenseLayer(cat_layer, 80, nonlinearity=leaky_rectify)
    network = layers.DenseLayer(hidden_layer, classn, nonlinearity=sigmoid)
    all_params = layers.get_all_params(network, trainable=True)
    new_params = [x for x in all_params if x not in old_params]
    return network, new_params, input_var, aug_var, target_var
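# Illustrative check (not in the original) that the old/new parameter split
# behaves as intended: new_params should be exactly the trainable parameters
# introduced after the autoencoder graph was captured, with no overlap.
network, new_params, input_var, aug_var, target_var = build_network_from_ae(classn)
all_params = layers.get_all_params(network, trainable=True)
assert all(p in all_params for p in new_params)
print('%d trainable in total, %d newly added' % (len(all_params), len(new_params)))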
def avg(_in, *args, **kwargs):
    return L.Pool2DLayer(_in, *args, ignore_border=True, mode='average_exc_pad', **kwargs)
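# Usage sketch for the avg helper (illustrative): 'average_exc_pad' excludes
# zero padding from the average, so border pools are not biased toward zero.
inp = L.InputLayer((None, 3, 32, 32))
pool = avg(inp, pool_size=(2, 2))
print(L.get_output_shape(pool))  # -> (None, 3, 16, 16)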