def build_model():
    l0 = InputLayer(data_sizes["sunny"])

    l1a = ConvLayer(l0, num_filters=32, filter_size=(3, 3),
                    W=lasagne.init.Orthogonal(),
                    b=lasagne.init.Constant(0.1))
    l1b = ConvLayer(l1a, num_filters=32, filter_size=(3, 3),
                    W=lasagne.init.Orthogonal(),
                    b=lasagne.init.Constant(0.1))
    l1c = ConvLayer(l1b, num_filters=32, filter_size=(3, 3),
                    W=lasagne.init.Orthogonal(),
                    b=lasagne.init.Constant(0.1))
    l1 = MaxPoolLayer(l1c, pool_size=(2, 2))

    l2a = ConvLayer(l1, num_filters=32, filter_size=(3, 3),
                    W=lasagne.init.Orthogonal(),
                    b=lasagne.init.Constant(0.1))
    l2b = ConvLayer(l2a, num_filters=32, filter_size=(3, 3),
                    W=lasagne.init.Orthogonal(),
                    b=lasagne.init.Constant(0.1))
    l2c = ConvLayer(l2b, num_filters=32, filter_size=(3, 3),
                    W=lasagne.init.Orthogonal(),
                    b=lasagne.init.Constant(0.1))
    l3 = MaxPoolLayer(l2c, pool_size=(3, 3))

    l4a = ConvLayer(l3, num_filters=32, filter_size=(4, 4),
                    W=lasagne.init.Orthogonal(),
                    b=lasagne.init.Constant(0.1))
    l4b = ConvLayer(l4a, num_filters=32, filter_size=(3, 3),
                    W=lasagne.init.Orthogonal(),
                    b=lasagne.init.Constant(0.1))
    l4c = ConvLayer(l4b, num_filters=32, filter_size=(3, 3),
                    W=lasagne.init.Orthogonal(),
                    b=lasagne.init.Constant(0.1))

    l5a = ConvLayer(l4c, num_filters=256, filter_size=(1, 1),
                    W=lasagne.init.Orthogonal(),
                    b=lasagne.init.Constant(0.1))
    l5b = ConvLayer(l5a, num_filters=256, filter_size=(1, 1),
                    W=lasagne.init.Orthogonal(),
                    b=lasagne.init.Constant(0.1))
    l5c = ConvLayer(l5b, num_filters=1, filter_size=(1, 1),
                    W=lasagne.init.Orthogonal(),
                    b=lasagne.init.Constant(0.1),
                    nonlinearity=lasagne.nonlinearities.sigmoid)

    l_final = lasagne.layers.FlattenLayer(l5c, outdim=2)

    return {
        "inputs": {
            "sunny": l0,
        },
        "outputs": {
            "segmentation": l_final,
            "top": l_final,
        },
    }
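
# A minimal usage sketch (not part of the original configuration): compiling the
# layer dictionary returned above into a Theano prediction function with
# standard Lasagne calls. data_sizes and the surrounding training framework are
# assumed to be defined elsewhere; compile_segmentation_fn is a hypothetical
# helper name introduced only for illustration.
import theano
import lasagne


def compile_segmentation_fn(model):
    l_in = model["inputs"]["sunny"]
    l_out = model["outputs"]["segmentation"]
    # deterministic=True disables stochastic layers (e.g. dropout) at test time
    output = lasagne.layers.get_output(l_out, deterministic=True)
    return theano.function([l_in.input_var], output)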
def MaxPool(incoming, pool_size, stride, pad=(0, 0), ignore_border=True, **kwargs):
    """Overrides the default parameters for MaxPool."""
    ensure_set_name('conv', kwargs)
    return MaxPoolLayer(incoming, pool_size, stride, pad, ignore_border, **kwargs)
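
# ensure_set_name is a project-specific helper; the sketch below shows its
# assumed behaviour (filling in a default layer name when the caller did not
# pass one). This is an assumption for readability, not the original code.
def ensure_set_name(default_name, kwargs):
    """Set kwargs['name'] to a default if the caller did not provide one."""
    kwargs.setdefault('name', default_name)

# Example usage of the wrapper above (illustrative; l_prev stands for any
# preceding layer):
#   l_pool = MaxPool(l_prev, pool_size=(2, 2), stride=(2, 2), name='pool1')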
def build_model(input_var):

    # Three-layer (bottleneck) residual block
    def residual_block3(l, base_dim, increase_dim=False, projection=False):
        if increase_dim:
            layer_1 = batch_norm(
                ConvLayer(l, num_filters=base_dim, filter_size=(1, 1),
                          stride=(2, 2), nonlinearity=None, pad='same',
                          W=nn.init.HeNormal(gain='relu')))
        else:
            layer_1 = batch_norm(
                ConvLayer(l, num_filters=base_dim, filter_size=(1, 1),
                          stride=(1, 1), nonlinearity=rectify, pad='same',
                          W=nn.init.HeNormal(gain='relu')))
        layer_2 = batch_norm(
            ConvLayer(layer_1, num_filters=base_dim, filter_size=(3, 3),
                      stride=(1, 1), nonlinearity=rectify, pad='same',
                      W=nn.init.HeNormal(gain='relu')))
        layer_3 = batch_norm(
            ConvLayer(layer_2, num_filters=4 * base_dim, filter_size=(1, 1),
                      stride=(1, 1), nonlinearity=rectify, pad='same',
                      W=nn.init.HeNormal(gain='relu')))

        # add shortcut connection
        if increase_dim:
            if projection:
                # projection shortcut (option B in paper)
                projection = batch_norm(
                    ConvLayer(l, num_filters=4 * base_dim, filter_size=(1, 1),
                              stride=(2, 2), nonlinearity=None, pad='same',
                              b=None))
                block = NonlinearityLayer(
                    ElemwiseSumLayer([layer_3, projection]),
                    nonlinearity=rectify)
            else:
                # identity shortcut (option A in paper)
                # we use a pooling layer to get identity with strides,
                # since identity layers with stride don't exist in Lasagne
                identity = PoolLayer(l, pool_size=1, stride=(2, 2),
                                     mode='average_exc_pad')
                # pad the channel axis by base_dim on each side so the shortcut
                # matches layer_3's 4 * base_dim feature maps
                padding = PadLayer(identity, [base_dim, 0, 0], batch_ndim=1)
                block = NonlinearityLayer(
                    ElemwiseSumLayer([layer_3, padding]),
                    nonlinearity=rectify)
        else:
            block = NonlinearityLayer(ElemwiseSumLayer([layer_3, l]),
                                      nonlinearity=rectify)

        return block

    # Input of the network
    input_layer = InputLayer(shape=(batch_size, num_channels,
                                    input_height, input_width),
                             input_var=input_var)

    # Very first conv layer
    l = batch_norm(
        ConvLayer(input_layer, num_filters=64, filter_size=(7, 7),
                  stride=(2, 2), nonlinearity=rectify, pad='same',
                  W=nn.init.HeNormal(gain='relu')))

    # Maxpool layer
    l = MaxPoolLayer(l, pool_size=(3, 3), stride=(2, 2))

    # Convolve with a 1x1 filter to match the input dimension
    # of the upcoming residual block
    l = batch_norm(
        ConvLayer(l, num_filters=256, filter_size=(1, 1), stride=(1, 1),
                  nonlinearity=rectify, pad='same',
                  W=nn.init.HeNormal(gain='relu')))

    ############# First residual blocks #############
    for _ in range(num_blocks[0] - 1):
        l = residual_block3(l, base_dim=64)

    ############# Second residual blocks ############
    # Increment dimension
    l = residual_block3(l, base_dim=128, increase_dim=True, projection=True)
    for _ in range(num_blocks[1] - 1):
        l = residual_block3(l, base_dim=128)

    ############# Third residual blocks #############
    # Increment dimension
    l = residual_block3(l, base_dim=256, increase_dim=True, projection=True)
    for _ in range(num_blocks[2] - 1):
        l = residual_block3(l, base_dim=256)

    ############# Fourth residual blocks #############
    # Increment dimension
    l = residual_block3(l, base_dim=512, increase_dim=True, projection=True)
    for _ in range(num_blocks[3] - 1):
        l = residual_block3(l, base_dim=512)

    # Global pooling layer
    l = GlobalPoolLayer(l)

    # Softmax layer
    softmax_layer = DenseLayer(l, num_units=output_dim,
                               W=nn.init.HeNormal(), nonlinearity=softmax)

    return softmax_layer
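
# A minimal training sketch (assumptions marked): num_blocks, batch_size,
# num_channels, input_height, input_width and output_dim are module-level
# settings of the original configuration and are only referenced here.
# compile_train_fn and the chosen loss/update rule are illustrative, not the
# original training code.
import theano
import theano.tensor as T
import lasagne as nn


def compile_train_fn(network, input_var, target_var, learning_rate=0.1):
    prediction = nn.layers.get_output(network)
    loss = nn.objectives.categorical_crossentropy(prediction, target_var).mean()
    params = nn.layers.get_all_params(network, trainable=True)
    updates = nn.updates.nesterov_momentum(loss, params,
                                           learning_rate=learning_rate,
                                           momentum=0.9)
    return theano.function([input_var, target_var], loss, updates=updates)


# Example:
#   input_var = T.tensor4('inputs')
#   target_var = T.ivector('targets')
#   network = build_model(input_var)
#   train_fn = compile_train_fn(network, input_var, target_var)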
def build_model():
    #################
    # Regular model #
    #################
    l0 = InputLayer(data_sizes["sliced:data:ax"])
    l0r = reshape(l0, (-1, 1,) + data_sizes["sliced:data:ax"][1:])
    # (batch, channel, time, axis, x, y)

    # convolve over time with a small filter
    l0t = ConvolutionOverAxisLayer(
        l0r, num_filters=2, filter_size=(5,), stride=(3,),
        axis=2, channel=1,
        W=lasagne.init.Orthogonal(),
        b=lasagne.init.Constant(0.1),
    )

    l0r = reshape(l0t, (-1, 1,) + data_sizes["sliced:data:ax"][-2:])

    # first do the segmentation steps
    l1a = ConvLayer(
        l0r, num_filters=32, filter_size=(5, 5), stride=2, pad='same',
        W=lasagne.init.Orthogonal(),
        b=lasagne.init.Constant(0.1),
    )
    l1b = ConvLayer(
        l1a, num_filters=32, filter_size=(5, 5), stride=2, pad='same',
        W=lasagne.init.Orthogonal(),
        b=lasagne.init.Constant(0.1),
    )
    l1b_m = MaxPoolLayer(l1b, pool_size=(2, 2))

    l1c = ConvLayer(
        l1b_m, num_filters=64, filter_size=(3, 3), pad='same',
        W=lasagne.init.Orthogonal(),
        b=lasagne.init.Constant(0.1),
    )
    l1f = ConvLayer(
        l1c, num_filters=32, filter_size=(3, 3), pad='same',
        W=lasagne.init.Orthogonal(),
        b=lasagne.init.Constant(0.1),
    )
    l1f_m = MaxPoolLayer(l1f, pool_size=(2, 2))

    l1f_r = reshape(l1f_m, (batch_size, 2, 9, 15, 32, 4, 4))
    l0t = ConvolutionOverAxisLayer(
        l1f_r, num_filters=32, filter_size=(3,), stride=(1,),
        axis=3, channel=1,
        W=lasagne.init.Orthogonal(),
        b=lasagne.init.Constant(0.1),
    )

    l_d3 = lasagne.layers.DenseLayer(
        l0t, num_units=2,
        nonlinearity=lasagne.nonlinearities.identity)
    l_systole = MuLogSigmaErfLayer(l_d3)

    l_d3b = lasagne.layers.DenseLayer(
        l0t, num_units=2,
        nonlinearity=lasagne.nonlinearities.identity)
    l_diastole = MuLogSigmaErfLayer(l_d3b)

    return {
        "inputs": {
            "sliced:data:ax": l0,
        },
        "outputs": {
            "systole": l_systole,
            "diastole": l_diastole,
        },
    }
def build_model(input_width, input_height, output_dim, batch_size=BATCH_SIZE):
    l_in = lasagne.layers.InputLayer(
        shape=(batch_size, NUM_CHANNELS, input_width, input_height),
    )

    l_conv1 = ConvLayer(
        l_in, num_filters=32, filter_size=(3, 3),
        nonlinearity=lasagne.nonlinearities.very_leaky_rectify,
        W=lasagne.init.Orthogonal(),
    )
    l_conv1b = ConvLayer(
        l_conv1, num_filters=32, filter_size=(3, 3), pad=0,
        nonlinearity=lasagne.nonlinearities.very_leaky_rectify,
        W=lasagne.init.Orthogonal(),
    )
    l_conv1c = ConvLayer(
        l_conv1b, num_filters=32, filter_size=(3, 3), pad=0,
        nonlinearity=lasagne.nonlinearities.very_leaky_rectify,
        W=lasagne.init.Orthogonal(),
    )
    l_pool1 = MaxPoolLayer(l_conv1c, pool_size=(3, 3), stride=(2, 2))
    l_dropout1 = lasagne.layers.DropoutLayer(l_pool1, p=0.25)

    l_conv2 = ConvLayer(
        l_dropout1, num_filters=64, filter_size=(3, 3), pad=0,
        nonlinearity=lasagne.nonlinearities.very_leaky_rectify,
        W=lasagne.init.Orthogonal(),
    )
    l_conv2b = ConvLayer(
        l_conv2, num_filters=64, filter_size=(3, 3), pad=0,
        nonlinearity=lasagne.nonlinearities.very_leaky_rectify,
        W=lasagne.init.Orthogonal(),
    )
    l_conv2c = ConvLayer(
        l_conv2b, num_filters=64, filter_size=(3, 3), pad=0,
        nonlinearity=lasagne.nonlinearities.very_leaky_rectify,
        W=lasagne.init.Orthogonal(),
    )
    l_pool2 = MaxPoolLayer(l_conv2c, pool_size=(2, 2), stride=(2, 2))
    l_dropout2 = lasagne.layers.DropoutLayer(l_pool2, p=0.25)

    l_conv3 = ConvLayer(
        l_dropout2, num_filters=128, filter_size=(3, 3), pad=0,
        nonlinearity=lasagne.nonlinearities.very_leaky_rectify,
        W=lasagne.init.Orthogonal(),
    )
    l_conv3b = ConvLayer(
        l_conv3, num_filters=128, filter_size=(3, 3), pad=0,
        nonlinearity=lasagne.nonlinearities.very_leaky_rectify,
        W=lasagne.init.Orthogonal(),
    )
    l_conv3c = ConvLayer(
        l_conv3b, num_filters=128, filter_size=(3, 3), pad=0,
        nonlinearity=lasagne.nonlinearities.very_leaky_rectify,
        W=lasagne.init.Orthogonal(),
    )
    l_pool3 = lasagne.layers.GlobalPoolLayer(l_conv3c, pool_function=T.max)
    l_dropout3 = lasagne.layers.DropoutLayer(l_pool3, p=0.25)

    l_out = lasagne.layers.DenseLayer(
        l_dropout3, num_units=output_dim,
        nonlinearity=lasagne.nonlinearities.softmax,
        W=lasagne.init.Orthogonal(),
    )

    return l_out
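
# Quick sanity check (illustrative): count trainable parameters and inspect the
# output shape of the network above. The input size and class count passed to
# build_model here are placeholders; BATCH_SIZE and NUM_CHANNELS are assumed to
# be defined at module level as in the original. Note that very_leaky_rectify
# is Lasagne's leaky ReLU with negative slope 1/3.
import lasagne

network = build_model(input_width=64, input_height=64, output_dim=10)
print("trainable parameters:", lasagne.layers.count_params(network, trainable=True))
print("output shape:", lasagne.layers.get_output_shape(network))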