Example #1
def architecture_upconv_mp3(input_var, input_shape, n_conv_layers,
                            n_conv_filters):

    net = {}

    kwargs = dict(nonlinearity=lasagne.nonlinearities.elu,
                  W=lasagne.init.HeNormal())

    net['data'] = InputLayer(input_shape, input_var)
    print("\rLayer output shapes")
    print(net['data'].output_shape)

    # Stack of 3 x 3 convolution layers. Experimentally, placing the conv layers
    # at the start rather than in the middle worked better, though the reason is unclear.
    i = 'data'
    j = 'c1'
    for idx in range(n_conv_layers):
        print("Conv layer index: %d" % (idx + 1))
        net[j] = batch_norm(
            Conv2DLayer(net[i],
                        num_filters=n_conv_filters,
                        filter_size=3,
                        stride=1,
                        pad=1,
                        **kwargs))
        print(net[j].output_shape)
        # rename for the next iteration ('c1' -> 'c2' -> ...)
        i = j
        j = 'c' + str(idx + 2)

    # Bunch of transposed convolution layers
    net['uc1'] = batch_norm(
        TransposedConv2DLayer(net[i],
                              num_filters=n_conv_filters // 2,  # integer division: Lasagne expects an int
                              filter_size=4,
                              stride=2,
                              crop=1,
                              **kwargs))
    print(net['uc1'].output_shape)

    net['uc2'] = batch_norm(
        TransposedConv2DLayer(net['uc1'],
                              num_filters=1,
                              filter_size=4,
                              stride=2,
                              crop=1,
                              **kwargs))
    print(net['uc2'].output_shape)

    # slicing the output to 115 x 80 size
    net['s1'] = lasagne.layers.SliceLayer(net['uc2'], slice(0, 115), axis=-2)
    print(net['s1'].output_shape)
    net['out'] = lasagne.layers.SliceLayer(net['s1'], slice(0, 80), axis=-1)
    print(net['out'].output_shape)

    print("Number of parameter to be learned: %d" %
          (lasagne.layers.count_params(net['out'])))

    return net['out']
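
The 115 x 80 slice works because each transposed convolution with filter_size=4, stride=2 and crop=1 exactly doubles its input, per the standard formula out = (in - 1) * stride - 2 * crop + filter_size. A standalone sketch of that arithmetic (the 29 x 20 starting size is hypothetical):

def upconv_out(in_size, filter_size=4, stride=2, crop=1):
    # transposed-convolution output-size formula
    return (in_size - 1) * stride - 2 * crop + filter_size

assert upconv_out(29) == 58 and upconv_out(58) == 116   # 29 x 20 -> 116 x 80, then sliced to 115 x 80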
Example #2
def build_baseline2_feats(input_var, nb_filter=96):
    """ Slightly more complex model. Transform x to a feature space first
    """
    net = OrderedDict()

    # Input, standardization
    last = net['input'] = InputLayer(
        (None, 3, tools.INP_PSIZE, tools.INP_PSIZE), input_var=input_var)
    last = net['norm'] = ExpressionLayer(last, lambda x: normalize(x))

    # Pretrained Encoder as before
    last = net["conv1_1"] = ConvLayer(last,
                                      nb_filter,
                                      1,
                                      pad=0,
                                      flip_filters=False,
                                      nonlinearity=linear)
    last = net["bn1_1"] = BatchNormLayer(last)
    last = net["relu1_1"] = NonlinearityLayer(last, nonlinearity=rectify)
    last = net["conv1_2"] = ConvLayer(last,
                                      nb_filter,
                                      1,
                                      pad=0,
                                      flip_filters=False,
                                      nonlinearity=linear)
    last = net["bn1_2"] = BatchNormLayer(last)
    last = net["relu1_2"] = NonlinearityLayer(last, nonlinearity=rectify)

    # Modified Middle Part
    last = net["middle"] = ConvLayer(last, nb_filter, 1, nonlinearity=linear)

    # Decoder as before
    last = net["deconv1_2"] = TransposedConv2DLayer(
        last,
        net["conv1_2"].input_shape[1],
        net["conv1_2"].filter_size,
        stride=net["conv1_2"].stride,
        crop=net["conv1_2"].pad,
        W=net["conv1_2"].W,
        flip_filters=not net["conv1_2"].flip_filters,
        nonlinearity=None)
    last = net["deconv1_1"] = TransposedConv2DLayer(
        last,
        net["conv1_1"].input_shape[1],
        net["conv1_1"].filter_size,
        stride=net["conv1_1"].stride,
        crop=net["conv1_1"].pad,
        W=net["conv1_1"].W,
        flip_filters=not net["conv1_1"].flip_filters,
        nonlinearity=None)

    last = net["bn"] = BatchNormLayer(last,
                                      beta=nn.init.Constant(128.),
                                      gamma=nn.init.Constant(25.))

    return last, net
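
The constant beta=128 and gamma=25 in the final BatchNormLayer push the normalized activations toward the 0-255 pixel range (mean ~128, std ~25). A standalone sketch of the effect, not taken from the original code:

import numpy as np

x = np.random.randn(100000)                # batch-normalized activations: mean ~0, std ~1
y = 25.0 * x + 128.0                       # gamma * x + beta, as BatchNormLayer applies them
print(round(y.mean()), round(y.std()))     # ~128 ~25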
Example #3
 def test_with_nones(self, DummyInputLayer, input, kernel, output, kwargs):
     if kwargs.get('untie_biases', False):
         pytest.skip()
     from lasagne.layers import TransposedConv2DLayer
     b, c, h, w = input.shape
     input_layer = DummyInputLayer((None, c, None, None))
     layer = TransposedConv2DLayer(input_layer,
                                   num_filters=kernel.shape[0],
                                   filter_size=kernel.shape[2:],
                                   W=kernel.transpose(1, 0, 2, 3),
                                   **kwargs)
     if 'output_size' not in kwargs or isinstance(kwargs['output_size'],
                                                  T.Variable):
         assert layer.output_shape == (None, output.shape[1], None, None)
     actual = layer.get_output_for(input).eval()
     assert actual.shape == output.shape
     assert np.allclose(actual, output)
     # Check get_output_shape_for for non symbolic output
     if 'output_size' in kwargs and not isinstance(kwargs['output_size'],
                                                   T.Variable):
         assert layer.get_output_shape_for(input.shape) == output.shape
         # The layer should report the output size even when it
         # doesn't know most of the input size
         assert layer.output_shape == (
             None, output.shape[1]) + kwargs['output_size']
Example #4
def inverse_convolution_strided_layer(input_layer, original_layer):
    # Undo a strided 1-D convolution: reshape to 4-D, transpose-convolve, trim one step, restore shape.
    reshaped = ReshapeLayer(input_layer, (-1, original_layer.output_shape[1],
                                          1, original_layer.output_shape[2]))
    deconv = TransposedConv2DLayer(reshaped,
                                   original_layer.input_layer.num_filters,
                                   (1, original_layer.filter_size[0]),
                                   stride=(1, original_layer.stride[0]),
                                   crop=(0, 0),
                                   flip_filters=original_layer.flip_filters,
                                   nonlinearity=nonlinearities.leaky_rectify)
    trimmed = SliceLayer(deconv, indices=slice(None, -1), axis=-1)
    return ReshapeLayer(trimmed, (-1, original_layer.input_shape[1],
                                  original_layer.input_shape[2]))
Example #5
def build_generator(input_var=None, dim_z=None, dim_h=None):
    layer = InputLayer(shape=(None, dim_z), input_var=input_var)
    
    layer = batch_norm(DenseLayer(layer, dim_h * 8 * 4 * 4))
    layer = ReshapeLayer(layer, ([0], dim_h * 8, 4, 4))
    logger.debug('Generator output 1: {}'.format(layer.output_shape))
    layer = batch_norm(TransposedConv2DLayer(
        layer, dim_h * 4, 4, stride=2, crop=1))
    logger.debug('Generator output 2: {}'.format(layer.output_shape))
    layer = batch_norm(TransposedConv2DLayer(
        layer, dim_h * 2, 4, stride=2, crop=1))
    logger.debug('Generator output 3: {}'.format(layer.output_shape))
    layer = batch_norm(TransposedConv2DLayer(
        layer, dim_h, 4, stride=2, crop=1))
    logger.debug('Generator output 4: {}'.format(layer.output_shape))
    layer = TransposedConv2DLayer(
        layer, DIM_C, 4, stride=2, crop=1, nonlinearity=NONLIN)
    
    logger.debug('Generator output: {}'.format(layer.output_shape))
    return layer
Example #6
def build_generator(input_var=None):
    from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer, batch_norm, TransposedConv2DLayer
    from lasagne.nonlinearities import sigmoid
    # input: CONTROLDIM-dimensional noise vector
    GenL1 = InputLayer(shape=(None, CONTROLDIM), input_var=input_var)
    GenL2 = DenseLayer(GenL1, num_units=384 * 4 * 4)
    GenL2Reshaped = ReshapeLayer(GenL2, shape=(batch_size, 384, 4, 4))  # note: hard-codes the global batch_size
    GenL3 = TransposedConv2DLayer(GenL2Reshaped,
                                  num_filters=192,
                                  filter_size=(5, 5),
                                  stride=(2, 2),
                                  crop='same',
                                  output_size=(8, 8))
    GenL3BN = batch_norm(GenL3)
    GenL4 = TransposedConv2DLayer(GenL3BN,
                                  num_filters=96,
                                  filter_size=(5, 5),
                                  stride=(2, 2),
                                  crop='same',
                                  output_size=(16, 16))
    GenL4BN = batch_norm(GenL4)
    outputLayer = TransposedConv2DLayer(
        GenL4BN,
        num_filters=3,
        filter_size=(5, 5),
        stride=(2, 2),
        nonlinearity=lasagne.nonlinearities.tanh,
        crop='same',
        output_size=(32, 32))

    # fully-connected layer
    #layer = batch_norm(DenseLayer(layer, 1024))
    # project and reshape
    #layer = batch_norm(DenseLayer(layer, 128 * 7 * 7))
    #layer = ReshapeLayer(layer, ([0], 128, 7, 7))
    # two fractional-stride convolutions
    #layer = batch_norm(Deconv2DLayer(layer, 64, 5, stride=2, pad=2))
    #layer = Deconv2DLayer(layer, 1, 5, stride=2, pad=2,
    # nonlinearity=sigmoid)
    print("Generator output:", outputLayer.output_shape)
    return outputLayer
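
The output_size argument matters here because a stride-2 'same' transposed convolution is ambiguous: the corresponding forward convolution maps two different input sizes to the same output size, so the inverse must be told which one to produce. A small check of that ambiguity, assuming the 5 x 5 filters used above:

def conv_out(in_size, filter_size=5, stride=2, pad=2):
    # forward convolution output size with 'same'-style padding
    return (in_size + 2 * pad - filter_size) // stride + 1

assert conv_out(7) == conv_out(8) == 4     # both collapse to 4, hence output_size=(8, 8) etc.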
Example #7
 def test_defaults(self, DummyInputLayer, input, kernel, output, kwargs):
     from lasagne.layers import TransposedConv2DLayer
     b, c, h, w = input.shape
     input_layer = DummyInputLayer((b, c, h, w))
     layer = TransposedConv2DLayer(input_layer,
                                   num_filters=kernel.shape[0],
                                   filter_size=kernel.shape[2:],
                                   W=kernel.transpose(1, 0, 2, 3),
                                   **kwargs)
     actual = layer.get_output_for(input).eval()
     assert actual.shape == output.shape
     assert actual.shape == layer.output_shape
     assert np.allclose(actual, output)
Example #8
def transpose(incoming, conv, nonlinearity, *args, **kwargs):
    """ Convenience function to transpose a convolutional layer
    and use weight tying
    """
    return TransposedConv2DLayer(incoming,
                                 conv.input_shape[1],
                                 conv.filter_size,
                                 stride=conv.stride,
                                 crop=conv.pad,
                                 W=conv.W,
                                 flip_filters=not conv.flip_filters,
                                 nonlinearity=nonlinearity,
                                 *args,
                                 **kwargs)
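
A minimal usage sketch for this helper (hypothetical layer names, assuming the usual Lasagne imports): tying a decoder layer to an encoder convolution so both share the same W.

from lasagne.layers import InputLayer, Conv2DLayer
from lasagne.nonlinearities import linear

l_in = InputLayer((None, 3, 32, 32))
enc = Conv2DLayer(l_in, num_filters=16, filter_size=3, pad=1)
dec = transpose(enc, enc, linear)          # decoder reuses enc.W; no new filter weights
assert dec.output_shape == l_in.output_shape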
Example #9
 def test_with_nones(self, DummyInputLayer, input, kernel, output, kwargs):
     if kwargs.get('untie_biases', False):
         pytest.skip()
     from lasagne.layers import TransposedConv2DLayer
     b, c, h, w = input.shape
     input_layer = DummyInputLayer((None, c, None, None))
     layer = TransposedConv2DLayer(input_layer,
                                   num_filters=kernel.shape[0],
                                   filter_size=kernel.shape[2:],
                                   W=kernel.transpose(1, 0, 2, 3),
                                   **kwargs)
     assert layer.output_shape == (None, output.shape[1], None, None)
     actual = layer.get_output_for(input).eval()
     assert actual.shape == output.shape
     assert np.allclose(actual, output)
Example #10
        def deconv_net(input_layer, conv_layer_output_shapes):
            output = BatchNormLayer(
                DenseLayer(input_layer, 128 * 7 * 7 + self.n_mi_features))
            if self.n_mi_features != 0:
                deconv_input = SliceLayer(output,
                                          indices=slice(0, 128 * 7 * 7))
                mi_features = SliceLayer(output,
                                         indices=slice(
                                             128 * 7 * 7,
                                             128 * 7 * 7 + self.n_mi_features))

            else:
                deconv_input = output
                mi_features = None

            output = ReshapeLayer(deconv_input, (-1, 128, 7, 7))
            output = TransposedConv2DLayer(
                output,
                64,
                5,
                stride=2,
                crop='same',
                output_size=conv_layer_output_shapes[0])
            output = TransposedConv2DLayer(output,
                                           1,
                                           5,
                                           stride=2,
                                           crop='same',
                                           output_size=self.input_size,
                                           nonlinearity=sigmoid)
            output = ReshapeLayer(output, (-1, self.input_size**2))

            if mi_features is not None:
                output = ConcatLayer([output, mi_features], axis=1)

            return output
Example #11
def build_cnnae_network(input_shape):
    conv_filters = 16
    filter_size = 3
    pool_size = 2
    encode_size = input_shape[2] * 2

    l_in = InputLayer(shape=(None, input_shape[1], input_shape[2],
                             input_shape[3]))

    l_conv1 = Conv2DLayer(l_in,
                          num_filters=conv_filters,
                          filter_size=(filter_size, filter_size),
                          nonlinearity=None)

    l_pool1 = MaxPool2DLayer(l_conv1, pool_size=(pool_size, pool_size))

    l_dropout1 = DropoutLayer(l_pool1, p=0.5)

    l_reshape1 = ReshapeLayer(l_dropout1, shape=([0], -1))

    l_encode = DenseLayer(l_reshape1, name='encode', num_units=encode_size)

    l_decode = DenseLayer(l_encode,
                          W=l_encode.W.T,
                          num_units=l_reshape1.output_shape[1])

    l_reshape2 = ReshapeLayer(
        l_decode,
        shape=([0], conv_filters,
               int(np.sqrt(l_reshape1.output_shape[1] / conv_filters)),
               int(np.sqrt(l_reshape1.output_shape[1] / conv_filters))))

    l_unpool1 = Upscale2DLayer(l_reshape2, scale_factor=pool_size)

    l_de = TransposedConv2DLayer(l_unpool1,
                                 num_filters=l_conv1.input_shape[1],
                                 W=l_conv1.W,
                                 filter_size=l_conv1.filter_size,
                                 stride=l_conv1.stride,
                                 crop=l_conv1.pad,
                                 flip_filters=not l_conv1.flip_filters)

    l_output = ReshapeLayer(l_de, shape=([0], -1))

    return l_output
Example #12
 def test_defaults(self, DummyInputLayer, input, kernel, output, kwargs):
     from lasagne.layers import TransposedConv2DLayer
     b, c, h, w = input.shape
     input_layer = DummyInputLayer((b, c, h, w))
     layer = TransposedConv2DLayer(input_layer,
                                   num_filters=kernel.shape[0],
                                   filter_size=kernel.shape[2:],
                                   W=kernel.transpose(1, 0, 2, 3),
                                   **kwargs)
     actual = layer.get_output_for(input).eval()
     assert actual.shape == output.shape
     # layer.output_shape == actual.shape or None
     assert all(
         [s1 == s2 for (s1, s2) in zip(actual.shape, output.shape) if s2])
     assert np.allclose(actual, output)
     # Check get_output_shape_for for symbolic output
     if 'output_size' in kwargs and isinstance(kwargs['output_size'],
                                               T.Variable):
         assert all(el is None
                    for el in layer.get_output_shape_for(input.shape)[2:])
Example #13
def transposed_conv_layer(input,
                          n_filters,
                          stride,
                          name,
                          network_weights,
                          output_size,
                          nonlinearity=elu,
                          bn=False):

    layer = TransposedConv2DLayer(input,
                                  n_filters,
                                  filter_size=3,
                                  stride=stride,
                                  W=get_W(network_weights, name),
                                  b=get_b(network_weights, name),
                                  nonlinearity=nonlinearity,
                                  name=name,
                                  crop='same',
                                  output_size=output_size)
    if bn:
        layer = batch_norm(layer)
    return layer
Example #14
def build_model():
    """ Compile net architecture """

    l_in = lasagne.layers.InputLayer(shape=(None, INPUT_SHAPE[0],
                                            INPUT_SHAPE[1], INPUT_SHAPE[2]),
                                     name='Input')
    net1 = batch_norm(l_in)

    # --- preprocessing ---
    net1 = conv_bn(net1,
                   num_filters=10,
                   filter_size=1,
                   nonlinearity=nonlin,
                   pad='same')
    net1 = conv_bn(net1,
                   num_filters=1,
                   filter_size=1,
                   nonlinearity=nonlin,
                   pad='same',
                   name='color_deconv_preproc')

    # number of filters in first layer
    # decreased by factor 2 in each block
    nf0 = 16

    # --- encoder ---
    net1 = conv_bn(net1,
                   num_filters=nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')
    net1 = conv_bn(net1,
                   num_filters=nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')
    p1 = net1
    net1 = MaxPool2DLayer(net1, pool_size=2, stride=2, name='pool1')

    net1 = conv_bn(net1,
                   num_filters=2 * nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')
    net1 = conv_bn(net1,
                   num_filters=2 * nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')
    p2 = net1
    net1 = MaxPool2DLayer(net1, pool_size=2, stride=2, name='pool2')

    net1 = conv_bn(net1,
                   num_filters=4 * nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')
    net1 = conv_bn(net1,
                   num_filters=4 * nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')
    p3 = net1
    net1 = MaxPool2DLayer(net1, pool_size=2, stride=2, name='pool3')

    net1 = conv_bn(net1,
                   num_filters=8 * nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')
    net1 = conv_bn(net1,
                   num_filters=8 * nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')

    # --- decoder ---
    net1 = TransposedConv2DLayer(net1,
                                 num_filters=4 * nf0,
                                 filter_size=2,
                                 stride=2,
                                 name='upconv')
    net1 = ConcatLayer((p3, net1), name='concat')
    net1 = conv_bn(net1,
                   num_filters=4 * nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')
    net1 = conv_bn(net1,
                   num_filters=4 * nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')

    net1 = TransposedConv2DLayer(net1,
                                 num_filters=2 * nf0,
                                 filter_size=2,
                                 stride=2,
                                 name='upconv')
    net1 = ConcatLayer((p2, net1), name='concat')
    net1 = conv_bn(net1,
                   num_filters=2 * nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')
    net1 = conv_bn(net1,
                   num_filters=2 * nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')

    net1 = TransposedConv2DLayer(net1,
                                 num_filters=nf0,
                                 filter_size=2,
                                 stride=2,
                                 name='upconv')
    net1 = ConcatLayer((p1, net1), name='concat')
    net1 = conv_bn(net1,
                   num_filters=nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')
    net1 = conv_bn(net1,
                   num_filters=nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')

    net1 = Conv2DLayer(net1,
                       num_filters=1,
                       filter_size=1,
                       nonlinearity=sigmoid,
                       pad='same',
                       name='segmentation')

    return net1
Example #15
def build_net(input_dim=572, no_channels=3, seg_entities=2):
    """Implementation of 'U-Net: Convolutional Networks for Biomedical Image Segmentation',
       https://arxiv.org/pdf/1505.04597.pdf

    :param input_dim: x and y dimensions of 3D input
    :param no_channels: z dimension of 3D input
    :param seg_entities: number of classes to segment, i.e. number of categories per pixel for the softmax function
    """

    nonlin = rectify
    pad = 'valid'

    net = OrderedDict()

    net['input'] = InputLayer((None, no_channels, input_dim, input_dim))

    net['encode/conv1_1'] = ConvLayer(net['input'], 64, 3, pad=pad, nonlinearity=nonlin, W=HeNormal(gain="relu"))
    net['encode/conv1_2'] = ConvLayer(net['encode/conv1_1'], 64, 3, pad=pad, nonlinearity=nonlin, W=HeNormal(gain="relu"))
    net['encode/pool1'] = PoolLayer(net['encode/conv1_2'], 2)

    net['encode/conv2_1'] = ConvLayer(net['encode/pool1'], 128, 3, pad=pad, nonlinearity=nonlin, W=HeNormal(gain="relu"))
    net['encode/conv2_2'] = ConvLayer(net['encode/conv2_1'], 128, 3, pad=pad, nonlinearity=nonlin, W=HeNormal(gain="relu"))
    net['encode/pool2'] = PoolLayer(net['encode/conv2_2'], 2)

    net['encode/conv3_1'] = ConvLayer(net['encode/pool2'], 256, 3, pad=pad, nonlinearity=nonlin, W=HeNormal(gain="relu"))
    net['encode/conv3_2'] = ConvLayer(net['encode/conv3_1'], 256, 3, pad=pad, nonlinearity=nonlin, W=HeNormal(gain="relu"))
    net['encode/pool3'] = PoolLayer(net['encode/conv3_2'], 2)

    net['encode/conv4_1'] = ConvLayer(net['encode/pool3'], 512, 3, pad=pad, nonlinearity=nonlin, W=HeNormal(gain="relu"))
    net['encode/conv4_2'] = ConvLayer(net['encode/conv4_1'], 512, 3, pad=pad, nonlinearity=nonlin, W=HeNormal(gain="relu"))
    net['encode/pool4'] = PoolLayer(net['encode/conv4_2'], 2)

    net['encode/conv5_1'] = ConvLayer(net['encode/pool4'], 1024, 3, pad=pad, nonlinearity=nonlin, W=HeNormal(gain="relu"))
    net['encode/conv5_2'] = ConvLayer(net['encode/conv5_1'], 1024, 3, pad=pad, nonlinearity=nonlin, W=HeNormal(gain="relu"))

    net['decode/up_conv1'] = TransposedConv2DLayer(net['encode/conv5_2'], 512, 2, stride=2, crop='valid', nonlinearity=None)
    net['decode/concat_c4_u1'] = ConcatLayer([net['encode/conv4_2'], net['decode/up_conv1']], axis=1, cropping=(None, None, 'center', 'center'))
    net['decode/conv1_1'] = ConvLayer(net['decode/concat_c4_u1'], 512, 3, pad=pad, nonlinearity=nonlin, W=HeNormal(gain="relu"))
    net['decode/conv1_2'] = ConvLayer(net['decode/conv1_1'], 512, 3, pad=pad, nonlinearity=nonlin, W=HeNormal(gain="relu"))

    net['decode/up_conv2'] = TransposedConv2DLayer(net['decode/conv1_2'], 256, 2, stride=2, crop='valid', nonlinearity=None)
    net['decode/concat_c3_u2'] = ConcatLayer([net['encode/conv3_2'], net['decode/up_conv2']], axis=1, cropping=(None, None, 'center', 'center'))
    net['decode/conv2_1'] = ConvLayer(net['decode/concat_c3_u2'], 256, 3, pad=pad, nonlinearity=nonlin, W=HeNormal(gain="relu"))
    net['decode/conv2_2'] = ConvLayer(net['decode/conv2_1'], 256, 3, pad=pad, nonlinearity=nonlin, W=HeNormal(gain="relu"))

    net['decode/up_conv3'] = TransposedConv2DLayer(net['decode/conv2_2'], 128, 2, stride=2, crop='valid', nonlinearity=None)
    net['decode/concat_c2_u3'] = ConcatLayer([net['encode/conv2_2'], net['decode/up_conv3']], axis=1, cropping=(None, None, 'center', 'center'))
    net['decode/conv3_1'] = ConvLayer(net['decode/concat_c2_u3'], 128, 3, pad=pad, nonlinearity=nonlin, W=HeNormal(gain="relu"))
    net['decode/conv3_2'] = ConvLayer(net['decode/conv3_1'], 128, 3, pad=pad, nonlinearity=nonlin, W=HeNormal(gain="relu"))

    net['decode/up_conv4'] = TransposedConv2DLayer(net['decode/conv3_2'], 64, 2, stride=2, crop='valid', nonlinearity=None)
    net['decode/concat_c1_u4'] = ConcatLayer([net['encode/conv1_2'], net['decode/up_conv4']], axis=1, cropping=(None, None, 'center', 'center'))
    net['decode/conv4_1'] = ConvLayer(net['decode/concat_c1_u4'], 128, 3, pad=pad, nonlinearity=nonlin, W=HeNormal(gain="relu"))
    net['decode/conv4_2'] = ConvLayer(net['decode/conv4_1'], 128, 3, pad=pad, nonlinearity=nonlin, W=HeNormal(gain="relu"))

    net['seg_map'] = ConvLayer(net['decode/conv4_2'], seg_entities, 1, pad=pad, nonlinearity=None, W=HeNormal(gain="relu"))

    net['seg_map/dimshuffle'] = DimshuffleLayer(net['seg_map'], (1, 0, 2, 3))
    net['seg_map/reshape'] = ReshapeLayer(net['seg_map/dimshuffle'], (seg_entities, -1))
    net['seg_map/flattened'] = DimshuffleLayer(net['seg_map/reshape'], (1, 0))
    net['out'] = NonlinearityLayer(net['seg_map/flattened'], nonlinearity=softmax)

    return net
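
With pad='valid', every 3 x 3 convolution shrinks the map by 2 pixels, each pool halves it, and each 2 x 2 stride-2 transposed convolution doubles it, which yields the paper's 572 -> 388 shape reduction. A standalone sanity check of that arithmetic:

s = 572                      # input_dim, as in the paper
for _ in range(4):           # encoder blocks 1-4
    s = (s - 4) // 2         # two valid 3x3 convs, then a 2x2 pool
s -= 4                       # bottleneck convs 5_1 / 5_2
for _ in range(4):           # decoder blocks 1-4
    s = s * 2 - 4            # stride-2 up-conv, then two valid 3x3 convs
print(s)                     # -> 388, the seg_map spatial size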
Example #16
def build_model(in_shape=INPUT_SHAPE):
    """ Compile net architecture """
    nonlin = elu

    net1 = lasagne.layers.InputLayer(shape=(None, in_shape[0], in_shape[1],
                                            in_shape[2]),
                                     name='Input')

    # number of filters in first layer
    # decreased by factor 2 in each block
    nf0 = 8

    # --- encoder ---
    net1 = conv_bn(net1,
                   num_filters=nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')
    net1 = conv_bn(net1,
                   num_filters=nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')
    p1 = net1
    net1 = MaxPool2DLayer(net1, pool_size=2, stride=2)

    net1 = conv_bn(net1,
                   num_filters=2 * nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')
    net1 = conv_bn(net1,
                   num_filters=2 * nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')
    p2 = net1
    net1 = MaxPool2DLayer(net1, pool_size=2, stride=2)

    net1 = conv_bn(net1,
                   num_filters=4 * nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')
    net1 = conv_bn(net1,
                   num_filters=4 * nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')
    p3 = net1
    net1 = MaxPool2DLayer(net1, pool_size=2, stride=2)

    net1 = conv_bn(net1,
                   num_filters=8 * nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')
    net1 = conv_bn(net1,
                   num_filters=8 * nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')

    # --- decoder ---
    net1 = TransposedConv2DLayer(net1,
                                 num_filters=4 * nf0,
                                 filter_size=2,
                                 stride=2)
    net1 = batch_norm(net1)
    net1 = ElemwiseSumLayer((p3, net1))
    net1 = batch_norm(net1)
    net1 = conv_bn(net1,
                   num_filters=4 * nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')
    net1 = conv_bn(net1,
                   num_filters=4 * nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')
    net1 = DropoutLayer(net1, p=0.2)

    net1 = TransposedConv2DLayer(net1,
                                 num_filters=2 * nf0,
                                 filter_size=2,
                                 stride=2)
    net1 = batch_norm(net1)
    net1 = ElemwiseSumLayer((p2, net1))
    net1 = batch_norm(net1)
    net1 = conv_bn(net1,
                   num_filters=2 * nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')
    net1 = conv_bn(net1,
                   num_filters=2 * nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')
    net1 = DropoutLayer(net1, p=0.1)

    net1 = TransposedConv2DLayer(net1,
                                 num_filters=nf0,
                                 filter_size=2,
                                 stride=2)
    net1 = batch_norm(net1)
    net1 = ElemwiseSumLayer((p1, net1))
    net1 = batch_norm(net1)
    net1 = conv_bn(net1,
                   num_filters=nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')
    net1 = conv_bn(net1,
                   num_filters=nf0,
                   filter_size=3,
                   nonlinearity=nonlin,
                   pad='same')

    net1 = Conv2DLayer(net1,
                       num_filters=1,
                       filter_size=1,
                       nonlinearity=sigmoid,
                       pad='same')
    return net1
Example #17
def architecture_upconv_mp6(input_var, input_shape, n_conv_layers,
                            n_conv_filters):

    net = {}

    kwargs = dict(nonlinearity=lasagne.nonlinearities.elu,
                  W=lasagne.init.HeNormal())

    net['data'] = InputLayer(input_shape, input_var)
    print("\nLayer-by-layer output shapes of the upconvolutional network")
    print(net['data'].output_shape)

    # Stack of 3 x 3 convolution layers. Experimentally, placing the conv layers
    # at the start rather than in the middle worked better, though the reason is unclear.
    net['c1'] = batch_norm(
        Conv2DLayer(net['data'],
                    num_filters=64,
                    filter_size=3,
                    stride=1,
                    pad=1,
                    **kwargs))
    print(net['c1'].output_shape)
    '''net['c2'] = batch_norm(Conv2DLayer(net['c1'], num_filters= 64, filter_size= 3, stride = 1, pad=1, **kwargs))
    print(net['c2'].output_shape)
    net['c3'] = batch_norm(Conv2DLayer(net['c2'], num_filters= 64, filter_size= 3, stride = 1, pad=1, **kwargs))
    print(net['c3'].output_shape)'''
    '''net['c4'] = batch_norm(Conv2DLayer(net['c3'], num_filters= 64, filter_size= 3, stride = 1, pad=1, **kwargs))
    print(net['c4'].output_shape)'''
    '''net['c5'] = batch_norm(Conv2DLayer(net['c4'], num_filters= 64, filter_size= 3, stride = 1, pad=1, **kwargs))
    print(net['c5'].output_shape)'''

    # Bunch of transposed convolution layers
    net['uc1'] = batch_norm(
        TransposedConv2DLayer(net['c1'],
                              num_filters=32,
                              filter_size=4,
                              stride=2,
                              crop=1,
                              **kwargs))
    print(net['uc1'].output_shape)
    '''net['c1'] = batch_norm(Conv2DLayer(net['uc1'], num_filters= 32, filter_size= 3, stride = 1, pad=1, **kwargs))
    print(net['c1'].output_shape)'''

    net['uc2'] = batch_norm(
        TransposedConv2DLayer(net['uc1'],
                              num_filters=16,
                              filter_size=4,
                              stride=2,
                              crop=1,
                              **kwargs))
    print(net['uc2'].output_shape)
    '''net['c2'] = batch_norm(Conv2DLayer(net['uc2'], num_filters= 16, filter_size= 3, stride = 1, pad=1, **kwargs))
    print(net['c2'].output_shape)'''

    net['uc3'] = batch_norm(
        TransposedConv2DLayer(net['uc2'],
                              num_filters=8,
                              filter_size=4,
                              stride=2,
                              crop=1,
                              **kwargs))
    print(net['uc3'].output_shape)
    '''net['c3'] = batch_norm(Conv2DLayer(net['uc3'], num_filters= 8, filter_size= 3, stride = 1, pad=1, **kwargs))
    print(net['c3'].output_shape)'''

    net['uc4'] = batch_norm(
        TransposedConv2DLayer(net['uc3'],
                              num_filters=1,
                              filter_size=4,
                              stride=2,
                              crop=1,
                              **kwargs))
    print(net['uc4'].output_shape)

    # slicing the output to 115 x 80 size
    net['s1'] = lasagne.layers.SliceLayer(net['uc4'], slice(0, 115), axis=-2)
    print(net['s1'].output_shape)
    net['out'] = lasagne.layers.SliceLayer(net['s1'], slice(0, 80), axis=-1)
    print(net['out'].output_shape)
    print("Number of parameter to be learned: %d" %
          (lasagne.layers.count_params(net['out'])))

    return net['out']
Example #18
    def _build_gen(self):
        size = 64
        s, s2, s4, s8, s16 = size, size // 2, size // 4, size // 8, size // 16
        inputs = OrderedDict()
        inputs['c'] = InputLayer((None, 843))
        inputs['v'] = InputLayer((None, 4))
        inputs['t'] = InputLayer((None, 8))

        layer_c = inputs['c']
        layer_c = DenseLayer(layer_c, 512, nonlinearity=rectify)
        layer_c.params[layer_c.W].add('dense')
        layer_c = DenseLayer(layer_c, 512, nonlinearity=rectify)
        layer_c.params[layer_c.W].add('dense')

        layer_v = inputs['v']
        layer_v = DenseLayer(layer_v, 512, nonlinearity=rectify)
        layer_v.params[layer_v.W].add('dense')
        layer_v = DenseLayer(layer_v, 512, nonlinearity=rectify)
        layer_v.params[layer_v.W].add('dense')

        layer_t = inputs['t']
        layer_t = DenseLayer(layer_t, 512, nonlinearity=rectify)
        layer_t.params[layer_t.W].add('dense')
        layer_t = DenseLayer(layer_t, 512, nonlinearity=rectify)
        layer_t.params[layer_t.W].add('dense')

        layer = ConcatLayer([layer_c, layer_v, layer_t])
        layer = DenseLayer(layer, 1024, nonlinearity=rectify)
        layer.params[layer.W].add('dense')
        layer = DenseLayer(layer, 1024, nonlinearity=rectify)
        layer.params[layer.W].add('dense')

        layer = DenseLayer(layer, 768 * s16 * s16, nonlinearity=rectify)
        layer.params[layer.W].add('dense')
        layer = ReshapeLayer(layer, (-1, 768, s16, s16))

        layer = InstanceNormalization(layer, True)
        layer = weight_norm(TransposedConv2DLayer(layer,
                                                  384,
                                                  5,
                                                  2,
                                                  'same',
                                                  output_size=(s8, s8),
                                                  nonlinearity=None,
                                                  b=None),
                            transposed=True)
        if self.reg: layer = dropout(layer)
        layer = NonlinearityLayer(layer, rectify)
        layer = weight_norm(TransposedConv2DLayer(layer,
                                                  256,
                                                  5,
                                                  2,
                                                  'same',
                                                  output_size=(s4, s4),
                                                  nonlinearity=None,
                                                  b=None),
                            transposed=True)
        if self.reg: layer = dropout(layer)
        layer = NonlinearityLayer(layer, rectify)
        layer = weight_norm(TransposedConv2DLayer(layer,
                                                  192,
                                                  5,
                                                  2,
                                                  'same',
                                                  output_size=(s2, s2),
                                                  nonlinearity=None,
                                                  b=None),
                            transposed=True)
        if self.reg: layer = dropout(layer)
        layer = NonlinearityLayer(layer, rectify)

        layer_img = TransposedConv2DLayer(layer,
                                          3,
                                          5,
                                          2,
                                          'same',
                                          output_size=(s, s),
                                          nonlinearity=tanh)
        layer_msk = TransposedConv2DLayer(layer,
                                          1,
                                          5,
                                          2,
                                          'same',
                                          output_size=(s, s),
                                          nonlinearity=sigmoid)

        layer = ConcatLayer([layer_img, layer_msk])
        outputs = OrderedDict()
        outputs['x'] = layer
        self.gen_inputs = inputs
        self.gen_outputs = outputs
Example #19
def build_model_L(in_channel=3, out_channel=3, kernel_size=(3,3), stride=(1,1), pad='valid', dilation=(1,1), num_groups=1):
    input_var = tensor.ftensor4('x')  # (B, C, H, W)
    input0 = InputLayer(shape=(None, in_channel, None, None), input_var=input_var, name='input0')
    tconv0 = TransposedConv2DLayer(input0, num_filters=out_channel, filter_size=kernel_size,
                                   stride=stride, crop=pad, nonlinearity=LACT.linear,
                                   name='tconv0')
    return tconv0
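
A minimal usage sketch (assuming Theano and NumPy are available; not part of the original example): compile the layer into a function and run it on a dummy batch.

import numpy as np
import theano
import lasagne

net = build_model_L(in_channel=3, out_channel=3)
x = net.input_layer.input_var                        # the ftensor4 created in build_model_L
fn = theano.function([x], lasagne.layers.get_output(net))
out = fn(np.zeros((1, 3, 8, 8), dtype=np.float32))
print(out.shape)                                     # (1, 3, 10, 10): 'valid' crop grows 8 -> 10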
Example #20
def architecture_upconv_fc8(input_var, input_shape):
    """
    model architecture of the fc8 feature inverter
    """

    net = {}
    # base number of filters for the upconv layers
    n_filters = 64

    net['data'] = InputLayer(input_shape, input_var)
    print("\n")
    print("Input data shape")
    print(net['data'].output_shape)
    print("Layer-wise output shape")
    net['fc1'] = batch_norm(
        DenseLayer(net['data'],
                   num_units=64,
                   W=lasagne.init.HeNormal(),
                   nonlinearity=lasagne.nonlinearities.elu))
    print(net['fc1'].output_shape)
    net['fc2'] = batch_norm(
        DenseLayer(net['fc1'],
                   num_units=256,
                   W=lasagne.init.HeNormal(),
                   nonlinearity=lasagne.nonlinearities.elu))
    print(net['fc2'].output_shape)
    net['rs1'] = ReshapeLayer(
        net['fc2'], (32, 16, 4, 4)
    )  # CAUTION: assuming that the shape is batch x depth x row x columns

    kwargs = dict(nonlinearity=lasagne.nonlinearities.elu,
                  W=lasagne.init.HeNormal())

    net['uc1'] = batch_norm(
        TransposedConv2DLayer(net['rs1'],
                              num_filters=n_filters,
                              filter_size=4,
                              stride=2,
                              crop=1,
                              **kwargs))
    print(net['uc1'].output_shape)
    net['c1'] = batch_norm(
        Conv2DLayer(net['uc1'],
                    num_filters=n_filters,
                    filter_size=3,
                    stride=1,
                    pad=1,
                    **kwargs))
    print(net['c1'].output_shape)

    net['uc2'] = batch_norm(
        TransposedConv2DLayer(net['c1'],
                              num_filters=n_filters // 2,
                              filter_size=4,
                              stride=2,
                              crop=1,
                              **kwargs))
    print(net['uc2'].output_shape)
    net['c2'] = batch_norm(
        Conv2DLayer(net['uc2'],
                    num_filters=n_filters // 2,
                    filter_size=3,
                    stride=1,
                    pad=1,
                    **kwargs))
    print(net['c2'].output_shape)

    net['uc3'] = batch_norm(
        TransposedConv2DLayer(net['c2'],
                              num_filters=n_filters // 4,
                              filter_size=4,
                              stride=2,
                              crop=1,
                              **kwargs))
    print(net['uc3'].output_shape)
    net['c3'] = batch_norm(
        Conv2DLayer(net['uc3'],
                    num_filters=n_filters // 4,
                    filter_size=3,
                    stride=1,
                    pad=1,
                    **kwargs))
    print(net['c3'].output_shape)

    net['uc4'] = batch_norm(
        TransposedConv2DLayer(net['c3'],
                              num_filters=n_filters // 8,
                              filter_size=4,
                              stride=2,
                              crop=1,
                              **kwargs))
    print(net['uc4'].output_shape)
    net['c4'] = batch_norm(
        Conv2DLayer(net['uc4'],
                    num_filters=n_filters // 8,
                    filter_size=3,
                    stride=1,
                    pad=1,
                    **kwargs))
    print(net['c4'].output_shape)

    net['uc5'] = TransposedConv2DLayer(net['c4'],
                                       num_filters=1,
                                       filter_size=4,
                                       stride=2,
                                       crop=1,
                                       **kwargs)
    print(net['uc5'].output_shape)

    # slicing the output to 115 x 80 size
    net['s1'] = lasagne.layers.SliceLayer(net['uc5'], slice(0, 115), axis=-2)
    net['out'] = lasagne.layers.SliceLayer(net['s1'], slice(0, 80), axis=-1)
    print(net['out'].output_shape)

    print("Number of parameter to be learned: %d\n" %
          (lasagne.layers.count_params(net['out'])))

    return net['out']
Example #21
def Decoder(latent_var, use_batch_norm=False):
    net = InputLayer(shape=(None, 20), input_var=latent_var)

    net = DenseLayer(net,
                     num_units=64,
                     nonlinearity=lasagne.nonlinearities.elu)

    net = DenseLayer(net,
                     num_units=64 * 16,
                     nonlinearity=lasagne.nonlinearities.elu)

    if use_batch_norm:
        net = BatchNormLayer(net)

    net = ReshapeLayer(net, (-1, 16, 8, 8))

    net = Conv2DLayer(net,
                      num_filters=32,
                      filter_size=(3, 3),
                      nonlinearity=lasagne.nonlinearities.elu)
    if use_batch_norm:
        net = BatchNormLayer(net)

    net = Conv2DLayer(net,
                      num_filters=32,
                      filter_size=(3, 3),
                      nonlinearity=lasagne.nonlinearities.elu)
    if use_batch_norm:
        net = BatchNormLayer(net)

    net = TransposedConv2DLayer(net,
                                8,
                                4,
                                stride=(2, 2),
                                nonlinearity=lasagne.nonlinearities.elu)
    net = Conv2DLayer(net,
                      num_filters=32,
                      filter_size=(3, 3),
                      nonlinearity=lasagne.nonlinearities.elu)
    if use_batch_norm:
        net = BatchNormLayer(net)

    net = TransposedConv2DLayer(net,
                                8,
                                4,
                                stride=(2, 2),
                                nonlinearity=lasagne.nonlinearities.elu)
    net = Conv2DLayer(net,
                      num_filters=32,
                      filter_size=(3, 3),
                      nonlinearity=lasagne.nonlinearities.elu)
    net = TransposedConv2DLayer(net,
                                8,
                                4,
                                stride=(2, 2),
                                nonlinearity=lasagne.nonlinearities.elu)
    net = Conv2DLayer(net,
                      num_filters=32,
                      filter_size=(5, 5),
                      nonlinearity=lasagne.nonlinearities.elu)
    if use_batch_norm:
        net = BatchNormLayer(net)

    net = Conv2DLayer(net,
                      num_filters=32,
                      filter_size=(3, 3),
                      nonlinearity=lasagne.nonlinearities.elu)

    if use_batch_norm:
        net = BatchNormLayer(net)

    net = Conv2DLayer(net,
                      num_filters=8,
                      filter_size=(1, 1),
                      nonlinearity=lasagne.nonlinearities.elu)
    if use_batch_norm:
        net = BatchNormLayer(net)

    net = Conv2DLayer(net,
                      num_filters=1,
                      filter_size=(1, 1),
                      nonlinearity=lasagne.nonlinearities.sigmoid)

    return net
Example #22
def VAE(n_input_channels=1,
        input_var=None,
        BATCH_SIZE=None,
        pad='same',
        input_dim=(128, 128),
        base_n_filters=64,
        nonlinearity=lasagne.nonlinearities.rectify):
    net = OrderedDict()
    net['input'] = InputLayer(
        (BATCH_SIZE, n_input_channels, input_dim[0], input_dim[1]), input_var)
    net['contr_1'] = batch_norm(
        ConvLayer(net['input'],
                  base_n_filters,
                  3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=lasagne.init.HeNormal(gain="relu")))
    net['contr__2_1'] = batch_norm(
        ConvLayer(net['contr_1'],
                  base_n_filters,
                  3,
                  stride=(2, 2),
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=lasagne.init.HeNormal(gain="relu")))
    net['contr__2_2'] = batch_norm(
        ConvLayer(net['contr__2_1'],
                  base_n_filters,
                  3,
                  stride=(1, 1),
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=lasagne.init.HeNormal(gain="relu")))

    net['contr__3_1'] = batch_norm(
        ConvLayer(net['contr__2_2'],
                  base_n_filters,
                  3,
                  stride=(2, 2),
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=lasagne.init.HeNormal(gain="relu")))
    net['contr__3_2'] = batch_norm(
        ConvLayer(net['contr__3_1'],
                  base_n_filters,
                  3,
                  stride=(1, 1),
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=lasagne.init.HeNormal(gain="relu")))

    net['contr__4_1'] = batch_norm(
        ConvLayer(net['contr__3_2'],
                  base_n_filters,
                  3,
                  stride=(2, 2),
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=lasagne.init.HeNormal(gain="relu")))
    net['contr__4_2'] = batch_norm(
        ConvLayer(net['contr__4_1'],
                  base_n_filters,
                  3,
                  stride=(1, 1),
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=lasagne.init.HeNormal(gain="relu")))

    net['contr__5_1'] = batch_norm(
        ConvLayer(net['contr__4_2'],
                  base_n_filters,
                  3,
                  stride=(2, 2),
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=lasagne.init.HeNormal(gain="relu")))
    net['contr__5_2'] = batch_norm(
        ConvLayer(net['contr__5_1'],
                  base_n_filters,
                  3,
                  stride=(1, 1),
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=lasagne.init.HeNormal(gain="relu")))
    net['contr__6_1'] = batch_norm(
        ConvLayer(net['contr__5_2'],
                  base_n_filters,
                  3,
                  stride=(2, 2),
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=lasagne.init.HeNormal(gain="relu")))
    net['contr__6_2'] = batch_norm(
        ConvLayer(net['contr__6_1'],
                  base_n_filters,
                  3,
                  stride=(1, 1),
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=lasagne.init.HeNormal(gain="relu")))
    net['dimshuffle'] = DimshuffleLayer(net['contr__6_2'],
                                        (0, 2, 3, 1))  # change dimensions
    batch_size, n_rows, n_cols, _ = lasagne.layers.get_output(
        net['dimshuffle']).shape
    net['flatten'] = ReshapeLayer(
        net['dimshuffle'], (BATCH_SIZE, 121))  # flatten the feature map to (BATCH_SIZE, 121)

    net['fc_1'] = DenseLayer(net['flatten'],
                             num_units=64,
                             nonlinearity=nonlinearity)
    net['fc_2'] = DenseLayer(net['fc_1'], 121, nonlinearity=nonlinearity)
    net['flattened_2d'] = ReshapeLayer(
        net['fc_2'], (batch_size, n_input_channels, n_rows, n_cols))

    net['decode__1_1'] = batch_norm(
        ConvLayer(net['flattened_2d'],
                  base_n_filters,
                  3,
                  stride=(1, 1),
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=lasagne.init.HeNormal(gain="relu")))
    net['decode__1_2'] = batch_norm(
        TransposedConv2DLayer(net['decode__1_1'],
                              base_n_filters,
                              4,
                              stride=(2, 2),
                              nonlinearity=nonlinearity,
                              crop=1,
                              W=lasagne.init.HeNormal(gain="relu")))

    net['decode__2_1'] = batch_norm(
        ConvLayer(net['decode__1_2'],
                  base_n_filters,
                  3,
                  stride=(1, 1),
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=lasagne.init.HeNormal(gain="relu")))
    net['decode__2_2'] = batch_norm(
        TransposedConv2DLayer(net['decode__2_1'],
                              base_n_filters,
                              4,
                              stride=(2, 2),
                              nonlinearity=nonlinearity,
                              crop=1,
                              W=lasagne.init.HeNormal(gain="relu")))

    net['decode__3_1'] = batch_norm(
        ConvLayer(net['decode__2_2'],
                  base_n_filters,
                  3,
                  stride=(1, 1),
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=lasagne.init.HeNormal(gain="relu")))
    net['decode__3_2'] = batch_norm(
        TransposedConv2DLayer(net['decode__3_1'],
                              base_n_filters,
                              4,
                              stride=(2, 2),
                              nonlinearity=nonlinearity,
                              crop=1,
                              W=lasagne.init.HeNormal(gain="relu")))

    net['decode__4_1'] = batch_norm(
        ConvLayer(net['decode__3_2'],
                  base_n_filters,
                  3,
                  stride=(1, 1),
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=lasagne.init.HeNormal(gain="relu")))
    net['decode__4_2'] = batch_norm(
        TransposedConv2DLayer(net['decode__4_1'],
                              base_n_filters,
                              4,
                              stride=(2, 2),
                              nonlinearity=nonlinearity,
                              crop=1,
                              W=lasagne.init.HeNormal(gain="relu")))

    net['decode__5_1'] = batch_norm(
        ConvLayer(net['decode__4_2'],
                  base_n_filters,
                  3,
                  stride=(1, 1),
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=lasagne.init.HeNormal(gain="relu")))
    net['decode__5_2'] = batch_norm(
        TransposedConv2DLayer(net['decode__5_1'],
                              base_n_filters,
                              4,
                              stride=(2, 2),
                              nonlinearity=nonlinearity,
                              crop=1,
                              W=lasagne.init.HeNormal(gain="relu")))
    return net['decode__5_2']
Example #23
def build_mitosis_encoder(input_shape, encoding_size=32, withst=False):
    # Parameters
    filter_size = (3, 3)
    num_filters = 32
    pool_size = (2, 2)
    # Localization Network

    l_input = InputLayer(shape=(None, input_shape[1], input_shape[2],
                                input_shape[3]))
    l_conv1 = Conv2DLayer(l_input,
                          num_filters=num_filters,
                          filter_size=filter_size)
    l_conv2 = Conv2DLayer(l_conv1,
                          num_filters=num_filters,
                          filter_size=filter_size)
    l_pool1 = MaxPool2DLayer(l_conv2, pool_size=pool_size)
    l_pipe1_layer = l_pool1  # keep a handle on the encoder output for the decoder

    # ST Network
    if withst:
        # ST Params
        b = np.zeros((2, 3), dtype=theano.config.floatX)
        b[0, 0] = 1
        b[1, 1] = 1
        b = b.flatten()

        # ST Layers
        st_encode1 = DenseLayer(l_pool1,
                                num_units=50,
                                W=lasagne.init.HeUniform('relu'))
        st_encode2 = DenseLayer(st_encode1,
                                num_units=6,
                                b=b,
                                W=lasagne.init.Constant(0.0))
        l_trans1 = TransformerLayer(l_input, st_encode2, downsample_factor=1.0)

        # Localization Network

        st_conv1 = Conv2DLayer(l_trans1,
                               num_filters=num_filters,
                               filter_size=filter_size)
        st_conv2 = Conv2DLayer(st_conv1,
                               num_filters=num_filters,
                               filter_size=filter_size)
        st_pool1 = MaxPool2DLayer(st_conv2, pool_size=pool_size)
        l_pipe1_layer = st_pool1

    # Encoding Step
    l_reshape1 = ReshapeLayer(l_pipe1_layer, shape=([0], -1))
    l_encode = DenseLayer(l_reshape1,
                          num_units=encoding_size,
                          W=lasagne.init.HeUniform('relu'),
                          name='encoder')

    # Decoding Step
    l_decode = DenseLayer(l_encode,
                          W=l_encode.W.T,
                          num_units=l_reshape1.output_shape[1])
    l_reshape2 = ReshapeLayer(
        l_decode,
        shape=([0], num_filters,
               int(np.sqrt(l_reshape1.output_shape[1] / num_filters)),
               int(np.sqrt(l_reshape1.output_shape[1] / num_filters))))

    # Deconv Network
    l_unpool1 = Upscale2DLayer(l_reshape2, scale_factor=pool_size)
    l_deconv2 = TransposedConv2DLayer(l_unpool1,
                                      num_filters=l_conv2.input_shape[1],
                                      W=l_conv2.W,
                                      filter_size=l_conv2.filter_size,
                                      stride=l_conv2.stride,
                                      crop=l_conv2.pad,
                                      flip_filters=not l_conv2.flip_filters)

    l_deconv1 = TransposedConv2DLayer(l_deconv2,
                                      num_filters=l_conv1.input_shape[1],
                                      W=l_conv1.W,
                                      filter_size=l_conv1.filter_size,
                                      stride=l_conv1.stride,
                                      crop=l_conv1.pad,
                                      flip_filters=not l_conv1.flip_filters)

    return l_deconv1
Example #24
def build_network(batch_size, z_shape, img_height, img_width,
                  conv_nonlinearity, dense_nonlinearity):
    # Draws heavy inspiration from ResNet
    num_filters = 32
    filter_shape = (5, 5)

    l_in = InputLayer((batch_size, 1, z_shape))

    # note: these overwrite the nonlinearities passed in as arguments
    dense_nonlinearity = lasagne.nonlinearities.rectify
    conv_nonlinearity = lasagne.nonlinearities.rectify

    config = {
        'conv_1_repeats': 0,
        'conv_2_repeats': 0,
        'conv_3_repeats': 0,
        'conv_4_repeats': 0
    }

    #####################
    ### Decoding half ###
    #####################
    h_test = 2
    w_test = 5

    dec_2_size = h_test * w_test * num_filters * 8

    l_hid_dec_2 = batch_norm(
        DenseLayer(l_in, dec_2_size, nonlinearity=dense_nonlinearity))
    l_dec_reshape = ReshapeLayer(
        l_hid_dec_2,
        [batch_size, dec_2_size // h_test // w_test, h_test, w_test])

    conv_1 = batch_norm(
        TransposedConv2DLayer(l_dec_reshape,
                              num_filters * 8,
                              filter_shape,
                              nonlinearity=conv_nonlinearity,
                              untie_biases=True))
    for _ in range(config['conv_1_repeats']):
        conv_1 = batch_norm(
            TransposedConv2DLayer(conv_1,
                                  num_filters * 8,
                                  filter_shape,
                                  nonlinearity=conv_nonlinearity,
                                  untie_biases=True,
                                  crop='same'))

    conv_2 = batch_norm(
        TransposedConv2DLayer(conv_1,
                              num_filters * 4,
                              filter_shape,
                              nonlinearity=conv_nonlinearity,
                              untie_biases=True,
                              stride=(2, 2),
                              crop='same'))
    for _ in range(config['conv_2_repeats']):
        conv_2 = batch_norm(
            TransposedConv2DLayer(conv_2,
                                  num_filters * 4,
                                  filter_shape,
                                  nonlinearity=conv_nonlinearity,
                                  untie_biases=True,
                                  crop='same'))

    conv_3 = batch_norm(
        TransposedConv2DLayer(conv_2,
                              num_filters * 2,
                              filter_shape,
                              nonlinearity=conv_nonlinearity,
                              untie_biases=True,
                              stride=(2, 2),
                              crop='same'))
    for _ in range(config['conv_3_repeats']):
        conv_3 = batch_norm(
            TransposedConv2DLayer(conv_3,
                                  num_filters * 2,
                                  filter_shape,
                                  nonlinearity=conv_nonlinearity,
                                  untie_biases=True,
                                  crop='same'))

    conv_4 = batch_norm(
        TransposedConv2DLayer(conv_3,
                              num_filters,
                              filter_shape,
                              nonlinearity=conv_nonlinearity,
                              untie_biases=True,
                              stride=(2, 2),
                              crop='same'))
    for _ in range(config['conv_4_repeats']):
        conv_4 = batch_norm(
            TransposedConv2DLayer(conv_4,
                                  num_filters,
                                  filter_shape,
                                  nonlinearity=conv_nonlinearity,
                                  untie_biases=True,
                                  crop='same'))

    l_out = batch_norm(
        TransposedConv2DLayer(conv_4,
                              1,
                              filter_shape,
                              nonlinearity=lasagne.nonlinearities.sigmoid,
                              untie_biases=True,
                              crop='same'))

    return l_in, l_out
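
For the default h_test=2 and w_test=5, the stack above ends at a 41 x 65 map: the first transposed convolution (stride 1, no crop) adds filter_size - 1, and each stride-2 'same' block maps s to 2s - 1. A standalone trace using the usual output-size formula:

def tconv_out(in_size, filter_size=5, stride=1, crop=0):
    return (in_size - 1) * stride - 2 * crop + filter_size

h, w = 2, 5                                 # h_test, w_test
h, w = tconv_out(h), tconv_out(w)           # conv_1 (stride 1, crop 0): 6 x 9
for _ in range(3):                          # conv_2..conv_4: stride 2, crop='same' (= 2)
    h, w = tconv_out(h, 5, 2, 2), tconv_out(w, 5, 2, 2)
print(h, w)                                 # -> 41 65; l_out ('same', stride 1) keeps the size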