Example #1
def build_st_spline_network(input_shape):
    W = b = lasagne.init.Constant(0.0)  # zero init: the regressor predicts zero control-point offsets, so the warp starts at the identity
    num_points = 4
    num_filters = 64
    filter_size = (3, 3)
    pool_size = (2, 2)

    l_in = InputLayer(shape=(None, input_shape[1], input_shape[2],
                             input_shape[3]))

    l_conv1 = Conv2DLayer(l_in,
                          num_filters=num_filters,
                          filter_size=filter_size)

    l_pool1 = MaxPool2DLayer(l_conv1, pool_size=pool_size)

    l_conv2 = Conv2DLayer(l_pool1,
                          num_filters=num_filters,
                          filter_size=filter_size)

    l_pool2 = MaxPool2DLayer(l_conv2, pool_size=pool_size)

    l_dense1 = DenseLayer(l_pool2, num_units=128)

    l_dense2 = DenseLayer(l_dense1,
                          num_units=num_points * 2,
                          W=W,
                          b=b,
                          nonlinearity=None)

    l_st = TPSTransformerLayer(l_in, l_dense2, control_points=num_points)

    l_output = ReshapeLayer(l_st, shape=([0], -1))

    return l_output
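A minimal usage sketch for this builder (my own illustration; the 28x28 single-channel shape and variable names are assumptions, not from the source):

import numpy as np
import theano
import theano.tensor as T
import lasagne

network = build_st_spline_network((None, 1, 28, 28))
X = T.tensor4('X')
# Compile a deterministic forward pass through the TPS transformer.
output = lasagne.layers.get_output(network, X, deterministic=True)
forward = theano.function([X], output)
warped = forward(np.zeros((2, 1, 28, 28), dtype=theano.config.floatX))  # -> shape (2, 784)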
Example #2
def create_network():
    l = 1000  # width of the convolutional branch; the trailing 1024 columns feed the dense branch
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l + 1024))
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis=-1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis=-1)
    layer2_3 = SliceLayer(layer2_2, indices=slice(0, 4), axis=-2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1,
                         num_filters=kernel1,
                         filter_size=(4, test_size1))
    layer4 = Conv2DLayer(layer3,
                         num_filters=kernel1,
                         filter_size=(1, test_size1))
    layer5 = Conv2DLayer(layer4,
                         num_filters=kernel1,
                         filter_size=(1, test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size=(1, pool_size))
    layer7 = Conv2DLayer(layer6,
                         num_filters=kernel2,
                         filter_size=(1, test_size2))
    layer8 = Conv2DLayer(layer7,
                         num_filters=kernel2,
                         filter_size=(1, test_size2))
    layer9 = Conv2DLayer(layer8,
                         num_filters=kernel2,
                         filter_size=(1, test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size=(1, pool_size))
    layer11 = Conv2DLayer(layer10,
                          num_filters=kernel3,
                          filter_size=(1, test_size3))
    layer12 = Conv2DLayer(layer11,
                          num_filters=kernel3,
                          filter_size=(1, test_size3))
    layer13 = Conv2DLayer(layer12,
                          num_filters=kernel3,
                          filter_size=(1, test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size=(1, pool_size))
    layer14_d = DenseLayer(layer14, num_units=256)
    layer3_2 = DenseLayer(layer2_f, num_units=128)
    layer15 = ConcatLayer([layer14_d, layer3_2])
    layer16 = DropoutLayer(layer15, p=0.5)
    layer17 = DenseLayer(layer16, num_units=256)
    network = DenseLayer(layer17, num_units=2, nonlinearity=softmax)
    return network
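The builder reads one packed tensor and splits it internally: columns 0..l-1 form the convolutional branch, and the trailing 1024 columns are flattened into the dense branch. A sketch of packing such an input (the one-hot-sequence interpretation is my assumption):

import numpy as np

seq = np.zeros((8, 1, 4, 1000), dtype='float32')    # sequence branch, e.g. one-hot rows
extra = np.zeros((8, 1, 4, 1024), dtype='float32')  # auxiliary-feature branch
packed = np.concatenate([seq, extra], axis=-1)      # shape (8, 1, 4, 2024) matches the InputLayer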
Example #3
def build_st_network(b_size, input_shape, withdisc=True):
    # General Params
    num_filters = 64
    filter_size = (3, 3)
    pool_size = (2, 2)

    # ST Param
    b = np.zeros((2, 3), dtype=theano.config.floatX)
    b[0, 0] = 1
    b[1, 1] = 1
    b = b.flatten()  # identity transform
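    # As a 2x3 matrix, b is [[1, 0, 0], [0, 1, 0]]; flattened it becomes
    # [1, 0, 0, 0, 1, 0], so the zero-weight regressor below outputs exactly
    # this identity transform at initialisation.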

    # Localization Network
    l_in = InputLayer(shape=(None, input_shape[1], input_shape[2],
                             input_shape[3]))

    l_conv1 = Conv2DLayer(l_in,
                          num_filters=num_filters,
                          filter_size=filter_size)

    l_pool1 = MaxPool2DLayer(l_conv1, pool_size=pool_size)

    l_conv2 = Conv2DLayer(l_pool1,
                          num_filters=num_filters,
                          filter_size=filter_size)

    l_pool2 = MaxPool2DLayer(l_conv2, pool_size=pool_size)

    l_loc = DenseLayer(l_pool2, num_units=64, W=lasagne.init.HeUniform('relu'))

    l_param_reg = DenseLayer(l_loc,
                             num_units=6,
                             b=b,
                             nonlinearity=lasagne.nonlinearities.linear,
                             W=lasagne.init.Constant(0.0),
                             name='param_regressor')

    if withdisc:
        l_dis = DiscreteLayer(l_param_reg,
                              start=Constant(-3.),
                              stop=Constant(3.),
                              linrange=Constant(50.))
    else:
        l_dis = l_param_reg

    # Transformer Network
    l_trans = TransformerLayer(l_in, l_dis, downsample_factor=1.0)

    final = ReshapeLayer(l_trans, shape=([0], -1))
    return final
Example #4
def net_lenet5(input_shape, nclass):
    input_x = T.tensor4("input")
    target_y = T.vector("target", dtype='int32')

    net = ll.InputLayer(input_shape, input_x)

    net = ConvLayer(net, 20, 5, W=init.Normal())
    net = MaxPool2DLayer(net, 2)

    net = ConvLayer(net, 50, 5, W=init.Normal())
    net = MaxPool2DLayer(net, 2)

    net = ll.DenseLayer(net, 500, W=init.Normal())
    net = ll.DenseLayer(net, nclass, W=init.Normal(), nonlinearity=nl.softmax)

    return net, input_x, target_y, 1
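A sketch of wiring the returned tuple into a training function (categorical cross-entropy and plain SGD are my choices, not taken from the source):

import theano
import lasagne
import lasagne.layers as ll

net, input_x, target_y, _ = net_lenet5((None, 1, 28, 28), nclass=10)
prediction = ll.get_output(net)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_y).mean()
params = ll.get_all_params(net, trainable=True)
updates = lasagne.updates.sgd(loss, params, learning_rate=0.01)
train_fn = theano.function([input_x, target_y], loss, updates=updates)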
Example #5
    def _build_middle(self,
                      l_in,
                      num_conv_layers=1,
                      num_dense_layers=1,
                      **kwargs):
        assert len(l_in.shape) == 4, 'InputLayer shape must be (batch_size, channels, width, height) -- ' \
                                     'reshape data or use RGB format?'

        l_bottom = l_in
        for i in range(num_conv_layers):
            conv_kwargs = self._extract_layer_kwargs('c', i, kwargs)
            if 'border_mode' not in conv_kwargs:
                conv_kwargs['border_mode'] = 'same'
            has_max_pool = conv_kwargs.pop('mp', False)
            l_bottom = Conv2DLayer(l_bottom,
                                   W=HeUniform(gain='relu'),
                                   **conv_kwargs)

            if has_max_pool:
                max_pool_kwargs = self._extract_layer_kwargs('m', i, kwargs)
                if 'pool_size' not in max_pool_kwargs:
                    max_pool_kwargs['pool_size'] = (2, 2)
                l_bottom = MaxPool2DLayer(l_bottom, **max_pool_kwargs)

        for i in range(num_dense_layers):
            dense_kwargs = self._extract_layer_kwargs('d', i, kwargs)
            dropout = dense_kwargs.pop('dropout', 0.5)
            l_bottom = DenseLayer(l_bottom,
                                  W=HeUniform(gain='relu'),
                                  **dense_kwargs)
            if dropout:
                l_bottom = DropoutLayer(l_bottom, p=dropout)

        return l_bottom
Example #6
def net_vgglike(k, input_shape, nclass):
    input_x = T.tensor4("input")
    target_y = T.vector("target", dtype='int32')

    net = ll.InputLayer(input_shape, input_x)
    net = conv_bn_rectify(net, 64 * k)
    net = ll.DropoutLayer(net, 0.3)
    net = conv_bn_rectify(net, 64 * k)
    net = MaxPool2DLayer(net, 2, 2)

    net = conv_bn_rectify(net, 128 * k)
    net = ll.DropoutLayer(net, 0.4)
    net = conv_bn_rectify(net, 128 * k)
    net = MaxPool2DLayer(net, 2, 2)

    net = conv_bn_rectify(net, 256 * k)
    net = ll.DropoutLayer(net, 0.4)
    net = conv_bn_rectify(net, 256 * k)
    net = ll.DropoutLayer(net, 0.4)
    net = conv_bn_rectify(net, 256 * k)
    net = MaxPool2DLayer(net, 2, 2)

    net = conv_bn_rectify(net, 512 * k)
    net = ll.DropoutLayer(net, 0.4)
    net = conv_bn_rectify(net, 512 * k)
    net = ll.DropoutLayer(net, 0.4)
    net = conv_bn_rectify(net, 512 * k)
    net = MaxPool2DLayer(net, 2, 2)

    net = conv_bn_rectify(net, 512 * k)
    net = ll.DropoutLayer(net, 0.4)
    net = conv_bn_rectify(net, 512 * k)
    net = ll.DropoutLayer(net, 0.4)
    net = conv_bn_rectify(net, 512 * k)
    net = MaxPool2DLayer(net, 2, 2)

    net = ll.DenseLayer(net,
                        int(512 * k),
                        W=init.Normal(),
                        nonlinearity=nl.rectify)
    net = BatchNormLayer(net, epsilon=1e-3)
    net = ll.NonlinearityLayer(net)
    net = ll.DropoutLayer(net, 0.5)
    net = ll.DenseLayer(net, nclass, W=init.Normal(), nonlinearity=nl.softmax)

    return net, input_x, target_y, k
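conv_bn_rectify is a project helper that is not shown in this excerpt; judging from how the tail of the function uses BatchNormLayer and NonlinearityLayer, a plausible minimal definition (an assumption, not the original) is:

def conv_bn_rectify(net, num_filters):
    # 3x3 convolution, batch norm, then ReLU -- the usual VGG-style block.
    net = ConvLayer(net, int(num_filters), 3, pad=1, W=init.Normal(),
                    nonlinearity=nl.linear)
    net = BatchNormLayer(net, epsilon=1e-3)
    net = ll.NonlinearityLayer(net, nonlinearity=nl.rectify)
    return net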
Example #7
def model_train(X_train, y_train, learning_rate=1e-4, epochs=10):
    l = 1000
    layer1 = InputLayer(shape=(None, 1, 4, l + 1024))
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis=-1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis=-1)
    layer2_3 = SliceLayer(layer2_2, indices=slice(0, 4), axis=-2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1, num_filters=64, filter_size=(4, 7))
    layer4 = Conv2DLayer(layer3, num_filters=64, filter_size=(1, 7))
    layer5 = Conv2DLayer(layer4, num_filters=64, filter_size=(1, 7))
    layer6 = MaxPool2DLayer(layer5, pool_size=(1, 6))
    layer7 = Conv2DLayer(layer6, num_filters=64, filter_size=(1, 7))
    layer8 = Conv2DLayer(layer7, num_filters=64, filter_size=(1, 7))
    layer9 = Conv2DLayer(layer8, num_filters=64, filter_size=(1, 7))
    layer10 = MaxPool2DLayer(layer9, pool_size=(1, 6))
    layer11 = Conv2DLayer(layer10, num_filters=64, filter_size=(1, 7))
    layer12 = Conv2DLayer(layer11, num_filters=64, filter_size=(1, 7))
    layer13 = Conv2DLayer(layer12, num_filters=64, filter_size=(1, 7))
    layer14 = MaxPool2DLayer(layer13, pool_size=(1, 6))
    layer14_d = DenseLayer(layer14, num_units=64)
    layer3_2 = DenseLayer(layer2_f, num_units=64)
    layer15 = ConcatLayer([layer14_d, layer3_2])
    #layer15 = ConcatLayer([layer10_d,])
    layer16 = DropoutLayer(layer15)
    layer17 = DenseLayer(layer16, num_units=32)
    network = DenseLayer(layer17, num_units=2, nonlinearity=None)
    lr = theano.shared(np.float32(learning_rate))
    net = NeuralNet(
        network,
        max_epochs=epochs,
        update=adam,
        update_learning_rate=lr,
        regression=True,
        train_split=TrainSplit(eval_size=0.1),
        objective_loss_function=squared_error,
        #on_epoch_finished=[AdjustVariable(lr, target=1e-8, half_life=20)],
        verbose=4)
    net.fit(X_train, y_train)
    return net
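Once fitted, the nolearn wrapper predicts directly; a usage sketch with dummy data (shapes inferred from the InputLayer; the values are placeholders):

import numpy as np

X_train = np.random.rand(32, 1, 4, 2024).astype('float32')
y_train = np.random.rand(32, 2).astype('float32')   # two regression targets per sample
net = model_train(X_train, y_train, epochs=1)
print(net.predict(X_train[:4]).shape)               # -> (4, 2)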
Example #8
def build_cnnae_network(input_shape):
    conv_filters = 16
    filter_size = 3
    pool_size = 2
    encode_size = input_shape[2] * 2

    l_in = InputLayer(shape=(None, input_shape[1], input_shape[2],
                             input_shape[3]))

    l_conv1 = Conv2DLayer(l_in,
                          num_filters=conv_filters,
                          filter_size=(filter_size, filter_size),
                          nonlinearity=None)

    l_pool1 = MaxPool2DLayer(l_conv1, pool_size=(pool_size, pool_size))

    l_dropout1 = DropoutLayer(l_pool1, p=0.5)

    l_reshape1 = ReshapeLayer(l_dropout1, shape=([0], -1))

    l_encode = DenseLayer(l_reshape1, name='encode', num_units=encode_size)

    l_decode = DenseLayer(l_encode,
                          W=l_encode.W.T,
                          num_units=l_reshape1.output_shape[1])

    l_reshape2 = ReshapeLayer(
        l_decode,
        shape=([0], conv_filters,
               int(np.sqrt(l_reshape1.output_shape[1] / conv_filters)),
               int(np.sqrt(l_reshape1.output_shape[1] / conv_filters))))

    l_unpool1 = Upscale2DLayer(l_reshape2, scale_factor=pool_size)

    l_de = TransposedConv2DLayer(l_unpool1,
                                 num_filters=l_conv1.input_shape[1],
                                 W=l_conv1.W,
                                 filter_size=l_conv1.filter_size,
                                 stride=l_conv1.stride,
                                 crop=l_conv1.pad,
                                 flip_filters=not l_conv1.flip_filters)

    l_output = ReshapeLayer(l_de, shape=([0], -1))

    return l_output
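Since the decoder mirrors the encoder (tied dense weights, upscaling, and a transposed convolution that inverts l_conv1), the flattened output shape should equal the flattened input. A quick check (the 28x28 input is my example):

import lasagne

net = build_cnnae_network((None, 1, 28, 28))
print(lasagne.layers.get_output_shape(net))  # -> (None, 784), i.e. 1*28*28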
Example #9
def build_mitosis_encoder(input_shape, encoding_size=32, withst=False):
    # Parameters
    filter_size = (3, 3)
    num_filters = 32
    pool_size = (2, 2)
    # Localization Network

    l_input = InputLayer(shape=(None, input_shape[1], input_shape[2],
                                input_shape[3]))
    l_conv1 = Conv2DLayer(l_input,
                          num_filters=num_filters,
                          filter_size=filter_size)
    l_conv2 = Conv2DLayer(l_conv1,
                          num_filters=num_filters,
                          filter_size=filter_size)
    l_pool1 = MaxPool2DLayer(l_conv2, pool_size=pool_size)
    l_pipe1_layer = l_pool1  # default encoder output; replaced below when the ST branch is enabled

    # ST Network
    if withst:
        # ST Params
        b = np.zeros((2, 3), dtype=theano.config.floatX)
        b[0, 0] = 1
        b[1, 1] = 1
        b = b.flatten()

        # ST Layers
        st_encode1 = DenseLayer(l_pool1,
                                num_units=50,
                                W=lasagne.init.HeUniform('relu'))
        st_encode2 = DenseLayer(st_encode1,
                                num_units=6,
                                b=b,
                                W=lasagne.init.Constant(0.0),
                                nonlinearity=None)  # linear output for the six affine parameters
        l_trans1 = TransformerLayer(l_input, st_encode2, downsample_factor=1.0)

        # Re-encode the transformed input

        st_conv1 = Conv2DLayer(l_trans1,
                               num_filters=num_filters,
                               filter_size=filter_size)
        st_conv2 = Conv2DLayer(st_conv1,
                               num_filters=num_filters,
                               filter_size=filter_size)
        st_pool1 = MaxPool2DLayer(st_conv2, pool_size=pool_size)
        l_pipe1_layer = st_pool1

    # Encoding Step
    l_reshape1 = ReshapeLayer(l_pipe1_layer, shape=([0], -1))
    l_encode = DenseLayer(l_reshape1,
                          num_units=encoding_size,
                          W=lasagne.init.HeUniform('relu'),
                          name='encoder')

    # Decoding Step
    l_decode = DenseLayer(l_encode,
                          W=l_encode.W.T,
                          num_units=l_reshape1.output_shape[1])
    l_reshape2 = ReshapeLayer(
        l_decode,
        shape=([0], num_filters,
               int(np.sqrt(l_reshape1.output_shape[1] / num_filters)),
               int(np.sqrt(l_reshape1.output_shape[1] / num_filters))))

    # Deconv Network
    l_unpool1 = Upscale2DLayer(l_reshape2, scale_factor=pool_size)
    l_deconv2 = TransposedConv2DLayer(l_unpool1,
                                      num_filters=l_conv2.input_shape[1],
                                      W=l_conv2.W,
                                      filter_size=l_conv2.filter_size,
                                      stride=l_conv2.stride,
                                      crop=l_conv2.pad,
                                      flip_filters=not l_conv2.flip_filters)

    l_deconv1 = TransposedConv2DLayer(l_deconv2,
                                      num_filters=l_conv1.input_shape[1],
                                      W=l_conv1.W,
                                      filter_size=l_conv1.filter_size,
                                      stride=l_conv1.stride,
                                      crop=l_conv1.pad,
                                      flip_filters=not l_conv1.flip_filters)

    return l_deconv1
Example #10
def build_st_network_MNIST(input_shape, mins, maxs, ranges, withdisc=True):
    # General Params
    num_filters = 64
    filter_size = (3, 3)
    pool_size = (2, 2)

    # ST Param
    b = np.zeros((2, 3), dtype=theano.config.floatX)
    b[0, 0] = 1
    b[1, 1] = 1
    b = b.flatten()  # identity transform

    # Localization Network
    l_in = InputLayer(shape=(None, input_shape[1], input_shape[2],
                             input_shape[3]))

    l_conv1 = Conv2DLayer(l_in,
                          num_filters=num_filters,
                          filter_size=filter_size)

    l_pool1 = MaxPool2DLayer(l_conv1, pool_size=pool_size)

    l_conv2 = Conv2DLayer(l_pool1,
                          num_filters=num_filters,
                          filter_size=filter_size)

    l_pool2 = MaxPool2DLayer(l_conv2, pool_size=pool_size)

    l_loc = DenseLayer(l_pool2, num_units=64, W=lasagne.init.HeUniform('relu'))

    l_param_reg = DenseLayer(l_loc,
                             num_units=6,
                             b=b,
                             nonlinearity=lasagne.nonlinearities.linear,
                             W=lasagne.init.Constant(0.0),
                             name='param_regressor')

    if withdisc:
        l_dis = DiscreteLayer(l_param_reg, mins, maxs, ranges)
    else:
        l_dis = l_param_reg

    # Transformer Network
    l_trans = TransformerLayer(l_in, l_dis, downsample_factor=1.0)

    # Classification Network
    network = lasagne.layers.Conv2DLayer(
        l_trans,
        num_filters=32,
        filter_size=(5, 5),
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform())
    # Expert note: Lasagne provides alternative convolutional layers that
    # override Theano's choice of which implementation to use; for details
    # please see http://lasagne.readthedocs.org/en/latest/user/tutorial.html.

    # Max-pooling layer of factor 2 in both dimensions:
    network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))

    # Another convolution with 32 5x5 kernels, and another 2x2 pooling:
    network = lasagne.layers.Conv2DLayer(
        network,
        num_filters=32,
        filter_size=(5, 5),
        nonlinearity=lasagne.nonlinearities.rectify)
    network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))

    # A fully-connected layer of 256 units with 50% dropout on its inputs:
    network = lasagne.layers.DenseLayer(
        lasagne.layers.dropout(network, p=.5),
        num_units=256,
        nonlinearity=lasagne.nonlinearities.rectify)

    # And, finally, the 10-unit output layer with 50% dropout on its inputs:
    network = lasagne.layers.DenseLayer(
        lasagne.layers.dropout(network, p=.5),
        num_units=10,
        nonlinearity=lasagne.nonlinearities.softmax)

    return network
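Because the regressor layer carries a name, the predicted affine parameters can be pulled out for inspection; a sketch (withdisc=False sidesteps the custom DiscreteLayer, and the placeholder arguments are assumptions):

import theano
import lasagne

net = build_st_network_MNIST((None, 1, 28, 28), None, None, None, withdisc=False)
layers = lasagne.layers.get_all_layers(net)
reg = next(lyr for lyr in layers if lyr.name == 'param_regressor')
theta = lasagne.layers.get_output(reg, deterministic=True)
# layers[0] is the InputLayer; its auto-created input_var feeds the graph.
theta_fn = theano.function([layers[0].input_var], theta)  # -> (batch, 6) affine params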
Example #11
def build_model(batch_size=BATCH_SIZE):
    """ Compile net architecture """
    nonlin = lasagne.nonlinearities.rectify

    # --- input layers ---
    l_in = lasagne.layers.InputLayer(shape=(None, INPUT_SHAPE[0],
                                            INPUT_SHAPE[1], INPUT_SHAPE[2]),
                                     name='Input')
    net = l_in

    nf = 64

    # --- conv layers ---
    net = Conv2DLayer(net,
                      num_filters=nf,
                      filter_size=5,
                      stride=2,
                      pad=2,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = Conv2DLayer(net,
                      num_filters=nf,
                      filter_size=3,
                      stride=1,
                      pad=1,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = MaxPool2DLayer(net, pool_size=2)
    net = DropoutLayer(net, p=0.3)

    net = Conv2DLayer(net,
                      num_filters=2 * nf,
                      filter_size=3,
                      stride=1,
                      pad=1,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = Conv2DLayer(net,
                      num_filters=2 * nf,
                      filter_size=3,
                      stride=1,
                      pad=1,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = MaxPool2DLayer(net, pool_size=2)
    net = DropoutLayer(net, p=0.3)

    net = Conv2DLayer(net,
                      num_filters=4 * nf,
                      filter_size=3,
                      stride=1,
                      pad=1,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = DropoutLayer(net, p=0.3)
    net = Conv2DLayer(net,
                      num_filters=4 * nf,
                      filter_size=3,
                      stride=1,
                      pad=1,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = DropoutLayer(net, p=0.3)
    net = Conv2DLayer(net,
                      num_filters=6 * nf,
                      filter_size=3,
                      stride=1,
                      pad=1,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = DropoutLayer(net, p=0.3)
    net = Conv2DLayer(net,
                      num_filters=6 * nf,
                      filter_size=3,
                      stride=1,
                      pad=1,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = MaxPool2DLayer(net, pool_size=2)
    net = DropoutLayer(net, p=0.3)

    net = Conv2DLayer(net,
                      num_filters=8 * nf,
                      filter_size=3,
                      stride=1,
                      pad=1,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = Conv2DLayer(net,
                      num_filters=8 * nf,
                      filter_size=3,
                      stride=1,
                      pad=1,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = MaxPool2DLayer(net, pool_size=(1, 2))
    net = DropoutLayer(net, p=0.3)

    net = Conv2DLayer(net,
                      num_filters=8 * nf,
                      filter_size=3,
                      stride=1,
                      pad=1,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = Conv2DLayer(net,
                      num_filters=8 * nf,
                      filter_size=3,
                      stride=1,
                      pad=1,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = MaxPool2DLayer(net, pool_size=(1, 2))
    net = DropoutLayer(net, p=0.3)

    net = Conv2DLayer(net,
                      num_filters=8 * nf,
                      filter_size=3,
                      pad=0,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = DropoutLayer(net, p=0.5)
    net = Conv2DLayer(net,
                      num_filters=8 * nf,
                      filter_size=1,
                      pad=0,
                      W=init_conv(gain="relu"),
                      nonlinearity=nonlin)
    net = batch_norm(net, alpha=0.1)
    net = DropoutLayer(net, p=0.5)

    # --- classification head: 1x1 conv to 41 class maps, then global average pooling ---
    net = Conv2DLayer(net,
                      num_filters=41,
                      filter_size=1,
                      W=init_conv(gain="relu"),
                      nonlinearity=None)
    net = batch_norm(net, alpha=0.1)
    net = GlobalPoolLayer(net)
    net = FlattenLayer(net)
    net = NonlinearityLayer(net, nonlinearity=lasagne.nonlinearities.softmax)

    return net
Example #12
def build_model(num_classes, pretrained_w_path="", input_var=None, drop_p=0.5):
    net = {}
    net['data'] = InputLayer(shape=(None, 3, 227, 227), input_var=input_var)

    # conv1
    net['conv1'] = Conv2DLayer(net['data'],
                               num_filters=96,
                               filter_size=(11, 11),
                               stride=4,
                               nonlinearity=lasagne.nonlinearities.rectify)

    # pool1
    net['pool1'] = MaxPool2DLayer(net['conv1'], pool_size=(3, 3), stride=2)

    # norm1
    net['norm1'] = LocalResponseNormalization2DLayer(net['pool1'],
                                                     n=5,
                                                     alpha=0.0001 / 5.0,
                                                     beta=0.75,
                                                     k=1)

    # conv2
    # The caffe reference model uses a parameter called group.
    # This parameter splits input to the convolutional layer.
    # The first half of the filters operate on the first half
    # of the input from the previous layer. Similarly, the
    # second half operate on the second half of the input.
    #
    # Lasagne does not have this group parameter, but we can
    # do it ourselves.
    #
    # see https://github.com/BVLC/caffe/issues/778
    # also see https://code.google.com/p/cuda-convnet/wiki/LayerParams

    # before conv2 split the data
    # norm1 size: 27 x 27 x 96  (227 -> conv1 stride 4 -> 55 -> pool1 stride 2 -> 27)
    net['conv2_data1'] = SliceLayer(net['norm1'], indices=slice(0, 48), axis=1)
    net['conv2_data2'] = SliceLayer(net['norm1'],
                                    indices=slice(48, 96),
                                    axis=1)

    # now do the convolutions
    net['conv2_part1'] = Conv2DLayer(net['conv2_data1'],
                                     num_filters=128,
                                     filter_size=(5, 5),
                                     pad=2)
    net['conv2_part2'] = Conv2DLayer(net['conv2_data2'],
                                     num_filters=128,
                                     filter_size=(5, 5),
                                     pad=2)

    # now combine
    net['conv2'] = concat((net['conv2_part1'], net['conv2_part2']), axis=1)

    # pool2
    net['pool2'] = MaxPool2DLayer(net['conv2'], pool_size=(3, 3), stride=2)

    # norm2
    net['norm2'] = LocalResponseNormalization2DLayer(net['pool2'],
                                                     n=5,
                                                     alpha=0.0001 / 5.0,
                                                     beta=0.75,
                                                     k=1)

    # conv3
    # no group
    net['conv3'] = Conv2DLayer(net['norm2'],
                               num_filters=384,
                               filter_size=(3, 3),
                               pad=1)

    # conv4
    # group = 2
    net['conv4_data1'] = SliceLayer(net['conv3'],
                                    indices=slice(0, 192),
                                    axis=1)
    net['conv4_data2'] = SliceLayer(net['conv3'],
                                    indices=slice(192, 384),
                                    axis=1)
    net['conv4_part1'] = Conv2DLayer(net['conv4_data1'],
                                     num_filters=192,
                                     filter_size=(3, 3),
                                     pad=1)
    net['conv4_part2'] = Conv2DLayer(net['conv4_data2'],
                                     num_filters=192,
                                     filter_size=(3, 3),
                                     pad=1)
    net['conv4'] = concat((net['conv4_part1'], net['conv4_part2']), axis=1)

    # conv5
    # group 2
    net['conv5_data1'] = SliceLayer(net['conv4'],
                                    indices=slice(0, 192),
                                    axis=1)
    net['conv5_data2'] = SliceLayer(net['conv4'],
                                    indices=slice(192, 384),
                                    axis=1)
    net['conv5_part1'] = Conv2DLayer(net['conv5_data1'],
                                     num_filters=128,
                                     filter_size=(3, 3),
                                     pad=1)
    net['conv5_part2'] = Conv2DLayer(net['conv5_data2'],
                                     num_filters=128,
                                     filter_size=(3, 3),
                                     pad=1)
    net['conv5'] = concat((net['conv5_part1'], net['conv5_part2']), axis=1)

    # pool 5
    net['pool5'] = MaxPool2DLayer(net['conv5'], pool_size=(3, 3), stride=2)

    # fc6
    net['fc6'] = DenseLayer(net['pool5'],
                            num_units=4096,
                            nonlinearity=lasagne.nonlinearities.rectify)

    # fc7
    net['fc7'] = DenseLayer(DropoutLayer(net['fc6'], p=drop_p),
                            num_units=4096,
                            nonlinearity=lasagne.nonlinearities.rectify)

    # fc8 - changes: (i) num_classes, (ii) sigmoid activation
    net['fc8'] = DenseLayer(DropoutLayer(net['fc7'], p=drop_p),
                            num_units=num_classes,
                            nonlinearity=lasagne.nonlinearities.sigmoid)

    return net['fc8']
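Note that pretrained_w_path is accepted but never used inside this excerpt. If the weights were dumped with pickle as a flat parameter list (the convention Example #14 uses), loading them might look like this (the file name and format are assumptions):

import pickle
import lasagne

output_layer = build_model(num_classes=20)
pretrained_w_path = 'alexnet_weights.pkl'  # hypothetical dump of get_all_param_values()
with open(pretrained_w_path, 'rb') as f:
    weights = pickle.load(f)
lasagne.layers.set_all_param_values(output_layer, weights)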
Example #13
def build_stereo_cnn(input_var=None, in_shape_1=100, in_shape_2=150):

    conv_num_filters1 = 16
    conv_num_filters2 = 32
    conv_num_filters3 = 64
    conv_num_filters4 = 128
    filter_size1 = 7
    filter_size2 = 5
    filter_size3 = 3
    filter_size4 = 3
    pool_size = 2
    scale_factor = 2
    pad_in = 'valid'
    pad_out = 'full'

    # Input layer, as usual:
    network = InputLayer(shape=(None, 2, in_shape_1, in_shape_2),
                         input_var=input_var,
                         name="input_layer")

    network = batch_norm(
        Conv2DLayer(network,
                    num_filters=conv_num_filters1,
                    filter_size=(filter_size1, filter_size1),
                    pad=pad_in,
                    nonlinearity=lasagne.nonlinearities.rectify,
                    W=lasagne.init.GlorotUniform(),
                    name="conv1"))

    network = MaxPool2DLayer(network,
                             pool_size=(pool_size, pool_size),
                             name="pool1")

    network = batch_norm(
        Conv2DLayer(network,
                    num_filters=conv_num_filters2,
                    filter_size=(filter_size2, filter_size2),
                    pad=pad_in,
                    nonlinearity=lasagne.nonlinearities.rectify,
                    W=lasagne.init.GlorotUniform(),
                    name="conv2"))

    network = MaxPool2DLayer(network,
                             pool_size=(pool_size, pool_size),
                             name="pool2")

    network = batch_norm(
        Conv2DLayer(network,
                    num_filters=conv_num_filters3,
                    filter_size=(filter_size3, filter_size3),
                    pad=pad_in,
                    nonlinearity=lasagne.nonlinearities.rectify,
                    W=lasagne.init.GlorotUniform(),
                    name="conv3"))

    network = MaxPool2DLayer(network,
                             pool_size=(pool_size, pool_size),
                             name="pool3")

    network = batch_norm(
        Conv2DLayer(network,
                    num_filters=conv_num_filters4,
                    filter_size=(filter_size4, filter_size4),
                    pad=pad_in,
                    nonlinearity=lasagne.nonlinearities.rectify,
                    W=lasagne.init.GlorotUniform(),
                    name="conv4"))

    network = batch_norm(
        Conv2DLayer(network,
                    num_filters=32,
                    filter_size=(filter_size4, filter_size4),
                    pad=pad_out,
                    nonlinearity=lasagne.nonlinearities.rectify,
                    W=lasagne.init.GlorotUniform(),
                    name="deconv1"))

    network = Upscale2DLayer(network,
                             scale_factor=(pool_size, pool_size),
                             name="upscale1")

    network = batch_norm(
        Conv2DLayer(network,
                    num_filters=16,
                    filter_size=(filter_size3, filter_size3),
                    pad=pad_out,
                    nonlinearity=lasagne.nonlinearities.rectify,
                    W=lasagne.init.GlorotUniform(),
                    name="deconv2"))

    network = Upscale2DLayer(network,
                             scale_factor=(pool_size, pool_size),
                             name="upscale2")

    network = batch_norm(
        Conv2DLayer(network,
                    num_filters=8,
                    filter_size=(filter_size2, filter_size2),
                    pad=pad_out,
                    nonlinearity=lasagne.nonlinearities.rectify,
                    W=lasagne.init.GlorotUniform(),
                    name="deconv3"))

    network = Upscale2DLayer(network,
                             scale_factor=(pool_size, pool_size),
                             name="upscale3")

    network = batch_norm(
        Conv2DLayer(network,
                    num_filters=1,
                    filter_size=(filter_size1, filter_size1),
                    pad=pad_out,
                    nonlinearity=lasagne.nonlinearities.sigmoid,
                    W=lasagne.init.GlorotUniform(),
                    name="deconv4"))

    return network
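The sigmoid single-filter deconv4 output suggests a per-pixel map; a training sketch under that assumption (binary cross-entropy and adam are my choices, not the source's):

import theano
import theano.tensor as T
import lasagne

input_var = T.tensor4('inputs')
target_var = T.tensor4('targets')  # must match the network's output spatial size
network = build_stereo_cnn(input_var)
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.binary_crossentropy(prediction, target_var).mean()
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.adam(loss, params)
train_fn = theano.function([input_var, target_var], loss, updates=updates)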
Example #14
    def build_model(width=512, height=512, filename=None,
                    n_classes=5, batch_size=None, p_conv=0.0):
        """Setup network structure for the original formulation of JeffreyDF's
           network and optionally load pretrained weights

        Parameters
        ----------
        width : Optional[int]
            image width
        height : Optional[int]
            image height
        filename : Optional[str]
            if filename is not None, weights are loaded from filename
        n_classes : Optional[int]
            default 5 for transfer learning on Kaggle DR data
        batch_size : Optional[int]
            should only be set if all batches have the same size!
        p_conv : Optional[float]
            dropout applied to conv. layers, by default turned off (0.0)

        Returns
        -------
        dict
            one lasagne layer per key

        Notes
        -----
            Reference: Jeffrey De Fauw, 2015:
            http://jeffreydf.github.io/diabetic-retinopathy-detection/

            Download pretrained weights from:
            https://github.com/JeffreyDF/kaggle_diabetic_retinopathy/blob/
            master/dumps/2015_07_17_123003_PARAMSDUMP.pkl

            original net has leaky rectifier units

        """

        net = OrderedDict()

        net['0'] = InputLayer((batch_size, 3, width, height), name='images')
        net['1'] = ConvLayer(net['0'], 32, 7, stride=(2, 2), pad='same',
                             untie_biases=True,
                             nonlinearity=LeakyRectify(leakiness=0.5),
                             W=lasagne.init.Orthogonal(1.0),
                             b=lasagne.init.Constant(0.1))
        net['1d'] = DropoutLayer(net['1'], p=p_conv)
        net['2'] = MaxPool2DLayer(net['1d'], 3, stride=(2, 2))
        net['3'] = ConvLayer(net['2'], 32, 3, stride=(1, 1), pad='same',
                             untie_biases=True,
                             nonlinearity=LeakyRectify(leakiness=0.5),
                             W=lasagne.init.Orthogonal(1.0),
                             b=lasagne.init.Constant(0.1))
        net['3d'] = DropoutLayer(net['3'], p=p_conv)
        net['4'] = ConvLayer(net['3d'], 32, 3, stride=(1, 1), pad='same',
                             untie_biases=True,
                             nonlinearity=LeakyRectify(leakiness=0.5),
                             W=lasagne.init.Orthogonal(1.0),
                             b=lasagne.init.Constant(0.1))
        net['4d'] = DropoutLayer(net['4'], p=p_conv)
        net['5'] = MaxPool2DLayer(net['4d'], 3, stride=(2, 2))
        net['6'] = ConvLayer(net['5'], 64, 3, stride=(1, 1), pad='same',
                             untie_biases=True,
                             nonlinearity=LeakyRectify(leakiness=0.5),
                             W=lasagne.init.Orthogonal(1.0),
                             b=lasagne.init.Constant(0.1))
        net['6d'] = DropoutLayer(net['6'], p=p_conv)
        net['7'] = ConvLayer(net['6d'], 64, 3, stride=(1, 1), pad='same',
                             untie_biases=True,
                             nonlinearity=LeakyRectify(leakiness=0.5),
                             W=lasagne.init.Orthogonal(1.0),
                             b=lasagne.init.Constant(0.1))
        net['7d'] = DropoutLayer(net['7'], p=p_conv)
        net['8'] = MaxPool2DLayer(net['7d'], 3, stride=(2, 2))
        net['9'] = ConvLayer(net['8'], 128, 3, stride=(1, 1), pad='same',
                             untie_biases=True,
                             nonlinearity=LeakyRectify(leakiness=0.5),
                             W=lasagne.init.Orthogonal(1.0),
                             b=lasagne.init.Constant(0.1))
        net['9d'] = DropoutLayer(net['9'], p=p_conv)
        net['10'] = ConvLayer(net['9d'], 128, 3, stride=(1, 1), pad='same',
                              untie_biases=True,
                              nonlinearity=LeakyRectify(leakiness=0.5),
                              W=lasagne.init.Orthogonal(1.0),
                              b=lasagne.init.Constant(0.1))
        net['10d'] = DropoutLayer(net['10'], p=p_conv)
        net['11'] = ConvLayer(net['10d'], 128, 3, stride=(1, 1), pad='same',
                              untie_biases=True,
                              nonlinearity=LeakyRectify(leakiness=0.5),
                              W=lasagne.init.Orthogonal(1.0),
                              b=lasagne.init.Constant(0.1))
        net['11d'] = DropoutLayer(net['11'], p=p_conv)
        net['12'] = ConvLayer(net['11d'], 128, 3, stride=(1, 1), pad='same',
                              untie_biases=True,
                              nonlinearity=LeakyRectify(leakiness=0.5),
                              W=lasagne.init.Orthogonal(1.0),
                              b=lasagne.init.Constant(0.1))
        net['12d'] = DropoutLayer(net['12'], p=p_conv)
        net['13'] = MaxPool2DLayer(net['12d'], 3, stride=(2, 2))
        net['14'] = ConvLayer(net['13'], 256, 3, stride=(1, 1), pad='same',
                              untie_biases=True,
                              nonlinearity=LeakyRectify(leakiness=0.5),
                              W=lasagne.init.Orthogonal(1.0),
                              b=lasagne.init.Constant(0.1))
        net['14d'] = DropoutLayer(net['14'], p=p_conv)
        net['15'] = ConvLayer(net['14d'], 256, 3, stride=(1, 1), pad='same',
                              untie_biases=True,
                              nonlinearity=LeakyRectify(leakiness=0.5),
                              W=lasagne.init.Orthogonal(1.0),
                              b=lasagne.init.Constant(0.1))
        net['15d'] = DropoutLayer(net['15'], p=p_conv)
        net['16'] = ConvLayer(net['15d'], 256, 3, stride=(1, 1), pad='same',
                              untie_biases=True,
                              nonlinearity=LeakyRectify(leakiness=0.5),
                              W=lasagne.init.Orthogonal(1.0),
                              b=lasagne.init.Constant(0.1))
        net['16d'] = DropoutLayer(net['16'], p=p_conv)
        net['17'] = ConvLayer(net['16d'], 256, 3, stride=(1, 1), pad='same',
                              untie_biases=True,
                              nonlinearity=LeakyRectify(leakiness=0.5),
                              W=lasagne.init.Orthogonal(1.0),
                              b=lasagne.init.Constant(0.1))
        net['17d'] = DropoutLayer(net['17'], p=p_conv)
        net['18'] = MaxPool2DLayer(net['17d'], 3, stride=(2, 2),
                                   name='coarse_last_pool')
        net['19'] = DropoutLayer(net['18'], p=0.5)
        net['20'] = DenseLayer(net['19'], num_units=1024, nonlinearity=None,
                               W=lasagne.init.Orthogonal(1.0),
                               b=lasagne.init.Constant(0.1),
                               name='first_fc_0')
        net['21'] = FeaturePoolLayer(net['20'], 2)
        net['22'] = InputLayer((batch_size, 2), name='imgdim')
        net['23'] = ConcatLayer([net['21'], net['22']])
        # Combine representations of both eyes
        net['24'] = ReshapeLayer(net['23'],
                                 (-1, net['23'].output_shape[1] * 2))
        net['25'] = DropoutLayer(net['24'], p=0.5)
        net['26'] = DenseLayer(net['25'], num_units=1024, nonlinearity=None,
                               W=lasagne.init.Orthogonal(1.0),
                               b=lasagne.init.Constant(0.1),
                               name='combine_repr_fc')
        net['27'] = FeaturePoolLayer(net['26'], 2)
        net['28'] = DropoutLayer(net['27'], p=0.5)
        net['29'] = DenseLayer(net['28'],
                               num_units=n_classes * 2,
                               nonlinearity=None,
                               W=lasagne.init.Orthogonal(1.0),
                               b=lasagne.init.Constant(0.1))
        # Reshape back to the number of desired classes
        net['30'] = ReshapeLayer(net['29'], (-1, n_classes))
        net['31'] = NonlinearityLayer(net['30'], nonlinearity=softmax)

        if filename is not None:
            with open(filename, 'rb') as f:
                weights = pickle.load(f)
            set_all_param_values(net['31'], weights)

        return net
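A sketch of compiling predictions from the returned dict (net['31'] is the softmax output; both input layers must be fed):

import theano
import lasagne

net = build_model(width=512, height=512)
probs = lasagne.layers.get_output(net['31'], deterministic=True)
# note: the reshape in net['24'] pairs consecutive samples (both eyes),
# so the batch size must be even.
predict_fn = theano.function([net['0'].input_var, net['22'].input_var], probs)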