Example #1
    def buildSectorNet(self):

        sectorNet = InputLayer(self.inputShape, self.inputVar)

        for i, layer in enumerate(self.layerCategory):

            self.logger.debug('Building conv layer {}'.format(i))
            self.logger.debug('The output shape of layer {} equals {}'.format(
                i - 1, get_output_shape(sectorNet)))

            kernelXDim = int(layer[-1])
            kernelDim = (kernelXDim, ) * 3

            conv3D = batch_norm(
                Conv3DLayer(incoming=sectorNet,
                            num_filters=self.numOfFMs[i],
                            filter_size=kernelDim,
                            W=HeUniform(gain='relu'),
                            nonlinearity=rectify,
                            name='Conv3D'))
            self.logger.debug(
                'The shape of {}th conv3D layer equals {}'.format(
                    i, get_output_shape(conv3D)))

            sectorNet = ConcatLayer(
                [conv3D, sectorNet],
                1,
                cropping=['center', None, 'center', 'center', 'center'])

            self.logger.debug(
                'The shape of {}th concat layer equals {}'.format(
                    i, get_output_shape(sectorNet)))

        assert get_output_shape(sectorNet) == (None, sum(self.numOfFMs) + 1, 1,
                                               1, 1)

        sectorNet = batch_norm(
            Conv3DLayer(incoming=sectorNet,
                        num_filters=2,
                        filter_size=(1, 1, 1),
                        W=HeUniform(gain='relu')))

        self.logger.debug('The shape of the last Conv3D layer equals {}'.format(
            get_output_shape(sectorNet)))

        sectorNet = ReshapeLayer(sectorNet, ([0], -1))
        self.logger.debug('The shape of ReshapeLayer equals {}'.format(
            get_output_shape(sectorNet)))

        sectorNet = NonlinearityLayer(sectorNet, softmax)
        self.logger.debug(
            'The shape of output layer, i.e. NonlinearityLayer, equals {}'.
            format(get_output_shape(sectorNet)))

        assert get_output_shape(sectorNet) == (None, self.numOfOutputClass)

        return sectorNet
Example #2
    def __init__(self, input_shape=(None, 1, 33, 33, 33)):
        self.cubeSize = input_shape[-1]

        # Theano variables
        self.input_var = T.tensor5('input_var')  # input image
        self.target_var = T.ivector('target_var')  # target

        self.logger = logging.getLogger(__name__)

        input_layer = InputLayer(input_shape, self.input_var)
        self.logger.info('The shape of input layer is {}'.format(
            get_output_shape(input_layer)))

        hidden_layer1 = Conv3DLayer(incoming=input_layer,
                                    num_filters=16,
                                    filter_size=(3, 3, 3),
                                    W=HeUniform(gain='relu'),
                                    nonlinearity=rectify)
        self.logger.info('The shape of first hidden layer is {}'.format(
            get_output_shape(hidden_layer1)))

        hidden_layer2 = Conv3DLayer(incoming=hidden_layer1,
                                    num_filters=32,
                                    filter_size=(3, 3, 3),
                                    W=HeUniform(gain='relu'),
                                    nonlinearity=rectify)
        self.logger.info('The shape of second hidden layer is {}'.format(
            get_output_shape(hidden_layer2)))

        hidden_layer3 = Conv3DLayer(incoming=hidden_layer2,
                                    num_filters=2,
                                    filter_size=(1, 1, 1),
                                    W=HeUniform(gain='relu'),
                                    nonlinearity=rectify)
        self.logger.info('The shape of third hidden layer is {}'.format(
            get_output_shape(hidden_layer3)))

        shuffledLayer = DimshuffleLayer(hidden_layer3, (0, 2, 3, 4, 1))
        self.logger.info('The shape of shuffled layer is {}'.format(
            get_output_shape(shuffledLayer)))

        reshapedLayer = ReshapeLayer(shuffledLayer, ([0], -1))
        self.logger.info('The shape of reshaped layer is {}'.format(
            get_output_shape(reshapedLayer)))

        self.output_layer = NonlinearityLayer(reshapedLayer, softmax)
        self.logger.info('The shape of output layer is {}'.format(
            get_output_shape(self.output_layer)))
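A minimal usage sketch for the network above (hedged: the enclosing class name is not shown, so `net` below stands for an instance of it; `output_layer` and `input_var` are the attributes set in __init__):

import theano
import lasagne

# Hedged sketch: compile a per-voxel prediction function from the attributes
# built above. `net` is assumed to be an instance of the (unnamed) class.
prediction = lasagne.layers.get_output(net.output_layer, deterministic=True)
predict_fn = theano.function([net.input_var], prediction)
# predict_fn expects a float32 array of shape (batch, 1, 33, 33, 33).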
from lasagne.layers import *
from lasagne.nonlinearities import elu

import os
import pickle
import theano.misc.pkl_utils

cachefile = os.path.dirname(os.path.realpath(__file__)) + "/model6tissues.pkl"

if not os.path.exists(cachefile):

    l = InputLayer(shape=(None, 1, 64, 64, 64), name="input")
    l_input = l

    l = Conv3DLayer(l,
                    num_filters=16,
                    filter_size=(3, 3, 3),
                    pad='same',
                    name="conv",
                    nonlinearity=elu)
    l = Conv3DLayer(l,
                    num_filters=16,
                    filter_size=(3, 3, 3),
                    pad='same',
                    name="conv",
                    nonlinearity=elu)
    l = batch_norm(l)
    li0 = l

    l = MaxPool3DLayer(l, pool_size=2, name='maxpool')
    l = Conv3DLayer(l,
                    num_filters=24,
                    filter_size=(3, 3, 3),
                    pad='same',
                    name="conv",
                    nonlinearity=elu)
    # The original snippet is truncated at this point; the closing arguments
    # above are assumed to follow the pattern of the earlier conv layers.
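The os.path.exists() guard above suggests a build-then-cache pattern; a hedged sketch of the counterpart (not part of the original snippet) could use theano.misc.pkl_utils to serialize the layer graph together with its shared parameters:

# Hedged sketch (not from the original snippet): once `l` holds the finished
# network, it can be cached and later restored with theano.misc.pkl_utils,
# which pickles the layer graph together with its shared variables.
with open(cachefile, 'wb') as f:
    theano.misc.pkl_utils.dump(l, f)

with open(cachefile, 'rb') as f:
    l = theano.misc.pkl_utils.load(f)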
def cascade_model(options):
    """
    3D cascade model using Nolearn and Lasagne
    
    Inputs:
    - model_options:
    - weights_path: path to where weights should be saved

    Output:
    - nets = list of NeuralNets (CNN1, CNN2)
    """

    # model options
    channels = len(options['modalities'])
    train_split_perc = options['train_split']
    num_epochs = options['max_epochs']
    max_epochs_patience = options['patience']

    # save model to disk to re-use it. Create an experiment folder
    # organize experiment
    if not os.path.exists(
            os.path.join(options['weight_paths'], options['experiment'])):
        os.mkdir(os.path.join(options['weight_paths'], options['experiment']))
    if not os.path.exists(
            os.path.join(options['weight_paths'], options['experiment'],
                         'nets')):
        os.mkdir(
            os.path.join(options['weight_paths'], options['experiment'],
                         'nets'))

    # --------------------------------------------------
    # first model
    # --------------------------------------------------

    layer1 = InputLayer(name='in1',
                        shape=(None, channels) + options['patch_size'])
    layer1 = batch_norm(Conv3DLayer(layer1,
                                    name='conv1_1',
                                    num_filters=32,
                                    filter_size=3,
                                    pad='same'),
                        name='BN1')
    layer1 = Pool3DLayer(layer1,
                         name='avgpool_1',
                         mode='max',
                         pool_size=2,
                         stride=2)
    layer1 = batch_norm(Conv3DLayer(layer1,
                                    name='conv2_1',
                                    num_filters=64,
                                    filter_size=3,
                                    pad='same'),
                        name='BN2')
    layer1 = Pool3DLayer(layer1,
                         name='avgpool_2',
                         mode='max',
                         pool_size=2,
                         stride=2)
    layer1 = DropoutLayer(layer1, name='l2drop', p=0.5)
    layer1 = DenseLayer(layer1, name='d_1', num_units=256)
    layer1 = DenseLayer(layer1,
                        name='out',
                        num_units=2,
                        nonlinearity=nonlinearities.softmax)

    # save weights
    net_model = 'model_1'
    net_weights = os.path.join(options['weight_paths'], options['experiment'],
                               'nets', net_model + '.pkl')
    net_history = os.path.join(options['weight_paths'], options['experiment'],
                               'nets', net_model + '_history.pkl')

    net1 = NeuralNet(
        layers=layer1,
        objective_loss_function=objectives.categorical_crossentropy,
        batch_iterator_train=Rotate_batch_Iterator(batch_size=128),
        update=updates.adadelta,
        on_epoch_finished=[
            SaveWeights(net_weights, only_best=True, pickle=False),
            SaveTrainingHistory(net_history),
            EarlyStopping(patience=max_epochs_patience)
        ],
        verbose=options['net_verbose'],
        max_epochs=num_epochs,
        train_split=TrainSplit(eval_size=train_split_perc),
    )

    # --------------------------------------------------
    # second model
    # --------------------------------------------------

    layer2 = InputLayer(name='in2',
                        shape=(None, channels) + options['patch_size'])
    layer2 = batch_norm(Conv3DLayer(layer2,
                                    name='conv1_1',
                                    num_filters=32,
                                    filter_size=3,
                                    pad='same'),
                        name='BN1')
    layer2 = Pool3DLayer(layer2,
                         name='avgpool_1',
                         mode='max',
                         pool_size=2,
                         stride=2)
    layer2 = batch_norm(Conv3DLayer(layer2,
                                    name='conv2_1',
                                    num_filters=64,
                                    filter_size=3,
                                    pad='same'),
                        name='BN2')
    layer2 = Pool3DLayer(layer2,
                         name='avgpool_2',
                         mode='max',
                         pool_size=2,
                         stride=2)
    layer2 = DropoutLayer(layer2, name='l2drop', p=0.5)
    layer2 = DenseLayer(layer2, name='d_1', num_units=256)
    layer2 = DenseLayer(layer2,
                        name='out',
                        num_units=2,
                        nonlinearity=nonlinearities.softmax)

    # save weights
    net_model = 'model_2'
    net_weights2 = os.path.join(options['weight_paths'], options['experiment'],
                                'nets', net_model + '.pkl')
    net_history2 = os.path.join(options['weight_paths'], options['experiment'],
                                'nets', net_model + '_history.pkl')

    net2 = NeuralNet(
        layers=layer2,
        objective_loss_function=objectives.categorical_crossentropy,
        batch_iterator_train=Rotate_batch_Iterator(batch_size=128),
        update=updates.adadelta,
        on_epoch_finished=[
            SaveWeights(net_weights2, only_best=True, pickle=False),
            SaveTrainingHistory(net_history2),
            EarlyStopping(patience=max_epochs_patience)
        ],
        verbose=options['net_verbose'],
        max_epochs=num_epochs,
        train_split=TrainSplit(eval_size=train_split_perc),
    )

    return [net1, net2]
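A hedged driver sketch for cascade_model (the option values and data shapes below are placeholders, not from the original; nolearn's NeuralNet exposes scikit-learn style fit/predict_proba):

import numpy as np

# Hedged usage sketch; option values and data shapes are illustrative only.
options = {'modalities': ['T1'], 'train_split': 0.25, 'max_epochs': 200,
           'patience': 25, 'weight_paths': '/tmp', 'experiment': 'demo',
           'patch_size': (11, 11, 11), 'net_verbose': 1}
net1, net2 = cascade_model(options)

X = np.zeros((256, 1, 11, 11, 11), dtype=np.float32)   # placeholder patches
y = np.zeros(256, dtype=np.int32)                       # placeholder labels
net1.fit(X, y)                                          # nolearn NeuralNet API
prob = net1.predict_proba(X)[:, 1]                      # per-patch probability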
Example #9
def build_cnn3d(input_var=None,
                w_init=None,
                n_layers=(4, 2, 1),
                n_filters_first=32,
                imsize=[32, 32],
                n_colors=3,
                n_timewin=5,
                isMaxpool=True,
                filter_size=[(3, 1, 1), (3, 3, 3), (3, 3, 3)],
                dropout=0.0,
                input_dropout=0.0,
                batch_norm_conv=False,
                padding='same',
                pool_size=[(2, 1, 1), (2, 2, 2), (2, 2, 2)],
                factor=2):
    """
    Builds a VGG style 3D CNN network followed by a fully-connected layer and a softmax layer.
    Stacks are separated by a maxpool layer. Number of kernels in each layer is twice
    the number in previous stack.
    input_var: Theano variable for input to the network
    outputs: pointer to the output of the last layer of network (softmax)

    :param input_var: theano variable as input to the network
    :param w_init: Initial weight values
    :param n_layers: number of layers in each stack. An array of integers with each
                    value corresponding to the number of layers hesain each stack.
                    (e.g. [4, 2, 1] == 3 stacks with 4, 2, and 1 layers in each.
    :param n_filters_first: number of filters in the first layer
    :param imSize: Size of the image
    :param n_colors: Number of color channels (depth)
    :return: a pointer to the output of last layer
    """
    weights = []  # Keeps the weights for all layers
    count = 0
    # If no initial weight is given, initialize with GlorotUniform
    if w_init is None:
        w_init = [lasagne.init.GlorotUniform()] * sum(n_layers)
    # Input layer
    network = InputLayer(shape=(n_timewin, None, n_colors, imsize[0],
                                imsize[1]),
                         input_var=input_var)

    network = DimshuffleLayer(network, (1, 2, 0, 3, 4))

    if input_dropout:
        network = lasagne.layers.dropout(network, p=input_dropout)

    for i, s in enumerate(n_layers):
        for l in range(s):
            network = Conv3DLayer(network,
                                  num_filters=n_filters_first * (factor**i),
                                  filter_size=filter_size[i],
                                  W=w_init[count],
                                  pad=padding)
            count += 1
            weights.append(network.W)
            if dropout:
                network = lasagne.layers.dropout(network, p=dropout)
        if batch_norm_conv:
            network = batch_norm(network)
        if isMaxpool:
            network = MaxPool3DLayer(network, pool_size=pool_size[i])
    return network, weights
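A hedged usage sketch for build_cnn3d (variable names are illustrative); the input is a 5D tensor laid out as (n_timewin, batch, n_colors, H, W) to match the InputLayer above:

import theano
import theano.tensor as T
import lasagne

# Hedged sketch: build the network on a symbolic 5D input and compile a
# feature-extraction function.
input_var = T.TensorType(theano.config.floatX, (False,) * 5)('frames')
network, conv_weights = build_cnn3d(input_var=input_var)
features = lasagne.layers.get_output(network)
feature_fn = theano.function([input_var], features)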
Example #10
def build_net(nz=200):

    input_depth = 160
    input_rows = 64
    input_columns = 64

    #Encoder

    enc = InputLayer(shape=(None, 1, input_depth, input_rows,
                            input_columns))  #5D tensor
    enc = Conv3DLayer(incoming=enc,
                      num_filters=64,
                      filter_size=5,
                      stride=2,
                      nonlinearity=lrelu(0.2),
                      pad=2)
    enc = Conv3DLayer(incoming=enc,
                      num_filters=128,
                      filter_size=5,
                      stride=2,
                      nonlinearity=lrelu(0.2),
                      pad=2)
    enc = Conv3DLayer(incoming=enc,
                      num_filters=256,
                      filter_size=5,
                      stride=2,
                      nonlinearity=lrelu(0.2),
                      pad=2)
    enc = Conv3DLayer(incoming=enc,
                      num_filters=256,
                      filter_size=5,
                      stride=2,
                      nonlinearity=lrelu(0.2),
                      pad=2)
    enc = reshape(incoming=enc, shape=(-1, 256 * 4 * 4 * 10))
    enc = DenseLayer(incoming=enc, num_units=nz, nonlinearity=sigmoid)

    #Decoder

    dec = InputLayer(shape=(None, nz))
    dec = DenseLayer(incoming=dec, num_units=256 * 4 * 4 * 10)
    dec = reshape(incoming=dec, shape=(-1, 256, 10, 4, 4))
    dec = Deconv3D(incoming=dec,
                   num_filters=256,
                   filter_size=4,
                   stride=2,
                   crop=1,
                   nonlinearity=relu)
    dec = Deconv3D(incoming=dec,
                   num_filters=128,
                   filter_size=4,
                   stride=2,
                   crop=1,
                   nonlinearity=relu)
    dec = Deconv3D(incoming=dec,
                   num_filters=64,
                   filter_size=4,
                   stride=2,
                   crop=1,
                   nonlinearity=relu)
    dec = Deconv3D(incoming=dec,
                   num_filters=1,
                   filter_size=4,
                   stride=2,
                   crop=1,
                   nonlinearity=sigmoid)

    return enc, dec
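A hedged sketch of wiring the two halves together: the encoder and decoder above have separate InputLayers, so the latent code can be fed into the decoder through get_output's inputs argument.

import theano
import theano.tensor as T
import lasagne

# Hedged sketch: run the encoder on a symbolic volume and feed its code into
# the decoder's InputLayer to obtain a reconstruction.
x = T.TensorType(theano.config.floatX, (False,) * 5)('x')   # (batch, 1, 160, 64, 64)
enc, dec = build_net(nz=200)
code = lasagne.layers.get_output(enc, x)              # latent vector, shape (batch, nz)
reconstruction = lasagne.layers.get_output(dec, code)
autoencode_fn = theano.function([x], reconstruction)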
    def build_network(self):
        self.input_var = tensor5()
        self.output_var = matrix()
        net = OrderedDict()
        # BatchNormLayer normalizing over the spatial axes (2, 3, 4) treats each
        # sample independently, which emulates instance normalization; 'auto'
        # normalizes over everything but the channel axis (standard batch norm).
        if self.instance_norm:
            norm_fct = batch_norm
            norm_kwargs = {'axes': (2, 3, 4)}
        else:
            norm_fct = batch_norm
            norm_kwargs = {'axes': 'auto'}

        self.input_layer = net['input'] = InputLayer(
            (self.batch_size, self.n_input_channels, self.input_dim[0],
             self.input_dim[1], self.input_dim[2]), self.input_var)

        net['contr_1_1'] = norm_fct(
            Conv3DLayer(net['input'],
                        self.base_n_filters,
                        3,
                        nonlinearity=self.nonlinearity,
                        pad=self.pad,
                        W=lasagne.init.HeNormal(gain="relu")), **norm_kwargs)
        net['contr_1_2'] = norm_fct(
            Conv3DLayer(net['contr_1_1'],
                        self.base_n_filters,
                        3,
                        nonlinearity=self.nonlinearity,
                        pad=self.pad,
                        W=lasagne.init.HeNormal(gain="relu")), **norm_kwargs)
        net['pool1'] = Pool3DLayer(net['contr_1_2'], (1, 2, 2))

        net['contr_2_1'] = norm_fct(
            Conv3DLayer(net['pool1'],
                        self.base_n_filters * 2,
                        3,
                        nonlinearity=self.nonlinearity,
                        pad=self.pad,
                        W=lasagne.init.HeNormal(gain="relu")), **norm_kwargs)
        net['contr_2_2'] = norm_fct(
            Conv3DLayer(net['contr_2_1'],
                        self.base_n_filters * 2,
                        3,
                        nonlinearity=self.nonlinearity,
                        pad=self.pad,
                        W=lasagne.init.HeNormal(gain="relu")), **norm_kwargs)
        l = net['pool2'] = Pool3DLayer(net['contr_2_2'], (1, 2, 2))
        if self.dropout is not None:
            l = DropoutLayer(l, p=self.dropout)

        net['contr_3_1'] = norm_fct(
            Conv3DLayer(l,
                        self.base_n_filters * 4,
                        3,
                        nonlinearity=self.nonlinearity,
                        pad=self.pad,
                        W=lasagne.init.HeNormal(gain="relu")), **norm_kwargs)
        net['contr_3_2'] = norm_fct(
            Conv3DLayer(net['contr_3_1'],
                        self.base_n_filters * 4,
                        3,
                        nonlinearity=self.nonlinearity,
                        pad=self.pad,
                        W=lasagne.init.HeNormal(gain="relu")), **norm_kwargs)
        l = net['pool3'] = Pool3DLayer(net['contr_3_2'], (1, 2, 2))
        if self.dropout is not None:
            l = DropoutLayer(l, p=self.dropout)

        net['contr_4_1'] = norm_fct(
            Conv3DLayer(l,
                        self.base_n_filters * 8,
                        3,
                        nonlinearity=self.nonlinearity,
                        pad=self.pad,
                        W=lasagne.init.HeNormal(gain="relu")), **norm_kwargs)
        net['contr_4_2'] = norm_fct(
            Conv3DLayer(net['contr_4_1'],
                        self.base_n_filters * 8,
                        3,
                        nonlinearity=self.nonlinearity,
                        pad=self.pad,
                        W=lasagne.init.HeNormal(gain="relu")), **norm_kwargs)
        l = net['pool4'] = Pool3DLayer(net['contr_4_2'], (1, 2, 2))
        if self.dropout is not None:
            l = DropoutLayer(l, p=self.dropout)

        net['encode_1'] = norm_fct(
            Conv3DLayer(l,
                        self.base_n_filters * 16,
                        3,
                        nonlinearity=self.nonlinearity,
                        pad=self.pad,
                        W=lasagne.init.HeNormal(gain="relu")), **norm_kwargs)
        l = net['encode_2'] = norm_fct(
            Conv3DLayer(net['encode_1'],
                        self.base_n_filters * 16,
                        3,
                        nonlinearity=self.nonlinearity,
                        pad=self.pad,
                        W=lasagne.init.HeNormal(gain="relu")), **norm_kwargs)
        net['upscale1'] = Upscale3DLayer(l, (1, 2, 2))

        l = net['concat1'] = ConcatLayer([net['upscale1'], net['contr_4_2']],
                                         cropping=(None, None, "center",
                                                   "center", "center"))
        if self.dropout is not None:
            l = DropoutLayer(l, p=self.dropout)
        net['expand_1_1'] = norm_fct(
            Conv3DLayer(l,
                        self.base_n_filters * 8,
                        3,
                        nonlinearity=self.nonlinearity,
                        pad=self.pad,
                        W=lasagne.init.HeNormal(gain="relu")), **norm_kwargs)
        l = net['expand_1_2'] = norm_fct(
            Conv3DLayer(net['expand_1_1'],
                        self.base_n_filters * 8,
                        3,
                        nonlinearity=self.nonlinearity,
                        pad=self.pad,
                        W=lasagne.init.HeNormal(gain="relu")), **norm_kwargs)
        net['upscale2'] = Upscale3DLayer(l, (1, 2, 2))

        l = net['concat2'] = ConcatLayer([net['upscale2'], net['contr_3_2']],
                                         cropping=(None, None, "center",
                                                   "center", "center"))
        if self.dropout is not None:
            l = DropoutLayer(l, p=self.dropout)
        net['expand_2_1'] = norm_fct(
            Conv3DLayer(l,
                        self.base_n_filters * 4,
                        3,
                        nonlinearity=self.nonlinearity,
                        pad=self.pad,
                        W=lasagne.init.HeNormal(gain="relu")), **norm_kwargs)
        ds2 = l = net['expand_2_2'] = norm_fct(
            Conv3DLayer(net['expand_2_1'],
                        self.base_n_filters * 4,
                        3,
                        nonlinearity=self.nonlinearity,
                        pad=self.pad,
                        W=lasagne.init.HeNormal(gain="relu")), **norm_kwargs)
        net['upscale3'] = Upscale3DLayer(l, (1, 2, 2))

        l = net['concat3'] = ConcatLayer([net['upscale3'], net['contr_2_2']],
                                         cropping=(None, None, "center",
                                                   "center", "center"))
        if self.dropout is not None:
            l = DropoutLayer(l, p=self.dropout)
        net['expand_3_1'] = norm_fct(
            Conv3DLayer(l,
                        self.base_n_filters * 2,
                        3,
                        nonlinearity=self.nonlinearity,
                        pad=self.pad,
                        W=lasagne.init.HeNormal(gain="relu")), **norm_kwargs)
        l = net['expand_3_2'] = norm_fct(
            Conv3DLayer(net['expand_3_1'],
                        self.base_n_filters * 2,
                        3,
                        nonlinearity=self.nonlinearity,
                        pad=self.pad,
                        W=lasagne.init.HeNormal(gain="relu")), **norm_kwargs)
        net['upscale4'] = Upscale3DLayer(l, (1, 2, 2))

        net['concat4'] = ConcatLayer([net['upscale4'], net['contr_1_2']],
                                     cropping=(None, None, "center", "center",
                                               "center"))
        net['expand_4_1'] = norm_fct(
            Conv3DLayer(net['concat4'],
                        self.base_n_filters,
                        3,
                        nonlinearity=self.nonlinearity,
                        pad=self.pad,
                        W=lasagne.init.HeNormal(gain="relu")), **norm_kwargs)
        net['expand_4_2'] = norm_fct(
            Conv3DLayer(net['expand_4_1'],
                        self.base_n_filters,
                        3,
                        nonlinearity=self.nonlinearity,
                        pad=self.pad,
                        W=lasagne.init.HeNormal(gain="relu")), **norm_kwargs)

        net['output_segmentation'] = Conv3DLayer(net['expand_4_2'],
                                                 self.num_classes,
                                                 1,
                                                 nonlinearity=None)

        ds2_1x1_conv = Conv3DLayer(ds2,
                                   self.num_classes,
                                   1,
                                   1,
                                   'same',
                                   nonlinearity=lasagne.nonlinearities.linear,
                                   W=lasagne.init.HeNormal(gain='relu'))
        ds1_ds2_sum_upscale = Upscale3DLayer(ds2_1x1_conv, (1, 2, 2))
        ds3_1x1_conv = Conv3DLayer(net['expand_3_2'],
                                   self.num_classes,
                                   1,
                                   1,
                                   'same',
                                   nonlinearity=lasagne.nonlinearities.linear,
                                   W=lasagne.init.HeNormal(gain='relu'))
        ds1_ds2_sum_upscale_ds3_sum = ElemwiseSumLayer(
            (ds1_ds2_sum_upscale, ds3_1x1_conv))
        ds1_ds2_sum_upscale_ds3_sum_upscale = Upscale3DLayer(
            ds1_ds2_sum_upscale_ds3_sum, (1, 2, 2))

        self.seg_layer = l = ElemwiseSumLayer(
            (net['output_segmentation'], ds1_ds2_sum_upscale_ds3_sum_upscale))

        net['dimshuffle'] = DimshuffleLayer(l, (0, 2, 3, 4, 1))
        batch_size, n_z, n_rows, n_cols, _ = lasagne.layers.get_output(
            net['dimshuffle']).shape
        net['reshapeSeg'] = ReshapeLayer(
            net['dimshuffle'],
            (batch_size * n_rows * n_cols * n_z, self.num_classes))
        self.output_layer = net['output_flattened'] = NonlinearityLayer(
            net['reshapeSeg'], nonlinearity=lasagne.nonlinearities.softmax)
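A hedged training-loss sketch for the flattened softmax output built above (the bare names stand for the corresponding self.* attributes; the target matrix is assumed to hold one-hot labels flattened in the same order as the predictions):

import theano
import lasagne

# Hedged sketch; output_layer / input_var / output_var stand for the
# self.* attributes assembled in build_network above.
prediction = lasagne.layers.get_output(output_layer)          # (N*z*rows*cols, classes)
loss = lasagne.objectives.categorical_crossentropy(prediction, output_var).mean()
params = lasagne.layers.get_all_params(output_layer, trainable=True)
updates = lasagne.updates.adam(loss, params, learning_rate=1e-4)
train_fn = theano.function([input_var, output_var], loss, updates=updates)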
Example #12
    def buildBaseNet(self, inputShape=(None, 4, 25, 25, 25), forSummary=False):

        if not forSummary:
            message = 'Building the Architecture of BaseNet'
            self.logger.info(logMessage('+', message))

        baseNet = InputLayer(self.inputShape, self.inputVar)

        if not forSummary:
            message = 'Building the convolution layers'
            self.logger.info(logMessage('-', message))

        kernelShapeListLen = len(self.kernelNumList)

        summary = '\n' + '.' * 130 + '\n'
        summary += '    {:<15} {:<50} {:<29} {:<29}\n'.format(
            'Layer', 'Input shape', 'W shape', 'Output shape')
        summary += '.' * 130 + '\n'

        summary += '{:<3} {:<15} {:<50} {:<29} {:<29}\n'.format(
            1, 'Input', inputShape, '',
            get_output_shape(baseNet, input_shapes=inputShape))

        for i in xrange(kernelShapeListLen - 1):

            kernelShape = self.kernelShapeList[i]
            kernelNum = self.kernelNumList[i]

            conv3D = Conv3DLayer(incoming=baseNet,
                                 num_filters=kernelNum,
                                 filter_size=kernelShape,
                                 W=HeNormal(gain='relu'),
                                 nonlinearity=linear,
                                 name='Conv3D{}'.format(i))

            # Record the filter shape, just for the summary.
            WShape = conv3D.W.get_value().shape

            summary += '{:<3} {:<15} {:<50} {:<29} {:<29}\n'.format(
                i + 2, 'Conv3D',
                get_output_shape(baseNet, input_shapes=inputShape), WShape,
                get_output_shape(conv3D, input_shapes=inputShape))

            batchNormLayer = BatchNormLayer(conv3D)
            preluLayer = prelu(batchNormLayer)

            concatLayerInputShape = '{:<25}{:<25}'.format(
                get_output_shape(conv3D, input_shapes=inputShape),
                get_output_shape(baseNet, input_shapes=inputShape))

            baseNet = ConcatLayer(
                [preluLayer, baseNet],
                1,
                cropping=['center', None, 'center', 'center', 'center'])

            summary += '    {:<15} {:<50} {:<29} {:<29}\n'.format(
                'Concat', concatLayerInputShape, '',
                get_output_shape(baseNet, input_shapes=inputShape))
        if not forSummary:
            message = 'Finished building the convolution layers'
            self.logger.info(logMessage('-', message))

            message = 'Building the last classification layers'
            self.logger.info(logMessage('-', message))

        assert self.kernelShapeList[-1] == [1, 1, 1]

        kernelShape = self.kernelShapeList[-1]
        kernelNum = self.kernelNumList[-1]

        conv3D = Conv3DLayer(incoming=baseNet,
                             num_filters=kernelNum,
                             filter_size=kernelShape,
                             W=HeNormal(gain='relu'),
                             nonlinearity=linear,
                             name='Classification Layer')

        receptiveFieldList = [
            inputShape[idx] -
            get_output_shape(conv3D, input_shapes=inputShape)[idx] + 1
            for idx in xrange(-3, 0)
        ]
        assert receptiveFieldList != []
        receptiveFieldSet = set(receptiveFieldList)
        assert len(receptiveFieldSet) == 1, (receptiveFieldSet, inputShape,
                                             get_output_shape(
                                                 conv3D,
                                                 input_shapes=inputShape))
        self.receptiveField = list(receptiveFieldSet)[0]

        # Record the filter shape, just for the summary.
        WShape = conv3D.W.get_value().shape

        summary += '{:<3} {:<15} {:<50} {:<29} {:<29}\n'.format(
            kernelShapeListLen + 1, 'Conv3D',
            get_output_shape(baseNet, input_shapes=inputShape), WShape,
            get_output_shape(conv3D, input_shapes=inputShape))

        # The output shape should be (batchSize, numOfClasses, zSize, xSize, ySize).
        # We will reshape it to (batchSize * zSize * xSize * ySize, numOfClasses),
        # because softmax in Theano only accepts a matrix.

        baseNet = DimshuffleLayer(conv3D, (0, 2, 3, 4, 1))
        summary += '    {:<15} {:<50} {:<29} {:<29}\n'.format(
            'Dimshuffle', get_output_shape(conv3D, input_shapes=inputShape),
            '', get_output_shape(baseNet, input_shapes=inputShape))

        batchSize, zSize, xSize, ySize, _ = get_output(baseNet).shape
        reshapeLayerInputShape = get_output_shape(baseNet,
                                                  input_shapes=inputShape)
        baseNet = ReshapeLayer(baseNet,
                               (batchSize * zSize * xSize * ySize, kernelNum))
        summary += '    {:<15} {:<50} {:<29} {:<29}\n'.format(
            'Reshape', reshapeLayerInputShape, '',
            get_output_shape(baseNet, input_shapes=inputShape))

        nonlinearityLayerInputShape = get_output_shape(baseNet,
                                                       input_shapes=inputShape)
        baseNet = NonlinearityLayer(baseNet, softmax)
        summary += '    {:<15} {:<50} {:<29} {:<29}\n'.format(
            'Nonlinearity', nonlinearityLayerInputShape, '',
            get_output_shape(baseNet, input_shapes=inputShape))

        if not forSummary:
            message = 'Finished building the last classification layers'
            self.logger.info(logMessage('-', message))

            message = 'The receptive field of BaseNet equals {}'.format(
                self.receptiveField)
            self.logger.info(logMessage('*', message))

            message = 'Finished building the architecture of BaseNet'
            self.logger.info(logMessage('+', message))

        summary += '.' * 130 + '\n'
        self._summary = summary

        return baseNet
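A hedged usage sketch (instance and attribute names assumed): besides the layer graph, the builder accumulates a per-layer shape table in self._summary, which can be inspected after construction.

# Hedged sketch; `model` stands for an instance of the (unnamed) enclosing class.
baseNet = model.buildBaseNet(inputShape=(None, 4, 25, 25, 25), forSummary=True)
print(model._summary)          # per-layer table: input shape, W shape, output shape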
Example #13
                    output = self.merge_function(output, input)
                else:
                    output = input
            return output

    # Definition of the network
    conv_num_filters = 48
    l = InputLayer(shape=(None, 1, 48, 72, 64), name="input")
    l_input = l

    # # # #
    # encoding
    # # # #
    l = Conv3DLayer(l,
                    flip_filters=False,
                    num_filters=16,
                    filter_size=(1, 1, 3),
                    pad='valid',
                    name="conv")
    l = Conv3DLayer(l,
                    flip_filters=False,
                    num_filters=16,
                    filter_size=(1, 3, 1),
                    pad='valid',
                    name="conv")
    l_conv_0 = l = Conv3DLayer(l,
                               flip_filters=False,
                               num_filters=16,
                               filter_size=(3, 1, 1),
                               pad='valid',
                               name="conv")
Example #14
def build_model(input_var=None):

    # Define common parameters
    input_shape = (None, 1, 145, 182, 155)

    n_filters = {
        'l1': 32,
        'l2': 64,
        'l3': 128,
        'l4': 256,
        'l5': 256,
        'fc': 512
    }

    kws_conv = {
        'filter_size': (3, 3, 3),
        'stride': (1, 1, 1),
        'pad': 'same',  # (1,1,1) -- same might be faster
        'W': GlorotUniform(gain='relu'),
        'nonlinearity': rectify,
        'flip_filters': False
    }

    kws_maxpool = {'pool_size': (2, 2, 2), 'stride': (2, 2, 2)}
    # 'pad': (0,0,0)} -- should be defined per layer

    kws_dense = {'W': GlorotUniform(gain='relu'), 'nonlinearity': rectify}

    kws_dropout = {'p': 0.5}

    # Define network architecture
    net = InputLayer(input_shape, input_var, name='input')

    # ----------- 1st layer group ---------------
    net = batch_norm(
        Conv3DLayer(net, n_filters['l1'], name='conv1a', **kws_conv))
    net = MaxPool3DLayer(net, name='pool1', pad=(1, 0, 1), **kws_maxpool)

    # ------------- 2nd layer group --------------
    net = batch_norm(
        Conv3DLayer(net, n_filters['l2'], name='conv2a', **kws_conv))
    net = MaxPool3DLayer(net, name='pool2', pad=(1, 1, 0), **kws_maxpool)

    # ----------------- 3rd layer group --------------
    net = batch_norm(
        Conv3DLayer(net, n_filters['l3'], name='conv3a', **kws_conv))
    net = batch_norm(
        Conv3DLayer(net, n_filters['l3'], name='conv3b', **kws_conv))
    net = MaxPool3DLayer(net, name='pool3', pad=(1, 0, 1), **kws_maxpool)

    # ----------------- 4th layer group --------------
    net = batch_norm(
        Conv3DLayer(net, n_filters['l4'], name='conv4a', **kws_conv))
    net = batch_norm(
        Conv3DLayer(net, n_filters['l4'], name='conv4b', **kws_conv))
    net = MaxPool3DLayer(net, name='pool4', pad=(1, 1, 0), **kws_maxpool)

    # ----------------- 5th layer group --------------
    net = batch_norm(
        Conv3DLayer(net, n_filters['l5'], name='conv5a', **kws_conv))
    net = batch_norm(
        Conv3DLayer(net, n_filters['l5'], name='conv5b', **kws_conv))
    net = MaxPool3DLayer(net, name='pool5', **kws_maxpool)

    # ----------------- FC layers group --------------
    net = batch_norm(DenseLayer(net, n_filters['fc'], name='fc6', **kws_dense))
    net = DropoutLayer(net, name='fc6_dropout', **kws_dropout)

    net = batch_norm(DenseLayer(net, n_filters['fc'], name='fc7', **kws_dense))
    net = DropoutLayer(net, name='fc7_dropout', **kws_dropout)

    # ----------------- Output layers group --------------
    net = batch_norm(DenseLayer(net, 3, nonlinearity=None, name='fc8'))
    net = NonlinearityLayer(net, nonlinearity=softmax, name='prob')

    return net
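A hedged usage sketch for build_model: compile a whole-volume classifier (dropout is disabled at test time via deterministic=True):

import theano
import theano.tensor as T
import lasagne

# Hedged sketch; names are illustrative only.
input_var = T.TensorType(theano.config.floatX, (False,) * 5)('scan')
net = build_model(input_var)
probs = lasagne.layers.get_output(net, deterministic=True)   # (batch, 3) class probabilities
predict_fn = theano.function([input_var], probs)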
def build_net(input_var=None,
              input_shape=(128, 128, 128),
              num_output_classes=4,
              num_input_channels=4,
              base_n_filter=8,
              do_instance_norm=True,
              batch_size=None,
              dropout_p=0.3,
              do_norm=True):
    nonlin = lasagne.nonlinearities.leaky_rectify
    if do_instance_norm:
        axes = (2, 3, 4)
    else:
        axes = 'auto'

    def conv_norm_lrelu(l_in, feat_out):
        l = Conv3DLayer(l_in,
                        feat_out,
                        3,
                        1,
                        'same',
                        nonlinearity=linear,
                        W=HeNormal(gain='relu'))
        if do_norm:
            l = BatchNormLayer(l, axes=axes)
        return NonlinearityLayer(l, nonlin)

    def norm_lrelu_conv(l_in, feat_out, stride=1, filter_size=3):
        if do_norm:
            l_in = BatchNormLayer(l_in, axes=axes)
        l = NonlinearityLayer(l_in, nonlin)
        return Conv3DLayer(l,
                           feat_out,
                           filter_size,
                           stride,
                           'same',
                           nonlinearity=linear,
                           W=HeNormal(gain='relu'))

    def lrelu_conv(l_in, feat_out, stride=1, filter_size=3):
        l = NonlinearityLayer(l_in, nonlin)
        return Conv3DLayer(l,
                           feat_out,
                           filter_size,
                           stride,
                           'same',
                           nonlinearity=linear,
                           W=HeNormal(gain='relu'))

    def norm_lrelu_upscale_conv_norm_lrelu(l_in, feat_out):
        if do_norm:
            l_in = BatchNormLayer(l_in, axes=axes)
        l = NonlinearityLayer(l_in, nonlin)
        l = Upscale3DLayer(l, 2)
        l = Conv3DLayer(l,
                        feat_out,
                        3,
                        1,
                        'same',
                        nonlinearity=linear,
                        W=HeNormal(gain='relu'))
        if do_norm:
            l = BatchNormLayer(l, axes=axes)
        l = NonlinearityLayer(l, nonlin)
        return l

    l_in = InputLayer(shape=(batch_size, num_input_channels, input_shape[0],
                             input_shape[1], input_shape[2]),
                      input_var=input_var)

    l = r = Conv3DLayer(l_in,
                        num_filters=base_n_filter,
                        filter_size=3,
                        stride=1,
                        nonlinearity=linear,
                        pad='same',
                        W=HeNormal(gain='relu'))
    l = NonlinearityLayer(l, nonlin)
    l = Conv3DLayer(l,
                    num_filters=base_n_filter,
                    filter_size=3,
                    stride=1,
                    nonlinearity=linear,
                    pad='same',
                    W=HeNormal(gain='relu'))
    l = DropoutLayer(l, dropout_p)
    l = lrelu_conv(l, base_n_filter, 1, 3)
    l = ElemwiseSumLayer((l, r))
    skip1 = NonlinearityLayer(l, nonlin)
    if do_norm:
        l = BatchNormLayer(l, axes=axes)
    l = NonlinearityLayer(l, nonlin)

    l = r = Conv3DLayer(l,
                        base_n_filter * 2,
                        3,
                        2,
                        'same',
                        nonlinearity=linear,
                        W=HeNormal(gain='relu'))
    l = norm_lrelu_conv(l, base_n_filter * 2)
    l = DropoutLayer(l, dropout_p)
    l = norm_lrelu_conv(l, base_n_filter * 2)
    l = ElemwiseSumLayer((l, r))
    if do_norm:
        l = BatchNormLayer(l, axes=axes)
    l = skip2 = NonlinearityLayer(l, nonlin)

    l = r = Conv3DLayer(l,
                        base_n_filter * 4,
                        3,
                        2,
                        'same',
                        nonlinearity=linear,
                        W=HeNormal(gain='relu'))
    l = norm_lrelu_conv(l, base_n_filter * 4)
    l = DropoutLayer(l, dropout_p)
    l = norm_lrelu_conv(l, base_n_filter * 4)
    l = ElemwiseSumLayer((l, r))
    if do_norm:
        l = BatchNormLayer(l, axes=axes)
    l = skip3 = NonlinearityLayer(l, nonlin)

    l = r = Conv3DLayer(l,
                        base_n_filter * 8,
                        3,
                        2,
                        'same',
                        nonlinearity=linear,
                        W=HeNormal(gain='relu'))
    l = norm_lrelu_conv(l, base_n_filter * 8)
    l = DropoutLayer(l, dropout_p)
    l = norm_lrelu_conv(l, base_n_filter * 8)
    l = ElemwiseSumLayer((l, r))
    if do_norm:
        l = BatchNormLayer(l, axes=axes)
    l = skip4 = NonlinearityLayer(l, nonlin)

    l = r = Conv3DLayer(l,
                        base_n_filter * 16,
                        3,
                        2,
                        'same',
                        nonlinearity=linear,
                        W=HeNormal(gain='relu'))
    l = norm_lrelu_conv(l, base_n_filter * 16)
    l = DropoutLayer(l, dropout_p)
    l = norm_lrelu_conv(l, base_n_filter * 16)
    l = ElemwiseSumLayer((l, r))
    l = norm_lrelu_upscale_conv_norm_lrelu(l, base_n_filter * 8)

    l = Conv3DLayer(l,
                    base_n_filter * 8,
                    1,
                    1,
                    'same',
                    nonlinearity=linear,
                    W=HeNormal(gain='relu'))
    if do_norm:
        l = BatchNormLayer(l, axes=axes)
    l = NonlinearityLayer(l, nonlin)

    l = ConcatLayer((skip4, l), cropping=[None, None, 'center', 'center'])
    l = conv_norm_lrelu(l, base_n_filter * 16)
    l = Conv3DLayer(l,
                    base_n_filter * 8,
                    1,
                    1,
                    'same',
                    nonlinearity=linear,
                    W=HeNormal(gain='relu'))
    l = norm_lrelu_upscale_conv_norm_lrelu(l, base_n_filter * 4)

    l = ConcatLayer((skip3, l), cropping=[None, None, 'center', 'center'])
    l = ds2 = conv_norm_lrelu(l, base_n_filter * 8)
    l = Conv3DLayer(l,
                    base_n_filter * 4,
                    1,
                    1,
                    'same',
                    nonlinearity=linear,
                    W=HeNormal(gain='relu'))
    l = norm_lrelu_upscale_conv_norm_lrelu(l, base_n_filter * 2)

    l = ConcatLayer((skip2, l), cropping=[None, None, 'center', 'center'])
    l = ds3 = conv_norm_lrelu(l, base_n_filter * 4)
    l = Conv3DLayer(l,
                    base_n_filter * 2,
                    1,
                    1,
                    'same',
                    nonlinearity=linear,
                    W=HeNormal(gain='relu'))
    l = norm_lrelu_upscale_conv_norm_lrelu(l, base_n_filter)

    l = ConcatLayer((skip1, l), cropping=[None, None, 'center', 'center'])
    l = conv_norm_lrelu(l, base_n_filter * 2)
    l_pred = Conv3DLayer(l,
                         num_output_classes,
                         1,
                         pad='same',
                         nonlinearity=None)

    ds2_1x1_conv = Conv3DLayer(ds2,
                               num_output_classes,
                               1,
                               1,
                               'same',
                               nonlinearity=linear,
                               W=HeNormal(gain='relu'))
    ds1_ds2_sum_upscale = Upscale3DLayer(ds2_1x1_conv, 2)
    ds3_1x1_conv = Conv3DLayer(ds3,
                               num_output_classes,
                               1,
                               1,
                               'same',
                               nonlinearity=linear,
                               W=HeNormal(gain='relu'))
    ds1_ds2_sum_upscale_ds3_sum = ElemwiseSumLayer(
        (ds1_ds2_sum_upscale, ds3_1x1_conv))
    ds1_ds2_sum_upscale_ds3_sum_upscale = Upscale3DLayer(
        ds1_ds2_sum_upscale_ds3_sum, 2)

    l = seg_layer = ElemwiseSumLayer(
        (l_pred, ds1_ds2_sum_upscale_ds3_sum_upscale))
    l = DimshuffleLayer(l, (0, 2, 3, 4, 1))
    batch_size, n_rows, n_cols, n_z, _ = lasagne.layers.get_output(l).shape
    l = ReshapeLayer(l,
                     (batch_size * n_rows * n_cols * n_z, num_output_classes))
    l = NonlinearityLayer(l, nonlinearity=lasagne.nonlinearities.softmax)
    return l, seg_layer
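A hedged training sketch for the segmentation net above (targets are assumed to be one integer label per voxel, flattened in the same order as the reshaped softmax output):

import theano
import theano.tensor as T
import lasagne

# Hedged sketch; variable names and the learning rate are illustrative only.
input_var = T.TensorType(theano.config.floatX, (False,) * 5)('x')
target_var = T.ivector('y')                              # flattened per-voxel labels
output_layer, seg_layer = build_net(input_var, input_shape=(128, 128, 128))
prediction = lasagne.layers.get_output(output_layer)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var).mean()
params = lasagne.layers.get_all_params(output_layer, trainable=True)
updates = lasagne.updates.adam(loss, params, learning_rate=5e-4)
train_fn = theano.function([input_var, target_var], loss, updates=updates)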