Code example #1
def build_model(input_shape, input_var, dense=True):
    net = {}
    net['input'] = InputLayer(input_shape, input_var=input_var)
    net['input'].num_filters = input_shape[1]
    net['conv1'] = ConvLayer(net['input'], num_filters=128, filter_size=3, nonlinearity=nonlinearities.leaky_rectify, pad='same')
    net['conv2'] = ConvLayer(net['conv1'], num_filters=256, filter_size=3, nonlinearity=nonlinearities.leaky_rectify, pad='same')
    net['pool1'] = ConvLayer(net['conv2'], num_filters=256, filter_size=3, stride=2, nonlinearity=nonlinearities.leaky_rectify, pad='same')
    net['conv3'] = ConvLayer(net['pool1'], num_filters=512, filter_size=3, nonlinearity=nonlinearities.leaky_rectify, pad='same')
    net['pool2'] = ConvLayer(net['conv3'], num_filters=512, filter_size=3, stride=2, nonlinearity=nonlinearities.leaky_rectify, pad='same')
    if dense:
        net['dense'] = dropout(DenseLayer(net['pool2'], num_units=1024, nonlinearity=nonlinearities.leaky_rectify), 0.5)
        # Deconv
        net['dense/inverse'] = inverse_dense_layer(net['dense'], net['dense'], net['pool2'].output_shape)
        net['pool2/inverse'] = inverse_convolution_strided_layer(net['dense/inverse'], net['pool2'])
    else:
        net['pool2/inverse'] = inverse_convolution_strided_layer(net['pool2'], net['pool2'])
    net['conv3/inverse'] = inverse_convolution_layer(net['pool2/inverse'], net['conv3'])
    net['pool1/inverse'] = inverse_convolution_strided_layer(net['conv3/inverse'], net['pool1'])
    net['conv2/inverse'] = inverse_convolution_layer(net['pool1/inverse'], net['conv2'])
    net['conv1/inverse'] = inverse_convolution_layer(net['conv2/inverse'], net['conv1'])
    net['conv0/inverse'] = ConvLayer(net['conv1/inverse'], num_filters=input_shape[1], filter_size=1, nonlinearity=nonlinearities.linear, pad='same')

    net['prob'] = net['conv0/inverse']

    for layer in get_all_layers(net['prob']):
        print(layer)
        print(layer.output_shape)
    return net
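The `inverse_dense_layer`, `inverse_convolution_layer`, and `inverse_convolution_strided_layer` helpers used in examples #1-#4 are not included in this listing. A minimal sketch of what they might look like, assuming the convolution inverses are built on `lasagne.layers.InverseLayer` and the dense inverse is a plain dense-plus-reshape (the helpers in the source project may differ, e.g. by tying weights):

import numpy as np
from lasagne.layers import InverseLayer, DenseLayer, ReshapeLayer
from lasagne import nonlinearities

def inverse_convolution_layer(incoming, conv_layer):
    # Undo a same-padded convolution by backpropagating through it.
    return InverseLayer(incoming, conv_layer)

def inverse_convolution_strided_layer(incoming, conv_layer):
    # Undo a strided convolution; InverseLayer also restores the spatial size.
    return InverseLayer(incoming, conv_layer)

def inverse_dense_layer(incoming, dense_layer, target_shape):
    # Map the code back to the size of the target feature map and reshape to it;
    # `dense_layer` could be used for weight tying, which is omitted here.
    flat = DenseLayer(incoming, num_units=int(np.prod(target_shape[1:])),
                      nonlinearity=nonlinearities.leaky_rectify)
    return ReshapeLayer(flat, ([0],) + tuple(target_shape[1:]))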
Code example #2
def build_model_dense(input_shape, input_var):
    net = {}
    net['input'] = InputLayer(input_shape, input_var=input_var)
    net['input'].num_filters = input_shape[1]
    net['conv1'] = ConvLayer(net['input'], num_filters=256, filter_size=3, nonlinearity=nonlinearities.leaky_rectify, pad='same')
    net['conv2'] = ConvLayer(net['conv1'], num_filters=256, filter_size=3, nonlinearity=nonlinearities.leaky_rectify, pad='same')
    net['conv2/reshape'] = ReshapeLayer(net['conv2'], (-1, net['conv2'].output_shape[1] * net['conv2'].output_shape[2]))
    net['dense'] = dropout(DenseLayer(net['conv2/reshape'], num_units=1024, nonlinearity=nonlinearities.leaky_rectify), 0.5)

    net['dense/inverse'] = inverse_dense_layer(net['dense'], net['dense'], net['conv2'].output_shape)
    net['conv2/inverse'] = inverse_convolution_layer(net['dense/inverse'], net['conv2'])
    net['conv1/inverse'] = inverse_convolution_layer(net['conv2/inverse'], net['conv1'])
    net['conv0/inverse'] = ConvLayer(net['conv1/inverse'], num_filters=input_shape[1], filter_size=1, nonlinearity=nonlinearities.linear, pad='same')
    net['prob'] = net['conv0/inverse']
    for layer in get_all_layers(net['prob']):
        print(layer)
        print(layer.output_shape)
    return net
Code example #3
def build_model_small(input_shape, input_var):
    net = {}
    net['input'] = InputLayer(input_shape, input_var=input_var)
    net['input'].num_filters = input_shape[1]
    net['conv1'] = batch_norm(ConvLayer(net['input'], num_filters=256, filter_size=11, nonlinearity=nonlinearities.leaky_rectify, pad='same'))
    net['pool1'] = dropout(PoolLayer(net['conv1'], 2, mode='max'), 0.5)
    net['conv2'] = batch_norm(ConvLayer(net['pool1'], num_filters=256, filter_size=7, nonlinearity=nonlinearities.leaky_rectify, pad='same'))
    net['pool2'] = dropout(PoolLayer(net['conv2'], 2, mode='max'), 0.5)
    net['conv3'] = batch_norm(ConvLayer(net['pool2'], num_filters=396, filter_size=5, nonlinearity=nonlinearities.leaky_rectify, pad='same'))
    net['pool3'] = dropout(PoolLayer(net['conv3'], 2, mode='max'), 0.5)
    net['conv4'] = dropout(batch_norm(ConvLayer(net['pool3'], num_filters=512, filter_size=3, nonlinearity=nonlinearities.leaky_rectify, pad='same')), 0.5)
    net['conv5'] = dropout(batch_norm(ConvLayer(net['conv4'], num_filters=1024, filter_size=1, nonlinearity=nonlinearities.leaky_rectify, pad='same')), 0.5)
    net['dense1'] = dropout(batch_norm(DenseLayer(net['conv5'], num_units=1024, nonlinearity=nonlinearities.leaky_rectify)), 0.5)
    net['dense2'] = DenseLayer(net['dense1'], num_units=11, nonlinearity=nonlinearities.softmax)
    net['prob'] = net['dense2']
    for layer in get_all_layers(net['prob']):
        print(layer)
        print(layer.output_shape)
    return net
Code example #4
def build_model_small(input_shape, input_var):
    net = {}
    net['input'] = InputLayer(input_shape, input_var=input_var)
    net['input'].num_filters = input_shape[1]
    net['conv1'] = ConvLayer(net['input'], num_filters=256, filter_size=11, nonlinearity=nonlinearities.leaky_rectify, pad='same')
    net['conv2'] = ConvLayer(net['conv1'], num_filters=256, filter_size=7, nonlinearity=nonlinearities.leaky_rectify, pad='same')
    net['conv3'] = ConvLayer(net['conv2'], num_filters=396, filter_size=5, nonlinearity=nonlinearities.leaky_rectify, pad='same')
    net['conv4'] = ConvLayer(net['conv3'], num_filters=512, filter_size=3, nonlinearity=nonlinearities.leaky_rectify, pad='same')
    net['conv5'] = ConvLayer(net['conv4'], num_filters=1024, filter_size=1, nonlinearity=nonlinearities.leaky_rectify, pad='same')
    net['conv5/inverse'] = inverse_convolution_layer(net['conv5'], net['conv5'])
    net['conv4/inverse'] = inverse_convolution_layer(net['conv5/inverse'], net['conv4'])
    net['conv3/inverse'] = inverse_convolution_layer(net['conv4/inverse'], net['conv3'])
    net['conv2/inverse'] = inverse_convolution_layer(net['conv3/inverse'], net['conv2'])
    net['conv1/inverse'] = inverse_convolution_layer(net['conv2/inverse'], net['conv1'])
    net['conv0/inverse'] = ConvLayer(net['conv1/inverse'], num_filters=input_shape[1], filter_size=1, nonlinearity=nonlinearities.linear, pad='same')
    net['prob'] = net['conv0/inverse']
    for layer in get_all_layers(net['prob']):
        print(layer)
        print(layer.output_shape)
    return net
Code example #5
def build_network_zeta(inputlist,
                       imgh=(50, 25, 25),
                       imgw=127,
                       convpooldictlist=None,
                       nhidden=None,
                       dropoutp=None,
                       noutputs=11,
                       depth=1
):
    """
    here, `inputlist` should have img tensors for x, u, v, and for muon_data

    here, `imgh` is a tuple of sizes for `(x, u, v)`. `imgw` is the same
    for all three views.

    also, the `convpooldictlist` here must be a dictionary keyed by view,
    with the convolution and pooling parameters defined independently for 'x',
    'u', and 'v' - e.g., `convpooldictlist['x']` should look like the
    `convpooldictlist` used by network models like `beta`, etc.
    """
    net = {}
    # Input layer
    input_var_x, input_var_u, input_var_v, input_var_muon = \
        inputlist[0], inputlist[1], inputlist[2], inputlist[3]
    net['input-x'] = InputLayer(shape=(None, depth, imgw, imgh[0]),
                                input_var=input_var_x)
    net['input-u'] = InputLayer(shape=(None, depth, imgw, imgh[1]),
                                input_var=input_var_u)
    net['input-v'] = InputLayer(shape=(None, depth, imgw, imgh[2]),
                                input_var=input_var_v)
    net['input-muon-dat'] = InputLayer(shape=(None, 10),
                                       input_var=input_var_muon)

    if convpooldictlist is None:
        raise Exception('Conv-pool dictionaries must be defined!')

    if nhidden is None:
        nhidden = 256

    if dropoutp is None:
        dropoutp = 0.5

    net.update(
        make_Nconvpool_1dense_branch('x', net['input-x'],
                                     convpooldictlist['x'],
                                     nhidden, dropoutp))
    net.update(
        make_Nconvpool_1dense_branch('u', net['input-u'],
                                     convpooldictlist['u'],
                                     nhidden, dropoutp))
    net.update(
        make_Nconvpool_1dense_branch('v', net['input-v'],
                                     convpooldictlist['v'],
                                     nhidden, dropoutp))

    # put a softmax on the muon vars
    net['softed-muon-dat'] = DenseLayer(
        net['input-muon-dat'],
        num_units=noutputs,
        nonlinearity=lasagne.nonlinearities.softmax
    )
    logger.info("Softmax on muon dat with n_units = {}".format(noutputs))


    # Concatenate the parallel inputs, including the muon data
    net['concat'] = ConcatLayer((
        net['dense-x'],
        net['dense-u'],
        net['dense-v'],
        net['softed-muon-dat']
    ))
    logger.info("Network: concat columns...")

    # One more dense layer
    net['dense-across'] = DenseLayer(
        dropout(net['concat'], p=dropoutp),
        num_units=(nhidden // 2),
        nonlinearity=lasagne.nonlinearities.rectify)
    logger.info("Dense {} with nhidden = {}, dropout = {}".format(
        'dense-across', nhidden // 2, dropoutp))

    # And, finally, the `noutputs`-unit output layer
    net['output_prob'] = DenseLayer(
        net['dense-across'],
        num_units=noutputs,
        nonlinearity=lasagne.nonlinearities.softmax
    )
    logger.info("Softmax output prob with n_units = {}".format(noutputs))

    logger.info(
        "n-parameters: %s" % lasagne.layers.count_params(net['output_prob'])
    )
    return net['output_prob']
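A hypothetical call showing the view-keyed `convpooldictlist` structure described in the docstring; the tensor names and the 'nfilters'/'filter_size'/'pool_size' keys are assumptions borrowed from the default dictionaries in the `beta`-style builders below:

import theano.tensor as T

cpdicts = [{'nfilters': 32, 'filter_size': (3, 3), 'pool_size': (2, 2)},
           {'nfilters': 32, 'filter_size': (3, 3), 'pool_size': (2, 2)}]
convpooldictlist = {'x': cpdicts, 'u': cpdicts, 'v': cpdicts}

# one tensor4 per view plus a matrix for the muon data
inputlist = [T.tensor4('x'), T.tensor4('u'), T.tensor4('v'), T.matrix('muon')]
output_layer = build_network_zeta(inputlist,
                                  convpooldictlist=convpooldictlist,
                                  nhidden=256, dropoutp=0.5)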
Code example #6
def build_beta_single_view(inputlist, view='x', imgh=68, imgw=127,
                           convpooldictlist=None,
                           nhidden=None, dropoutp=None, noutputs=11,
                           depth=1
):
    """
    This network is modeled after the 'triamese' (tri-columnar) beta model,
    but is meant to operate on one view only.

    This function has a different signature than the rest of the functions
    in this module, so it is really not meant to be used as a `build_cnn`
    function in the runner scripts (although, in Python, that would work).
    """
    net = {}
    # Input layer
    input_var = inputlist[0]
    tshape = (None, depth, imgw, imgh)
    input_name = 'input-' + view
    net[input_name] = InputLayer(shape=tshape, input_var=input_var)

    if convpooldictlist is None:
        convpooldictlist = []
        convpool1dict = {}
        convpool1dict['nfilters'] = 32
        convpool1dict['filter_size'] = (3, 3)
        convpool1dict['pool_size'] = (2, 2)
        convpooldictlist.append(convpool1dict)
        convpool2dict = {}
        convpool2dict['nfilters'] = 32
        convpool2dict['filter_size'] = (3, 3)
        convpool2dict['pool_size'] = (2, 2)
        convpooldictlist.append(convpool2dict)

    if nhidden is None:
        nhidden = 256

    if dropoutp is None:
        dropoutp = 0.5

    net.update(
        make_Nconvpool_1dense_branch(view, net[input_name], convpooldictlist,
                                     nhidden, dropoutp))

    # One more dense layer
    dense_name = 'dense-' + view
    net['dense-across'] = DenseLayer(
        dropout(net[dense_name], p=dropoutp),
        num_units=(nhidden // 2),
        nonlinearity=lasagne.nonlinearities.rectify)
    logger.info("Dense {} with nhidden = {}, dropout = {}".format(
        'dense-across', nhidden // 2, dropoutp))

    # And, finally, the `noutputs`-unit output layer
    net['output_prob'] = DenseLayer(
        net['dense-across'],
        num_units=noutputs,
        nonlinearity=lasagne.nonlinearities.softmax
    )
    logger.info("Softmax output prob with n_units = {}".format(noutputs))

    logger.info("n-parameters: {}".format(
        lasagne.layers.count_params(net['output_prob']))
    )
    return net['output_prob']
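`make_Nconvpool_1dense_branch` is referenced throughout but not shown in this listing. A plausible sketch, patterned after the conv-only `make_branch` defined inside the `gamma` model below, with a max-pooling layer added after each convolution (the real helper may differ):

import lasagne
from lasagne.layers import Conv2DLayer, MaxPool2DLayer, DenseLayer, dropout

def make_Nconvpool_1dense_branch(view, input_layer, cpdictlist,
                                 nhidden=256, dropoutp=0.5):
    # One (conv -> maxpool) block per entry in cpdictlist, then a dense layer
    # named 'dense-<view>' so callers can look it up by key.
    net = {}
    layer = input_layer
    for i, cpdict in enumerate(cpdictlist):
        convname = 'conv-{}-{}'.format(view, i)
        net[convname] = Conv2DLayer(
            layer, num_filters=cpdict['nfilters'],
            filter_size=cpdict['filter_size'],
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.GlorotUniform())
        poolname = 'maxpool-{}-{}'.format(view, i)
        net[poolname] = MaxPool2DLayer(net[convname],
                                       pool_size=cpdict['pool_size'])
        layer = net[poolname]
    densename = 'dense-{}'.format(view)
    net[densename] = DenseLayer(dropout(layer, p=dropoutp),
                                num_units=nhidden,
                                nonlinearity=lasagne.nonlinearities.rectify)
    return net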
Code example #7
def build_triamese_delta(inputlist, imgh=68, imgw=127, convpooldictlist=None,
                         nhidden=None, dropoutp=None, noutputs=67, depth=1):
    """
    'triamese' (one branch for each view, feeding a fully-connected network),
    model using two layers of convolutions and pooling.

    This model is basically identical to the `beta` model, except we have
    a softmax output of `noutputs` (default 67) for the full set of planecodes.
    """
    net = {}
    # Input layer
    input_var_x, input_var_u, input_var_v = \
        inputlist[0], inputlist[1], inputlist[2]
    tshape = (None, depth, imgw, imgh)
    net['input-x'] = InputLayer(shape=tshape, input_var=input_var_x)
    net['input-u'] = InputLayer(shape=tshape, input_var=input_var_u)
    net['input-v'] = InputLayer(shape=tshape, input_var=input_var_v)

    if convpooldictlist is None:
        convpooldictlist = []
        convpool1dict = {}
        convpool1dict['nfilters'] = 32
        convpool1dict['filter_size'] = (3, 3)
        convpool1dict['pool_size'] = (2, 2)
        convpooldictlist.append(convpool1dict)
        convpool2dict = {}
        convpool2dict['nfilters'] = 32
        convpool2dict['filter_size'] = (3, 3)
        convpool2dict['pool_size'] = (2, 2)
        convpooldictlist.append(convpool2dict)

    if nhidden is None:
        nhidden = 256

    if dropoutp is None:
        dropoutp = 0.5

    net.update(
        make_Nconvpool_1dense_branch('x', net['input-x'], convpooldictlist,
                                     nhidden, dropoutp))
    net.update(
        make_Nconvpool_1dense_branch('u', net['input-u'], convpooldictlist,
                                     nhidden, dropoutp))
    net.update(
        make_Nconvpool_1dense_branch('v', net['input-v'], convpooldictlist,
                                     nhidden, dropoutp))

    # Concatenate the three parallel inputs
    net['concat'] = ConcatLayer((net['dense-x'],
                                 net['dense-u'],
                                 net['dense-v']))
    logger.info("Network: concat columns...")

    # One more dense layer
    net['dense-across'] = DenseLayer(
        dropout(net['concat'], p=dropoutp),
        num_units=(nhidden // 2),
        nonlinearity=lasagne.nonlinearities.rectify)
    logger.info("Dense {} with nhidden = {}, dropout = {}".format(
        'dense-across', nhidden // 2, dropoutp))

    # And, finally, the `noutputs`-unit output layer
    net['output_prob'] = DenseLayer(
        net['dense-across'],
        num_units=noutputs,
        nonlinearity=lasagne.nonlinearities.softmax
    )
    logger.info("Softmax output prob with n_units = {}".format(noutputs))

    logger.info("n-parameters: {}".format(
        lasagne.layers.count_params(net['output_prob']))
    )
    return net['output_prob']
Code example #8
def build_triamese_gamma(inputlist, imgh=50, imgw=50, convpooldictlist=None,
                         nhidden=None, dropoutp=None, noutputs=11, depth=1):
    """
    'triamese' (one branch for each view, feeding a fully-connected network),
    model using two layers of convolutions - no pooling.
    """
    net = {}
    # Input layer
    input_var_x, input_var_u, input_var_v = \
        inputlist[0], inputlist[1], inputlist[2]
    tshape = (None, depth, imgw, imgh)
    net['input-x'] = InputLayer(shape=tshape, input_var=input_var_x)
    net['input-u'] = InputLayer(shape=tshape, input_var=input_var_u)
    net['input-v'] = InputLayer(shape=tshape, input_var=input_var_v)

    if convpooldictlist is None:
        convpooldictlist = []
        convpool1dict = {}
        convpool1dict['nfilters'] = 32
        convpool1dict['filter_size'] = (3, 3)
        convpooldictlist.append(convpool1dict)
        convpool2dict = {}
        convpool2dict['nfilters'] = 16
        convpool2dict['filter_size'] = (3, 3)
        convpooldictlist.append(convpool2dict)

    if nhidden is None:
        nhidden = 256

    if dropoutp is None:
        dropoutp = 0.5

    def make_branch(view, input_layer, cpdictlist, nhidden=256, dropoutp=0.5):
        """
        see: http://lasagne.readthedocs.org/en/latest/modules/layers.html
        convolution only - no pooling
        """
        net = {}
        convname = ''
        prev_layername = ''
        for i, cpdict in enumerate(cpdictlist):
            convname = 'conv-{}-{}'.format(view, i)
            logger.info("Convpool {} params: {}".format(convname, cpdict))
            # the first time through, use the input layer; after that, use the
            # last layer from the previous iteration - ah, loose scoping rules...
            if i == 0:
                layer = input_layer
            else:
                layer = net[prev_layername]
            net[convname] = Conv2DLayer(
                layer, num_filters=cpdict['nfilters'],
                filter_size=cpdict['filter_size'],
                nonlinearity=lasagne.nonlinearities.rectify,
                W=lasagne.init.GlorotUniform())
            prev_layername = convname
        densename = 'dense-{}'.format(view)
        net[densename] = DenseLayer(
            dropout(net[convname], p=dropoutp),
            num_units=nhidden,
            nonlinearity=lasagne.nonlinearities.rectify)
        logger.info("Dense {} with nhidden = {}, dropout = {}".format(
            densename, nhidden, dropoutp))
        return net

    net.update(make_branch('x', net['input-x'], convpooldictlist,
                           nhidden, dropoutp))
    net.update(make_branch('u', net['input-u'], convpooldictlist,
                           nhidden, dropoutp))
    net.update(make_branch('v', net['input-v'], convpooldictlist,
                           nhidden, dropoutp))

    # Concatenate the three parallel inputs
    net['concat'] = ConcatLayer((net['dense-x'],
                                 net['dense-u'],
                                 net['dense-v']))
    logger.info("Network: concat columns...")

    # One more dense layer
    net['dense-across'] = DenseLayer(
        dropout(net['concat'], p=dropoutp),
        num_units=(nhidden // 2),
        nonlinearity=lasagne.nonlinearities.rectify)
    logger.info("Dense {} with nhidden = {}, dropout = {}".format(
        'dense-across', nhidden // 2, dropoutp))

    # And, finally, the `noutputs`-unit output layer
    net['output_prob'] = DenseLayer(
        net['dense-across'],
        num_units=noutputs,
        nonlinearity=lasagne.nonlinearities.softmax
    )
    logger.info("Softmax output prob with n_units = {}".format(noutputs))

    logger.info("n-parameters: {}".format(
        lasagne.layers.count_params(net['output_prob']))
    )
    return net['output_prob']
Code example #9
def build_model():

    #################
    # Regular model #
    #################
    data_size = data_sizes["sliced:data:ax:noswitch"]

    l0 = InputLayer(data_size)
    l0r = batch_norm(reshape(l0, (-1, 1, ) + data_size[1:]))

    # (batch, channel, axis, time, x, y)

    # convolve over time
    l1 = batch_norm(ConvolutionOverAxisLayer(l0r, num_filters=4, filter_size=(3,), axis=(3,), channel=1,
                                   W=lasagne.init.Orthogonal(),
                                   b=lasagne.init.Constant(0.0),
                                   ))
    l1m = batch_norm(MaxPoolOverAxisLayer(l1, pool_size=(4,), axis=(3,)))

    # convolve over x and y
    l2a = batch_norm(ConvolutionOver2DAxisLayer(l1m, num_filters=8, filter_size=(3, 3),
                                     axis=(4,5), channel=1,
                                     W=lasagne.init.Orthogonal(),
                                     b=lasagne.init.Constant(0.0),
                                     ))
    l2b = batch_norm(ConvolutionOver2DAxisLayer(l2a, num_filters=8, filter_size=(3, 3),
                                     axis=(4,5), channel=1,
                                     W=lasagne.init.Orthogonal(),
                                     b=lasagne.init.Constant(0.0),
                                     ))
    l2m = batch_norm(MaxPoolOver2DAxisLayer(l2b, pool_size=(2, 2), axis=(4,5)))


    # convolve over x, y, time
    l3a = batch_norm(ConvolutionOver3DAxisLayer(l2m, num_filters=32, filter_size=(3, 3, 3),
                                     axis=(3,4,5), channel=1,
                                     W=lasagne.init.Orthogonal(),
                                     b=lasagne.init.Constant(0.1),
                                     ))

    l3b = batch_norm(ConvolutionOver2DAxisLayer(l3a, num_filters=32, filter_size=(3, 3),
                                     axis=(4,5), channel=1,
                                     W=lasagne.init.Orthogonal(),
                                     b=lasagne.init.Constant(0.1),
                                     ))
    l3m = batch_norm(MaxPoolOver2DAxisLayer(l3b, pool_size=(2, 2), axis=(4,5)))

    # convolve over time
    l4 = batch_norm(ConvolutionOverAxisLayer(l3m, num_filters=32, filter_size=(3,), axis=(3,), channel=1,
                                   W=lasagne.init.Orthogonal(),
                                   b=lasagne.init.Constant(0.1),
                                   ))
    l4m = batch_norm(MaxPoolOverAxisLayer(l4, pool_size=(2,), axis=(2,)))

    # maxpool over axis
    l5 = batch_norm(MaxPoolOverAxisLayer(l3m, pool_size=(4,), axis=(2,)))

    # convolve over x and y
    l6a = batch_norm(ConvolutionOver2DAxisLayer(l5, num_filters=128, filter_size=(3, 3),
                                     axis=(4,5), channel=1,
                                     W=lasagne.init.Orthogonal(),
                                     b=lasagne.init.Constant(0.1),
                                     ))
    l6b = batch_norm(ConvolutionOver2DAxisLayer(l6a, num_filters=128, filter_size=(3, 3),
                                     axis=(4,5), channel=1,
                                     W=lasagne.init.Orthogonal(),
                                     b=lasagne.init.Constant(0.1),
                                     ))
    l6m = batch_norm(MaxPoolOver2DAxisLayer(l6b, pool_size=(2, 2), axis=(4,5)))

    # convolve over time and x,y
    l7 = ConvolutionOver3DAxisLayer(l6m, num_filters=128, filter_size=(3,3,3), axis=(3,4,5), channel=1,
                                   W=lasagne.init.Orthogonal(),
                                   b=lasagne.init.Constant(0.1),
                                   )

    l8 = lasagne.layers.DropoutLayer(l7, p=0.5)

    l_systole = CumSumLayer(lasagne.layers.DenseLayer(l8,
                              num_units=600,
                              nonlinearity=lasagne.nonlinearities.softmax))

    l_diastole = CumSumLayer(lasagne.layers.DenseLayer(l8,
                              num_units=600,
                              nonlinearity=lasagne.nonlinearities.softmax))

    return {
        "inputs":{
            "sliced:data:ax:noswitch": l0
        },
        "outputs": {
            "systole": l_systole,
            "diastole": l_diastole,
        },
        "regularizable": {
        }
    }
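`CumSumLayer` converts each 600-way softmax into a cumulative distribution (the systole/diastole CDFs this model predicts). A minimal sketch of such a layer, assuming it just takes a cumulative sum along the class axis:

import theano.tensor as T
import lasagne

class CumSumLayer(lasagne.layers.Layer):
    """Cumulative sum along `axis`, e.g. to turn a softmax into a CDF."""
    def __init__(self, incoming, axis=1, **kwargs):
        super(CumSumLayer, self).__init__(incoming, **kwargs)
        self.axis = axis

    def get_output_for(self, input, **kwargs):
        return T.extra_ops.cumsum(input, axis=self.axis)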
Code example #10
def build_network(pooling='avg'):
    net = {}

    net['input'] = InputLayer((None, 3, 299, 299))
    net['conv'] = bn_conv(net['input'],
                          num_filters=32,
                          filter_size=3,
                          stride=2)
    net['conv_1'] = bn_conv(net['conv'], num_filters=32, filter_size=3)
    net['conv_2'] = bn_conv(net['conv_1'],
                            num_filters=64,
                            filter_size=3,
                            pad=1)
    net['pool'] = Pool2DLayer(net['conv_2'], pool_size=3, stride=2, mode='max')

    net['conv_3'] = bn_conv(net['pool'], num_filters=80, filter_size=1)

    net['conv_4'] = bn_conv(net['conv_3'], num_filters=192, filter_size=3)

    net['pool_1'] = Pool2DLayer(net['conv_4'],
                                pool_size=3,
                                stride=2,
                                mode='max')
    net['mixed/join'] = inceptionA(net['pool_1'],
                                   nfilt=((64, ), (48, 64), (64, 96, 96),
                                          (32, )))
    net['mixed_1/join'] = inceptionA(net['mixed/join'],
                                     nfilt=((64, ), (48, 64), (64, 96, 96),
                                            (64, )))

    net['mixed_2/join'] = inceptionA(net['mixed_1/join'],
                                     nfilt=((64, ), (48, 64), (64, 96, 96),
                                            (64, )))

    net['mixed_3/join'] = inceptionB(net['mixed_2/join'],
                                     nfilt=((384, ), (64, 96, 96)))

    net['mixed_4/join'] = inceptionC(net['mixed_3/join'],
                                     nfilt=((192, ), (128, 128, 192),
                                            (128, 128, 128, 128,
                                             192), (192, )))

    net['mixed_5/join'] = inceptionC(net['mixed_4/join'],
                                     nfilt=((192, ), (160, 160, 192),
                                            (160, 160, 160, 160,
                                             192), (192, )))

    net['mixed_6/join'] = inceptionC(net['mixed_5/join'],
                                     nfilt=((192, ), (160, 160, 192),
                                            (160, 160, 160, 160,
                                             192), (192, )))

    net['mixed_7/join'] = inceptionC(net['mixed_6/join'],
                                     nfilt=((192, ), (192, 192, 192),
                                            (192, 192, 192, 192,
                                             192), (192, )))

    net['mixed_8/join'] = inceptionD(net['mixed_7/join'],
                                     nfilt=((192, 320), (192, 192, 192, 192)))

    net['mixed_9/join'] = inceptionE(net['mixed_8/join'],
                                     nfilt=((320, ), (384, 384, 384),
                                            (448, 384, 384, 384), (192, )),
                                     pool_mode='average_exc_pad')

    net['mixed_10/join'] = inceptionE(net['mixed_9/join'],
                                      nfilt=((320, ), (384, 384, 384),
                                             (448, 384, 384, 384), (192, )),
                                      pool_mode='max')

    if pooling == "max":
        net['pool3'] = GlobalPoolLayer(net['mixed_10/join'],
                                       pool_function=theano.tensor.max)
    elif pooling in ("avg", "mean"):
        net['pool3'] = GlobalPoolLayer(net['mixed_10/join'],
                                       pool_function=theano.tensor.mean)
    else:
        raise "pooling not specified"

    net['softmax'] = DenseLayer(net['pool3'],
                                num_units=1008,
                                nonlinearity=softmax)

    return net
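A hedged usage sketch for the Inception-v3 graph built above: load pretrained parameters, compile a forward pass, and read off the top class. The pickle file name and its 'param values' key are assumptions (they follow the common Lasagne recipes layout), and the random array stands in for a preprocessed 299x299 image.

import pickle
import numpy as np
import theano
import theano.tensor as T
import lasagne

net = build_network(pooling='avg')

# hypothetical weights file; layout assumed from the Lasagne recipes convention
with open('inception_v3.pkl', 'rb') as f:
    params = pickle.load(f)
lasagne.layers.set_all_param_values(net['softmax'], params['param values'])

X = T.tensor4('X')
probs = lasagne.layers.get_output(net['softmax'], X, deterministic=True)
predict_fn = theano.function([X], probs)

img = np.random.rand(1, 3, 299, 299).astype(theano.config.floatX)  # placeholder
print(predict_fn(img).argmax(axis=1))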
Code example #11
File: music_synthesis.py  Project: szcom/crikey
random_state = np.random.RandomState(1999)
n_epochs = 200

# theano land tensor4 for 4 dimensions
input_var = tensor.tensor4('X')
target_var = tensor.tensor4('y')
outchan = y_train.shape[0]
inchan = X_train.shape[0]
width = X_train.shape[1]
height = X_train.shape[2]

input_var.tag.test_value = X_mb
target_var.tag.test_value = y_mb

# setting up theano - use None to indicate that dimension may change
coarse_input = InputLayer((minibatch_size, inchan, width, height),
                          input_var=input_var)
# choose number of filters and filter size
coarse_conv1 = Conv2DLayer(coarse_input,
                           num_filters=32,
                           filter_size=(5, 5),
                           nonlinearity=rectify,
                           W=GlorotUniform(),
                           pad=(2, 2))

coarse_pool1 = MaxPool2DLayer(coarse_conv1, pool_size=(2, 2))

coarse_conv2 = Conv2DLayer(coarse_pool1,
                           num_filters=64,
                           filter_size=(3, 3),
                           nonlinearity=rectify,
                           W=GlorotUniform(),
Code example #12
def main():

    
    # import VGG model

    net = {}
    net['input'] = InputLayer((None, 3, 224, 224))
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1, flip_filters=False)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1, flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1, flip_filters=False)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1, flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1, flip_filters=False)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1, flip_filters=False)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1, flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1, flip_filters=False)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1, flip_filters=False)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1, flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1, flip_filters=False)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1, flip_filters=False)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1, flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_3'], 2)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['fc7'] = DenseLayer(net['fc6'], num_units=4096)
    net['fc8'] = DenseLayer(net['fc7'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)

    # loading one image and compute its saliency map

    lasagne.layers.set_all_param_values(net['prob'], weights)
    url = 'http://farm5.static.flickr.com/4064/4334173592_145856d89b.jpg'
    img_original, img = prepare_image(url)

    saliency_fn = compile_saliency_function(net)
    saliency, max_class = saliency_fn(img)
    show_images(img_original, saliency, max_class, "default gradient")

    relu = lasagne.nonlinearities.rectify
    relu_layers = [layer for layer in lasagne.layers.get_all_layers(net['prob'])
                    if getattr(layer, 'nonlinearity', None) is relu]

    # Guided Backpropagation

    modded_relu = GuidedBackprop(relu)  # important: only instantiate this once!
    for layer in relu_layers:
        layer.nonlinearity = modded_relu

    saliency_fn = compile_saliency_function(net)
    saliency, max_class = saliency_fn(img)
    show_images(img_original, saliency, max_class, "guided backprop")

    # using Zeiler Backpropagation

    modded_relu = ZeilerBackprop(relu)
    for layer in relu_layers:
        layer.nonlinearity = modded_relu

    saliency_fn = compile_saliency_function(net)
    saliency, max_class = saliency_fn(img)
    show_images(img_original, saliency, max_class, "deconvnet")
Code example #13
File: adenet_v2_4.py  Project: konatasick/ip-avsr
def create_model(ae,
                 diff_ae,
                 input_shape,
                 input_var,
                 mask_shape,
                 mask_var,
                 diff_shape,
                 diff_var,
                 lstm_size=250,
                 win=T.iscalar('theta'),
                 output_classes=26,
                 fusiontype='concat',
                 w_init_fn=las.init.Orthogonal(),
                 use_peepholes=True):

    bn_weights, bn_biases, bn_shapes, bn_nonlinearities = ae
    diff_weights, diff_biases, diff_shapes, diff_nonlinearities = diff_ae

    gate_parameters = Gate(W_in=w_init_fn,
                           W_hid=w_init_fn,
                           b=las.init.Constant(0.))
    cell_parameters = Gate(
        W_in=w_init_fn,
        W_hid=w_init_fn,
        # Setting W_cell to None denotes that no cell connection will be used.
        W_cell=None,
        b=las.init.Constant(0.),
        # By convention, the cell nonlinearity is tanh in an LSTM.
        nonlinearity=tanh)

    l_raw = InputLayer(input_shape, input_var, 'raw_im')
    l_mask = InputLayer(mask_shape, mask_var, 'mask')
    l_diff = InputLayer(diff_shape, diff_var, 'diff_im')

    symbolic_batchsize_raw = l_raw.input_var.shape[0]
    symbolic_seqlen_raw = l_raw.input_var.shape[1]
    symbolic_batchsize_diff = l_diff.input_var.shape[0]
    symbolic_seqlen_diff = l_diff.input_var.shape[1]

    l_reshape1_raw = ReshapeLayer(l_raw, (-1, input_shape[-1]),
                                  name='reshape1_raw')
    l_encoder_raw = create_pretrained_encoder(
        l_reshape1_raw, bn_weights, bn_biases, bn_shapes, bn_nonlinearities,
        ['fc1_raw', 'fc2_raw', 'fc3_raw', 'bottleneck_raw'])
    raw_len = las.layers.get_output_shape(l_encoder_raw)[-1]

    l_reshape2_raw = ReshapeLayer(
        l_encoder_raw, (symbolic_batchsize_raw, symbolic_seqlen_raw, raw_len),
        name='reshape2_raw')
    l_delta_raw = DeltaLayer(l_reshape2_raw, win, name='delta_raw')

    # diff images
    l_reshape1_diff = ReshapeLayer(l_diff, (-1, diff_shape[-1]),
                                   name='reshape1_diff')
    l_encoder_diff = create_pretrained_encoder(
        l_reshape1_diff, diff_weights, diff_biases, diff_shapes,
        diff_nonlinearities,
        ['fc1_diff', 'fc2_diff', 'fc3_diff', 'bottleneck_diff'])
    diff_len = las.layers.get_output_shape(l_encoder_diff)[-1]
    l_reshape2_diff = ReshapeLayer(
        l_encoder_diff,
        (symbolic_batchsize_diff, symbolic_seqlen_diff, diff_len),
        name='reshape2_diff')
    l_delta_diff = DeltaLayer(l_reshape2_diff, win, name='delta_diff')

    l_lstm_raw = LSTMLayer(
        l_delta_raw,
        int(lstm_size),
        peepholes=use_peepholes,
        # We need to specify a separate input for masks
        mask_input=l_mask,
        # Here, we supply the gate parameters for each gate
        ingate=gate_parameters,
        forgetgate=gate_parameters,
        cell=cell_parameters,
        outgate=gate_parameters,
        # We'll learn the initialization and use gradient clipping
        learn_init=True,
        grad_clipping=5.,
        name='lstm_raw')

    l_lstm_diff = LSTMLayer(
        l_delta_diff,
        lstm_size,
        peepholes=use_peepholes,
        # We need to specify a separate input for masks
        mask_input=l_mask,
        # Here, we supply the gate parameters for each gate
        ingate=gate_parameters,
        forgetgate=gate_parameters,
        cell=cell_parameters,
        outgate=gate_parameters,
        # We'll learn the initialization and use gradient clipping
        learn_init=True,
        grad_clipping=5.,
        name='lstm_diff')

    # We'll combine the forward and backward layer output by summing.
    # Merge layers take in lists of layers to merge as input.
    if fusiontype == 'adasum':
        l_fuse = AdaptiveElemwiseSumLayer([l_lstm_raw, l_lstm_diff],
                                          name='adasum1')
    elif fusiontype == 'sum':
        l_fuse = ElemwiseSumLayer([l_lstm_raw, l_lstm_diff], name='sum1')
    elif fusiontype == 'concat':
        l_fuse = ConcatLayer([l_lstm_raw, l_lstm_diff], axis=-1, name='concat')
    else:
        raise ValueError('unknown fusiontype: {}'.format(fusiontype))

    f_lstm_agg = create_lstm(l_fuse, l_mask, lstm_size, cell_parameters,
                             gate_parameters, 'lstm_agg')

    # reshape to (num_examples * seq_len, lstm_size)
    l_reshape3 = ReshapeLayer(f_lstm_agg, (-1, lstm_size))

    # Now, we can apply feed-forward layers as usual.
    # We want the network to predict a classification for the sequence,
    # so we'll use a dense layer with `output_classes` units.
    l_softmax = DenseLayer(l_reshape3,
                           num_units=output_classes,
                           nonlinearity=las.nonlinearities.softmax,
                           name='softmax')

    l_out = ReshapeLayer(l_softmax, (-1, symbolic_seqlen_raw, output_classes),
                         name='output')

    return l_out, l_fuse
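`AdaptiveElemwiseSumLayer` (the 'adasum' fusion) is not defined here. One plausible reading is an elementwise sum whose per-branch scaling coefficients are learned; a sketch under that assumption (the layer in the ip-avsr project may differ):

import lasagne

class AdaptiveElemwiseSumLayer(lasagne.layers.MergeLayer):
    """Elementwise sum with a learnable scalar coefficient per input branch."""
    def __init__(self, incomings, coeff_init=lasagne.init.Constant(1.0), **kwargs):
        super(AdaptiveElemwiseSumLayer, self).__init__(incomings, **kwargs)
        self.coeffs = [self.add_param(coeff_init, (1,), name='coeff%d' % i,
                                      trainable=True, regularizable=False)
                       for i in range(len(incomings))]

    def get_output_shape_for(self, input_shapes):
        # all branches are assumed to have the same shape
        return input_shapes[0]

    def get_output_for(self, inputs, **kwargs):
        output = self.coeffs[0][0] * inputs[0]
        for coeff, inp in zip(self.coeffs[1:], inputs[1:]):
            output = output + coeff[0] * inp
        return output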
Code example #14
File: test_recurrent.py  Project: Summerlyu/RNNs
def test_gru_grad_clipping():
    # test that you can set grad_clip variable
    x = T.tensor3()
    l_rec = GRULayer(InputLayer((2, 2, 3)), 5, grad_clipping=1)
    output = lasagne.layers.get_output(l_rec, x)
Code example #15
File: test_recurrent.py  Project: Summerlyu/RNNs
def test_lstm_hid_init_layer_eval():
    # Test `hid_init` as a `Layer` with some dummy input. Compare the output of
    # a network with a `Layer` as input to `hid_init` to a network with a
    # `np.array` as input to `hid_init`
    n_units = 7
    n_test_cases = 2
    in_shp = (n_test_cases, 2, 3)
    in_h_shp = (1, n_units)
    in_cell_shp = (1, n_units)

    # dummy inputs
    X_test = np.ones(in_shp, dtype=theano.config.floatX)
    Xh_test = np.ones(in_h_shp, dtype=theano.config.floatX)
    Xc_test = np.ones(in_cell_shp, dtype=theano.config.floatX)
    Xh_test_batch = np.tile(Xh_test, (n_test_cases, 1))
    Xc_test_batch = np.tile(Xc_test, (n_test_cases, 1))

    # network with `Layer` initializer for hid_init
    l_inp = InputLayer(in_shp)
    l_inp_h = InputLayer(in_h_shp)
    l_inp_cell = InputLayer(in_cell_shp)
    l_rec_inp_layer = LSTMLayer(l_inp,
                                n_units,
                                hid_init=l_inp_h,
                                cell_init=l_inp_cell,
                                nonlinearity=None)

    # network with `np.array` initializer for hid_init
    l_rec_nparray = LSTMLayer(l_inp,
                              n_units,
                              hid_init=Xh_test,
                              cell_init=Xc_test,
                              nonlinearity=None)

    # copy network parameters from l_rec_inp_layer to l_rec_nparray
    l_il_param = dict([(p.name, p) for p in l_rec_inp_layer.get_params()])
    l_rn_param = dict([(p.name, p) for p in l_rec_nparray.get_params()])
    for k, v in l_rn_param.items():
        if k in l_il_param:
            v.set_value(l_il_param[k].get_value())

    # build the theano functions
    X = T.tensor3()
    Xh = T.matrix()
    Xc = T.matrix()
    output_inp_layer = lasagne.layers.get_output(l_rec_inp_layer, {
        l_inp: X,
        l_inp_h: Xh,
        l_inp_cell: Xc
    })
    output_nparray = lasagne.layers.get_output(l_rec_nparray, {l_inp: X})

    # test both nets with dummy input
    output_val_inp_layer = output_inp_layer.eval({
        X: X_test,
        Xh: Xh_test_batch,
        Xc: Xc_test_batch
    })
    output_val_nparray = output_nparray.eval({X: X_test})

    # check output given `Layer` is the same as with `np.array`
    assert np.allclose(output_val_inp_layer, output_val_nparray)
Code example #16
def build_network(batch_size, z_shape, img_height, img_width,
                  conv_nonlinearity, dense_nonlinearity):
    # Draws heavy inspiration from ResNet
    num_filters = 32
    filter_shape = (5, 5)

    l_in = InputLayer((batch_size, 1, z_shape))

    dense_nonlinearity = lasagne.nonlinearities.rectify
    conv_nonlinearity = lasagne.nonlinearities.rectify

    config = {
        'conv_1_repeats': 0,
        'conv_2_repeats': 0,
        'conv_3_repeats': 0,
        'conv_4_repeats': 0
    }

    #####################
    ### Decoding half ###
    #####################
    h_test = 2
    w_test = 5

    dec_2_size = h_test * w_test * num_filters * 8

    l_hid_dec_2 = batch_norm(
        DenseLayer(l_in, dec_2_size, nonlinearity=dense_nonlinearity))
    l_dec_reshape = ReshapeLayer(
        l_hid_dec_2,
        [batch_size, dec_2_size // h_test // w_test, h_test, w_test])

    conv_1 = batch_norm(
        TransposedConv2DLayer(l_dec_reshape,
                              num_filters * 8,
                              filter_shape,
                              nonlinearity=conv_nonlinearity,
                              untie_biases=True))
    for _ in range(config['conv_1_repeats']):
        conv_1 = batch_norm(
            TransposedConv2DLayer(conv_1,
                                  num_filters * 8,
                                  filter_shape,
                                  nonlinearity=conv_nonlinearity,
                                  untie_biases=True,
                                  crop='same'))

    conv_2 = batch_norm(
        TransposedConv2DLayer(conv_1,
                              num_filters * 4,
                              filter_shape,
                              nonlinearity=conv_nonlinearity,
                              untie_biases=True,
                              stride=(2, 2),
                              crop='same'))
    for _ in range(config['conv_2_repeats']):
        conv_2 = batch_norm(
            TransposedConv2DLayer(conv_2,
                                  num_filters * 4,
                                  filter_shape,
                                  nonlinearity=conv_nonlinearity,
                                  untie_biases=True,
                                  crop='same'))

    conv_3 = batch_norm(
        TransposedConv2DLayer(conv_2,
                              num_filters * 2,
                              filter_shape,
                              nonlinearity=conv_nonlinearity,
                              untie_biases=True,
                              stride=(2, 2),
                              crop='same'))
    for _ in range(config['conv_3_repeats']):
        conv_3 = batch_norm(
            TransposedConv2DLayer(conv_3,
                                  num_filters * 2,
                                  filter_shape,
                                  nonlinearity=conv_nonlinearity,
                                  untie_biases=True,
                                  crop='same'))

    conv_4 = batch_norm(
        TransposedConv2DLayer(conv_3,
                              num_filters,
                              filter_shape,
                              nonlinearity=conv_nonlinearity,
                              untie_biases=True,
                              stride=(2, 2),
                              crop='same'))
    for _ in range(config['conv_4_repeats']):
        conv_4 = batch_norm(
            TransposedConv2DLayer(conv_4,
                                  num_filters,
                                  filter_shape,
                                  nonlinearity=conv_nonlinearity,
                                  untie_biases=True,
                                  crop='same'))

    l_out = batch_norm(
        TransposedConv2DLayer(conv_4,
                              1,
                              filter_shape,
                              nonlinearity=lasagne.nonlinearities.sigmoid,
                              untie_biases=True,
                              crop='same'))

    return l_in, l_out
Code example #17
def build_triamese_alpha(inputlist, imgh=50, imgw=50,
                         convpool1dict=None, convpool2dict=None,
                         convpooldictlist=None, nhidden=None,
                         dropoutp=None, noutputs=11,
                         depth=1
):
    """
    'triamese' (one branch for each view, feeding a fully-connected network),
    model using two layers of convolutions and pooling.
    """
    # Input layer
    input_var_x, input_var_u, input_var_v = \
        inputlist[0], inputlist[1], inputlist[2]
    tshape = (None, depth, imgw, imgh)
    l_in1_x = InputLayer(shape=tshape, input_var=input_var_x)
    l_in1_u = InputLayer(shape=tshape, input_var=input_var_u)
    l_in1_v = InputLayer(shape=tshape, input_var=input_var_v)

    if convpool1dict is None:
        convpool1dict = {}
        convpool1dict['nfilters'] = 32
        convpool1dict['filter_size'] = (3, 3)
        convpool1dict['pool_size'] = (2, 2)
    logger.info("Convpool1 params: {}".format(convpool1dict))

    if convpool2dict is None:
        convpool2dict = {}
        convpool2dict['nfilters'] = 32
        convpool2dict['filter_size'] = (3, 3)
        convpool2dict['pool_size'] = (2, 2)
    logger.info("Convpool2 params: {}".format(convpool2dict))
    logger.info("Network: one dense layer per column...")

    def make_branch(input_layer,
                    num_filters1, filter_size1, pool_size1,
                    num_filters2, filter_size2, pool_size2):
        """
        see: http://lasagne.readthedocs.org/en/latest/modules/layers.html
        """
        convlayer1 = Conv2DLayer(input_layer, num_filters=num_filters1,
                                 filter_size=filter_size1,
                                 nonlinearity=lasagne.nonlinearities.rectify,
                                 W=lasagne.init.GlorotUniform())
        maxpoollayer1 = MaxPool2DLayer(convlayer1, pool_size=pool_size1)
        convlayer2 = Conv2DLayer(maxpoollayer1, num_filters=num_filters2,
                                 filter_size=filter_size2,
                                 nonlinearity=lasagne.nonlinearities.rectify,
                                 W=lasagne.init.GlorotUniform())
        maxpoollayer2 = MaxPool2DLayer(convlayer2, pool_size=pool_size2)
        dense1 = DenseLayer(
            dropout(maxpoollayer2, p=.5),
            num_units=256,
            nonlinearity=lasagne.nonlinearities.rectify)
        return dense1

    l_branch_x = make_branch(l_in1_x,
                             convpool1dict['nfilters'],
                             convpool1dict['filter_size'],
                             convpool1dict['pool_size'],
                             convpool2dict['nfilters'],
                             convpool2dict['filter_size'],
                             convpool2dict['pool_size'])
    l_branch_u = make_branch(l_in1_u,
                             convpool1dict['nfilters'],
                             convpool1dict['filter_size'],
                             convpool1dict['pool_size'],
                             convpool2dict['nfilters'],
                             convpool2dict['filter_size'],
                             convpool2dict['pool_size'])
    l_branch_v = make_branch(l_in1_v,
                             convpool1dict['nfilters'],
                             convpool1dict['filter_size'],
                             convpool1dict['pool_size'],
                             convpool2dict['nfilters'],
                             convpool2dict['filter_size'],
                             convpool2dict['pool_size'])

    # Concatenate the parallel inputs
    l_concat = ConcatLayer((l_branch_x, l_branch_u, l_branch_v))
    logger.info("Network: Concat all three columns...")

    # And, finally, the noutputs-unit output layer
    outp = DenseLayer(
        l_concat,
        num_units=noutputs,
        nonlinearity=lasagne.nonlinearities.softmax
    )
    logger.info("Network: Softmax classification layer.")

    logger.info("n-parameters: {}".format(lasagne.layers.count_params(outp)))
    return outp
Code example #18
def build_resnet():
    net = {}
    net['input'] = InputLayer((None, 3, 224, 224))
    sub_net, parent_layer_name = build_simple_block(
        net['input'], ['conv1', 'bn_conv1', 'conv1_relu'],
        64,
        7,
        2,
        3,
        use_bias=True)
    net.update(sub_net)
    net['pool1'] = PoolLayer(net[parent_layer_name],
                             pool_size=3,
                             stride=2,
                             pad=0,
                             mode='max',
                             ignore_border=False)

    block_size = list('abc')
    parent_layer_name = 'pool1'
    for c in block_size:
        if c == 'a':
            sub_net, parent_layer_name = build_residual_block(
                net[parent_layer_name], 1, 1, True, 4, ix='2%s' % c)
        else:
            sub_net, parent_layer_name = build_residual_block(
                net[parent_layer_name], 1.0 / 4, 1, False, 4, ix='2%s' % c)
        net.update(sub_net)

    block_size = list('abcd')
    for c in block_size:
        if c == 'a':
            sub_net, parent_layer_name = build_residual_block(
                net[parent_layer_name],
                1.0 / 2,
                1.0 / 2,
                True,
                4,
                ix='3%s' % c)
        else:
            sub_net, parent_layer_name = build_residual_block(
                net[parent_layer_name], 1.0 / 4, 1, False, 4, ix='3%s' % c)
        net.update(sub_net)

    block_size = list('abcdef')
    for c in block_size:
        if c == 'a':
            sub_net, parent_layer_name = build_residual_block(
                net[parent_layer_name],
                1.0 / 2,
                1.0 / 2,
                True,
                4,
                ix='4%s' % c)
        else:
            sub_net, parent_layer_name = build_residual_block(
                net[parent_layer_name], 1.0 / 4, 1, False, 4, ix='4%s' % c)
        net.update(sub_net)

    block_size = list('abc')
    for c in block_size:
        if c == 'a':
            sub_net, parent_layer_name = build_residual_block(
                net[parent_layer_name],
                1.0 / 2,
                1.0 / 2,
                True,
                4,
                ix='5%s' % c)
        else:
            sub_net, parent_layer_name = build_residual_block(
                net[parent_layer_name], 1.0 / 4, 1, False, 4, ix='5%s' % c)
        net.update(sub_net)

    net['pool5'] = PoolLayer(net[parent_layer_name],
                             pool_size=7,
                             stride=1,
                             pad=0,
                             mode='average_exc_pad',
                             ignore_border=False)
    net['fc1000'] = DenseLayer(net['pool5'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc1000'], nonlinearity=softmax)

    return net
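`build_simple_block` and `build_residual_block` are not part of this listing. A hedged sketch of the simpler helper, assuming the conventional Conv -> BatchNorm -> ReLU stem used by Lasagne ResNet-50 ports (the names list matches the `['conv1', 'bn_conv1', 'conv1_relu']` call above):

from collections import OrderedDict
import lasagne
from lasagne.layers import Conv2DLayer as ConvLayer
from lasagne.layers import BatchNormLayer, NonlinearityLayer
from lasagne.nonlinearities import rectify

def build_simple_block(incoming_layer, names, num_filters, filter_size, stride,
                       pad, use_bias=False, nonlin=rectify):
    # Conv -> BatchNorm -> Nonlinearity; returns the new layers as a dict plus
    # the name of the last layer so callers can keep chaining by name.
    names = list(names)
    net = OrderedDict()
    net[names[0]] = ConvLayer(incoming_layer, num_filters, filter_size, stride,
                              pad, flip_filters=False, nonlinearity=None,
                              b=lasagne.init.Constant(0.) if use_bias else None)
    net[names[1]] = BatchNormLayer(net[names[0]])
    net[names[2]] = NonlinearityLayer(net[names[1]], nonlinearity=nonlin)
    return net, names[-1]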