Example #1

import lasagne

# `Params`, `BatchNormalizationLayer`, and the layer classes passed through
# `layer_types` are assumed to come from the surrounding project.

def build_single_channel(var,
                         input_size,
                         output_size,
                         layer_sizes,
                         layer_types,
                         weight_init=lasagne.init.GlorotUniform(),
                         bias_init=lasagne.init.Constant(0.),
                         name='',
                         dropouts_init=None):
    """
    Build a single channel containing layers of sizes according to layer_sizes array with initialization given
    :param var: tensor variable for the input
    :param input_size: input dimensionality
    :param output_size: ouput dimensionality
    :param layer_sizes: array of layer sizes
    :param layer_types: array of layer types
    :param weight_init: initialization function for the weights
    :param bias_init: initialization function for the biases
    :param name: name of the network
    :param dropouts_init: initialization of the tied dropout, if none samples a new drop matrix
    :return: the model containing the channels with it's weights, biases, drop matrices and batch normalization hooks
    """
    model = []
    weights = []
    biases = []
    hidden = []
    dropouts = []
    hooks = {}

    # Broadcast a single initializer to one entry per layer (hidden layers + output)
    if isinstance(weight_init, lasagne.init.Initializer):
        weight_init = [weight_init] * (len(layer_sizes) + 1)

    if isinstance(bias_init, lasagne.init.Initializer):
        bias_init = [bias_init] * (len(layer_sizes) + 1)

    # No tied dropout given: every noise layer samples its own drop matrix
    if dropouts_init is None:
        dropouts_init = [None] * (len(layer_sizes) + 1)

    # Add Input Layer
    model.append(
        lasagne.layers.InputLayer((None, input_size), var,
                                  'input_layer_{0}'.format(name)))

    # Add hidden layers
    for index, layer_size in enumerate(layer_sizes):
        # The non-linearity is applied here unless BN_ACTIVATION defers it to
        # the batch normalization layer added below
        model.append(layer_types[index](
            incoming=model[-1],
            num_units=layer_size,
            W=weight_init[index],
            b=bias_init[index],
            nonlinearity=lasagne.nonlinearities.identity
            if Params.BN_ACTIVATION else
            lasagne.nonlinearities.LeakyRectify(Params.LEAKINESS),
            cell_num=Params.LOCALLY_DENSE_M))

        weights.append(model[-1].W)
        biases.append(model[-1].b)

        if Params.BN:
            # Batch normalization; applies the activation itself when
            # BN_ACTIVATION is set
            model.append(
                BatchNormalizationLayer(
                    model[-1],
                    nonlinearity=lasagne.nonlinearities.LeakyRectify(Params.LEAKINESS)
                    if Params.BN_ACTIVATION else
                    lasagne.nonlinearities.identity))

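        # Tied dropout: reuse a drop matrix supplied via dropouts_init; the
        # reversed index pairs this layer with the mirrored layer of the
        # opposite channel, and a None entry samples a fresh drop matrix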
        model.append(
            Params.NOISE_LAYER(model[-1],
                               p=Params.DROP_PROBABILITY,
                               noise_layer=dropouts_init[-(index + 1)]))

        dropouts.append(model[-1])

        hidden.append(model[-1])

    # Add output layer
    model.append(layer_types[-1](model[-1],
                                 num_units=output_size,
                                 W=weight_init[-1],
                                 b=bias_init[-1],
                                 nonlinearity=lasagne.nonlinearities.identity,
                                 cell_num=Params.LOCALLY_DENSE_M))
    weights.append(model[-1].W)
    biases.append(model[-1].b)

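    # `moving_avg_hooks` is a project-specific extension of get_output that
    # collects the batch-normalization moving-average update hooks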
    prediction = lasagne.layers.get_output(model[-1], moving_avg_hooks=hooks)

    return model, hidden, weights, biases, prediction, hooks, dropouts
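
A minimal usage sketch, assuming Theano is available. `ProjectDenseLayer` is a hypothetical stand-in for the project's real layer classes: whatever is passed in `layer_types` must accept the `cell_num` keyword, which lasagne's stock `DenseLayer` does not, and the list needs one class per hidden layer plus one for the output layer. All sizes are illustrative.

import theano
import theano.tensor as T

x_var = T.matrix('x')

# Two hidden layers of 256 units plus a 64-unit linear output layer
model, hidden, weights, biases, prediction, hooks, dropouts = \
    build_single_channel(var=x_var,
                         input_size=128,
                         output_size=64,
                         layer_sizes=[256, 256],
                         layer_types=[ProjectDenseLayer] * 3,  # hypothetical class
                         name='x_channel')

# `prediction` is symbolic; compile it to evaluate the channel on real data
forward = theano.function([x_var], prediction)

Returning the weight, bias, and dropout lists separately makes it straightforward for a caller to tie them to a second channel, which is what the dropouts_init parameter supports.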