from lasagne.utils import as_theano_expression


def align_targets(predictions, targets):
    """Helper turning a 1D target vector into a column when the predictions
    are a column vector (broadcastable pattern (False, True))."""
    if (getattr(predictions, 'broadcastable', None) == (False, True) and
            getattr(targets, 'ndim', None) == 1):
        targets = as_theano_expression(targets).dimshuffle(0, 'x')
    return predictions, targets
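
# Usage sketch for align_targets() (an added illustration; assumes Theano and
# numpy are installed; T.col builds a matrix with broadcastable pattern
# (False, True), exactly the case the helper reshapes 1D targets for):
import numpy as np
import theano.tensor as T

predictions = T.col('p')
targets = np.arange(4, dtype='float32')
predictions, targets = align_targets(predictions, targets)
# `targets` is now a symbolic (4, 1) column matching the predictions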
Example #2
import pytest


def test_as_theano_expression_fails():
    from lasagne.utils import as_theano_expression
    with pytest.raises(TypeError):
        as_theano_expression({})
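
# A quick illustration of the contract tested above (added for clarity;
# assumes Theano, numpy and Lasagne are installed):
import numpy as np
import theano.tensor as T
from lasagne.utils import as_theano_expression

x = as_theano_expression(T.matrix('x'))     # Theano expressions pass through
c = as_theano_expression(np.zeros((2, 3)))  # numpy arrays become constants
try:
    as_theano_expression({})                # anything else raises TypeError
except TypeError:
    pass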
Example #3
def get_full_output(layer_or_layers, inputs=None, **kwargs):
    """
    Computes the output of the network at one or more given layers.
    Optionally, you can define the input(s) to propagate through the network
    instead of using the input variable(s) associated with the network's
    input layer(s).

    Parameters
    ----------
    layer_or_layers : Layer or list
        the :class:`Layer` instance for which to compute the output
        expressions, or a list of :class:`Layer` instances.

    inputs : None, Theano expression, numpy array, or dict
        If None, uses the input variables associated with the
        :class:`InputLayer` instances.
        If a Theano expression, this defines the input for the network's
        :class:`InputLayer` instances. Unlike `get_output()`, this function
        does not raise a ValueError for multiple input layers; the same
        expression is used for all of them.
        If a numpy array, this will be wrapped as a Theano constant
        and used just like a Theano expression.
        If a dictionary, any :class:`Layer` instance (including the
        input layers) can be mapped to a Theano expression or numpy
        array to use instead of its regular output.

    Returns
    -------
    (output, extra_outputs) : tuple
        `output` is the output of the given layer(s) for the given network
        input: a Theano expression, or a list of expressions if a list of
        layers was given. `extra_outputs` is a dictionary mapping each layer
        that implements `get_full_output_for()` to the extra value returned
        by that method.

    Notes
    -----
    Depending on your network architecture, `get_full_output([l1, l2])` may
    be crucially different from `[get_full_output(l1), get_full_output(l2)]`.
    Only the former ensures that the output expressions depend on the same
    intermediate expressions. For example, when `l1` and `l2` depend on
    a common dropout layer, the former will use the same dropout mask for
    both, while the latter will use two different dropout masks.
    """
    from lasagne import utils
    from lasagne.layers import get_all_layers
    from lasagne.layers.input import InputLayer
    from lasagne.layers.base import MergeLayer
    # obtain topological ordering of all layers the output layer(s) depend on
    treat_as_input = list(inputs.keys()) if isinstance(inputs, dict) else []
    all_layers = get_all_layers(layer_or_layers, treat_as_input)
    # initialize layer-to-expression mapping from all input layers
    all_outputs = dict(
        (layer, layer.input_var) for layer in all_layers
        if isinstance(layer, InputLayer) and layer not in treat_as_input)
    extra_outputs = dict()
    # update layer-to-expression mapping from given input(s), if any
    if isinstance(inputs, dict):
        all_outputs.update((layer, utils.as_theano_expression(expr))
                           for layer, expr in list(inputs.items()))
    elif inputs is not None:
        # unlike get_output(), no ValueError is raised here when the network
        # has multiple input layers: the single given expression is used for
        # all of them (see the loop below)
        for input_layer in all_outputs:
            all_outputs[input_layer] = utils.as_theano_expression(inputs)
    # update layer-to-expression mapping by propagating the inputs
    for layer in all_layers:
        if layer not in all_outputs:
            try:
                if isinstance(layer, MergeLayer):
                    layer_inputs = [
                        all_outputs[input_layer]
                        for input_layer in layer.input_layers
                    ]
                else:
                    layer_inputs = all_outputs[layer.input_layer]
            except KeyError:
                # one of the input_layer attributes must have been `None`
                raise ValueError(
                    "get_output() was called without giving an "
                    "input expression for the free-floating "
                    "layer %r. Please call it with a dictionary "
                    "mapping this layer to an input expression." % layer)
            if hasattr(layer, "get_full_output_for"):
                output, extra = layer.get_full_output_for(
                    layer_inputs, **kwargs)
                all_outputs[layer] = output
                extra_outputs[layer] = extra
            else:
                all_outputs[layer] = layer.get_output_for(
                    layer_inputs, **kwargs)
    # return the output(s) of the requested layer(s) only
    try:
        return [all_outputs[layer] for layer in layer_or_layers], extra_outputs
    except TypeError:
        return all_outputs[layer_or_layers], extra_outputs
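
# Usage sketch for get_full_output() (an added illustration; assumes Theano
# and Lasagne are installed; the tiny network is made up for the example):
import theano.tensor as T
from lasagne.layers import InputLayer, DenseLayer

l_in = InputLayer((None, 10))
l_out = DenseLayer(l_in, num_units=3)
output, extra_outputs = get_full_output(l_out, inputs={l_in: T.matrix('x')})
# `output` is the usual symbolic expression; `extra_outputs` maps each layer
# that implements get_full_output_for() to its extra return value (none here)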
Example #4
def N_get_output(layer_or_layers,
                 inputs,
                 hnet,
                 input_h,
                 deterministic=False,
                 norm_type='BN',
                 static_bias=None,
                 nlb=nlb,
                 test_time=False,
                 **kwargs):
    """Variant of get_output() applying conditional normalization: for each
    layer selected by the predicate `nlb`, the layer's own bias and
    nonlinearity are stripped, its pre-activation output is normalized
    (batch normalization for norm_type='BN', weight normalization for 'WN'),
    and the scale (gamma) and shift (beta) are sliced from the output of the
    hypernetwork `hnet` evaluated on `input_h` (beta is taken from
    `static_bias` instead, if given). Relies on module-level helpers such as
    `slicing` and `nlb` that are not part of this snippet.
    """

    # check if the keys of the dictionary are valid
    if isinstance(inputs, dict):
        for input_key in inputs.keys():
            if (input_key is not None) and (not isinstance(input_key, Layer)):
                raise TypeError("The inputs dictionary keys must be"
                                " lasagne layers not %s." % type(input_key))
    # track accepted kwargs used by get_output_for
    accepted_kwargs = {'deterministic'}
    # obtain topological ordering of all layers the output layer(s) depend on
    treat_as_input = inputs.keys() if isinstance(inputs, dict) else []
    all_layers = get_all_layers(layer_or_layers, treat_as_input)
    # initialize layer-to-expression mapping from all input layers
    all_outputs = dict(
        (layer, layer.input_var) for layer in all_layers
        if isinstance(layer, InputLayer) and layer not in treat_as_input)
    # update layer-to-expression mapping from given input(s), if any
    if isinstance(inputs, dict):
        all_outputs.update((layer, utils.as_theano_expression(expr))
                           for layer, expr in inputs.items())
    elif inputs is not None:
        if len(all_outputs) > 1:
            raise ValueError("get_output() was called with a single input "
                             "expression on a network with multiple input "
                             "layers. Please call it with a dictionary of "
                             "input expressions instead.")
        for input_layer in all_outputs:
            all_outputs[input_layer] = utils.as_theano_expression(inputs)

    N_params = lasagne.layers.get_output(hnet, input_h)
    index = 0
    if static_bias is not None:
        index_b = 0
        if static_bias.ndim == 1:
            static_bias = static_bias.dimshuffle('x', 0)

    # update layer-to-expression mapping by propagating the inputs
    for layer in all_layers:

        if layer not in all_outputs:
            try:
                if isinstance(layer, MergeLayer):
                    layer_inputs = [
                        all_outputs[input_layer]
                        for input_layer in layer.input_layers
                    ]
                else:
                    layer_inputs = all_outputs[layer.input_layer]
            except KeyError:
                # one of the input_layer attributes must have been `None`
                raise ValueError("get_output() was called without giving an "
                                 "input expression for the free-floating "
                                 "layer %r. Please call it with a dictionary "
                                 "mapping this layer to an input expression." %
                                 layer)

            if not isinstance(layer, ElemwiseSumLayer) and nlb(layer):

                # temporarily swap the layer's nonlinearity for the identity;
                # it is re-applied after the conditional normalization below
                nonlinearity = getattr(layer, 'nonlinearity', None)
                if nonlinearity is not None:
                    layer.nonlinearity = lambda x: x
                else:
                    nonlinearity = lambda x: x

                # permanently drop the layer's own bias; the conditional
                # shift (beta) below takes its role
                if hasattr(layer, 'b') and layer.b is not None:
                    del layer.params[layer.b]
                    layer.b = None

                size = layer.output_shape[1]
                print(size)  # debug: width of the normalized feature dimension
                if norm_type == 'BN':
                    N_layer = BatchNormLayer(layer, beta=None, gamma=None)
                    layer_output = layer.get_output_for(layer_inputs, **kwargs)
                    if test_time:
                        # at test time, always use the stored batch statistics
                        N_output = N_layer.get_output_for(layer_output,
                                                          deterministic=True)
                    else:
                        N_output = N_layer.get_output_for(
                            layer_output, deterministic=deterministic)

                elif norm_type == 'WN':
                    N_layer = WeightNormLayer(layer, b=None, g=None)
                    layer_output = layer.get_output_for(layer_inputs, **kwargs)
                    N_output = N_layer.get_output_for(layer_output,
                                                      deterministic)
                else:
                    raise ValueError('normalization method {} not '
                                     'supported.'.format(norm_type))

                # consume the next `size` conditional parameters from the
                # hypernetwork output
                gamma, index = slicing(N_params, index, size)
                if static_bias is None:
                    beta, index = slicing(N_params, index, size)
                else:
                    beta, index_b = slicing(static_bias, index_b, size)
                if len(layer.output_shape) == 4:
                    gamma = gamma.dimshuffle(0, 1, 'x', 'x')
                    beta = beta.dimshuffle(0, 1, 'x', 'x')

                CN_output = gamma * N_output + beta

                all_outputs[layer] = nonlinearity(CN_output)
                layer.nonlinearity = nonlinearity
            else:
                all_outputs[layer] = layer.get_output_for(
                    layer_inputs, **kwargs)
            try:
                accepted_kwargs |= set(
                    utils.inspect_kwargs(layer.get_output_for))
            except TypeError:
                # If introspection is not possible, skip it
                pass
            accepted_kwargs |= set(layer.get_output_kwargs)

    hs = hnet.output_shape[1]
    errmsg = ('hnet output size ({}) does not match the number of '
              'conditional-norm parameters consumed ({})'.format(hs, index))
    assert hs == index, errmsg
    unused_kwargs = set(kwargs.keys()) - accepted_kwargs
    if unused_kwargs:
        suggestions = []
        for kwarg in unused_kwargs:
            suggestion = get_close_matches(kwarg, accepted_kwargs)
            if suggestion:
                suggestions.append('%s (perhaps you meant %s)' %
                                   (kwarg, suggestion[0]))
            else:
                suggestions.append(kwarg)
        warn("get_output() was called with unused kwargs:\n\t%s" %
             "\n\t".join(suggestions))
    # return the output(s) of the requested layer(s) only
    try:
        return [all_outputs[layer] for layer in layer_or_layers]
    except TypeError:
        return all_outputs[layer_or_layers]
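
# NOTE (added): the `slicing` helper used above is not defined in this
# snippet. From the index arithmetic (each call consumes `size` entries, and
# the final assert compares `index` to hnet's output width), it presumably
# behaves like this hypothetical sketch:
#
#   def slicing(params, index, size):
#       # take `size` columns starting at `index` along axis 1 and return
#       # the slice together with the advanced index
#       return params[:, index:index + size], index + size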
Example #5
def get_output(layer_or_layers,
               inputs=None,
               layer_inputs=None,
               layer_kwargs=None,
               **kwargs):
    """
    Computes the output of the network at one or more given layers.
    Optionally, you can define the input(s) to propagate through the network
    instead of using the input variable(s) associated with the network's
    input layer(s).

    Parameters
    ----------
    layer_or_layers : Layer or list
        the :class:`Layer` instance for which to compute the output
        expressions, or a list of :class:`Layer` instances.

    inputs : None, Theano expression, numpy array, or dict
        If None, uses the input variables associated with the
        :class:`InputLayer` instances.
        If a Theano expression, this defines the input for a single
        :class:`InputLayer` instance. Will throw a ValueError if there
        are multiple :class:`InputLayer` instances.
        If a numpy array, this will be wrapped as a Theano constant
        and used just like a Theano expression.
        If a dictionary, any :class:`Layer` instance (including the
        input layers) can be mapped to a Theano expression or numpy
        array to use instead of its regular output.

    layer_inputs : None or dict
        If None, no per-layer input overrides are used.
        If a dictionary, any input of a :class:`Layer` instance can be mapped
        to a Theano expression or numpy array to use instead of its regular
        input. This is used internally when precomputation is involved for
        recurrent cells, specifically when the same input layer feeds two
        different layers that each have a different precomputed input.

    layer_kwargs : None or dict
        If a dictionary, maps a :class:`Layer` instance to extra keyword
        arguments to pass to its `get_output_for()` call, in addition to
        (and taking precedence over) the global `**kwargs`.

    Returns
    -------
    output : Theano expression or list
        the output of the given layer(s) for the given network input

    Notes
    -----
    Depending on your network architecture, `get_output([l1, l2])` may
    be crucially different from `[get_output(l1), get_output(l2)]`. Only
    the former ensures that the output expressions depend on the same
    intermediate expressions. For example, when `l1` and `l2` depend on
    a common dropout layer, the former will use the same dropout mask for
    both, while the latter will use two different dropout masks.
    """
    from .input import InputLayer
    from .base import MergeLayer
    layer_inputs = layer_inputs or {}
    layer_kwargs = layer_kwargs or {}
    # track accepted kwargs used by get_output_for
    accepted_kwargs = {'deterministic'}
    # obtain topological ordering of all layers the output layer(s) depend on
    treat_as_input = inputs.keys() if isinstance(inputs, dict) else []
    all_layers = get_all_layers(layer_or_layers, treat_as_input)
    # initialize layer-to-expression mapping from all input layers
    all_outputs = dict(
        (layer, layer.input_var) for layer in all_layers
        if isinstance(layer, InputLayer) and layer not in treat_as_input)
    # update layer-to-expression mapping from given input(s), if any
    if isinstance(inputs, dict):
        all_outputs.update(
            (layer,
             {name: utils.as_theano_expression(e)
              for name, e in expr.items()}
             if isinstance(expr, dict)
             else utils.as_theano_expression(expr))
            for layer, expr in inputs.items())
    elif inputs is not None:
        if len(all_outputs) > 1:
            raise ValueError("get_output() was called with a single input "
                             "expression on a network with multiple input "
                             "layers. Please call it with a dictionary of "
                             "input expressions instead.")
        for input_layer in all_outputs:
            all_outputs[input_layer] = utils.as_theano_expression(inputs)
    # update layer-to-expression mapping by propagating the inputs
    for layer in all_layers:
        if layer not in all_outputs:
            try:
                if isinstance(layer, MergeLayer):
                    # copy any per-layer overrides so the caller's
                    # `layer_inputs` dictionary is not mutated
                    inputs_n = dict(layer_inputs.get(layer, {}))
                    if isinstance(layer.input_layers, dict):
                        for name, input_layer in layer.input_layers.items():
                            if name not in inputs_n:
                                inputs_n[name] = all_outputs[input_layer]
                    else:
                        for i, input_layer in enumerate(layer.input_layers):
                            if i not in inputs_n:
                                inputs_n[i] = all_outputs[input_layer]
                        inputs_n = [v for k, v in sorted(inputs_n.items())]
                else:
                    # check membership explicitly: `or` would truth-test a
                    # Theano expression, which raises a TypeError
                    inputs_n = (layer_inputs[layer] if layer in layer_inputs
                                else all_outputs[layer.input_layer])
            except KeyError:
                # one of the input_layer attributes must have been `None`
                raise ValueError("get_output() was called without giving an "
                                 "input expression for the free-floating "
                                 "layer %r. Please call it with a dictionary "
                                 "mapping this layer to an input expression." %
                                 layer)
            # copy the global kwargs so per-layer kwargs do not leak into
            # subsequent layers' calls
            kwargs_n = dict(kwargs)
            kwargs_n.update(layer_kwargs.get(layer, {}))
            all_outputs[layer] = layer.get_output_for(inputs_n, **kwargs_n)
            try:
                accepted_kwargs |= set(
                    utils.inspect_kwargs(layer.get_output_for))
            except TypeError:
                # If introspection is not possible, skip it
                pass
            accepted_kwargs |= set(layer.get_output_kwargs)
    unused_kwargs = set(kwargs.keys()) - accepted_kwargs
    if unused_kwargs:
        suggestions = []
        for kwarg in unused_kwargs:
            suggestion = get_close_matches(kwarg, accepted_kwargs)
            if suggestion:
                suggestions.append('%s (perhaps you meant %s)' %
                                   (kwarg, suggestion[0]))
            else:
                suggestions.append(kwarg)
        warn("get_output() was called with unused kwargs:\n\t%s" %
             "\n\t".join(suggestions))
    # return the output(s) of the requested layer(s) only
    try:
        return [all_outputs[layer] for layer in layer_or_layers]
    except TypeError:
        return all_outputs[layer_or_layers]
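
# Usage sketch for the extended get_output() (an added illustration; assumes
# the function lives inside the lasagne.layers package so its relative
# imports resolve, and that Theano and Lasagne are installed; the layers and
# overrides below are made up):
#
#   import theano.tensor as T
#   from lasagne.layers import InputLayer, DenseLayer
#   l_in = InputLayer((None, 10))
#   l_hid = DenseLayer(l_in, num_units=20)
#   l_out = DenseLayer(l_hid, num_units=3)
#   out = get_output(l_out,
#                    inputs={l_in: T.matrix('x')},
#                    # feed l_hid a precomputed input instead of l_in's output
#                    layer_inputs={l_hid: T.matrix('precomputed')},
#                    # extra kwargs for l_out's get_output_for() only
#                    layer_kwargs={l_out: {'deterministic': True}})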