def test_prune_view_references():
    vrefs = {
        'LstmLayer_1': {'IX': {}, 'OX': {}, 'I_bias': {},
                        'O_bias': {}},
        'LstmLayer_2': {'IX': {1, 7}, 'OX': {}, 'I_bias': {2},
                        'O_bias': {}}
    }
    assert prune_view_references(vrefs) == {
        'LstmLayer_2': {'IX': {1, 7}, 'I_bias': {2}}
    }
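
The test pins down the pruning behaviour: views whose reference sets are empty are dropped, and layers left with no non-empty views are dropped entirely. A minimal sketch consistent with the assertion above (an illustrative assumption, not brainstorm's actual implementation):

def prune_view_references(view_references):
    # Keep only views that still reference something, and only layers
    # that retain at least one such view.
    pruned = {}
    for layer_name, views in view_references.items():
        non_empty = {view: refs for view, refs in views.items() if refs}
        if non_empty:
            pruned[layer_name] = non_empty
    return pruned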
Example #2
    def set_weight_modifiers(self, default_or_mod_dict=None, **kwargs):
        """
        Install
        :class:`ValueModifiers <brainstorm.value_modifiers.ValueModifier>` in
        the network to change the weights.

        They can be run manually using :meth:`.apply_weight_modifiers`,
        but they will also be called by the trainer after each weight update.

        Value modifiers can be set for specific weights in the same way
        initializers can, but there is no fallback
        (see :meth:`.initialize` for details).

        A modifier can be a single ValueModifier object or a list of them,
        for example:

        >>> net.set_weight_modifiers(
        ...    default=bs.ClipValues(-1, 1),
        ...    FullyConnectedLayer={'W': [bs.RescaleIncomingWeights(),
        ...                               bs.MaskValues(my_mask)]}
        ...    )

        Note:
            The order in which ValueModifiers appear in the list matters,
            because it is the same order in which they will be executed.
        """
        weight_mod_refs = _update_references_with_dict(default_or_mod_dict,
                                                       kwargs)
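        # Map each layer to its parameter views, skipping the flat top-level
        # 'parameters' and 'gradients' buffers and any entry that has no
        # parameters of its own.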
        all_parameters = {
            k: v.parameters
            for k, v in self.buffer.items()
            if k not in ['parameters', 'gradients'] and 'parameters' in v
        }
        weight_mods, fallback = resolve_references(all_parameters,
                                                   weight_mod_refs)

        assert not prune_view_references(fallback), \
            'fallback is not supported for weight modifiers'
        weight_mods = prune_view_references(weight_mods)
        self.weight_modifiers = order_and_copy_modifiers(weight_mods)
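
For orientation, `_update_references_with_dict` merges the positional argument with the keyword references before they are resolved against the parameter views. A minimal sketch of plausible behaviour (an assumption for illustration; the real brainstorm helper may differ in details):

def _update_references_with_dict(default_or_ref_dict, references):
    # Assumption: a bare (non-dict) positional value acts as the 'default'
    # reference, mirroring the docstring's `default=` keyword.
    if default_or_ref_dict is None:
        all_refs = {}
    elif isinstance(default_or_ref_dict, dict):
        all_refs = dict(default_or_ref_dict)
    else:
        all_refs = {'default': default_or_ref_dict}

    # Reject references that were given both positionally and as kwargs.
    duplicates = set(all_refs) & set(references)
    if duplicates:
        raise TypeError('Duplicate references: {}'.format(sorted(duplicates)))

    all_refs.update(references)
    return all_refs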