def test_resolve_references2():
    refs = {'*_bias': 2, 'I_bias': 1, 'default': 0}
    keys = {'IX': None, 'OX': None, 'I_bias': None, 'O_bias': None}
    full_thing, fb = resolve_references(keys, refs)
    assert full_thing == {'IX': {0}, 'OX': {0}, 'I_bias': {1, 2},
                          'O_bias': {2}}
    assert fb == {'IX': set(), 'OX': set(), 'I_bias': set(), 'O_bias': set()}


def test_resolve_references_list():
    refs = {'I_bias': [1, 2, 3], 'default': 0}
    keys = {'IX': None, 'OX': None, 'I_bias': None, 'O_bias': None}
    full_thing, fb = resolve_references(keys, refs)
    assert full_thing == {'IX': {0}, 'OX': {0}, 'I_bias': {(1, 2, 3)},
                          'O_bias': {0}}
    assert fb == {'IX': set(), 'OX': set(), 'I_bias': set(), 'O_bias': set()}


def test_resolve_references_complicated():
    refs = {'LstmLayer*': {'IX': 1},
            '*Layer*': {'*_bias': 2},
            '*_1': {'I_bias': [4, 5]},
            'ForwardLayer': {'H_bias': 3, 'default': 6},
            '*_2': 7,
            'default': 0}

    keys = {
        'LstmLayer_1': {'IX': None, 'OX': None, 'I_bias': None,
                        'O_bias': None},
        'LstmLayer_2': {'IX': None, 'OX': None, 'I_bias': None,
                        'O_bias': None},
        'ForwardLayer': {'HX': None, 'H_bias': None},
        'FooLayer': {'bar': None, 'bar_bias': None},
    }
    full_thing, fb = resolve_references(keys, refs)
    assert full_thing == {
        'LstmLayer_1': {'IX': {1}, 'OX': {0}, 'I_bias': {2, (4, 5)},
                        'O_bias': {2}},
        'LstmLayer_2': {'IX': {1, 7}, 'OX': {7}, 'I_bias': {2, 7},
                        'O_bias': {2, 7}},
        'ForwardLayer': {'HX': {6}, 'H_bias': {2, 3}},
        'FooLayer': {'bar': {0}, 'bar_bias': {2}}
    }
    assert fb == {
        'LstmLayer_1': {'IX': set(), 'OX': set(), 'I_bias': set(),
                        'O_bias': set()},
        'LstmLayer_2': {'IX': set(), 'OX': set(), 'I_bias': set(),
                        'O_bias': set()},
        'ForwardLayer': {'HX': set(), 'H_bias': set()},
        'FooLayer': {'bar': set(), 'bar_bias': set()},
    }


def test_resolve_references_parent_fallback():
    refs = {'FooLayer': {'HX': 0, 'fallback': 2}, 'fallback': 1}
    keys = {'FooLayer': {'HX': None, 'H_bias': None},
            'BarLayer': {'HX': None, 'H_bias': None}}
    full_thing, fb = resolve_references(keys, refs)
    assert fb == {'FooLayer': {'HX': {2}, 'H_bias': {2}},
                  'BarLayer': {'HX': {1}, 'H_bias': {1}}}


def test_resolve_references_parent_default():
    refs = {'FooLayer': {'HX': 0, 'default': 2}, 'default': 1}
    keys = {'FooLayer': {'HX': None, 'H_bias': None},
            'BarLayer': {'HX': None, 'H_bias': None}}
    full_thing, fb = resolve_references(keys, refs)
    assert full_thing == {
        'FooLayer': {'HX': {0}, 'H_bias': {2}},
        'BarLayer': {'HX': {1}, 'H_bias': {1}}
    }

Example #6
    def set_weight_modifiers(self, default_or_mod_dict=None, **kwargs):
        """
        Install
        :class:`ValueModifiers <brainstorm.value_modifiers.ValueModifier>` in
        the network to change the weights.

        They can be run manually using :meth:`.apply_weight_modifiers`,
        but they will also be called by the trainer after each weight update.

        Value modifiers can be set for specific weights in the same way
        initializers can, but there is no fallback
        (see :meth:`.initialize` for details).


        A modifier can be a ValueModifier object or a list of them.
        So for example:

        >>> net.set_weight_modifiers(
        ...    default=bs.ClipValues(-1, 1),
        ...    FullyConnectedLayer={'W': [bs.RescaleIncomingWeights(),
        ...                               bs.MaskValues(my_mask)]}
        ...    )

        Note:
            The order in which ValueModifiers appear in the list matters,
            because it is the same order in which they will be executed.
        """
        weight_mod_refs = _update_references_with_dict(default_or_mod_dict,
                                                       kwargs)
        all_parameters = {
            k: v.parameters
            for k, v in self.buffer.items()
            if k not in ['parameters', 'gradients'] and 'parameters' in v
        }
        weight_mods, fallback = resolve_references(all_parameters,
                                                   weight_mod_refs)

        assert not prune_view_references(fallback), \
            'fallback is not supported for weight modifiers'
        weight_mods = prune_view_references(weight_mods)
        self.weight_modifiers = order_and_copy_modifiers(weight_mods)
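
# Usage sketch (not part of the snippet above): it only reuses names the
# docstring itself mentions -- bs.ClipValues and apply_weight_modifiers -- and
# assumes an already constructed brainstorm network is passed in. The helper
# name is made up for illustration.
import brainstorm as bs

def clip_all_weights(net):
    """Install a global ClipValues modifier and run it once by hand."""
    net.set_weight_modifiers(default=bs.ClipValues(-1, 1))
    net.apply_weight_modifiers()  # the trainer also calls this after each update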

Example #7
    def set_weight_modifiers(self, default_or_mod_dict=None, **kwargs):
        """
        Install
        :class:`ValueModifiers <brainstorm.value_modifiers.ValueModifier>` in
        the network to change the weights.

        They can be run manually using :meth:`.apply_weight_modifiers`,
        but they will also be called by the trainer after each weight update.

        Value modifiers can be set for specific weights in the same way
        initializers can, but there is no fallback
        (see :meth:`.initialize` for details).


        A modifier can be a ValueModifier object or a list of them.
        So for example:

        >>> net.set_weight_modifiers(
        ...    default=bs.ClipValues(-1, 1),
        ...    FullyConnectedLayer={'W': [bs.RescaleIncomingWeights(),
        ...                               bs.MaskValues(my_mask)]}
        ...    )

        Note:
            The order in which ValueModifiers appear in the list matters,
            because it is the same order in which they will be executed.
        """
        weight_mod_refs = _update_references_with_dict(default_or_mod_dict,
                                                       kwargs)
        all_parameters = {k: v.parameters
                          for k, v in self.buffer.items()
                          if k not in ['parameters', 'gradients'] and
                          'parameters' in v}
        weight_mods, fallback = resolve_references(all_parameters,
                                                   weight_mod_refs)

        assert not prune_view_references(fallback), \
            'fallback is not supported for weight modifiers'
        weight_mods = prune_view_references(weight_mods)
        self.weight_modifiers = order_and_copy_modifiers(weight_mods)
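
# Usage sketch (not from the snippet above): per-weight modifiers as in the
# docstring's own example; list order is execution order. FullyConnectedLayer,
# 'W', RescaleIncomingWeights and MaskValues come from the docstring, `mask`
# stands in for any array of matching shape, and the helper name is made up.
import brainstorm as bs

def rescale_then_mask(net, mask):
    """First rescale incoming weights, then apply the mask (order matters)."""
    net.set_weight_modifiers(
        FullyConnectedLayer={'W': [bs.RescaleIncomingWeights(),
                                   bs.MaskValues(mask)]})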

Example #8
    def initialize(self, default_or_init_dict=None, seed=None, **kwargs):
        """Initialize the weights of the network.

        Initialization can be specified in three equivalent ways:

            1. just a default initializer:

                >>> net.initialize(Gaussian())

                Note that this is equivalent to:

                >>> net.initialize(default=Gaussian())

            2. by passing a dictionary:

                >>> net.initialize({'RegularLayer': Uniform(),
                ...                 'LstmLayer': Gaussian()})

            3. by using keyword arguments:

                >>> net.initialize(RegularLayer=Uniform(),
                ...                LstmLayer=Uniform())

        All following explanations refer to the dictionary style of
        initialization, because it is the most general one.

        Note:
            It is not recommended to combine 2. and 3., but if they are
            combined, keyword arguments take precedence.

        Each initialization consists of a layer-pattern that maps to an
        initializer or a weight-pattern dictionary.

        Layer patterns can take the following forms:

            1. ``{'layer_name': INIT_OR_SUBDICT}``
               Matches all the weights of the layer named ``layer_name``.
            2. ``{'layer_*': INIT_OR_SUBDICT}``
               Matches all layers with a name that starts with ``layer_``.
               The wild-card ``*`` can appear at arbitrary positions and even
               multiple times in one path.

        There are two special layer patterns:

            3. ``{'default': INIT}``
               Matches all weights that are not matched by any other
               path-pattern.
            4. ``{'fallback': INIT}``
               Sets a fallback initializer for every weight. It will only be
               evaluated for the weights for which the regular initializer
               failed with an InitializationError.

               `This is useful for initializers that require a certain shape
               of weights and will not work otherwise. The fallback will then
               be used for all cases when that initializer failed.`

        The weight-pattern sub-dictionary follows the same form as the layer-
        pattern:

            1) ``{'layer_pattern': {'a': INIT_A, 'b': INIT_B}}``
            2) ``{'layer_pattern': {'a*': INIT}}``
            3) ``{'layer_pattern': {'default': INIT}}``
            4) ``{'layer_pattern': {'fallback': INIT}}``


        An initializer can be a scalar, something that converts to a numpy
        array of the correct shape, or an :class:`Initializer` object.
        So for example:

        >>> net.initialize(default=0,
        ...                RnnLayer={'b': [1, 2, 3, 4, 5]},
        ...                ForwardLayer=bs.Gaussian())

        Note:
            Each view must match exactly one initialization and up to one
            fallback to be unambiguous. Otherwise the initialization will fail.

        You can specify a seed to make the initialization reproducible:

        >>> net.initialize({'default': bs.Gaussian()}, seed=1234)
        """
        init_refs = _update_references_with_dict(default_or_init_dict, kwargs)
        self.initializers = get_description(init_refs)
        all_parameters = {
            k: v.parameters
            for k, v in self.buffer.items()
            if isinstance(v, BufferView) and 'parameters' in v
        }
        _replace_lists_with_array_initializers(init_refs)
        initializers, fallback = resolve_references(all_parameters, init_refs)
        init_rnd = self.rnd.create_random_state(seed)
        for layer_name, views in sorted(all_parameters.items()):
            if views is None:
                continue
            for view_name, view in sorted(views.items()):
                init = initializers[layer_name][view_name]
                fb = fallback[layer_name][view_name]
                if len(init) > 1:
                    raise NetworkValidationError(
                        "Multiple initializers for {}.{}: {}".format(
                            layer_name, view_name, init))

                if len(init) == 0:
                    raise NetworkValidationError(
                        "No initializer for {}.{}".format(
                            layer_name, view_name))
                if len(fb) > 1:
                    raise NetworkValidationError(
                        "Multiple fallbacks for {}.{}: {}".format(
                            layer_name, view_name, fb))

                fb = fb.pop() if len(fb) else None
                self.handler.set_from_numpy(
                    view,
                    evaluate_initializer(init.pop(),
                                         view.shape,
                                         fb,
                                         seed=init_rnd.generate_seed()))
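
# Usage sketch (not part of the snippet above): the 'default'/'fallback'
# pattern described in the docstring. Gaussian, Uniform and the seed argument
# are the names the docstring uses; the helper name is made up, and `net` is
# assumed to be an already constructed brainstorm network.
import brainstorm as bs

def init_with_fallback(net, seed=1234):
    """Gaussian init everywhere; Uniform wherever the regular init fails."""
    net.initialize({'default': bs.Gaussian(),
                    'fallback': bs.Uniform()},
                   seed=seed)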
def test_resolve_references_fallback():
    refs = {'*_bias': 2, 'fallback': 0}
    keys = {'OX': None, 'O_bias': None}
    full_thing, fb = resolve_references(keys, refs)
    assert fb == {'OX': {0}, 'O_bias': {0}}


def test_resolve_references1():
    refs = {'*_bias': 2, 'IX': 1, 'default': 0}
    struct = {'IX': None, 'OX': None, 'I_bias': None, 'O_bias': None}
    full_thing, fb = resolve_references(struct, refs)
    assert full_thing == {'IX': {1}, 'OX': {0}, 'I_bias': {2}, 'O_bias': {2}}
    assert fb == {'IX': set(), 'OX': set(), 'I_bias': set(), 'O_bias': set()}

Example #11
    def initialize(self, default_or_init_dict=None, seed=None, **kwargs):
        """Initialize the weights of the network.

        Initialization can be specified in three equivalent ways:

            1. just a default initializer:

                >>> net.initialize(Gaussian())

                Note that this is equivalent to:

                >>> net.initialize(default=Gaussian())

            2. by passing a dictionary:

                >>> net.initialize({'RegularLayer': Uniform(),
                ...                 'LstmLayer': Gaussian()})

            3. by using keyword arguments:

                >>> net.initialize(RegularLayer=Uniform(),
                ...                LstmLayer=Uniform())

        All following explanations refer to the dictionary style of
        initialization, because it is the most general one.

        Note:
            It is not recommended to combine 2. and 3., but if they are
            combined, keyword arguments take precedence.

        Each initialization consists of a layer-pattern that maps to an
        initializer or a weight-pattern dictionary.

        Layer patterns can take the following forms:

            1. ``{'layer_name': INIT_OR_SUBDICT}``
               Matches all the weights of the layer named ``layer_name``.
            2. ``{'layer_*': INIT_OR_SUBDICT}``
               Matches all layers with a name that starts with ``layer_``.
               The wild-card ``*`` can appear at arbitrary positions and even
               multiple times in one path.

        There are two special layer patterns:

            3. ``{'default': INIT}``
               Matches all weights that are not matched by any other
               path-pattern.
            4. ``{'fallback': INIT}``
               Sets a fallback initializer for every weight. It will only be
               evaluated for the weights for which the regular initializer
               failed with an InitializationError.

               `This is useful for initializers that require a certain shape
               of weights and will not work otherwise. The fallback will then
               be used for all cases when that initializer failed.`

        The weight-pattern sub-dictionary follows the same form as the layer-
        pattern:

            1) ``{'layer_pattern': {'a': INIT_A, 'b': INIT_B}}``
            2) ``{'layer_pattern': {'a*': INIT}}``
            3) ``{'layer_pattern': {'default': INIT}}``
            4) ``{'layer_pattern': {'fallback': INIT}}``


        An initializer can be a scalar, something that converts to a numpy
        array of the correct shape, or an :class:`Initializer` object.
        So for example:

        >>> net.initialize(default=0,
        ...                RnnLayer={'b': [1, 2, 3, 4, 5]},
        ...                ForwardLayer=bs.Gaussian())

        Note:
            Each view must match exactly one initialization and up to one
            fallback to be unambiguous. Otherwise the initialization will fail.

        You can specify a seed to make the initialization reproducible:

        >>> net.initialize({'default': bs.Gaussian()}, seed=1234)
        """
        init_refs = _update_references_with_dict(default_or_init_dict, kwargs)
        self.initializers = get_description(init_refs)
        all_parameters = {k: v.parameters
                          for k, v in self.buffer.items()
                          if isinstance(v, BufferView) and 'parameters' in v}
        _replace_lists_with_array_initializers(init_refs)
        initializers, fallback = resolve_references(all_parameters, init_refs)
        init_rnd = self.rnd.create_random_state(seed)
        for layer_name, views in sorted(all_parameters.items()):
            if views is None:
                continue
            for view_name, view in sorted(views.items()):
                init = initializers[layer_name][view_name]
                fb = fallback[layer_name][view_name]
                if len(init) > 1:
                    raise NetworkValidationError(
                        "Multiple initializers for {}.{}: {}".format(
                            layer_name, view_name, init))

                if len(init) == 0:
                    raise NetworkValidationError("No initializer for {}.{}".
                                                 format(layer_name, view_name))
                if len(fb) > 1:
                    raise NetworkValidationError(
                        "Multiple fallbacks for {}.{}: {}".format(
                            layer_name, view_name, fb))

                fb = fb.pop() if len(fb) else None
                self.handler.set_from_numpy(
                    view,
                    evaluate_initializer(init.pop(), view.shape, fb,
                                         seed=init_rnd.generate_seed()))
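
# Sketch (not from the snippet above) of the ambiguity the docstring's note
# warns about: two layer patterns that both resolve an initializer for the
# same weight yield more than one candidate, so initialize() raises
# NetworkValidationError ("Multiple initializers ..."). Layer names are made up.
import brainstorm as bs

def ambiguous_init(net):
    # Both 'Lstm*' and '*Layer' match a layer named e.g. 'LstmLayer', so every
    # weight of that layer ends up with two initializers and validation fails.
    net.initialize({'Lstm*': bs.Gaussian(),
                    '*Layer': bs.Uniform(),
                    'default': 0})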