Example 1
    def __init__(self,
                 forward=None,
                 backward=None,
                 return_sequences=False,
                 forward_conf=None,
                 backward_conf=None):
        assert forward is not None or forward_conf is not None, "Must provide a forward RNN or a forward configuration"
        assert backward is not None or backward_conf is not None, "Must provide a backward RNN or a backward configuration"
        super(Bidirectional, self).__init__()
        if forward is not None:
            self.forward = forward
        else:
            # Must import inside the function, because in order to support loading
            # we must import this module inside layer_utils... ugly
            from keras.utils.layer_utils import container_from_config
            self.forward = container_from_config(forward_conf)
        if backward is not None:
            self.backward = backward
        else:
            from keras.utils.layer_utils import container_from_config
            self.backward = container_from_config(backward_conf)
        self.return_sequences = return_sequences
        self.output_dim = self.forward.output_dim + self.backward.output_dim

        if not (self.return_sequences == self.forward.return_sequences ==
                self.backward.return_sequences):
            raise ValueError("Make sure 'return_sequences' is equal for self,"
                             " forward and backward.")
Example 2
    def __init__(self, forward=None, backward=None, return_sequences=False,
                 truncate_gradient=-1, forward_conf=None, backward_conf=None):
        assert forward is not None or forward_conf is not None, "Must provide a forward RNN or a forward configuration"
        assert backward is not None or backward_conf is not None, "Must provide a backward RNN or a backward configuration"
        super(Bidirectional, self).__init__()
        if forward is not None:
            self.forward = forward
        else:
            # Must import inside the function, because in order to support loading
            # we must import this module inside layer_utils... ugly
            from keras.utils.layer_utils import container_from_config
            self.forward = container_from_config(forward_conf)
        if backward is not None:
            self.backward = backward
        else:
            from keras.utils.layer_utils import container_from_config
            self.backward = container_from_config(backward_conf)
        self.return_sequences = return_sequences
        self.truncate_gradient = truncate_gradient
        self.output_dim = self.forward.output_dim + self.backward.output_dim
        #if self.forward.output_dim != self.backward.output_dim:
        #    raise ValueError("Make sure `forward` and `backward` have " +
        #                     "the same `output_dim`.")

        if not (self.return_sequences == self.forward.return_sequences == self.backward.return_sequences):
            raise ValueError("Make sure 'return_sequences' is equal for self,"
                             " forward and backward.")
        if not (self.truncate_gradient == self.forward.truncate_gradient == self.backward.truncate_gradient):
            raise ValueError("Make sure 'truncate_gradient' is equal for self,"
                             " forward and backward.")
Example 3
def model_from_config(config):
    model_name = config.get('name')
    if model_name not in {'Graph', 'Sequential'}:
        raise Exception('Unrecognized model:', model_name)

    # Create a container then set class to appropriate model
    model = container_from_config(config)
    if model_name == 'Graph':
        model.__class__ = Graph
    elif model_name == 'Sequential':
        model.__class__ = Sequential

    if 'optimizer' in config:
        # if it has an optimizer, the model is assumed to be compiled
        loss = config.get('loss')
        class_mode = config.get('class_mode')
        theano_mode = config.get('theano_mode')

        optimizer_params = dict([(k, v) for k, v in config.get('optimizer').items()])
        optimizer_name = optimizer_params.pop('name')
        optimizer = optimizers.get(optimizer_name, optimizer_params)

        if model_name == 'Sequential':
            model.compile(loss=loss, optimizer=optimizer, class_mode=class_mode, theano_mode=theano_mode)
        elif model_name == 'Graph':
            model.compile(loss=loss, optimizer=optimizer, theano_mode=theano_mode)

    return model
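A minimal round-trip sketch, assuming an old Keras 0.x model whose get_config() includes the 'name' key checked above (and, once compiled, the 'optimizer' and 'loss' entries):

# model is a hypothetical, already-built Sequential; with no 'optimizer'
# entry in the config, the copy comes back uncompiled.
config = model.get_config()
rebuilt = model_from_config(config)
rebuilt.set_weights(model.get_weights())  # configs carry no weights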
Example 4
def model_from_config(config, custom_objects={}):
    model_name = config.get('name')
    if model_name not in {'Graph', 'Sequential'}:
        raise Exception('Unrecognized model:', model_name)

    # Create a container then set class to appropriate model
    model = container_from_config(config, custom_objects=custom_objects)
    if model_name == 'Graph':
        model.__class__ = Graph
    elif model_name == 'Sequential':
        model.__class__ = Sequential

    if 'optimizer' in config:
        # if it has an optimizer, the model is assumed to be compiled
        loss = config.get('loss')
        class_mode = config.get('class_mode')
        theano_mode = config.get('theano_mode')

        optimizer_params = dict([(k, v)
                                 for k, v in config.get('optimizer').items()])
        optimizer_name = optimizer_params.pop('name')
        optimizer = optimizers.get(optimizer_name, optimizer_params)

        if model_name == 'Sequential':
            model.compile(loss=loss,
                          optimizer=optimizer,
                          class_mode=class_mode,
                          theano_mode=theano_mode)
        elif model_name == 'Graph':
            model.compile(loss=loss,
                          optimizer=optimizer,
                          theano_mode=theano_mode)

    return model
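The added custom_objects mapping is forwarded to container_from_config so that user-defined layers can be resolved by class name during deserialization. A hedged sketch, where MyLayer is hypothetical:

# MyLayer is a hypothetical user-defined layer class; without the
# custom_objects entry, deserialization could not resolve its name.
model = model_from_config(config, custom_objects={'MyLayer': MyLayer})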
Example 5
def upgrade_sequential(old_model):
    """Upgrade a ``keras.models.Sequential`` instance to be fully
    convolutional.

    :param old_model: The old (not-fully-convolutional) ``Sequential`` model.
    :returns:  A new ``Sequential`` model with the same weights as the old one,
               but with flattening layers removed and dense layers replaced by
               convolutions."""
    assert isinstance(old_model, Sequential), "only works on sequences"
    rv = Sequential()
    all_layers = list(old_model.layers)

    while all_layers:
        next_layer = all_layers.pop(0)
        if isinstance(next_layer, Flatten):
            assert all_layers, "flatten must be followed by layer"
            next_dense = all_layers.pop(0)
            assert isinstance(next_dense, Dense), \
                "flatten must be followed by dense"
            # Upgrade the dense layer to a convolution with the same filter
            # size as the input
            assert len(next_layer.input_shape) == 4, "input must be conv"
            new_conv = dense_to_conv(next_dense, next_layer.input_shape[1:])
            rv.add(new_conv)

            print('Converted {} via {} to {}'.format(
                repr_layer(next_dense), repr_layer(next_layer),
                repr_layer(new_conv)
            ))
        elif isinstance(next_layer, Dense):
            assert len(next_layer.input_shape) == 2, "dense input must be 2D"
            new_conv = dense_to_conv(
                next_layer, (next_layer.input_shape[1], 1, 1)
            )
            rv.add(new_conv)

            print('Converted {} to {}'.format(
                repr_layer(next_layer), repr_layer(new_conv)
            ))
        else:
            next_layer_copy = container_from_config(
                next_layer.get_config()
            )
            rv.add(next_layer_copy)
            next_layer_weights = next_layer.get_weights()
            next_layer_copy.set_weights(next_layer_weights)

            # Just make sure that weights really are the same
            new_weights = rv.layers[-1].get_weights()
            assert len(new_weights) == len(next_layer_weights)
            assert all(
                np.all(w1 == w2)
                for w1, w2 in zip(new_weights, next_layer_weights)
            )

            print('Added {} to model unchanged (was {})'.format(
                repr_layer(next_layer_copy), repr_layer(next_layer)
            ))

    return rv
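A short usage sketch (cnn is a hypothetical trained classifier ending in Flatten + Dense; dense_to_conv and repr_layer are helpers assumed to be defined alongside upgrade_sequential):

# The returned model keeps the original weights but replaces each
# Flatten + Dense pair with an equivalent convolution, so it can be
# applied to inputs larger than the original training size.
fcn = upgrade_sequential(cnn)
fcn.compile(loss='categorical_crossentropy', optimizer='adam')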