Code example #1
File: Attention.py Project: h0n9670/tts
from tensorflow.keras.layers import LSTMCell, RNN

from hparams import hparams as hps  # hyperparameter object; import path assumed


class Decoderlstm:
    """Stacked LSTM decoder: a list of LSTMCells wrapped in a single Keras RNN layer."""

    def __init__(self):

        self.layers = hps.DecoderRNN_layers
        self.size = hps.DecoderRNN_size

        # One LSTMCell per decoder layer; RNN stacks them into one recurrent layer.
        self.lstm_list = [LSTMCell(self.size) for _ in range(self.layers)]

        self.lstm_cell = RNN(self.lstm_list,
                             return_state=True,
                             return_sequences=True)

    def build(self, input_shape):
        self.lstm_cell.build(input_shape)
        # Expose the wrapped layer's weights through the public accessors
        # instead of the private _trainable_weights attribute.
        self.trainable_weights = self.lstm_cell.trainable_weights
        self.weights = self.lstm_cell.weights

    def compute_output_shape(self, inputs):
        return self.lstm_cell.compute_output_shape(inputs)

    def get_initial_state(self, inputs):
        return self.lstm_cell.get_initial_state(inputs)

    def __call__(self, inputs, initial_state):
        return self.lstm_cell(inputs, initial_state=initial_state)
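
For context, here is a minimal, hypothetical usage sketch. It assumes a TF2-era tf.keras runtime; the batch, timestep, and feature sizes are illustrative, not taken from the project.

import numpy as np
import tensorflow as tf

decoder = Decoderlstm()

# Dummy input batch of shape (batch, timesteps, features); sizes are illustrative.
inputs = tf.constant(np.random.randn(4, 50, 80), dtype=tf.float32)

initial_state = decoder.get_initial_state(inputs)
outputs = decoder(inputs, initial_state=initial_state)

# With return_sequences=True and return_state=True, the first element is the
# full output sequence; the remaining elements are the per-cell (h, c) states.
sequence, *states = outputs
print(sequence.shape)  # (4, 50, hps.DecoderRNN_size)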
Code example #2
File: lmu.py Project: ino09/lmu
from tensorflow.keras.initializers import Constant
from tensorflow.keras.layers import Layer, RNN

# LMUCell, LMUCellFFT, Identity, and LegendreDelay are assumed to be defined
# or imported elsewhere in this file.


class LMU(Layer):
    """
    A layer of trainable low-dimensional delay systems.

    Each unit buffers its encoded input by internally representing a low-dimensional
    (i.e., compressed) version of the input window.

    Nonlinear decodings of this representation provide computations across the window,
    such as its derivative, energy, median value, etc. (*). Note that decoders can span
    across all of the units.

    By default the window lengths are trained via backpropagation, as well as the
    encoding and decoding weights.

    Optionally, the state-space matrices that implement the low-dimensional delay
    system can be trained as well, but these are shared across all of the units in the
    layer.

    Depending on which recurrent connections are enabled, this layer chooses
    between two implementations for evaluating the delay system.

    If any recurrent connections are enabled, evaluation occurs sequentially
    through a Keras RNN layer using the ``LMUCell`` cell class.
    If all recurrent connections are disabled, the delay system is evaluated
    as the convolution of the input sequence with the impulse response of the
    LMU cell, using the ``LMUCellFFT`` class.

    (*) Voelker and Eliasmith (2018). Improving spiking dynamical
    networks: Accurate delays, higher-order synapses, and time cells.
    Neural Computation, 30(3): 569-609.

    (*) Voelker and Eliasmith. "Methods and systems for implementing
    dynamic neural networks." U.S. Patent Application No. 15/243,223.
    Filing date: 2016-08-22.

    """
    def __init__(
            self,
            units,
            order,
            theta,  # relative to dt=1
            method="zoh",
            realizer=Identity(),  # TODO: Deprecate?
            factory=LegendreDelay,  # TODO: Deprecate?
            memory_to_memory=True,
            hidden_to_memory=True,
            hidden_to_hidden=True,
            trainable_input_encoders=True,
            trainable_hidden_encoders=True,
            trainable_memory_encoders=True,
            trainable_input_kernel=True,
            trainable_hidden_kernel=True,
            trainable_memory_kernel=True,
            trainable_A=False,
            trainable_B=False,
            input_encoders_initializer="lecun_uniform",
            hidden_encoders_initializer="lecun_uniform",
            memory_encoders_initializer=Constant(0),  # 'lecun_uniform',
            input_kernel_initializer="glorot_normal",
            hidden_kernel_initializer="glorot_normal",
            memory_kernel_initializer="glorot_normal",
            hidden_activation="tanh",
            return_sequences=False,
            **kwargs):
        # Note: Setting memory_to_memory, hidden_to_memory, and hidden_to_hidden to
        # False doesn't actually remove the connections, but only initializes the
        # weights to be zero and non-trainable (when using the LMUCell).
        # This behaviour may change pending a future API decision.

        self.units = units
        self.order = order
        self.theta = theta
        self.method = method
        self.realizer = realizer
        self.factory = factory
        self.memory_to_memory = memory_to_memory
        self.hidden_to_memory = hidden_to_memory
        self.hidden_to_hidden = hidden_to_hidden
        self.trainable_input_encoders = trainable_input_encoders
        self.trainable_hidden_encoders = (trainable_hidden_encoders
                                          if hidden_to_memory else False)
        self.trainable_memory_encoders = (trainable_memory_encoders
                                          if memory_to_memory else False)
        self.trainable_input_kernel = trainable_input_kernel
        self.trainable_hidden_kernel = (trainable_hidden_kernel
                                        if hidden_to_hidden else False)
        self.trainable_memory_kernel = trainable_memory_kernel
        self.trainable_A = trainable_A
        self.trainable_B = trainable_B
        self.input_encoders_initializer = input_encoders_initializer
        self.hidden_encoders_initializer = (hidden_encoders_initializer if
                                            hidden_to_memory else Constant(0))
        self.memory_encoders_initializer = (memory_encoders_initializer if
                                            memory_to_memory else Constant(0))
        self.input_kernel_initializer = input_kernel_initializer
        self.hidden_kernel_initializer = (hidden_kernel_initializer
                                          if hidden_to_hidden else Constant(0))
        self.memory_kernel_initializer = memory_kernel_initializer
        self.hidden_activation = hidden_activation
        self.return_sequences = return_sequences

        super().__init__(**kwargs)

        if self.fft_check():
            self.lmu_layer = LMUCellFFT(
                units=self.units,
                order=self.order,
                theta=self.theta,
                trainable_input_encoders=self.trainable_input_encoders,
                trainable_input_kernel=self.trainable_input_kernel,
                trainable_memory_kernel=self.trainable_memory_kernel,
                input_encoders_initializer=self.input_encoders_initializer,
                input_kernel_initializer=self.input_kernel_initializer,
                memory_kernel_initializer=self.memory_kernel_initializer,
                hidden_activation=self.hidden_activation,
                return_sequences=self.return_sequences,
            )
        else:
            self.lmu_layer = RNN(
                LMUCell(
                    units=self.units,
                    order=self.order,
                    theta=self.theta,
                    method=self.method,
                    realizer=self.realizer,
                    factory=self.factory,
                    trainable_input_encoders=self.trainable_input_encoders,
                    trainable_hidden_encoders=self.trainable_hidden_encoders,
                    trainable_memory_encoders=self.trainable_memory_encoders,
                    trainable_input_kernel=self.trainable_input_kernel,
                    trainable_hidden_kernel=self.trainable_hidden_kernel,
                    trainable_memory_kernel=self.trainable_memory_kernel,
                    trainable_A=self.trainable_A,
                    trainable_B=self.trainable_B,
                    input_encoders_initializer=self.input_encoders_initializer,
                    hidden_encoders_initializer=self.hidden_encoders_initializer,
                    memory_encoders_initializer=self.memory_encoders_initializer,
                    input_kernel_initializer=self.input_kernel_initializer,
                    hidden_kernel_initializer=self.hidden_kernel_initializer,
                    memory_kernel_initializer=self.memory_kernel_initializer,
                    hidden_activation=self.hidden_activation,
                ),
                return_sequences=self.return_sequences,
            )

    def call(self, inputs):
        """
        Applies the wrapped layer to the inputs.
        """
        # Invoke via __call__ so the wrapped layer's build and input checks run.
        return self.lmu_layer(inputs)

    def build(self, input_shape):
        """
        Initializes network parameters.
        """

        self.lmu_layer.build(input_shape)

        self.built = True

    def fft_check(self):
        """
        Checks if recurrent connections are enabled to
        automatically switch to FFT.
        """
        # Note: Only the flags are checked here. The alternative would be to check the
        # weight initializers and trainiable flag settings, however it is cumbersome
        # to check against all initializers forms that initialize the recurrent weights
        # to 0.
        #
        # These flags used below exist in other LMUCell implementations, and will be
        # brought forward in a future API decisions.
        return not (self.memory_to_memory or self.hidden_to_memory
                    or self.hidden_to_hidden)

    def get_config(self):
        """
        Overrides the tensorflow get_config function.
        """
        config = super().get_config()
        config.update(
            dict(
                units=self.units,
                order=self.order,
                theta=self.theta,
                method=self.method,
                factory=self.factory,
                memory_to_memory=self.memory_to_memory,
                hidden_to_memory=self.hidden_to_memory,
                hidden_to_hidden=self.hidden_to_hidden,
                trainable_input_encoders=self.trainable_input_encoders,
                trainable_hidden_encoders=self.trainable_hidden_encoders,
                trainable_memory_encoders=self.trainable_memory_encoders,
                trainable_input_kernel=self.trainable_input_kernel,
                trainable_hidden_kernel=self.trainable_hidden_kernel,
                trainable_memory_kernel=self.trainable_memory_kernel,
                trainable_A=self.trainable_A,
                trainable_B=self.trainable_B,
                input_encoders_initializer=self.input_encoders_initializer,
                hidden_encoders_initializer=self.hidden_encoders_initializer,
                memory_encoders_initializer=self.memory_encoders_initializer,
                input_kernel_initializer=self.input_kernel_initializer,
                hidden_kernel_initializer=self.hidden_kernel_initializer,
                memory_kernel_initializer=self.memory_kernel_initializer,
                hidden_activation=self.hidden_activation,
                return_sequences=self.return_sequences,
            ))

        return config
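
To make the implementation switch concrete, here is a minimal, hypothetical sketch; the hyperparameter values are illustrative, not taken from the project. Disabling all three recurrent connections makes fft_check() return True, so the layer is built on LMUCellFFT; with the defaults it wraps LMUCell in a Keras RNN.

# Illustrative values only; not taken from the project.
feedforward_lmu = LMU(
    units=128,
    order=16,
    theta=100,
    memory_to_memory=False,  # disabling all three recurrent connections
    hidden_to_memory=False,  # makes fft_check() return True, so the layer
    hidden_to_hidden=False,  # is evaluated as a convolution via LMUCellFFT
    return_sequences=True,
)
assert feedforward_lmu.fft_check()

recurrent_lmu = LMU(units=128, order=16, theta=100)  # defaults: all recurrent
assert not recurrent_lmu.fft_check()  # connections on; steps through RNN(LMUCell)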