Example #1
    def __init__(self,
                 lstm_model=None,
                 computable_loss=None,
                 batch_size=20,
                 input_neuron_count=100,
                 hidden_neuron_count=300,
                 observed_activating_function=None,
                 input_gate_activating_function=None,
                 forget_gate_activating_function=None,
                 output_gate_activating_function=None,
                 hidden_activating_function=None,
                 output_activating_function=None,
                 seq_len=10,
                 join_io_flag=False,
                 learning_rate=1e-05,
                 learning_attenuate_rate=0.1,
                 attenuate_epoch=50):
        '''
        Init.

        Args:
            lstm_model:                         is-a `LSTM` model.
            computable_loss:                    is-a `ComputableLoss`.

            batch_size:                         Batch size.
                                                This parameter will be referred to only when `lstm_model` is `None`.

            input_neuron_count:                 The number of input units.
                                                This parameter will be referred to only when `lstm_model` is `None`.

            hidden_neuron_count:                The number of hidden units.
                                                This parameter will be referred to only when `lstm_model` is `None`.

            observed_activating_function:       is-a `ActivatingFunctionInterface` in the hidden layer.
                                                This parameter will be referred to only when `lstm_model` is `None`.
                                                If `None`, this value will be `TanhFunction`.

            input_gate_activating_function:     is-a `ActivatingFunctionInterface` in the hidden layer.
                                                This parameter will be referred to only when `lstm_model` is `None`.
                                                If `None`, this value will be `LogisticFunction`.

            forget_gate_activating_function:    is-a `ActivatingFunctionInterface` in the hidden layer.
                                                This parameter will be referred to only when `lstm_model` is `None`.
                                                If `None`, this value will be `LogisticFunction`.

            output_gate_activating_function:    is-a `ActivatingFunctionInterface` in the hidden layer.
                                                This parameter will be referred to only when `lstm_model` is `None`.
                                                If `None`, this value will be `LogisticFunction`.

            hidden_activating_function:         is-a `ActivatingFunctionInterface` in the hidden layer.
                                                This parameter will be referred to only when `lstm_model` is `None`.

            output_activating_function:         is-a `ActivatingFunctionInterface` in the output layer.
                                                This parameter will be referred to only when `lstm_model` is `None`.
                                                If `None`, this model outputs from the LSTM's hidden layer during inference.

            seq_len:                            The length of sequences.
                                                This is the maximum step `t` referred to in feed-forward.

            join_io_flag:                       If this value is `True` and `output_activating_function` is not `None`,
                                                this model outputs tensors that combine observed data points and inferred data
                                                along the series direction.

            learning_rate:                      Learning rate.
            learning_attenuate_rate:            Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
            attenuate_epoch:                    Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
                                                Additionally, in relation to regularization,
                                                this class constrains weight matrices every `attenuate_epoch`.

        '''
        if computable_loss is None:
            computable_loss = MeanSquaredError()

        if lstm_model is not None:
            if not isinstance(lstm_model, LSTM):
                raise TypeError("lstm_model must be an instance of LSTM.")
        else:
            # Init.
            graph = LSTMGraph()

            # Activation function in LSTM.
            if observed_activating_function is None:
                graph.observed_activating_function = TanhFunction()
            else:
                if not isinstance(observed_activating_function,
                                  ActivatingFunctionInterface):
                    raise TypeError("observed_activating_function must implement ActivatingFunctionInterface.")
                graph.observed_activating_function = observed_activating_function

            if input_gate_activating_function is None:
                graph.input_gate_activating_function = LogisticFunction()
            else:
                if not isinstance(input_gate_activating_function,
                                  ActivatingFunctionInterface):
                    raise TypeError("input_gate_activating_function must implement ActivatingFunctionInterface.")
                graph.input_gate_activating_function = input_gate_activating_function

            if forget_gate_activating_function is None:
                graph.forget_gate_activating_function = LogisticFunction()
            else:
                if not isinstance(forget_gate_activating_function,
                                  ActivatingFunctionInterface):
                    raise TypeError("forget_gate_activating_function must implement ActivatingFunctionInterface.")
                graph.forget_gate_activating_function = forget_gate_activating_function

            if output_gate_activating_function is None:
                graph.output_gate_activating_function = LogisticFunction()
            else:
                if not isinstance(output_gate_activating_function,
                                  ActivatingFunctionInterface):
                    raise TypeError("output_gate_activating_function must implement ActivatingFunctionInterface.")
                graph.output_gate_activating_function = output_gate_activating_function

            if hidden_activating_function is None:
                graph.hidden_activating_function = TanhFunction()
            else:
                if not isinstance(hidden_activating_function,
                                  ActivatingFunctionInterface):
                    raise TypeError("hidden_activating_function must implement ActivatingFunctionInterface.")
                graph.hidden_activating_function = hidden_activating_function

            if output_activating_function is None:
                graph.output_activating_function = TanhFunction()
                self.__output_flag = False
                output_neuron_count = 1
            else:
                graph.output_activating_function = output_activating_function
                self.__output_flag = True
                output_neuron_count = hidden_neuron_count

            # Initialization strategy.
            # This method initializes each weight matrix and bias from a Gaussian distribution: `np.random.normal(size=hoge) * 0.01`.
            graph.create_rnn_cells(input_neuron_count=input_neuron_count,
                                   hidden_neuron_count=hidden_neuron_count,
                                   output_neuron_count=output_neuron_count)

            opt_params = SGD()
            opt_params.weight_limit = 1e+10
            opt_params.dropout_rate = 0.0

            lstm_model = LSTM(
                # Delegate `graph` to `LSTMModel`.
                graph=graph,
                # The number of epochs in mini-batch training.
                epochs=100,
                # The batch size.
                batch_size=batch_size,
                # Learning rate.
                learning_rate=learning_rate,
                # Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
                learning_attenuate_rate=learning_attenuate_rate,
                # Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
                attenuate_epoch=attenuate_epoch,
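                # E.g., with the defaults learning_rate=1e-05, learning_attenuate_rate=0.1,
                # and attenuate_epoch=50, the effective rate becomes 1e-06 after epoch 50
                # and 1e-07 after epoch 100.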
                # The length of sequences.
                seq_len=seq_len,
                # The maximum step `t` referred to in BPTT. If `0`, this class refers to all past data in BPTT.
                bptt_tau=seq_len,
                # Size of the test dataset. If this value is `0`, validation will not be executed.
                test_size_rate=0.3,
                # Loss function.
                computable_loss=computable_loss,
                # Optimizer.
                opt_params=opt_params,
                # Verification function.
                verificatable_result=VerificateFunctionApproximation(),
                tol=0.0)

        self.__lstm_model = lstm_model
        self.__seq_len = seq_len
        self.__learning_rate = learning_rate
        self.__join_io_flag = join_io_flag
        self.__computable_loss = computable_loss
        self.__loss_list = []
        self.__epoch_counter = 0
        self.__learning_attenuate_rate = learning_attenuate_rate
        self.__attenuate_epoch = attenuate_epoch

        logger = getLogger("pygan")
        self.__logger = logger
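
A minimal usage sketch for this constructor. The snippet does not name the class it belongs to, so the class name `LSTMModel` and the import path below are assumptions based on pygan's naming conventions, not something the example confirms:

# Hedged sketch: `LSTMModel` and its import path are assumptions;
# adjust them to the pygan/pydbm layout you actually have installed.
from pygan.generativemodel.lstm_model import LSTMModel

# Rely on the defaults: 100 input units, 300 hidden units,
# sequences of length 10, mini-batches of 20.
model = LSTMModel(
    batch_size=20,
    input_neuron_count=100,
    hidden_neuron_count=300,
    seq_len=10,
    learning_rate=1e-05
)

Because `output_activating_function` is left as `None`, `output_neuron_count` is fixed to 1 and the model outputs from the LSTM's hidden layer during inference, as the branch above shows.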
Example #2
    def __init__(self,
                 lstm_model=None,
                 batch_size=20,
                 input_neuron_count=100,
                 hidden_neuron_count=300,
                 hidden_activating_function=None,
                 seq_len=10,
                 learning_rate=1e-05,
                 verbose_mode=False):
        '''
        Init.

        Args:
            lstm_model:                     is-a `LSTM` model.
            batch_size:                     Batch size.
                                            This parameter will be referred to only when `lstm_model` is `None`.

            input_neuron_count:             The number of input units.
                                            This parameter will be referred to only when `lstm_model` is `None`.

            hidden_neuron_count:            The number of hidden units.
                                            This parameter will be referred to only when `lstm_model` is `None`.

            hidden_activating_function:     is-a `ActivatingFunctionInterface` in the hidden layer.
                                            This parameter will be referred to only when `lstm_model` is `None`.

            seq_len:                        The length of sequences.
                                            This is the maximum step `t` referred to in feed-forward.

            learning_rate:                  Learning rate.
            verbose_mode:                   Verbose mode or not.
        '''
        logger = getLogger("pydbm")
        handler = StreamHandler()
        if verbose_mode:
            handler.setLevel(DEBUG)
            logger.setLevel(DEBUG)
        else:
            handler.setLevel(ERROR)
            logger.setLevel(ERROR)

        logger.addHandler(handler)

        if lstm_model is not None:
            if not isinstance(lstm_model, LSTM):
                raise TypeError("lstm_model must be an instance of LSTM.")
        else:
            # Init.
            graph = LSTMGraph()

            # Activation function in LSTM.
            graph.observed_activating_function = TanhFunction()
            graph.input_gate_activating_function = LogisticFunction()
            graph.forget_gate_activating_function = LogisticFunction()
            graph.output_gate_activating_function = LogisticFunction()
            if hidden_activating_function is None:
                graph.hidden_activating_function = TanhFunction()
            else:
                if not isinstance(hidden_activating_function,
                                  ActivatingFunctionInterface):
                    raise TypeError("hidden_activating_function must implement ActivatingFunctionInterface.")
                graph.hidden_activating_function = hidden_activating_function

            graph.output_activating_function = TanhFunction()

            # Initialization strategy.
            # This method initializes each weight matrix and bias from a Gaussian distribution: `np.random.normal(size=hoge) * 0.01`.
            graph.create_rnn_cells(input_neuron_count=input_neuron_count,
                                   hidden_neuron_count=hidden_neuron_count,
                                   output_neuron_count=1)

            opt_params = SGD()
            opt_params.weight_limit = 0.5
            opt_params.dropout_rate = 0.0

            lstm_model = LSTM(
                # Delegate `graph` to `LSTMModel`.
                graph=graph,
                # The number of epochs in mini-batch training.
                epochs=100,
                # The batch size.
                batch_size=batch_size,
                # Learning rate.
                learning_rate=learning_rate,
                # Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
                learning_attenuate_rate=0.1,
                # Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
                attenuate_epoch=50,
                # The length of sequences.
                seq_len=seq_len,
                # The maximum step `t` referred to in BPTT. If `0`, this class refers to all past data in BPTT.
                bptt_tau=seq_len,
                # Size of the test dataset. If this value is `0`, validation will not be executed.
                test_size_rate=0.3,
                # Loss function.
                computable_loss=MeanSquaredError(),
                # Optimizer.
                opt_params=opt_params,
                # Verification function.
                verificatable_result=VerificateFunctionApproximation(),
                tol=0.0)

        self.__lstm_model = lstm_model
        self.__seq_len = seq_len
        self.__learning_rate = learning_rate
        self.__verbose_mode = verbose_mode
        self.__loss_list = []
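
Both examples also accept a pre-built `LSTM` via `lstm_model` instead of constructing one internally, and Example #2 lets you override only the hidden layer's activation. A hedged sketch of the latter (the class name `LSTMNetworks` and the import path are hypothetical; the snippet names neither):

# Hedged sketch: `LSTMNetworks` stands in for whichever class owns this
# `__init__`; the import path is an assumption based on pydbm's packaging.
from pydbm.activation.logistic_function import LogisticFunction

model = LSTMNetworks(
    batch_size=20,
    input_neuron_count=100,
    hidden_neuron_count=300,
    # Override the TanhFunction default in the hidden layer.
    hidden_activating_function=LogisticFunction(),
    seq_len=10,
    verbose_mode=True
)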