Example 1
    def __init__(self,
                 convolutional_auto_encoder=None,
                 batch_size=10,
                 channel=1,
                 learning_rate=1e-10,
                 learning_attenuate_rate=0.1,
                 attenuate_epoch=50,
                 opt_params=None,
                 feature_matching_layer=0):
        '''
        Init.
        
        Args:
            convolutional_auto_encoder:         is-a `pydbm.cnn.convolutionalneuralnetwork.convolutionalautoencoder.ConvolutionalLadderNetworks`.
            batch_size:                         Batch size in mini-batch.
            learning_rate:                      Learning rate.
            learning_attenuate_rate:            Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
            attenuate_epoch:                    Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
                                                Additionally, in relation to regularization,
                                                this class constrains weight matrices every `attenuate_epoch`.

            opt_params:                         is-a `pydbm.optimization.opt_params.OptParams`.
            feature_matching_layer:             Key of layer number for feature matching forward/backward.

        '''
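        # A minimal sketch of the attenuation rule described in the docstring
        # (illustrative only, not part of this class; the loop variables are assumed):
        #
        #     if epoch % attenuate_epoch == 0:
        #         learning_rate = learning_rate * learning_attenuate_rate
        #
        # With the defaults above, `learning_rate` starts at 1e-10 and is
        # multiplied by 0.1 every 50 epochs.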

        if isinstance(convolutional_auto_encoder,
                      CLN) is False and convolutional_auto_encoder is not None:
            raise TypeError(
                "The type of `convolutional_auto_encoder` must be `pydbm.cnn.convolutionalneuralnetwork.convolutional_auto_encoder.ConvolutionalAutoEncoder`."
            )

        if opt_params is None:
            opt_params = Adam()
            opt_params.weight_limit = 1e+10
            opt_params.dropout_rate = 0.0

        if convolutional_auto_encoder is None:
            scale = 0.01
            conv1 = ConvolutionLayer1(
                ConvGraph1(activation_function=TanhFunction(),
                           filter_num=batch_size,
                           channel=channel,
                           kernel_size=3,
                           scale=scale,
                           stride=1,
                           pad=1))

            conv2 = ConvolutionLayer2(
                ConvGraph2(activation_function=TanhFunction(),
                           filter_num=batch_size,
                           channel=batch_size,
                           kernel_size=3,
                           scale=scale,
                           stride=1,
                           pad=1))

            convolutional_auto_encoder = CLN(
                layerable_cnn_list=[conv1, conv2],
                epochs=100,
                batch_size=batch_size,
                learning_rate=learning_rate,
                learning_attenuate_rate=learning_attenuate_rate,
                attenuate_epoch=attenuate_epoch,
                computable_loss=MeanSquaredError(),
                opt_params=opt_params,
                verificatable_result=VerificateFunctionApproximation(),
                test_size_rate=0.3,
                tol=1e-15,
                save_flag=False)

        self.__convolutional_auto_encoder = convolutional_auto_encoder
        self.__learning_rate = learning_rate
        self.__attenuate_epoch = attenuate_epoch
        self.__learning_attenuate_rate = learning_attenuate_rate

        self.__epoch_counter = 0
        self.__feature_matching_layer = feature_matching_layer
        logger = getLogger("pygan")
        self.__logger = logger

        super().__init__(convolutional_auto_encoder=convolutional_auto_encoder,
                         batch_size=batch_size,
                         channel=channel,
                         learning_rate=learning_rate,
                         learning_attenuate_rate=learning_attenuate_rate,
                         attenuate_epoch=attenuate_epoch,
                         opt_params=opt_params,
                         feature_matching_layer=feature_matching_layer)

        self.__alpha_loss_list = []
        self.__sigma_loss_list = []
        self.__mu_loss_list = []
Example 2
    def __init__(self,
                 lstm_model=None,
                 batch_size=20,
                 input_neuron_count=100,
                 hidden_neuron_count=300,
                 hidden_activating_function=None,
                 seq_len=10,
                 learning_rate=1e-05,
                 verbose_mode=False):
        '''
        Init.

        Args:
            lstm_model:                     is-a `LSTM`.
            batch_size:                     Batch size.
                                            This parameter will be referred to only when `lstm_model` is `None`.

            input_neuron_count:             The number of input units.
                                            This parameter will be referred to only when `lstm_model` is `None`.

            hidden_neuron_count:            The number of hidden units.
                                            This parameter will be referred to only when `lstm_model` is `None`.

            hidden_activating_function:     is-a `ActivatingFunctionInterface` in the hidden layer.
                                            This parameter will be referred to only when `lstm_model` is `None`.

            seq_len:                        The length of sequences.
                                            This means the maximum step `t` referred to in feedforward.

            learning_rate:                  Learning rate.
            verbose_mode:                   Verbose mode or not.
        '''
        logger = getLogger("pydbm")
        handler = StreamHandler()
        if verbose_mode is True:
            handler.setLevel(DEBUG)
            logger.setLevel(DEBUG)
        else:
            handler.setLevel(ERROR)
            logger.setLevel(ERROR)

        logger.addHandler(handler)

        if lstm_model is not None:
            if isinstance(lstm_model, LSTM) is False:
                raise TypeError()
        else:
            # Init.
            graph = LSTMGraph()

            # Activation function in LSTM.
            graph.observed_activating_function = TanhFunction()
            graph.input_gate_activating_function = LogisticFunction()
            graph.forget_gate_activating_function = LogisticFunction()
            graph.output_gate_activating_function = LogisticFunction()
            if hidden_activating_function is None:
                graph.hidden_activating_function = TanhFunction()
            else:
                if isinstance(hidden_activating_function,
                              ActivatingFunctionInterface) is False:
                    raise TypeError()
                graph.hidden_activating_function = hidden_activating_function

            graph.output_activating_function = TanhFunction()

            # Initialization strategy.
            # This method initializes each weight matrix and bias from a Gaussian distribution: `np.random.normal(size=hoge) * 0.01`.
            graph.create_rnn_cells(input_neuron_count=input_neuron_count,
                                   hidden_neuron_count=hidden_neuron_count,
                                   output_neuron_count=1)

            opt_params = SGD()
            opt_params.weight_limit = 0.5
            opt_params.dropout_rate = 0.0

            lstm_model = LSTM(
                # Delegate `graph` to `LSTMModel`.
                graph=graph,
                # The number of epochs in mini-batch training.
                epochs=100,
                # The batch size.
                batch_size=batch_size,
                # Learning rate.
                learning_rate=1e-05,
                # Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
                learning_attenuate_rate=0.1,
                # Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
                attenuate_epoch=50,
                # The length of sequences.
                seq_len=seq_len,
                # Maximum step `t` referred to in BPTT. If `0`, this class refers to all past data in BPTT.
                bptt_tau=seq_len,
                # Size of Test data set. If this value is `0`, the validation will not be executed.
                test_size_rate=0.3,
                # Loss function.
                computable_loss=MeanSquaredError(),
                # Optimizer.
                opt_params=opt_params,
                # Verification function.
                verificatable_result=VerificateFunctionApproximation(),
                tol=0.0)

        self.__lstm_model = lstm_model
        self.__seq_len = seq_len
        self.__learning_rate = learning_rate
        self.__verbose_mode = verbose_mode
        self.__loss_list = []
Example 3
    def __init__(self,
                 batch_size,
                 nn_layer_list,
                 learning_rate=1e-05,
                 computable_loss=None,
                 opt_params=None,
                 verificatable_result=None,
                 nn=None,
                 feature_matching_layer=0):
        '''
        Init.

        Args:
            batch_size:                     Batch size in mini-batch.
            nn_layer_list:                  `list` of `NNLayer`.
            learning_rate:                  Learning rate.
            computable_loss:                is-a `ComputableLoss`.
                                            This parameter will be referred to only when `nn` is `None`.

            opt_params:                     is-a `OptParams`.
                                            This parameter will be referred to only when `nn` is `None`.

            verificatable_result:           is-a `VerificateFunctionApproximation`.
                                            This parameter will be referred to only when `nn` is `None`.

            nn:                             is-a `NeuralNetwork` as a model in this class.
                                            If not `None`, `self.__nn` will be overridden by this `nn`.
                                            If `None`, this class initializes a `NeuralNetwork`
                                            with default hyperparameters.

            feature_matching_layer:         Key of layer number for feature matching forward/backward.

        '''
        if nn is None:
            if computable_loss is None:
                computable_loss = MeanSquaredError()

            if isinstance(computable_loss, ComputableLoss) is False:
                raise TypeError()

            if verificatable_result is None:
                verificatable_result = VerificateFunctionApproximation()

            if isinstance(verificatable_result, VerificatableResult) is False:
                raise TypeError()

            if opt_params is None:
                opt_params = Adam()
                opt_params.weight_limit = 0.5
                opt_params.dropout_rate = 0.0

            if isinstance(opt_params, OptParams) is False:
                raise TypeError()

            nn = NeuralNetwork(
                # The `list` of `NNLayer`.
                nn_layer_list=nn_layer_list,
                # The number of epochs in mini-batch training.
                epochs=200,
                # The batch size.
                batch_size=batch_size,
                # Learning rate.
                learning_rate=learning_rate,
                # Loss function.
                computable_loss=computable_loss,
                # Optimizer.
                opt_params=opt_params,
                # Verification.
                verificatable_result=verificatable_result,
                # Pre-learned parameters.
                pre_learned_path_list=None,
                # Others.
                learning_attenuate_rate=0.1,
                attenuate_epoch=50)

        self.__nn = nn
        self.__batch_size = batch_size
        self.__learning_rate = learning_rate
        self.__q_shape = None
        self.__loss_list = []
        self.__feature_matching_layer = feature_matching_layer
        self.__epoch_counter = 0
        logger = getLogger("pygan")
        self.__logger = logger
Example 4
    def __init__(self,
                 batch_size,
                 nn_layer_list,
                 learning_rate=1e-05,
                 computable_loss=None,
                 opt_params=None,
                 verificatable_result=None,
                 pre_learned_path_list=None,
                 nn=None,
                 verbose_mode=False):
        '''
        Init.

        Args:
            batch_size:                     Batch size in mini-batch.
            nn_layer_list:                  `list` of `NNLayer`.
            learning_rate:                  Learning rate.
            computable_loss:                is-a `ComputableLoss`.
            opt_params:                     is-a `OptParams`.
            verificatable_result:           is-a `VerificateFunctionApproximation`.
            pre_learned_path_list:          `list` of file paths that store pre-learned parameters.
                                            This parameter will be referred to only when `nn` is `None`.

            nn:                             is-a `NeuralNetwork` as a model in this class.
                                            If not `None`, `self.__nn` will be overridden by this `nn`.
                                            If `None`, this class initializes a `NeuralNetwork`
                                            with default hyperparameters.

            verbose_mode:                   Verbose mode or not.

        '''
        logger = getLogger("pydbm")
        handler = StreamHandler()
        if verbose_mode is True:
            handler.setLevel(DEBUG)
            logger.setLevel(DEBUG)
        else:
            handler.setLevel(ERROR)
            logger.setLevel(ERROR)

        logger.addHandler(handler)
        if computable_loss is None:
            computable_loss = MeanSquaredError()
        if verificatable_result is None:
            verificatable_result = VerificateFunctionApproximation()
        if opt_params is None:
            opt_params = Adam()
            opt_params.weight_limit = 0.5
            opt_params.dropout_rate = 0.0

        if nn is None:
            nn = NeuralNetwork(
                # The `list` of `NNLayer`.
                nn_layer_list=nn_layer_list,
                # The number of epochs in mini-batch training.
                epochs=200,
                # The batch size.
                batch_size=batch_size,
                # Learning rate.
                learning_rate=learning_rate,
                # Loss function.
                computable_loss=computable_loss,
                # Optimizer.
                opt_params=opt_params,
                # Verification.
                verificatable_result=verificatable_result,
                # Pre-learned parameters.
                pre_learned_path_list=pre_learned_path_list,
                # Others.
                learning_attenuate_rate=0.1,
                attenuate_epoch=50)

        self.__nn = nn
        self.__batch_size = batch_size
        self.__computable_loss = computable_loss
        self.__learning_rate = learning_rate
        self.__verbose_mode = verbose_mode
        self.__q_shape = None
        self.__loss_list = []
Example 5
    def __init__(self,
                 batch_size,
                 conv_lstm_model,
                 seq_len=10,
                 learning_rate=1e-05,
                 computable_loss=None,
                 opt_params=None,
                 verificatable_result=None,
                 verbose_mode=False):
        '''
        Init.

        Args:
            batch_size:                     Batch size in mini-batch.
            conv_lstm_model:                is-a `ConvLSTMModel`.
            seq_len:                        The length of sequences.
            learning_rate:                  Learning rate.
            computable_loss:                is-a `ComputableLoss`.
            opt_params:                     is-a `OptParams`.
            verificatable_result:           is-a `VerificateFunctionApproximation`.
            verbose_mode:                   Verbose mode or not.

        '''
        logger = getLogger("pydbm")
        handler = StreamHandler()
        if verbose_mode is True:
            handler.setLevel(DEBUG)
            logger.setLevel(DEBUG)
        else:
            handler.setLevel(ERROR)
            logger.setLevel(ERROR)

        logger.addHandler(handler)

        self.__logger = getLogger("pyqlearning")
        handler = StreamHandler()
        if verbose_mode is True:
            self.__logger.setLevel(DEBUG)
        else:
            self.__logger.setLevel(ERROR)

        self.__logger.addHandler(handler)

        if isinstance(conv_lstm_model, ConvLSTMModel) is False:
            raise TypeError()

        if computable_loss is None:
            computable_loss = MeanSquaredError()
        if verificatable_result is None:
            verificatable_result = VerificateFunctionApproximation()
        if opt_params is None:
            opt_params = Adam()
            opt_params.weight_limit = 0.5
            opt_params.dropout_rate = 0.0

        self.__conv_lstm_model = conv_lstm_model
        self.__seq_len = seq_len
        self.__batch_size = batch_size
        self.__computable_loss = computable_loss
        self.__learning_rate = learning_rate
        self.__verbose_mode = verbose_mode
        self.__loss_list = []
class FeatureMatching(object):
    '''
    Value function with feature matching, which addresses the instability of GANs
    by specifying a new objective for the generator that prevents it from overtraining
    on the current discriminator (Salimans, T., et al., 2016).
    
    "Instead of directly maximizing the output of the discriminator, 
    the new objective requires the generator to generate data that matches
    the statistics of the real data, where we use the discriminator only to specify 
    the statistics that we think are worth matching." (Salimans, T., et al., 2016, p. 2)

    References:
        - Salimans, T., Goodfellow, I., Zaremba, W., Cheung, V., Radford, A., & Chen, X. (2016). Improved techniques for training gans. In Advances in neural information processing systems (pp. 2234-2242).
        - Yang, L. C., Chou, S. Y., & Yang, Y. H. (2017). MidiNet: A convolutional generative adversarial network for symbolic-domain music generation. arXiv preprint arXiv:1703.10847.
    '''
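    # A sketch of the objective quoted above, in assumed notation (not from the
    # source): with discriminator feature map `f`, true sample `x`, and generated
    # sample `G(z)`, feature matching has the generator minimize
    #
    #     L_FM = || f(x) - f(G(z)) ||^2
    #
    # In this class, `feature_matching_forward` plays the role of `f`, and
    # `lambda1`/`lambda2` mix this loss with a plain data-space loss between
    # the generated data points and the true samples.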
    def __init__(self, lambda1=1.0, lambda2=0.0, computable_loss=None):
        '''
        Init.

        Args:
            lambda1:            Weight for results of standard feature matching.
            lambda2:            Weight for results of difference between generated data points and true samples.
            computable_loss:    is-a `pydbm.loss.interface.computable_loss.ComputableLoss`.
                                If `None`, the default value is a `MeanSquaredError`.

        Exceptions:
            ValueError:     When the sum of `lambda1` and `lambda2` is greater than `1.0`.
                            Those parameters are trade-off parameters.
        '''
        self.__true_arr = None
        if computable_loss is None:
            self.__computable_loss = MeanSquaredError()
        else:
            if isinstance(computable_loss, ComputableLoss) is False:
                raise TypeError(
                    "The type of `computable_loss` must be `ComputableLoss`.")
            self.__computable_loss = computable_loss
        self.__loss_list = []

        if lambda1 + lambda2 > 1:
            raise ValueError(
                "The sum of `lambda1` and `lambda2` must not be greater than `1.0`. Those parameters are trade-off parameters."
            )
        self.__lambda1 = lambda1
        self.__lambda2 = lambda2

    def compute_delta(self, true_sampler, discriminative_model, generated_arr):
        '''
        Compute generator's reward.

        Args:
            true_sampler:           Sampler which draws samples from the `true` distribution.
            discriminative_model:   Discriminator which discriminates `true` from `fake`.
            generated_arr:          `np.ndarray` of generated data points.
        
        Returns:
            `np.ndarray` of gradients.
        '''
        if isinstance(true_sampler, TrueSampler) is False:
            raise TypeError(
                "The type of `true_sampler` must be `TrueSampler`.")

        if isinstance(discriminative_model, DiscriminativeModel) is False:
            raise TypeError(
                "The type of `discriminative_model` must be `DiscriminativeModel`."
            )

        self.__true_arr = true_sampler.draw()

        grad_arr1 = np.zeros_like(generated_arr)
        grad_arr2 = np.zeros_like(generated_arr)
        loss1, loss2 = 0.0, 0.0
        if self.__lambda1 > 0.0:
            _generated_arr = discriminative_model.feature_matching_forward(
                generated_arr)
            _true_arr = discriminative_model.feature_matching_forward(
                self.__true_arr)

            grad_arr1 = self.__computable_loss.compute_delta(
                _generated_arr, _true_arr)

            grad_arr1 = discriminative_model.feature_matching_backward(
                grad_arr1)
            grad_arr1 = grad_arr1.reshape(generated_arr.shape)

            loss1 = self.__computable_loss.compute_loss(
                _generated_arr, _true_arr)
        if self.__lambda2 > 0.0:
            grad_arr2 = self.__computable_loss.compute_delta(
                generated_arr, self.__true_arr)
            loss2 = self.__computable_loss.compute_loss(
                generated_arr, self.__true_arr)
        grad_arr = (grad_arr1 * self.__lambda1) + (grad_arr2 * self.__lambda2)
        loss = (loss1 * self.__lambda1) + (loss2 * self.__lambda2)

        self.__loss_list.append(loss)
        return grad_arr

    def set_readonly(self, value):
        ''' setter '''
        raise TypeError("This property must be read-only.")

    def get_true_arr(self):
        ''' getter '''
        return self.__true_arr

    true_arr = property(get_true_arr, set_readonly)

    def get_computable_loss(self):
        ''' getter '''
        return self.__computable_loss

    computable_loss = property(get_computable_loss, set_readonly)

    def get_loss_arr(self):
        ''' getter '''
        return np.array(self.__loss_list)

    loss_arr = property(get_loss_arr, set_readonly)
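
A minimal usage sketch for `FeatureMatching` (hypothetical: `true_sampler`, `discriminative_model`, and `generated_arr` stand for any concrete `TrueSampler`, `DiscriminativeModel`, and generated batch):

    feature_matching = FeatureMatching(lambda1=0.5, lambda2=0.5)
    # Gradient to backpropagate into the generator.
    grad_arr = feature_matching.compute_delta(
        true_sampler=true_sampler,
        discriminative_model=discriminative_model,
        generated_arr=generated_arr
    )
    # Loss history accumulated across calls to `compute_delta`.
    loss_arr = feature_matching.loss_arr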
    def learn(self,
              sentence_list,
              token_master_list,
              hidden_neuron_count=200,
              epochs=100,
              batch_size=100,
              learning_rate=1e-05,
              learning_attenuate_rate=0.1,
              attenuate_epoch=50,
              bptt_tau=8,
              weight_limit=0.5,
              dropout_rate=0.5,
              test_size_rate=0.3):
        '''
        Learn.
        
        Args:
            sentence_list:                  The list of tokenized sentences.
                                            [[`token`, `token`, `token`, ...],
                                            [`token`, `token`, `token`, ...],
                                            [`token`, `token`, `token`, ...]]

            token_master_list:              Unique `list` of tokens.
            hidden_neuron_count:            The number of units in hidden layer.
            epochs:                         Epochs of mini-batch training.
            batch_size:                     Batch size in mini-batch training.
            learning_rate:                  Learning rate.
            learning_attenuate_rate:        Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
            attenuate_epoch:                Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
                                            Additionally, in relation to regularization,
                                            this class constrains weight matrices every `attenuate_epoch`.

            bptt_tau:                       Maximum step `t` referred to in Backpropagation Through Time (BPTT).
            weight_limit:                   Regularization for the weight matrix:
                                            the weight matrix is repeatedly multiplied by `0.9`
                                            until $\sum_{j=0}^{n}w_{ji}^2 < weight\_limit$.

            dropout_rate:                   The probability of dropout.
            test_size_rate:                 Size of Test data set. If this value is `0`, the validation will not be executed.
        '''
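        # A sketch of the `weight_limit` constraint described in the docstring
        # (assumed behavior; `w_arr` is a hypothetical weight matrix):
        #
        #     while (w_arr ** 2).sum() >= weight_limit:
        #         w_arr = w_arr * 0.9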
        observed_arr = self.__setup_dataset(sentence_list, token_master_list)

        self.__logger.debug("Shape of observed data points:")
        self.__logger.debug(observed_arr.shape)

        # Init.
        encoder_graph = EncoderGraph()

        # Activation function in LSTM.
        encoder_graph.observed_activating_function = LogisticFunction()
        encoder_graph.input_gate_activating_function = LogisticFunction()
        encoder_graph.forget_gate_activating_function = LogisticFunction()
        encoder_graph.output_gate_activating_function = LogisticFunction()
        encoder_graph.hidden_activating_function = LogisticFunction()
        encoder_graph.output_activating_function = LogisticFunction()

        # Initialization strategy.
        # This method initializes each weight matrix and bias from a Gaussian distribution: `np.random.normal(size=hoge) * 0.01`.
        encoder_graph.create_rnn_cells(
            input_neuron_count=observed_arr.shape[-1],
            hidden_neuron_count=hidden_neuron_count,
            output_neuron_count=1)

        # Init.
        decoder_graph = DecoderGraph()

        # Activation function in LSTM.
        decoder_graph.observed_activating_function = LogisticFunction()
        decoder_graph.input_gate_activating_function = LogisticFunction()
        decoder_graph.forget_gate_activating_function = LogisticFunction()
        decoder_graph.output_gate_activating_function = LogisticFunction()
        decoder_graph.hidden_activating_function = LogisticFunction()
        decoder_graph.output_activating_function = LogisticFunction()

        # Initialization strategy.
        # This method initializes each weight matrix and bias from a Gaussian distribution: `np.random.normal(size=hoge) * 0.01`.
        decoder_graph.create_rnn_cells(
            input_neuron_count=hidden_neuron_count,
            hidden_neuron_count=hidden_neuron_count,
            output_neuron_count=observed_arr.shape[-1])

        encoder_opt_params = EncoderAdam()
        encoder_opt_params.weight_limit = weight_limit
        encoder_opt_params.dropout_rate = dropout_rate

        encoder = Encoder(
            # Delegate `graph` to `LSTMModel`.
            graph=encoder_graph,
            # The number of epochs in mini-batch training.
            epochs=epochs,
            # The batch size.
            batch_size=batch_size,
            # Learning rate.
            learning_rate=learning_rate,
            # Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
            learning_attenuate_rate=learning_attenuate_rate,
            # Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
            attenuate_epoch=attenuate_epoch,
            # Maximum step `t` referred to in BPTT. If `0`, this class refers to all past data in BPTT.
            bptt_tau=bptt_tau,
            # Size of Test data set. If this value is `0`, the validation will not be executed.
            test_size_rate=test_size_rate,
            # Loss function.
            computable_loss=MeanSquaredError(),
            # Optimizer.
            opt_params=encoder_opt_params,
            # Verification function.
            verificatable_result=VerificateFunctionApproximation(),
            tol=0.0)

        decoder_opt_params = DecoderAdam()
        decoder_opt_params.weight_limit = weight_limit
        decoder_opt_params.dropout_rate = dropout_rate

        decoder = Decoder(
            # Delegate `graph` to `LSTMModel`.
            graph=decoder_graph,
            # The number of epochs in mini-batch training.
            epochs=epochs,
            # The batch size.
            batch_size=batch_size,
            # Learning rate.
            learning_rate=learning_rate,
            # Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
            learning_attenuate_rate=learning_attenuate_rate,
            # Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
            attenuate_epoch=attenuate_epoch,
            # The length of sequences.
            seq_len=observed_arr.shape[1],
            # Maximum step `t` referred to in BPTT. If `0`, this class refers to all past data in BPTT.
            bptt_tau=bptt_tau,
            # Size of Test data set. If this value is `0`, the validation will not be executed.
            test_size_rate=test_size_rate,
            # Loss function.
            computable_loss=MeanSquaredError(),
            # Optimizer.
            opt_params=decoder_opt_params,
            # Verification function.
            verificatable_result=VerificateFunctionApproximation(),
            tol=0.0)

        encoder_decoder_controller = EncoderDecoderController(
            encoder=encoder,
            decoder=decoder,
            epochs=epochs,
            batch_size=batch_size,
            learning_rate=learning_rate,
            learning_attenuate_rate=learning_attenuate_rate,
            attenuate_epoch=attenuate_epoch,
            test_size_rate=test_size_rate,
            computable_loss=MeanSquaredError(),
            verificatable_result=VerificateFunctionApproximation(),
            tol=0.0)

        # Learning.
        encoder_decoder_controller.learn(observed_arr, observed_arr)

        self.__controller = encoder_decoder_controller
        self.__token_master_list = token_master_list
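
A minimal usage sketch for the `learn` method above (hypothetical: `model` stands for an instance of the class this method belongs to, which is not named in this excerpt):

    sentence_list = [
        ["this", "is", "a", "pen"],
        ["that", "is", "a", "pen"]
    ]
    token_master_list = list(set(token for sentence in sentence_list for token in sentence))
    model.learn(sentence_list, token_master_list, hidden_neuron_count=200, epochs=100, batch_size=2)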
Example 8
    def __init__(self,
                 batch_size,
                 layerable_cnn_list,
                 cnn_output_graph,
                 learning_rate=1e-05,
                 computable_loss=None,
                 opt_params=None,
                 verificatable_result=None,
                 cnn=None,
                 verbose_mode=False):
        '''
        Init.

        Args:
            batch_size:                     Batch size in mini-batch.
            layerable_cnn_list:             `list` of `LayerableCNN`.
            cnn_output_graph:               is-a `CNNOutputGraph`.
            learning_rate:                  Learning rate.
            computable_loss:                is-a `ComputableLoss`.
                                            This parameter will be referred to only when `cnn` is `None`.

            opt_params:                     is-a `OptParams`.
                                            This parameter will be referred to only when `cnn` is `None`.

            verificatable_result:           is-a `VerificateFunctionApproximation`.
                                            This parameter will be referred to only when `cnn` is `None`.

            cnn:                            is-a `ConvolutionalNeuralNetwork` as a model in this class.
                                            If not `None`, `self.__cnn` will be overridden by this `cnn`.
                                            If `None`, this class initializes a `ConvolutionalNeuralNetwork`
                                            with default hyperparameters.

            verbose_mode:                   Verbose mode or not.

        '''
        for layerable_cnn in layerable_cnn_list:
            if isinstance(layerable_cnn, LayerableCNN) is False:
                raise TypeError()

        logger = getLogger("pydbm")
        handler = StreamHandler()
        if verbose_mode is True:
            handler.setLevel(DEBUG)
            logger.setLevel(DEBUG)
        else:
            handler.setLevel(ERROR)
            logger.setLevel(ERROR)

        logger.addHandler(handler)

        self.__layerable_cnn_list = layerable_cnn_list
        self.__learning_rate = learning_rate
        self.__opt_params = opt_params
        self.__logger = logger

        if cnn is None:
            if computable_loss is None:
                computable_loss = MeanSquaredError()
            if isinstance(computable_loss, ComputableLoss) is False:
                raise TypeError()

            if verificatable_result is None:
                verificatable_result = VerificateFunctionApproximation()
            if isinstance(verificatable_result, VerificatableResult) is False:
                raise TypeError()

            if opt_params is None:
                opt_params = Adam()
                opt_params.weight_limit = 0.5
                opt_params.dropout_rate = 0.0
            if isinstance(opt_params, OptParams) is False:
                raise TypeError()

            cnn = ConvolutionalNeuralNetwork(
                layerable_cnn_list=layerable_cnn_list,
                computable_loss=computable_loss,
                opt_params=opt_params,
                verificatable_result=verificatable_result,
                epochs=100,
                batch_size=batch_size,
                learning_rate=learning_rate,
                learning_attenuate_rate=0.1,
                test_size_rate=0.3,
                tol=1e-15,
                tld=100.0,
                save_flag=False,
                pre_learned_path_list=None)
            cnn.setup_output_layer(cnn_output_graph)

        self.__cnn = cnn
        self.__batch_size = batch_size
        self.__computable_loss = computable_loss
        self.__learning_rate = learning_rate
        self.__verbose_mode = verbose_mode
        self.__q_shape = None
        self.__loss_list = []
Example 9
    def __init__(
        self,
        batch_size,
        layerable_cnn_list,
        learning_rate=1e-05,
        computable_loss=None,
        opt_params=None,
        verificatable_result=None,
        pre_learned_path_list=None,
        fc_w_arr=None,
        fc_activation_function=None,
        verbose_mode=False
    ):
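        '''
        Init.

        Args (descriptions inferred from the sibling examples above; this excerpt has no docstring of its own):
            batch_size:                     Batch size in mini-batch.
            layerable_cnn_list:             `list` of `LayerableCNN`.
            learning_rate:                  Learning rate.
            computable_loss:                is-a `ComputableLoss`.
            opt_params:                     is-a `OptParams`.
            verificatable_result:           is-a `VerificateFunctionApproximation`.
            pre_learned_path_list:          `list` of file paths that store pre-learned parameters.
            fc_w_arr:                       Weight matrix of a fully-connected output layer (assumed from the name).
            fc_activation_function:         Activation function of that fully-connected layer (assumed from the name).
            verbose_mode:                   Verbose mode or not.
        '''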
        logger = getLogger("pydbm")
        handler = StreamHandler()
        if verbose_mode is True:
            handler.setLevel(DEBUG)
            logger.setLevel(DEBUG)
        else:
            handler.setLevel(ERROR)
            logger.setLevel(ERROR)
            
        logger.addHandler(handler)

        self.__logger = getLogger("pyqlearning")
        handler = StreamHandler()
        if verbose_mode is True:
            self.__logger.setLevel(DEBUG)
        else:
            self.__logger.setLevel(ERROR)
            
        self.__logger.addHandler(handler)

        if computable_loss is None:
            computable_loss = MeanSquaredError()
        if verificatable_result is None:
            verificatable_result = VerificateFunctionApproximation()
        if opt_params is None:
            opt_params = Adam()
            opt_params.weight_limit = 0.5
            opt_params.dropout_rate = 0.0

        cnn = ConvolutionalNeuralNetwork(
            # The `list` of `ConvolutionLayer`.
            layerable_cnn_list=layerable_cnn_list,
            # The number of epochs in mini-batch training.
            epochs=200,
            # The batch size.
            batch_size=batch_size,
            # Learning rate.
            learning_rate=learning_rate,
            # Loss function.
            computable_loss=computable_loss,
            # Optimizer.
            opt_params=opt_params,
            # Verification.
            verificatable_result=verificatable_result,
            # Others.
            learning_attenuate_rate=0.1,
            attenuate_epoch=50
        )
        self.__cnn = cnn
        self.__batch_size = batch_size
        self.__computable_loss = computable_loss
        self.__learning_rate = learning_rate
        self.__verbose_mode = verbose_mode
        self.__fc_w_arr = fc_w_arr
        self.__fc_activation_function = fc_activation_function
        self.__q_shape = None
        self.__q_logs_list = []
Example 10
    def __init__(
        self,
        batch_size,
        nn_layer_list,
        learning_rate=1e-05,
        learning_attenuate_rate=0.1,
        attenuate_epoch=50,
        computable_loss=None,
        opt_params=None,
        verificatable_result=None,
        pre_learned_path_list=None,
        nn=None
    ):
        '''
        Init.

        Args:
            batch_size:                     Batch size in mini-batch.
            nn_layer_list:                  `list` of `NNLayer`.
            learning_rate:                  Learning rate.
            learning_attenuate_rate:        Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
            attenuate_epoch:                Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
                                            Additionally, in relation to regularization,
                                            this class constrains weight matrices every `attenuate_epoch`.

            computable_loss:                is-a `ComputableLoss`.
            opt_params:                     is-a `OptParams`.
            verificatable_result:           is-a `VerificateFunctionApproximation`.
            pre_learned_path_list:          `list` of file paths that store pre-learned parameters.
                                            This parameter will be referred to only when `nn` is `None`.

            nn:                             is-a `NeuralNetwork` as a model in this class.
                                            If not `None`, `self.__nn` will be overridden by this `nn`.
                                            If `None`, this class initializes a `NeuralNetwork`
                                            with default hyperparameters.

        '''
        if computable_loss is None:
            computable_loss = MeanSquaredError()
        if verificatable_result is None:
            verificatable_result = VerificateFunctionApproximation()
        if opt_params is None:
            opt_params = Adam()
            opt_params.weight_limit = 1e+10
            opt_params.dropout_rate = 0.0

        if nn is None:
            nn = NeuralNetwork(
                # The `list` of `NNLayer`.
                nn_layer_list=nn_layer_list,
                # The number of epochs in mini-batch training.
                epochs=200,
                # The batch size.
                batch_size=batch_size,
                # Learning rate.
                learning_rate=learning_rate,
                # Loss function.
                computable_loss=computable_loss,
                # Optimizer.
                opt_params=opt_params,
                # Verification.
                verificatable_result=verificatable_result,
                # Pre-learned parameters.
                pre_learned_path_list=pre_learned_path_list,
                # Others.
                learning_attenuate_rate=learning_attenuate_rate,
                attenuate_epoch=attenuate_epoch
            )

        self.__nn = nn
        self.__batch_size = batch_size
        self.__computable_loss = computable_loss
        self.__learning_rate = learning_rate
        self.__q_shape = None
        self.__loss_list = []
        self.__epoch_counter = 0
        self.__learning_attenuate_rate = learning_attenuate_rate
        self.__attenuate_epoch = attenuate_epoch

        logger = getLogger("pygan")
        self.__logger = logger
Example 11
    def __init__(self,
                 batch_size=20,
                 learning_rate=1e-10,
                 opt_params=None,
                 convolutional_auto_encoder=None,
                 deconvolution_layer_list=None,
                 gray_scale_flag=True,
                 verbose_mode=False):
        '''
        Init.

        Args:
            batch_size:                     Batch size in mini-batch.
            learning_rate:                  Learning rate.
            opt_params:                     is-a `OptParams`.
            convolutional_auto_encoder:     is-a `pydbm.cnn.convolutionalneuralnetwork.convolutional_auto_encoder.ConvolutionalAutoEncoder`.
            deconvolution_layer_list:       `list` of `DeconvolutionLayer`.
            gray_scale_flag:                Gray scale or not. If `True`, the channel will be `1`. If `False`, the channel will be `3`.
            verbose_mode:                   Verbose mode or not.
        '''
        logger = getLogger("pydbm")
        handler = StreamHandler()
        if verbose_mode is True:
            handler.setLevel(DEBUG)
            logger.setLevel(DEBUG)
        else:
            handler.setLevel(ERROR)
            logger.setLevel(ERROR)

        logger.addHandler(handler)

        if gray_scale_flag is True:
            channel = 1
        else:
            channel = 3

        if opt_params is None:
            opt_params = Adam()
            opt_params.dropout_rate = 0.0

        if isinstance(opt_params, OptParams) is False:
            raise TypeError()

        scale = 0.01
        if convolutional_auto_encoder is None:
            conv1 = ConvolutionLayer1(
                ConvGraph1(activation_function=TanhFunction(),
                           filter_num=batch_size,
                           channel=channel,
                           kernel_size=3,
                           scale=scale,
                           stride=1,
                           pad=1))

            conv2 = ConvolutionLayer2(
                ConvGraph2(activation_function=TanhFunction(),
                           filter_num=batch_size,
                           channel=batch_size,
                           kernel_size=3,
                           scale=scale,
                           stride=1,
                           pad=1))

            convolutional_auto_encoder = CAE(
                layerable_cnn_list=[conv1, conv2],
                epochs=100,
                batch_size=batch_size,
                learning_rate=1e-05,
                learning_attenuate_rate=0.1,
                attenuate_epoch=25,
                computable_loss=MeanSquaredError(),
                opt_params=opt_params,
                verificatable_result=VerificateFunctionApproximation(),
                test_size_rate=0.3,
                tol=1e-15,
                save_flag=False)

        if deconvolution_layer_list is None:
            deconvolution_layer_list = [
                DeconvolutionLayer(
                    DeCNNGraph(activation_function=TanhFunction(),
                               filter_num=batch_size,
                               channel=channel,
                               kernel_size=3,
                               scale=scale,
                               stride=1,
                               pad=1))
            ]

        self.__convolutional_auto_encoder = convolutional_auto_encoder
        self.__deconvolution_layer_list = deconvolution_layer_list
        self.__opt_params = opt_params
        self.__learning_rate = learning_rate
        self.__verbose_mode = verbose_mode
        self.__logger = logger
        self.__batch_size = batch_size
        self.__saved_img_n = 0
        self.__attenuate_epoch = 50
Example 12
    def __init__(self,
                 batch_size,
                 layerable_cnn_list,
                 cnn_output_graph,
                 learning_rate=1e-05,
                 learning_attenuate_rate=0.1,
                 attenuate_epoch=50,
                 computable_loss=None,
                 opt_params=None,
                 verificatable_result=None,
                 pre_learned_path_list=None,
                 pre_learned_output_path=None,
                 cnn=None,
                 verbose_mode=False):
        '''
        Init.

        Args:
            batch_size:                     Batch size in mini-batch.
            layerable_cnn_list:             `list` of `LayerableCNN`.
            cnn_output_graph:               Computation graph which is-a `CNNOutputGraph` to compute parameters in output layer.
            learning_rate:                  Learning rate.
            learning_attenuate_rate:        Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
            attenuate_epoch:                Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
            computable_loss:                is-a `ComputableLoss`.
            opt_params:                     is-a `OptParams`.
            verificatable_result:           is-a `VerificateFunctionApproximation`.
            pre_learned_path_list:          `list` of file paths that store pre-learned parameters.
                                            This parameter will be referred to only when `cnn` is `None`.

            pre_learned_output_path:        File path that stores pre-learned parameters.

            cnn:                            is-a `ConvolutionalNeuralNetwork` as a model in this class.
                                            If not `None`, `self.__cnn` will be overridden by this `cnn`.
                                            If `None`, this class initializes a `ConvolutionalNeuralNetwork`
                                            with default hyperparameters.

            verbose_mode:                   Verbose mode or not.
            
        '''
        logger = getLogger("pydbm")
        handler = StreamHandler()
        if verbose_mode is True:
            handler.setLevel(DEBUG)
            logger.setLevel(DEBUG)
        else:
            handler.setLevel(ERROR)
            logger.setLevel(ERROR)

        logger.addHandler(handler)

        self.__logger = getLogger("pyqlearning")
        handler = StreamHandler()
        if verbose_mode is True:
            self.__logger.setLevel(DEBUG)
        else:
            self.__logger.setLevel(ERROR)

        self.__logger.addHandler(handler)

        if computable_loss is None:
            computable_loss = MeanSquaredError()
        if verificatable_result is None:
            verificatable_result = VerificateFunctionApproximation()
        if opt_params is None:
            opt_params = Adam()
            opt_params.weight_limit = 1e+10
            opt_params.dropout_rate = 0.0

        if cnn is None:
            cnn = ConvolutionalNeuralNetwork(
                # The `list` of `ConvolutionLayer`.
                layerable_cnn_list=layerable_cnn_list,
                # The number of epochs in mini-batch training.
                epochs=200,
                # The batch size.
                batch_size=batch_size,
                # Learning rate.
                learning_rate=learning_rate,
                # Loss function.
                computable_loss=computable_loss,
                # Optimizer.
                opt_params=opt_params,
                # Verification.
                verificatable_result=verificatable_result,
                # Pre-learned parameters.
                pre_learned_path_list=pre_learned_path_list,
                # Others.
                learning_attenuate_rate=learning_attenuate_rate,
                attenuate_epoch=attenuate_epoch)
            cnn.setup_output_layer(cnn_output_graph, pre_learned_output_path)

        self.__cnn = cnn
        self.__batch_size = batch_size
        self.__computable_loss = computable_loss
        self.__learning_rate = learning_rate
        self.__learning_attenuate_rate = learning_attenuate_rate
        self.__attenuate_epoch = attenuate_epoch
        self.__verbose_mode = verbose_mode
        self.__loss_list = []
        self.__epoch_counter = 0
Example 13
    def __init__(
        self,
        lstm_model=None,
        computable_loss=None,
        batch_size=20,
        input_neuron_count=100,
        hidden_neuron_count=300,
        observed_activating_function=None,
        input_gate_activating_function=None,
        forget_gate_activating_function=None,
        output_gate_activating_function=None,
        hidden_activating_function=None,
        output_activating_function=None,
        seq_len=10,
        join_io_flag=False,
        learning_rate=1e-05
    ):
        '''
        Init.

        Args:
            lstm_model:                         is-a `LSTM`.
            computable_loss:                    is-a `ComputableLoss`.

            batch_size:                         Batch size.
                                                This parameter will be referred to only when `lstm_model` is `None`.

            input_neuron_count:                 The number of input units.
                                                This parameter will be referred to only when `lstm_model` is `None`.

            hidden_neuron_count:                The number of hidden units.
                                                This parameter will be referred to only when `lstm_model` is `None`.

            observed_activating_function:       is-a `ActivatingFunctionInterface` in the hidden layer.
                                                This parameter will be referred to only when `lstm_model` is `None`.
                                                If `None`, this value will be `TanhFunction`.

            input_gate_activating_function:     is-a `ActivatingFunctionInterface` in the hidden layer.
                                                This parameter will be referred to only when `lstm_model` is `None`.
                                                If `None`, this value will be `LogisticFunction`.

            forget_gate_activating_function:    is-a `ActivatingFunctionInterface` in the hidden layer.
                                                This parameter will be referred to only when `lstm_model` is `None`.
                                                If `None`, this value will be `LogisticFunction`.

            output_gate_activating_function:    is-a `ActivatingFunctionInterface` in the hidden layer.
                                                This parameter will be referred to only when `lstm_model` is `None`.
                                                If `None`, this value will be `LogisticFunction`.

            hidden_activating_function:         is-a `ActivatingFunctionInterface` in the hidden layer.
                                                This parameter will be referred to only when `lstm_model` is `None`.

            output_activating_function:         is-a `ActivatingFunctionInterface` in the output layer.
                                                This parameter will be referred to only when `lstm_model` is `None`.
                                                If `None`, this model outputs from the LSTM's hidden layer in inferencing.

            seq_len:                            The length of sequences.
                                                This means the maximum step `t` referred to in feedforward.

            join_io_flag:                       If this value is `True` and `output_activating_function` is not `None`,
                                                this model outputs tensors that combine observed data points and inferenced data
                                                along the series direction.

            learning_rate:                      Learning rate.
        '''
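        # A sketch of what `join_io_flag` implies per the docstring (assumed
        # shape convention, with the series direction on axis 1):
        #
        #     output_arr = np.concatenate((observed_arr, inferenced_arr), axis=1)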
        if computable_loss is None:
            computable_loss = MeanSquaredError()

        if lstm_model is not None:
            if isinstance(lstm_model, LSTM) is False:
                raise TypeError()
        else:
            # Init.
            graph = LSTMGraph()

            # Activation function in LSTM.
            if observed_activating_function is None:
                graph.observed_activating_function = TanhFunction()
            else:
                if isinstance(observed_activating_function, ActivatingFunctionInterface) is False:
                    raise TypeError()
                graph.observed_activating_function = observed_activating_function
            
            if input_gate_activating_function is None:
                graph.input_gate_activating_function = LogisticFunction()
            else:
                if isinstance(input_gate_activating_function, ActivatingFunctionInterface) is False:
                    raise TypeError()
                graph.input_gate_activating_function = input_gate_activating_function
            
            if forget_gate_activating_function is None:
                graph.forget_gate_activating_function = LogisticFunction()
            else:
                if isinstance(forget_gate_activating_function, ActivatingFunctionInterface) is False:
                    raise TypeError()
                graph.forget_gate_activating_function = forget_gate_activating_function
            
            if output_gate_activating_function is None:
                graph.output_gate_activating_function = LogisticFunction()
            else:
                if isinstance(output_gate_activating_function, ActivatingFunctionInterface) is False:
                    raise TypeError()
                graph.output_gate_activating_function = output_gate_activating_function

            if hidden_activating_function is None:
                graph.hidden_activating_function = TanhFunction()
            else:
                if isinstance(hidden_activating_function, ActivatingFunctionInterface) is False:
                    raise TypeError()
                graph.hidden_activating_function = hidden_activating_function

            if output_activating_function is None:
                graph.output_activating_function = TanhFunction()
                self.__output_flag = False
                output_neuron_count = 1
            else:
                graph.output_activating_function = output_activating_function
                self.__output_flag = True
                output_neuron_count = hidden_neuron_count

            # Initialization strategy.
            # This method initializes each weight matrix and bias from a Gaussian distribution: `np.random.normal(size=hoge) * 0.01`.
            graph.create_rnn_cells(
                input_neuron_count=input_neuron_count,
                hidden_neuron_count=hidden_neuron_count,
                output_neuron_count=output_neuron_count
            )

            opt_params = SGD()
            opt_params.weight_limit = 1e+10
            opt_params.dropout_rate = 0.0

            lstm_model = LSTM(
                # Delegate `graph` to `LSTMModel`.
                graph=graph,
                # The number of epochs in mini-batch training.
                epochs=100,
                # The batch size.
                batch_size=batch_size,
                # Learning rate.
                learning_rate=1e-05,
                # Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
                learning_attenuate_rate=0.1,
                # Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
                attenuate_epoch=50,
                # The length of sequences.
                seq_len=seq_len,
                # Maximum step `t` referred to in BPTT. If `0`, this class refers to all past data in BPTT.
                bptt_tau=seq_len,
                # Size of Test data set. If this value is `0`, the validation will not be executed.
                test_size_rate=0.3,
                # Loss function.
                computable_loss=computable_loss,
                # Optimizer.
                opt_params=opt_params,
                # Verification function.
                verificatable_result=VerificateFunctionApproximation(),
                tol=0.0
            )

        self.__lstm_model = lstm_model
        self.__seq_len = seq_len
        self.__learning_rate = learning_rate
        self.__join_io_flag = join_io_flag
        self.__computable_loss = computable_loss
        self.__loss_list = []
        self.__epoch_counter = 0
        logger = getLogger("pygan")
        self.__logger = logger
    def __init__(self,
                 batch_size=20,
                 learning_rate=1e-10,
                 learning_attenuate_rate=0.1,
                 attenuate_epoch=50,
                 opt_params=None,
                 convolutional_auto_encoder=None,
                 deconvolution_layer_list=None,
                 gray_scale_flag=True,
                 channel=None):
        '''
        Init.

        Args:
            batch_size:                     Batch size in mini-batch.
            learning_rate:                  Learning rate.
            learning_attenuate_rate:        Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
            attenuate_epoch:                Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
                                            Additionally, in relation to regularization,
                                            this class constrains weight matrices every `attenuate_epoch`.

            opt_params:                     is-a `pydbm.optimization.opt_params.OptParams`.
            convolutional_auto_encoder:     is-a `pydbm.cnn.convolutionalneuralnetwork.convolutional_auto_encoder.ConvolutionalAutoEncoder`.
            deconvolution_layer_list:       `list` of `DeconvolutionLayer`.
            gray_scale_flag:                Gray scale or not.
                                            This parameter will be referred to when `channel` is `None`.
                                            If `True`, the channel will be `1`. If `False`, the channel will be `3`.

            channel:                        Channel.
        '''
        if channel is None:
            if gray_scale_flag is True:
                channel = 1
            else:
                channel = 3

        if opt_params is None:
            opt_params = Adam()
            opt_params.weight_limit = 1e+10
            opt_params.dropout_rate = 0.0

        if isinstance(opt_params, OptParams) is False:
            raise TypeError("The type of `opt_params` must be `OptParams`.")

        scale = 0.01
        if convolutional_auto_encoder is None:
            conv1 = ConvolutionLayer1(
                ConvGraph1(activation_function=TanhFunction(),
                           filter_num=batch_size,
                           channel=channel,
                           kernel_size=3,
                           scale=scale,
                           stride=1,
                           pad=1))

            conv2 = ConvolutionLayer2(
                ConvGraph2(activation_function=TanhFunction(),
                           filter_num=batch_size,
                           channel=batch_size,
                           kernel_size=3,
                           scale=scale,
                           stride=1,
                           pad=1))

            convolutional_auto_encoder = CAE(
                layerable_cnn_list=[conv1, conv2],
                epochs=100,
                batch_size=batch_size,
                learning_rate=learning_rate,
                learning_attenuate_rate=learning_attenuate_rate,
                attenuate_epoch=attenuate_epoch,
                computable_loss=MeanSquaredError(),
                opt_params=opt_params,
                verificatable_result=VerificateFunctionApproximation(),
                test_size_rate=0.3,
                tol=1e-15,
                save_flag=False)

        if deconvolution_layer_list is None:
            deconvolution_layer_list = [
                DeconvolutionLayer(
                    DeCNNGraph(activation_function=TanhFunction(),
                               filter_num=batch_size,
                               channel=channel,
                               kernel_size=3,
                               scale=scale,
                               stride=1,
                               pad=1))
            ]

        self.__convolutional_auto_encoder = convolutional_auto_encoder
        self.__deconvolution_layer_list = deconvolution_layer_list
        self.__opt_params = opt_params

        self.__learning_rate = learning_rate
        self.__attenuate_epoch = attenuate_epoch
        self.__learning_attenuate_rate = learning_attenuate_rate

        self.__batch_size = batch_size
        self.__saved_img_n = 0

        self.__epoch_counter = 0

        logger = getLogger("pygan")
        self.__logger = logger
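
The docstring above notes that weight matrices are constrained every `attenuate_epoch` via `opt_params.weight_limit`. One plausible reading of that constraint (an assumption for illustration; pydbm's actual rule may differ) is an L2-norm projection:

    import numpy as np

    def constrain_weight(weight_arr, weight_limit):
        # Hypothetical sketch: rescale the weight matrix whenever its
        # L2 norm exceeds `weight_limit`.
        norm = np.linalg.norm(weight_arr)
        if norm > weight_limit:
            weight_arr = weight_arr * (weight_limit / norm)
        return weight_arr

With the default `weight_limit = 1e+10` used here, such a constraint is effectively inactive; Example No. 15 below tightens it to `0.5`.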
Example No. 15
    def __build_encoder_decoder_controller(self,
                                           input_neuron_count=20,
                                           hidden_neuron_count=20,
                                           weight_limit=0.5,
                                           dropout_rate=0.5,
                                           epochs=1000,
                                           batch_size=20,
                                           learning_rate=1e-05,
                                           attenuate_epoch=50,
                                           learning_attenuate_rate=0.1,
                                           seq_len=8,
                                           bptt_tau=8,
                                           test_size_rate=0.3,
                                           tol=1e-10,
                                           tld=100.0):
        # Init.
        encoder_graph = EncoderGraph()

        # Activation function in LSTM.
        encoder_graph.observed_activating_function = LogisticFunction()
        encoder_graph.input_gate_activating_function = LogisticFunction()
        encoder_graph.forget_gate_activating_function = LogisticFunction()
        encoder_graph.output_gate_activating_function = LogisticFunction()
        encoder_graph.hidden_activating_function = LogisticFunction()
        encoder_graph.output_activating_function = LogisticFunction()

        # Initialization strategy.
        # This method initializes each weight matrix and bias from a Gaussian distribution: `np.random.normal(size=hoge) * 0.01`.
        encoder_graph.create_rnn_cells(input_neuron_count=input_neuron_count,
                                       hidden_neuron_count=hidden_neuron_count,
                                       output_neuron_count=1)
        encoder_opt_params = EncoderAdam()
        encoder_opt_params.weight_limit = weight_limit
        encoder_opt_params.dropout_rate = dropout_rate

        encoder = Encoder(
            # Delegate `graph` to `LSTMModel`.
            graph=encoder_graph,
            # The number of epochs in mini-batch training.
            epochs=100,
            # The batch size.
            batch_size=batch_size,
            # Learning rate.
            learning_rate=learning_rate,
            # Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
            learning_attenuate_rate=0.1,
            # Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
            attenuate_epoch=50,
            # Referred maximum step `t` in BPTT. If `0`, this class refers to all past data in BPTT.
            bptt_tau=8,
            # Size of Test data set. If this value is `0`, the validation will not be executed.
            test_size_rate=0.3,
            # Loss function.
            computable_loss=MeanSquaredError(),
            # Optimizer.
            opt_params=encoder_opt_params,
            # Verification function.
            verificatable_result=VerificateFunctionApproximation())

        # Init.
        decoder_graph = DecoderGraph()

        # Activation function in LSTM.
        decoder_graph.observed_activating_function = LogisticFunction()
        decoder_graph.input_gate_activating_function = LogisticFunction()
        decoder_graph.forget_gate_activating_function = LogisticFunction()
        decoder_graph.output_gate_activating_function = LogisticFunction()
        decoder_graph.hidden_activating_function = LogisticFunction()
        decoder_graph.output_activating_function = SoftmaxFunction()

        # Initialization strategy.
        # This method initializes each weight matrix and bias from a Gaussian distribution: `np.random.normal(size=hoge) * 0.01`.
        decoder_graph.create_rnn_cells(input_neuron_count=hidden_neuron_count,
                                       hidden_neuron_count=hidden_neuron_count,
                                       output_neuron_count=input_neuron_count)
        decoder_opt_params = DecoderAdam()
        decoder_opt_params.weight_limit = weight_limit
        decoder_opt_params.dropout_rate = dropout_rate

        decoder = Decoder(
            # Delegate `graph` to `LSTMModel`.
            graph=decoder_graph,
            # The number of epochs in mini-batch training.
            epochs=100,
            # The batch size.
            batch_size=batch_size,
            # Learning rate.
            learning_rate=learning_rate,
            # Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
            learning_attenuate_rate=0.1,
            # Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
            attenuate_epoch=50,
            # The length of sequences.
            seq_len=seq_len,
            # Referred maximum step `t` in BPTT. If `0`, this class refers to all past data in BPTT.
            bptt_tau=bptt_tau,
            # Size of Test data set. If this value is `0`, the validation will not be executed.
            test_size_rate=0.3,
            # Loss function.
            computable_loss=MeanSquaredError(),
            # Optimizer.
            opt_params=decoder_opt_params,
            # Verification function.
            verificatable_result=VerificateFunctionApproximation())

        encoder_decoder_controller = EncoderDecoderController(
            encoder=encoder,
            decoder=decoder,
            epochs=epochs,
            batch_size=batch_size,
            learning_rate=learning_rate,
            learning_attenuate_rate=learning_attenuate_rate,
            attenuate_epoch=attenuate_epoch,
            test_size_rate=test_size_rate,
            computable_loss=MeanSquaredError(),
            verificatable_result=VerificateFunctionApproximation(),
            tol=tol,
            tld=tld)

        return encoder_decoder_controller
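
Given the defaults above (`batch_size=20`, `seq_len=8`, `input_neuron_count=20`), the controller consumes rank-3 mini-batches. A minimal sketch of the expected shapes, inferred from those hyperparameters rather than from pydbm's documentation:

    import numpy as np

    batch_size, seq_len, input_neuron_count = 20, 8, 20
    # One mini-batch: 20 sequences, each with 8 steps of 20 features.
    observed_arr = np.random.normal(size=(batch_size, seq_len, input_neuron_count))
    # The encoder compresses each sequence into a `hidden_neuron_count`-dim
    # state; the decoder expands it back to `input_neuron_count` features,
    # mirroring the two `create_rnn_cells` calls above.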
Example No. 16
    def __init__(self,
                 batch_size,
                 layerable_cnn_list,
                 cnn_output_graph,
                 learning_rate=1e-05,
                 computable_loss=None,
                 opt_params=None,
                 verificatable_result=None,
                 cnn=None,
                 feature_matching_layer=0):
        '''
        Init.

        Args:
            batch_size:                     Batch size in mini-batch.
            layerable_cnn_list:             `list` of `LayerableCNN`.
            cnn_output_graph:               is-a `CNNOutputGraph`.
            learning_rate:                  Learning rate.
            computable_loss:                is-a `ComputableLoss`.
                                            This parameter will be referred to only when `cnn` is `None`.

            opt_params:                     is-a `OptParams`.
                                            This parameter will be referred to only when `cnn` is `None`.

            verificatable_result:           is-a `VerificateFunctionApproximation`.
                                            This parameter will be referred to only when `cnn` is `None`.

            cnn:                            is-a `ConvolutionalNeuralNetwork` as a model in this class.
                                            If not `None`, `self.__cnn` will be overridden by this `cnn`.
                                            If `None`, this class initialize `ConvolutionalNeuralNetwork`
                                            by default hyper parameters.

            feature_matching_layer:         Key of layer number for feature matching forward/backward.

        '''
        for layerable_cnn in layerable_cnn_list:
            if isinstance(layerable_cnn, LayerableCNN) is False:
                raise TypeError("The type of each value of `layerable_cnn_list` must be `LayerableCNN`.")

        self.__layerable_cnn_list = layerable_cnn_list
        self.__learning_rate = learning_rate
        self.__opt_params = opt_params

        if cnn is None:
            if computable_loss is None:
                computable_loss = MeanSquaredError()
            if isinstance(computable_loss, ComputableLoss) is False:
                raise TypeError("The type of `computable_loss` must be `ComputableLoss`.")

            if verificatable_result is None:
                verificatable_result = VerificateFunctionApproximation()
            if isinstance(verificatable_result, VerificatableResult) is False:
                raise TypeError("The type of `verificatable_result` must be `VerificatableResult`.")

            if opt_params is None:
                opt_params = Adam()
                opt_params.weight_limit = 1e+10
                opt_params.dropout_rate = 0.0
            if isinstance(opt_params, OptParams) is False:
                raise TypeError("The type of `opt_params` must be `OptParams`.")

            cnn = ConvolutionalNeuralNetwork(
                layerable_cnn_list=layerable_cnn_list,
                computable_loss=computable_loss,
                opt_params=opt_params,
                verificatable_result=verificatable_result,
                epochs=100,
                batch_size=batch_size,
                learning_rate=learning_rate,
                learning_attenuate_rate=0.1,
                test_size_rate=0.3,
                tol=1e-15,
                tld=100.0,
                save_flag=False,
                pre_learned_path_list=None)
            cnn.setup_output_layer(cnn_output_graph)

        self.__cnn = cnn
        self.__batch_size = batch_size
        self.__computable_loss = computable_loss
        self.__learning_rate = learning_rate
        self.__q_shape = None
        self.__loss_list = []
        self.__feature_matching_layer = feature_matching_layer
        self.__epoch_counter = 0
        logger = getLogger("pygan")
        self.__logger = logger
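
`feature_matching_layer` selects which intermediate layer's activations are compared during feature matching. As a sketch of the underlying idea (feature matching in the sense of Salimans et al., 2016; the helper below is illustrative, not a pygan API):

    import numpy as np

    def feature_matching_loss(true_feature_arr, generated_feature_arr):
        # Match the mean activations of an intermediate discriminator
        # layer over the real and generated mini-batches.
        delta_arr = true_feature_arr.mean(axis=0) - generated_feature_arr.mean(axis=0)
        return np.square(delta_arr).sum()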
Example No. 17
    def __build_encoder_decoder_controller(self,
                                           input_neuron_count=20,
                                           hidden_neuron_count=20,
                                           weight_limit=1e+15,
                                           dropout_rate=0.5,
                                           epochs=1000,
                                           batch_size=20,
                                           learning_rate=1e-05,
                                           attenuate_epoch=50,
                                           learning_attenuate_rate=1.0,
                                           seq_len=8,
                                           bptt_tau=8,
                                           test_size_rate=0.3,
                                           tol=1e-10,
                                           tld=100.0):
        encoder_graph = EncoderGraph()

        encoder_graph.observed_activating_function = LogisticFunction()
        encoder_graph.input_gate_activating_function = LogisticFunction()
        encoder_graph.forget_gate_activating_function = LogisticFunction()
        encoder_graph.output_gate_activating_function = LogisticFunction()
        encoder_graph.hidden_activating_function = LogisticFunction()
        encoder_graph.output_activating_function = LogisticFunction()

        encoder_graph.create_rnn_cells(input_neuron_count=input_neuron_count,
                                       hidden_neuron_count=hidden_neuron_count,
                                       output_neuron_count=1)
        encoder_opt_params = EncoderAdam()
        encoder_opt_params.weight_limit = weight_limit
        encoder_opt_params.dropout_rate = dropout_rate

        encoder = Encoder(
            graph=encoder_graph,
            epochs=100,
            batch_size=batch_size,
            learning_rate=learning_rate,
            learning_attenuate_rate=1.0,
            attenuate_epoch=50,
            bptt_tau=8,
            test_size_rate=0.3,
            computable_loss=MeanSquaredError(),
            opt_params=encoder_opt_params,
            verificatable_result=VerificateFunctionApproximation(),
            tol=tol,
            tld=tld)

        decoder_graph = DecoderGraph()

        decoder_graph.observed_activating_function = LogisticFunction()
        decoder_graph.input_gate_activating_function = LogisticFunction()
        decoder_graph.forget_gate_activating_function = LogisticFunction()
        decoder_graph.output_gate_activating_function = LogisticFunction()
        decoder_graph.hidden_activating_function = LogisticFunction()
        decoder_graph.output_activating_function = SoftmaxFunction()

        decoder_graph.create_rnn_cells(input_neuron_count=hidden_neuron_count,
                                       hidden_neuron_count=hidden_neuron_count,
                                       output_neuron_count=input_neuron_count)
        decoder_opt_params = DecoderAdam()
        decoder_opt_params.weight_limit = weight_limit
        decoder_opt_params.dropout_rate = dropout_rate

        decoder = Decoder(
            graph=decoder_graph,
            epochs=100,
            batch_size=batch_size,
            learning_rate=learning_rate,
            learning_attenuate_rate=1.0,
            attenuate_epoch=50,
            seq_len=seq_len,
            bptt_tau=bptt_tau,
            test_size_rate=0.3,
            computable_loss=MeanSquaredError(),
            opt_params=decoder_opt_params,
            verificatable_result=VerificateFunctionApproximation())

        encoder_decoder_controller = EncoderDecoderController(
            encoder=encoder,
            decoder=decoder,
            epochs=epochs,
            batch_size=batch_size,
            learning_rate=learning_rate,
            learning_attenuate_rate=learning_attenuate_rate,
            attenuate_epoch=attenuate_epoch,
            test_size_rate=test_size_rate,
            computable_loss=MeanSquaredError(),
            verificatable_result=VerificateFunctionApproximation(),
            tol=tol,
            tld=tld)

        return encoder_decoder_controller
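
Compared with Example No. 15, this variant sets `weight_limit=1e+15` and `learning_attenuate_rate=1.0`, effectively disabling both the weight constraint and the learning-rate decay, and relies on `dropout_rate=0.5` for regularization. As a reminder of what that rate means, here is an inverted-dropout sketch (illustrative; pydbm's internal masking may differ):

    import numpy as np

    def dropout(activity_arr, dropout_rate=0.5):
        # Zero each unit with probability `dropout_rate` and rescale the
        # survivors so the expected activation is unchanged.
        mask_arr = np.random.uniform(size=activity_arr.shape) > dropout_rate
        return activity_arr * mask_arr / (1.0 - dropout_rate)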