Example #1
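This example wires two convolution layers and an LSTM encoder/decoder pair into a `SpatioTemporalAutoEncoder`, trains it on image sequences streamed by an `ImageGenerator`, and saves observed and reconstructed frames as PNG files. The snippet assumes imports along the following lines; the pydbm module paths are inferred from the identifiers used and should be verified against the installed version:

import numpy as np
from PIL import Image
from logging import getLogger, StreamHandler, DEBUG, ERROR
# The pydbm paths below are assumptions based on the package layout.
from pydbm.activation.tanh_function import TanhFunction
from pydbm.activation.logistic_function import LogisticFunction
from pydbm.loss.mean_squared_error import MeanSquaredError
from pydbm.optimization.optparams.adam import Adam
from pydbm.optimization.optparams.adam import Adam as EncoderAdam
from pydbm.optimization.optparams.adam import Adam as DecoderAdam
from pydbm.verification.verificate_function_approximation import VerificateFunctionApproximation
from pydbm.rnn.lstm_model import LSTMModel as Encoder
from pydbm.rnn.lstm_model import LSTMModel as Decoder
from pydbm.synapse.recurrenttemporalgraph.lstm_graph import LSTMGraph as EncoderGraph
from pydbm.synapse.recurrenttemporalgraph.lstm_graph import LSTMGraph as DecoderGraph
from pydbm.cnn.featuregenerator.image_generator import ImageGenerator
from pydbm.cnn.layerablecnn.convolution_layer import ConvolutionLayer as ConvolutionLayer1
from pydbm.cnn.layerablecnn.convolution_layer import ConvolutionLayer as ConvolutionLayer2
from pydbm.synapse.cnn_graph import CNNGraph as ConvGraph1
from pydbm.synapse.cnn_graph import CNNGraph as ConvGraph2
from pydbm.cnn.spatio_temporal_auto_encoder import SpatioTemporalAutoEncoder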
def Main(params_dict):

    logger = getLogger("pydbm")
    handler = StreamHandler()
    if params_dict["debug_mode"] is True:
        handler.setLevel(DEBUG)
        logger.setLevel(DEBUG)
    else:
        handler.setLevel(ERROR)
        logger.setLevel(ERROR)

    logger.addHandler(handler)

    epochs = params_dict["epochs"]
    batch_size = params_dict["batch_size"]
    seq_len = params_dict["seq_len"]
    channel = params_dict["channel"]
    height = params_dict["height"]
    width = params_dict["width"]
    scale = params_dict["scale"]
    training_image_dir = params_dict["training_image_dir"]
    test_image_dir = params_dict["test_image_dir"]

    enc_dim = batch_size * height * width
    dec_dim = batch_size * height * width

    # Streams mini-batches of gray-scale, z-score-normalized image sequences.
    feature_generator = ImageGenerator(epochs=epochs,
                                       batch_size=batch_size,
                                       training_image_dir=training_image_dir,
                                       test_image_dir=test_image_dir,
                                       seq_len=seq_len,
                                       gray_scale_flag=True,
                                       wh_size_tuple=(height, width),
                                       norm_mode="z_score")

    # Init.
    encoder_graph = EncoderGraph()

    # Activation function in LSTM.
    encoder_graph.observed_activating_function = TanhFunction()
    encoder_graph.input_gate_activating_function = LogisticFunction()
    encoder_graph.forget_gate_activating_function = LogisticFunction()
    encoder_graph.output_gate_activating_function = LogisticFunction()
    encoder_graph.hidden_activating_function = TanhFunction()
    encoder_graph.output_activating_function = LogisticFunction()

    # Initialization strategy.
    # This method initializes each weight matrix and bias from a Gaussian distribution: `np.random.normal(size=...) * 0.01`.
    encoder_graph.create_rnn_cells(input_neuron_count=enc_dim,
                                   hidden_neuron_count=200,
                                   output_neuron_count=enc_dim)

    # Optimizer for Encoder.
    encoder_opt_params = EncoderAdam()
    encoder_opt_params.weight_limit = 0.5
    encoder_opt_params.dropout_rate = 0.5

    encoder = Encoder(
        # Delegate `graph` to `LSTMModel`.
        graph=encoder_graph,
        # The number of epochs in mini-batch training.
        epochs=epochs,
        # The batch size.
        batch_size=batch_size,
        # Learning rate.
        learning_rate=1e-05,
        # Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
        learning_attenuate_rate=0.1,
        # Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
        attenuate_epoch=50,
        # The maximum time step `t` referenced in BPTT. If `0`, this class refers to all past data in BPTT.
        bptt_tau=8,
        # Size of Test data set. If this value is `0`, the validation will not be executed.
        test_size_rate=0.3,
        # Loss function.
        computable_loss=MeanSquaredError(),
        # Optimizer.
        opt_params=encoder_opt_params,
        # Verification function.
        verificatable_result=VerificateFunctionApproximation(),
        # Tolerance for the optimization.
        # When the loss or score is not improving by at least tol
        # for two consecutive iterations, convergence is considered
        # to be reached and training stops.
        tol=0.0)

    # Init.
    decoder_graph = DecoderGraph()

    # Activation function in LSTM.
    decoder_graph.observed_activating_function = TanhFunction()
    decoder_graph.input_gate_activating_function = LogisticFunction()
    decoder_graph.forget_gate_activating_function = LogisticFunction()
    decoder_graph.output_gate_activating_function = LogisticFunction()
    decoder_graph.hidden_activating_function = TanhFunction()
    decoder_graph.output_activating_function = LogisticFunction()

    # Initialization strategy.
    # This method initializes each weight matrix and bias from a Gaussian distribution: `np.random.normal(size=...) * 0.01`.
    decoder_graph.create_rnn_cells(input_neuron_count=200,
                                   hidden_neuron_count=dec_dim,
                                   output_neuron_count=200)

    # Optimizer for Decoder.
    decoder_opt_params = DecoderAdam()
    decoder_opt_params.weight_limit = 0.5
    decoder_opt_params.dropout_rate = 0.5

    decoder = Decoder(
        # Delegate `graph` to `LSTMModel`.
        graph=decoder_graph,
        # The number of epochs in mini-batch training.
        epochs=epochs,
        # The batch size.
        batch_size=batch_size,
        # Learning rate.
        learning_rate=1e-05,
        # Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
        learning_attenuate_rate=0.1,
        # Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
        attenuate_epoch=50,
        # The maximum time step `t` referenced in BPTT. If `0`, this class refers to all past data in BPTT.
        bptt_tau=8,
        # Size of Test data set. If this value is `0`, the validation will not be executed.
        test_size_rate=0.3,
        # Loss function.
        computable_loss=MeanSquaredError(),
        # Optimizer.
        opt_params=decoder_opt_params,
        # Verification function.
        verificatable_result=VerificateFunctionApproximation(),
        # Tolerance for the optimization.
        # When the loss or score is not improving by at least tol
        # for two consecutive iterations, convergence is considered
        # to be reached and training stops.
        tol=0.0)

    # Two convolution layers form the spatial part of the auto-encoder.
    conv1 = ConvolutionLayer1(
        ConvGraph1(activation_function=TanhFunction(),
                   filter_num=batch_size,
                   channel=channel,
                   kernel_size=3,
                   scale=scale,
                   stride=1,
                   pad=1))

    conv2 = ConvolutionLayer2(
        ConvGraph2(activation_function=TanhFunction(),
                   filter_num=batch_size,
                   channel=batch_size,
                   kernel_size=3,
                   scale=scale,
                   stride=1,
                   pad=1))

    # Compose the spatial CNN layers with the temporal LSTM encoder/decoder.
    cnn = SpatioTemporalAutoEncoder(
        layerable_cnn_list=[conv1, conv2],
        encoder=encoder,
        decoder=decoder,
        epochs=epochs,
        batch_size=batch_size,
        learning_rate=1e-05,
        learning_attenuate_rate=0.1,
        attenuate_epoch=25,
        computable_loss=MeanSquaredError(),
        opt_params=Adam(),
        verificatable_result=VerificateFunctionApproximation(),
        test_size_rate=0.3,
        tol=1e-15,
        save_flag=False)

    # Train on mini-batches drawn from the feature generator.
    cnn.learn_generated(feature_generator)

    test_len = 0
    test_limit = 1

    test_arr_list = []
    rec_arr_list = []
    # Reconstruct one test batch and save observed vs. reconstructed frames.
    for batch_observed_arr, batch_target_arr, test_batch_observed_arr, test_batch_target_arr in feature_generator.generate():
        test_len += 1
        result_arr = cnn.inference(test_batch_observed_arr)
        for batch in range(test_batch_target_arr.shape[0]):
            for seq in range(test_batch_target_arr[batch].shape[0]):
                arr = test_batch_target_arr[batch][seq][0]
                # Min-max normalize to [0, 255] before saving as an 8-bit image.
                arr = (arr - arr.min()) / (arr.max() - arr.min())
                arr *= 255
                img = Image.fromarray(np.uint8(arr))
                img.save("result/" + str(batch) + "_" + str(seq) + "_observed.png")
            for seq in range(result_arr[batch].shape[0]):
                arr = result_arr[batch][seq][0]
                arr = (arr - arr.min()) / (arr.max() - arr.min())
                arr *= 255
                img = Image.fromarray(np.uint8(arr))
                img.save("result/" + str(batch) + "_" + str(seq) + "_reconstructed.png")

        if test_len >= test_limit:
            break
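A minimal sketch of how `Main` might be invoked. Every value below is an illustrative assumption; the function only requires that these keys exist in `params_dict`:

if __name__ == "__main__":
    # Hypothetical parameter values for illustration only.
    Main({
        "debug_mode": True,
        "epochs": 100,
        "batch_size": 20,
        "seq_len": 10,
        "channel": 1,
        "height": 96,
        "width": 96,
        "scale": 0.01,
        "training_image_dir": "img/training/",
        "test_image_dir": "img/test/",
    })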
Example #2
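This example builds an `EncoderDecoderController` from explicitly configured LSTM graphs: the encoder compresses each sequence down to a single output unit, and the decoder maps the hidden state back to the input dimensionality through a softmax output layer.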
    def __build_encoder_decoder_controller(
        self,
        input_neuron_count=20,
        hidden_neuron_count=20,
        weight_limit=0.5,
        dropout_rate=0.5,
        epochs=1000,
        batch_size=20,
        learning_rate=1e-05,
        attenuate_epoch=50,
        learning_attenuate_rate=0.1,
        seq_len=8,
        bptt_tau=8,
        test_size_rate=0.3,
        tol=1e-10,
        tld=100.0
    ):
        encoder_graph = EncoderGraph()

        encoder_graph.observed_activating_function = LogisticFunction()
        encoder_graph.input_gate_activating_function = LogisticFunction()
        encoder_graph.forget_gate_activating_function = LogisticFunction()
        encoder_graph.output_gate_activating_function = LogisticFunction()
        encoder_graph.hidden_activating_function = LogisticFunction()
        encoder_graph.output_activating_function = LogisticFunction()

        encoder_graph.create_rnn_cells(
            input_neuron_count=input_neuron_count,
            hidden_neuron_count=hidden_neuron_count,
            output_neuron_count=1
        )
        encoder_opt_params = EncoderAdam()
        encoder_opt_params.weight_limit = weight_limit
        encoder_opt_params.dropout_rate = dropout_rate

        encoder = Encoder(
            graph=encoder_graph,
            epochs=100,
            batch_size=batch_size,
            learning_rate=learning_rate,
            learning_attenuate_rate=0.1,
            attenuate_epoch=50,
            bptt_tau=8,
            test_size_rate=0.3,
            computable_loss=MeanSquaredError(),
            opt_params=encoder_opt_params,
            verificatable_result=VerificateFunctionApproximation(),
            tol=tol,
            tld=tld
        )

        decoder_graph = DecoderGraph()

        decoder_graph.observed_activating_function = LogisticFunction()
        decoder_graph.input_gate_activating_function = LogisticFunction()
        decoder_graph.forget_gate_activating_function = LogisticFunction()
        decoder_graph.output_gate_activating_function = LogisticFunction()
        decoder_graph.hidden_activating_function = LogisticFunction()
        decoder_graph.output_activating_function = SoftmaxFunction()

        decoder_graph.create_rnn_cells(
            input_neuron_count=hidden_neuron_count,
            hidden_neuron_count=hidden_neuron_count,
            output_neuron_count=input_neuron_count
        )
        decoder_opt_params = DecoderAdam()
        decoder_opt_params.weight_limit = weight_limit
        decoder_opt_params.dropout_rate = dropout_rate

        decoder = Decoder(
            graph=decoder_graph,
            epochs=100,
            batch_size=batch_size,
            learning_rate=learning_rate,
            learning_attenuate_rate=0.1,
            attenuate_epoch=50,
            seq_len=seq_len,
            bptt_tau=bptt_tau,
            test_size_rate=0.3,
            computable_loss=MeanSquaredError(),
            opt_params=decoder_opt_params,
            verificatable_result=VerificateFunctionApproximation()
        )

        encoder_decoder_controller = EncoderDecoderController(
            encoder=encoder,
            decoder=decoder,
            epochs=epochs,
            batch_size=batch_size,
            learning_rate=learning_rate,
            learning_attenuate_rate=learning_attenuate_rate,
            attenuate_epoch=attenuate_epoch,
            test_size_rate=test_size_rate,
            computable_loss=MeanSquaredError(),
            verificatable_result=VerificateFunctionApproximation(),
            tol=tol,
            tld=tld
        )

        return encoder_decoder_controller
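A usage sketch from inside the owning class. Training the controller on its own input follows Example #3; that `inference` returns the reconstructed sequences is an assumption here, and the array shapes are illustrative:

# Hypothetical usage with toy data.
import numpy as np

observed_arr = np.random.normal(size=(100, 8, 20))  # (sample_n, seq_len, feature_n)
controller = self.__build_encoder_decoder_controller(
    input_neuron_count=20,
    hidden_neuron_count=20
)
controller.learn(observed_arr, observed_arr)
reconstructed_arr = controller.inference(observed_arr[:20])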
Example #3
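This example converts tokenized sentences into an observed array, trains an LSTM encoder/decoder to reconstruct that array, and stores the trained controller and the token master list on the instance for later use.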
    def learn(self,
              sentence_list,
              token_master_list,
              hidden_neuron_count=200,
              epochs=100,
              batch_size=100,
              learning_rate=1e-05,
              learning_attenuate_rate=0.1,
              attenuate_epoch=50,
              bptt_tau=8,
              weight_limit=0.5,
              dropout_rate=0.5,
              test_size_rate=0.3):
        '''
        Train the encoder/decoder on the given sentences.
        
        Args:
            sentence_list:                  The `list` of sentences.
            token_master_list:              Unique `list` of tokens.
            hidden_neuron_count:            The number of units in hidden layer.
            epochs:                         Epochs of mini-batch training.
            batch_size:                     Batch size of mini-batch training.
            learning_rate:                  Learning rate.
            learning_attenuate_rate:        Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
            attenuate_epoch:                Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
                                            Additionally, in relation to regularization,
                                            this class constrains weight matrixes every `attenuate_epoch`.

            bptt_tau:                       The maximum time step `t` referenced in Backpropagation Through Time (BPTT).
            weight_limit:                   Regularization constraint on the weight matrix:
                                            the weights are repeatedly multiplied by `0.9`
                                            until $\sum_{j=0}^{n}w_{ji}^2 < weight\_limit$.

            dropout_rate:                   The probability of dropout.
            test_size_rate:                 Size of test data set. If this value is `0`, the validation will not be executed.
        '''
        observed_arr = self.__setup_dataset(sentence_list, token_master_list)

        self.__logger.debug("Shape of observed data points:")
        self.__logger.debug(observed_arr.shape)

        # Init.
        encoder_graph = EncoderGraph()

        # Activation function in LSTM.
        encoder_graph.observed_activating_function = LogisticFunction()
        encoder_graph.input_gate_activating_function = LogisticFunction()
        encoder_graph.forget_gate_activating_function = LogisticFunction()
        encoder_graph.output_gate_activating_function = LogisticFunction()
        encoder_graph.hidden_activating_function = LogisticFunction()
        encoder_graph.output_activating_function = LogisticFunction()

        # Initialization strategy.
        # This method initializes each weight matrix and bias from a Gaussian distribution: `np.random.normal(size=...) * 0.01`.
        encoder_graph.create_rnn_cells(
            input_neuron_count=observed_arr.shape[-1],
            hidden_neuron_count=hidden_neuron_count,
            output_neuron_count=observed_arr.shape[-1])

        # Init.
        decoder_graph = DecoderGraph()

        # Activation function in LSTM.
        decoder_graph.observed_activating_function = LogisticFunction()
        decoder_graph.input_gate_activating_function = LogisticFunction()
        decoder_graph.forget_gate_activating_function = LogisticFunction()
        decoder_graph.output_gate_activating_function = LogisticFunction()
        decoder_graph.hidden_activating_function = LogisticFunction()
        decoder_graph.output_activating_function = LogisticFunction()

        # Initialization strategy.
        # This method initializes each weight matrix and bias from a Gaussian distribution: `np.random.normal(size=...) * 0.01`.
        decoder_graph.create_rnn_cells(
            input_neuron_count=hidden_neuron_count,
            hidden_neuron_count=observed_arr.shape[-1],
            output_neuron_count=hidden_neuron_count)

        encoder_opt_params = EncoderAdam()
        encoder_opt_params.weight_limit = weight_limit
        encoder_opt_params.dropout_rate = dropout_rate

        encoder = Encoder(
            # Delegate `graph` to `LSTMModel`.
            graph=encoder_graph,
            # The number of epochs in mini-batch training.
            epochs=epochs,
            # The batch size.
            batch_size=batch_size,
            # Learning rate.
            learning_rate=learning_rate,
            # Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
            learning_attenuate_rate=learning_attenuate_rate,
            # Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
            attenuate_epoch=attenuate_epoch,
            # The maximum time step `t` referenced in BPTT. If `0`, this class refers to all past data in BPTT.
            bptt_tau=bptt_tau,
            # Size of Test data set. If this value is `0`, the validation will not be executed.
            test_size_rate=test_size_rate,
            # Loss function.
            computable_loss=MeanSquaredError(),
            # Optimizer.
            opt_params=encoder_opt_params,
            # Verification function.
            verificatable_result=VerificateFunctionApproximation(),
            tol=0.0)

        decoder_opt_params = DecoderAdam()
        decoder_opt_params.weight_limit = weight_limit
        decoder_opt_params.dropout_rate = dropout_rate

        decoder = Decoder(
            # Delegate `graph` to `LSTMModel`.
            graph=decoder_graph,
            # The number of epochs in mini-batch training.
            epochs=epochs,
            # The batch size.
            batch_size=batch_size,
            # Learning rate.
            learning_rate=learning_rate,
            # Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
            learning_attenuate_rate=learning_attenuate_rate,
            # Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
            attenuate_epoch=attenuate_epoch,
            # The maximum time step `t` referenced in BPTT. If `0`, this class refers to all past data in BPTT.
            bptt_tau=bptt_tau,
            # Size of Test data set. If this value is `0`, the validation will not be executed.
            test_size_rate=test_size_rate,
            # Loss function.
            computable_loss=MeanSquaredError(),
            # Optimizer.
            opt_params=decoder_opt_params,
            # Verification function.
            verificatable_result=VerificateFunctionApproximation(),
            tol=0.0)

        encoder_decoder_controller = EncoderDecoderController(
            encoder=encoder,
            decoder=decoder,
            epochs=epochs,
            batch_size=batch_size,
            learning_rate=learning_rate,
            learning_attenuate_rate=learning_attenuate_rate,
            attenuate_epoch=attenuate_epoch,
            test_size_rate=test_size_rate,
            computable_loss=MeanSquaredError(),
            verificatable_result=VerificateFunctionApproximation(),
            tol=0.0)

        # Learning.
        encoder_decoder_controller.learn(observed_arr, observed_arr)

        self.__controller = encoder_decoder_controller
        self.__token_master_list = token_master_list
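A sketch of a call to this method, where `model` stands for a hypothetical instance of the class that defines it and the token data is a toy assumption:

# Hypothetical call with toy data.
token_master_list = ["<EOS>", "hello", "world", "good", "morning"]
sentence_list = [
    ["hello", "world", "<EOS>"],
    ["good", "morning", "<EOS>"],
]
model.learn(sentence_list, token_master_list, epochs=100, batch_size=2)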
Example #4
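This example is an annotated variant of Example #2: the construction is the same, but here `tol` and `tld` are applied only at the controller level, whereas Example #2 also forwards them to the encoder.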
    def __build_encoder_decoder_controller(self,
                                           input_neuron_count=20,
                                           hidden_neuron_count=20,
                                           weight_limit=0.5,
                                           dropout_rate=0.5,
                                           epochs=1000,
                                           batch_size=20,
                                           learning_rate=1e-05,
                                           attenuate_epoch=50,
                                           learning_attenuate_rate=0.1,
                                           seq_len=8,
                                           bptt_tau=8,
                                           test_size_rate=0.3,
                                           tol=1e-10,
                                           tld=100.0):
        # Init.
        encoder_graph = EncoderGraph()

        # Activation function in LSTM.
        encoder_graph.observed_activating_function = LogisticFunction()
        encoder_graph.input_gate_activating_function = LogisticFunction()
        encoder_graph.forget_gate_activating_function = LogisticFunction()
        encoder_graph.output_gate_activating_function = LogisticFunction()
        encoder_graph.hidden_activating_function = LogisticFunction()
        encoder_graph.output_activating_function = LogisticFunction()

        # Initialization strategy.
        # This method initializes each weight matrix and bias from a Gaussian distribution: `np.random.normal(size=...) * 0.01`.
        encoder_graph.create_rnn_cells(input_neuron_count=input_neuron_count,
                                       hidden_neuron_count=hidden_neuron_count,
                                       output_neuron_count=1)
        encoder_opt_params = EncoderAdam()
        encoder_opt_params.weight_limit = weight_limit
        encoder_opt_params.dropout_rate = dropout_rate

        encoder = Encoder(
            # Delegate `graph` to `LSTMModel`.
            graph=encoder_graph,
            # The number of epochs in mini-batch training.
            epochs=100,
            # The batch size.
            batch_size=batch_size,
            # Learning rate.
            learning_rate=learning_rate,
            # Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
            learning_attenuate_rate=0.1,
            # Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
            attenuate_epoch=50,
            # The maximum time step `t` referenced in BPTT. If `0`, this class refers to all past data in BPTT.
            bptt_tau=8,
            # Size of Test data set. If this value is `0`, the validation will not be executed.
            test_size_rate=0.3,
            # Loss function.
            computable_loss=MeanSquaredError(),
            # Optimizer.
            opt_params=encoder_opt_params,
            # Verification function.
            verificatable_result=VerificateFunctionApproximation())

        # Init.
        decoder_graph = DecoderGraph()

        # Activation function in LSTM.
        decoder_graph.observed_activating_function = LogisticFunction()
        decoder_graph.input_gate_activating_function = LogisticFunction()
        decoder_graph.forget_gate_activating_function = LogisticFunction()
        decoder_graph.output_gate_activating_function = LogisticFunction()
        decoder_graph.hidden_activating_function = LogisticFunction()
        decoder_graph.output_activating_function = SoftmaxFunction()

        # Initialization strategy.
        # This method initializes each weight matrix and bias from a Gaussian distribution: `np.random.normal(size=...) * 0.01`.
        decoder_graph.create_rnn_cells(input_neuron_count=hidden_neuron_count,
                                       hidden_neuron_count=hidden_neuron_count,
                                       output_neuron_count=input_neuron_count)
        decoder_opt_params = DecoderAdam()
        decoder_opt_params.weight_limit = weight_limit
        decoder_opt_params.dropout_rate = dropout_rate

        decoder = Decoder(
            # Delegate `graph` to `LSTMModel`.
            graph=decoder_graph,
            # The number of epochs in mini-batch training.
            epochs=100,
            # The batch size.
            batch_size=batch_size,
            # Learning rate.
            learning_rate=learning_rate,
            # Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
            learning_attenuate_rate=0.1,
            # Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
            attenuate_epoch=50,
            # The length of sequences.
            seq_len=seq_len,
            # The maximum time step `t` referenced in BPTT. If `0`, this class refers to all past data in BPTT.
            bptt_tau=bptt_tau,
            # Size of Test data set. If this value is `0`, the validation will not be executed.
            test_size_rate=0.3,
            # Loss function.
            computable_loss=MeanSquaredError(),
            # Optimizer.
            opt_params=decoder_opt_params,
            # Verification function.
            verificatable_result=VerificateFunctionApproximation())

        encoder_decoder_controller = EncoderDecoderController(
            encoder=encoder,
            decoder=decoder,
            epochs=epochs,
            batch_size=batch_size,
            learning_rate=learning_rate,
            learning_attenuate_rate=learning_attenuate_rate,
            attenuate_epoch=attenuate_epoch,
            test_size_rate=test_size_rate,
            computable_loss=MeanSquaredError(),
            verificatable_result=VerificateFunctionApproximation(),
            tol=tol,
            tld=tld)

        return encoder_decoder_controller
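Because the decoder reconstructs the input dimensionality, a natural follow-up is to score new sequences by reconstruction error. A minimal sketch, assuming `inference` returns an array shaped like its input:

# Hypothetical anomaly scoring via reconstruction error.
import numpy as np

observed_arr = np.random.normal(size=(100, 8, 20))   # training sequences
test_arr = np.random.normal(size=(20, 8, 20))        # sequences to score
controller = self.__build_encoder_decoder_controller()
controller.learn(observed_arr, observed_arr)
reconstructed_arr = controller.inference(test_arr)
# Mean squared error per sequence; higher values suggest poorer reconstruction.
score_arr = np.mean((test_arr - reconstructed_arr) ** 2, axis=(1, 2))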