def __build_retrospective_encoder(
    self,
    input_neuron_count=20,
    hidden_neuron_count=20,
    weight_limit=0.5,
    dropout_rate=0.5,
    batch_size=20,
    learning_rate=1e-05,
    bptt_tau=8
):
    encoder_graph = ReEncoderGraph()

    encoder_graph.observed_activating_function = TanhFunction()
    encoder_graph.input_gate_activating_function = LogisticFunction()
    encoder_graph.forget_gate_activating_function = LogisticFunction()
    encoder_graph.output_gate_activating_function = LogisticFunction()
    encoder_graph.hidden_activating_function = LogisticFunction()
    encoder_graph.output_activating_function = LogisticFunction()

    encoder_graph.create_rnn_cells(
        input_neuron_count=input_neuron_count,
        hidden_neuron_count=hidden_neuron_count,
        output_neuron_count=1
    )

    encoder_opt_params = EncoderAdam()
    encoder_opt_params.weight_limit = weight_limit
    encoder_opt_params.dropout_rate = dropout_rate

    encoder = ReEncoder(
        graph=encoder_graph,
        epochs=100,
        batch_size=batch_size,
        learning_rate=learning_rate,
        learning_attenuate_rate=0.1,
        attenuate_epoch=50,
        bptt_tau=bptt_tau,
        test_size_rate=0.3,
        computable_loss=MeanSquaredError(),
        opt_params=encoder_opt_params,
        verificatable_result=VerificateFunctionApproximation()
    )

    return encoder

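# A minimal, hypothetical usage sketch for the private builder above. The argument names
# and default values come from its signature; the attribute name `self.__re_encoder` and
# the calling context are assumptions for illustration, not part of this class's API.
#
#     def __init__(self, ...):
#         ...
#         self.__re_encoder = self.__build_retrospective_encoder(
#             input_neuron_count=20,
#             hidden_neuron_count=20,
#             batch_size=20,
#             learning_rate=1e-05,
#             bptt_tau=8
#         )
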
def Main(params_dict):
    logger = getLogger("pydbm")
    handler = StreamHandler()
    if params_dict["debug_mode"] is True:
        handler.setLevel(DEBUG)
        logger.setLevel(DEBUG)
    else:
        handler.setLevel(ERROR)
        logger.setLevel(ERROR)
    logger.addHandler(handler)

    epochs = params_dict["epochs"]
    batch_size = params_dict["batch_size"]
    seq_len = params_dict["seq_len"]
    channel = params_dict["channel"]
    height = params_dict["height"]
    width = params_dict["width"]
    scale = params_dict["scale"]
    training_image_dir = params_dict["training_image_dir"]
    test_image_dir = params_dict["test_image_dir"]

    enc_dim = batch_size * height * width
    dec_dim = batch_size * height * width

    feature_generator = ImageGenerator(
        epochs=epochs,
        batch_size=batch_size,
        training_image_dir=training_image_dir,
        test_image_dir=test_image_dir,
        seq_len=seq_len,
        gray_scale_flag=True,
        wh_size_tuple=(height, width),
        norm_mode="z_score"
    )

    # Init.
    encoder_graph = EncoderGraph()

    # Activation functions in the encoder LSTM.
    encoder_graph.observed_activating_function = TanhFunction()
    encoder_graph.input_gate_activating_function = LogisticFunction()
    encoder_graph.forget_gate_activating_function = LogisticFunction()
    encoder_graph.output_gate_activating_function = LogisticFunction()
    encoder_graph.hidden_activating_function = TanhFunction()
    encoder_graph.output_activating_function = LogisticFunction()

    # Initialization strategy.
    # This method initializes each weight matrix and bias from a Gaussian distribution:
    # `np.random.normal(size=hoge) * 0.01`.
    encoder_graph.create_rnn_cells(
        input_neuron_count=enc_dim,
        hidden_neuron_count=200,
        output_neuron_count=enc_dim
    )

    # Optimizer for the encoder.
    encoder_opt_params = EncoderAdam()
    encoder_opt_params.weight_limit = 0.5
    encoder_opt_params.dropout_rate = 0.5

    encoder = Encoder(
        # Delegate `graph` to `LSTMModel`.
        graph=encoder_graph,
        # The number of epochs in mini-batch training.
        epochs=epochs,
        # The batch size.
        batch_size=batch_size,
        # Learning rate.
        learning_rate=1e-05,
        # Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
        learning_attenuate_rate=0.1,
        # Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
        attenuate_epoch=50,
        # Referred maximum step `t` in BPTT. If `0`, this class refers to all past data in BPTT.
        bptt_tau=8,
        # Size of the test data set. If this value is `0`, the validation will not be executed.
        test_size_rate=0.3,
        # Loss function.
        computable_loss=MeanSquaredError(),
        # Optimizer.
        opt_params=encoder_opt_params,
        # Verification function.
        verificatable_result=VerificateFunctionApproximation(),
        # Tolerance for the optimization.
        # When the loss or score is not improving by at least `tol`
        # for two consecutive iterations, convergence is considered
        # to be reached and training stops.
        tol=0.0
    )

    # Init.
    decoder_graph = DecoderGraph()

    # Activation functions in the decoder LSTM.
    decoder_graph.observed_activating_function = TanhFunction()
    decoder_graph.input_gate_activating_function = LogisticFunction()
    decoder_graph.forget_gate_activating_function = LogisticFunction()
    decoder_graph.output_gate_activating_function = LogisticFunction()
    decoder_graph.hidden_activating_function = TanhFunction()
    decoder_graph.output_activating_function = LogisticFunction()

    # Initialization strategy.
    # This method initializes each weight matrix and bias from a Gaussian distribution:
    # `np.random.normal(size=hoge) * 0.01`.
    decoder_graph.create_rnn_cells(
        input_neuron_count=200,
        hidden_neuron_count=dec_dim,
        output_neuron_count=200
    )

    # Optimizer for the decoder.
    decoder_opt_params = DecoderAdam()
    decoder_opt_params.weight_limit = 0.5
    decoder_opt_params.dropout_rate = 0.5

    decoder = Decoder(
        # Delegate `graph` to `LSTMModel`.
        graph=decoder_graph,
        # The number of epochs in mini-batch training.
        epochs=epochs,
        # The batch size.
        batch_size=batch_size,
        # Learning rate.
        learning_rate=1e-05,
        # Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
        learning_attenuate_rate=0.1,
        # Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
        attenuate_epoch=50,
        # Referred maximum step `t` in BPTT. If `0`, this class refers to all past data in BPTT.
        bptt_tau=8,
        # Size of the test data set. If this value is `0`, the validation will not be executed.
        test_size_rate=0.3,
        # Loss function.
        computable_loss=MeanSquaredError(),
        # Optimizer.
        opt_params=decoder_opt_params,
        # Verification function.
        verificatable_result=VerificateFunctionApproximation(),
        # Tolerance for the optimization.
        # When the loss or score is not improving by at least `tol`
        # for two consecutive iterations, convergence is considered
        # to be reached and training stops.
        tol=0.0
    )

    conv1 = ConvolutionLayer1(
        ConvGraph1(
            activation_function=TanhFunction(),
            filter_num=batch_size,
            channel=channel,
            kernel_size=3,
            scale=scale,
            stride=1,
            pad=1
        )
    )

    conv2 = ConvolutionLayer2(
        ConvGraph2(
            activation_function=TanhFunction(),
            filter_num=batch_size,
            channel=batch_size,
            kernel_size=3,
            scale=scale,
            stride=1,
            pad=1
        )
    )

    cnn = SpatioTemporalAutoEncoder(
        layerable_cnn_list=[conv1, conv2],
        encoder=encoder,
        decoder=decoder,
        epochs=epochs,
        batch_size=batch_size,
        learning_rate=1e-05,
        learning_attenuate_rate=0.1,
        attenuate_epoch=25,
        computable_loss=MeanSquaredError(),
        opt_params=Adam(),
        verificatable_result=VerificateFunctionApproximation(),
        test_size_rate=0.3,
        tol=1e-15,
        save_flag=False
    )

    cnn.learn_generated(feature_generator)

    test_len = 0
    test_limit = 1

    test_arr_list = []
    rec_arr_list = []
    for batch_observed_arr, batch_target_arr, test_batch_observed_arr, test_batch_target_arr in feature_generator.generate():
        test_len += 1

        result_arr = cnn.inference(test_batch_observed_arr)

        for batch in range(test_batch_target_arr.shape[0]):
            for seq in range(test_batch_target_arr[batch].shape[0]):
                arr = test_batch_target_arr[batch][seq][0]
                arr = (arr - arr.min()) / (arr.max() - arr.min())
                arr *= 255
                img = Image.fromarray(np.uint8(arr))
                # `batch` indexes the sample within the mini-batch.
                img.save("result/" + str(batch) + "_" + str(seq) + "_observed.png")

            for seq in range(result_arr[batch].shape[0]):
                arr = result_arr[batch][seq][0]
                arr = (arr - arr.min()) / (arr.max() - arr.min())
                arr *= 255
                img = Image.fromarray(np.uint8(arr))
                img.save("result/" + str(batch) + "_" + str(seq) + "_reconstructed.png")

        if test_len >= test_limit:
            break

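# A hedged invocation sketch: only the dictionary keys are taken from `Main` above; the
# parameter values and the image directory paths are illustrative assumptions, not
# prescribed settings.
if __name__ == "__main__":
    params_dict = {
        "debug_mode": True,
        "epochs": 100,
        "batch_size": 20,
        "seq_len": 10,
        "channel": 1,
        "height": 28,
        "width": 28,
        "scale": 0.1,
        "training_image_dir": "img/training/",
        "test_image_dir": "img/test/"
    }
    Main(params_dict)
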
def __init__(
    self,
    lstm_model=None,
    computable_loss=None,
    batch_size=20,
    input_neuron_count=100,
    hidden_neuron_count=300,
    observed_activating_function=None,
    input_gate_activating_function=None,
    forget_gate_activating_function=None,
    output_gate_activating_function=None,
    hidden_activating_function=None,
    output_activating_function=None,
    seq_len=10,
    join_io_flag=False,
    learning_rate=1e-05,
    learning_attenuate_rate=0.1,
    attenuate_epoch=50
):
    '''
    Init.

    Args:
        lstm_model:                         is-a `LSTM`.
        computable_loss:                    is-a `ComputableLoss`.
        batch_size:                         Batch size.
                                            This parameter will be referred to only when `lstm_model` is `None`.
        input_neuron_count:                 The number of input units.
                                            This parameter will be referred to only when `lstm_model` is `None`.
        hidden_neuron_count:                The number of hidden units.
                                            This parameter will be referred to only when `lstm_model` is `None`.
        observed_activating_function:       is-a `ActivatingFunctionInterface` in the hidden layer.
                                            This parameter will be referred to only when `lstm_model` is `None`.
                                            If `None`, this value will be `TanhFunction`.
        input_gate_activating_function:     is-a `ActivatingFunctionInterface` in the hidden layer.
                                            This parameter will be referred to only when `lstm_model` is `None`.
                                            If `None`, this value will be `LogisticFunction`.
        forget_gate_activating_function:    is-a `ActivatingFunctionInterface` in the hidden layer.
                                            This parameter will be referred to only when `lstm_model` is `None`.
                                            If `None`, this value will be `LogisticFunction`.
        output_gate_activating_function:    is-a `ActivatingFunctionInterface` in the hidden layer.
                                            This parameter will be referred to only when `lstm_model` is `None`.
                                            If `None`, this value will be `LogisticFunction`.
        hidden_activating_function:         is-a `ActivatingFunctionInterface` in the hidden layer.
                                            This parameter will be referred to only when `lstm_model` is `None`.
        output_activating_function:         is-a `ActivatingFunctionInterface` in the output layer.
                                            This parameter will be referred to only when `lstm_model` is `None`.
                                            If `None`, this model outputs from the LSTM's hidden layer during inference.
        seq_len:                            The length of sequences.
                                            This means the referred maximum step `t` in feedforward.
        join_io_flag:                       If this value is `True` and `output_activating_function` is not `None`,
                                            this model outputs tensors that combine observed data points and
                                            inferred data along the series direction.
        learning_rate:                      Learning rate.
        learning_attenuate_rate:            Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
        attenuate_epoch:                    Attenuate the `learning_rate` by a factor of `learning_attenuate_rate`
                                            every `attenuate_epoch`.
                                            Additionally, in relation to regularization,
                                            this class constrains weight matrices every `attenuate_epoch`.
    '''
    if computable_loss is None:
        computable_loss = MeanSquaredError()

    if lstm_model is not None:
        if isinstance(lstm_model, LSTM) is False:
            raise TypeError()
    else:
        # Init.
        graph = LSTMGraph()

        # Activation functions in LSTM.
        if observed_activating_function is None:
            graph.observed_activating_function = TanhFunction()
        else:
            if isinstance(observed_activating_function, ActivatingFunctionInterface) is False:
                raise TypeError()
            graph.observed_activating_function = observed_activating_function

        if input_gate_activating_function is None:
            graph.input_gate_activating_function = LogisticFunction()
        else:
            if isinstance(input_gate_activating_function, ActivatingFunctionInterface) is False:
                raise TypeError()
            graph.input_gate_activating_function = input_gate_activating_function

        if forget_gate_activating_function is None:
            graph.forget_gate_activating_function = LogisticFunction()
        else:
            if isinstance(forget_gate_activating_function, ActivatingFunctionInterface) is False:
                raise TypeError()
            graph.forget_gate_activating_function = forget_gate_activating_function

        if output_gate_activating_function is None:
            graph.output_gate_activating_function = LogisticFunction()
        else:
            if isinstance(output_gate_activating_function, ActivatingFunctionInterface) is False:
                raise TypeError()
            graph.output_gate_activating_function = output_gate_activating_function

        if hidden_activating_function is None:
            graph.hidden_activating_function = TanhFunction()
        else:
            if isinstance(hidden_activating_function, ActivatingFunctionInterface) is False:
                raise TypeError()
            graph.hidden_activating_function = hidden_activating_function

        if output_activating_function is None:
            graph.output_activating_function = TanhFunction()
            self.__output_flag = False
            output_neuron_count = 1
        else:
            graph.output_activating_function = output_activating_function
            self.__output_flag = True
            output_neuron_count = hidden_neuron_count

        # Initialization strategy.
        # This method initializes each weight matrix and bias from a Gaussian distribution:
        # `np.random.normal(size=hoge) * 0.01`.
        graph.create_rnn_cells(
            input_neuron_count=input_neuron_count,
            hidden_neuron_count=hidden_neuron_count,
            output_neuron_count=output_neuron_count
        )

        opt_params = SGD()
        opt_params.weight_limit = 1e+10
        opt_params.dropout_rate = 0.0

        lstm_model = LSTM(
            # Delegate `graph` to `LSTMModel`.
            graph=graph,
            # The number of epochs in mini-batch training.
            epochs=100,
            # The batch size.
            batch_size=batch_size,
            # Learning rate.
            learning_rate=learning_rate,
            # Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
            learning_attenuate_rate=learning_attenuate_rate,
            # Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
            attenuate_epoch=attenuate_epoch,
            # The length of sequences.
            seq_len=seq_len,
            # Referred maximum step `t` in BPTT. If `0`, this class refers to all past data in BPTT.
            bptt_tau=seq_len,
            # Size of the test data set. If this value is `0`, the validation will not be executed.
            test_size_rate=0.3,
            # Loss function.
            computable_loss=computable_loss,
            # Optimizer.
            opt_params=opt_params,
            # Verification function.
            verificatable_result=VerificateFunctionApproximation(),
            tol=0.0
        )

    self.__lstm_model = lstm_model
    self.__seq_len = seq_len
    self.__learning_rate = learning_rate
    self.__join_io_flag = join_io_flag
    self.__computable_loss = computable_loss
    self.__loss_list = []
    self.__epoch_counter = 0
    self.__learning_attenuate_rate = learning_attenuate_rate
    self.__attenuate_epoch = attenuate_epoch

    logger = getLogger("pygan")
    self.__logger = logger

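# A hedged construction sketch for the class that owns the `__init__` above. `ModelClass`
# is a placeholder: the actual class name is not shown in this excerpt. Passing
# `lstm_model=None` lets the constructor build its own default `LSTM` as shown above;
# alternatively, a pre-configured `LSTM` instance may be delegated via `lstm_model`.
#
#     model = ModelClass(
#         batch_size=20,
#         input_neuron_count=100,
#         hidden_neuron_count=300,
#         seq_len=10,
#         learning_rate=1e-05
#     )
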
def learn(
    self,
    sentence_list,
    token_master_list,
    hidden_neuron_count=200,
    epochs=100,
    batch_size=100,
    learning_rate=1e-05,
    learning_attenuate_rate=0.1,
    attenuate_epoch=50,
    bptt_tau=8,
    weight_limit=0.5,
    dropout_rate=0.5,
    test_size_rate=0.3
):
    '''
    Learn.

    Args:
        sentence_list:              The `list` of sentences.
        token_master_list:          Unique `list` of tokens.
        hidden_neuron_count:        The number of units in the hidden layer.
        epochs:                     Epochs of mini-batch training.
        batch_size:                 Batch size of mini-batch training.
        learning_rate:              Learning rate.
        learning_attenuate_rate:    Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
        attenuate_epoch:            Attenuate the `learning_rate` by a factor of `learning_attenuate_rate`
                                    every `attenuate_epoch`.
                                    Additionally, in relation to regularization,
                                    this class constrains weight matrices every `attenuate_epoch`.
        bptt_tau:                   Referred maximum step `t` in Backpropagation Through Time (BPTT).
        weight_limit:               Regularization for the weight matrix: repeatedly multiply the weight matrix
                                    by `0.9` until $\sum_{j=0}^{n}w_{ji}^2 < weight\_limit$.
        dropout_rate:               The probability of dropout.
        test_size_rate:             Size of the test data set. If this value is `0`, the validation will not be executed.
    '''
    observed_arr = self.__setup_dataset(sentence_list, token_master_list)
    self.__logger.debug("Shape of observed data points:")
    self.__logger.debug(observed_arr.shape)

    # Init.
    encoder_graph = EncoderGraph()

    # Activation functions in the encoder LSTM.
    encoder_graph.observed_activating_function = LogisticFunction()
    encoder_graph.input_gate_activating_function = LogisticFunction()
    encoder_graph.forget_gate_activating_function = LogisticFunction()
    encoder_graph.output_gate_activating_function = LogisticFunction()
    encoder_graph.hidden_activating_function = LogisticFunction()
    encoder_graph.output_activating_function = LogisticFunction()

    # Initialization strategy.
    # This method initializes each weight matrix and bias from a Gaussian distribution:
    # `np.random.normal(size=hoge) * 0.01`.
    encoder_graph.create_rnn_cells(
        input_neuron_count=observed_arr.shape[-1],
        hidden_neuron_count=hidden_neuron_count,
        output_neuron_count=observed_arr.shape[-1]
    )

    # Init.
    decoder_graph = DecoderGraph()

    # Activation functions in the decoder LSTM.
    decoder_graph.observed_activating_function = LogisticFunction()
    decoder_graph.input_gate_activating_function = LogisticFunction()
    decoder_graph.forget_gate_activating_function = LogisticFunction()
    decoder_graph.output_gate_activating_function = LogisticFunction()
    decoder_graph.hidden_activating_function = LogisticFunction()
    decoder_graph.output_activating_function = LogisticFunction()

    # Initialization strategy.
    # This method initializes each weight matrix and bias from a Gaussian distribution:
    # `np.random.normal(size=hoge) * 0.01`.
    decoder_graph.create_rnn_cells(
        input_neuron_count=hidden_neuron_count,
        hidden_neuron_count=observed_arr.shape[-1],
        output_neuron_count=hidden_neuron_count
    )

    encoder_opt_params = EncoderAdam()
    encoder_opt_params.weight_limit = weight_limit
    encoder_opt_params.dropout_rate = dropout_rate

    encoder = Encoder(
        # Delegate `graph` to `LSTMModel`.
        graph=encoder_graph,
        # The number of epochs in mini-batch training.
        epochs=epochs,
        # The batch size.
        batch_size=batch_size,
        # Learning rate.
        learning_rate=learning_rate,
        # Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
        learning_attenuate_rate=learning_attenuate_rate,
        # Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
        attenuate_epoch=attenuate_epoch,
        # Referred maximum step `t` in BPTT. If `0`, this class refers to all past data in BPTT.
        bptt_tau=bptt_tau,
        # Size of the test data set. If this value is `0`, the validation will not be executed.
        test_size_rate=test_size_rate,
        # Loss function.
        computable_loss=MeanSquaredError(),
        # Optimizer.
        opt_params=encoder_opt_params,
        # Verification function.
        verificatable_result=VerificateFunctionApproximation(),
        tol=0.0
    )

    decoder_opt_params = DecoderAdam()
    decoder_opt_params.weight_limit = weight_limit
    decoder_opt_params.dropout_rate = dropout_rate

    decoder = Decoder(
        # Delegate `graph` to `LSTMModel`.
        graph=decoder_graph,
        # The number of epochs in mini-batch training.
        epochs=epochs,
        # The batch size.
        batch_size=batch_size,
        # Learning rate.
        learning_rate=learning_rate,
        # Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
        learning_attenuate_rate=learning_attenuate_rate,
        # Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
        attenuate_epoch=attenuate_epoch,
        # Referred maximum step `t` in BPTT. If `0`, this class refers to all past data in BPTT.
        bptt_tau=bptt_tau,
        # Size of the test data set. If this value is `0`, the validation will not be executed.
        test_size_rate=test_size_rate,
        # Loss function.
        computable_loss=MeanSquaredError(),
        # Optimizer.
        opt_params=decoder_opt_params,
        # Verification function.
        verificatable_result=VerificateFunctionApproximation(),
        tol=0.0
    )

    encoder_decoder_controller = EncoderDecoderController(
        encoder=encoder,
        decoder=decoder,
        epochs=epochs,
        batch_size=batch_size,
        learning_rate=learning_rate,
        learning_attenuate_rate=learning_attenuate_rate,
        attenuate_epoch=attenuate_epoch,
        test_size_rate=test_size_rate,
        computable_loss=MeanSquaredError(),
        verificatable_result=VerificateFunctionApproximation(),
        tol=0.0
    )

    # Learning.
    encoder_decoder_controller.learn(observed_arr, observed_arr)

    self.__controller = encoder_decoder_controller
    self.__token_master_list = token_master_list

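# A hedged usage sketch: `abstractor` stands for an instance of the class that owns the
# `learn` method above (the class name is not shown in this excerpt), and the token data
# is illustrative. Per the docstring, `sentence_list` is the `list` of sentences and
# `token_master_list` is the unique `list` of tokens; whether each sentence is a raw
# string or a pre-tokenized list depends on `__setup_dataset`, which is not shown here,
# so pre-tokenized lists are assumed below.
#
#     token_master_list = ["the", "cat", "sat", "on", "mat"]
#     sentence_list = [["the", "cat", "sat"], ["on", "the", "mat"]]
#     abstractor.learn(
#         sentence_list,
#         token_master_list,
#         hidden_neuron_count=200,
#         epochs=100,
#         batch_size=100
#     )
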
def __build_encoder_decoder_controller(
    self,
    input_neuron_count=20,
    hidden_neuron_count=20,
    weight_limit=0.5,
    dropout_rate=0.5,
    epochs=1000,
    batch_size=20,
    learning_rate=1e-05,
    attenuate_epoch=50,
    learning_attenuate_rate=0.1,
    seq_len=8,
    bptt_tau=8,
    test_size_rate=0.3,
    tol=1e-10,
    tld=100.0
):
    encoder_graph = EncoderGraph()

    encoder_graph.observed_activating_function = LogisticFunction()
    encoder_graph.input_gate_activating_function = LogisticFunction()
    encoder_graph.forget_gate_activating_function = LogisticFunction()
    encoder_graph.output_gate_activating_function = LogisticFunction()
    encoder_graph.hidden_activating_function = LogisticFunction()
    encoder_graph.output_activating_function = LogisticFunction()

    encoder_graph.create_rnn_cells(
        input_neuron_count=input_neuron_count,
        hidden_neuron_count=hidden_neuron_count,
        output_neuron_count=1
    )

    encoder_opt_params = EncoderAdam()
    encoder_opt_params.weight_limit = weight_limit
    encoder_opt_params.dropout_rate = dropout_rate

    encoder = Encoder(
        graph=encoder_graph,
        epochs=100,
        batch_size=batch_size,
        learning_rate=learning_rate,
        learning_attenuate_rate=0.1,
        attenuate_epoch=50,
        bptt_tau=8,
        test_size_rate=0.3,
        computable_loss=MeanSquaredError(),
        opt_params=encoder_opt_params,
        verificatable_result=VerificateFunctionApproximation(),
        tol=tol,
        tld=tld
    )

    decoder_graph = DecoderGraph()

    decoder_graph.observed_activating_function = LogisticFunction()
    decoder_graph.input_gate_activating_function = LogisticFunction()
    decoder_graph.forget_gate_activating_function = LogisticFunction()
    decoder_graph.output_gate_activating_function = LogisticFunction()
    decoder_graph.hidden_activating_function = LogisticFunction()
    decoder_graph.output_activating_function = SoftmaxFunction()

    decoder_graph.create_rnn_cells(
        input_neuron_count=hidden_neuron_count,
        hidden_neuron_count=hidden_neuron_count,
        output_neuron_count=input_neuron_count
    )

    decoder_opt_params = DecoderAdam()
    decoder_opt_params.weight_limit = weight_limit
    decoder_opt_params.dropout_rate = dropout_rate

    decoder = Decoder(
        graph=decoder_graph,
        epochs=100,
        batch_size=batch_size,
        learning_rate=learning_rate,
        learning_attenuate_rate=0.1,
        attenuate_epoch=50,
        seq_len=seq_len,
        bptt_tau=bptt_tau,
        test_size_rate=0.3,
        computable_loss=MeanSquaredError(),
        opt_params=decoder_opt_params,
        verificatable_result=VerificateFunctionApproximation()
    )

    encoder_decoder_controller = EncoderDecoderController(
        encoder=encoder,
        decoder=decoder,
        epochs=epochs,
        batch_size=batch_size,
        learning_rate=learning_rate,
        learning_attenuate_rate=learning_attenuate_rate,
        attenuate_epoch=attenuate_epoch,
        test_size_rate=test_size_rate,
        computable_loss=MeanSquaredError(),
        verificatable_result=VerificateFunctionApproximation(),
        tol=tol,
        tld=tld
    )

    return encoder_decoder_controller

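# A hedged usage sketch: the private builder above returns an `EncoderDecoderController`,
# which elsewhere in this code is trained by passing the observed array as both input and
# target (see `encoder_decoder_controller.learn(observed_arr, observed_arr)` in the
# `learn` method above). The calling context, variable names, and the assumed array shape
# (sample, seq_len, input_neuron_count) are illustrative assumptions.
#
#     controller = self.__build_encoder_decoder_controller(
#         input_neuron_count=20,
#         hidden_neuron_count=20,
#         epochs=1000,
#         batch_size=20,
#         seq_len=8,
#         bptt_tau=8
#     )
#     controller.learn(observed_arr, observed_arr)
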
def __init__(
    self,
    lstm_model=None,
    batch_size=20,
    input_neuron_count=100,
    hidden_neuron_count=300,
    hidden_activating_function=None,
    seq_len=10,
    learning_rate=1e-05,
    verbose_mode=False
):
    '''
    Init.

    Args:
        lstm_model:                     is-a `LSTM`.
        batch_size:                     Batch size.
                                        This parameter will be referred to only when `lstm_model` is `None`.
        input_neuron_count:             The number of input units.
                                        This parameter will be referred to only when `lstm_model` is `None`.
        hidden_neuron_count:            The number of hidden units.
                                        This parameter will be referred to only when `lstm_model` is `None`.
        hidden_activating_function:     is-a `ActivatingFunctionInterface` in the hidden layer.
                                        This parameter will be referred to only when `lstm_model` is `None`.
        seq_len:                        The length of sequences.
                                        This means the referred maximum step `t` in feedforward.
        learning_rate:                  Learning rate.
        verbose_mode:                   Verbose mode or not.
    '''
    logger = getLogger("pydbm")
    handler = StreamHandler()
    if verbose_mode is True:
        handler.setLevel(DEBUG)
        logger.setLevel(DEBUG)
    else:
        handler.setLevel(ERROR)
        logger.setLevel(ERROR)
    logger.addHandler(handler)

    if lstm_model is not None:
        if isinstance(lstm_model, LSTM) is False:
            raise TypeError()
    else:
        # Init.
        graph = LSTMGraph()

        # Activation functions in LSTM.
        graph.observed_activating_function = TanhFunction()
        graph.input_gate_activating_function = LogisticFunction()
        graph.forget_gate_activating_function = LogisticFunction()
        graph.output_gate_activating_function = LogisticFunction()
        if hidden_activating_function is None:
            graph.hidden_activating_function = TanhFunction()
        else:
            if isinstance(hidden_activating_function, ActivatingFunctionInterface) is False:
                raise TypeError()
            graph.hidden_activating_function = hidden_activating_function
        graph.output_activating_function = TanhFunction()

        # Initialization strategy.
        # This method initializes each weight matrix and bias from a Gaussian distribution:
        # `np.random.normal(size=hoge) * 0.01`.
        graph.create_rnn_cells(
            input_neuron_count=input_neuron_count,
            hidden_neuron_count=hidden_neuron_count,
            output_neuron_count=1
        )

        opt_params = SGD()
        opt_params.weight_limit = 0.5
        opt_params.dropout_rate = 0.0

        lstm_model = LSTM(
            # Delegate `graph` to `LSTMModel`.
            graph=graph,
            # The number of epochs in mini-batch training.
            epochs=100,
            # The batch size.
            batch_size=batch_size,
            # Learning rate.
            learning_rate=1e-05,
            # Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
            learning_attenuate_rate=0.1,
            # Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
            attenuate_epoch=50,
            # The length of sequences.
            seq_len=seq_len,
            # Referred maximum step `t` in BPTT. If `0`, this class refers to all past data in BPTT.
            bptt_tau=seq_len,
            # Size of the test data set. If this value is `0`, the validation will not be executed.
            test_size_rate=0.3,
            # Loss function.
            computable_loss=MeanSquaredError(),
            # Optimizer.
            opt_params=opt_params,
            # Verification function.
            verificatable_result=VerificateFunctionApproximation(),
            tol=0.0
        )

    self.__lstm_model = lstm_model
    self.__seq_len = seq_len
    self.__learning_rate = learning_rate
    self.__verbose_mode = verbose_mode
    self.__loss_list = []

def __build_encoder_decoder_controller(
    self,
    input_neuron_count=20,
    hidden_neuron_count=20,
    weight_limit=0.5,
    dropout_rate=0.5,
    epochs=1000,
    batch_size=20,
    learning_rate=1e-05,
    attenuate_epoch=50,
    learning_attenuate_rate=0.1,
    seq_len=8,
    bptt_tau=8,
    test_size_rate=0.3,
    tol=1e-10,
    tld=100.0
):
    # Init.
    encoder_graph = EncoderGraph()

    # Activation functions in the encoder LSTM.
    encoder_graph.observed_activating_function = LogisticFunction()
    encoder_graph.input_gate_activating_function = LogisticFunction()
    encoder_graph.forget_gate_activating_function = LogisticFunction()
    encoder_graph.output_gate_activating_function = LogisticFunction()
    encoder_graph.hidden_activating_function = LogisticFunction()
    encoder_graph.output_activating_function = LogisticFunction()

    # Initialization strategy.
    # This method initializes each weight matrix and bias from a Gaussian distribution:
    # `np.random.normal(size=hoge) * 0.01`.
    encoder_graph.create_rnn_cells(
        input_neuron_count=input_neuron_count,
        hidden_neuron_count=hidden_neuron_count,
        output_neuron_count=1
    )

    encoder_opt_params = EncoderAdam()
    encoder_opt_params.weight_limit = weight_limit
    encoder_opt_params.dropout_rate = dropout_rate

    encoder = Encoder(
        # Delegate `graph` to `LSTMModel`.
        graph=encoder_graph,
        # The number of epochs in mini-batch training.
        epochs=100,
        # The batch size.
        batch_size=batch_size,
        # Learning rate.
        learning_rate=learning_rate,
        # Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
        learning_attenuate_rate=0.1,
        # Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
        attenuate_epoch=50,
        # Referred maximum step `t` in BPTT. If `0`, this class refers to all past data in BPTT.
        bptt_tau=8,
        # Size of the test data set. If this value is `0`, the validation will not be executed.
        test_size_rate=0.3,
        # Loss function.
        computable_loss=MeanSquaredError(),
        # Optimizer.
        opt_params=encoder_opt_params,
        # Verification function.
        verificatable_result=VerificateFunctionApproximation()
    )

    # Init.
    decoder_graph = DecoderGraph()

    # Activation functions in the decoder LSTM.
    decoder_graph.observed_activating_function = LogisticFunction()
    decoder_graph.input_gate_activating_function = LogisticFunction()
    decoder_graph.forget_gate_activating_function = LogisticFunction()
    decoder_graph.output_gate_activating_function = LogisticFunction()
    decoder_graph.hidden_activating_function = LogisticFunction()
    decoder_graph.output_activating_function = SoftmaxFunction()

    # Initialization strategy.
    # This method initializes each weight matrix and bias from a Gaussian distribution:
    # `np.random.normal(size=hoge) * 0.01`.
    decoder_graph.create_rnn_cells(
        input_neuron_count=hidden_neuron_count,
        hidden_neuron_count=hidden_neuron_count,
        output_neuron_count=input_neuron_count
    )

    decoder_opt_params = DecoderAdam()
    decoder_opt_params.weight_limit = weight_limit
    decoder_opt_params.dropout_rate = dropout_rate

    decoder = Decoder(
        # Delegate `graph` to `LSTMModel`.
        graph=decoder_graph,
        # The number of epochs in mini-batch training.
        epochs=100,
        # The batch size.
        batch_size=batch_size,
        # Learning rate.
        learning_rate=learning_rate,
        # Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
        learning_attenuate_rate=0.1,
        # Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
        attenuate_epoch=50,
        # The length of sequences.
        seq_len=seq_len,
        # Referred maximum step `t` in BPTT. If `0`, this class refers to all past data in BPTT.
        bptt_tau=bptt_tau,
        # Size of the test data set. If this value is `0`, the validation will not be executed.
        test_size_rate=0.3,
        # Loss function.
        computable_loss=MeanSquaredError(),
        # Optimizer.
        opt_params=decoder_opt_params,
        # Verification function.
        verificatable_result=VerificateFunctionApproximation()
    )

    encoder_decoder_controller = EncoderDecoderController(
        encoder=encoder,
        decoder=decoder,
        epochs=epochs,
        batch_size=batch_size,
        learning_rate=learning_rate,
        learning_attenuate_rate=learning_attenuate_rate,
        attenuate_epoch=attenuate_epoch,
        test_size_rate=test_size_rate,
        computable_loss=MeanSquaredError(),
        verificatable_result=VerificateFunctionApproximation(),
        tol=tol,
        tld=tld
    )

    return encoder_decoder_controller