def _compile(self):
    """Compile the theano entry points for this model.

    Collects the network's parameters, builds SGD-with-momentum update
    rules for the cost, and attaches three compiled callables:

      * ``train_func``      -- one gradient step; returns the cost
      * ``eval_func``       -- returns the cost without updating
      * ``prediction_func`` -- returns the network's output
    """
    output_layer = self._get_output_layer()
    # Deduplicate shared/tied parameters so each parameter gets exactly
    # one update expression (consistent with the other _compile variants
    # in this file that wrap the collections in list(set(...))).
    self.all_trainable_parameters_symbol = list(
        set(layers.all_trainable_parameters(output_layer)))
    self.all_save_parameters_symbol = list(
        set(layers.all_parameters(output_layer)))
    self.updates_symbol = layers.gen_updates_regular_momentum(
        self._get_cost_symbol(),
        self.all_trainable_parameters_symbol,
        learning_rate=self.learning_rate_symbol,
        momentum=0.9,        # NOTE(review): hard-coded; confirm intended
        weight_decay=1e-5)   # NOTE(review): hard-coded; confirm intended
    self.train_func = theano.function(
        [self._get_input_symbol()],
        self._get_cost_symbol(),
        updates=self.updates_symbol)
    self.eval_func = theano.function(
        [self._get_input_symbol()],
        self._get_cost_symbol())
    self.prediction_func = theano.function(
        [self._get_input_symbol()],
        self._get_output_symbol())
def _compile(self):
    """Build the compiled theano functions for training and inference.

    Gathers the (deduplicated) trainable and saveable parameters of the
    output layer, constructs momentum-SGD updates for the cost, and
    attaches:

      * ``train_func``      -- one update step; returns (cost, accuracy)
      * ``eval_func``       -- returns the accuracy, no updates
      * ``prediction_func`` -- returns the raw network output
    """
    top = self._get_output_layer()
    # list(set(...)) collapses parameters that appear more than once
    # (e.g. tied weights) down to a single entry each.
    trainable = layers.all_trainable_parameters(top)
    self.all_trainable_parameters_symbol = list(set(trainable))
    saveable = layers.all_parameters(top)
    self.all_save_parameters_symbol = list(set(saveable))
    self.updates_symbol = layers.gen_updates_regular_momentum(
        self._get_cost_symbol(),
        self.all_trainable_parameters_symbol,
        learning_rate=self.learning_rate_symbol,
        momentum=0.9,
        weight_decay=1e-5)
    # Inference-only function first: input -> network output.
    self.prediction_func = theano.function(
        [self._get_input_symbol()],
        self._get_output_symbol())
    # Training step: applies the momentum updates as a side effect.
    self.train_func = theano.function(
        [self._get_input_symbol(), self._get_y_symbol()],
        [self._get_cost_symbol(), self._get_accuracy_symbol()],
        updates=self.updates_symbol)
    # Evaluation: same inputs as training, but no parameter updates.
    self.eval_func = theano.function(
        [self._get_input_symbol(), self._get_y_symbol()],
        self._get_accuracy_symbol())
def _compile(self):
    """Compile train/eval/prediction theano functions onto ``self``.

    Side effects: sets ``all_trainable_parameters_symbol``,
    ``all_save_parameters_symbol``, ``updates_symbol``, ``train_func``,
    ``eval_func`` and ``prediction_func``.
    """
    net_out = self._get_output_layer()
    # Unique-ify the parameter lists; shared parameters must not be
    # listed (and therefore updated) twice.
    self.all_trainable_parameters_symbol = [
        p for p in set(layers.all_trainable_parameters(net_out))]
    self.all_save_parameters_symbol = [
        p for p in set(layers.all_parameters(net_out))]
    # Regular momentum SGD with a small L2 weight decay.
    self.updates_symbol = layers.gen_updates_regular_momentum(
        self._get_cost_symbol(),
        self.all_trainable_parameters_symbol,
        learning_rate=self.learning_rate_symbol,
        momentum=0.9,
        weight_decay=1e-5)
    self.train_func = theano.function(
        [self._get_input_symbol(), self._get_y_symbol()],
        [self._get_cost_symbol(), self._get_accuracy_symbol()],
        updates=self.updates_symbol)
    self.eval_func = theano.function(
        [self._get_input_symbol(), self._get_y_symbol()],
        self._get_accuracy_symbol())
    self.prediction_func = theano.function(
        [self._get_input_symbol()],
        self._get_output_symbol())
def _compile(self):
    """Compile the theano functions used by this reinforcement model.

    Builds momentum-SGD updates for the cost and attaches compiled
    callables for training, evaluation, prediction, and the Q-learning
    quantities (action, max-Q, target y, and state-action value).

    NOTE(review): unlike some sibling _compile variants, the parameter
    lists here are NOT deduplicated with list(set(...)) — confirm
    whether shared/tied parameters can occur in this network.
    """
    # Parameters updated by training vs. the full set persisted on save.
    self.all_trainable_parameters_symbol = layers.all_trainable_parameters(
        self._get_output_layer())
    self.all_save_parameters_symbol = layers.all_parameters(
        self._get_output_layer())
    # Momentum SGD with hard-coded momentum/weight-decay hyperparameters.
    self.updates_symbol = layers.gen_updates_regular_momentum(
        self._get_cost_symbol(),
        self.all_trainable_parameters_symbol,
        learning_rate=self.learning_rate_symbol,
        momentum=0.9,
        weight_decay=1e-5)
    # One gradient step: (input, action, target y) -> cost, with updates.
    self.train_func = theano.function(
        [self._get_input_symbol(), self.action_symbol, self.y_symbol],
        self._get_cost_symbol(),
        updates=self.updates_symbol)
    # Same signature as train_func but without parameter updates.
    self.eval_func = theano.function(
        [self._get_input_symbol(), self.action_symbol, self.y_symbol],
        self._get_cost_symbol()
    )
    # Raw network output for a batch of inputs.
    self.prediction_func = theano.function(
        [self._get_input_symbol()],
        self._get_output_symbol()
    )
    # New funcs for ReinforcementModel
    # Greedy action selection for the given input.
    self.action_func = theano.function(
        [self._get_input_symbol()],
        self._get_action_symbol()
    )
    # Maximum Q-value over actions for the given input.
    self.max_q_func = theano.function(
        [self._get_input_symbol()],
        self._get_max_q_symbol()
    )
    # Bellman target: built from reward r and discount gamma.
    self.y_func = theano.function(
        [self._get_input_symbol(), self.r_symbol, self.gamma_symbol],
        self._get_y_symbol()
    )
    # Q(s, a): value of the supplied action for the given input.
    self.value_func = theano.function(
        [self._get_input_symbol(), self.action_symbol],
        self._get_value_symbol()
    )
def _compile(self):
    """Compile every theano function this reinforcement model exposes.

    Attaches: ``train_func`` / ``eval_func`` (cost with/without
    momentum-SGD updates), ``prediction_func`` (network output), plus
    the Q-learning helpers ``action_func``, ``max_q_func``, ``y_func``
    and ``value_func``.
    """
    self.all_trainable_parameters_symbol = layers.all_trainable_parameters(
        self._get_output_layer())
    self.all_save_parameters_symbol = layers.all_parameters(
        self._get_output_layer())
    self.updates_symbol = layers.gen_updates_regular_momentum(
        self._get_cost_symbol(),
        self.all_trainable_parameters_symbol,
        learning_rate=self.learning_rate_symbol,
        momentum=0.9,
        weight_decay=1e-5)
    # --- core train / eval / predict -------------------------------
    self.prediction_func = theano.function(
        [self._get_input_symbol()],
        self._get_output_symbol())
    self.train_func = theano.function(
        [self._get_input_symbol(), self.action_symbol, self.y_symbol],
        self._get_cost_symbol(),
        updates=self.updates_symbol)
    self.eval_func = theano.function(
        [self._get_input_symbol(), self.action_symbol, self.y_symbol],
        self._get_cost_symbol())
    # --- reinforcement-learning helpers ----------------------------
    self.action_func = theano.function(
        [self._get_input_symbol()],
        self._get_action_symbol())
    self.max_q_func = theano.function(
        [self._get_input_symbol()],
        self._get_max_q_symbol())
    self.y_func = theano.function(
        [self._get_input_symbol(), self.r_symbol, self.gamma_symbol],
        self._get_y_symbol())
    self.value_func = theano.function(
        [self._get_input_symbol(), self.action_symbol],
        self._get_value_symbol())
def _compile(self):
    """Compile the theano train/eval/prediction functions.

    Builds momentum-SGD update rules for the cost and attaches:

      * ``train_func``      -- one gradient step; returns the cost
      * ``eval_func``       -- returns the cost, no updates
      * ``prediction_func`` -- returns the network output
    """
    output_layer = self._get_output_layer()
    # Deduplicate shared/tied parameters so each one receives a single
    # update expression (consistent with the _compile variants in this
    # file that already wrap these collections in list(set(...))).
    self.all_trainable_parameters_symbol = list(
        set(layers.all_trainable_parameters(output_layer)))
    self.all_save_parameters_symbol = list(
        set(layers.all_parameters(output_layer)))
    self.updates_symbol = layers.gen_updates_regular_momentum(
        self._get_cost_symbol(),
        self.all_trainable_parameters_symbol,
        learning_rate=self.learning_rate_symbol,
        momentum=0.9,        # NOTE(review): hard-coded; confirm intended
        weight_decay=1e-5)   # NOTE(review): hard-coded; confirm intended
    self.train_func = theano.function(
        [self._get_input_symbol()],
        self._get_cost_symbol(),
        updates=self.updates_symbol)
    self.eval_func = theano.function(
        [self._get_input_symbol()],
        self._get_cost_symbol())
    self.prediction_func = theano.function(
        [self._get_input_symbol()],
        self._get_output_symbol())