def test_minibatches_per_step(self):
    _epochs = self.epochs
    self.epochs = 2
    rl_parameters = RLParameters(
        gamma=0.95, target_update_rate=0.9, maxq_learning=True
    )
    rainbow_parameters = RainbowDQNParameters(
        double_q_learning=True, dueling_architecture=False
    )
    training_parameters1 = TrainingParameters(
        layers=self.layers,
        activations=self.activations,
        minibatch_size=1024,
        minibatches_per_step=1,
        learning_rate=0.25,
        optimizer="ADAM",
    )
    training_parameters2 = TrainingParameters(
        layers=self.layers,
        activations=self.activations,
        minibatch_size=128,
        minibatches_per_step=8,
        learning_rate=0.25,
        optimizer="ADAM",
    )
    env1 = Env(self.state_dims, self.action_dims)
    env2 = Env(self.state_dims, self.action_dims)
    model_parameters1 = DiscreteActionModelParameters(
        actions=env1.actions,
        rl=rl_parameters,
        rainbow=rainbow_parameters,
        training=training_parameters1,
    )
    model_parameters2 = DiscreteActionModelParameters(
        actions=env2.actions,
        rl=rl_parameters,
        rainbow=rainbow_parameters,
        training=training_parameters2,
    )
    # minibatch_size / 8 with minibatches_per_step * 8 should give the same result
    logger.info("Training model 1")
    trainer1 = self._train(model_parameters1, env1)
    SummaryWriterContext._reset_globals()
    logger.info("Training model 2")
    trainer2 = self._train(model_parameters2, env2)

    weight1 = trainer1.q_network.fc.layers[-1].weight.detach().numpy()
    weight2 = trainer2.q_network.fc.layers[-1].weight.detach().numpy()

    # Due to numerical stability this tolerance has to be fairly high
    self.assertTrue(np.allclose(weight1, weight2, rtol=0.0, atol=1e-3))

    self.epochs = _epochs
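# Why the two configurations above should match: the gradient of one
# 1024-sample step equals the accumulated gradients of eight 128-sample
# sub-steps. A minimal standalone sketch of that identity (an illustration,
# not part of the test suite), assuming gradients are summed consistently
# across the minibatches_per_step chunks:
import torch

torch.manual_seed(0)
x = torch.randn(1024, 4)
y = torch.randn(1024, 1)
w = torch.zeros(4, 1, requires_grad=True)

((x @ w - y) ** 2).sum().backward()
grad_full = w.grad.clone()

w.grad.zero_()
for chunk_x, chunk_y in zip(x.split(128), y.split(128)):
    ((chunk_x @ w - chunk_y) ** 2).sum().backward()  # grads accumulate in w.grad

assert torch.allclose(grad_full, w.grad)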
def get_sarsa_parameters_factorized(self):
    return ContinuousActionModelParameters(
        rl=RLParameters(
            gamma=DISCOUNT,
            target_update_rate=1.0,
            reward_burnin=100,
            maxq_learning=False,
        ),
        training=TrainingParameters(
            layers=[],
            activations=[],
            factorization_parameters=FactorizationParameters(
                state=FeedForwardParameters(
                    layers=[-1, 128, 64, 32], activations=["relu", "relu", "linear"]
                ),
                action=FeedForwardParameters(
                    layers=[-1, 128, 64, 32], activations=["relu", "relu", "linear"]
                ),
            ),
            minibatch_size=self.minibatch_size,
            learning_rate=0.05,
            optimizer="ADAM",
        ),
        knn=KnnParameters(model_type="DQN"),
        in_training_cpe_evaluation=InTrainingCPEParameters(mdp_sampled_rate=0.1),
    )
def test_input_validation(self):
    with self.assertRaises(Exception):
        # layers and activations sizes incompatible
        MLTrainer(
            "Test model",
            TrainingParameters([2, 1], ['linear', 'relu'], 100, 0.001, 'ADAM'),
        )
    with self.assertRaises(Exception):
        # All values in layers should be positive integers
        MLTrainer(
            "Test model",
            TrainingParameters([-1, 1], ['linear'], 100, 0.001, 'ADAM'),
        )
    with self.assertRaises(Exception):
        # Non-integer layer size should also be rejected
        MLTrainer(
            "Test model",
            TrainingParameters([1.3, 1], ['linear'], 100, 0.001, 'ADAM'),
        )
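# A minimal sketch of the validation the assertions above exercise; this is a
# hypothetical stand-in, not the actual MLTrainer implementation:
def validate_layers(layers, activations):
    if len(activations) != len(layers) - 1:
        raise ValueError("need exactly one activation per layer transition")
    for size in layers:
        if not isinstance(size, int) or size <= 0:
            raise ValueError("layer sizes must be positive integers")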
def test_trainer_maxq(self):
    env = Env(self.state_dims, self.action_dims)
    env.seed(42)
    maxq_parameters = DiscreteActionModelParameters(
        actions=env.actions,
        rl=RLParameters(
            gamma=0.99,
            target_update_rate=1.0,
            reward_burnin=100,
            maxq_learning=True,
        ),
        training=TrainingParameters(
            layers=self.layers,
            activations=self.activations,
            minibatch_size=self.minibatch_size,
            learning_rate=1.0,
            optimizer="ADAM",
        ),
    )
    maxq_trainer = DiscreteActionTrainer(maxq_parameters, env.normalization)
    # predictor = maxq_trainer.predictor()

    logger.info("Generating constant_reward MDPs..")
    states, actions, rewards, next_states, next_actions, is_terminal, possible_next_actions = env.generate_samples_discrete(
        self.num_samples
    )

    logger.info("Preprocessing constant_reward MDPs..")
    tdps = env.preprocess_samples_discrete(
        states,
        actions,
        rewards,
        next_states,
        next_actions,
        is_terminal,
        possible_next_actions,
        self.minibatch_size,
    )

    for epoch in range(self.epochs):
        logger.info("Training.. " + str(epoch))
        for tdp in tdps:
            maxq_trainer.train_numpy(tdp, None)
        logger.info(
            " ".join(
                [
                    "Training epoch",
                    str(epoch),
                    "average q values",
                    str(np.mean(workspace.FetchBlob(maxq_trainer.q_score_output))),
                    "td_loss",
                    str(workspace.FetchBlob(maxq_trainer.loss_blob)),
                ]
            )
        )

    # Q value should converge to very close to 100
    avg_q_value_after_training = np.mean(
        workspace.FetchBlob(maxq_trainer.q_score_output)
    )
    self.assertLess(avg_q_value_after_training, 101)
    self.assertGreater(avg_q_value_after_training, 99)
def get_sarsa_trainer_reward_boost(
    self,
    environment,
    reward_shape,
    dueling,
    use_gpu=False,
    use_all_avail_gpus=False,
):
    rl_parameters = RLParameters(
        gamma=DISCOUNT,
        target_update_rate=1.0,
        reward_burnin=10,
        maxq_learning=False,
        reward_boost=reward_shape,
    )
    training_parameters = TrainingParameters(
        layers=[-1, 128, -1] if dueling else [-1, -1],
        activations=["relu", "linear"] if dueling else ["linear"],
        minibatch_size=self.minibatch_size,
        learning_rate=0.05,
        optimizer="ADAM",
    )
    return DQNTrainer(
        DiscreteActionModelParameters(
            actions=environment.ACTIONS,
            rl=rl_parameters,
            training=training_parameters,
            rainbow=RainbowDQNParameters(
                double_q_learning=True, dueling_architecture=dueling
            ),
        ),
        environment.normalization,
        use_gpu=use_gpu,
        use_all_avail_gpus=use_all_avail_gpus,
    )
def get_sarsa_parameters(
    self, environment, reward_shape, dueling, categorical, clip_grad_norm
):
    rl_parameters = RLParameters(
        gamma=DISCOUNT,
        target_update_rate=1.0,
        maxq_learning=False,
        reward_boost=reward_shape,
    )
    training_parameters = TrainingParameters(
        layers=[-1, 128, -1] if dueling else [-1, -1],
        activations=["relu", "linear"] if dueling else ["linear"],
        minibatch_size=self.minibatch_size,
        learning_rate=0.05,
        optimizer="ADAM",
        clip_grad_norm=clip_grad_norm,
    )
    return DiscreteActionModelParameters(
        actions=environment.ACTIONS,
        rl=rl_parameters,
        training=training_parameters,
        rainbow=RainbowDQNParameters(
            double_q_learning=True,
            dueling_architecture=dueling,
            categorical=categorical,
        ),
    )
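# clip_grad_norm presumably bounds the gradient norm between backward() and the
# optimizer step. A minimal sketch with the standard PyTorch utility (an
# assumption about how the trainer applies it, not the trainer's own code):
import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.Adam(model.parameters(), lr=0.05)
loss = model(torch.randn(8, 4)).pow(2).mean()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)  # cap the norm
optimizer.step()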
def get_sarsa_trainer_reward_boost(self, environment, reward_shape):
    rl_parameters = RLParameters(
        gamma=DISCOUNT,
        target_update_rate=1.0,
        reward_burnin=10,
        maxq_learning=False,
        reward_boost=reward_shape,
    )
    training_parameters = TrainingParameters(
        layers=[-1, -1],
        activations=["linear"],
        minibatch_size=self.minibatch_size,
        learning_rate=0.125,
        optimizer="ADAM",
    )
    return DiscreteActionTrainer(
        DiscreteActionModelParameters(
            actions=environment.ACTIONS,
            rl=rl_parameters,
            training=training_parameters,
            rainbow=RainbowDQNParameters(
                double_q_learning=True, dueling_architecture=False
            ),
            in_training_cpe=InTrainingCPEParameters(mdp_sampled_rate=0.1),
        ),
        environment.normalization,
    )
def get_sarsa_parameters_factorized(self):
    return ContinuousActionModelParameters(
        rl=RLParameters(
            gamma=DISCOUNT,
            target_update_rate=1.0,
            reward_burnin=100,
            maxq_learning=False,
        ),
        training=TrainingParameters(
            # These are used by the reward network
            layers=[-1, 256, 128, -1],
            activations=["relu", "relu", "linear"],
            factorization_parameters=FactorizationParameters(
                state=FeedForwardParameters(
                    layers=[-1, 128, 64], activations=["relu", "linear"]
                ),
                action=FeedForwardParameters(
                    layers=[-1, 128, 64], activations=["relu", "linear"]
                ),
            ),
            minibatch_size=self.minibatch_size,
            learning_rate=0.03,
            optimizer="ADAM",
        ),
        rainbow=RainbowDQNParameters(
            double_q_learning=True, dueling_architecture=False
        ),
        in_training_cpe=InTrainingCPEParameters(mdp_sampled_rate=0.1),
    )
def test_trainer_maxq(self):
    env = Env(self.state_dims, self.action_dims)
    env.seed(42)
    maxq_parameters = DiscreteActionModelParameters(
        actions=env.actions,
        rl=RLParameters(
            gamma=0.99,
            target_update_rate=0.9,
            reward_burnin=100,
            maxq_learning=True,
        ),
        rainbow=RainbowDQNParameters(
            double_q_learning=True, dueling_architecture=False
        ),
        training=TrainingParameters(
            layers=self.layers,
            activations=self.activations,
            minibatch_size=self.minibatch_size,
            learning_rate=0.25,
            optimizer="ADAM",
        ),
    )
    maxq_trainer = DQNTrainer(maxq_parameters, env.normalization)

    logger.info("Generating constant_reward MDPs..")
    states, actions, rewards, next_states, next_actions, is_terminal, possible_actions, possible_next_actions = env.generate_samples_discrete(
        self.num_samples
    )

    logger.info("Preprocessing constant_reward MDPs..")
    for epoch in range(self.epochs):
        tdps = env.preprocess_samples_discrete(
            states,
            actions,
            rewards,
            next_states,
            next_actions,
            is_terminal,
            possible_actions,
            possible_next_actions,
            self.minibatch_size,
        )
        logger.info("Training.. " + str(epoch))
        for tdp in tdps:
            maxq_trainer.train(tdp)
        logger.info(
            " ".join(
                [
                    "Training epoch",
                    str(epoch),
                    "average q values",
                    str(torch.mean(maxq_trainer.all_action_scores)),
                ]
            )
        )

    # Q value should converge to very close to 100
    avg_q_value_after_training = torch.mean(maxq_trainer.all_action_scores)
    self.assertLess(avg_q_value_after_training, 101)
    self.assertGreater(avg_q_value_after_training, 99)
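# Why both constant-reward tests above expect ~100: for an MDP that emits a
# constant reward r every step, the Bellman fixed point is Q = r / (1 - gamma).
# Assuming r = 1 (suggested by the constant_reward name; the env is not shown
# here), gamma = 0.99 gives Q = 100:
gamma, r = 0.99, 1.0
assert abs(r / (1.0 - gamma) - 100.0) < 1e-6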
def main(params):
    # Set minibatch size based on # of devices being used to train
    params["training"]["minibatch_size"] *= minibatch_size_multiplier(
        params["use_gpu"], params["use_all_avail_gpus"]
    )

    rl_parameters = RLParameters(**params["rl"])
    training_parameters = TrainingParameters(**params["training"])
    rainbow_parameters = RainbowDQNParameters(**params["rainbow"])

    model_params = ContinuousActionModelParameters(
        rl=rl_parameters, training=training_parameters, rainbow=rainbow_parameters
    )
    state_normalization = BaseWorkflow.read_norm_file(params["state_norm_data_path"])
    action_normalization = BaseWorkflow.read_norm_file(params["action_norm_data_path"])

    writer = SummaryWriter(log_dir=params["model_output_path"])
    logger.info("TensorBoard logging location is: {}".format(writer.log_dir))

    preprocess_handler = ParametricDqnPreprocessHandler(
        Preprocessor(state_normalization, False),
        Preprocessor(action_normalization, False),
        PandasSparseToDenseProcessor(),
    )

    workflow = ParametricDqnWorkflow(
        model_params,
        preprocess_handler,
        state_normalization,
        action_normalization,
        params["use_gpu"],
        params["use_all_avail_gpus"],
    )

    train_dataset = JSONDatasetReader(
        params["training_data_path"], batch_size=training_parameters.minibatch_size
    )
    eval_dataset = JSONDatasetReader(params["eval_data_path"], batch_size=16)

    with summary_writer_context(writer):
        workflow.train_network(train_dataset, eval_dataset, int(params["epochs"]))

    exporter = ParametricDQNExporter(
        workflow.trainer.q_network,
        PredictorFeatureExtractor(
            state_normalization_parameters=state_normalization,
            action_normalization_parameters=action_normalization,
        ),
        ParametricActionOutputTransformer(),
    )
    return export_trainer_and_predictor(
        workflow.trainer, params["model_output_path"], exporter=exporter
    )  # noqa
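# A hypothetical params dict for this workflow (paths and hyperparameter values
# are placeholders); the keys are exactly the ones main() reads above:
params = {
    "use_gpu": False,
    "use_all_avail_gpus": False,
    "rl": {"gamma": 0.99, "target_update_rate": 0.1, "maxq_learning": True},
    "training": {
        "layers": [-1, 256, 128, -1],
        "activations": ["relu", "relu", "linear"],
        "minibatch_size": 1024,
        "learning_rate": 0.01,
        "optimizer": "ADAM",
    },
    "rainbow": {"double_q_learning": True, "dueling_architecture": False},
    "state_norm_data_path": "/path/to/state_norm.json",
    "action_norm_data_path": "/path/to/action_norm.json",
    "training_data_path": "/path/to/train.json",
    "eval_data_path": "/path/to/eval.json",
    "model_output_path": "/tmp/parametric_dqn",
    "epochs": 10,
}
# main(params)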
def test_pure_q_learning_all_cheat(self):
    q_learning_parameters = DiscreteActionModelParameters(
        actions=self._env.ACTIONS,
        rl=self._rl_parameters_all_cheat_maxq,
        training=TrainingParameters(
            layers=[self._env.width * self._env.height, 1],
            activations=['linear'],
            minibatch_size=self.minibatch_size,
            learning_rate=0.05,
            optimizer='SGD',
            lr_policy='fixed',
        ),
    )

    trainer = DiscreteActionTrainer(
        q_learning_parameters,
        self._env.normalization,
    )
    predictor = trainer.predictor()

    policy = _build_policy(self._env, predictor, 1)
    initial_state = self._env.reset()
    iteration_result = _collect_samples(self._env, policy, 20000, initial_state)
    num_iterations = 50
    for _ in range(num_iterations):
        tdps = self._env.preprocess_samples(
            iteration_result.states,
            iteration_result.actions,
            iteration_result.rewards,
            iteration_result.next_states,
            iteration_result.next_actions,
            iteration_result.is_terminals,
            iteration_result.possible_next_actions,
            None,
            self.minibatch_size,
        )
        for tdp in tdps:
            trainer.train_numpy(tdp, None)
        initial_state = self._env.reset()
        policy = _build_policy(self._env, predictor, 0.1)
        iteration_result = _collect_samples(self._env, policy, 20000, initial_state)

    policy = _build_policy(self._env, predictor, 0)
    initial_state = self._env.reset()
    iteration_result = _collect_samples(self._env, policy, 1000, initial_state)
    # 100% should be cheat. Will fix in the future.
    self.assertGreater(np.sum(np.array(iteration_result.actions) == 'C'), 800)
def test_trainer_maxq(self):
    environment = Gridworld()
    maxq_sarsa_parameters = DiscreteActionModelParameters(
        actions=environment.ACTIONS,
        rl=RLParameters(
            gamma=DISCOUNT,
            target_update_rate=0.5,
            reward_burnin=10,
            maxq_learning=True,
        ),
        training=TrainingParameters(
            layers=[-1, 1],
            activations=["linear"],
            minibatch_size=self.minibatch_size,
            learning_rate=0.01,
            optimizer="ADAM",
        ),
    )
    # construct the new trainer that uses maxq
    maxq_trainer = DiscreteActionTrainer(
        maxq_sarsa_parameters, environment.normalization
    )
    samples = environment.generate_samples(100000, 1.0)
    predictor = maxq_trainer.predictor()
    tdps = environment.preprocess_samples(samples, self.minibatch_size)
    evaluator = GridworldEvaluator(environment, True)

    evaluator.evaluate(predictor)
    print(
        "Pre-Training eval: ",
        evaluator.mc_loss[-1],
        evaluator.reward_doubly_robust[-1],
    )
    self.assertGreater(evaluator.mc_loss[-1], 0.3)

    for _ in range(5):
        for tdp in tdps:
            maxq_trainer.train_numpy(tdp, None)

    evaluator.evaluate(predictor)
    print(
        "Post-Training eval: ",
        evaluator.mc_loss[-1],
        evaluator.reward_doubly_robust[-1],
    )
    self.assertLess(evaluator.mc_loss[-1], 0.1)
    self.assertGreater(
        evaluator.reward_doubly_robust[-1], evaluator.reward_doubly_robust[-2]
    )
def get_sarsa_parameters(self):
    return ContinuousActionModelParameters(
        rl=RLParameters(
            gamma=DISCOUNT, target_update_rate=1.0, maxq_learning=False
        ),
        training=TrainingParameters(
            layers=[-1, 256, 128, -1],
            activations=["relu", "relu", "linear"],
            minibatch_size=self.minibatch_size,
            learning_rate=0.05,
            optimizer="ADAM",
        ),
        rainbow=RainbowDQNParameters(
            double_q_learning=True, dueling_architecture=False
        ),
    )
def get_sarsa_parameters(self):
    return ContinuousActionModelParameters(
        rl=RLParameters(
            gamma=DISCOUNT,
            target_update_rate=0.5,
            reward_burnin=10,
            maxq_learning=False,
        ),
        training=TrainingParameters(
            layers=[-1, 200, 1],
            activations=['linear', 'linear'],
            minibatch_size=1024,
            learning_rate=0.01,
            optimizer='ADAM',
        ),
        knn=KnnParameters(model_type='DQN'),
    )
def get_sarsa_parameters(self):
    return ContinuousActionModelParameters(
        rl=RLParameters(
            gamma=DISCOUNT,
            target_update_rate=1.0,
            reward_burnin=100,
            maxq_learning=False,
        ),
        training=TrainingParameters(
            layers=[-1, 256, 128, -1],
            activations=["relu", "relu", "linear"],
            minibatch_size=self.minibatch_size,
            learning_rate=0.1,
            optimizer="ADAM",
        ),
        knn=KnnParameters(model_type="DQN"),
    )
def get_sarsa_trainer(self, environment):
    rl_parameters = RLParameters(
        gamma=DISCOUNT,
        target_update_rate=0.5,
        reward_burnin=10,
        maxq_learning=False,
    )
    training_parameters = TrainingParameters(
        layers=[-1, 1],
        activations=['linear'],
        minibatch_size=1024,
        learning_rate=0.01,
        optimizer='ADAM',
    )
    return DiscreteActionTrainer(
        environment.normalization,
        DiscreteActionModelParameters(
            actions=environment.ACTIONS,
            rl=rl_parameters,
            training=training_parameters,
        ),
    )
def train_network(params):
    logger.info("Running DQN workflow with params:")
    logger.info(params)

    action_names = np.array(params["actions"])
    rl_parameters = RLParameters(**params["rl"])
    training_parameters = TrainingParameters(**params["training"])
    rainbow_parameters = RainbowDQNParameters(**params["rainbow"])

    trainer_params = DiscreteActionModelParameters(
        actions=params["actions"],
        rl=rl_parameters,
        training=training_parameters,
        rainbow=rainbow_parameters,
    )

    dataset = JSONDataset(
        params["training_data_path"], batch_size=training_parameters.minibatch_size
    )
    norm_data = JSONDataset(params["state_norm_data_path"])
    state_normalization = read_norm_params(norm_data.read_all())
    num_batches = int(len(dataset) / training_parameters.minibatch_size)

    logger.info(
        "Read in batch data set {} of size {} examples. Data split "
        "into {} batches of size {}.".format(
            params["training_data_path"],
            len(dataset),
            num_batches,
            training_parameters.minibatch_size,
        )
    )

    trainer = DQNTrainer(trainer_params, state_normalization, params["use_gpu"])

    for epoch in range(params["epochs"]):
        for batch_idx in range(num_batches):
            helpers.report_training_status(
                batch_idx, num_batches, epoch, params["epochs"]
            )
            batch = dataset.read_batch(batch_idx)
            tdp = preprocess_batch_for_training(
                action_names, batch, state_normalization
            )
            trainer.train(tdp)

    logger.info(
        "Training finished. Saving PyTorch model to {}".format(
            params["pytorch_output_path"]
        )
    )
    helpers.save_model_to_file(trainer, params["pytorch_output_path"])
def test_adam_weights(self):
    num_features = 4
    num_outputs = 1
    trainer = MLTrainer(
        "Linear Regression",
        TrainingParameters(
            layers=[num_features, num_outputs],
            activations=['linear'],
            minibatch_size=100,
            learning_rate=0.1,
            optimizer='ADAM',
        ),
    )
    dist = get_weight_dist(
        trainer, num_features=num_features, num_outputs=num_outputs
    )
    self.assertLess(dist, 0.1)
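# gen_training_data and get_weight_dist are not shown here; a standalone
# analogue (a sketch under assumptions, not the real helpers) of what these
# weight tests verify: fit a linear model on synthetic data and measure the
# distance between learned and true weights.
import numpy as np

rng = np.random.RandomState(0)
true_w = rng.randn(1, 4)
x = rng.randn(100, 4).astype(np.float32)
y = x @ true_w.T
learned_w, *_ = np.linalg.lstsq(x, y, rcond=None)  # exact least-squares fit
assert np.linalg.norm(learned_w.T - true_w) < 0.1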
def test_trainer_maxq(self):
    environment = Gridworld()
    maxq_sarsa_parameters = DiscreteActionModelParameters(
        actions=environment.ACTIONS,
        rl=RLParameters(
            gamma=DISCOUNT,
            target_update_rate=0.5,
            reward_burnin=10,
            maxq_learning=True,
        ),
        training=TrainingParameters(
            layers=[-1, 1],
            activations=['linear'],
            minibatch_size=self.minibatch_size,
            learning_rate=0.01,
            optimizer='ADAM',
        ),
    )
    # construct the new trainer that uses maxq
    maxq_trainer = DiscreteActionTrainer(
        maxq_sarsa_parameters,
        environment.normalization,
    )
    states, actions, rewards, next_states, next_actions, is_terminal, \
        possible_next_actions, reward_timelines = \
        environment.generate_samples(100000, 1.0)
    predictor = maxq_trainer.predictor()
    tdps = environment.preprocess_samples(
        states,
        actions,
        rewards,
        next_states,
        next_actions,
        is_terminal,
        possible_next_actions,
        reward_timelines,
        self.minibatch_size,
    )
    evaluator = GridworldEvaluator(environment, True)
    print("Pre-Training eval", evaluator.evaluate(predictor))
    self.assertGreater(evaluator.evaluate(predictor), 0.3)

    for _ in range(2):
        for tdp in tdps:
            maxq_trainer.stream_tdp(tdp, None)
        evaluator.evaluate(predictor)

    print("Post-Training eval", evaluator.evaluate(predictor))
    self.assertLess(evaluator.evaluate(predictor), 0.1)
def test_sarsa_layer_validation(self):
    env = Gridworld()
    invalid_sarsa_params = DiscreteActionModelParameters(
        actions=env.ACTIONS,
        rl=RLParameters(
            gamma=DISCOUNT,
            target_update_rate=0.5,
            reward_burnin=10,
            maxq_learning=False,
        ),
        training=TrainingParameters(
            layers=[-1, 3],
            activations=['linear'],
            minibatch_size=32,
            learning_rate=0.1,
            optimizer='SGD',
        ),
    )
    with self.assertRaises(Exception):
        # layers[-1] should be 1
        DiscreteActionTrainer(env.normalization, invalid_sarsa_params)
def main(args):
    parser = argparse.ArgumentParser(
        description="Train a RL net to play in an OpenAI Gym environment."
    )
    parser.add_argument(
        "-p", "--parameters", help="Path to JSON parameters file."
    )
    parser.add_argument(
        "-s",
        "--score-bar",
        help="Bar for averaged tests scores.",
        type=float,
        default=None,
    )
    parser.add_argument(
        "-g",
        "--gpu_id",
        help="If set, will use GPU with specified ID. Otherwise will use CPU.",
        default=USE_CPU,
    )
    args = parser.parse_args(args)

    with open(args.parameters, 'r') as f:
        params = json.load(f)

    rl_settings = params['rl']
    training_settings = params['training']
    rl_settings['gamma'] = rl_settings['reward_discount_factor']
    del rl_settings['reward_discount_factor']
    training_settings['gamma'] = training_settings['learning_rate_decay']
    del training_settings['learning_rate_decay']

    env_type = params['env']
    env = OpenAIGymEnvironment(env_type, rl_settings['epsilon'])

    trainer_params = DiscreteActionModelParameters(
        actions=env.actions,
        rl=RLParameters(**rl_settings),
        training=TrainingParameters(**training_settings),
    )

    device = core.DeviceOption(
        caffe2_pb2.CPU if args.gpu_id == USE_CPU else caffe2_pb2.CUDA,
        args.gpu_id,
    )
    with core.DeviceScope(device):
        trainer = DiscreteActionTrainer(
            env.normalization, trainer_params, skip_normalization=True
        )
        return run(
            env,
            trainer,
            "{} test run".format(env_type),
            args.score_bar,
            **params["run_details"]
        )
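# A hypothetical parameters file for this entry point, shown as the dict it
# parses to (all values illustrative). Note the legacy keys
# reward_discount_factor and learning_rate_decay, which main() renames to the
# gamma fields expected by RLParameters and TrainingParameters:
params = {
    "env": "CartPole-v0",
    "rl": {
        "reward_discount_factor": 0.99,
        "target_update_rate": 0.2,
        "reward_burnin": 10,
        "maxq_learning": True,
        "epsilon": 0.2,
    },
    "training": {
        "layers": [-1, 128, -1],
        "activations": ["relu", "linear"],
        "minibatch_size": 128,
        "learning_rate": 0.01,
        "optimizer": "ADAM",
        "learning_rate_decay": 0.999,
    },
    "run_details": {},  # forwarded to run() as keyword arguments
}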
def test_sgd_weights(self):
    num_features = 4
    num_outputs = 1
    trainer = MLTrainer(
        "Linear Regression",
        TrainingParameters(
            layers=[num_features, num_outputs],
            activations=['linear'],
            minibatch_size=100,
            learning_rate=0.001,
            optimizer='SGD',
            gamma=0.9999,
            lr_policy='step',
        ),
    )
    dist = get_weight_dist(
        trainer, num_features=num_features, num_outputs=num_outputs
    )
    self.assertLess(dist, 0.1)
def get_sarsa_parameters(self):
    return ContinuousActionModelParameters(
        rl=RLParameters(
            gamma=DISCOUNT,
            target_update_rate=1.0,
            reward_burnin=100,
            maxq_learning=False,
        ),
        training=TrainingParameters(
            layers=[-1, 256, 128, -1],
            activations=["relu", "relu", "linear"],
            minibatch_size=self.minibatch_size,
            learning_rate=0.05,
            optimizer="ADAM",
        ),
        knn=KnnParameters(model_type="DQN"),
        rainbow=RainbowDQNParameters(
            double_q_learning=True, dueling_architecture=False
        ),
        in_training_cpe=InTrainingCPEParameters(mdp_sampled_rate=0.1),
    )
def test_pure_q_learning_all_cheat(self):
    q_learning_parameters = DiscreteActionModelParameters(
        actions=self._env.ACTIONS,
        rl=self._rl_parameters_all_cheat_maxq,
        training=TrainingParameters(
            layers=[self._env.width * self._env.height, 1],
            activations=['linear'],
            minibatch_size=32,
            learning_rate=0.05,
            optimizer='SGD',
            lr_policy='fixed',
        ),
    )

    trainer = DiscreteActionTrainer(
        self._env.normalization, q_learning_parameters
    )
    predictor = trainer.predictor()

    policy = _build_policy(self._env, predictor, 1)
    initial_state = self._env.reset()
    iteration_result = _collect_samples(
        self._env, policy, 10000, initial_state
    )
    num_iterations = 50
    for _ in range(num_iterations):
        policy = _build_policy(self._env, predictor, 0)
        tdp = self._env.preprocess_samples(
            iteration_result.states,
            iteration_result.actions,
            iteration_result.rewards,
            iteration_result.next_states,
            iteration_result.next_actions,
            iteration_result.is_terminals,
            iteration_result.possible_next_actions,
            None,
        )
        trainer.stream_tdp(tdp, None)
        initial_state = iteration_result.current_state

    initial_state = self._env.reset()
    iteration_result = _collect_samples(
        self._env, policy, 10000, initial_state
    )
    self.assertTrue(np.all(np.array(iteration_result.actions) == 'C'))
def test_scaledoutput_regression_weights(self):
    # This tests the scaled inner-product final layer: the same regression
    # case, but transformed to (wx + b) * (1/y) = 1. The learned weights
    # should then be correct up to a per-column scaling.
    num_features = 4
    num_training_samples = 100
    num_outputs = 2
    training_inputs, training_outputs, weights, _ = gen_training_data(
        num_features, num_training_samples, num_outputs, 0.01
    )

    trainer = MLTrainerIP(
        "Linear Regression",
        TrainingParameters(
            layers=[num_features, num_outputs],
            activations=['linear'],
            minibatch_size=100,
            learning_rate=0.1,
        ),
        scaled_output=True,
    )

    training_labels = np.ones((training_inputs.shape[0], 1)).astype(np.float32)
    training_scale = np.where(
        training_outputs == 0, 0, 1.0 / (num_outputs * training_outputs)
    )
    for _ in range(10000):
        trainer.train_wexternal(training_inputs, training_labels, training_scale)

    trained_weights = np.concatenate(
        [workspace.FetchBlob(w) for w in trainer.weights], axis=0
    ).transpose()

    # rescale after training to match the scale of the given weights
    scale_trained_weights = np.copy(trained_weights)
    for i in range(trained_weights.shape[1]):
        scaling = 1.0 / trained_weights[0, i] * weights[0, i]
        scale_trained_weights[:, i] = trained_weights[:, i] * scaling

    dist = np.linalg.norm(scale_trained_weights - weights)
    self.assertLess(dist, 0.1)
def test_trainer_maxq(self):
    env = Env(self.state_dims, self.action_dims)
    maxq_parameters = DiscreteActionModelParameters(
        actions=env.actions,
        rl=RLParameters(gamma=0.95, target_update_rate=0.9, maxq_learning=True),
        rainbow=RainbowDQNParameters(
            double_q_learning=True, dueling_architecture=False
        ),
        training=TrainingParameters(
            layers=self.layers,
            activations=self.activations,
            minibatch_size=1024,
            learning_rate=0.25,
            optimizer="ADAM",
        ),
    )
    # Q value should converge to very close to 20
    trainer = self._train(maxq_parameters, env)
    avg_q_value_after_training = torch.mean(trainer.all_action_scores)
    self.assertLess(avg_q_value_after_training, 22)
    self.assertGreater(avg_q_value_after_training, 18)
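# Why ~20: as with the constant-reward tests above, the Bellman fixed point is
# Q = r / (1 - gamma); assuming r = 1 (the env is not shown here), gamma = 0.95
# gives Q = 20, matching the 18-22 assertion band:
gamma, r = 0.95, 1.0
assert abs(r / (1.0 - gamma) - 20.0) < 1e-6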
def get_sarsa_trainer_reward_boost(self, environment, reward_shape):
    rl_parameters = RLParameters(
        gamma=DISCOUNT,
        target_update_rate=0.5,
        reward_burnin=10,
        maxq_learning=False,
        reward_boost=reward_shape,
    )
    training_parameters = TrainingParameters(
        layers=[-1, -1],
        activations=["linear"],
        minibatch_size=self.minibatch_size,
        learning_rate=0.01,
        optimizer="ADAM",
    )
    return DiscreteActionTrainer(
        DiscreteActionModelParameters(
            actions=environment.ACTIONS,
            rl=rl_parameters,
            training=training_parameters,
        ),
        environment.normalization,
    )
def run_gym(params, score_bar, gpu_id):
    rl_settings = params['rl']
    training_settings = params['training']
    rl_settings['gamma'] = rl_settings['reward_discount_factor']
    del rl_settings['reward_discount_factor']
    training_settings['gamma'] = training_settings['learning_rate_decay']
    del training_settings['learning_rate_decay']

    env_type = params['env']
    env = OpenAIGymEnvironment(env_type, rl_settings['epsilon'])

    trainer_params = DiscreteActionModelParameters(
        actions=env.actions,
        rl=RLParameters(**rl_settings),
        training=TrainingParameters(**training_settings),
    )

    device = core.DeviceOption(
        caffe2_pb2.CPU if gpu_id == USE_CPU else caffe2_pb2.CUDA,
        gpu_id,
    )
    with core.DeviceScope(device):
        if env.img:
            trainer = DiscreteActionConvTrainer(
                DiscreteActionConvModelParameters(
                    fc_parameters=trainer_params,
                    cnn_parameters=CNNModelParameters(**params['cnn']),
                    num_input_channels=env.num_input_channels,
                    img_height=env.height,
                    img_width=env.width,
                ),
                env.normalization,
            )
        else:
            trainer = DiscreteActionTrainer(
                trainer_params,
                env.normalization,
            )
        return run(
            env,
            trainer,
            "{} test run".format(env_type),
            score_bar,
            **params["run_details"]
        )
def setUp(self):
    super(self.__class__, self).setUp()
    np.random.seed(0)
    random.seed(0)
    self.state_dim, self.action_dim = 2, 3
    self._env = MockEnv(self.state_dim, self.action_dim)
    self._rl_parameters = RLParameters(
        gamma=0.9,
        target_update_rate=0.5,
        reward_burnin=10,
        maxq_learning=False,
    )
    self._rl_parameters_maxq = RLParameters(
        gamma=0.9,
        target_update_rate=0.5,
        reward_burnin=10,
        maxq_learning=True,
    )
    # Note: self._rl_parameters is rebound here, wrapping the RLParameters
    # instance created above
    self._rl_parameters = ContinuousActionModelParameters(
        rl=self._rl_parameters,
        training=TrainingParameters(
            layers=[-1, self._env.num_states * self._env.num_actions * 2, 1],
            activations=['linear', 'linear'],
            minibatch_size=1024,
            learning_rate=0.01,
            optimizer='ADAM',
        ),
        knn=KnnParameters(model_type='DQN'),
    )
    self._trainer = ContinuousActionDQNTrainer(
        self._env.normalization,
        self._env.normalization_action,
        self._rl_parameters,
    )
def create_trainer(model_type, params, rl_parameters, use_gpu, env):
    if model_type == ModelType.PYTORCH_DISCRETE_DQN.value:
        training_parameters = params["training"]
        if isinstance(training_parameters, dict):
            training_parameters = TrainingParameters(**training_parameters)
        rainbow_parameters = params["rainbow"]
        if isinstance(rainbow_parameters, dict):
            rainbow_parameters = RainbowDQNParameters(**rainbow_parameters)
        if env.img:
            assert (
                training_parameters.cnn_parameters is not None
            ), "Missing CNN parameters for image input"
            if isinstance(training_parameters.cnn_parameters, dict):
                training_parameters.cnn_parameters = CNNParameters(
                    **training_parameters.cnn_parameters
                )
            training_parameters.cnn_parameters.conv_dims[0] = env.num_input_channels
            training_parameters.cnn_parameters.input_height = env.height
            training_parameters.cnn_parameters.input_width = env.width
            training_parameters.cnn_parameters.num_input_channels = (
                env.num_input_channels
            )
        else:
            assert (
                training_parameters.cnn_parameters is None
            ), "Extra CNN parameters for non-image input"
        trainer_params = DiscreteActionModelParameters(
            actions=env.actions,
            rl=rl_parameters,
            training=training_parameters,
            rainbow=rainbow_parameters,
        )
        trainer = DQNTrainer(trainer_params, env.normalization, use_gpu)
    elif model_type == ModelType.PYTORCH_PARAMETRIC_DQN.value:
        training_parameters = params["training"]
        if isinstance(training_parameters, dict):
            training_parameters = TrainingParameters(**training_parameters)
        rainbow_parameters = params["rainbow"]
        if isinstance(rainbow_parameters, dict):
            rainbow_parameters = RainbowDQNParameters(**rainbow_parameters)
        if env.img:
            assert (
                training_parameters.cnn_parameters is not None
            ), "Missing CNN parameters for image input"
            training_parameters.cnn_parameters.conv_dims[0] = env.num_input_channels
        else:
            assert (
                training_parameters.cnn_parameters is None
            ), "Extra CNN parameters for non-image input"
        trainer_params = ContinuousActionModelParameters(
            rl=rl_parameters, training=training_parameters, rainbow=rainbow_parameters
        )
        trainer = ParametricDQNTrainer(
            trainer_params, env.normalization, env.normalization_action, use_gpu
        )
    elif model_type == ModelType.CONTINUOUS_ACTION.value:
        training_parameters = params["shared_training"]
        if isinstance(training_parameters, dict):
            training_parameters = DDPGTrainingParameters(**training_parameters)
        actor_parameters = params["actor_training"]
        if isinstance(actor_parameters, dict):
            actor_parameters = DDPGNetworkParameters(**actor_parameters)
        critic_parameters = params["critic_training"]
        if isinstance(critic_parameters, dict):
            critic_parameters = DDPGNetworkParameters(**critic_parameters)
        trainer_params = DDPGModelParameters(
            rl=rl_parameters,
            shared_training=training_parameters,
            actor_training=actor_parameters,
            critic_training=critic_parameters,
        )
        action_range_low = env.action_space.low.astype(np.float32)
        action_range_high = env.action_space.high.astype(np.float32)
        trainer = DDPGTrainer(
            trainer_params,
            env.normalization,
            env.normalization_action,
            torch.from_numpy(action_range_low).unsqueeze(dim=0),
            torch.from_numpy(action_range_high).unsqueeze(dim=0),
            use_gpu,
        )
    elif model_type == ModelType.SOFT_ACTOR_CRITIC.value:
        trainer_params = SACModelParameters(
            rl=rl_parameters,
            training=SACTrainingParameters(
                minibatch_size=params["sac_training"]["minibatch_size"],
                use_2_q_functions=params["sac_training"]["use_2_q_functions"],
                q_network_optimizer=OptimizerParameters(
                    **params["sac_training"]["q_network_optimizer"]
                ),
                value_network_optimizer=OptimizerParameters(
                    **params["sac_training"]["value_network_optimizer"]
                ),
                actor_network_optimizer=OptimizerParameters(
                    **params["sac_training"]["actor_network_optimizer"]
                ),
                entropy_temperature=params["sac_training"]["entropy_temperature"],
            ),
            q_network=FeedForwardParameters(**params["sac_q_training"]),
            value_network=FeedForwardParameters(**params["sac_value_training"]),
            actor_network=FeedForwardParameters(**params["sac_actor_training"]),
        )
        trainer = get_sac_trainer(env, trainer_params, use_gpu)
    else:
        raise NotImplementedError("Model of type {} not supported".format(model_type))

    return trainer