    'yars_revenge', 'zaxxon'
]

atari_deterministic_v4 = {e: "{}".format(lower_under_to_upper(e) + 'Deterministic-v4') for e in gym_atari_envs}
atari_no_frameskip_v4 = {e: "{}".format(lower_under_to_upper(e) + 'NoFrameskip-v4') for e in gym_atari_envs}

# default atari schedule used in the DeepMind papers
atari_schedule = ScheduleParameters()
atari_schedule.improve_steps = EnvironmentSteps(50000000)
atari_schedule.steps_between_evaluation_periods = EnvironmentSteps(250000)
atari_schedule.evaluation_steps = EnvironmentSteps(135000)
atari_schedule.heatup_steps = EnvironmentSteps(1)


class MaxOverFramesAndFrameskipEnvWrapper(gym.Wrapper):
    def __init__(self, env, frameskip=4, max_over_num_frames=2):
        super().__init__(env)
        self.max_over_num_frames = max_over_num_frames
        self.observations_stack = []
        self.frameskip = frameskip
        self.first_frame_to_max_over = self.frameskip - self.max_over_num_frames

    def reset(self):
        return self.env.reset()

    def step(self, action):
        # NOTE: the body of step() was truncated in the source. The following is a
        # plausible completion (assumes `import numpy as np` at module level):
        # repeat the action `frameskip` times, keep the last `max_over_num_frames`
        # observations, and return their pixel-wise maximum -- the standard Atari
        # flicker-removal trick.
        total_reward = 0.0
        done = None
        info = None
        for i in range(self.frameskip):
            observation, reward, done, info = self.env.step(action)
            if i >= self.first_frame_to_max_over:
                self.observations_stack.append(observation)
            total_reward += reward
            if done:
                # make sure there is at least one frame to max over
                if not self.observations_stack:
                    self.observations_stack.append(observation)
                break
        max_over_frames_observation = np.max(self.observations_stack, axis=0)
        self.observations_stack = []
        return max_over_frames_observation, total_reward, done, info
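# A minimal usage sketch for the wrapper above (illustrative, not part of the
# original module): assumes gym and the Atari ROMs are installed, and the env
# id below is just an example.
if __name__ == '__main__':
    import gym

    # Wrap a NoFrameskip env so that frame-skipping and max-pooling over the
    # last two frames are handled by the wrapper rather than the raw env.
    env = MaxOverFramesAndFrameskipEnvWrapper(gym.make('BreakoutNoFrameskip-v4'),
                                              frameskip=4, max_over_num_frames=2)
    obs = env.reset()
    obs, reward, done, info = env.step(env.action_space.sample())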
from rl_coach.filters.observation.observation_to_uint8_filter import ObservationToUInt8Filter
from rl_coach.filters.observation.observation_clipping_filter import ObservationClippingFilter
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.graph_managers.graph_manager import ScheduleParameters
from rl_coach.schedules import LinearSchedule
from rl_coach.base_parameters import DistributedCoachSynchronizationType, EmbedderScheme

####################
# Graph Scheduling #
####################

schedule_params = ScheduleParameters()
schedule_params.improve_steps = TrainingSteps(10000000)
schedule_params.steps_between_evaluation_periods = EnvironmentEpisodes(40)
schedule_params.evaluation_steps = EnvironmentEpisodes(5)
schedule_params.heatup_steps = EnvironmentSteps(0)

#########
# Agent #
#########

agent_params = ClippedPPOAgentParameters()
agent_params.network_wrappers['main'].input_embedders_parameters = {
    'STEREO_CAMERAS': InputEmbedderParameters(
        scheme=[Conv2d(32, 8, 4), Conv2d(32, 4, 2), Conv2d(64, 4, 2)],
        activation_function='relu', dropout_rate=0.3),
    # NOTE: the 'LIDAR' embedder was truncated in the source; a small dense
    # embedder is a plausible completion for a low-dimensional range scan
    # (assumes Dense is imported alongside Conv2d above this fragment).
    'LIDAR': InputEmbedderParameters(
        scheme=[Dense(64), Dense(32)],
        activation_function='relu', dropout_rate=0.3),
}
def get_graph_manager(**hp_dict):
    ####################
    # All Default Parameters #
    ####################
    params = {}
    params["batch_size"] = int(hp_dict.get("batch_size", 64))
    params["num_epochs"] = int(hp_dict.get("num_epochs", 10))
    params["stack_size"] = int(hp_dict.get("stack_size", 1))
    params["lr"] = float(hp_dict.get("lr", 0.0003))
    # NOTE: the "huber" default looks like a copy/paste from loss_type; any value
    # other than "categorical" falls through to e-greedy exploration below.
    params["exploration_type"] = (hp_dict.get("exploration_type", "huber")).lower()
    params["e_greedy_value"] = float(hp_dict.get("e_greedy_value", .05))
    params["epsilon_steps"] = int(hp_dict.get("epsilon_steps", 10000))
    params["beta_entropy"] = float(hp_dict.get("beta_entropy", .01))
    params["discount_factor"] = float(hp_dict.get("discount_factor", .999))
    params["loss_type"] = hp_dict.get("loss_type", "Mean squared error").lower()
    params["num_episodes_between_training"] = int(hp_dict.get("num_episodes_between_training", 20))
    params["term_cond_max_episodes"] = int(hp_dict.get("term_cond_max_episodes", 100000))
    params["term_cond_avg_score"] = float(hp_dict.get("term_cond_avg_score", 100000))

    params_json = json.dumps(params, indent=2, sort_keys=True)
    print("Using the following hyper-parameters", params_json, sep='\n')

    ####################
    # Graph Scheduling #
    ####################
    schedule_params = ScheduleParameters()
    schedule_params.improve_steps = TrainingSteps(params["term_cond_max_episodes"])
    schedule_params.steps_between_evaluation_periods = EnvironmentEpisodes(40)
    schedule_params.evaluation_steps = EnvironmentEpisodes(5)
    schedule_params.heatup_steps = EnvironmentSteps(0)

    #########
    # Agent #
    #########
    agent_params = ClippedPPOAgentParameters()
    agent_params.network_wrappers['main'].learning_rate = params["lr"]
    agent_params.network_wrappers['main'].input_embedders_parameters['observation'].activation_function = 'relu'
    agent_params.network_wrappers['main'].middleware_parameters.activation_function = 'relu'
    agent_params.network_wrappers['main'].batch_size = params["batch_size"]
    agent_params.network_wrappers['main'].optimizer_epsilon = 1e-5
    agent_params.network_wrappers['main'].adam_optimizer_beta2 = 0.999
    if params["loss_type"] == "huber":
        agent_params.network_wrappers['main'].replace_mse_with_huber_loss = True

    agent_params.algorithm.clip_likelihood_ratio_using_epsilon = 0.2
    agent_params.algorithm.clipping_decay_schedule = LinearSchedule(1.0, 0, 1000000)
    agent_params.algorithm.beta_entropy = params["beta_entropy"]
    agent_params.algorithm.gae_lambda = 0.95
    agent_params.algorithm.discount = params["discount_factor"]
    agent_params.algorithm.optimization_epochs = params["num_epochs"]
    agent_params.algorithm.estimate_state_value_using_gae = True
    agent_params.algorithm.num_steps_between_copying_online_weights_to_target = EnvironmentEpisodes(
        params["num_episodes_between_training"])
    agent_params.algorithm.num_consecutive_playing_steps = EnvironmentEpisodes(
        params["num_episodes_between_training"])
    agent_params.algorithm.distributed_coach_synchronization_type = DistributedCoachSynchronizationType.SYNC

    if params["exploration_type"] == "categorical":
        agent_params.exploration = CategoricalParameters()
    else:
        agent_params.exploration = EGreedyParameters()
        agent_params.exploration.epsilon_schedule = LinearSchedule(
            1.0, params["e_greedy_value"], params["epsilon_steps"])

    ###############
    # Environment #
    ###############
    SilverstoneInputFilter = InputFilter(is_a_reference_filter=True)
    SilverstoneInputFilter.add_observation_filter('observation', 'to_grayscale', ObservationRGBToYFilter())
    SilverstoneInputFilter.add_observation_filter('observation', 'to_uint8', ObservationToUInt8Filter(0, 255))
    SilverstoneInputFilter.add_observation_filter('observation', 'stacking',
                                                  ObservationStackingFilter(params["stack_size"]))

    env_params = GymVectorEnvironment()
    env_params.default_input_filter = SilverstoneInputFilter
    env_params.level = 'SilverstoneRacetrack-Discrete-v0'

    vis_params = VisualizationParameters()
    vis_params.dump_mp4 = False

    ########
    # Test #
    ########
    preset_validation_params = PresetValidationParameters()
    preset_validation_params.test = True
    preset_validation_params.min_reward_threshold = 400
    preset_validation_params.max_episodes_to_achieve_reward = 1000

    graph_manager = BasicRLGraphManager(agent_params=agent_params,
                                        env_params=env_params,
                                        schedule_params=schedule_params,
                                        vis_params=vis_params,
                                        preset_validation_params=preset_validation_params)
    return graph_manager, params_json
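# A minimal sketch of calling the factory above (illustrative override values,
# not part of the original file; anything not supplied falls back to the
# defaults printed by the function):
if __name__ == '__main__':
    graph_manager, params_json = get_graph_manager(batch_size=128,
                                                   lr=1e-4,
                                                   exploration_type='categorical')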
from rl_coach.agents.rainbow_dqn_agent import RainbowDQNAgentParameters
from rl_coach.base_parameters import VisualizationParameters, PresetValidationParameters
from rl_coach.core_types import EnvironmentSteps, RunPhase
from rl_coach.environments.environment import MaxDumpMethod, SelectedPhaseOnlyDumpMethod, SingleLevelSelection
from rl_coach.environments.gym_environment import Atari, atari_deterministic_v4
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.graph_managers.graph_manager import ScheduleParameters
from rl_coach.schedules import LinearSchedule

####################
# Graph Scheduling #
####################

schedule_params = ScheduleParameters()
schedule_params.improve_steps = EnvironmentSteps(50000000)
schedule_params.steps_between_evaluation_periods = EnvironmentSteps(1000000)
schedule_params.evaluation_steps = EnvironmentSteps(125000)
schedule_params.heatup_steps = EnvironmentSteps(20000)

#########
# Agent #
#########

agent_params = RainbowDQNAgentParameters()
agent_params.network_wrappers['main'].learning_rate = 0.0000625
agent_params.network_wrappers['main'].optimizer_epsilon = 1.5e-4
agent_params.algorithm.num_steps_between_copying_online_weights_to_target = EnvironmentSteps(32000 // 4)  # 32k frames
agent_params.memory.beta = LinearSchedule(0.4, 1, 12500000)  # 12.5M training iterations = 50M steps = 200M frames
agent_params.memory.alpha = 0.5
from rl_coach.base_parameters import VisualizationParameters, PresetValidationParameters, DistributedCoachSynchronizationType
from rl_coach.core_types import TrainingSteps, EnvironmentEpisodes, EnvironmentSteps, RunPhase
from rl_coach.environments.gym_environment import GymVectorEnvironment, mujoco_v2
from rl_coach.exploration_policies.e_greedy import EGreedyParameters
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.graph_managers.graph_manager import ScheduleParameters
from rl_coach.schedules import LinearSchedule

####################
# Graph Scheduling #
####################

schedule_params = ScheduleParameters()
schedule_params.improve_steps = TrainingSteps(int(5e5))
schedule_params.steps_between_evaluation_periods = EnvironmentSteps(50000)
schedule_params.evaluation_steps = EnvironmentEpisodes(5)
schedule_params.heatup_steps = EnvironmentSteps(150000)

#########
# Agent #
#########

agent_params = ClippedPPOAgentParameters()
agent_params.network_wrappers['main'].learning_rate = 0.0003
agent_params.network_wrappers['main'].input_embedders_parameters['observation'].activation_function = 'tanh'
agent_params.network_wrappers['main'].input_embedders_parameters['observation'].scheme = [Dense(64)]
agent_params.network_wrappers['main'].middleware_parameters.scheme = [Dense(64)]
agent_params.network_wrappers['main'].middleware_parameters.activation_function = 'tanh'
agent_params.network_wrappers['main'].batch_size = 64
agent_params.network_wrappers['main'].optimizer_epsilon = 1e-5
agent_params.network_wrappers['main'].adam_optimizer_beta2 = 0.999
def get_graph_manager(hp_dict, agent_list, run_phase_subject,
                      enable_domain_randomization=False, done_condition=any):
    ####################
    # All Default Parameters #
    ####################
    params = {}
    params["batch_size"] = int(hp_dict.get("batch_size", 64))
    params["num_epochs"] = int(hp_dict.get("num_epochs", 10))
    params["stack_size"] = int(hp_dict.get("stack_size", 1))
    params["lr"] = float(hp_dict.get("lr", 0.0003))
    params["exploration_type"] = (hp_dict.get("exploration_type", "categorical")).lower()
    params["e_greedy_value"] = float(hp_dict.get("e_greedy_value", .05))
    params["epsilon_steps"] = int(hp_dict.get("epsilon_steps", 10000))
    params["beta_entropy"] = float(hp_dict.get("beta_entropy", .01))
    params["discount_factor"] = float(hp_dict.get("discount_factor", .999))
    params["loss_type"] = hp_dict.get("loss_type", "Mean squared error").lower()
    params["num_episodes_between_training"] = int(hp_dict.get("num_episodes_between_training", 20))
    params["term_cond_max_episodes"] = int(hp_dict.get("term_cond_max_episodes", 100000))
    params["term_cond_avg_score"] = float(hp_dict.get("term_cond_avg_score", 100000))

    params_json = json.dumps(params, indent=2, sort_keys=True)
    print("Using the following hyper-parameters", params_json, sep='\n')

    ####################
    # Graph Scheduling #
    ####################
    schedule_params = ScheduleParameters()
    schedule_params.improve_steps = TrainingSteps(params["term_cond_max_episodes"])
    schedule_params.steps_between_evaluation_periods = EnvironmentEpisodes(40)
    schedule_params.evaluation_steps = EnvironmentEpisodes(5)
    schedule_params.heatup_steps = EnvironmentSteps(0)

    #########
    # Agent #
    #########
    trainable_agents_list = list()
    non_trainable_agents_list = list()

    for agent in agent_list:
        agent_params = DeepRacerAgentParams()
        if agent.network_settings:
            agent_params.env_agent = agent
            agent_params.network_wrappers['main'].learning_rate = params["lr"]
            agent_params.network_wrappers['main'].input_embedders_parameters = \
                create_input_embedder(agent.network_settings['input_embedders'],
                                      agent.network_settings['embedder_type'],
                                      agent.network_settings['activation_function'])
            agent_params.network_wrappers['main'].middleware_parameters = \
                create_middle_embedder(agent.network_settings['middleware_embedders'],
                                       agent.network_settings['embedder_type'],
                                       agent.network_settings['activation_function'])

            input_filter = InputFilter(is_a_reference_filter=True)
            for observation in agent.network_settings['input_embedders'].keys():
                if observation == Input.LEFT_CAMERA.value or observation == Input.CAMERA.value or \
                        observation == Input.OBSERVATION.value:
                    input_filter.add_observation_filter(observation, 'to_grayscale', ObservationRGBToYFilter())
                    input_filter.add_observation_filter(observation, 'to_uint8', ObservationToUInt8Filter(0, 255))
                    input_filter.add_observation_filter(observation, 'stacking', ObservationStackingFilter(1))
                if observation == Input.STEREO.value:
                    input_filter.add_observation_filter(observation, 'to_uint8', ObservationToUInt8Filter(0, 255))
                if observation == Input.LIDAR.value:
                    input_filter.add_observation_filter(observation, 'clipping', ObservationClippingFilter(0.15, 1.0))
                if observation == Input.SECTOR_LIDAR.value:
                    input_filter.add_observation_filter(observation, 'binary', ObservationBinarySectorFilter())
            agent_params.input_filter = input_filter()

            agent_params.network_wrappers['main'].batch_size = params["batch_size"]
            agent_params.network_wrappers['main'].optimizer_epsilon = 1e-5
            agent_params.network_wrappers['main'].adam_optimizer_beta2 = 0.999
            if params["loss_type"] == "huber":
                agent_params.network_wrappers['main'].replace_mse_with_huber_loss = True

            agent_params.algorithm.clip_likelihood_ratio_using_epsilon = 0.2
            agent_params.algorithm.clipping_decay_schedule = LinearSchedule(1.0, 0, 1000000)
            agent_params.algorithm.beta_entropy = params["beta_entropy"]
            agent_params.algorithm.gae_lambda = 0.95
            agent_params.algorithm.discount = params["discount_factor"]
            agent_params.algorithm.optimization_epochs = params["num_epochs"]
            agent_params.algorithm.estimate_state_value_using_gae = True
            agent_params.algorithm.num_steps_between_copying_online_weights_to_target = \
                EnvironmentEpisodes(params["num_episodes_between_training"])
            agent_params.algorithm.num_consecutive_playing_steps = \
                EnvironmentEpisodes(params["num_episodes_between_training"])
            agent_params.algorithm.distributed_coach_synchronization_type = \
                DistributedCoachSynchronizationType.SYNC

            if params["exploration_type"] == "categorical":
                agent_params.exploration = CategoricalParameters()
            else:
                agent_params.exploration = EGreedyParameters()
                agent_params.exploration.epsilon_schedule = LinearSchedule(
                    1.0, params["e_greedy_value"], params["epsilon_steps"])

            trainable_agents_list.append(agent_params)
        else:
            non_trainable_agents_list.append(agent)

    ###############
    # Environment #
    ###############
    env_params = DeepRacerRacetrackEnvParameters()
    env_params.agents_params = trainable_agents_list
    env_params.non_trainable_agents = non_trainable_agents_list
    env_params.level = 'DeepRacerRacetrackEnv-v0'
    env_params.run_phase_subject = run_phase_subject
    env_params.enable_domain_randomization = enable_domain_randomization
    env_params.done_condition = done_condition

    vis_params = VisualizationParameters()
    vis_params.dump_mp4 = False

    ########
    # Test #
    ########
    preset_validation_params = PresetValidationParameters()
    preset_validation_params.test = True
    preset_validation_params.min_reward_threshold = 400
    preset_validation_params.max_episodes_to_achieve_reward = 10000

    graph_manager = MultiAgentGraphManager(agents_params=trainable_agents_list,
                                           env_params=env_params,
                                           schedule_params=schedule_params,
                                           vis_params=vis_params,
                                           preset_validation_params=preset_validation_params,
                                           done_condition=done_condition)
    return graph_manager, params_json
###############
env_params = GymVectorEnvironment(level='coinche-v1')

####################
# Graph Scheduling #
####################

num_round_improve_steps = 100
num_round_heatup = 50
num_round_training = 50000
num_round_evaluation = 500

schedule_params = ScheduleParameters()
schedule_params.improve_steps = EnvironmentEpisodes(num_round_improve_steps)
schedule_params.heatup_steps = EnvironmentEpisodes(num_round_heatup)
schedule_params.steps_between_evaluation_periods = EnvironmentEpisodes(num_round_training)
schedule_params.evaluation_steps = EnvironmentEpisodes(num_round_evaluation)

# Exploration policy
agent_params.exploration.noise_schedule = LinearSchedule(1, 0.001, 50000)
# agent_params.exploration.noise_schedule = PieceWiseSchedule([(LinearSchedule(1, 0.01, 300), 300),
#                                                              (LinearSchedule(0.1, 0.005, 6000), 6000)])
# agent_params.exploration.noise_schedule = ExponentialSchedule(1, 0.05, 0.99)
print(agent_params.exploration)
print(agent_params.exploration.noise_schedule)

########################
# Create Graph Manager #
########################
# BasicRLGraphManager creates a single LevelManager between the Agent and the Environment
graph_manager = BasicRLGraphManager(agent_params=agent_params,
from rl_coach.agents.ddqn_bcq_agent import DDQNBCQAgentParameters
from rl_coach.agents.ddqn_bcq_agent import KNNParameters, NNImitationModelParameters

DATASET_SIZE = 100000

####################
# Graph Scheduling #
####################

schedule_params = ScheduleParameters()
# schedule_params.improve_steps = TrainingSteps(10000000000)
schedule_params.improve_steps = TrainingSteps(400)  # 400 epochs
schedule_params.steps_between_evaluation_periods = TrainingSteps(1)
schedule_params.evaluation_steps = EnvironmentEpisodes(10)
schedule_params.heatup_steps = EnvironmentSteps(DATASET_SIZE)

#########
# Agent #
#########

agent_params = DDQNBCQAgentParameters()
agent_params.network_wrappers['main'].batch_size = 128
# TODO cross-DL framework abstraction for a constant initializer?
agent_params.network_wrappers['main'].heads_parameters = \
    [QHeadParameters(output_bias_initializer=tf.constant_initializer(-100))]
agent_params.algorithm.num_steps_between_copying_online_weights_to_target = TrainingSteps(100)
# agent_params.algorithm.num_steps_between_copying_online_weights_to_target = TrainingSteps(500)
agent_params.algorithm.discount = 0.99
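# Why the -100 output bias: in batch RL the agent never explores, so a freshly
# initialized Q-head should start pessimistic rather than optimistic about
# unseen actions. A standalone sketch of the effect, kept commented out so it
# never runs inside this preset (assumes TF2 eager execution; the 4-action
# head and input shape are hypothetical):
#
#   import numpy as np
#   import tensorflow as tf
#   head = tf.keras.layers.Dense(4, bias_initializer=tf.constant_initializer(-100))
#   print(head(np.zeros((1, 16), dtype=np.float32)))  # every Q(s, a) starts near -100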
from rl_coach.exploration_policies.truncated_normal import TruncatedNormalParameters
from rl_coach.schedules import ConstantSchedule, PieceWiseSchedule, ExponentialSchedule
from rl_coach.memories.memory import MemoryGranularity
from rl_coach.base_parameters import EmbedderScheme
from rl_coach.architectures.tensorflow_components.layers import Dense
from rl_coach.filters.filter import InputFilter, OutputFilter

steps_per_episode = 13

####################
# Block Scheduling #
####################

schedule_params = ScheduleParameters()
schedule_params.improve_steps = EnvironmentEpisodes(400)
schedule_params.steps_between_evaluation_periods = EnvironmentEpisodes(3)  # Neta: (1000)
schedule_params.evaluation_steps = EnvironmentEpisodes(1)  # Neta: 0
schedule_params.heatup_steps = EnvironmentEpisodes(100)  # 120*steps_per_episode  # Neta: (2)

#####################
# DDPG Agent Params #
#####################

agent_params = DDPGAgentParameters()
agent_params.network_wrappers['actor'].input_embedders_parameters['observation'].scheme = [Dense(300)]
agent_params.network_wrappers['actor'].middleware_parameters.scheme = [Dense(300)]
agent_params.network_wrappers['critic'].input_embedders_parameters['observation'].scheme = [Dense(300)]
agent_params.network_wrappers['critic'].middleware_parameters.scheme = [Dense(300)]
agent_params.network_wrappers['critic'].input_embedders_parameters['action'].scheme = EmbedderScheme.Empty
agent_params.network_wrappers['actor'].heads_parameters[0].activation_function = 'sigmoid'
# agent_params.network_wrappers['critic'].clip_gradients = 100
# agent_params.network_wrappers['actor'].clip_gradients = 100
def get_graph_manager(**hp_dict):
    ####################
    # All Default Parameters #
    ####################
    params = {}
    params["batch_size"] = int(hp_dict.get("batch_size", 64))
    params["num_epochs"] = int(hp_dict.get("num_epochs", 10))
    params["stack_size"] = int(hp_dict.get("stack_size", 1))
    params["lr"] = float(hp_dict.get("lr", 0.0003))
    params["exploration_type"] = (hp_dict.get("exploration_type", "huber")).lower()
    params["e_greedy_value"] = float(hp_dict.get("e_greedy_value", .05))
    params["epsilon_steps"] = int(hp_dict.get("epsilon_steps", 10000))
    params["beta_entropy"] = float(hp_dict.get("beta_entropy", .01))
    params["discount_factor"] = float(hp_dict.get("discount_factor", .999))
    params["loss_type"] = hp_dict.get("loss_type", "Mean squared error").lower()
    params["num_episodes_between_training"] = int(hp_dict.get("num_episodes_between_training", 20))
    params["term_cond_max_episodes"] = int(hp_dict.get("term_cond_max_episodes", 100000))
    params["term_cond_avg_score"] = float(hp_dict.get("term_cond_avg_score", 100000))

    params_json = json.dumps(params, indent=2, sort_keys=True)
    print("Using the following hyper-parameters", params_json, sep='\n')

    ####################
    # Graph Scheduling #
    ####################
    schedule_params = ScheduleParameters()
    schedule_params.improve_steps = TrainingSteps(params["term_cond_max_episodes"])
    schedule_params.steps_between_evaluation_periods = EnvironmentEpisodes(40)
    schedule_params.evaluation_steps = EnvironmentEpisodes(5)
    schedule_params.heatup_steps = EnvironmentSteps(0)

    #########
    # Agent #
    #########
    agent_params = ClippedPPOAgentParameters()
    agent_params.network_wrappers['main'].learning_rate = params["lr"]
    agent_params.network_wrappers['main'].input_embedders_parameters['observation'].activation_function = 'relu'
    agent_params.network_wrappers['main'].middleware_parameters.activation_function = 'relu'
    agent_params.network_wrappers['main'].batch_size = params["batch_size"]
    agent_params.network_wrappers['main'].optimizer_epsilon = 1e-5
    agent_params.network_wrappers['main'].adam_optimizer_beta2 = 0.999
    if params["loss_type"] == "huber":
        agent_params.network_wrappers['main'].replace_mse_with_huber_loss = True

    agent_params.algorithm.clip_likelihood_ratio_using_epsilon = 0.2
    agent_params.algorithm.clipping_decay_schedule = LinearSchedule(1.0, 0, 1000000)
    agent_params.algorithm.beta_entropy = params["beta_entropy"]
    agent_params.algorithm.gae_lambda = 0.95
    agent_params.algorithm.discount = params["discount_factor"]
    agent_params.algorithm.optimization_epochs = params["num_epochs"]
    agent_params.algorithm.estimate_state_value_using_gae = True
    agent_params.algorithm.num_steps_between_copying_online_weights_to_target = EnvironmentEpisodes(
        params["num_episodes_between_training"])
    agent_params.algorithm.num_consecutive_playing_steps = EnvironmentEpisodes(
        params["num_episodes_between_training"])
    agent_params.algorithm.distributed_coach_synchronization_type = DistributedCoachSynchronizationType.SYNC

    if params["exploration_type"] == "categorical":
        agent_params.exploration = CategoricalParameters()
    else:
        agent_params.exploration = EGreedyParameters()
        agent_params.exploration.epsilon_schedule = LinearSchedule(
            1.0, params["e_greedy_value"], params["epsilon_steps"])

    ###############
    # Environment #
    ###############
    SilverstoneInputFilter = InputFilter(is_a_reference_filter=True)
    SilverstoneInputFilter.add_observation_filter('observation', 'to_grayscale', ObservationRGBToYFilter())
    SilverstoneInputFilter.add_observation_filter('observation', 'to_uint8', ObservationToUInt8Filter(0, 255))
    SilverstoneInputFilter.add_observation_filter('observation', 'stacking',
                                                  ObservationStackingFilter(params["stack_size"]))

    env_params = GymVectorEnvironment()
    env_params.default_input_filter = SilverstoneInputFilter
    env_params.level = 'SilverstoneRacetrack-Discrete-v0'

    vis_params = VisualizationParameters()
    vis_params.dump_mp4 = False

    ########
    # Test #
    ########
    preset_validation_params = PresetValidationParameters()
    preset_validation_params.test = True
    preset_validation_params.min_reward_threshold = 400
    preset_validation_params.max_episodes_to_achieve_reward = 1000

    graph_manager = BasicRLGraphManager(agent_params=agent_params,
                                        env_params=env_params,
                                        schedule_params=schedule_params,
                                        vis_params=vis_params,
                                        preset_validation_params=preset_validation_params)
    return graph_manager, params_json
def get_graph_manager(**hp_dict):
    ####################
    # All Default Parameters #
    ####################
    params = {}
    params["batch_size"] = int(hp_dict.get("batch_size", 64))
    params["num_epochs"] = int(hp_dict.get("num_epochs", 10))
    params["stack_size"] = int(hp_dict.get("stack_size", 1))
    params["lr"] = float(hp_dict.get("lr", 0.0003))
    params["lr_decay_rate"] = float(hp_dict.get("lr_decay_rate", 0))
    params["lr_decay_steps"] = float(hp_dict.get("lr_decay_steps", 0))
    params["exploration_type"] = (hp_dict.get("exploration_type", "categorical")).lower()
    params["e_greedy_value"] = float(hp_dict.get("e_greedy_value", .05))
    params["epsilon_steps"] = int(hp_dict.get("epsilon_steps", 10000))
    params["beta_entropy"] = float(hp_dict.get("beta_entropy", .01))
    params["discount_factor"] = float(hp_dict.get("discount_factor", .999))
    params["loss_type"] = hp_dict.get("loss_type", "Mean squared error").lower()
    params["num_episodes_between_training"] = int(hp_dict.get("num_episodes_between_training", 20))
    params["term_cond_max_episodes"] = int(hp_dict.get("term_cond_max_episodes", 100000))
    params["term_cond_avg_score"] = float(hp_dict.get("term_cond_avg_score", 100000))
    params["tensorboard"] = hp_dict.get("tensorboard", False)
    params["dump_mp4"] = hp_dict.get("dump_mp4", False)
    params["dump_gifs"] = hp_dict.get("dump_gifs", False)

    params_json = json.dumps(params, indent=2, sort_keys=True)
    print("Using the following hyper-parameters", params_json, sep='\n')

    ####################
    # Graph Scheduling #
    ####################
    schedule_params = ScheduleParameters()
    schedule_params.improve_steps = TrainingSteps(params["term_cond_max_episodes"])
    schedule_params.steps_between_evaluation_periods = EnvironmentEpisodes(40)
    schedule_params.evaluation_steps = EnvironmentEpisodes(5)
    schedule_params.heatup_steps = EnvironmentSteps(0)

    #########
    # Agent #
    #########
    agent_params = ClippedPPOAgentParameters()
    agent_params.network_wrappers['main'].learning_rate = params["lr"]
    agent_params.network_wrappers['main'].learning_rate_decay_rate = params["lr_decay_rate"]
    agent_params.network_wrappers['main'].learning_rate_decay_steps = params["lr_decay_steps"]
    agent_params.network_wrappers['main'].input_embedders_parameters['observation'].activation_function = 'relu'
    # Replace the default CNN with single layer Conv2d(32, 3, 1)
    # agent_params.network_wrappers['main'].input_embedders_parameters['observation'].scheme = EmbedderScheme.Shallow
    # agent_params.network_wrappers['main'].input_embedders_parameters['observation'].dropout_rate = 0.3
    agent_params.network_wrappers['main'].middleware_parameters.activation_function = 'relu'
    # agent_params.network_wrappers['main'].middleware_parameters.scheme = MiddlewareScheme.Shallow
    # agent_params.network_wrappers['main'].middleware_parameters.dropout_rate = 0.3
    agent_params.network_wrappers['main'].batch_size = params["batch_size"]
    agent_params.network_wrappers['main'].optimizer_epsilon = 1e-5
    agent_params.network_wrappers['main'].adam_optimizer_beta2 = 0.999
    # agent_params.network_wrappers['main'].l2_regularization = 2e-5
    if params["loss_type"] == "huber":
        agent_params.network_wrappers['main'].replace_mse_with_huber_loss = True

    agent_params.algorithm.clip_likelihood_ratio_using_epsilon = 0.2
    agent_params.algorithm.clipping_decay_schedule = LinearSchedule(1.0, 0, 1000000)
    agent_params.algorithm.beta_entropy = params["beta_entropy"]
    agent_params.algorithm.gae_lambda = 0.95
    agent_params.algorithm.discount = params["discount_factor"]
    agent_params.algorithm.optimization_epochs = params["num_epochs"]
    agent_params.algorithm.estimate_state_value_using_gae = True
    agent_params.algorithm.num_steps_between_copying_online_weights_to_target = EnvironmentEpisodes(
        params["num_episodes_between_training"])
    agent_params.algorithm.num_consecutive_playing_steps = EnvironmentEpisodes(
        params["num_episodes_between_training"])
    agent_params.algorithm.distributed_coach_synchronization_type = DistributedCoachSynchronizationType.SYNC

    if params["exploration_type"] == "categorical":
        agent_params.exploration = CategoricalParameters()
    else:
        agent_params.exploration = EGreedyParameters()
        agent_params.exploration.epsilon_schedule = LinearSchedule(
            1.0, params["e_greedy_value"], params["epsilon_steps"])

    ###############
    # Environment #
    ###############
    DeepRacerInputFilter = InputFilter(is_a_reference_filter=True)
    # Add an observation image perturbation for many aspects
    # DeepRacerInputFilter.add_observation_filter('observation', 'perturb_color', ObservationColorPerturbation(0.2))
    # Rescale to much smaller input when using shallow networks to avoid OOM
    # DeepRacerInputFilter.add_observation_filter('observation', 'rescaling',
    #                                             ObservationRescaleToSizeFilter(ImageObservationSpace(np.array([84, 84, 3]),
    #                                                                                                  high=255)))
    DeepRacerInputFilter.add_observation_filter('observation', 'to_grayscale', ObservationRGBToYFilter())
    DeepRacerInputFilter.add_observation_filter('observation', 'to_uint8', ObservationToUInt8Filter(0, 255))
    DeepRacerInputFilter.add_observation_filter('observation', 'stacking',
                                                ObservationStackingFilter(params["stack_size"]))

    env_params = GymVectorEnvironment()
    env_params.default_input_filter = DeepRacerInputFilter
    env_params.level = 'DeepRacerRacetrackCustomActionSpaceEnv-v0'

    vis_params = VisualizationParameters()
    vis_params.tensorboard = params["tensorboard"]
    vis_params.dump_mp4 = params["dump_mp4"]
    vis_params.dump_gifs = params["dump_gifs"]
    # AlwaysDumpFilter, MaxDumpFilter, EveryNEpisodesDumpFilter, SelectedPhaseOnlyDumpFilter
    vis_params.video_dump_filters = [AlwaysDumpFilter()]

    ########
    # Test #
    ########
    preset_validation_params = PresetValidationParameters()
    preset_validation_params.test = True
    preset_validation_params.min_reward_threshold = 400
    preset_validation_params.max_episodes_to_achieve_reward = 10000

    graph_manager = BasicRLGraphManager(agent_params=agent_params,
                                        env_params=env_params,
                                        schedule_params=schedule_params,
                                        vis_params=vis_params,
                                        preset_validation_params=preset_validation_params)
    return graph_manager, params_json
import os

from rl_coach.base_parameters import TaskParameters
from rl_coach.core_types import EnvironmentSteps
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.graph_managers.graph_manager import ScheduleParameters
from rl_coach.agents.ddqn_agent import DDQNAgentParameters
from rl_coach.environments.gym_environment import GymVectorEnvironment
from rl_coach.schedules import LinearSchedule

####################
# Graph Scheduling #
####################

schedule = ScheduleParameters()
schedule.improve_steps = EnvironmentSteps(2000)
schedule.steps_between_evaluation_periods = EnvironmentSteps(1000)
schedule.evaluation_steps = EnvironmentSteps(1000)
schedule.heatup_steps = EnvironmentSteps(0)

#########
# Agent #
#########

agent_params = DDQNAgentParameters()
agent_params.network_wrappers['main'].learning_rate = 0.025
agent_params.exploration.epsilon_schedule = LinearSchedule(1, 0, 500)

###############
# Environment #
###############

level = 'gym_dynamic_multi_armed_bandit.envs:BasicEnv2'
env_params = GymVectorEnvironment(level)
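############
# Training #
############
# The imports above pull in os and TaskParameters, but the fragment ends before
# using them. A plausible continuation following the usual rl_coach pattern
# (the experiment path below is illustrative):
graph_manager = BasicRLGraphManager(agent_params=agent_params,
                                    env_params=env_params,
                                    schedule_params=schedule)
task_parameters = TaskParameters(experiment_path=os.path.join('.', 'experiments'))
graph_manager.create_graph(task_parameters)
graph_manager.improve()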
def train_using_experience_agent(env_params, n_epochs, dataset_size):
    tf.reset_default_graph()  # just to clean things up; only needed for the tutorial

    # Experience Generating Agent parameters
    experience_generating_agent_params = DDQNAgentParameters()

    # schedule parameters
    experience_generating_schedule_params = ScheduleParameters()
    experience_generating_schedule_params.heatup_steps = EnvironmentSteps(1000)
    experience_generating_schedule_params.improve_steps = TrainingSteps(
        dataset_size - experience_generating_schedule_params.heatup_steps.num_steps)
    experience_generating_schedule_params.steps_between_evaluation_periods = EnvironmentEpisodes(10)
    experience_generating_schedule_params.evaluation_steps = EnvironmentEpisodes(1)

    # DQN params
    experience_generating_agent_params.algorithm.num_steps_between_copying_online_weights_to_target = EnvironmentSteps(100)
    experience_generating_agent_params.algorithm.discount = 0.99
    experience_generating_agent_params.algorithm.num_consecutive_playing_steps = EnvironmentSteps(1)

    # NN configuration
    experience_generating_agent_params.network_wrappers['main'].learning_rate = 0.0001
    experience_generating_agent_params.network_wrappers['main'].batch_size = 128
    experience_generating_agent_params.network_wrappers['main'].replace_mse_with_huber_loss = False
    experience_generating_agent_params.network_wrappers['main'].heads_parameters = \
        [QHeadParameters(output_bias_initializer=tf.constant_initializer(-100))]
    # experience_generating_agent_params.network_wrappers['main'].heads_parameters = \
    #     [QHeadParameters(output_bias_initializer=tf.constant_initializer(0))]

    # ER size
    experience_generating_agent_params.memory = EpisodicExperienceReplayParameters()
    experience_generating_agent_params.memory.max_size = \
        (MemoryGranularity.Transitions,
         experience_generating_schedule_params.heatup_steps.num_steps +
         experience_generating_schedule_params.improve_steps.num_steps)

    # E-Greedy schedule (DATASET_SIZE is the module-level constant defined earlier in the tutorial)
    experience_generating_agent_params.exploration.epsilon_schedule = LinearSchedule(1.0, 0.01, DATASET_SIZE)
    experience_generating_agent_params.exploration.evaluation_epsilon = 0

    schedule_params = set_schedule_params(n_epochs, dataset_size)

    # set the agent params as before
    # agent_params = set_agent_params(DDQNAgentParameters)
    agent_params = set_agent_params(DDQNBCQAgentParameters)
    agent_params.algorithm.action_drop_method_parameters = NNImitationModelParameters()

    # 50 epochs of training (the entire dataset is used each epoch)
    # schedule_params.improve_steps = TrainingSteps(50)

    graph_manager = BatchRLGraphManager(
        agent_params=agent_params,
        experience_generating_agent_params=experience_generating_agent_params,
        experience_generating_schedule_params=experience_generating_schedule_params,
        env_params=env_params,
        schedule_params=schedule_params,
        vis_params=VisualizationParameters(dump_signals_to_csv_every_x_episodes=1),
        reward_model_num_epochs=30,
        train_to_eval_ratio=0.5)
    # task_parameters is defined elsewhere in the tutorial notebook
    graph_manager.create_graph(task_parameters)
    graph_manager.improve()
    return
def get_graph_manager(hp_dict, agent_list, run_phase_subject,
                      enable_domain_randomization=False, done_condition=any,
                      run_type=str(RunType.ROLLOUT_WORKER),
                      pause_physics=None, unpause_physics=None):
    ####################
    # Hyperparameters #
    ####################
    training_algorithm = agent_list[0].ctrl.model_metadata.training_algorithm if agent_list else None
    params = get_updated_hyper_parameters(hp_dict, training_algorithm)
    params_json = json.dumps(params, indent=2, sort_keys=True)
    print("Using the following hyper-parameters", params_json, sep='\n')

    ####################
    # Graph Scheduling #
    ####################
    schedule_params = ScheduleParameters()
    schedule_params.improve_steps = TrainingSteps(
        params[HyperParameterKeys.TERMINATION_CONDITION_MAX_EPISODES.value])
    schedule_params.steps_between_evaluation_periods = EnvironmentEpisodes(40)
    schedule_params.evaluation_steps = EnvironmentEpisodes(5)
    schedule_params.heatup_steps = EnvironmentSteps(0)

    #########
    # Agent #
    #########
    trainable_agents_list = list()
    non_trainable_agents_list = list()

    for agent in agent_list:
        if agent.network_settings:
            if TrainingAlgorithm.SAC.value == training_algorithm:
                agent_params = get_sac_params(DeepRacerSACAgentParams(), agent, params, run_type)
            else:
                agent_params = get_clipped_ppo_params(DeepRacerClippedPPOAgentParams(), agent, params)
            agent_params.env_agent = agent

            input_filter = InputFilter(is_a_reference_filter=True)
            for observation in agent.network_settings['input_embedders'].keys():
                if observation == Input.LEFT_CAMERA.value or observation == Input.CAMERA.value or \
                        observation == Input.OBSERVATION.value:
                    input_filter.add_observation_filter(observation, 'to_grayscale', ObservationRGBToYFilter())
                    input_filter.add_observation_filter(observation, 'to_uint8', ObservationToUInt8Filter(0, 255))
                    input_filter.add_observation_filter(observation, 'stacking', ObservationStackingFilter(1))
                if observation == Input.STEREO.value:
                    input_filter.add_observation_filter(observation, 'to_uint8', ObservationToUInt8Filter(0, 255))
                if observation == Input.LIDAR.value:
                    input_filter.add_observation_filter(observation, 'clipping', ObservationClippingFilter(0.15, 1.0))
                if observation == Input.SECTOR_LIDAR.value:
                    input_filter.add_observation_filter(observation, 'binary', ObservationBinarySectorFilter())
            agent_params.input_filter = input_filter()
            trainable_agents_list.append(agent_params)
        else:
            non_trainable_agents_list.append(agent)

    ###############
    # Environment #
    ###############
    env_params = DeepRacerRacetrackEnvParameters()
    env_params.agents_params = trainable_agents_list
    env_params.non_trainable_agents = non_trainable_agents_list
    env_params.level = 'DeepRacerRacetrackEnv-v0'
    env_params.run_phase_subject = run_phase_subject
    env_params.enable_domain_randomization = enable_domain_randomization
    env_params.done_condition = done_condition
    env_params.pause_physics = pause_physics
    env_params.unpause_physics = unpause_physics

    vis_params = VisualizationParameters()
    vis_params.dump_mp4 = False

    ########
    # Test #
    ########
    preset_validation_params = PresetValidationParameters()
    preset_validation_params.test = True
    preset_validation_params.min_reward_threshold = 400
    preset_validation_params.max_episodes_to_achieve_reward = 10000

    graph_manager = MultiAgentGraphManager(agents_params=trainable_agents_list,
                                           env_params=env_params,
                                           schedule_params=schedule_params,
                                           vis_params=vis_params,
                                           preset_validation_params=preset_validation_params,
                                           done_condition=done_condition)
    return graph_manager, params_json
def get_graph_manager(hp_dict, agent_list, run_phase_subject,
                      enable_domain_randomization=False, done_condition=any,
                      run_type=str(RunType.ROLLOUT_WORKER),
                      pause_physics=None, unpause_physics=None):
    ####################
    # Hyperparameters #
    ####################
    # Note: the following lines are hard-coded to pick the first agent's training
    # algorithm and dump the hyper-parameters for that algorithm as JSON for
    # training jobs (so that the console displays them correctly), since right
    # now we only support training one model at a time.
    # TODO: clean these lines up when we support multi-agent training.
    training_algorithm = agent_list[0].ctrl.model_metadata.training_algorithm if agent_list else None
    params = get_updated_hyper_parameters(hp_dict, training_algorithm)
    params_json = json.dumps(params, indent=2, sort_keys=True)
    print("Using the following hyper-parameters", params_json, sep='\n')

    ####################
    # Graph Scheduling #
    ####################
    schedule_params = ScheduleParameters()
    schedule_params.improve_steps = TrainingSteps(
        params[HyperParameterKeys.TERMINATION_CONDITION_MAX_EPISODES.value])
    schedule_params.steps_between_evaluation_periods = EnvironmentEpisodes(40)
    schedule_params.evaluation_steps = EnvironmentEpisodes(5)
    schedule_params.heatup_steps = EnvironmentSteps(0)

    #########
    # Agent #
    #########
    trainable_agents_list = list()
    non_trainable_agents_list = list()

    for agent in agent_list:
        if agent.network_settings:
            training_algorithm = agent.ctrl.model_metadata.training_algorithm
            params = get_updated_hyper_parameters(hp_dict, training_algorithm)
            if TrainingAlgorithm.SAC.value == training_algorithm:
                agent_params = get_sac_params(DeepRacerSACAgentParams(), agent, params, run_type)
            else:
                agent_params = get_clipped_ppo_params(DeepRacerClippedPPOAgentParams(), agent, params)
            agent_params.env_agent = agent

            input_filter = InputFilter(is_a_reference_filter=True)
            for observation in agent.network_settings['input_embedders'].keys():
                if observation == Input.LEFT_CAMERA.value or observation == Input.CAMERA.value or \
                        observation == Input.OBSERVATION.value:
                    input_filter.add_observation_filter(observation, 'to_grayscale', ObservationRGBToYFilter())
                    input_filter.add_observation_filter(observation, 'to_uint8', ObservationToUInt8Filter(0, 255))
                    input_filter.add_observation_filter(observation, 'stacking', ObservationStackingFilter(1))
                if observation == Input.STEREO.value:
                    input_filter.add_observation_filter(observation, 'to_uint8', ObservationToUInt8Filter(0, 255))
                if observation == Input.LIDAR.value:
                    input_filter.add_observation_filter(observation, 'clipping', ObservationClippingFilter(0.15, 1.0))
                if observation == Input.SECTOR_LIDAR.value:
                    sector_binary_filter = ObservationSectorDiscretizeFilter(
                        num_sectors=NUMBER_OF_LIDAR_SECTORS,
                        num_values_per_sector=1,
                        clipping_dist=SECTOR_LIDAR_CLIPPING_DIST)
                    input_filter.add_observation_filter(observation, 'binary', sector_binary_filter)
                if observation == Input.DISCRETIZED_SECTOR_LIDAR.value:
                    num_sectors = agent.ctrl.model_metadata.lidar_num_sectors
                    num_values_per_sector = agent.ctrl.model_metadata.lidar_num_values_per_sector
                    clipping_dist = agent.ctrl.model_metadata.lidar_clipping_dist
                    sector_discretize_filter = ObservationSectorDiscretizeFilter(
                        num_sectors=num_sectors,
                        num_values_per_sector=num_values_per_sector,
                        clipping_dist=clipping_dist)
                    input_filter.add_observation_filter(observation, 'discrete', sector_discretize_filter)
            agent_params.input_filter = input_filter()
            trainable_agents_list.append(agent_params)
        else:
            non_trainable_agents_list.append(agent)

    ###############
    # Environment #
    ###############
    env_params = DeepRacerRacetrackEnvParameters()
    env_params.agents_params = trainable_agents_list
    env_params.non_trainable_agents = non_trainable_agents_list
    env_params.level = 'DeepRacerRacetrackEnv-v0'
    env_params.run_phase_subject = run_phase_subject
    env_params.enable_domain_randomization = enable_domain_randomization
    env_params.done_condition = done_condition
    env_params.pause_physics = pause_physics
    env_params.unpause_physics = unpause_physics

    vis_params = VisualizationParameters()
    vis_params.dump_mp4 = False

    ########
    # Test #
    ########
    preset_validation_params = PresetValidationParameters()
    preset_validation_params.test = True
    preset_validation_params.min_reward_threshold = 400
    preset_validation_params.max_episodes_to_achieve_reward = 10000

    graph_manager = MultiAgentGraphManager(agents_params=trainable_agents_list,
                                           env_params=env_params,
                                           schedule_params=schedule_params,
                                           vis_params=vis_params,
                                           preset_validation_params=preset_validation_params,
                                           done_condition=done_condition)
    return graph_manager, params_json