# QR-DQN Atari preset: schedule, agent, environment, visualization and
# preset-validation wiring (Coach framework).
# NOTE(review): this chunk assumes `schedule_params` and all referenced
# classes are imported/created earlier in the file — confirm against the
# full preset module.

####################
# Graph Scheduling #
####################
schedule_params.steps_between_evaluation_periods = EnvironmentSteps(250000)
schedule_params.evaluation_steps = EnvironmentSteps(135000)
schedule_params.heatup_steps = EnvironmentSteps(50000)

#########
# Agent #
#########
agent_params = QuantileRegressionDQNAgentParameters()
# called alpha in the paper
agent_params.network_wrappers['main'].learning_rate = 0.00005
# k = 0 for strict quantile loss, k = 1 for Huber quantile loss
agent_params.algorithm.huber_loss_interval = 1

###############
# Environment #
###############
env_params = Atari()
env_params.level = SingleLevelSelection(atari_deterministic_v4)

vis_params = VisualizationParameters()
vis_params.video_dump_methods = [
    SelectedPhaseOnlyDumpMethod(RunPhase.TEST),
    MaxDumpMethod()
]
vis_params.dump_mp4 = False

########
# Test #
########
preset_validation_params = PresetValidationParameters()
# BUG FIX: the original list literal was left unterminated (missing `]`),
# which is a syntax error; closed here with the same three levels.
preset_validation_params.trace_test_levels = [
    'breakout', 'pong', 'space_invaders'
]
# Behavioral Cloning preset for Montezuma's Revenge: the agent is trained
# purely from a recorded demonstration dataset (no environment interaction).

#########
# Agent #
#########
agent_params = BCAgentParameters()
agent_params.network_wrappers['main'].learning_rate = 0.00025
agent_params.memory.max_size = (MemoryGranularity.Transitions, 1000000)
agent_params.algorithm.discount = 0.99
# Zero playing steps: learn exclusively from the loaded replay memory.
agent_params.algorithm.num_consecutive_playing_steps = EnvironmentSteps(0)
agent_params.memory.load_memory_from_file_path = 'datasets/montezuma_revenge.p'

###############
# Environment #
###############
env_params = Atari()
env_params.level = 'MontezumaRevenge-v0'
env_params.random_initialization_steps = 30

# Record videos only for the best runs of the TEST phase.
vis_params = VisualizationParameters()
vis_params.video_dump_methods = [
    SelectedPhaseOnlyDumpMethod(RunPhase.TEST),
    MaxDumpMethod()
]
vis_params.dump_mp4 = False

graph_manager = BasicRLGraphManager(agent_params=agent_params,
                                    env_params=env_params,
                                    schedule_params=schedule_params,
                                    vis_params=vis_params)
# NEC (Neural Episodic Control) Atari preset.
# NOTE(review): `schedule_params` is assumed to be created earlier in the
# file — confirm against the full preset module.

####################
# Graph Scheduling #
####################
schedule_params.evaluation_steps = EnvironmentEpisodes(5)
schedule_params.heatup_steps = EnvironmentSteps(50000)

#########
# Agent #
#########
agent_params = NECAgentParameters(scheme=MiddlewareScheme.Shallow)
agent_params.network_wrappers['main'].learning_rate = 0.00001
# Use the standard Atari input filter but keep raw rewards
# (drop the default reward-clipping stage).
agent_params.input_filter = AtariInputFilter()
agent_params.input_filter.remove_reward_filter('clipping')

###############
# Environment #
###############
env_params = Atari(level=SingleLevelSelection(atari_deterministic_v4))
env_params.random_initialization_steps = 1

########
# Test #
########
preset_validation_params = PresetValidationParameters()
preset_validation_params.test_using_a_trace_test = False

graph_manager = BasicRLGraphManager(
    agent_params=agent_params,
    env_params=env_params,
    schedule_params=schedule_params,
    vis_params=VisualizationParameters(),
    preset_validation_params=preset_validation_params)
# NEC agent preset targeting CartPole-v0.

#########
# Agent #
#########
agent_params = NECAgentParameters()
agent_params.network_wrappers['main'].learning_rate = 0.00025
# Anneal exploration from 0.5 to 0.1 over the first 1000 steps;
# act greedily (epsilon = 0) during evaluation.
agent_params.exploration.epsilon_schedule = LinearSchedule(0.5, 0.1, 1000)
agent_params.exploration.evaluation_epsilon = 0
agent_params.algorithm.discount = 0.99
agent_params.memory.max_size = (MemoryGranularity.Episodes, 200)
# Scale rewards down by the max episode return (200) so they lie in [0, 1].
agent_params.input_filter = MujocoInputFilter()
agent_params.input_filter.add_reward_filter('rescale',
                                            RewardRescaleFilter(1 / 200.))

###############
# Environment #
###############
# NOTE(review): Atari() env params are paired with a classic-control Gym
# level ('CartPole-v0') — looks like it should be a Gym vector environment;
# confirm against other CartPole presets before changing.
env_params = Atari()
env_params.level = 'CartPole-v0'

vis_params = VisualizationParameters()
vis_params.video_dump_methods = [
    SelectedPhaseOnlyDumpMethod(RunPhase.TEST),
    MaxDumpMethod()
]
vis_params.dump_mp4 = False

########
# Test #
########
preset_validation_params = PresetValidationParameters()
preset_validation_params.test = True
preset_validation_params.min_reward_threshold = 150