agent_params.algorithm.beta_entropy = 0
agent_params.algorithm.gae_lambda = 0.95
agent_params.algorithm.discount = 1
# How many epochs to train the network using supervised methods
agent_params.algorithm.optimization_epochs = 10
agent_params.algorithm.estimate_state_value_using_gae = True
# Distributed Coach synchronization type.
agent_params.algorithm.distributed_coach_synchronization_type = \
    DistributedCoachSynchronizationType.SYNC

agent_params.pre_network_filter = InputFilter()
agent_params.pre_network_filter.add_observation_filter(
    'observation', 'normalize_observation',
    ObservationNormalizationFilter(name='normalize_observation'))

###############
# Environment #
###############
env_params = GymVectorEnvironment()
env_params.level = './environment.py:DistillerWrapperEnvironment'

vis_params = VisualizationParameters()
vis_params.dump_parameters_documentation = False
vis_params.render = True
vis_params.native_rendering = True
vis_params.dump_signals_to_csv_every_x_episodes = 1

graph_manager = BasicRLGraphManager(agent_params=agent_params,
                                    env_params=env_params,
                                    schedule_params=schedule_params,
                                    vis_params=vis_params)
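The preset above only builds the graph manager; nothing runs until it is driven. Presets are usually launched through the coach CLI, but a minimal sketch of driving training directly looks like the following, assuming the standard rl_coach entry points (TaskParameters, GraphManager.create_graph, GraphManager.improve); the experiment path is a made-up example:

from rl_coach.base_parameters import TaskParameters

# Hypothetical experiment directory; any writable path works.
graph_manager.create_graph(TaskParameters(experiment_path='./experiments/distiller_amc'))
graph_manager.improve()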
bottom_critic.learning_rate = 0.001
bottom_critic.batch_size = 4096

agents_params = [top_agent_params, bottom_agent_params]

###############
# Environment #
###############
time_limit = 1000

env_params = GymVectorEnvironment(
    level="rl_coach.environments.mujoco.pendulum_with_goals:PendulumWithGoals")
env_params.additional_simulator_parameters = {
    "time_limit": time_limit,
    "random_goals_instead_of_standing_goal": False,
    "polar_coordinates": polar_coordinates,
    "goal_reaching_thresholds": distance_from_goal_threshold
}
env_params.frame_skip = 10
env_params.custom_reward_threshold = -time_limit + 1

vis_params = VisualizationParameters()
vis_params.native_rendering = False

graph_manager = HACGraphManager(agents_params=agents_params,
                                env_params=env_params,
                                schedule_params=schedule_params,
                                vis_params=vis_params,
                                consecutive_steps_to_run_non_top_levels=EnvironmentSteps(40))
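This excerpt references schedule_params, polar_coordinates, and distance_from_goal_threshold without defining them; they are set earlier in the preset, outside the lines shown here. For context, a minimal sketch of how a Coach ScheduleParameters object is typically filled in (the step counts below are illustrative, not necessarily the values the original preset uses):

from rl_coach.core_types import EnvironmentEpisodes, EnvironmentSteps
from rl_coach.graph_managers.graph_manager import ScheduleParameters

schedule_params = ScheduleParameters()
schedule_params.improve_steps = EnvironmentEpisodes(40 * 4 * 64)  # total training budget (illustrative)
schedule_params.steps_between_evaluation_periods = EnvironmentEpisodes(4 * 64)
schedule_params.evaluation_steps = EnvironmentEpisodes(64)
schedule_params.heatup_steps = EnvironmentSteps(0)  # no random warm-up in this sketch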