agent_params.network_wrappers['critic'].input_embedders_parameters.pop('observation')

# left camera
agent_params.network_wrappers['actor'].input_embedders_parameters['left_camera'] = \
    copy.deepcopy(agent_params.network_wrappers['actor'].input_embedders_parameters['forward_camera'])
agent_params.network_wrappers['critic'].input_embedders_parameters['left_camera'] = \
    copy.deepcopy(agent_params.network_wrappers['critic'].input_embedders_parameters['forward_camera'])

# right camera
agent_params.network_wrappers['actor'].input_embedders_parameters['right_camera'] = \
    copy.deepcopy(agent_params.network_wrappers['actor'].input_embedders_parameters['forward_camera'])
agent_params.network_wrappers['critic'].input_embedders_parameters['right_camera'] = \
    copy.deepcopy(agent_params.network_wrappers['critic'].input_embedders_parameters['forward_camera'])

agent_params.input_filter = CarlaInputFilter()
agent_params.input_filter.copy_filters_from_one_observation_to_another(
    'forward_camera', 'left_camera')
agent_params.input_filter.copy_filters_from_one_observation_to_another(
    'forward_camera', 'right_camera')

###############
# Environment #
###############
env_params = CarlaEnvironmentParameters()
env_params.cameras = [CameraTypes.FRONT, CameraTypes.LEFT, CameraTypes.RIGHT]

graph_manager = BasicRLGraphManager(agent_params=agent_params,
                                    env_params=env_params,
                                    schedule_params=schedule_params,
                                    vis_params=VisualizationParameters())
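# Presets like this one are normally launched through the Coach command line rather than
# run directly; a minimal sketch, assuming the file is saved as a preset module under
# rl_coach/presets/ with a hypothetical name such as Carla_3_Cameras:
#
#   coach -p Carla_3_Cameras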
Example #2
# download dataset if it doesn't exist
if not os.path.exists(agent_params.memory.load_memory_from_file_path):
    screen.log_title("The CARLA dataset is not present in the following path: {}"
                     .format(agent_params.memory.load_memory_from_file_path))
    result = screen.ask_yes_no("Do you want to download it now?")
    if result:
        create_dataset(None, "./datasets/carla_train_set_replay_buffer.p")
    else:
        screen.error("Please update the path to the CARLA dataset in the CARLA_CIL preset", crash=True)


###############
# Environment #
###############
env_params = CarlaEnvironmentParameters()
env_params.level = 'town1'
env_params.cameras = ['CameraRGB']
env_params.camera_height = 600
env_params.camera_width = 800
env_params.separate_actions_for_throttle_and_brake = True
env_params.allow_braking = True
env_params.quality = CarlaEnvironmentParameters.Quality.EPIC
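# evaluate on the standard CARLA driving benchmark (the CoRL 2017 experiment suite) in Town01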
env_params.experiment_suite = CoRL2017('Town01')

vis_params = VisualizationParameters()
vis_params.video_dump_methods = [SelectedPhaseOnlyDumpMethod(RunPhase.TEST)]
vis_params.dump_mp4 = True

graph_manager = BasicRLGraphManager(agent_params=agent_params, env_params=env_params,
                                    schedule_params=schedule_params, vis_params=vis_params)
Example #3
schedule_params.evaluation_steps = EnvironmentEpisodes(1)
schedule_params.heatup_steps = EnvironmentSteps(1000)

#########
# Agent #
#########
agent_params = DDQNAgentParameters()
agent_params.network_wrappers['main'].learning_rate = 0.00025
agent_params.network_wrappers['main'].heads_parameters = [DuelingQHeadParameters()]
agent_params.network_wrappers['main'].middleware_parameters.scheme = MiddlewareScheme.Empty
agent_params.network_wrappers['main'].rescale_gradient_from_head_by_factor = [1/math.sqrt(2), 1/math.sqrt(2)]
agent_params.network_wrappers['main'].clip_gradients = 10
agent_params.algorithm.num_consecutive_playing_steps = EnvironmentSteps(4)
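# the CARLA front camera observation is keyed 'forward_camera', so re-key the default
# 'observation' embedder to match it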
agent_params.network_wrappers['main'].input_embedders_parameters['forward_camera'] = \
    agent_params.network_wrappers['main'].input_embedders_parameters.pop('observation')
agent_params.output_filter = OutputFilter()
agent_params.output_filter.add_action_filter('discretization', BoxDiscretization(5))
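# BoxDiscretization(5) lets the discrete DDQN head drive a continuous action space by
# exposing a small grid of candidate actions; a rough, illustrative sketch of the idea
# (not the rl_coach internals), assuming a single action dimension bounded in [-1, 1]:
#
#   import numpy as np
#   candidate_actions = np.linspace(-1.0, 1.0, num=5)   # [-1., -0.5, 0., 0.5, 1.]
#   action = candidate_actions[int(q_values.argmax())]  # index chosen by the Q head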

###############
# Environment #
###############
env_params = CarlaEnvironmentParameters()
env_params.level = 'town1'

vis_params = VisualizationParameters()
vis_params.video_dump_methods = [SelectedPhaseOnlyDumpMethod(RunPhase.TEST), MaxDumpMethod()]
vis_params.dump_mp4 = False

graph_manager = BasicRLGraphManager(agent_params=agent_params, env_params=env_params,
                                    schedule_params=schedule_params, vis_params=vis_params)
Example #4
schedule_params.steps_between_evaluation_periods = EnvironmentEpisodes(20)
schedule_params.evaluation_steps = EnvironmentEpisodes(1)
schedule_params.heatup_steps = EnvironmentSteps(1000)

#########
# Agent #
#########
agent_params = DDQNAgentParameters()
agent_params.network_wrappers['main'].learning_rate = 0.00025
agent_params.network_wrappers['main'].heads_parameters = \
    [DuelingQHeadParameters(rescale_gradient_from_head_by_factor=1/math.sqrt(2))]
agent_params.network_wrappers['main'].middleware_parameters.scheme = MiddlewareScheme.Empty
agent_params.network_wrappers['main'].clip_gradients = 10
agent_params.algorithm.num_consecutive_playing_steps = EnvironmentSteps(4)
agent_params.network_wrappers['main'].input_embedders_parameters['forward_camera'] = \
    agent_params.network_wrappers['main'].input_embedders_parameters.pop('observation')
agent_params.output_filter = OutputFilter()
agent_params.output_filter.add_action_filter('discretization', BoxDiscretization(5))

###############
# Environment #
###############
env_params = CarlaEnvironmentParameters()

graph_manager = BasicRLGraphManager(agent_params=agent_params,
                                    env_params=env_params,
                                    schedule_params=schedule_params,
                                    vis_params=VisualizationParameters())
Example #5
# critic (q) network parameters
agent_params.network_wrappers['q'].heads_parameters[0].network_layers_sizes = (32, 32)
agent_params.network_wrappers['q'].batch_size = 32
agent_params.network_wrappers['q'].learning_rate = 0.0003
agent_params.network_wrappers['q'].optimizer_epsilon = 1e-5
agent_params.network_wrappers['q'].adam_optimizer_beta2 = 0.999
agent_params.network_wrappers['q'].input_embedders_parameters['forward_camera'] = \
    agent_params.network_wrappers['q'].input_embedders_parameters.pop('observation')

# actor (policy) network parameters
agent_params.network_wrappers['policy'].batch_size = 32
agent_params.network_wrappers['policy'].learning_rate = 0.0003
agent_params.network_wrappers['policy'].middleware_parameters.scheme = [Dense(32)]
agent_params.network_wrappers['policy'].optimizer_epsilon = 1e-5
agent_params.network_wrappers['policy'].adam_optimizer_beta2 = 0.999
agent_params.network_wrappers['policy'].input_embedders_parameters['forward_camera'] = \
    agent_params.network_wrappers['policy'].input_embedders_parameters.pop('observation')

###############
# Environment #
###############
env_params = CarlaEnvironmentParameters(level='town2')

graph_manager = BasicRLGraphManager(agent_params=agent_params,
                                    env_params=env_params,
                                    schedule_params=schedule_params,
                                    vis_params=VisualizationParameters())
Example #6
# TODO: normalize the speed by the maximum speed from the training set: speed /= 25 (90 km/h)

agent_params.exploration = AdditiveNoiseParameters()
agent_params.exploration.noise_percentage_schedule = ConstantSchedule(0)
agent_params.exploration.evaluation_noise_percentage = 0

agent_params.algorithm.num_consecutive_playing_steps = EnvironmentSteps(0)
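# with zero exploration noise and zero playing steps, the agent collects no new experience
# during training and learns purely from the pre-recorded dataset loaded below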

agent_params.memory.load_memory_from_file_path = "/home/cvds_lab/Documents/advanced-coach/carla_train_set_replay_buffer.p"
agent_params.memory.state_key_with_the_class_index = 'high_level_command'
agent_params.memory.num_classes = 4
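# the replay buffer is balanced across the 4 high-level navigation commands stored under
# 'high_level_command' (presumably the CIL commands: follow lane, turn left, turn right,
# go straight)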

###############
# Environment #
###############
env_params = CarlaEnvironmentParameters()
env_params.level = 'town1'
env_params.cameras = [CameraTypes.FRONT]
env_params.camera_height = 600
env_params.camera_width = 800
env_params.allow_braking = True
env_params.quality = CarlaEnvironmentParameters.Quality.EPIC

vis_params = VisualizationParameters()
vis_params.video_dump_methods = [
    SelectedPhaseOnlyDumpMethod(RunPhase.TEST),
    MaxDumpMethod()
]
vis_params.dump_mp4 = True

graph_manager = BasicRLGraphManager(agent_params=agent_params,