Code example #1
    def __init__(
            self,
            agent_params: AgentParameters,
            env_params: EnvironmentParameters,
            schedule_params: ScheduleParameters,
            vis_params: VisualizationParameters = VisualizationParameters(),
            preset_validation_params: PresetValidationParameters = PresetValidationParameters(),
            name='simple_rl_graph'):
        super().__init__(name, schedule_params, vis_params)
        self.agent_params = agent_params
        self.env_params = env_params
        self.preset_validation_params = preset_validation_params
        self.agent_params.visualization = vis_params

        if self.agent_params.input_filter is None:
            if env_params is not None:
                self.agent_params.input_filter = env_params.default_input_filter()
            else:
                # When there is no environment (e.g. batch RL or imitation learning), there is
                # nowhere to get a default filter from, so fall back to a no-op filter.
                # In that case the user is expected to define input/output filters (if
                # required) in the preset.
                self.agent_params.input_filter = NoInputFilter()
        if self.agent_params.output_filter is None:
            if env_params is not None:
                self.agent_params.output_filter = env_params.default_output_filter()
            else:
                self.agent_params.output_filter = NoOutputFilter()
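
A minimal usage sketch for the constructor above, assuming it is rl_coach's BasicRLGraphManager (the name='simple_rl_graph' default suggests so); the agent class and level id are illustrative choices, not from the excerpt:

from rl_coach.agents.dqn_agent import DQNAgentParameters
from rl_coach.environments.gym_environment import GymEnvironmentParameters
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.graph_managers.graph_manager import SimpleSchedule

agent_params = DQNAgentParameters()
env_params = GymEnvironmentParameters()
env_params.level = 'CartPole-v0'  # illustrative Gym level id

# input_filter/output_filter are left unset (None), so the constructor above
# falls back to env_params.default_input_filter() / default_output_filter().
graph_manager = BasicRLGraphManager(agent_params=agent_params,
                                    env_params=env_params,
                                    schedule_params=SimpleSchedule())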
Code example #2
 def __init__(self, level=None):
     super().__init__(level=level)
     self.frame_skip = 1
     self.default_input_filter = NoInputFilter()
     self.default_output_filter = NoOutputFilter()
     self.agents_params = None
     self.non_trainable_agents = None
Code example #3
 def __init__(self, level=None):
     super().__init__(level=level)
     self.frame_skip = 1
     self.default_input_filter = NoInputFilter()
     self.default_output_filter = NoOutputFilter()
     self.agents_params = None
     self.non_trainable_agents = None
     self.run_phase_subject = None
     self.enable_domain_randomization = False
Code example #4
 def __init__(self, level=None):
     super().__init__(level=level)
     self.frame_skip = 1
     self.default_input_filter = NoInputFilter()
     self.default_output_filter = NoOutputFilter()
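
Code examples #2-#4 differ only in the extra attributes they initialize; the shared pattern is a parameters subclass pinning its default filters at construction time, which the graph manager in code example #1 then copies into the agent parameters. A sketch of the pattern (the subclass name is hypothetical):

from rl_coach.environments.gym_environment import GymEnvironmentParameters
from rl_coach.filters.filter import NoInputFilter, NoOutputFilter

class MyEnvParameters(GymEnvironmentParameters):  # hypothetical subclass
    def __init__(self, level=None):
        super().__init__(level=level)
        self.frame_skip = 1
        # No-op defaults; they are only used when the preset leaves
        # agent_params.input_filter / output_filter as None.
        self.default_input_filter = NoInputFilter()
        self.default_output_filter = NoOutputFilter()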
Code example #5
"""
Atari Environment Components
"""

AtariInputFilter = InputFilter(is_a_reference_filter=True)
AtariInputFilter.add_reward_filter('clipping', RewardClippingFilter(-1.0, 1.0))
AtariInputFilter.add_observation_filter(
    'observation',
    'rescaling',
    ObservationRescaleToSizeFilter(
        ImageObservationSpace(
            np.array([84, 84, 3]),  #np.array([224, 224, 3]),
            high=255)))
#AtariInputFilter.add_observation_filter('observation', 'to_grayscale', ObservationRGBToYFilter())
#AtariInputFilter.add_observation_filter('observation', 'to_uint8', ObservationToUInt8Filter(0, 255))
#AtariInputFilter.add_observation_filter('observation', 'stacking', ObservationStackingFilter(4))
AtariOutputFilter = NoOutputFilter()


class Atari(GymEnvironmentParameters):
    def __init__(self, level=None):
        super().__init__(level=level)
        self.frame_skip = 4
        self.max_over_num_frames = 2
        self.random_initialization_steps = 30
        self.default_input_filter = AtariInputFilter
        self.default_output_filter = AtariOutputFilter


gym_atari_envs = [
    'air_raid', 'alien', 'amidar', 'assault', 'asterix', 'asteroids',
    'atlantis', 'bank_heist', 'battle_zone', 'beam_rider', 'berzerk',
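
A sketch of how the Atari parameters above are typically consumed in a preset (the level id is illustrative):

env_params = Atari(level='BreakoutDeterministic-v4')  # illustrative Atari level id

# AtariInputFilter was created with is_a_reference_filter=True: the graph
# manager (code example #1) calls env_params.default_input_filter(), which for
# a reference filter appears intended to yield a per-agent copy rather than
# sharing one mutable filter object across agents.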
Code example #6
#####################
# DDQN Agent Params #
#####################
agent_params = DDQNAgentParameters()
agent_params.network_wrappers['main'].learning_rate = 0.00025
agent_params.network_wrappers['main'].heads_parameters = [
    DuelingQHeadParameters()
]
agent_params.memory.max_size = (MemoryGranularity.Transitions, 1000000)
agent_params.algorithm.discount = 0.99
agent_params.algorithm.num_consecutive_playing_steps = EnvironmentSteps(4)
agent_params.exploration.epsilon_schedule = LinearSchedule(
    1, 0.1, (N + 7) * 2000)
agent_params.input_filter = NoInputFilter()
agent_params.output_filter = NoOutputFilter()

###############
# Environment #
###############
env_params = GymEnvironmentParameters()
env_params.level = 'rl_coach.environments.toy_problems.exploration_chain:ExplorationChain'
env_params.additional_simulator_parameters = {
    'chain_length': N,
    'max_steps': N + 7
}

vis_params = VisualizationParameters()

# preset_validation_params = PresetValidationParameters()
# preset_validation_params.test = True
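
A plausible completion of this preset, wiring the pieces into a graph manager (the schedule values are illustrative, not from the excerpt):

from rl_coach.core_types import TrainingSteps, EnvironmentEpisodes
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.graph_managers.graph_manager import ScheduleParameters

schedule_params = ScheduleParameters()
schedule_params.improve_steps = TrainingSteps(10000000)
schedule_params.steps_between_evaluation_periods = EnvironmentEpisodes(50)
schedule_params.evaluation_steps = EnvironmentEpisodes(5)
schedule_params.heatup_steps = EnvironmentSteps(1000)  # EnvironmentSteps is used above

graph_manager = BasicRLGraphManager(agent_params=agent_params,
                                    env_params=env_params,
                                    schedule_params=schedule_params,
                                    vis_params=vis_params)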
Code example #7
    6: carla.WeatherParameters.HardRainNoon,
    7: carla.WeatherParameters.SoftRainNoon,
    8: carla.WeatherParameters.ClearSunset,
    9: carla.WeatherParameters.CloudySunset,
    10: carla.WeatherParameters.WetSunset,
    11: carla.WeatherParameters.WetCloudySunset,
    12: carla.WeatherParameters.MidRainSunset,
    13: carla.WeatherParameters.HardRainSunset,
    14: carla.WeatherParameters.SoftRainSunset
}

# Set up the input and output filters.
# Input filters apply operations to the defined input space; several can be
# defined, and they execute sequentially in the order they were added.
# Output filters apply operations to the defined output space.
CarlaInputFilter = NoInputFilter()
CarlaOutputFilter = NoOutputFilter()


# Enumerate observation sources
# New observation sources need to be appended here
class SensorTypes(Enum):
    FRONT_CAMERA = "forward_camera"
    LIDAR = "lidar"
    BIRDEYE = "birdeye"


class CarlaEnvironmentParameters(EnvironmentParameters):
    def __init__(self):
        super().__init__()
        self.host = 'localhost'
        self.port = 2000
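
NoInputFilter/NoOutputFilter pass observations and actions through untouched. If preprocessing were wanted, an InputFilter could be composed the same way the Atari example does it; a sketch (the observation name matches SensorTypes.FRONT_CAMERA above, the filter name and target size are illustrative):

import numpy as np
from rl_coach.filters.filter import InputFilter
from rl_coach.filters.observation.observation_rescale_to_size_filter import ObservationRescaleToSizeFilter
from rl_coach.spaces import ImageObservationSpace

CarlaCameraFilter = InputFilter(is_a_reference_filter=True)  # hypothetical name
CarlaCameraFilter.add_observation_filter(
    'forward_camera', 'rescaling',
    ObservationRescaleToSizeFilter(
        ImageObservationSpace(np.array([84, 84, 3]), high=255)))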
Code example #8
File: first_test.py Project: luker963/coach
 def __init__(self):
     super().__init__()
     self.default_input_filter = NoInputFilter()
     self.default_output_filter = NoOutputFilter()
Code example #9
File: gym_environment.py Project: mdavala/coach
    def __init__(self):
        super().__init__()
        self.random_initialization_steps = 0
        self.max_over_num_frames = 1
        self.additional_simulator_parameters = None

    @property
    def path(self):
        return 'rl_coach.environments.gym_environment:GymEnvironment'


"""
Roboschool Environment Components
"""
RoboSchoolInputFilters = NoInputFilter()
RoboSchoolOutputFilters = NoOutputFilter()


class Roboschool(GymEnvironmentParameters):
    def __init__(self):
        super().__init__()
        self.frame_skip = 1
        self.default_input_filter = RoboSchoolInputFilters
        self.default_output_filter = RoboSchoolOutputFilters


gym_roboschool_envs = [
    'inverted_pendulum', 'inverted_pendulum_swingup',
    'inverted_double_pendulum', 'reacher', 'hopper', 'walker2d',
    'half_cheetah', 'ant', 'humanoid', 'humanoid_flagrun',
    'humanoid_flagrun_harder', 'pong'
]
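
A usage sketch for the Roboschool parameters above (the environment id is illustrative and follows the 'Roboschool<Name>-v1' Gym naming):

env_params = Roboschool()
env_params.level = 'RoboschoolHopper-v1'  # illustrative Gym environment id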
Code example #10
    def __init__(self, level=None):
        super().__init__(level=level)
        self.observation_type = ObservationType.Measurements
        self.default_input_filter = ControlSuiteInputFilter
        self.default_output_filter = ControlSuiteOutputFilter

    @property
    def path(self):
        return 'rl_coach.environments.control_suite_environment:ControlSuiteEnvironment'


"""
ControlSuite Environment Components
"""
ControlSuiteInputFilter = NoInputFilter()
ControlSuiteOutputFilter = NoOutputFilter()

control_suite_envs = {
    ':'.join(env): ':'.join(env)
    for env in suite.BENCHMARKING
}


# Environment
class ControlSuiteEnvironment(Environment):
    def __init__(
            self,
            level: LevelSelection,
            frame_skip: int,
            visualization_parameters: VisualizationParameters,
            seed: Union[None, int] = None,
Code example #11
 def __init__(self, level=None):
     super().__init__(level=level)
     self.frame_skip = 1
     self.default_input_filter = NoInputFilter()  # hrmm.. my custom input filter errored out
     self.default_output_filter = NoOutputFilter()
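
The inline comment above hints that a custom input filter failed; one plausible cause in rl_coach is sharing a single module-level filter across agents instead of declaring it as a reference filter. A sketch of a custom filter built the way the Atari and DeepMind Lab examples do it (the filter name is hypothetical):

from rl_coach.filters.filter import InputFilter
from rl_coach.filters.reward.reward_clipping_filter import RewardClippingFilter

# is_a_reference_filter=True marks this as a template to be copied per agent,
# not used directly.
MyInputFilter = InputFilter(is_a_reference_filter=True)
MyInputFilter.add_reward_filter('clipping', RewardClippingFilter(-1.0, 1.0))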
Code example #12
File: lab_environment.py Project: ddayzzz/coach
# LabInputFilter = NoInputFilter()
LabInputFilter = InputFilter(is_a_reference_filter=True)
# LabInputFilter.add_reward_filter('clipping', RewardClippingFilter(-1.0, 1.0))
LabInputFilter.add_observation_filter(
    'observation', 'rescaling',
    ObservationRescaleToSizeFilter(
        ImageObservationSpace(np.array([84, 84, 3]), high=255)))
LabInputFilter.add_observation_filter('observation', 'to_grayscale',
                                      ObservationRGBToYFilter())
LabInputFilter.add_observation_filter('observation', 'to_uint8',
                                      ObservationToUInt8Filter(0, 255))
# stack the last 4 images as s_t
LabInputFilter.add_observation_filter('observation', 'stacking',
                                      ObservationStackingFilter(4))
# what we will get back from the agent
LabOutputFilter = NoOutputFilter()
"""
Lab environment parameters
"""


class LabEnvironmentParameters(EnvironmentParameters):
    def __init__(self,
                 level,
                 human_control=False,
                 random_initialization_steps=30,
                 rotation=20,
                 width=84,
                 height=84,
                 fps=60):
        super().__init__(level=level)
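
A usage sketch for the parameters class above (the level name is an illustrative DeepMind Lab level):

env_params = LabEnvironmentParameters(level='nav_maze_static_01')  # illustrative level
# LabInputFilter rescales to 84x84, converts RGB to luminance, casts to uint8,
# and stacks the last 4 frames: the classic DQN-style preprocessing pipeline.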