# import gym import numpy as np from enum import IntEnum import scipy.ndimage from rl_coach.graph_managers.graph_manager import ScheduleParameters from rl_coach.utils import lower_under_to_upper, short_dynamic_import try: import roboschool from OpenGL import GL except ImportError: from rl_coach.logger import failed_imports failed_imports.append("RoboSchool") try: from rl_coach.gym_extensions.continuous import mujoco except: from rl_coach.logger import failed_imports failed_imports.append("GymExtensions") try: import pybullet_envs except ImportError: from rl_coach.logger import failed_imports failed_imports.append("PyBullet") from typing import Dict, Any, Union from rl_coach.core_types import RunPhase, EnvironmentSteps
import numpy as np

from rl_coach.filters.observation.observation_move_axis_filter import ObservationMoveAxisFilter

# PySC2 (StarCraft II) is an optional dependency; record the failure so the
# logger can warn the user instead of crashing at import time.
try:
    from pysc2 import maps
    from pysc2.env import sc2_env
    from pysc2.env import available_actions_printer
    from pysc2.lib import actions
    from pysc2.lib import features
    from pysc2.env import environment
    from absl import app
    from absl import flags
except ImportError:
    from rl_coach.logger import failed_imports
    failed_imports.append("PySc2")

from rl_coach.environments.environment import Environment, EnvironmentParameters, LevelSelection
from rl_coach.base_parameters import VisualizationParameters
from rl_coach.spaces import BoxActionSpace, VectorObservationSpace, PlanarMapsObservationSpace, StateSpace, CompoundActionSpace, \
    DiscreteActionSpace
from rl_coach.filters.filter import InputFilter, OutputFilter
from rl_coach.filters.observation.observation_rescale_to_size_filter import ObservationRescaleToSizeFilter
from rl_coach.filters.action.linear_box_to_box_map import LinearBoxToBoxMap
from rl_coach.filters.observation.observation_to_uint8_filter import ObservationToUInt8Filter

# Parse absl flags with a dummy argv. NOTE(review): presumably required because
# pysc2 reads FLAGS at environment-creation time and absl raises if the flags
# were never parsed — confirm against pysc2's entry-point requirements.
FLAGS = flags.FLAGS
FLAGS(['coach.py'])

SCREEN_SIZE = 84  # will also impact the action space size
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # try: import vizdoom except ImportError: from rl_coach.logger import failed_imports failed_imports.append("ViZDoom") import os from enum import Enum from os import path, environ from typing import Union, List import numpy as np from rl_coach.base_parameters import VisualizationParameters from rl_coach.environments.environment import Environment, EnvironmentParameters, LevelSelection from rl_coach.filters.action.full_discrete_action_space_map import FullDiscreteActionSpaceMap from rl_coach.filters.filter import InputFilter, OutputFilter from rl_coach.filters.observation.observation_rescale_to_size_filter import ObservationRescaleToSizeFilter from rl_coach.filters.observation.observation_rgb_to_y_filter import ObservationRGBToYFilter from rl_coach.filters.observation.observation_stacking_filter import ObservationStackingFilter
# See the License for the specific language governing permissions and # limitations under the License. # import random from enum import Enum from typing import Union import numpy as np try: from dm_control import suite from dm_control.suite.wrappers import pixels except ImportError: from rl_coach.logger import failed_imports failed_imports.append("DeepMind Control Suite") from rl_coach.base_parameters import VisualizationParameters from rl_coach.environments.environment import Environment, EnvironmentParameters, LevelSelection from rl_coach.filters.filter import NoInputFilter, NoOutputFilter from rl_coach.spaces import BoxActionSpace, ImageObservationSpace, VectorObservationSpace, StateSpace class ObservationType(Enum): Measurements = 1 Image = 2 Image_and_Measurements = 3 # Parameters class ControlSuiteEnvironmentParameters(EnvironmentParameters):
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from typing import List, Tuple from rl_coach.base_parameters import Frameworks, AgentParameters from rl_coach.logger import failed_imports from rl_coach.spaces import SpacesDefinition try: import tensorflow as tf from rl_coach.architectures.tensorflow_components.general_network import GeneralTensorFlowNetwork except ImportError: failed_imports.append("TensorFlow") class NetworkWrapper(object): """ Contains multiple networks and managers syncing and gradient updates between them. """ def __init__(self, agent_parameters: AgentParameters, has_target: bool, has_global: bool, name: str, spaces: SpacesDefinition, replicated_device=None, worker_device=None):
if 'CARLA_ROOT' in environ: sys.path.append(path.join(environ.get('CARLA_ROOT'), 'PythonClient')) else: screen.error( "CARLA_ROOT was not defined. Please set it to point to the CARLA root directory and try again." ) from carla.client import CarlaClient from carla.settings import CarlaSettings from carla.tcp import TCPConnectionError from carla.sensor import Camera from carla.client import VehicleControl from carla.planner.planner import Planner from carla.driving_benchmark.experiment_suites.experiment_suite import ExperimentSuite except ImportError: from rl_coach.logger import failed_imports failed_imports.append("CARLA") import logging import subprocess from rl_coach.environments.environment import Environment, EnvironmentParameters, LevelSelection from rl_coach.spaces import BoxActionSpace, ImageObservationSpace, StateSpace, \ VectorObservationSpace from rl_coach.utils import get_open_port, force_list from enum import Enum import os import signal from typing import List, Union from rl_coach.base_parameters import VisualizationParameters from rl_coach.filters.filter import InputFilter, NoOutputFilter from rl_coach.filters.observation.observation_rescale_to_size_filter import ObservationRescaleToSizeFilter from rl_coach.filters.observation.observation_stacking_filter import ObservationStackingFilter
# distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import pickle import numpy as np try: import annoy from annoy import AnnoyIndex except ImportError: from rl_coach.logger import failed_imports failed_imports.append("annoy") class AnnoyDictionary(object): def __init__(self, dict_size, key_width, new_value_shift_coefficient=0.1, batch_size=100, key_error_threshold=0.01, num_neighbors=50, override_existing_keys=True, rebuild_on_every_update=False): self.rebuild_on_every_update = rebuild_on_every_update self.max_size = dict_size self.curr_size = 0
# See the License for the specific language governing permissions and # limitations under the License. # from typing import List, Tuple from rl_coach.base_parameters import Frameworks, AgentParameters from rl_coach.logger import failed_imports from rl_coach.saver import SaverCollection from rl_coach.spaces import SpacesDefinition from rl_coach.utils import force_list try: import tensorflow as tf from rl_coach.architectures.tensorflow_components.general_network import GeneralTensorFlowNetwork except ImportError: failed_imports.append("tensorflow") try: import mxnet as mx from rl_coach.architectures.mxnet_components.general_network import GeneralMxnetNetwork except ImportError: failed_imports.append("mxnet") class NetworkWrapper(object): """ The network wrapper contains multiple copies of the same network, each one with a different set of weights which is updating in a different time scale. The network wrapper will always contain an online network. It will contain an additional slow updating target network if it was requested by the user, and it will contain a global network shared between different workers, if Coach is run in a single-node multi-process distributed mode. The network wrapper contains functionality for managing these networks and syncing
# limitations under the License. # from typing import Union ,Dict, Any from enum import Enum, Flag, auto from copy import deepcopy import numpy as np import random from collections import namedtuple try: import robosuite from robosuite.wrappers import Wrapper, DomainRandomizationWrapper except ImportError: from rl_coach.logger import failed_imports failed_imports.append("Robosuite") from rl_coach.base_parameters import Parameters, VisualizationParameters from rl_coach.environments.environment import Environment, EnvironmentParameters, LevelSelection from rl_coach.spaces import BoxActionSpace, VectorObservationSpace, StateSpace, PlanarMapsObservationSpace # Importing our custom Robosuite environments here so that they are properly # registered in Robosuite, and so recognized by 'robosuite.make()' and included # in 'robosuite.ALL_ENVIRONMENTS' import rl_coach.environments.robosuite.cube_exp robosuite_environments = list(robosuite.ALL_ENVIRONMENTS) robosuite_robots = list(robosuite.ALL_ROBOTS) robosuite_controllers = list(robosuite.ALL_CONTROLLERS)
try: import deepmind_lab except ImportError: from rl_coach.logger import failed_imports failed_imports.append("deepmind_lab") import os import gym import random from enum import Enum from os import path, environ from typing import Union, List import numpy as np from rl_coach.base_parameters import VisualizationParameters from rl_coach.environments.environment import Environment, EnvironmentParameters, LevelSelection from rl_coach.spaces import DiscreteActionSpace from rl_coach.filters.action.full_discrete_action_space_map import FullDiscreteActionSpaceMap from rl_coach.filters.filter import InputFilter, OutputFilter from rl_coach.filters.observation.observation_rescale_to_size_filter import ObservationRescaleToSizeFilter from rl_coach.filters.observation.observation_rgb_to_y_filter import ObservationRGBToYFilter from rl_coach.filters.observation.observation_stacking_filter import ObservationStackingFilter from rl_coach.filters.observation.observation_to_uint8_filter import ObservationToUInt8Filter from rl_coach.filters.reward.reward_clipping_filter import RewardClippingFilter from rl_coach.filters.filter import NoOutputFilter, NoInputFilter from rl_coach.spaces import MultiSelectActionSpace, ImageObservationSpace, VectorObservationSpace, StateSpace, ActionType from pygame import locals level_scripts = [