def test_gym_wrapper_single_visual_and_vector(use_uint8):
    mock_env = mock.MagicMock()
    mock_spec = create_mock_group_spec(
        number_visual_observations=1,
        vector_observation_space_size=3,
        vector_action_space_size=[2],
    )
    mock_decision_step, mock_terminal_step = create_mock_vector_steps(
        mock_spec, number_visual_observations=1
    )
    setup_mock_unityenvironment(
        mock_env, mock_spec, mock_decision_step, mock_terminal_step
    )

    env = UnityToGymWrapper(mock_env, uint8_visual=use_uint8, allow_multiple_obs=True)
    assert isinstance(env, UnityToGymWrapper)
    assert isinstance(env.observation_space, spaces.Tuple)
    assert len(env.observation_space) == 2
    reset_obs = env.reset()
    assert isinstance(reset_obs, list)
    assert len(reset_obs) == 2
    assert all(isinstance(ob, np.ndarray) for ob in reset_obs)
    assert reset_obs[-1].shape == (3,)
    assert len(reset_obs[0].shape) == 3
    actions = env.action_space.sample()
    assert actions.shape == (2,)
    obs, rew, done, info = env.step(actions)
    assert isinstance(obs, list)
    assert len(obs) == 2
    assert all(isinstance(ob, np.ndarray) for ob in obs)
    assert obs[-1].shape == (3,)
    assert isinstance(rew, float)
    assert isinstance(done, (bool, np.bool_))
    assert isinstance(info, dict)

    # check behavior for allow_multiple_obs = False
    env = UnityToGymWrapper(mock_env, uint8_visual=use_uint8, allow_multiple_obs=False)
    assert isinstance(env, UnityToGymWrapper)
    assert isinstance(env.observation_space, spaces.Box)
    reset_obs = env.reset()
    assert isinstance(reset_obs, np.ndarray)
    assert len(reset_obs.shape) == 3
    actions = env.action_space.sample()
    assert actions.shape == (2,)
    obs, rew, done, info = env.step(actions)
    assert isinstance(obs, np.ndarray)
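# The use_uint8 argument implies the test above runs once per visual-encoding
# mode; a minimal sketch of the parametrization, assuming pytest is the test
# runner (the wrapper name is illustrative):
import pytest

@pytest.mark.parametrize("use_uint8", [True, False])
def test_gym_wrapper_visual_and_vector_modes(use_uint8):
    test_gym_wrapper_single_visual_and_vector(use_uint8)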
def test_run_environment(env_name):
    """
    Run the gym test using the specified environment.
    :param env_name: Name of the Unity environment binary to launch
    """
    u_env = UnityEnvironment(env_name, worker_id=1, no_graphics=True)
    env = UnityToGymWrapper(u_env)
    try:
        # Examine environment parameters
        print(str(env))

        # Reset the environment
        initial_observations = env.reset()

        if len(env.observation_space.shape) == 1:
            # Examine the initial vector observation
            print("Agent observations look like: \n{}".format(initial_observations))

        for _episode in range(10):
            env.reset()
            done = False
            episode_rewards = 0
            while not done:
                actions = env.action_space.sample()
                obs, reward, done, _ = env.step(actions)
                episode_rewards += reward
            print("Total reward this episode: {}".format(episode_rewards))
    finally:
        env.close()
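# A minimal driver for the smoke test above; the binary path is a hypothetical
# placeholder and must point at a built Unity environment:
if __name__ == "__main__":
    test_run_environment("./envs/3DBall")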
class UnityEnvWrapper(gym.Env):
    def __init__(self, env_config):
        self.worker_index = 0
        if 'SM_CHANNEL_TRAIN' in os.environ:
            env_name = os.environ['SM_CHANNEL_TRAIN'] + '/' + env_config['env_name']
            os.chmod(env_name, 0o755)
            print("Changed environment binary into executable mode.")
            # Try connecting to the Unity3D game instance.
            while True:
                try:
                    channel = EnvironmentParametersChannel()
                    unity_env = UnityEnvironment(
                        env_name,
                        no_graphics=True,
                        worker_id=self.worker_index,
                        side_channels=[channel],
                        additional_args=['-logFile', 'unity.log'],
                    )
                    channel.set_float_parameter("simulation_mode", 1.0)
                except UnityWorkerInUseException:
                    self.worker_index += 1
                else:
                    break
        else:
            env_name = env_config['env_name']
            while True:
                try:
                    unity_env = default_registry[env_name].make(
                        no_graphics=True,
                        worker_id=self.worker_index,
                        additional_args=['-logFile', 'unity.log'],
                    )
                except UnityWorkerInUseException:
                    self.worker_index += 1
                else:
                    break
        self.env = UnityToGymWrapper(unity_env)
        self.action_space = self.env.action_space
        self.observation_space = self.env.observation_space

    def reset(self):
        return self.env.reset()

    def step(self, action):
        return self.env.step(action)

    def close(self):
        try:
            self.env.close()
        except Exception:
            pass
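# The retry-on-UnityWorkerInUseException loop above is a common pattern for
# finding a free worker port; a minimal sketch of factoring it into a helper
# (the function name and max_tries bound are illustrative):
from mlagents_envs.exception import UnityWorkerInUseException

def make_unity_env(create_fn, start_worker_id=0, max_tries=32):
    """Try successive worker ids until one is not already in use."""
    for worker_id in range(start_worker_id, start_worker_id + max_tries):
        try:
            return create_fn(worker_id), worker_id
        except UnityWorkerInUseException:
            continue
    raise RuntimeError("no free Unity worker id found")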
def test_gym_wrapper():
    mock_env = mock.MagicMock()
    mock_spec = create_mock_group_spec()
    mock_decision_step, mock_terminal_step = create_mock_vector_steps(mock_spec)
    setup_mock_unityenvironment(
        mock_env, mock_spec, mock_decision_step, mock_terminal_step
    )
    env = UnityToGymWrapper(mock_env)
    assert isinstance(env.reset(), np.ndarray)
    actions = env.action_space.sample()
    assert actions.shape[0] == 2
    obs, rew, done, info = env.step(actions)
    assert env.observation_space.contains(obs)
    assert isinstance(obs, np.ndarray)
    assert isinstance(rew, float)
    assert isinstance(done, (bool, np.bool_))
    assert isinstance(info, dict)
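# The mock helpers are defined elsewhere in the test module; a minimal sketch
# of what setup_mock_unityenvironment plausibly wires up (an assumption, the
# real helper may differ):
def setup_mock_unityenvironment(mock_env, mock_spec, mock_decision, mock_terminal):
    # Expose a single behavior and canned step results to the wrapper.
    mock_env.behavior_specs = {"MockBehavior": mock_spec}
    mock_env.get_steps.return_value = (mock_decision, mock_terminal)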
class UnityEnvWrapper(gym.Env):
    def __init__(self, env_config):
        self.worker_index = 0
        if "SM_CHANNEL_TRAIN" in os.environ:
            env_name = os.environ["SM_CHANNEL_TRAIN"] + "/" + env_config["env_name"]
            os.chmod(env_name, 0o755)
            print("Changed environment binary into executable mode.")
            # Try connecting to the Unity3D game instance.
            while True:
                try:
                    unity_env = UnityEnvironment(
                        env_name,
                        no_graphics=True,
                        worker_id=self.worker_index,
                        additional_args=["-logFile", "unity.log"],
                    )
                except UnityWorkerInUseException:
                    self.worker_index += 1
                else:
                    break
        else:
            env_name = env_config["env_name"]
            while True:
                try:
                    unity_env = default_registry[env_name].make(
                        no_graphics=True,
                        worker_id=self.worker_index,
                        additional_args=["-logFile", "unity.log"],
                    )
                except UnityWorkerInUseException:
                    self.worker_index += 1
                else:
                    break
        self.env = UnityToGymWrapper(unity_env)
        self.action_space = self.env.action_space
        self.observation_space = self.env.observation_space

    def reset(self):
        return self.env.reset()

    def step(self, action):
        return self.env.step(action)
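# The env_config constructor argument matches Ray RLlib's environment
# convention; a minimal sketch of training with this wrapper, assuming RLlib
# is the intended consumer (the trainer and env_name values are illustrative):
import ray
from ray import tune

ray.init()
tune.run(
    "PPO",
    config={
        "env": UnityEnvWrapper,
        "env_config": {"env_name": "3DBall"},  # hypothetical registry entry
        "num_workers": 0,
    },
    stop={"training_iteration": 10},
)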
def play():
    engine_configuration_channel = EngineConfigurationChannel()
    # Set the time scale to 10x
    engine_configuration_channel.set_configuration_parameters(time_scale=10.0)
    unity_env = UnityEnvironment(
        "./ml-agents/Project/PushBlock",
        side_channels=[engine_configuration_channel],
    )
    env = UnityToGymWrapper(unity_env, 0, flatten_branched=True)

    # Load the model
    model = deepq.learn(env, "mlp", total_timesteps=0, load_path="./model")

    obs = env.reset()
    obs = np.expand_dims(np.array(obs), axis=0)
    while True:
        action, _, _, _ = model.step(tf.constant(obs))
        action = action[0].numpy()
        obs, rew, done, _ = env.step(action)
        if done:
            obs = env.reset()
        # Re-add the batch dimension before the next model.step call
        obs = np.expand_dims(np.array(obs), axis=0)
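# flatten_branched=True collapses a branched MultiDiscrete action space into a
# single Discrete space, which deepq requires; a quick sanity check (sketch,
# reusing the same binary path):
from gym import spaces

check_env = UnityToGymWrapper(
    UnityEnvironment("./ml-agents/Project/PushBlock"), flatten_branched=True
)
assert isinstance(check_env.action_space, spaces.Discrete)
check_env.close()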
class Environment(BaseEnvironment):
    default_config = {
        "min_throttle": 0.45,
        "max_throttle": 0.6,
        "max_steering_diff": 0.15,
        "jerk_reward_weight": 0.0,
        "max_steering": 1,  # min_steering is the negative of the max
        "steering_gain": 1,
        "steering_bias": 0,
    }

    def __init__(self, **config):
        self.config = config
        self.env = UnityToGymWrapper(
            UnityEnvironment(),
            # Return a list of all visual observations plus the vector
            # observation instead of a single array.
            allow_multiple_obs=True,
        )

    @property
    def observation_space(self):
        return spaces.Box(
            low=np.finfo(np.float32).min,
            high=np.finfo(np.float32).max,
            shape=(1, self.z_size),  # z_size must be set elsewhere
            dtype=np.float32,
        )

    @property
    def action_space(self):
        return spaces.Box(
            low=np.array([-self.config["max_steering"], -1]),
            high=np.array([self.config["max_steering"], 1]),
            dtype=np.float32,
        )

    def reset(self):
        return self.env.reset()

    def step(self, action):
        return self.env.step(action)
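# With allow_multiple_obs=True, reset() and step() return a list holding every
# visual observation followed by the vector observation; a minimal sketch of
# unpacking it (assumes the wrapped scene emits both kinds of observation):
env = Environment()
*visual_obs, vector_obs = env.reset()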
def main():
    # Simulation path:
    env_location = "./simu_envs/SingleAgentVisualization/scene.x86_64"
    # Loading the Unity environment:
    unity_env = UnityEnvironment(env_location)
    # Wrapping with the gym wrapper:
    env = UnityToGymWrapper(unity_env, allow_multiple_obs=True)
    # Reset the environment and get the initial state:
    state = env.reset()
    while True:
        # Select an action based on a policy and the current state:
        action = sample_policy(state)
        # Perform the action in the environment, receiving the next state,
        # the reward, and a flag indicating whether the episode has ended:
        next_state, reward, ended, _ = env.step(action)
        # If the episode ended, reset the environment; otherwise continue:
        if ended:
            state = env.reset()
        else:
            state = next_state
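# sample_policy is not defined in the snippet; a minimal uniform-random
# stand-in that ignores the state (an assumption about its interface). Inside
# main() one would bind it with sample_policy = make_sample_policy(env):
def make_sample_policy(env):
    def sample_policy(state):
        return env.action_space.sample()
    return sample_policy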
class FooCarEnv(gym.Env):
    _channel = EnvironmentParametersChannel()
    PathSpace = {
        'xyz': 0,
        'xy': 2,
        'yz': 2,
        'xz': 2,
    }

    def __init__(self, no_graphics: bool = False, seed: int = 1, **config):
        self._config = config
        worker_id = config.get('worker_id', 0)
        self._unity_env = UnityEnvironment(
            file_name=UNITY_ENV_EXE_FILE,
            # file_name=None,  # Unity Editor mode (debug)
            no_graphics=no_graphics,
            seed=seed,
            side_channels=[self._channel],
            worker_id=worker_id,
        )
        for key, value in config.items():
            self._channel.set_float_parameter(key, float(value))
        self._gym_env = UnityToGymWrapper(self._unity_env)

    def step(self, action):
        obs, reward, done, info = self._gym_env.step(action)
        size = self.observation_size
        return obs[:size], reward, done, info

    def reset(self):
        obs = self._gym_env.reset()
        size = self.observation_size
        return obs[:size]

    def render(self, mode="rgb_array"):
        return self._gym_env.render(mode=mode)

    def seed(self, seed=None):
        self._gym_env.seed(seed=seed)  # it will throw a warning

    def close(self):
        self._gym_env.close()

    @property
    def metadata(self):
        return self._gym_env.metadata

    @property
    def reward_range(self) -> Tuple[float, float]:
        return self._gym_env.reward_range

    @property
    def action_space(self):
        return self._gym_env.action_space

    @property
    def observation_space(self):
        config = self._config
        space = self.PathSpace
        path_space = config.get('path_space', space['xz'])
        r = config.get('radius_anchor_circle', 8.0)
        r_e = config.get('radius_epsilon_ratio', 0.7)
        h = config.get('max_anchor_height', 1.0)

        xyz_mode = (path_space == space['xyz'])
        bound = max(r * (1 + r_e), h if xyz_mode else 0)
        shape = (self.observation_size,)
        return gym.spaces.Box(-bound, +bound, dtype=np.float32, shape=shape)

    @property
    def observation_size(self):
        # Reference: read-only variable (Unity) FooCar/CarAgent.ObservationSize
        config = self._config
        space = self.PathSpace
        path_space = config.get('path_space', space['xz'])
        ticker_end = config.get('ticker_end', 5)
        ticker_start = config.get('ticker_start', -3)

        xyz_mode = (path_space == space['xyz'])
        basic_num = 6
        point_dim = 3 if xyz_mode else 2
        return basic_num + 2 * point_dim * (ticker_end - ticker_start + 1)
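# A minimal sketch of driving FooCarEnv; the config keys mirror those read in
# observation_space/observation_size, and the values are illustrative:
env = FooCarEnv(no_graphics=True, path_space=FooCarEnv.PathSpace['xz'],
                ticker_start=-3, ticker_end=5)
obs = env.reset()
obs, reward, done, info = env.step(env.action_space.sample())
env.close()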
plt.show(block=False)
img_h, img_w = obs[0].shape[:2]

done = False
i = 0
velo = np.zeros((3,))
while not done:
    # Process keyboard input to get the action
    act = 0.0
    if key_ws[0]:
        act += 1.0
    if key_ws[1]:
        act -= 1.0
    key_ws = np.array([False] * 2)

    # Step in the environment
    obs, _, _, _ = env.step(act)
    img = obs[0]
    im.set_data(img)

    if False:  # sanity check
        # The current environment locks motion on the x and z axes
        dt = 1. / UNITY_STEP_FREQ
        acc = act * thrust_multiplier / mass
        next_velo_y = velo[1] + (GRAVITY + acc) * dt
        velo_y_err = np.absolute(next_velo_y - obs[1][4])
        # Use velocity to compute distance
        dist_curr = dist_curr + obs[1][4] * dt
        dist_err = np.absolute(dist_curr - process_ray(obs[1]))
        print("Velocity error (y-axis) = {}, Distance error = {}".format(velo_y_err, dist_err))
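# key_ws, im, env and the physics constants are defined outside this fragment;
# a minimal sketch of the matplotlib key handler that would populate key_ws
# (an assumption about how the keyboard state is captured):
def on_key_press(event):
    if event.key == 'w':
        key_ws[0] = True
    elif event.key == 's':
        key_ws[1] = True

plt.gcf().canvas.mpl_connect('key_press_event', on_key_press)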
args = parser.parse_args()

channel = EngineConfigurationChannel()
unity_env = UnityEnvironment(ENV_ID, seed=1, side_channels=[channel])
channel.set_configuration_parameters(time_scale=1.0)
env = UnityToGymWrapper(unity_env, allow_multiple_obs=True)

net = model.DDPGActor(env.observation_space.shape[0], env.action_space.shape[0])
net.load_state_dict(torch.load(args.model))

obs = env.reset()
total_reward = 0.0
total_steps = 0
while True:
    obs_v = torch.FloatTensor([obs])
    mu_v = net(obs_v)
    action = mu_v.squeeze(dim=0).data.numpy()
    action = np.clip(action, -1, 1)
    obs, reward, done, _ = env.step(action)
    total_reward += reward
    total_steps += 1
    if done:
        break

# Rescale the final action from [-1, 1] into environment units and report the
# result. NOTE: which observation element holds the target distance is
# environment-specific; the first element of the last observation is assumed.
action[0] = action[0] / 2 + 0.5
action[1] = action[1] / 2 + 0.5
action[0] = 10 + action[0] * 500
action[1] = action[1] * -150
dist = float(np.asarray(obs[-1]).flat[0])
print(
    "The target is %.3f away and with the actions %.3f and %.3f we get %.3f points"
    % (dist, action[0], action[1], reward)
)
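# channel.set_configuration_parameters(time_scale=1.0) runs the simulation in
# real time, which suits watching a trained agent; for faster headless
# evaluation the scale can simply be raised (sketch):
channel.set_configuration_parameters(time_scale=20.0)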
class ActorUnity(Actor, RoadworkActorInterface):
    def __init__(self, ctx, actor_id):
        super(ActorUnity, self).__init__(ctx, actor_id)
        self.env = None  # Placeholder
        self.actor_id = actor_id

    async def sim_call_method(self, data) -> object:
        method = data['method']
        args = data['args']      # Array of arguments - []
        kwargs = data['kwargs']  # Dict
        return getattr(self.env, method)(*args, **kwargs)

    async def sim_get_state(self, data) -> object:
        key = data['key']
        has_value, val = await self._state_manager.try_get_state(key)
        return val

    async def sim_set_state(self, data) -> None:
        key = data['key']
        value = data['value']
        print(f'Setting sim state for key {key}', flush=True)
        await self._state_manager.set_state(key, value)
        await self._state_manager.save_state()

    async def _on_activate(self) -> None:
        """A callback which will be called whenever the actor is activated."""
        print(f'Activate {self.__class__.__name__} actor!', flush=True)

    async def _on_deactivate(self) -> None:
        """A callback which will be called whenever the actor is deactivated."""
        print(f'Deactivate {self.__class__.__name__} actor!', flush=True)

    # See behavior_spec: https://github.com/Unity-Technologies/ml-agents/blob/release_4_docs/docs/Python-API.md#interacting-with-a-unity-environment
    # behavior_spec.action_type and behavior_spec.action_shape are what we need here.
    async def sim_action_space(self) -> object:
        # behavior_names map to a BehaviorSpec with observation_shapes, action_type and action_shape
        behavior_names = list(self.env.behavior_specs.keys())
        behavior_idx = 0  # we currently support only one behavior spec, even though Unity can support multiple (@TODO)
        behavior_spec = self.env.behavior_specs[behavior_names[behavior_idx]]

        print(f"Action Type: {behavior_spec.action_type}", flush=True)
        print(f"Action Shape: {behavior_spec.action_shape}", flush=True)

        # We can use /src/Lib/python/roadwork/roadwork/json/unserializer.py as an example.
        # Currently only ActionType.DISCRETE is implemented; all ActionTypes can be found here:
        # https://github.com/Unity-Technologies/ml-agents/blob/3901bad5b0b4e094e119af2f9d0d1304ad3f97ae/ml-agents-envs/mlagents_envs/base_env.py#L247
        # Note: Unity supports DISCRETE or CONTINUOUS action spaces.
        # @TODO: implement continuous in a specific env (which one?)
        if behavior_spec.is_action_discrete():
            self.env.action_space = spaces.Discrete(behavior_spec.action_shape[0])
            print(f"Converted Action Space: {self.env.action_space}", flush=True)

        res = Serializer.serializeMeta(self.env.action_space)
        return res

    # See behavior_spec: https://github.com/Unity-Technologies/ml-agents/blob/release_4_docs/docs/Python-API.md#interacting-with-a-unity-environment
    # behavior_spec.observation_shapes is what we need; it is an array of tuples [(), (), ...],
    # one per observation (e.g. https://github.com/Unity-Technologies/ml-agents/blob/master/docs/Learning-Environment-Examples.md#basic).
    # @TODO: This sounds like a MultiDiscrete environment
    # (https://github.com/openai/gym/blob/master/gym/spaces/multi_discrete.py), so we map to that currently.
    async def sim_observation_space(self) -> object:
        behavior_names = list(self.env.behavior_specs.keys())
        behavior_idx = 0  # we currently support only one behavior spec, even though Unity can support multiple (@TODO)
        behavior_spec = self.env.behavior_specs[behavior_names[behavior_idx]]
        print(f"Observation Shapes: {behavior_spec.observation_shapes}", flush=True)

        observation_space_n_vec = []
        for i in range(0, len(behavior_spec.observation_shapes)):
            # Take element 0 of each tuple, containing the size
            observation_space_n_vec.append(behavior_spec.observation_shapes[i][0])

        print(f"Converted Observation Space: {observation_space_n_vec}", flush=True)
        self.env.observation_space = spaces.MultiDiscrete(observation_space_n_vec)

        res = Serializer.serializeMeta(self.env.observation_space)
        return res

    async def sim_create(self, data) -> None:
        """An actor method to create a sim environment."""
        env_id = data['env_id']
        # seed = data['seed']
        print(f'Creating sim with value {env_id}', flush=True)
        print(f"Current dir: {os.getcwd()}", flush=True)
        try:
            print("[Server] Creating Unity Environment", flush=True)
            self.env = UnityEnvironment(f"{os.getcwd()}/src/Server/Unity/envs/{env_id}/{env_id}")
            print("[Server] Resetting environment already", flush=True)
            self.env.reset()  # we need to reset first in Unity
            # self.unity_env = UnityEnvironment("./environments/GridWorld")
            # self.env = gym.make(env_id)
            # if seed:
            #     self.env.seed(seed)
        except gym.error.Error as e:
            print(e)
            raise Exception("Attempted to look up malformed environment ID '{}'".format(env_id))
        except Exception as e:
            print(e)
            raise Exception(e)
        except:
            print(sys.exc_info())
            traceback.print_tb(sys.exc_info()[2])
            raise

    async def sim_reset(self) -> object:
        observation = self.env.reset()
        # observation is an ndarray; convert it to a list, which is serializable
        if isinstance(observation, np.ndarray):
            observation = observation.tolist()
        return observation

    async def sim_render(self) -> None:
        self.env.render()

    async def sim_monitor_start(self, data) -> None:
        episode_interval = 10  # Create a recording every X episodes
        if data['episode_interval']:
            episode_interval = int(data['episode_interval'])
        v_c = lambda count: count % episode_interval == 0  # Record every X episodes
        # self.env = gym.wrappers.Monitor(self.env, f'./output/{self.actor_id}', resume=False, force=True, video_callable=v_c)
        # self.env = UnityToGymWrapper(self.unity_environment)  # defaults to BaseEnv
        self.env = UnityToGymWrapper(self.env)  # wrap the existing UnityEnvironment

    async def sim_monitor_stop(self) -> None:
        self.env.close()

    async def sim_action_sample(self) -> object:
        action = self.env.action_space.sample()
        return action

    async def sim_step(self, data) -> object:
        action = data['action']

        # Unity requires us to set the action with env.set_actions(behavior_name, action),
        # where action is an array: first dimension = number of agents, second = action branches.
        behavior_names = list(self.env.behavior_specs.keys())
        behavior_idx = 0  # we currently support only one behavior spec, even though Unity can support multiple (@TODO)
        behavior_name = behavior_names[behavior_idx]

        self.env.set_actions(behavior_name, np.array([[action]]))
        self.env.step()  # step() does not return anything in Unity

        # Get the DecisionSteps and TerminalSteps:
        # DecisionSteps: which agents need an action this step? (Note: contains action masks!)
        #   E.g.: DecisionStep(obs=[array([0., 0., ..., 1., ..., 0.], dtype=float32)],
        #         reward=-0.01, agent_id=0, action_mask=[array([False, False, False])])
        # TerminalSteps: which agents ended their episode?
        decision_steps, terminal_steps = self.env.get_steps(behavior_names[behavior_idx])

        # We support one decision step currently; get its observation @TODO
        decision_step_idx = 0
        decision_step = decision_steps[decision_step_idx]
        obs, reward, agent_id, action_mask = decision_step
        observation = obs[decision_step_idx]
        reward = float(reward)
        isDone = False
        info = {}
        # @TODO: terminal_steps should be implemented; it requires a reset

        # observation is an ndarray; convert it to a list, which is serializable
        if isinstance(observation, np.ndarray):
            observation = observation.tolist()

        return observation, reward, isDone, info
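# The @TODO above leaves terminal steps unhandled; a minimal sketch of the
# missing branch inside sim_step (the single-agent assumption is mine):
if len(terminal_steps) > 0:
    agent_id = terminal_steps.agent_id[0]
    terminal_step = terminal_steps[agent_id]
    observation = terminal_step.obs[0]
    reward = float(terminal_step.reward)
    isDone = True  # the caller should reset the environment next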
def train(path):
    # env = gym.make("LunarLander-v2")
    # env = wrappers.Monitor(env, "tmp/lunar-lander", video_callable=lambda episode_id: True, force=True)
    unityenv = UnityEnvironment(path)
    env = UnityToGymWrapper(unity_env=unityenv, flatten_branched=True)
    ddqnAgent = DDQNAgent(alpha=0.0001, gamma=0.99, nActions=7, epsilon=1.0,
                          batchSize=512, inputShape=210)
    nEpisodes = 1000
    ddqnScores = []
    ddqnAverageScores = []
    epsilonHistory = []
    stepsPerEpisode = []

    for episode in range(nEpisodes):
        startTime = time.time()
        done = False
        score = 0
        steps = 0
        observation = env.reset()
        while not done:
            action = ddqnAgent.chooseAction(observation)
            observationNew, reward, done, info = env.step(action)
            score += reward
            ddqnAgent.remember(state=observation, stateNew=observationNew,
                               action=action, reward=reward, done=done)
            observation = observationNew
            ddqnAgent.learn()
            steps += 1

        epsilonHistory.append(ddqnAgent.epsilon)
        ddqnScores.append(score)
        averageScore = np.mean(ddqnScores)
        ddqnAverageScores.append(averageScore)
        stepsPerEpisode.append(steps)
        elapsedMinutes = (time.time() - startTime) / 60
        print("Episode:", episode,
              "Score: %.2f" % score,
              "Average Score: %.2f" % averageScore,
              "Run Time:", elapsedMinutes, "Minutes",
              "Epsilon:", ddqnAgent.epsilon,
              "Steps:", steps)
        if episode > 1 and episode % 9 == 0:
            ddqnAgent.saveModel()

    env.close()

    x = [i for i in range(nEpisodes)]
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(10, 10))
    fig.suptitle("DDQN Hallway")
    ax1.plot(x, ddqnScores, "C1")
    ax1.set_title('Episodes vs Scores')
    ax1.set(xlabel='Episodes', ylabel='Scores')
    ax2.plot(x, ddqnAverageScores, "C2")
    ax2.set_title('Episodes vs Average Scores')
    ax2.set(xlabel='Episodes', ylabel='Average Scores')
    ax3.plot(x, epsilonHistory, "C3")
    ax3.set_title('Episodes vs Epsilon Decay')
    ax3.set(xlabel='Episodes', ylabel='Epsilon Decay')
    ax4.plot(x, stepsPerEpisode, "C4")
    ax4.set_title('Episodes vs Steps Per Episode')
    ax4.set(xlabel='Episodes', ylabel='Steps')
    plt.savefig('Hallway.png')
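# A minimal driver for train(); the binary path is a hypothetical placeholder
# for a built Hallway-like environment:
if __name__ == "__main__":
    train("./envs/Hallway")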