def run_parallel2(args):
    """ Test parallel mode with supersuit env wrappers. """
    parallel_env = eval(args.env).parallel_env()
    # as per openai baseline's MaxAndSkip wrapper, maxes over the last 2 frames
    # to deal with frame flickering
    env = supersuit.max_observation_v0(parallel_env, 2)
    # repeat_action_probability is set to 0.25 to introduce non-determinism to the system
    env = supersuit.sticky_actions_v0(env, repeat_action_probability=0.25)
    # skip frames for faster processing and less control;
    # to be compatible with gym, use frame_skip(env, (2, 5))
    env = supersuit.frame_skip_v0(env, 4)
    # downscale observation for faster processing
    env = supersuit.resize_v0(env, 84, 84)
    # allow agent to see everything on the screen despite Atari's flickering screen problem
    parallel_env = supersuit.frame_stack_v1(env, 4)

    parallel_env.seed(1)
    observations = parallel_env.reset()
    print(parallel_env.agents)
    max_cycles = 500
    for step in range(max_cycles):
        actions = {agent: 1 for agent in parallel_env.agents}
        observations, rewards, dones, infos = parallel_env.step(actions)
        parallel_env.render()
def atari_preprocessing(
    env: Union[ParallelEnvWrapper, SequentialEnvWrapper]
) -> Union[ParallelEnvWrapper, SequentialEnvWrapper]:
    # Preprocessing
    env = supersuit.max_observation_v0(env, 2)
    # repeat_action_probability is set to 0.25
    # to introduce non-determinism to the system
    env = supersuit.sticky_actions_v0(env, repeat_action_probability=0.25)
    # skip frames for faster processing and less control;
    # to be compatible with gym, use frame_skip(env, (2, 5))
    env = supersuit.frame_skip_v0(env, 4)
    # downscale observation for faster processing
    env = supersuit.resize_v0(env, 84, 84)
    # allow agent to see everything on the screen
    # despite Atari's flickering screen problem
    env = supersuit.frame_stack_v1(env, 4)
    # set dtype to float32
    env = supersuit.dtype_v0(env, np.float32)
    return env
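# A minimal usage sketch for the atari_preprocessing chain above, assuming the
# older PettingZoo API that the surrounding snippets use (reset() returns only
# the observation dict). pong_v3 is an assumption: any pettingzoo.atari module
# works, and the version suffix may differ in your install.
import numpy as np
from pettingzoo.atari import pong_v3  # assumed module; swap in any Atari env

env = atari_preprocessing(pong_v3.parallel_env(obs_type='rgb_image'))
observations = env.reset()
for agent, obs in observations.items():
    # after dtype_v0, every stacked 84x84 observation should be float32
    assert obs.dtype == np.float32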
def _load_env(self, env_name, pettingzoo_params):
    import importlib
    from pettingzoo import atari
    from supersuit import resize_v0, frame_skip_v0, reshape_v0, max_observation_v0
    env = importlib.import_module(
        'pettingzoo.atari.{}'.format(env_name)).env(
            obs_type='grayscale_image', **pettingzoo_params)
    env = max_observation_v0(env, 2)
    env = frame_skip_v0(env, 4)
    env = resize_v0(env, 84, 84)
    env = reshape_v0(env, (1, 84, 84))
    return env
def make_env(env_name='boxing_v1', seed=1, obs_type='rgb_image'):
    '''https://www.pettingzoo.ml/atari'''
    if env_name == 'slimevolley_v0':
        env = SlimeVolleyWrapper(gym.make("SlimeVolley-v0"))
    else:  # PettingZoo envs
        env = eval(env_name).parallel_env(obs_type=obs_type)
        if obs_type == 'rgb_image':
            # as per openai baseline's MaxAndSkip wrapper, maxes over the last 2 frames
            # to deal with frame flickering
            env = supersuit.max_observation_v0(env, 2)
            # repeat_action_probability is set to 0.25 to introduce non-determinism to the system
            env = supersuit.sticky_actions_v0(env, repeat_action_probability=0.25)
            # skip frames for faster processing and less control;
            # to be compatible with gym, use frame_skip(env, (2, 5))
            env = supersuit.frame_skip_v0(env, 4)
            # downscale observation for faster processing
            env = supersuit.resize_v0(env, 84, 84)
            # allow agent to see everything on the screen despite Atari's flickering screen problem
            env = supersuit.frame_stack_v1(env, 4)
        else:
            env = supersuit.frame_skip_v0(env, 4)
        # env = PettingZooWrapper(env)  # needs to be put at the end

    if env_name in AtariEnvs:
        # normalize the observation of Atari for both image and RAM inputs;
        # uint8 observations must be cast to float before normalizing:
        # https://github.com/PettingZoo-Team/SuperSuit
        env = supersuit.dtype_v0(env, 'float32')
        # normalize the observation to (0, 1)
        env = supersuit.normalize_obs_v0(env, env_min=0, env_max=1)

    # assign observation and action spaces
    env.observation_space = list(env.observation_spaces.values())[0]
    env.action_space = list(env.action_spaces.values())[0]
    env.seed(seed)
    return env
def unwrapped_check(env):
    # image observations: a 3-D Box with 3 colour channels and full [0, 255] range
    if isinstance(env.observation_space, spaces.Box):
        if (len(env.observation_space.shape) == 3
                and env.observation_space.shape[2] == 3
                and (env.observation_space.low == 0).all()
                and (env.observation_space.high == 255).all()):
            env = max_observation_v0(env, 2)
            env = color_reduction_v0(env, mode="full")
            env = normalize_obs_v0(env)

    # box action spaces
    if isinstance(env.action_space, spaces.Box):
        env = clip_actions_v0(env)
        env = scale_actions_v0(env, 0.5)

    # stackable observations
    if isinstance(env.observation_space, spaces.Box) or isinstance(
            env.observation_space, spaces.Discrete):
        env = frame_stack_v1(env, 2)

    # observations that are neither discrete nor multibinary
    if not isinstance(env.observation_space, spaces.Discrete) and not isinstance(
            env.observation_space, spaces.MultiBinary):
        env = dtype_v0(env, np.float16)
        env = flatten_v0(env)
        env = frame_skip_v0(env, 2)

    # everything else
    env = clip_reward_v0(env, lower_bound=-1, upper_bound=1)
    env = delay_observations_v0(env, 2)
    env = sticky_actions_v0(env, 0.5)
    env = nan_random_v0(env)
    env = nan_zeros_v0(env)

    assert env.unwrapped.__class__ == DummyEnv, f"Failed to unwrap {env}"
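# The image check above is easy to get wrong (the original compared a shape
# tuple to a bare int and indexed into it incorrectly), so here is the fixed
# predicate in isolation. `is_image_space` is a hypothetical helper written for
# this sketch, not part of supersuit or gym.
import numpy as np
from gym import spaces

def is_image_space(space):
    # a 3-D Box with 3 colour channels and the full uint8 range [0, 255]
    return (isinstance(space, spaces.Box)
            and len(space.shape) == 3
            and space.shape[2] == 3
            and (space.low == 0).all()
            and (space.high == 255).all())

assert is_image_space(spaces.Box(low=0, high=255, shape=(84, 84, 3), dtype=np.uint8))
assert not is_image_space(spaces.Box(low=0, high=1, shape=(128,), dtype=np.float32))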
def wrap_env(env, obs_type='ram'):
    env = env.parallel_env(obs_type=obs_type)
    env_agents = env.unwrapped.agents
    if obs_type == 'rgb_image':
        # as per openai baseline's MaxAndSkip wrapper, maxes over the last 2 frames
        # to deal with frame flickering
        env = supersuit.max_observation_v0(env, 2)
        # repeat_action_probability is set to 0.25 to introduce non-determinism to the system
        env = supersuit.sticky_actions_v0(env, repeat_action_probability=0.25)
        # skip frames for faster processing and less control;
        # to be compatible with gym, use frame_skip(env, (2, 5))
        env = supersuit.frame_skip_v0(env, 4)
        # downscale observation for faster processing
        env = supersuit.resize_v0(env, 84, 84)
        # allow agent to see everything on the screen despite Atari's flickering screen problem
        env = supersuit.frame_stack_v1(env, 4)
    else:
        # the RAM version also needs frame skip; essential for boxing-v1, etc.
        env = supersuit.frame_skip_v0(env, 4)

    # normalize the observation of Atari for both image and RAM inputs;
    # uint8 observations must be cast to float before normalizing:
    # https://github.com/PettingZoo-Team/SuperSuit
    env = supersuit.dtype_v0(env, 'float32')
    # normalize the observation to (0, 1)
    env = supersuit.normalize_obs_v0(env, env_min=0, env_max=1)

    env.observation_space = list(env.observation_spaces.values())[0]
    env.action_space = list(env.action_spaces.values())[0]
    env.agents = env_agents
    env = Dict2TupleWrapper(env)
    return env
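# A usage sketch for wrap_env. Note it expects the env *module*, not an env
# instance, since it calls .parallel_env itself. boxing_v1 is assumed here to
# match the rest of this file; newer pettingzoo releases ship boxing_v2.
from pettingzoo.atari import boxing_v1  # assumed available

ram_env = wrap_env(boxing_v1, obs_type='ram')
pixel_env = wrap_env(boxing_v1, obs_type='rgb_image')
print(ram_env.observation_space)    # flat float32 RAM vector, normalized to (0, 1)
print(pixel_env.observation_space)  # stacked 84x84 float32 frames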
def unwrapped_check(env):
    env.reset()
    agents = env.agents

    if image_observation(env, agents):
        env = max_observation_v0(env, 2)
        env = color_reduction_v0(env, mode="full")
        env = normalize_obs_v0(env)

    if box_action(env, agents):
        env = clip_actions_v0(env)
        env = scale_actions_v0(env, 0.5)

    if observation_homogenizable(env, agents):
        env = pad_observations_v0(env)
        env = frame_stack_v1(env, 2)
        env = agent_indicator_v0(env)
        env = black_death_v3(env)

    if (not_dict_observation(env, agents)
            and not_discrete_observation(env, agents)
            and not_multibinary_observation(env, agents)):
        env = dtype_v0(env, np.float16)
        env = flatten_v0(env)
        env = frame_skip_v0(env, 2)

    if action_homogenizable(env, agents):
        env = pad_action_space_v0(env)

    env = clip_reward_v0(env, lower_bound=-1, upper_bound=1)
    env = delay_observations_v0(env, 2)
    env = sticky_actions_v0(env, 0.5)
    env = nan_random_v0(env)
    env = nan_zeros_v0(env)

    assert env.unwrapped.__class__ == DummyEnv, f"Failed to unwrap {env}"
def create_single_env(args):
    env_name = args.env
    if args.num_envs > 1:
        # keep_info True to maintain dict type for parallel envs
        # (otherwise they cannot pass the VectorEnv wrapper)
        keep_info = True
    else:
        keep_info = False

    '''https://www.pettingzoo.ml/atari'''
    if "slimevolley" in env_name or "SlimeVolley" in env_name:
        print(f'Load SlimeVolley env: {env_name}')
        env = gym.make(env_name)
        if env_name in [
                'SlimeVolleySurvivalNoFrameskip-v0',
                'SlimeVolleyNoFrameskip-v0', 'SlimeVolleyPixel-v0'
        ]:
            # For image-based envs, apply the following wrappers (from gym atari)
            # to achieve a pettingzoo-style env, or use supersuit (requires the
            # input env to be either a pettingzoo or gym env).
            # Same as: https://github.com/hardmaru/slimevolleygym/blob/master/training_scripts/train_ppo_pixel.py
            # TODO Note: this cannot handle the two observations in the above
            # SlimeVolley envs, since the wrappers are for a single agent.
            if env_name != 'SlimeVolleyPixel-v0':
                env = NoopResetEnv(env, noop_max=30)
            env = MaxAndSkipEnv(env, skip=4)
            env = WarpFrame(env)
            # env = ClipRewardEnv(env)
            env = FrameStack(env, 4)

        # slimevolley to pettingzoo style
        env = SlimeVolleyWrapper(env, args.against_baseline)
        # pettingzoo to nfsp style; keep_info True to maintain dict type for parallel envs
        env = NFSPPettingZooWrapper(env, keep_info=keep_info)

    elif env_name in AtariEnvs:  # PettingZoo Atari envs
        print(f'Load PettingZoo Atari env: {env_name}')
        if args.ram:
            obs_type = 'ram'
        else:
            obs_type = 'rgb_image'
        env = eval(env_name).parallel_env(obs_type=obs_type)
        # agents cannot go through the supersuit wrappers, so get them first and reassign later
        env_agents = env.unwrapped.agents

        if obs_type == 'rgb_image':
            # as per openai baseline's MaxAndSkip wrapper, maxes over the last 2 frames
            # to deal with frame flickering
            env = supersuit.max_observation_v0(env, 2)
            # repeat_action_probability is set to 0.25 to introduce non-determinism to the system
            env = supersuit.sticky_actions_v0(env, repeat_action_probability=0.25)
            # skip frames for faster processing and less control;
            # to be compatible with gym, use frame_skip(env, (2, 5))
            env = supersuit.frame_skip_v0(env, 4)
            # downscale observation for faster processing
            env = supersuit.resize_v0(env, 84, 84)
            # allow agent to see everything on the screen despite Atari's flickering screen problem
            env = supersuit.frame_stack_v1(env, 4)
        else:
            # the RAM version also needs frame skip; essential for boxing-v1, etc.
            env = supersuit.frame_skip_v0(env, 4)

        # env = PettingZooWrapper(env)  # needs to be put at the end

        # normalize the observation of Atari for both image and RAM inputs;
        # uint8 observations must be cast to float before normalizing:
        # https://github.com/PettingZoo-Team/SuperSuit
        env = supersuit.dtype_v0(env, 'float32')
        # normalize the observation to (0, 1)
        env = supersuit.normalize_obs_v0(env, env_min=0, env_max=1)

        # assign observation and action spaces
        env.observation_space = list(env.observation_spaces.values())[0]
        env.action_space = list(env.action_spaces.values())[0]
        env.agents = env_agents
        # pettingzoo to nfsp style; keep_info True to maintain dict type for parallel envs
        env = NFSPPettingZooWrapper(env, keep_info=keep_info)

    elif env_name in ClassicEnvs:  # PettingZoo Classic envs
        print(f'Load PettingZoo Classic env: {env_name}')
        if env_name in ['rps_v1', 'rpsls_v1']:
            env = eval(env_name).parallel_env()
            env = PettingzooClassicWrapper(env, observation_mask=1.)
        else:  # only rps_v1 can use parallel_env at present
            env = eval(env_name).env()
            # Classic games do not support the Parallel API yet
            env = PettingzooClassic_Iterate2Parallel(env, observation_mask=None)

        env = NFSPPettingZooWrapper(env, keep_info=keep_info)

    elif "LaserTag" in env_name:  # LaserTag: https://github.com/younggyoseo/pytorch-nfsp
        print(f'Load LaserTag env: {env_name}')
        env = gym.make(env_name)
        env = wrap_pytorch(env)

    else:  # gym env
        print(f'Load Gym env: {env_name}')
        try:
            env = gym.make(env_name)
        except:
            print(f"Error: No such env: {env_name}!")
        # may need more wrappers here, e.g. Pong-ram-v0 needs scaled observation!
        # Ref: https://towardsdatascience.com/deep-q-network-dqn-i-bce08bdf2af
        env = NFSPAtariWrapper(env, keep_info=keep_info)

    env.seed(args.seed)
    return env
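# A hedged usage sketch for create_single_env. The args namespace below is
# hypothetical; the field names simply mirror the attributes the function
# actually reads (env, num_envs, ram, against_baseline, seed). Note that
# eval(env_name) only works if the matching pettingzoo module (e.g. boxing_v1)
# has been imported into this namespace.
from types import SimpleNamespace

args = SimpleNamespace(env='boxing_v1', num_envs=1, ram=False,
                       against_baseline=False, seed=1)
env = create_single_env(args)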
    supersuit.flatten_v0(new_dummy()),
    supersuit.reshape_v0(new_dummy(), (64, 3)),
    supersuit.normalize_obs_v0(new_dummy(), env_min=-1, env_max=5.0),
    supersuit.frame_stack_v1(new_dummy(), 8),
    supersuit.pad_observations_v0(new_dummy()),
    supersuit.pad_action_space_v0(new_dummy()),
    supersuit.black_death_v1(new_dummy()),
    supersuit.agent_indicator_v0(new_dummy(), True),
    supersuit.agent_indicator_v0(new_dummy(), False),
    supersuit.reward_lambda_v0(new_dummy(), lambda x: x / 10),
    supersuit.clip_reward_v0(new_dummy()),
    supersuit.clip_actions_v0(new_continuous_dummy()),
    supersuit.frame_skip_v0(new_dummy(), 4),
    supersuit.sticky_actions_v0(new_dummy(), 0.75),
    supersuit.delay_observations_v0(new_dummy(), 3),
    supersuit.max_observation_v0(new_dummy(), 3),
]


@pytest.mark.parametrize("env", wrappers)
def test_basic_wrappers(env):
    env.seed(5)
    env.reset()
    obs, _, _, _ = env.last()
    act_space = env.action_spaces[env.agent_selection]
    obs_space = env.observation_spaces[env.agent_selection]
    first_obs = env.observe("a_0")
    assert obs_space.contains(first_obs)
    assert first_obs.dtype == obs_space.dtype
    env.step(act_space.sample())
    for agent in env.agent_iter():
    supersuit.normalize_obs_v0(
        dtype_v0(generated_agents_parallel_v0.env(), np.float32),
        env_min=-1,
        env_max=5.0,
    ),
    supersuit.frame_stack_v1(generated_agents_parallel_v0.env(), 8),
    supersuit.reward_lambda_v0(generated_agents_parallel_v0.env(), lambda x: x / 10),
    supersuit.clip_reward_v0(generated_agents_parallel_v0.env()),
    supersuit.nan_noop_v0(generated_agents_parallel_v0.env(), 0),
    supersuit.nan_zeros_v0(generated_agents_parallel_v0.env()),
    supersuit.nan_random_v0(generated_agents_parallel_v0.env()),
    supersuit.frame_skip_v0(generated_agents_parallel_v0.env(), 4),
    supersuit.sticky_actions_v0(generated_agents_parallel_v0.env(), 0.75),
    supersuit.delay_observations_v0(generated_agents_parallel_v0.env(), 3),
    supersuit.max_observation_v0(generated_agents_parallel_v0.env(), 3),
]


@pytest.mark.parametrize("env", wrappers)
def test_pettingzoo_aec_api_par_gen(env):
    api_test(env, num_cycles=50)


wrappers = [
    supersuit.dtype_v0(generated_agents_env_v0.env(), np.int32),
    supersuit.flatten_v0(generated_agents_env_v0.env()),
    supersuit.normalize_obs_v0(
        dtype_v0(generated_agents_env_v0.env(), np.float32),
        env_min=-1,
        env_max=5.0),
    supersuit.pad_observations_v0(simple_world_comm_v2.env()),
    supersuit.pad_action_space_v0(simple_world_comm_v2.env()),
    supersuit.black_death_v3(combined_arms_v6.env()),
    supersuit.agent_indicator_v0(knights_archers_zombies_v10.env(), True),
    supersuit.agent_indicator_v0(knights_archers_zombies_v10.env(), False),
    supersuit.reward_lambda_v0(knights_archers_zombies_v10.env(), lambda x: x / 10),
    supersuit.clip_reward_v0(combined_arms_v6.env()),
    supersuit.nan_noop_v0(knights_archers_zombies_v10.env(), 0),
    supersuit.nan_zeros_v0(knights_archers_zombies_v10.env()),
    supersuit.nan_random_v0(chess_v5.env()),
    supersuit.nan_random_v0(knights_archers_zombies_v10.env()),
    supersuit.frame_skip_v0(combined_arms_v6.env(), 4),
    supersuit.sticky_actions_v0(combined_arms_v6.env(), 0.75),
    supersuit.delay_observations_v0(combined_arms_v6.env(), 3),
    supersuit.max_observation_v0(knights_archers_zombies_v10.env(), 3),
]


@pytest.mark.parametrize("env", wrappers)
def test_pettingzoo_aec_api(env):
    api_test(env)


parallel_wrappers = [
    supersuit.frame_stack_v1(combined_arms_v6.parallel_env(), 8),
    supersuit.frame_stack_v1(simple_push_v2.parallel_env(), 8),
    supersuit.reward_lambda_v0(combined_arms_v6.parallel_env(), lambda x: x / 10),
    supersuit.delay_observations_v0(combined_arms_v6.parallel_env(), 3),
    supersuit.delay_observations_v0(simple_push_v2.parallel_env(), 3),
# TRY NOT TO MODIFY: seeding
device = torch.device(
    'cuda' if torch.cuda.is_available() and args.cuda else 'cpu')
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = args.torch_deterministic

# https://github.com/cpnota/autonomous-learning-library/blob/5ee29eac4ad22d6de00f89345dce6f9c55569149/all/environments/multiagent_atari.py#L26
# def make_atari(env_name, pettingzoo_params):
env = importlib.import_module(
    args.gym_id).parallel_env(obs_type='grayscale_image')
env = ss.agent_indicator_v0(env)
env = ss.clip_reward_v0(env)
env = max_observation_v0(env, 2)
env = frame_skip_v0(env, 4)
env = resize_v0(env, 84, 84)
env = ss.pettingzoo_env_to_vec_env_v0(env)
envs = ss.concat_vec_envs_v0(env,
                             args.num_envs,
                             num_cpus=args.num_envs,
                             base_class='stable_baselines3')
envs = VecMonitor(envs)
if args.capture_video:
    envs = VecVideoRecorder(envs,
                            f'videos/{experiment_name}',
                            record_video_trigger=lambda x: x % 150000 == 0,
                            video_length=400)
envs = VecPyTorch(envs, device)
args.num_envs = envs.num_envs
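# A minimal rollout sketch against the vectorized env built above, assuming the
# stable_baselines3 VecEnv API that concat_vec_envs_v0 exposes here, and that
# VecPyTorch is the cleanrl-style wrapper that accepts action tensors and
# returns observations on `device`.
obs = envs.reset()
for _ in range(10):
    # one random discrete action per (agent, env) slot in the flattened batch
    actions = torch.tensor(
        [envs.action_space.sample() for _ in range(envs.num_envs)],
        device=device)
    obs, rewards, dones, infos = envs.step(actions)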
        env_min=-1,
        env_max=5.0),
    supersuit.frame_stack_v1(knights_archers_zombies_v7.env(), 8),
    supersuit.pad_observations_v0(knights_archers_zombies_v7.env()),
    supersuit.pad_action_space_v0(knights_archers_zombies_v7.env()),
    supersuit.black_death_v1(knights_archers_zombies_v7.env()),
    supersuit.agent_indicator_v0(knights_archers_zombies_v7.env(), True),
    supersuit.agent_indicator_v0(knights_archers_zombies_v7.env(), False),
    supersuit.reward_lambda_v0(knights_archers_zombies_v7.env(), lambda x: x / 10),
    supersuit.clip_reward_v0(knights_archers_zombies_v7.env()),
    supersuit.clip_actions_v0(prison_v3.env(continuous=True)),
    supersuit.frame_skip_v0(knights_archers_zombies_v7.env(), 4),
    supersuit.sticky_actions_v0(knights_archers_zombies_v7.env(), 0.75),
    supersuit.delay_observations_v0(knights_archers_zombies_v7.env(), 3),
    supersuit.max_observation_v0(knights_archers_zombies_v7.env(), 3),
]


@pytest.mark.parametrize("env", wrappers)
def test_pettingzoo_aec_api(env):
    api_test(env)


parallel_wrappers = [
    supersuit.frame_stack_v1(knights_archers_zombies_v7.parallel_env(), 8),
    supersuit.reward_lambda_v0(knights_archers_zombies_v7.parallel_env(),
                               lambda x: x / 10),
    supersuit.delay_observations_v0(knights_archers_zombies_v7.parallel_env(), 3),
    supersuit.color_reduction_v0(knights_archers_zombies_v7.parallel_env(),