def make_env(scenario_name, benchmark=False, pre_encode=False):
    '''
    Creates a MultiAgentEnv object as env. This can be used like a gym
    environment by calling env.reset() and env.step().
    Use env.render() to view the environment on the screen.

    Input:
        scenario_name   :   name of the scenario from ./scenarios/ to be loaded
                            (without the .py extension)
        benchmark       :   present in the signature but unused in this variant
        pre_encode      :   forwarded to MultiAgentEnv
    '''
    from envs.environment import MultiAgentEnv
    import envs.scenarios as scenarios

    # load scenario from script
    scenario = scenarios.load(scenario_name + ".py").Scenario()
    # create world
    world = scenario.make_world()
    # create multiagent environment
    env = MultiAgentEnv(world,
                        reset_callback=scenario.reset_world,
                        reward_callback=scenario.reward,
                        observation_callback=scenario.observation,
                        done_callback=scenario.done,
                        info_callback=scenario.info,
                        rx_callback=scenario.received,
                        tx_callback=scenario.transmitted,
                        pre_encode=pre_encode)
    return env
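
# A minimal usage sketch for the variant above (illustrative only: the
# scenario name 'pursuit' and the random-action step are assumptions, not
# part of this module). The returned env follows the usual gym loop.
def _example_pre_encode_usage():
    env = make_env('pursuit', pre_encode=True)   # hypothetical scenario name
    obs_n = env.reset()                          # one observation per agent
    # sample one action per agent; if the env expects one-hot action vectors
    # rather than raw space samples, encode them accordingly
    act_n = [space.sample() for space in env.action_space]
    obs_n, reward_n, done_n, info_n = env.step(act_n)
    env.render()                                 # draw the world on screen
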
def make_env(scenario_name):
    '''
    Creates a MultiAgentEnv object as env. This can be used like a gym
    environment by calling env.reset() and env.step().
    Use env.render() to view the environment on the screen.

    Input:
        scenario_name   :   name of the scenario from ./scenarios/ to be loaded
                            (without the .py extension)

    Some useful env properties (see environment.py):
        .observation_space  :   Returns the observation space for each agent
        .action_space       :   Returns the action space for each agent
        .n                  :   Returns the number of Agents
    '''
    from envs.environment import MultiAgentEnv
    import envs.scenarios as scenarios

    # load scenario from script
    scenario = scenarios.load(scenario_name + ".py").Scenario()
    # create world
    world = scenario.make_world()
    # create multiagent environment
    env = MultiAgentEnv(world,
                        reset_callback=scenario.reset_world,
                        reward_callback=scenario.reward,
                        observation_callback=scenario.observation,
                        info_callback=scenario.info,
                        done_callback=scenario.done)
    return env
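
# A short sketch of the per-agent properties listed in the docstring above
# (the scenario name is again an assumption): each property is indexed by
# agent, so element i belongs to agent i.
def _example_env_properties():
    env = make_env('pursuit')        # hypothetical scenario name
    print(env.n)                     # number of agents
    print(env.observation_space[0])  # observation space of agent 0
    print(env.action_space[0])       # action space of agent 0
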
def make_env(scenario_name, benchmark=False):
    '''
    Creates a MultiAgentEnv object as env. This can be used like a gym
    environment by calling env.reset() and env.step().
    Use env.render() to view the environment on the screen.

    Input:
        scenario_name   :   name of the scenario from ./scenarios/ to be loaded
                            (without the .py extension)
        benchmark       :   whether you want to produce benchmarking data
                            (usually only done during evaluation)

    Some useful env properties (see environment.py):
        .observation_space  :   Returns the observation space for each agent
        .action_space       :   Returns the action space for each agent
        .n                  :   Returns the number of Agents
    '''
    from envs.environment import MultiAgentEnv
    import envs.scenarios as scenarios

    # load scenario from script
    scenario = scenarios.load(scenario_name + ".py").Scenario()
    # create world
    world = scenario.make_world()
    # create multiagent environment
    if benchmark:
        env = MultiAgentEnv(world, scenario.reset_world, scenario.reward,
                            scenario.observation, scenario.benchmark_data)
    else:
        env = MultiAgentEnv(world,
                            reset_callback=scenario.reset_world,
                            reward_callback=scenario.reward,
                            observation_callback=scenario.observation,
                            done_callback=scenario.done)
    return env
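
# Sketch of the benchmark switch above (scenario name assumed). With
# benchmark=True, scenario.benchmark_data is passed as the fifth positional
# argument, which matches the info-callback slot in the keyword order used
# elsewhere in this file, so per-step info would carry benchmarking data.
def _example_benchmark_usage():
    train_env = make_env('pursuit')                 # uses done_callback only
    eval_env = make_env('pursuit', benchmark=True)  # info carries benchmark_data
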
def make_env(scenario_name, n_drone=1):
    """
    :param scenario_name: name of the scenario from ./scenarios/ to be loaded
                          (without the .py extension)
    :param n_drone: number of drones in the world
    :return: an Env wrapping the scenario world
    """
    import config
    from envs.environment import Env
    import envs.scenarios as scenarios

    FLAGS = config.flags.FLAGS  # reward choice is read from the global config

    # load scenario from script
    scenario = scenarios.load(scenario_name + ".py").Scenario()
    # create world
    world = scenario.make_world(n_drone, scenario.target_move)
    # create Simsim environment
    env = Env(world, scenario.reset_world,
              scenario.get_reward_function(FLAGS.reward),
              scenario.observation, scenario.info, scenario.done)
    return env
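
# Usage sketch for the drone variant above (the scenario name is a
# placeholder assumption, and FLAGS.reward must name a reward function the
# scenario's get_reward_function knows how to build):
def _example_drone_env():
    env = make_env('drone_scenario', n_drone=4)  # hypothetical scenario name
    obs_n = env.reset()                          # one observation per drone
    return env
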
import argparse

import numpy as np

import config
import envs.scenarios as scenarios
from envs.environment import MultiAgentEnv

FLAGS = config.flags.FLAGS

if __name__ == '__main__':
    # parse arguments
    parser = argparse.ArgumentParser(description=None)
    parser.add_argument('-s', '--scenario', default='pursuit.py',
                        help='Path of the scenario Python script.')
    args = parser.parse_args()

    # load scenario from script
    scenario = scenarios.load(args.scenario).Scenario()
    # create world
    world = scenario.make_world()
    # create multiagent environment
    env = MultiAgentEnv(world, scenario.reset_world, scenario.reward,
                        scenario.observation, done_callback=scenario.done)

    act_n = [2, 2]
    print("action space:", env.action_space[0].n)
    print("observation space:", env.observation_space)

    obs_n = env.reset()[:2]
    print(env.get_agent_profile())
    print(env.get_full_encoding()[:, :, 2])
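
    # A hedged continuation of the demo (illustrative only): roll the
    # environment forward a few steps with random actions. If this env
    # expects one-hot action vectors rather than raw space samples, adapt
    # the sampling line accordingly.
    for _ in range(10):
        act_n = [space.sample() for space in env.action_space]
        obs_n, reward_n, done_n, info_n = env.step(act_n)
        env.render()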