Example #1
def gen_env(number_agents, width, height, n_start_goal, seed):

    speed_ratio_map = {
        1.: 0.25,  # Fast passenger train
        1. / 2.: 0.25,  # Fast freight train
        1. / 3.: 0.25,  # Slow commuter train
        1. / 4.: 0.25  # Slow freight train
    }

    env = RailEnv(width=width,
                  height=height,
                  rail_generator=complex_rail_generator(
                      nr_start_goal=n_start_goal,
                      nr_extra=3,
                      min_dist=6,
                      max_dist=99999,
                      seed=seed),
                  schedule_generator=complex_schedule_generator(
                      speed_ratio_map=speed_ratio_map),
                  number_of_agents=number_agents,
                  obs_builder_object=TreeObsForRailEnv(max_depth=5))

    env.reset()
    env.step(dict(zip(range(number_agents), [2] * number_agents)))

    return env
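A minimal usage sketch for gen_env (the snippet omits its imports; the module paths below assume flatland 2.x and are not part of the original):

# Sketch only: imports assumed from flatland 2.x.
from flatland.envs.rail_env import RailEnv
from flatland.envs.rail_generators import complex_rail_generator
from flatland.envs.schedule_generators import complex_schedule_generator
from flatland.envs.observations import TreeObsForRailEnv

env = gen_env(number_agents=3, width=20, height=20, n_start_goal=10, seed=1)
obs, rewards, done, info = env.step({i: 2 for i in range(3)})  # 2 == MOVE_FORWARD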
Example #2
def demo(args=None):
    """Demo script to check installation"""
    env = RailEnv(width=15,
                  height=15,
                  rail_generator=complex_rail_generator(nr_start_goal=10,
                                                        nr_extra=1,
                                                        min_dist=8,
                                                        max_dist=99999),
                  schedule_generator=complex_schedule_generator(),
                  number_of_agents=5)

    env._max_episode_steps = int(15 * (env.width + env.height))
    env_renderer = RenderTool(env)

    while True:
        obs, info = env.reset()
        _done = False
        # Run a single episode here
        step = 0
        while not _done:
            # Compute Action
            _action = {}
            for _idx, _ in enumerate(env.agents):
                _action[_idx] = np.random.randint(0, 5)
            obs, all_rewards, done, _ = env.step(_action)
            _done = done['__all__']
            step += 1
            env_renderer.render_env(show=True,
                                    frames=False,
                                    show_observations=False,
                                    show_predictions=False)
            time.sleep(0.3)
    return 0
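The random integers above index flatland's discrete action space; flatland 2.x also exposes the same actions as an enum, which reads more clearly. Inside the episode loop, the action dict could instead be built like this (a sketch, not part of the original demo):

# The five actions behind np.random.randint(0, 5), as named by
# flatland.envs.rail_env.RailEnvActions:
#   DO_NOTHING (0), MOVE_LEFT (1), MOVE_FORWARD (2),
#   MOVE_RIGHT (3), STOP_MOVING (4).
from flatland.envs.rail_env import RailEnvActions

_action = {_idx: RailEnvActions.MOVE_FORWARD for _idx, _ in enumerate(env.agents)}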
Example #3
def test_save_load():
    env = RailEnv(width=10, height=10,
                  rail_generator=complex_rail_generator(nr_start_goal=2, nr_extra=5, min_dist=6, seed=1),
                  schedule_generator=complex_schedule_generator(), number_of_agents=2)
    env.reset()
    agent_1_pos = env.agents[0].position
    agent_1_dir = env.agents[0].direction
    agent_1_tar = env.agents[0].target
    agent_2_pos = env.agents[1].position
    agent_2_dir = env.agents[1].direction
    agent_2_tar = env.agents[1].target
    
    os.makedirs("tmp", exist_ok=True)

    RailEnvPersister.save(env, "tmp/test_save.pkl")
    env.save("tmp/test_save_2.pkl")

    #env.load("test_save.dat")
    env, env_dict = RailEnvPersister.load_new("tmp/test_save.pkl")
    assert (env.width == 10)
    assert (env.height == 10)
    assert (len(env.agents) == 2)
    assert (agent_1_pos == env.agents[0].position)
    assert (agent_1_dir == env.agents[0].direction)
    assert (agent_1_tar == env.agents[0].target)
    assert (agent_2_pos == env.agents[1].position)
    assert (agent_2_dir == env.agents[1].direction)
    assert (agent_2_tar == env.agents[1].target)
Example #4
def test_normalize_features():

    random.seed(1)
    np.random.seed(1)
    max_depth = 4

    for i in range(10):
        tree_observer = TreeObsForRailEnv(max_depth=max_depth)
        next_rand_number = random.randint(0, 100)

        env = RailEnv(width=10,
                      height=10,
                      rail_generator=complex_rail_generator(
                          nr_start_goal=10,
                          nr_extra=1,
                          min_dist=8,
                          max_dist=99999,
                          seed=next_rand_number),
                      schedule_generator=complex_schedule_generator(),
                      number_of_agents=1,
                      obs_builder_object=tree_observer)

        obs, all_rewards, done, _ = env.step({0: 0})

        obs_new = tree_observer.get()
        # data, distance, agent_data = split_tree(tree=np.array(obs_old), num_features_per_node=11)
        data_normalized = normalize_observation(obs_new,
                                                max_depth,
                                                observation_radius=10)

        filename = 'testdata/test_array_{}.csv'.format(i)
        data_loaded = np.loadtxt(filename, delimiter=',')

        assert np.allclose(data_loaded, data_normalized)
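The length of the normalized vector follows from the tree size: a depth-d tree has sum(4**i for i in range(d + 1)) nodes, each contributing the observer's per-node feature count (11 in the flatland versions this test appears to target; treat that number as an assumption):

# Expected length of data_normalized for max_depth = 4, assuming
# 11 features per tree node.
n_nodes = sum(4 ** i for i in range(4 + 1))  # 341 nodes
expected_len = n_nodes * 11                  # 3751 features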
Example #5
def test_save_load():
    env = RailEnv(width=10,
                  height=10,
                  rail_generator=complex_rail_generator(nr_start_goal=2,
                                                        nr_extra=5,
                                                        min_dist=6,
                                                        seed=1),
                  schedule_generator=complex_schedule_generator(),
                  number_of_agents=2)
    env.reset()
    agent_1_pos = env.agents[0].position
    agent_1_dir = env.agents[0].direction
    agent_1_tar = env.agents[0].target
    agent_2_pos = env.agents[1].position
    agent_2_dir = env.agents[1].direction
    agent_2_tar = env.agents[1].target
    env.save("test_save.dat")
    env.load("test_save.dat")
    assert (env.width == 10)
    assert (env.height == 10)
    assert (len(env.agents) == 2)
    assert (agent_1_pos == env.agents[0].position)
    assert (agent_1_dir == env.agents[0].direction)
    assert (agent_1_tar == env.agents[0].target)
    assert (agent_2_pos == env.agents[1].position)
    assert (agent_2_dir == env.agents[1].direction)
    assert (agent_2_tar == env.agents[1].target)
Example #6
    def regenerate(self, method=None, nAgents=0, env=None):
        self.log("Regenerate size", self.regen_size_width,
                 self.regen_size_height)

        if method is None or method == "Empty":
            fnMethod = empty_rail_generator()
        elif method == "Random Cell":
            fnMethod = random_rail_generator(
                cell_type_relative_proportion=[1] * 11)
        else:
            fnMethod = complex_rail_generator(nr_start_goal=nAgents,
                                              nr_extra=20,
                                              min_dist=12,
                                              seed=int(time.time()))

        if env is None:
            self.env = RailEnv(
                width=self.regen_size_width,
                height=self.regen_size_height,
                rail_generator=fnMethod,
                number_of_agents=nAgents,
                obs_builder_object=TreeObsForRailEnv(max_depth=2))
        else:
            self.env = env
        self.env.reset(regenerate_rail=True)
        self.fix_env()
        self.set_env(self.env)
        self.view.new_env()
        self.redraw()
Example #7
def create_testfiles(parameters, test_nr=0, nr_trials_per_test=100):
    # Parameter initialization
    print('Creating {} with (x_dim,y_dim) = ({},{}) and {} Agents.'.format(
        test_nr, parameters[0], parameters[1], parameters[2]))
    # Reset environment
    random.seed(parameters[3])
    np.random.seed(parameters[3])
    nr_paths = max(4, parameters[2] + int(0.5 * parameters[2]))
    min_dist = int(min([parameters[0], parameters[1]]) * 0.75)
    env = RailEnv(width=parameters[0],
                  height=parameters[1],
                  rail_generator=complex_rail_generator(nr_start_goal=nr_paths,
                                                        nr_extra=5,
                                                        min_dist=min_dist,
                                                        max_dist=99999,
                                                        seed=parameters[3]),
                  schedule_generator=complex_schedule_generator(),
                  obs_builder_object=TreeObsForRailEnv(max_depth=2),
                  number_of_agents=parameters[2])
    printProgressBar(0,
                     nr_trials_per_test,
                     prefix='Progress:',
                     suffix='Complete',
                     length=20)
    for trial in range(nr_trials_per_test):
        # Reset the env
        env.reset(True, True)
        env.save("./Tests/{}/Level_{}.pkl".format(test_nr, trial))
        printProgressBar(trial + 1,
                         nr_trials_per_test,
                         prefix='Progress:',
                         suffix='Complete',
                         length=20)

    return
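create_testfiles reads parameters positionally; a sketch of the expected tuple layout (field names are inferred from the accesses above, not taken from the source):

# parameters = (x_dim, y_dim, n_agents, seed), judging by the indexing
# in create_testfiles; the values here are illustrative.
parameters = (20, 20, 5, 42)
create_testfiles(parameters, test_nr=0, nr_trials_per_test=10)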
Example #8
def create_env(number_agents, width, height, n_start_goal, seed):
    env = RailEnv(width=width,
                  height=height,
                  rail_generator=complex_rail_generator(nr_start_goal=n_start_goal,
                                                        nr_extra=1,
                                                        min_dist=6,
                                                        max_dist=99999,
                                                        seed=seed),
                  schedule_generator=complex_schedule_generator(),
                  number_of_agents=number_agents)

    return env
Example #9
def main(args):
    try:
        opts, args = getopt.getopt(args, "", ["sleep-for-animation="])
    except getopt.GetoptError as err:
        print(str(err))  # will print something like "option -a not recognized"
        sys.exit(2)
    sleep_for_animation = True
    for o, a in opts:
        if o == "--sleep-for-animation":
            sleep_for_animation = str2bool(a)
        else:
            assert False, "unhandled option"

    # Initiate the Predictor
    custom_predictor = ShortestPathPredictorForRailEnv(10)

    # Pass the Predictor to the observation builder
    custom_obs_builder = ObservePredictions(custom_predictor)

    # Initiate Environment
    env = RailEnv(width=10,
                  height=10,
                  rail_generator=complex_rail_generator(nr_start_goal=5,
                                                        nr_extra=1,
                                                        min_dist=8,
                                                        max_dist=99999,
                                                        seed=1),
                  schedule_generator=complex_schedule_generator(),
                  number_of_agents=3,
                  obs_builder_object=custom_obs_builder)

    obs, info = env.reset()
    env_renderer = RenderTool(env, gl="PILSVG")

    # We render the initial step and show the observed cells as colored boxes
    env_renderer.render_env(show=True,
                            frames=True,
                            show_observations=True,
                            show_predictions=False)

    action_dict = {}
    for step in range(100):
        for a in range(env.get_num_agents()):
            action = np.random.randint(0, 5)
            action_dict[a] = action
        obs, all_rewards, done, _ = env.step(action_dict)
        print("Rewards: ", all_rewards, "  [done=", done, "]")
        env_renderer.render_env(show=True,
                                frames=True,
                                show_observations=True,
                                show_predictions=False)
        if sleep_for_animation:
            time.sleep(0.5)
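str2bool is used above but not defined in this snippet; a minimal stand-in (an assumption, not the original helper):

def str2bool(v):
    # Hypothetical helper: accept the usual truthy spellings.
    return str(v).lower() in ("yes", "true", "t", "1")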
Example #10
def generator_from_seed(nr_start_goal=1,
                        nr_extra=100,
                        min_dist=20,
                        max_dist=99999,
                        seed=0):

    rail_generator = complex_rail_generator(nr_start_goal, nr_extra, min_dist,
                                            max_dist, seed)

    def generator(width, height, num_agents, num_resets=0):
        return rail_generator(width, height, num_agents, 0)

    return generator
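The wrapper pins num_resets to 0, so the seeded generator should produce the same layout on every reset. A usage sketch (assuming flatland 2.x):

# Every reset should reproduce the same rail, since the inner call
# always passes num_resets=0 to the seeded generator.
fixed_generator = generator_from_seed(nr_start_goal=2, min_dist=10, seed=7)
env = RailEnv(width=20, height=20,
              rail_generator=fixed_generator,
              number_of_agents=2)
env.reset()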
Example #11
def env_creator():
    """
    Creates an env and returns it
    """
    return RailEnv(width=20,
                   height=30,
                   rail_generator=complex_rail_generator(nr_start_goal=100,
                                                         nr_extra=2,
                                                         min_dist=8,
                                                         max_dist=99999,
                                                         seed=False),
                   schedule_generator=complex_schedule_generator(seed=False),
                   obs_builder_object=GlobalObsForRailEnv(),
                   number_of_agents=3,
                   random_seed=True)
Example #12
def test_complex_rail_generator():
    n_agents = 10
    n_start = 2
    x_dim = 10
    y_dim = 10
    min_dist = 4

    # Check that agent number is changed to fit generated level
    env = RailEnv(width=x_dim, height=y_dim,
                  rail_generator=complex_rail_generator(nr_start_goal=n_start, nr_extra=0, min_dist=min_dist),
                  schedule_generator=complex_schedule_generator(), number_of_agents=n_agents)
    env.reset()
    assert env.get_num_agents() == 2
    assert env.rail.grid.shape == (y_dim, x_dim)

    min_dist = 2 * x_dim

    # Check that no agents are generated when level cannot be generated
    env = RailEnv(width=x_dim, height=y_dim,
                  rail_generator=complex_rail_generator(nr_start_goal=n_start, nr_extra=0, min_dist=min_dist),
                  schedule_generator=complex_schedule_generator(), number_of_agents=n_agents)
    env.reset()
    assert env.get_num_agents() == 0
    assert env.rail.grid.shape == (y_dim, x_dim)

    # Check that everything stays the same when correct parameters are given
    min_dist = 2
    n_start = 5
    n_agents = 5

    env = RailEnv(width=x_dim, height=y_dim,
                  rail_generator=complex_rail_generator(nr_start_goal=n_start, nr_extra=0, min_dist=min_dist),
                  schedule_generator=complex_schedule_generator(), number_of_agents=n_agents)
    env.reset()
    assert env.get_num_agents() == n_agents
    assert env.rail.grid.shape == (y_dim, x_dim)
Example #13
def test_multi_speed_init():
    env = RailEnv(width=50,
                  height=50,
                  rail_generator=complex_rail_generator(nr_start_goal=10,
                                                        nr_extra=1,
                                                        min_dist=8,
                                                        max_dist=99999,
                                                        seed=1),
                  schedule_generator=complex_schedule_generator(),
                  number_of_agents=5)
    # Initialize the agent with the parameters corresponding to the environment and observation_builder
    agent = RandomAgent(218, 4)

    # Empty dictionary for all agent action
    action_dict = dict()

    # Set all the different speeds
    # Reset environment and get initial observations for all agents
    env.reset(False, False, True)

    # Here you can also further enhance the provided observation by means of normalization
    # See training navigation example in the baseline repository
    old_pos = []
    for i_agent in range(env.get_num_agents()):
        env.agents[i_agent].speed_data['speed'] = 1. / (i_agent + 1)
        old_pos.append(env.agents[i_agent].position)

    # Run episode
    for step in range(100):

        # Choose an action for each agent in the environment
        for a in range(env.get_num_agents()):
            action = agent.act(0)
            action_dict.update({a: action})

            # Check that agent did not move in between its speed updates
            assert old_pos[a] == env.agents[a].position

        # Environment step which returns the observations for all agents, their corresponding
        # reward and whether they are done
        _, _, _, _ = env.step(action_dict)

        # Update old position whenever an agent was allowed to move
        for i_agent in range(env.get_num_agents()):
            if (step + 1) % (i_agent + 1) == 0:
                print(step, i_agent, env.agents[i_agent].position)
                old_pos[i_agent] = env.agents[i_agent].position
Example #14
def test_rail_env_speed_initializer():
    speed_ratio_map = {1: 0.3, 2: 0.4, 3: 0.1, 5: 0.2}
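    # NOTE: speed_ratio_map is defined here but never passed to
    # complex_schedule_generator below, so every agent should get the
    # default speed of 1.0; the final assertion still holds because
    # 1.0 == 1 is in the expected set.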

    env = RailEnv(width=50, height=50,
                  rail_generator=complex_rail_generator(nr_start_goal=10, nr_extra=1, min_dist=8, max_dist=99999,
                                                        seed=1), schedule_generator=complex_schedule_generator(),
                  number_of_agents=10)
    env.reset()
    actual_speeds = list(map(lambda agent: agent.speed_data['speed'], env.agents))

    expected_speed_set = set(speed_ratio_map.keys())

    # check that the number of speeds generated is correct
    assert len(actual_speeds) == env.get_num_agents()

    # check that only the speeds defined are generated
    assert all(actual_speed in expected_speed_set for actual_speed in actual_speeds)
Example #15
def env_gradual_update(input_env, agent=False, hardness_lvl=1):

    agent_num = input_env.number_of_agents
    env_width = input_env.width + 4
    env_height = input_env.height + 4

    map_agent_ratio = int(np.round(((env_width + env_height) / 2) / 5 - 2))
    agent_num = max(map_agent_ratio, 1)

    if hardness_lvl == 1:

        rail_generator = complex_rail_generator(nr_start_goal=20,
                                                nr_extra=1,
                                                min_dist=9,
                                                max_dist=99999,
                                                seed=0)

        schedule_generator = complex_schedule_generator()
    else:

        # NOTE: flatland's sparse_rail_generator does not accept the complex
        # generator's nr_start_goal/nr_extra/min_dist arguments; the
        # city-based parameters below are an assumed replacement (compare
        # the sparse call in Example #23).
        rail_generator = sparse_rail_generator(max_num_cities=9,
                                               seed=0)

        schedule_generator = sparse_schedule_generator()

    global env, env_renderer, render

    if render:
        env_renderer.close_window()

    env = RailEnv(width=env_width,
                  height=env_height,
                  rail_generator=rail_generator,
                  schedule_generator=schedule_generator,
                  obs_builder_object=GlobalObsForRailEnv(),
                  number_of_agents=agent_num)

    env_renderer = RenderTool(env)
Example #16
def create_env(nr_start_goal=10,
               nr_extra=2,
               min_dist=8,
               max_dist=99999,
               nr_agent=10,
               seed=0,
               render_mode='PIL'):
    env = RailEnv(width=30,
                  height=30,
                  rail_generator=complex_rail_generator(
                      nr_start_goal, nr_extra, min_dist, max_dist, seed),
                  schedule_generator=complex_schedule_generator(),
                  obs_builder_object=GlobalObsForRailEnv(),
                  number_of_agents=nr_agent)
    env_renderer = RenderTool(env, gl=render_mode)
    obs = env.reset()

    return env, env_renderer, obs
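A usage sketch for create_env (rendering needs a working display backend; the values are illustrative):

env, env_renderer, obs = create_env(nr_agent=4, seed=3)
env_renderer.render_env(show=True, frames=False, show_observations=False)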
Example #17
def create_multi_agent_environment(dimension, num_agents, timed, seed):
    # Create new environment.

    env = RailEnv(width=dimension,
                  height=dimension,
                  rail_generator=complex_rail_generator(
                      nr_start_goal=int(1.5 * num_agents),
                      nr_extra=int(1.2 * num_agents),
                      min_dist=int(floor(dimension / 2)),
                      max_dist=99999,
                      seed=0),
                  schedule_generator=complex_schedule_generator(timed=timed),
                  malfunction_generator_and_process_data=None,
                  number_of_agents=num_agents)

    env.reset(random_seed=int(seed))

    return env
Example #18
def test_schedule_from_file_complex():
    """
    Test to see that all parameters are loaded as expected
    Returns
    -------

    """
    # Different agent types (trains) with different speeds.
    speed_ratio_map = {
        1.: 0.25,  # Fast passenger train
        1. / 2.: 0.25,  # Fast freight train
        1. / 3.: 0.25,  # Slow commuter train
        1. / 4.: 0.25  # Slow freight train
    }

    # Generate complex test env
    rail_generator = complex_rail_generator(nr_start_goal=10,
                                            nr_extra=1,
                                            min_dist=8,
                                            max_dist=99999)
    schedule_generator = complex_schedule_generator(speed_ratio_map)

    create_and_save_env(file_name="./complex_env_test.pkl",
                        rail_generator=rail_generator,
                        schedule_generator=schedule_generator)

    # Load the different envs and check the parameters

    # Complex generator
    rail_generator = rail_from_file("./complex_env_test.pkl")
    schedule_generator = schedule_from_file("./complex_env_test.pkl")
    complex_env_from_file = RailEnv(width=1,
                                    height=1,
                                    rail_generator=rail_generator,
                                    schedule_generator=schedule_generator)
    complex_env_from_file.reset(True, True)

    # Assert loaded agent number is correct
    assert complex_env_from_file.get_num_agents() == 10

    # Assert max steps is correct
    assert complex_env_from_file._max_episode_steps == 1350
Example #19
def test_save_load_mpk():
    env = RailEnv(width=10, height=10,
                  rail_generator=complex_rail_generator(nr_start_goal=2, nr_extra=5, min_dist=6, seed=1),
                  schedule_generator=complex_schedule_generator(), number_of_agents=2)
    env.reset()

    os.makedirs("tmp", exist_ok=True)

    RailEnvPersister.save(env, "tmp/test_save.mpk")

    #env.load("test_save.dat")
    env2, env_dict = RailEnvPersister.load_new("tmp/test_save.mpk")
    assert (env.width == env2.width)
    assert (env.height == env2.height)
    assert (len(env2.agents) == len(env.agents))
    
    for agent1, agent2 in zip(env.agents, env2.agents):
        assert(agent1.position == agent2.position)
        assert(agent1.direction == agent2.direction)
        assert(agent1.target == agent2.target)
Example #20
def run_benchmark():
    """Run benchmark on a small number of agents in complex rail environment."""
    random.seed(1)
    np.random.seed(1)

    # Example generate a random rail
    env = RailEnv(width=15,
                  height=15,
                  rail_generator=complex_rail_generator(nr_start_goal=5,
                                                        nr_extra=20,
                                                        min_dist=12),
                  schedule_generator=complex_schedule_generator(),
                  number_of_agents=5)
    env.reset()

    n_trials = 20
    action_dict = dict()
    action_prob = [0] * 4

    for trials in range(1, n_trials + 1):

        # Reset environment
        obs, info = env.reset()

        # Run episode
        for step in range(100):
            # Action
            for a in range(env.get_num_agents()):
                action = np.random.randint(0, 4)
                action_prob[action] += 1
                action_dict.update({a: action})

            # Environment step
            next_obs, all_rewards, done, _ = env.step(action_dict)

            if done['__all__']:
                break
        if trials % 100 == 0:
            action_prob = [1] * 4
Example #21
def create_env(seed=None):
    """
    Helper function that creates an env everywhere
    This way it only needs to be defined here
    """
    from flatland.envs.rail_env import RailEnv
    from flatland.envs.observations import TreeObsForRailEnv
    from flatland.envs.rail_generators import complex_rail_generator
    from flatland.envs.schedule_generators import complex_schedule_generator
    # TODO make more configurable
    env = RailEnv(width=20,
                  height=20,
                  obs_builder_object=TreeObsForRailEnv(2),
                  rail_generator=complex_rail_generator(nr_start_goal=100,
                                                        nr_extra=2,
                                                        min_dist=8,
                                                        max_dist=99999,
                                                        seed=seed),
                  schedule_generator=complex_schedule_generator(seed=seed),
                  number_of_agents=3,
                  random_seed=seed)
    return env
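Because the seed is threaded through the rail generator, the schedule generator, and random_seed, two envs built from the same seed should match. A sketch (reset returns (obs, info) in flatland 2.x):

env_a = create_env(seed=42)
env_b = create_env(seed=42)
env_a.reset()
env_b.reset()
# With identical seeds the generated rails should be identical.
assert (env_a.rail.grid == env_b.rail.grid).all()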
Example #22
def env_random_update(input_env, decay, agent=False, hardness_lvl=1):

    agent_num = np.random.randint(1, 5)
    env_width = (agent_num + 2) * 5
    env_height = (agent_num + 2) * 5

    if hardness_lvl == 1:

        rail_generator = complex_rail_generator(nr_start_goal=20,
                                                nr_extra=1,
                                                min_dist=9,
                                                max_dist=99999,
                                                seed=0)

        schedule_generator = complex_schedule_generator()
    else:

        # NOTE: flatland's sparse_rail_generator does not accept the complex
        # generator's nr_start_goal/nr_extra/min_dist arguments; the
        # city-based parameters below are an assumed replacement (compare
        # the sparse call in Example #23).
        rail_generator = sparse_rail_generator(max_num_cities=9,
                                               seed=0)

        schedule_generator = sparse_schedule_generator()

    global env, env_renderer, render

    if render:
        env_renderer.close_window()

    env = RailEnv(width=env_width,
                  height=env_height,
                  rail_generator=rail_generator,
                  schedule_generator=schedule_generator,
                  obs_builder_object=GlobalObsForRailEnv(),
                  number_of_agents=agent_num)

    env_renderer = RenderTool(env)
Example #23
    def update_env_with_params(self,
                               width,
                               height,
                               num_agents,
                               max_steps,
                               rail_type,
                               rail_gen_params,
                               seed=-1):
        if seed == -1:
            seed = random.randint(0, 100000)

        self.num_agents = num_agents
        self.max_steps = max_steps

        if rail_type == 'complex':
            self.rail_gen = complex_rail_generator(
                nr_start_goal=rail_gen_params['nr_start_goal'],
                nr_extra=rail_gen_params['nr_extra'],
                min_dist=rail_gen_params['min_dist'],
                max_dist=rail_gen_params['max_dist'],
                seed=seed)

            #self.schedule_gen = complex_schedule_generator()
        elif rail_type == 'sparse':
            self.rail_gen = sparse_rail_generator(
                max_num_cities=rail_gen_params['num_cities'],
                seed=seed,
                grid_mode=rail_gen_params['grid_mode'],
                max_rails_between_cities=rail_gen_params[
                    'max_rails_between_cities'],
                max_rails_in_city=rail_gen_params['max_rails_in_city'])

        else:
            raise ValueError(
                'Please specify either "complex" or "sparse" as rail_type')

        self.generate_env(width, height)
Example #24
def main(args):
    try:
        opts, args = getopt.getopt(args, "", ["sleep-for-animation="])
    except getopt.GetoptError as err:
        print(str(err))  # will print something like "option -a not recognized"
        sys.exit(2)
    sleep_for_animation = True
    for o, a in opts:
        if o == "--sleep-for-animation":
            sleep_for_animation = str2bool(a)
        else:
            assert False, "unhandled option"

    env = RailEnv(width=7,
                  height=7,
                  rail_generator=complex_rail_generator(nr_start_goal=10,
                                                        nr_extra=1,
                                                        min_dist=5,
                                                        max_dist=99999,
                                                        seed=1),
                  schedule_generator=complex_schedule_generator(),
                  number_of_agents=1,
                  obs_builder_object=SingleAgentNavigationObs())

    obs, info = env.reset()
    env_renderer = RenderTool(env)
    env_renderer.render_env(show=True, frames=True, show_observations=True)
    for step in range(100):
        action = np.argmax(obs[0]) + 1
        obs, all_rewards, done, _ = env.step({0: action})
        print("Rewards: ", all_rewards, "  [done=", done, "]")
        env_renderer.render_env(show=True, frames=True, show_observations=True)
        if sleep_for_animation:
            time.sleep(0.1)
        if done["__all__"]:
            break
    env_renderer.close_window()
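The action decoding above relies on obs[0] being a one-hot over (left, forward, right); adding 1 to the argmax lines it up with flatland's action enum (the enum values below are the standard flatland 2.x ones):

# np.argmax(obs[0]) + 1 maps onto RailEnvActions, an IntEnum:
from flatland.envs.rail_env import RailEnvActions

assert RailEnvActions.MOVE_LEFT == 1
assert RailEnvActions.MOVE_FORWARD == 2
assert RailEnvActions.MOVE_RIGHT == 3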
Example #25
import glob

seed = 69  #nice

width = 10  # @param{type: "integer"}
height = 10  # @param{type: "integer"}
num_agents = 4  # @param{type: "integer"}
tree_depth = 2  # @param{type: "integer"}
radius_observation = 10
WINDOW_LENGTH = 22  # @param{type: "integer"}

rail_generator = complex_rail_generator(
    nr_start_goal=10,  # @param{type:"integer"} number of start/goal
    # connections; the higher, the easier it is for the trains
    nr_extra=10,  # @param{type:"integer"} extra connections
    # (useful for alternate paths); the higher, the easier
    min_dist=10,
    max_dist=99999,
    seed=seed)

env = RailEnv(width=width,
              height=height,
              rail_generator=rail_generator,
              obs_builder_object=TreeObsForRailEnv(tree_depth),
              number_of_agents=num_agents)

obs, info = env.reset()

env_renderer = RenderTool(env)
Example #26
                        handle, new_position[0], new_position[1],
                        direction] / 100
                else:
                    current['valid'] = False
                    current['distance_to_goal'] = np.inf

        return weights


seed = random.randint(0, 2**32 - 1)
print(f"Seed: {seed}")
env = RailEnv(width=20,
              height=20,
              rail_generator=complex_rail_generator(nr_start_goal=10,
                                                    nr_extra=10,
                                                    min_dist=5,
                                                    max_dist=99999,
                                                    seed=seed),
              schedule_generator=complex_schedule_generator(),
              number_of_agents=1,
              obs_builder_object=CustomWeightObserver())

env_renderer = RenderTool(env, gl="PILSVG")

agent = AbelAgent(218, 5)
n_trials = 50

for trials in range(1, n_trials + 1):
    # Reset Environment
    obs = env.reset()
    env_renderer.reset()
Example #27
def make_env(env_params, random_seed=True):
    """
    Make env, setup calculate constants
    @param env_params: setup parameters
    @param random_seed: whether to use random seed
    @return: env, state_size, action_size, max_steps for given env
    """
    # Obs builder
    if env_params.use_predictor:
        tree_observation = TreeObsForRailEnv(
            max_depth=env_params.observation_tree_depth,
            predictor=ShortestPathPredictorForRailEnv(30))
    else:
        tree_observation = TreeObsForRailEnv(
            max_depth=env_params.observation_tree_depth)

    seed = env_params.seed if env_params.seed != -1 else random.randint(0, 100)

    n_agents = env_params.n_agents
    x_dim = env_params.x_dim
    y_dim = env_params.y_dim

    # for now
    # n_agents = env_params.n_agents_min

    print(
        f"Env generation seed: {seed}, agents: {n_agents}, rows: {y_dim}, cols: {x_dim}"
    )

    if env_params.rail_generator == "sparse":
        # n_cities = random.randint(env_params.n_cities_min, env_params.n_cities_max)

        rail_gen = sparse_rail_generator(
            max_num_cities=env_params.n_cities,
            seed=seed,
            grid_mode=False,
            max_rails_between_cities=env_params.max_rails_between_cities,
            max_rails_in_city=env_params.max_rails_in_city)
    else:
        n_goals = n_agents + random.randint(0, 3)
        min_dist = int(0.75 * min(x_dim, y_dim))

        rail_gen = complex_rail_generator(nr_start_goal=n_goals,
                                          nr_extra=25,
                                          min_dist=min_dist,
                                          max_dist=9999,
                                          seed=seed)

    # setup env
    env = RailEnv(width=x_dim,
                  height=y_dim,
                  rail_generator=rail_gen,
                  schedule_generator=sparse_schedule_generator(),
                  number_of_agents=n_agents,
                  obs_builder_object=tree_observation)

    env.reset(regenerate_rail=True, regenerate_schedule=True)

    # calc state size given the depth of the tree and num features
    n_features_per_node = env.obs_builder.observation_dim
    n_nodes = 0
    for i in range(env_params.observation_tree_depth + 1):
        n_nodes += np.power(4, i)

    frame_stack_mult = 1
    if env_params.stack_obs:
        frame_stack_mult = env_params.how_many_stack
    state_size = frame_stack_mult * n_features_per_node * n_nodes

    # there are always 5 actions
    action_size = 5
    # official formula
    if env_params.rail_generator == "sparse":
        max_steps = int(4 * 2 * (env.height + env.width +
                                 (env_params.n_agents / env_params.n_cities)))
    else:
        max_steps = int(3 * (env.height + env.width))

    random.seed()
    return env, state_size, action_size, max_steps
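make_env expects an env_params object carrying a number of attributes; a minimal parameter bundle sketch (field names inferred from the accesses above, values illustrative):

from types import SimpleNamespace

env_params = SimpleNamespace(
    use_predictor=True, observation_tree_depth=2,
    seed=-1, n_agents=3, x_dim=30, y_dim=30,
    rail_generator="sparse", n_cities=2,
    max_rails_between_cities=2, max_rails_in_city=3,
    stack_obs=False, how_many_stack=1)

env, state_size, action_size, max_steps = make_env(env_params)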
Example #28
def evalfun(debug=False, refresh=0.1):  # refresh default = 0.1
    # A list of (mapsize, agent count) tuples, change or extend this to test different sizes.
    #problemsizes = [(5, 1), (7, 2), (10,3), (13,4), (40, 20)]
    #problemsizes = [(5, 1), (5,2), (6,3), (7,3), (14,4), (8,5)]
    problemsizes = [(6, 3)]

    _seed = np.random.randint(1, 9999999)

    #_seed = 2

    print("Seed:", _seed)
    print("%10s\t%8s\t%9s" % ("Dimensions", "Success", "Runtime"))
    for problemsize in problemsizes:

        dimension = problemsize[0]
        NUMBER_OF_AGENTS = problemsize[1]

        # Create new environment.
        env = RailEnv(width=dimension,
                      height=dimension,
                      rail_generator=complex_rail_generator(
                          nr_start_goal=int(1.5 * NUMBER_OF_AGENTS),
                          nr_extra=int(1.2 * NUMBER_OF_AGENTS),
                          min_dist=int(floor(dimension / 2)),
                          max_dist=99999,
                          seed=0),
                      schedule_generator=complex_schedule_generator(),
                      malfunction_generator_and_process_data=None,
                      number_of_agents=NUMBER_OF_AGENTS)

        env_renderer = RenderTool(env, screen_width=1920, screen_height=1080)

        # Initialize positions.
        env.reset(random_seed=_seed)
        env_renderer.render_env(show=True,
                                frames=False,
                                show_observations=False)
        # Time the search.
        start = time.time()
        schedule = planpath.search(env)
        #schedule = planpath.better_search(env)
        duration = time.time() - start

        if debug:
            env_renderer.render_env(show=True,
                                    frames=False,
                                    show_observations=False)
            time.sleep(refresh)

        # Validate that environment state is unchanged.
        assert env.num_resets == 1 and env._elapsed_steps == 0

        # Run the schedule
        success = False
        for action in schedule:
            _, _, _done, _ = env.step(action)
            success = _done['__all__']
            #print(env.agents)
            if debug:
                #for agent in env.agents:
                #if agent.position:
                #agent_y, agent_x = agent.position
                #print(env.get_valid_directions_on_grid(agent_y,agent_x))
                print(action)
                env_renderer.render_env(show=True,
                                        frames=False,
                                        show_observations=False)
                time.sleep(refresh)

        # Print the performance of the algorithm
        print("%10s\t%8s\t%9.6f" % (str(problemsize), str(success), duration))
Example #29
def evalfun(debug=False, refresh=0.1):  # refresh default = 0.1
    # A list of (mapsize, agent count) tuples, change or extend this to test different sizes.
    #problemsizes = [(5, 1), (7, 2), (10,3), (13,4), (40, 20)]

    _seeds = np.random.randint(1, 99, 15)
    #_seed = 2984379

    avg = {}
    for x in range(1, 4):
        avg[x] = []
        problemsizes = [(5, x), (6, x), (8, x), (10, x), (15, x)]
        for problemsize in problemsizes:
            avg_time = 0
            successes = 0
            for seed in _seeds:
                dimension = problemsize[0]
                NUMBER_OF_AGENTS = problemsize[1]

                # Create new environment.
                env = RailEnv(width=dimension,
                              height=dimension,
                              rail_generator=complex_rail_generator(
                                  nr_start_goal=int(1.5 * NUMBER_OF_AGENTS),
                                  nr_extra=int(1.2 * NUMBER_OF_AGENTS),
                                  min_dist=int(floor(dimension / 2)),
                                  max_dist=99999,
                                  seed=0),
                              schedule_generator=complex_schedule_generator(),
                              malfunction_generator_and_process_data=None,
                              number_of_agents=NUMBER_OF_AGENTS)

                env_renderer = RenderTool(env,
                                          screen_width=1920,
                                          screen_height=1080)

                env.reset(random_seed=int(seed))

                if len(env.agents) != NUMBER_OF_AGENTS:
                    continue

                start = time.time()
                schedule = planpath.search(env)
                duration = time.time() - start

                assert env.num_resets == 1 and env._elapsed_steps == 0

                # Run the schedule
                success = False
                if schedule is not None:
                    for action in schedule:
                        _, _, _done, _ = env.step(action)
                        success = _done['__all__']
                        #print(env.agents)

                    if success:
                        avg_time += duration
                        print("Success:", problemsize, seed, duration)
                        successes += 1

                    else:
                        print("Bad schedule - failed.")
                else:
                    print("Couldn't find solution for seed: ", duration, seed)
                    env_renderer.render_env(show=True,
                                            frames=False,
                                            show_observations=False)

            avg_time = avg_time / successes if successes else float('nan')
            avg[x].append((problemsize[0], avg_time))
    #print("%10s\t%8s\t%9s" % ("Dimensions", "Success", "Runtime"))

    #

    # Initialize positions.

    # Time the search.

    #if debug:
    #env_renderer.render_env(show=True, frames=False, show_observations=False)
    #time.sleep(refresh)

    # Validate that environment state is unchanged.

    # Print the performance of the algorithm
    #print("%10s\t%8s\t%9.6f" % (str(problemsize), str(success), duration))
        print(avg)
Example #30
from reinforcement_learning.ordered_policy import OrderedPolicy

np.random.seed(2)

x_dim = 20  # np.random.randint(8, 20)
y_dim = 20  # np.random.randint(8, 20)
n_agents = 10  # np.random.randint(3, 8)
n_goals = n_agents + np.random.randint(0, 3)
min_dist = int(0.75 * min(x_dim, y_dim))

env = RailEnv(width=x_dim,
              height=y_dim,
              rail_generator=complex_rail_generator(
                  nr_start_goal=n_goals, nr_extra=5, min_dist=min_dist,
                  max_dist=99999,
                  seed=0
              ),
              schedule_generator=complex_schedule_generator(),
              obs_builder_object=TreeObsForRailEnv(max_depth=1, predictor=ShortestPathPredictorForRailEnv()),
              number_of_agents=n_agents)
env.reset(True, True)

tree_depth = 1
observation_helper = TreeObsForRailEnv(max_depth=tree_depth, predictor=ShortestPathPredictorForRailEnv())
env_renderer = RenderTool(env, gl="PGL", )
handle = env.get_agent_handles()
n_episodes = 1
max_steps = 100 * (env.height + env.width)
record_images = False
policy = OrderedPolicy()
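The snippet ends after setup; a minimal episode loop in the style of the earlier examples could follow (OrderedPolicy's act() interface is not shown here, so random actions stand in):

for step in range(max_steps):
    action_dict = {a: np.random.randint(0, 5) for a in handle}
    obs, all_rewards, done, info = env.step(action_dict)
    if done['__all__']:
        break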