Example #1
    def test_2_agents(self):
        initial_agents_params = [{
            "pos": (2, 4),
            "direction": Direction.DOWN,
            "deterministic": True,
            "depth": 1
        }, {
            "pos": (6, 1),
            "direction": Direction.LEFT,
            "deterministic": True,
            "depth": 1
        }]
        model = SpeedModel(
            width=10,
            height=10,
            nb_agents=2,
            initial_agents_params=initial_agents_params,
            agent_classes=[NStepSurvivalAgent for _ in range(2)])
        particle_cells, region_sizes, is_endgame, _ = voronoi(
            model, model.active_speed_agents[0].unique_id)
        self.assertEqual({0: 2, 1: 48, 2: 50}, region_sizes)
        self.assertEqual(False, is_endgame)

        # run for 5 steps and test again
        for _ in range(5):
            model.step()
        particle_cells, region_sizes, is_endgame, _ = voronoi(
            model, model.active_speed_agents[0].unique_id)
        self.assertEqual({0: 12, 1: 53, 2: 35}, region_sizes)
        self.assertEqual(False, is_endgame)
Example #2
    def test_endgame(self):
        # creating a model where agent 1 is in an endgame
        cells = np.array([[1, 0, 0, 2, 0], [1, 0, 0, 2, 0], [1, 1, 0, 2, 2]])
        initial_agents_params = [{
            "pos": (1, 2),
            "direction": Direction.DOWN,
            "deterministic": True,
            "depth": 1
        }, {
            "pos": (4, 2),
            "direction": Direction.RIGHT,
            "deterministic": True,
            "depth": 1
        }]
        model = SpeedModel(
            width=5,
            height=3,
            nb_agents=2,
            cells=cells,
            initial_agents_params=initial_agents_params,
            agent_classes=[NStepSurvivalAgent for _ in range(2)])
        particle_cells, region_sizes, is_endgame, _ = voronoi(model, 1)
        self.assertEqual({0: 8, 1: 5, 2: 2}, region_sizes)
        self.assertEqual(True, is_endgame)

        # run one step and test again
        model.step()
        particle_cells, region_sizes, is_endgame, _ = voronoi(model, 1)
        self.assertEqual({0: 10, 1: 4, 2: 1}, region_sizes)
        self.assertEqual(True, is_endgame)
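
These tests pin down the interface of voronoi: it returns a tuple (particle_cells, region_sizes, is_endgame, _), where region_sizes appears to map each agent's unique_id to the number of empty cells that agent reaches first, 0 to the occupied cells (two agent heads in Example #1, twelve occupied cells after five steps), and -1 to contested cells that several agents reach simultaneously (see Example #13). Below is a minimal sketch of such a region computation as a multi-source BFS over the grid; it is an illustration under those assumptions, not the project's voronoi, which additionally returns particle cells and detects endgames.

from collections import deque
import numpy as np

def voronoi_region_sizes(cells, agent_positions):
    # cells: 2D numpy array, 0 = empty, anything else = occupied
    # agent_positions: {unique_id: (x, y)} head positions of the active agents
    height, width = cells.shape
    dist = np.full((height, width), -1)
    owner = np.zeros((height, width), dtype=int)  # 0 = occupied or unreached
    queue = deque((x, y, unique_id, 0)
                  for unique_id, (x, y) in agent_positions.items())
    while queue:
        x, y, unique_id, d = queue.popleft()
        for nx, ny in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
            if 0 <= nx < width and 0 <= ny < height and cells[ny, nx] == 0:
                if dist[ny, nx] == -1:
                    # first agent to reach this empty cell claims it
                    dist[ny, nx] = d + 1
                    owner[ny, nx] = unique_id
                    queue.append((nx, ny, unique_id, d + 1))
                elif dist[ny, nx] == d + 1 and owner[ny, nx] != unique_id:
                    owner[ny, nx] = -1  # contested: reached simultaneously
    ids, counts = np.unique(owner, return_counts=True)
    return dict(zip(ids.tolist(), counts.tolist()))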
Example #3
    def evaluate(self,
                 repetitions,
                 seeds=None,
                 show=True,
                 save=False,
                 random_move_time=False):
        self._init_tables(repetitions)
        for rep in range(repetitions):
            if random_move_time:
                # give every agent the same randomly drawn time budget per move
                move_time = np.random.uniform(5, 15)
                for i in range(self.model_params["nb_agents"]):
                    self.model_params["initial_agents_params"][i][
                        "time_for_move"] = move_time
            self.model = SpeedModel(**self.model_params)
            if seeds is not None:
                self.model.reset_randomizer(seeds[rep])
            while self.model.running:
                active_agent_ids = list(
                    map(lambda x: x.unique_id, self.model.active_speed_agents))
                self.model.step()
                new_active_agent_ids = list(
                    map(lambda x: x.unique_id, self.model.active_speed_agents))
                # agents active before the step but not after it were
                # eliminated in this step
                eliminated_agent_ids = list(
                    set(active_agent_ids) - set(new_active_agent_ids))
                for unique_id in eliminated_agent_ids:
                    # an eliminated agent places directly behind all survivors
                    self.placement_table[unique_id - 1, rep] += len(
                        self.model.active_speed_agents) + 1
            for winner in self.model.active_speed_agents:
                self.placement_table[winner.unique_id - 1, rep] += 1
            self._update_tables(rep)
        self._process_results(repetitions, show, save)
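
A minimal usage sketch of the evaluator (the parameter values are hypothetical; model_params carries exactly the keyword arguments that SpeedModel receives above, and seeds must have one entry per repetition):

evaluator = Evaluator(model_params={
    "width": 10,
    "height": 10,
    "nb_agents": 2,
    "agent_classes": [NStepSurvivalAgent, NStepSurvivalAgent],
})
evaluator.evaluate(repetitions=20, seeds=list(range(20)), show=False, save=True)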
Example #4
    def __init__(self, model_params, parameter_settings_info=None):
        self.model_params = model_params
        self.parameter_settings_info = parameter_settings_info

        if "initial_agents_params" not in self.model_params:
            self.model_params["initial_agents_params"] = [
                {} for _ in range(self.model_params["nb_agents"])
            ]
        self.model = SpeedModel(**model_params)
        # the result tables are allocated per run in _init_tables
        self.win_table = None
        self.elimination_step_table = None
        self.placement_table = None
        self.elimination_action_table = None
        self.agent_independent_table = None
Example #5
    def test_cut_off(self):
        # Agent 1 can cut off agent 2 in one move (only with action SPEED_UP) and win the endgame
        # (voronoi should detect that)
        for agent_cls in self.agent_classes:
            if agent_cls != MultiMinimaxAgent:
                cells = np.array([
                    [0, 0, 0, 0, 0, 0, 1],
                    [0, 0, 0, 0, 0, 0, 1],
                    [0, 0, 0, 0, 0, 0, 1],
                    [0, 0, 0, 0, 0, 1, 1],
                    [2, 2, 2, 0, 2, 2, 2],
                    [2, 2, 2, 0, 0, 0, 2],
                    [2, 2, 2, 2, 0, 0, 2],
                    [2, 2, 2, 2, 2, 2, 2],
                ])
                initial_agents_params = [{
                    "pos": (5, 3),
                    "direction": Direction.LEFT
                }, {
                    "pos": (3, 6),
                    "direction": Direction.UP
                }]
                model = SpeedModel(width=7,
                                   height=8,
                                   nb_agents=2,
                                   cells=cells,
                                   initial_agents_params=initial_agents_params,
                                   agent_classes=[agent_cls for _ in range(2)])

                game_state = get_state(model, model.active_speed_agents[0])
                action_agent_1 = model.active_speed_agents[0].multi_minimax(
                    depth=2, game_state=game_state)
                self.assertEqual(Action.SPEED_UP, action_agent_1)
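
The multi_minimax(depth, game_state) calls in this and the following tests run a depth-limited adversarial search: the root agent maximizes over its own actions while opponents are assumed to minimize. As a rough, self-contained illustration of that general shape (not the project's actual multi_minimax, whose state handling and evaluation are richer), here is a tiny minimax over a toy game; CountdownState exists only so the sketch runs:

from dataclasses import dataclass

@dataclass(frozen=True)
class CountdownState:
    # toy game: players alternately take 1 or 2 from a counter;
    # the player who is to move at 0 has no actions and loses
    counter: int

    def actions(self):
        return [a for a in (1, 2) if a <= self.counter]

    def apply(self, action):
        return CountdownState(self.counter - action)

    def evaluate(self, maximizing):
        if self.counter == 0:
            return -1 if maximizing else 1  # the player to move lost
        return 0  # depth cut-off: score unknown positions neutrally

def minimax(state, depth, maximizing=True):
    if depth == 0 or not state.actions():
        return state.evaluate(maximizing)
    values = [minimax(state.apply(a), depth - 1, not maximizing)
              for a in state.actions()]
    return max(values) if maximizing else min(values)

def best_action(state, depth):
    # the root agent maximizes over its own actions
    return max(state.actions(),
               key=lambda a: minimax(state.apply(a), depth - 1, False))

print(best_action(CountdownState(4), depth=4))  # -> 1 (leave the opponent 3)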
Example #6
    def test_no_gamble(self):
        # Two agents facing each other should not gamble by racing for the cell between them (in this case)
        for agent_cls in self.agent_classes:
            cells = np.array([[0, 1, 0, 2, 0],
                              [0, 0, 0, 0, 0],
                              [0, 0, 0, 0, 0],
                              [0, 0, 0, 0, 0],
                              [0, 0, 0, 0, 0]])
            initial_agents_params = [{
                "pos": (1, 0),
                "direction": Direction.RIGHT
            }, {
                "pos": (3, 0),
                "direction": Direction.LEFT
            }]
            model = SpeedModel(width=5,
                               height=5,
                               nb_agents=2,
                               cells=cells,
                               initial_agents_params=initial_agents_params,
                               agent_classes=[agent_cls, agent_cls])

            action_agent_1 = model.active_speed_agents[0].multi_minimax(
                depth=2,
                game_state=get_state(model, model.active_speed_agents[0]))
            action_agent_2 = model.active_speed_agents[1].multi_minimax(
                depth=2,
                game_state=get_state(model, model.active_speed_agents[1]))
            self.assertEqual(Action.TURN_RIGHT, action_agent_1)
            self.assertEqual(Action.TURN_LEFT, action_agent_2)
Example #7
    def test_jumping_out(self):
        # The agent should pick SPEED_UP to jump over the wall
        for agent_cls in self.agent_classes:
            cells = np.array([[0, 0, 2, 2, 2],
                              [0, 0, 2, 0, 0],
                              [2, 1, 2, 0, 0],
                              [2, 0, 2, 0, 0],
                              [2, 0, 2, 0, 0],
                              [2, 0, 2, 0, 0],
                              [2, 2, 2, 0, 0],
                              [0, 0, 0, 0, 0],
                              [0, 0, 0, 0, 0],
                              [0, 0, 0, 0, 0],
                              [0, 0, 0, 0, 0],
                              [0, 0, 0, 0, 0]])
            initial_agents_params = [{
                "pos": (1, 2),
                "direction": Direction.DOWN,
                "speed": 1
            }, {
                "pos": (4, 0),
                "direction": Direction.LEFT
            }]
            model = SpeedModel(width=5,
                               height=12,
                               nb_agents=2,
                               cells=cells,
                               initial_agents_params=initial_agents_params,
                               agent_classes=[agent_cls, NStepSurvivalAgent])

            # advance the step counters so the upcoming moves fall on a
            # jump round (spe_ed creates gaps every sixth step)
            model.active_speed_agents[0].game_step = 4
            model.schedule.steps = 4

            action_agent_1 = model.active_speed_agents[0].multi_minimax(
                depth=6,
                game_state=get_state(model, model.active_speed_agents[0]))

            self.assertEqual(Action.SPEED_UP, action_agent_1)
Example #8
    def test_force_draw(self):
        # Force a draw via kamikaze in an obviously losing endgame instead of choosing a safe action.
        # Agent 1 cannot avoid elimination but can eliminate agent 2 as well with SPEED_UP, even though it could
        # survive one step longer with CHANGE_NOTHING.
        # Assuming that agent 2 does not simply eliminate itself, it is best to force the draw.
        for agent_cls in self.agent_classes:
            cells = np.array([[0, 0, 0, 0, 0],
                              [0, 2, 2, 2, 2],
                              [0, 2, 2, 2, 2],
                              [0, 1, 1, 1, 1],
                              [1, 1, 1, 1, 1],
                              [1, 1, 1, 1, 1]])
            initial_agents_params = [{
                "pos": (0, 4),
                "direction": Direction.UP
            }, {
                "pos": (1, 2),
                "direction": Direction.LEFT
            }]
            model = SpeedModel(width=5,
                               height=6,
                               nb_agents=2,
                               cells=cells,
                               initial_agents_params=initial_agents_params,
                               agent_classes=[agent_cls for _ in range(2)])

            action_agent_1 = model.active_speed_agents[0].multi_minimax(
                depth=2,
                game_state=get_state(model, model.active_speed_agents[0]))
            self.assertEqual(Action.SPEED_UP, action_agent_1)
Example #9
    def test_kamikaze(self):
        # Eliminating another agent is better than just dying when death is inevitable.
        # Agent 1 cannot avoid elimination but can eliminate agent 2 as well with CHANGE_NOTHING.
        for agent_cls in self.agent_classes:
            cells = np.array([[0, 0, 0, 0, 0],
                              [2, 2, 2, 2, 2],
                              [0, 2, 2, 2, 2],
                              [1, 1, 1, 1, 1],
                              [1, 1, 1, 1, 1]])
            initial_agents_params = [{
                "pos": (0, 3),
                "direction": Direction.UP
            }, {
                "pos": (1, 2),
                "direction": Direction.LEFT
            }]
            model = SpeedModel(width=5,
                               height=5,
                               nb_agents=2,
                               cells=cells,
                               initial_agents_params=initial_agents_params,
                               agent_classes=[agent_cls for _ in range(2)])

            action_agent_1 = model.active_speed_agents[0].multi_minimax(
                depth=2,
                game_state=get_state(model, model.active_speed_agents[0]))
            self.assertEqual(Action.CHANGE_NOTHING, action_agent_1)
Example #10
File: utils.py Project: jubra97/speedos
def state_to_model(state,
                   initialize_cells=False,
                   agent_classes=None,
                   additional_params=None,
                   trace_aware=False):
    """
    Convert JSON state to model
    :param state: JSON string
    :param initialize_cells: initialize cells
    :param agent_classes: agent classes
    :param additional_params: additional params
    :param trace_aware: include traces
    :return: model
    """
    # import here to avoid cyclic imports
    from src.core.model import SpeedModel
    from src.core.agents import DummyAgent
    width = state["width"]
    height = state["height"]
    nb_agents = len(state["players"])
    initial_params = []
    for i, values in enumerate(state["players"].values()):
        initial_params.append({
            "pos": (values["x"], values["y"]),
            "direction": Direction[values["direction"].upper()],
            "speed": values["speed"],
            "active": values["active"],
        })
        if trace_aware:
            initial_params[i]["trace"] = copy.deepcopy(values["trace"])
        if additional_params is not None:
            initial_params[i] = {**initial_params[i], **additional_params[i]}

    if agent_classes is None:
        agent_classes = [DummyAgent for _ in range(nb_agents)]
    model = SpeedModel(width,
                       height,
                       nb_agents,
                       agent_classes,
                       initial_agents_params=initial_params,
                       cells=state["cells"] if not initialize_cells else None)
    agents_to_remove = []
    for agent in model.active_speed_agents:
        if not agent.active:
            agents_to_remove.append(agent)
    for agent in agents_to_remove:
        model.active_speed_agents.remove(agent)

    if "step" in state.keys():
        model.schedule.steps = state["step"]
    return model
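
A minimal usage sketch, assuming the spe_ed-style JSON schema that the parser above reads. All field values here are hypothetical; a real state would typically arrive as json.loads(...) output, and cells may need to be a numpy array depending on what SpeedModel expects:

state = {
    "width": 5,
    "height": 5,
    "cells": [[0] * 5 for _ in range(5)],
    "players": {
        "1": {"x": 0, "y": 0, "direction": "right", "speed": 1, "active": True},
        "2": {"x": 4, "y": 4, "direction": "left", "speed": 1, "active": True},
    },
    "step": 0,
}
model = state_to_model(state)  # two DummyAgents on an empty 5x5 board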
Example #11
    def test_winning_move(self):
        # There is only one winning move (TURN_RIGHT) for agent 1 in a late endgame. All other actions lead to a loss or
        # draw if agent 2 plays perfectly.
        for agent_cls in self.agent_classes:
            cells = np.array([[0, 0, 0, 0, 0, 0, 2], [0, 1, 1, 1, 1, 1, 1],
                              [0, 1, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1, 1]])
            initial_agents_params = [{
                "pos": (1, 1),
                "direction": Direction.LEFT
            }, {
                "pos": (6, 0),
                "direction": Direction.LEFT
            }]
            model = SpeedModel(width=7,
                               height=4,
                               nb_agents=2,
                               cells=cells,
                               initial_agents_params=initial_agents_params,
                               agent_classes=[agent_cls for _ in range(2)])

            action_agent_1 = model.active_speed_agents[0].multi_minimax(
                depth=8,
                game_state=get_state(model, model.active_speed_agents[0]))
            self.assertEqual(Action.TURN_RIGHT, action_agent_1)
Example #12
from src.core.agents import VoronoiAgent, SlidingWindowVoronoiAgent
from src.core.model import SpeedModel

if __name__ == "__main__":
    model = SpeedModel(6, 6, 2, agent_classes=[VoronoiAgent, SlidingWindowVoronoiAgent])
    model.run_model()
Example #13
    def test_6_agents(self):
        initial_agents_params = [{
            "pos": (0, 0),
            "direction": Direction.DOWN,
            "deterministic": True,
            "depth": 1
        }, {
            "pos": (23, 3),
            "direction": Direction.LEFT,
            "deterministic": True,
            "depth": 1
        }, {
            "pos": (5, 35),
            "direction": Direction.DOWN,
            "deterministic": True,
            "depth": 1
        }, {
            "pos": (26, 40),
            "direction": Direction.UP,
            "deterministic": True,
            "depth": 1
        }, {
            "pos": (17, 8),
            "direction": Direction.RIGHT,
            "deterministic": True,
            "depth": 1
        }, {
            "pos": (39, 39),
            "direction": Direction.UP,
            "deterministic": True,
            "depth": 1
        }]
        model = SpeedModel(
            width=50,
            height=50,
            nb_agents=6,
            initial_agents_params=initial_agents_params,
            agent_classes=[NStepSurvivalAgent for _ in range(6)])
        particle_cells, region_sizes, is_endgame, _ = voronoi(
            model, model.active_speed_agents[0].unique_id)
        self.assertEqual(
            {
                -1: 83,
                0: 6,
                1: 126,
                2: 477,
                3: 457,
                4: 422,
                5: 349,
                6: 580
            }, region_sizes)
        self.assertEqual(False, is_endgame)

        # run for 5 steps and test again
        for _ in range(5):
            model.step()
        particle_cells, region_sizes, is_endgame, _ = voronoi(
            model, model.active_speed_agents[0].unique_id)
        self.assertEqual(
            {
                -1: 76,
                0: 32,
                2: 484,
                3: 456,
                4: 437,
                5: 493,
                6: 522
            }, region_sizes)
        self.assertEqual(False, is_endgame)
Example #14
    def fair_start_evaluate(self,
                            repetitions,
                            seeds=None,
                            show=True,
                            save=False,
                            verbose=False,
                            random_move_time=False):
        if repetitions % self.model_params["nb_agents"]:
            raise ValueError("Repetitions must be a multiple of nb_agents")
        repetitions_to_iter = repetitions // self.model_params["nb_agents"]
        self._init_tables(repetitions)
        for rep in range(repetitions_to_iter):
            pos_samples = random.sample(
                range(self.model_params["width"] *
                      self.model_params["height"] - 1),
                self.model_params["nb_agents"])
            start_pos = [(sample % self.model_params["width"],
                          sample // self.model_params["width"])
                         for sample in pos_samples]
            start_dir = [
                random.choice(list(Direction))
                for _ in range(self.model_params["nb_agents"])
            ]
            for i in range(self.model_params["nb_agents"]):
                # rotate the same start positions and directions through the
                # agents so that every agent plays every start once
                args = [{
                    "pos": start_pos[j % self.model_params["nb_agents"]],
                    "direction": start_dir[j % self.model_params["nb_agents"]]
                } for j in range(i, self.model_params["nb_agents"] + i)]
                for j in range(self.model_params["nb_agents"]):
                    self.model_params["initial_agents_params"][j][
                        "pos"] = args[j]["pos"]
                    self.model_params["initial_agents_params"][j][
                        "direction"] = args[j]["direction"]

                if random_move_time:
                    move_time = np.random.uniform(5, 15)
                    for j in range(self.model_params["nb_agents"]):
                        self.model_params["initial_agents_params"][j][
                            "time_for_move"] = move_time
                self.model = SpeedModel(**self.model_params)
                if seeds is not None:
                    self.model.reset_randomizer(seeds[rep])
                while self.model.running:
                    active_agent_ids = list(
                        map(lambda x: x.unique_id,
                            self.model.active_speed_agents))
                    self.model.step()
                    new_active_agent_ids = list(
                        map(lambda x: x.unique_id,
                            self.model.active_speed_agents))
                    # agents active before the step but not after it were
                    # eliminated in this step
                    eliminated_agent_ids = list(
                        set(active_agent_ids) - set(new_active_agent_ids))
                    for unique_id in eliminated_agent_ids:
                        # an eliminated agent places directly behind all
                        # survivors; index by game, not by rep group
                        self.placement_table[
                            unique_id - 1,
                            rep * self.model_params["nb_agents"] + i] += len(
                                self.model.active_speed_agents) + 1
                for winner in self.model.active_speed_agents:
                    self.placement_table[
                        winner.unique_id - 1,
                        rep * self.model_params["nb_agents"] + i] += 1
                self._update_tables(rep * self.model_params["nb_agents"] + i)
                if verbose:
                    print(
                        f"Finished Game {rep * self.model_params['nb_agents'] + i} at {datetime.now()}"
                    )
                    print(
                        f"Current Evaluation Results: \n{self.win_table}\n{self.elimination_step_table}\n"
                        f"{self.placement_table}\n{self.elimination_action_table}\n{self.agent_independent_table}\n"
                    )
        self._process_results(repetitions, show, save)
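
The args rotation above is the core of the fair start: each rep group plays nb_agents games, shifting the same start configuration through the agents. A stand-alone illustration of the modular indexing (plain Python):

nb_agents = 3
start_pos = ["A", "B", "C"]
for i in range(nb_agents):
    print([start_pos[j % nb_agents] for j in range(i, nb_agents + i)])
# ['A', 'B', 'C']
# ['B', 'C', 'A']
# ['C', 'A', 'B']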
Example #15
class Evaluator:
    def __init__(self, model_params, parameter_settings_info=None):
        self.model_params = model_params
        self.parameter_settings_info = parameter_settings_info

        if "initial_agents_params" not in self.model_params:
            self.model_params["initial_agents_params"] = [
                {} for _ in range(self.model_params["nb_agents"])
            ]
        self.model = SpeedModel(**model_params)
        # the result tables are allocated per run in _init_tables
        self.win_table = None
        self.elimination_step_table = None
        self.placement_table = None
        self.elimination_action_table = None
        self.agent_independent_table = None

    def evaluate(self,
                 repetitions,
                 seeds=None,
                 show=True,
                 save=False,
                 random_move_time=False):
        self._init_tables(repetitions)
        for rep in range(repetitions):
            if random_move_time:
                # give every agent the same randomly drawn time budget per move
                move_time = np.random.uniform(5, 15)
                for i in range(self.model_params["nb_agents"]):
                    self.model_params["initial_agents_params"][i][
                        "time_for_move"] = move_time
            self.model = SpeedModel(**self.model_params)
            if seeds is not None:
                self.model.reset_randomizer(seeds[rep])
            while self.model.running:
                active_agent_ids = list(
                    map(lambda x: x.unique_id, self.model.active_speed_agents))
                self.model.step()
                new_active_agent_ids = list(
                    map(lambda x: x.unique_id, self.model.active_speed_agents))
                # agents active before the step but not after it were
                # eliminated in this step
                eliminated_agent_ids = list(
                    set(active_agent_ids) - set(new_active_agent_ids))
                for unique_id in eliminated_agent_ids:
                    # an eliminated agent places directly behind all survivors
                    self.placement_table[unique_id - 1, rep] += len(
                        self.model.active_speed_agents) + 1
            for winner in self.model.active_speed_agents:
                self.placement_table[winner.unique_id - 1, rep] += 1
            self._update_tables(rep)
        self._process_results(repetitions, show, save)

    def fair_start_evaluate(self,
                            repetitions,
                            seeds=None,
                            show=True,
                            save=False,
                            verbose=False,
                            random_move_time=False):
        if repetitions % self.model_params["nb_agents"]:
            raise ValueError("Repetitions must be a multiple of nb_agents")
        repetitions_to_iter = repetitions // self.model_params["nb_agents"]
        self._init_tables(repetitions)
        for rep in range(repetitions_to_iter):
            pos_samples = random.sample(
                range(self.model_params["width"] *
                      self.model_params["height"] - 1),
                self.model_params["nb_agents"])
            start_pos = [(sample % self.model_params["width"],
                          sample // self.model_params["width"])
                         for sample in pos_samples]
            start_dir = [
                random.choice(list(Direction))
                for _ in range(self.model_params["nb_agents"])
            ]
            for i in range(self.model_params["nb_agents"]):
                # rotate the same start positions and directions through the
                # agents so that every agent plays every start once
                args = [{
                    "pos": start_pos[j % self.model_params["nb_agents"]],
                    "direction": start_dir[j % self.model_params["nb_agents"]]
                } for j in range(i, self.model_params["nb_agents"] + i)]
                for j in range(self.model_params["nb_agents"]):
                    self.model_params["initial_agents_params"][j][
                        "pos"] = args[j]["pos"]
                    self.model_params["initial_agents_params"][j][
                        "direction"] = args[j]["direction"]

                if random_move_time:
                    move_time = np.random.uniform(5, 15)
                    for j in range(self.model_params["nb_agents"]):
                        self.model_params["initial_agents_params"][j][
                            "time_for_move"] = move_time
                self.model = SpeedModel(**self.model_params)
                if seeds is not None:
                    self.model.reset_randomizer(seeds[rep])
                while self.model.running:
                    active_agent_ids = list(
                        map(lambda x: x.unique_id,
                            self.model.active_speed_agents))
                    self.model.step()
                    new_active_agent_ids = list(
                        map(lambda x: x.unique_id,
                            self.model.active_speed_agents))
                    # agents active before the step but not after it were
                    # eliminated in this step
                    eliminated_agent_ids = list(
                        set(active_agent_ids) - set(new_active_agent_ids))
                    for unique_id in eliminated_agent_ids:
                        # an eliminated agent places directly behind all
                        # survivors; index by game, not by rep group
                        self.placement_table[
                            unique_id - 1,
                            rep * self.model_params["nb_agents"] + i] += len(
                                self.model.active_speed_agents) + 1
                for winner in self.model.active_speed_agents:
                    self.placement_table[
                        winner.unique_id - 1,
                        rep * self.model_params["nb_agents"] + i] += 1
                self._update_tables(rep * self.model_params["nb_agents"] + i)
                if verbose:
                    print(
                        f"Finished Game {rep * self.model_params['nb_agents'] + i} at {datetime.now()}"
                    )
                    print(
                        f"Current Evaluation Results: \n{self.win_table}\n{self.elimination_step_table}\n"
                        f"{self.placement_table}\n{self.elimination_action_table}\n{self.agent_independent_table}\n"
                    )
        self._process_results(repetitions, show, save)

    def _process_results(self, repetitions, show, save):
        if not show and not save:
            return

        index = [
            f"Agent {i + 1} ({str(type(self.model.get_agent_by_id(i + 1)).__name__)})"
            for i in range(self.model.nb_agents)
        ]

        self.win_table *= 100 / repetitions  # convert to percentages
        win_df = pd.DataFrame(self.win_table,
                              index=index,
                              columns=["Wins [%]", "Ties [%]", "Losses [%]"])

        elimination_data = {"ES Mean": [], "ES Std": []}
        for agent_data in self.elimination_step_table:
            data = agent_data[np.nonzero(agent_data)]
            elimination_data["ES Mean"].append(np.mean(data))
            elimination_data["ES Std"].append(np.std(data))
        elimination_step_df = pd.DataFrame(elimination_data, index=index)

        elimination_action_df = pd.DataFrame(self.elimination_action_table,
                                             index=index,
                                             columns=[
                                                 "EA Left", "EA Right",
                                                 "EA Slow Down", "EA Speed Up",
                                                 "EA Change Nothing"
                                             ])

        placement_data = {"Placement Mean": [], "Placement Std": []}
        for agent_data in self.placement_table:
            placement_data["Placement Mean"].append(np.mean(agent_data))
            placement_data["Placement Std"].append(np.mean(agent_data))
        placement_df = pd.DataFrame(placement_data, index=index)

        agent_table = win_df.join(elimination_step_df).join(
            elimination_action_df).join(placement_df)

        self.agent_independent_table[1] *= 100 / (
            self.model.width * self.model.height)  # convert to percentages
        agent_independent_df = pd.DataFrame(
            {
                "Game Duration Mean": np.mean(self.agent_independent_table[0]),
                "Game Duration Std": np.std(self.agent_independent_table[0]),
                "Empty Cells Mean [%]": np.mean(
                    self.agent_independent_table[1]),
                "Empty Cells Std [%]": np.std(self.agent_independent_table[1])
            },
            index=["Data"])

        parameter_settings = {
            "Width": self.model.width,
            "Height": self.model.height,
            "Repetitions": repetitions
        }
        if self.parameter_settings_info:
            parameter_settings = {
                **parameter_settings,
                **self.parameter_settings_info
            }  # join the dicts
        parameter_settings_df = pd.DataFrame(parameter_settings,
                                             index=["Data"])

        timestamp = datetime.now().strftime("%Y_%m_%d-%H_%M_%S")

        if save:
            writer = pd.ExcelWriter(
                f"../../res/evaluation/eval_{timestamp}.xlsx",
                engine='xlsxwriter')

            agent_table.to_excel(writer, sheet_name='Agents')
            agent_independent_df.to_excel(writer, sheet_name='Global')
            parameter_settings_df.to_excel(writer,
                                           sheet_name='Parameter Settings')

            writer.close()

        if show:
            agent_table.to_html('temp_agents.html')
            webbrowser.open_new_tab('temp_agents.html')

            agent_independent_df.to_html('temp_global.html')
            webbrowser.open_new_tab('temp_global.html')

            parameter_settings_df.to_html('temp_settings.html')
            webbrowser.open_new_tab('temp_settings.html')

    def _init_tables(self, repetitions):
        self.win_table = np.zeros((self.model.nb_agents, 3))
        self.placement_table = np.zeros((self.model.nb_agents, repetitions))
        self.elimination_step_table = np.zeros(
            (self.model.nb_agents, repetitions))
        self.elimination_action_table = np.zeros((self.model.nb_agents, 5),
                                                 dtype=int)
        self.agent_independent_table = np.empty((2, repetitions))

    def _update_tables(self, repetition):
        for agent in self.model.speed_agents:
            a_idx = agent.unique_id - 1
            if agent.active:
                # win
                self.win_table[a_idx, 0] += 1
            elif len(
                    self.model.active_speed_agents
            ) == 0 and agent.elimination_step == self.model.schedule.steps:
                # tie
                self.win_table[a_idx, 1] += 1
                self.elimination_step_table[
                    a_idx, repetition] = agent.elimination_step
                self.elimination_action_table[a_idx, agent.action.value] += 1
            else:
                # loss
                self.win_table[a_idx, 2] += 1
                self.elimination_step_table[
                    a_idx, repetition] = agent.elimination_step
                self.elimination_action_table[a_idx, agent.action.value] += 1

        self.agent_independent_table[0, repetition] = self.model.schedule.steps
        self.agent_independent_table[1, repetition] = np.count_nonzero(
            self.model.cells == 0)
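
A quick stand-alone sanity check of the placement arithmetic used in evaluate and fair_start_evaluate above (plain Python, no project imports): an agent eliminated while n agents remain active places n + 1, agents eliminated in the same step share a placement, and the last agent standing gets 1.

survivors = [1, 2, 3]            # unique_ids of the active agents
placements = {}
for eliminated in (3, 1):        # hypothetical elimination order
    survivors.remove(eliminated)
    placements[eliminated] = len(survivors) + 1
placements[survivors[0]] = 1     # last agent standing wins
print(placements)                # {3: 3, 1: 2, 2: 1}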