Example #1
    def update(i):
        if i > 0:
            apply_change(state=state, change=changes[i], forward=True)

        nodes_iter = sorted(G.nodes)

        data["node_colors"] = get_colors(state, nodes_iter)
        data["node_sizes"] = get_sizes(state, nodes_iter)
        data["node_labels"] = get_labels(state, nodes_iter)

        nodes.set_array(data["node_colors"])
        nodes.set_sizes(data["node_sizes"])

        for k, v in data["node_labels"].items():
            plot["labels"][k].set_text(v)

        return (nodes, )
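The `update` callback above follows matplotlib's blitted-animation contract: it mutates the node artists in place and returns the artists that changed. Below is a rough, self-contained sketch of how such a callback is usually driven; the toy graph, the `node_values` list, and the stand-in for `apply_change` are all invented for illustration and are not the project's code.

# Hypothetical wiring sketch for an update(i) callback like the one above,
# driven by matplotlib.animation.FuncAnimation. All names are illustrative.
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import networkx as nx

G = nx.path_graph(5)
pos = nx.spring_layout(G, seed=0)

fig, ax = plt.subplots()
# draw_networkx_nodes returns a PathCollection, which is what lets the example
# call set_array / set_sizes on the `nodes` artist each frame.
nodes = nx.draw_networkx_nodes(G, pos, ax=ax,
                               node_color=[0.0] * len(G), vmin=0.0, vmax=1.0)
nx.draw_networkx_edges(G, pos, ax=ax)

node_values = [0.0] * len(G)      # stand-in for colours derived from `state`
changes = list(range(len(G)))     # stand-in for the recorded change list

def update(i):
    if i > 0:
        node_values[changes[i]] = 1.0   # stand-in for apply_change(...)
    nodes.set_array(node_values)
    return (nodes,)

anim = animation.FuncAnimation(fig, update, frames=len(changes), blit=True)
plt.show()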
Example #2
def test_spend(agent_id, node_id, amount, should_error, game):
    game.game_phase = ActionType.SPEND
    agent = game.graph.agents[agent_id]

    action = Action(spend=Spend(amount=amount, node=node_id))

    try:
        change = apply_action(agent=agent, game=game, action=action)
        pre_change_value = game.graph.nodes[node_id].value

        apply_change(game, change)

        assert game.graph.nodes[node_id].value == pre_change_value + amount

    except AssertionError:
        if not should_error:
            raise
Example #3
def get_state_transition(agent_id, game_id, action):
    game = games_cache.get_game(game_id)

    change = apply_action(agent=game.get_agent(agent_id),
                          game=game,
                          action=action)

    new_state = apply_change(
        state=game,
        change=change,
    )

    return change, new_state
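Taken together, these examples use `apply_action` to validate an action and produce a change record without touching the game, and `apply_change` to turn that record into the new state (mutating in place in some examples, returning a new object in others). The fragment below is only a minimal sketch of that two-step pattern under those assumptions; every class, field, and rule in it is made up for illustration.

# Minimal sketch of the validate-then-apply pattern seen above.
# GameState, SpendChange and the resource rule are all hypothetical.
from dataclasses import dataclass

@dataclass
class GameState:
    node_values: dict        # node_id -> value
    agent_resources: dict    # agent_id -> remaining resources

@dataclass
class SpendChange:
    agent_id: str
    node_id: str
    amount: int

def apply_action(agent_id, state, node_id, amount):
    # Validation only: nothing is mutated here, a change record is returned.
    if state.agent_resources[agent_id] < amount:
        raise AssertionError("not enough resources")
    return SpendChange(agent_id=agent_id, node_id=node_id, amount=amount)

def apply_change(state, change, forward=True):
    # Applying the change is a separate, explicit step, which is what lets
    # callers record changes and replay (or reverse) them later.
    sign = 1 if forward else -1
    state.node_values[change.node_id] += sign * change.amount
    state.agent_resources[change.agent_id] -= sign * change.amount
    return state

state = GameState(node_values={"n1": 0}, agent_resources={"a1": 10})
change = apply_action("a1", state, "n1", 3)
state = apply_change(state, change)                  # apply forward
state = apply_change(state, change, forward=False)   # assumed reverse path; the
                                                     # examples only show forward=True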
Example #4
def test_attack(agent_id, attacker_id, defender_id, attack_should_succeed,
                should_error, game):
    agent = game.graph.agents[agent_id]

    action = Action(attack=Attack(attacker=attacker_id, defender=defender_id))

    try:
        change = apply_action(agent=agent, game=game, action=action)
        new_game = apply_change(game, change)

        if attack_should_succeed is True:
            assert game.graph.nodes[defender_id].owner != agent.id
            assert new_game.graph.nodes[defender_id].owner == agent.id

        elif attack_should_succeed is False:
            assert new_game.graph.nodes[defender_id].owner != agent.id

    except AssertionError:
        if not should_error:
            raise
Example #5
def _reconstruct_game_state(
    game_id: int,
    session,
):
    game = session.query(entities.Game).filter_by(id=game_id).one()
    graph = game.graph

    nodes = session.query(entities.Node).filter_by(graph_id=graph.id).all()
    edges = session.query(entities.Edge).filter_by(graph_id=graph.id).all()

    graph_agents = session.query(
        entities.GraphAgent).filter_by(graph_id=graph.id).all()

    graph_state = build_base_graph_from_db(nodes, edges, graph_agents)

    game_state = Game(
        graph=graph_state,
        active_player_id=graph_state.get_next_agent(agent_id=None))

    state_changes = (session.query(entities.Change)
                     .filter_by(game_id=game_id)
                     .order_by(entities.Change.sequence.asc())
                     .all())
    logger.info("nb_state_changes", nb_items=len(state_changes))

    with Timer("replaying_state"):
        for state_change in state_changes:
            models_cls = type_mapping[type(state_change)]

            field_names = set(f.name for f in dataclasses.fields(models_cls))

            state_change_model = models_cls.from_dict({
                key: value
                for key, value in state_change.__dict__.items()
                if key in field_names
            })

            game_state = apply_change(game_state, state_change_model)

    return game_state
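The replay loop above converts each ORM row into its dataclass model by keeping only the attributes that match the model's declared fields, which drops SQLAlchemy bookkeeping such as `_sa_instance_state`. A stand-alone sketch of that filtering trick follows; the `Row` and `NodeCaptured` classes are invented, and a plain constructor stands in for the project's `from_dict`.

# Stand-alone sketch of the field-filtering step from the replay loop.
import dataclasses

@dataclasses.dataclass
class NodeCaptured:          # hypothetical change model
    node_id: int
    new_owner: int

class Row:                   # stands in for a SQLAlchemy entity instance
    def __init__(self):
        self.node_id = 7
        self.new_owner = 2
        self._sa_instance_state = object()   # ORM bookkeeping to discard

row = Row()
field_names = {f.name for f in dataclasses.fields(NodeCaptured)}
payload = {k: v for k, v in row.__dict__.items() if k in field_names}
model = NodeCaptured(**payload)    # the example calls models_cls.from_dict(...)
print(model)                       # NodeCaptured(node_id=7, new_owner=2)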
def run_game(agents, game: Game, verbose=True):
    game_start = Game.from_json(game.to_json())

    agents = {agent.agent.id: agent for agent in agents}

    metrics = {
        agent_id: {
            "resources": [],
            "nb_nodes": [],
            "total_node_value": [],
            "action_time": [],
            "reward_function": [],
            "step": [],
        }
        for agent_id in agents
    }

    changes = []

    def log_metrics(agent):
        a = agent.agent
        m = metrics[a.id]

        m["resources"].append(a.resources)
        m["nb_nodes"].append(sum(1 for _ in game.graph.nodes_owned_by(a.id)))
        m["step"].append(nb_actions)
        m["reward_function"].append(agent.reward_function(game))
        m["total_node_value"].append(
            sum(node.value for node in game.graph.nodes_owned_by(a.id)))

    nb_actions = 0

    log_metrics(agents[game.active_player_id])
    log_metrics(agents[game.graph.get_next_agent(game.active_player_id)])

    while not game._any_has_won():
        if nb_actions % 250 == 0 and verbose:
            logger.info("checkpoint", nb_actions=nb_actions)

        agent = agents[game.active_player_id]

        action = agent.get_next_action()

        change = apply_action(action=action,
                              agent=agent.agent,
                              game=game,
                              force=None)
        changes.append(change)
        apply_change(state=game, change=change, forward=True)

        log_metrics(agent)

        nb_actions += 1

    losing_agents = set(agents) - {game._any_has_won()}

    log_metrics(agents[losing_agents.pop()])

    if verbose:
        logger.info("game complete", winner=game._any_has_won())

    return {
        "metrics": metrics,
        "agents": agents,
        "winner": game._any_has_won(),
        "changes": changes,
        "game_end": game,
        "game_start": game_start,
    }
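`run_game` returns per-agent time series keyed by metric name, so they can be inspected or plotted directly. As a hedged illustration, assuming only the dict layout built above (the numbers below are made up), the resource curves of two agents might be compared like this:

# Hypothetical post-processing of the metrics structure run_game returns;
# only the layout metrics[agent_id][metric_name] -> list-per-step is assumed.
import matplotlib.pyplot as plt

metrics = {
    "red":  {"step": [0, 1, 2, 3], "resources": [5, 4, 6, 7]},
    "blue": {"step": [0, 1, 2, 3], "resources": [5, 5, 3, 2]},
}

fig, ax = plt.subplots()
for agent_id, series in metrics.items():
    ax.plot(series["step"], series["resources"], label=f"agent {agent_id}")

ax.set_xlabel("action number")
ax.set_ylabel("resources")
ax.legend()
plt.show()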