Example #1
def get_projected_replicator_dynamics_pi(payoff_table):
    """Computes a PRD strategy profile for a symmetric two-player matrix game.

    Args:
      payoff_table: A square 2-D array of row-player payoffs. The column
        player's payoff table is taken as the transpose, so the game must be
        symmetric.

    Returns:
      The strategy profile computed by projected replicator dynamics.
    """
    # The payoff table must be a square 2-D matrix.
    assert len(np.shape(payoff_table)) == 2
    assert np.shape(payoff_table)[0] == np.shape(payoff_table)[1]

    # Row player's table plus its transpose for the column player.
    payoff_tables = [payoff_table, payoff_table.T]

    # Check that the game really is symmetric (i.e., players have identical
    # strategy sets and payoff tables) before running the single-population
    # dynamics.
    is_symmetric, _ = utils.is_symmetric_matrix_game(payoff_tables)
    assert is_symmetric
    assert len(payoff_tables) == 2

    return projected_replicator_dynamics(payoff_tensors=payoff_tables,
                                          prd_initial_strategies=None,
                                          prd_iterations=int(1e6),
                                          prd_dt=1e-3,
                                          prd_gamma=0.0,
                                          average_over_last_n_strategies=None)
Example #2
def _prd(meta_game, per_player_repeats, ignore_repeats=False):
  """Projected replicator dynamics."""
  # Optionally expand the meta-game so repeated policies are weighted.
  game = meta_game if ignore_repeats else _expand_meta_game(
      meta_game, per_player_repeats)
  marginals = projected_replicator_dynamics.projected_replicator_dynamics(game)
  # Outer product of the per-player marginals via einsum, e.g. "a,b,c->abc".
  axes = string.ascii_lowercase[:len(marginals)]
  dist = np.einsum("{}->{}".format(",".join(axes), axes), *marginals)
  # Zero out negligible probability mass and renormalize.
  dist[dist < DIST_TOL] = 0.0
  dist /= np.sum(dist)
  dist = _unexpand_meta_dist(dist, per_player_repeats)
  return dist, dict()
Example #3
def prd_strategy(solver, return_joint=False, checkpoint_dir=None):
    """Computes Projected Replicator Dynamics strategies.

    Args:
      solver: GenPSROSolver instance.
      return_joint: If False, only the marginal strategies are returned;
        otherwise both the marginals and the joint probabilities are returned.
      checkpoint_dir: Unused; kept for interface compatibility.

    Returns:
      The PRD-computed marginal strategies, and additionally the joint
      strategy derived from those marginals when `return_joint` is True.
    """
    meta_games = solver.get_meta_game()
    # A single payoff matrix denotes a two-player zero-sum game.
    if not isinstance(meta_games, list):
        meta_games = [meta_games, -meta_games]
    kwargs = solver.get_kwargs()
    result = projected_replicator_dynamics.projected_replicator_dynamics(
        meta_games, **kwargs)
    if not return_joint:
        return result
    joint_strategies = get_joint_strategy_from_marginals(result)
    return result, joint_strategies
Example #4
def prd_strategy(solver):
    """Computes Projected Replicator Dynamics strategies.

  Args:
    solver: GenPSROSolver instance.

  Returns:
    PRD-computed strategies.
  """
    # Bug fix: get_meta_game / get_kwargs are methods and must be called
    # (see the sibling prd_strategy example); without the parentheses,
    # `-meta_games` and `**kwargs` below would fail on bound methods.
    meta_games = solver.get_meta_game()
    # A single payoff matrix denotes a two-player zero-sum game.
    if not isinstance(meta_games, list):
        meta_games = [meta_games, -meta_games]
    kwargs = solver.get_kwargs()
    return projected_replicator_dynamics.projected_replicator_dynamics(
        meta_games, **kwargs)
Example #5
    def test_three_players(self):
        # Identical payoff tensor for all three players.
        payoff = [[[2, 1, 0], [1, 0, -1]], [[1, 0, -1], [0, -1, -2]]]
        tensors = [np.array(payoff) for _ in range(3)]

        strategies = projected_replicator_dynamics.projected_replicator_dynamics(
            tensors,
            prd_initial_strategies=None,
            prd_iterations=50000,
            prd_dt=1e-3,
            prd_gamma=1e-6,
            average_over_last_n_strategies=10)
        self.assertLen(strategies, 3, "Wrong strategy length.")
        self.assertGreater(
            strategies[0][0], 0.999,
            "Projected Replicator Dynamics failed in trivial case.")