def basic_different_dec_cardinality() -> MACID:
    """A basic MACIM where the cardinality of each agent's decision node is different.

    It has one subgame perfect NE.
    """
    macid = MACID(
        [("D1", "D2"), ("D1", "U1"), ("D1", "U2"), ("D2", "U2"), ("D2", "U1")],
        agent_decisions={0: ["D1"], 1: ["D2"]},
        agent_utilities={0: ["U1"], 1: ["U2"]},
    )
    # Agent 0's decision D1 has two actions; agent 1's decision D2 has three.
    cpd_d1 = DecisionDomain("D1", [0, 1])
    cpd_d2 = DecisionDomain("D2", [0, 1, 2])
    # Payoff matrices indexed as [d1, d2] (2 rows x 3 columns).
    agent1_payoff = np.array([[3, 1, 0], [1, 2, 3]])
    agent2_payoff = np.array([[1, 2, 1], [1, 0, 3]])
    cpd_u1 = FunctionCPD("U1", lambda d1, d2: agent1_payoff[d1, d2])  # type: ignore
    cpd_u2 = FunctionCPD("U2", lambda d1, d2: agent2_payoff[d1, d2])  # type: ignore
    macid.add_cpds(cpd_d1, cpd_d2, cpd_u1, cpd_u2)
    return macid
def basic2agent_tie_break() -> MACID:
    """A two-agent sequential MACIM (D2 observes D1) with tabular utility CPDs.

    U1 and U2 take values in 0..5; each one-hot column below deterministically
    assigns a utility value to one (D1, D2) action pair.
    """
    macid = MACID(
        [("D1", "D2"), ("D1", "U1"), ("D1", "U2"), ("D2", "U2"), ("D2", "U1")],
        agent_decisions={0: ["D1"], 1: ["D2"]},
        agent_utilities={0: ["U1"], 1: ["U2"]},
    )
    cpd_d1 = DecisionDomain("D1", [0, 1])
    cpd_d2 = DecisionDomain("D2", [0, 1])
    # Columns correspond to the (D1, D2) pairs (0,0), (0,1), (1,0), (1,1).
    cpd_u1 = TabularCPD(
        "U1",
        6,
        np.array([[0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 0, 0], [1, 0, 1, 0], [0, 0, 0, 0], [0, 0, 0, 0]]),
        evidence=["D1", "D2"],
        evidence_card=[2, 2],
    )
    cpd_u2 = TabularCPD(
        "U2",
        6,
        np.array([[0, 0, 0, 0], [1, 0, 0, 0], [0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 1, 0, 0]]),
        evidence=["D1", "D2"],
        evidence_card=[2, 2],
    )
    macid.add_cpds(cpd_d1, cpd_d2, cpd_u1, cpd_u2)
    return macid
def test_get_reasoning_patterns(self) -> None:
    """Check that get_reasoning_patterns sorts decisions into the expected
    buckets ("dir_effect", "manip", "sig", "rev_den") on four small MACIDs."""
    # Single agent: D1 has a direct edge into the utility node U.
    macid = MACID(
        [("D1", "U"), ("D2", "D1")],
        agent_decisions={1: ["D1", "D2"]},
        agent_utilities={1: ["U"]},
    )
    self.assertEqual(get_reasoning_patterns(macid)["dir_effect"], ["D1"])
    # Two agents: D1 only reaches its own utility U1 through D2 ("manip").
    macid2 = MACID(
        [("D1", "U2"), ("D1", "D2"), ("D2", "U1"), ("D2", "U2")],
        agent_decisions={1: ["D1"], 2: ["D2"]},
        agent_utilities={1: ["U1"], 2: ["U2"]},
    )
    self.assertEqual(get_reasoning_patterns(macid2)["dir_effect"], ["D2"])
    self.assertEqual(get_reasoning_patterns(macid2)["manip"], ["D1"])
    # D1 observes the chance node X and is observed by D2 ("sig", signalling).
    macid3 = MACID(
        [("X", "U1"), ("X", "U2"), ("X", "D1"), ("D1", "D2"), ("D2", "U1"), ("D2", "U2")],
        agent_decisions={1: ["D1"], 2: ["D2"]},
        agent_utilities={1: ["U1"], 2: ["U2"]},
    )
    self.assertEqual(get_reasoning_patterns(macid3)["dir_effect"], ["D2"])
    self.assertEqual(get_reasoning_patterns(macid3)["sig"], ["D1"])
    # D1 controls the collider X2 on the path X1 -> X2 -> D2 ("rev_den").
    macid4 = MACID(
        [("D1", "X2"), ("X1", "X2"), ("X2", "D2"), ("D2", "U1"), ("D2", "U2"), ("X1", "U2")],
        agent_decisions={1: ["D1"], 2: ["D2"]},
        agent_utilities={1: ["U1"], 2: ["U2"]},
    )
    self.assertEqual(get_reasoning_patterns(macid4)["dir_effect"], ["D2"])
    self.assertEqual(get_reasoning_patterns(macid4)["rev_den"], ["D1"])
def basic_different_dec_cardinality() -> MACID:
    """A basic MACIM where the cardinality of each agent's decision node is different.

    It has one subgame perfect NE.
    """
    macid = MACID(
        [("D1", "D2"), ("D1", "U1"), ("D1", "U2"), ("D2", "U2"), ("D2", "U1")],
        agent_decisions={0: ["D1"], 1: ["D2"]},
        agent_utilities={0: ["U1"], 1: ["U2"]},
    )
    # Payoff matrices indexed as [d1, d2]; D1 has 2 actions, D2 has 3.
    agent1_payoff = np.array([[3, 1, 0], [1, 2, 3]])
    agent2_payoff = np.array([[1, 2, 1], [1, 0, 3]])
    # Keyword form of add_cpds: lists become decision domains, lambdas become
    # deterministic utility functions.
    macid.add_cpds(
        D1=[0, 1],
        D2=[0, 1, 2],
        U1=lambda d1, d2: agent1_payoff[d1, d2],
        U2=lambda d1, d2: agent2_payoff[d1, d2],
    )
    return macid
def two_agent_one_pne() -> MACID:
    """This macim is a simultaneous two player game and has a parameterisation
    that corresponds to the following normal form game - where the row player
    is agent 1, and the column player is agent 2:

        +----------+----------+----------+
        |          |  Act(0)  |  Act(1)  |
        +----------+----------+----------+
        |  Act(0)  |   1, 2   |   3, 0   |
        +----------+----------+----------+
        |  Act(1)  |   0, 3   |   2, 2   |
        +----------+----------+----------+
    """
    macid = MACID(
        [("D1", "U1"), ("D1", "U2"), ("D2", "U2"), ("D2", "U1")],
        agent_decisions={1: ["D1"], 2: ["D2"]},
        agent_utilities={1: ["U1"], 2: ["U2"]},
    )
    # Payoff matrices indexed as [d1, d2], matching the table above.
    agent1_payoff = np.array([[1, 3], [0, 2]])
    agent2_payoff = np.array([[2, 0], [3, 2]])
    macid.add_cpds(
        D1=[0, 1],
        D2=[0, 1],
        U1=lambda d1, d2: agent1_payoff[d1, d2],
        U2=lambda d1, d2: agent2_payoff[d1, d2],
    )
    return macid
def modified_taxi_competition() -> MACID:
    """Modifying the payoffs in the taxi competition example so that there is a
    tie break (if taxi 1 chooses to stop in front of the expensive hotel,
    taxi 2 is indifferent between their choices).

    - There are now two SPNE

                              D1
        +----------+----------+----------+
        |  taxi 1  | expensive|  cheap   |
        +----------+----------+----------+
        |expensive |     2    |    3     |
    D2  +----------+----------+----------+
        |  cheap   |     5    |    1     |
        +----------+----------+----------+

                              D1
        +----------+----------+----------+
        |  taxi 2  | expensive|  cheap   |
        +----------+----------+----------+
        |expensive |     2    |    5     |
    D2  +----------+----------+----------+
        |  cheap   |     3    |    5     |
        +----------+----------+----------+
    """
    macid = MACID(
        [("D1", "D2"), ("D1", "U1"), ("D1", "U2"), ("D2", "U2"), ("D2", "U1")],
        agent_decisions={1: ["D1"], 2: ["D2"]},
        agent_utilities={1: ["U1"], 2: ["U2"]},
    )
    # "e" = expensive hotel, "c" = cheap hotel (see tables above).
    d1_domain = ["e", "c"]
    d2_domain = ["e", "c"]
    # Payoffs indexed as [d2, d1]: rows are taxi 2's choice, columns taxi 1's,
    # exactly as laid out in the docstring tables.
    agent1_payoff = np.array([[2, 3], [5, 1]])
    agent2_payoff = np.array([[2, 5], [3, 5]])
    macid.add_cpds(
        DecisionDomain("D1", d1_domain),
        DecisionDomain("D2", d2_domain),
        FunctionCPD(
            "U1",
            lambda d1, d2: agent1_payoff[d2_domain.index(d2), d1_domain.index(d1)]),  # type: ignore
        FunctionCPD(
            "U2",
            lambda d1, d2: agent2_payoff[d2_domain.index(d2), d1_domain.index(d1)]),  # type: ignore
    )
    return macid
def taxi_competition() -> MACID:
    """MACIM representation of the Taxi Competition game.

    "Taxi Competition" is an example introduced in "Equilibrium Refinements
    for Multi-Agent Influence Diagrams: Theory and Practice" by Hammond, Fox,
    Everitt, Abate & Wooldridge, 2021:

                              D1
        +----------+----------+----------+
        |  taxi 1  | expensive|  cheap   |
        +----------+----------+----------+
        |expensive |     2    |    3     |
    D2  +----------+----------+----------+
        |  cheap   |     5    |    1     |
        +----------+----------+----------+

                              D1
        +----------+----------+----------+
        |  taxi 2  | expensive|  cheap   |
        +----------+----------+----------+
        |expensive |     2    |    5     |
    D2  +----------+----------+----------+
        |  cheap   |     3    |    1     |
        +----------+----------+----------+

    There are 3 pure strategy NE and 1 pure SPE.
    """
    macid = MACID(
        [("D1", "D2"), ("D1", "U1"), ("D1", "U2"), ("D2", "U2"), ("D2", "U1")],
        agent_decisions={1: ["D1"], 2: ["D2"]},
        agent_utilities={1: ["U1"], 2: ["U2"]},
    )
    # "e" = expensive hotel, "c" = cheap hotel (see tables above).
    d1_domain = ["e", "c"]
    d2_domain = ["e", "c"]
    # Payoffs indexed as [d2, d1]: rows are taxi 2's choice, columns taxi 1's,
    # exactly as laid out in the docstring tables.
    agent1_payoff = np.array([[2, 3], [5, 1]])
    agent2_payoff = np.array([[2, 5], [3, 1]])
    macid.add_cpds(
        DecisionDomain("D1", d1_domain),
        DecisionDomain("D2", d2_domain),
        FunctionCPD(
            "U1",
            lambda d1, d2: agent1_payoff[d2_domain.index(d2), d1_domain.index(d1)]),  # type: ignore
        FunctionCPD(
            "U2",
            lambda d1, d2: agent2_payoff[d2_domain.index(d2), d1_domain.index(d1)]),  # type: ignore
    )
    return macid
def robot_warehouse() -> MACID:
    r"""
    Implementation of AAMAS robot warehouse example
    - Robot 1 collects packages, and can choose to hurry or not (D1)
    - Hurrying can be quicker (Q) but lead to breakages (B)
    - Robot 2 tidies up, and can choose to repair (R) breakages or not (D2)
    - Conducting repairs can obstruct (O) robot 1
    - Robot 1 rewarded for speed and lack of breakages (U1),
      robot 2 is rewarded for things being in a state of repair (U2)
    """
    macid = MACID(
        [
            ("D1", "Q"),
            ("D1", "B"),
            ("Q", "U1"),
            ("B", "U1"),
            ("B", "R"),
            ("B", "D2"),
            ("D2", "R"),
            ("D2", "O"),
            ("O", "U1"),
            ("R", "U2"),
        ],
        agent_decisions={
            1: ["D1"],
            2: ["D2"],
        },
        agent_utilities={
            1: ["U1"],
            2: ["U2"],
        },
    )
    macid.add_cpds(
        DecisionDomain("D1", domain=[0, 1]),
        DecisionDomain("D2", domain=[0, 1]),
        # Q copies the value of D1 with 90% probability
        StochasticFunctionCPD("Q", lambda d1: {d1: 0.9}, domain=[0, 1]),
        # B copies the value of D1 with 30% probability
        StochasticFunctionCPD("B", lambda d1: {d1: 0.3}, domain=[0, 1]),
        # R = not B or D2
        FunctionCPD("R", lambda b, d2: int(not b or d2)),
        # O copies the value of D2 with 60% probability
        StochasticFunctionCPD("O", lambda d2: {d2: 0.6}, domain=[0, 1]),
        # U1 = (Q and not O) - B
        FunctionCPD("U1", lambda q, b, o: int(q and not o) - int(b)),
        # U2 = R
        FunctionCPD("U2", lambda r: r),  # type: ignore
    )
    return macid
def robot_warehouse() -> MACID:
    r"""
    Implementation of AAMAS robot warehouse example
    - Robot 1 collects packages, and can choose to hurry or not (D1)
    - Hurrying can be quicker (Q) but lead to breakages (B)
    - Robot 2 tidies up, and can choose to repair (R) breakages or not (D2)
    - Conducting repairs can obstruct (O) robot 1
    - Robot 1 rewarded for speed and lack of breakages (U1),
      robot 2 is rewarded for things being in a state of repair (U2)
    """
    macid = MACID(
        [
            ("D1", "Q"),
            ("D1", "B"),
            ("Q", "U1"),
            ("B", "U1"),
            ("B", "R"),
            ("B", "D2"),
            ("D2", "R"),
            ("D2", "O"),
            ("O", "U1"),
            ("R", "U2"),
        ],
        agent_decisions={
            1: ["D1"],
            2: ["D2"],
        },
        agent_utilities={
            1: ["U1"],
            2: ["U2"],
        },
    )
    macid.add_cpds(
        D1=[0, 1],
        D2=[0, 1],
        # Q is a noisy copy of D1 (copy probability left at noisy_copy's
        # default — presumably 0.9 as in the explicit variant; confirm)
        Q=lambda d1: noisy_copy(d1, domain=[0, 1]),
        # B copies the value of D1 with 30% probability
        B=lambda d1: noisy_copy(d1, probability=0.3, domain=[0, 1]),
        # R = not B or D2
        R=lambda b, d2: int(not b or d2),
        # O copies the value of D2 with 60% probability
        O=lambda d2: noisy_copy(d2, probability=0.6, domain=[0, 1]),
        # U1 = (Q and not O) - B
        U1=lambda q, b, o: int(q and not o) - int(b),
        # U2 = R
        U2=lambda r: r,
    )
    return macid
def battle_of_the_sexes() -> MACID:
    """MACIM representation of the battle of the sexes game.

    The battle of the sexes game (also known as Bach or Stravinsky) is a
    simultaneous symmetric two-player game with payoffs corresponding to
    the following normal form game - the row player is Female and the
    column player is Male:

        +----------+----------+----------+
        |          |  Opera   | Football |
        +----------+----------+----------+
        |  Opera   |   3, 2   |   0, 0   |
        +----------+----------+----------+
        | Football |   0, 0   |   2, 3   |
        +----------+----------+----------+

    This game has two pure NE: (Opera, Football) and (Football, Opera)
    """
    macid = MACID(
        [("D_F", "U_F"), ("D_F", "U_M"), ("D_M", "U_M"), ("D_M", "U_F")],
        # Fix: each agent owns their *own* decision and utility node. The
        # original mapping assigned the Female nodes (D_F, U_F) to agent "M"
        # and the Male nodes (D_M, U_M) to agent "F".
        agent_decisions={"F": ["D_F"], "M": ["D_M"]},
        agent_utilities={"F": ["U_F"], "M": ["U_M"]},
    )
    # "O" = Opera, "F" = Football.
    d_f_domain = ["O", "F"]
    d_m_domain = ["O", "F"]
    # Payoffs indexed as [d_f, d_m], matching the table above.
    agent_f_payoff = np.array([[3, 0], [0, 2]])
    agent_m_payoff = np.array([[2, 0], [0, 3]])
    macid.add_cpds(
        DecisionDomain("D_F", d_f_domain),
        DecisionDomain("D_M", d_m_domain),
        FunctionCPD(
            "U_F",
            lambda d_f, d_m: agent_f_payoff[d_f_domain.index(d_f), d_m_domain.index(d_m)]  # type: ignore
        ),
        FunctionCPD(
            "U_M",
            lambda d_f, d_m: agent_m_payoff[d_f_domain.index(d_f), d_m_domain.index(d_m)]  # type: ignore
        ),
    )
    return macid
def signal() -> MACID:
    """A two-agent signalling MACIM: chance node X is observed by D1, and D1
    is observed by D2; both agents' utilities depend on X, D1 and D2."""
    macid = MACID(
        [("X", "D1"), ("X", "U2"), ("X", "U1"), ("D1", "U2"), ("D1", "U1"),
         ("D1", "D2"), ("D2", "U1"), ("D2", "U2")],
        agent_decisions={0: ["D1"], 1: ["D2"]},
        agent_utilities={0: ["U1"], 1: ["U2"]},
    )
    # X is a fair binary chance node.
    cpd_x = TabularCPD("X", 2, np.array([[0.5], [0.5]]))
    cpd_d1 = DecisionDomain("D1", [0, 1])
    # Fix: the decision domain for D2 was declared with the name "D1",
    # which left D2 without a CPD and redefined D1's.
    cpd_d2 = DecisionDomain("D2", [0, 1])
    # Utilities take values 0..5; each one-hot column assigns a deterministic
    # utility to one (X, D1, D2) combination.
    u1_cpd_array = np.array([
        [0, 0, 0, 0, 1, 0, 0, 0],
        [0, 0, 0, 1, 0, 0, 1, 0],
        [0, 1, 0, 0, 0, 0, 0, 0],
        [0, 0, 1, 0, 0, 1, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 1],
        [1, 0, 0, 0, 0, 0, 0, 0],
    ])
    u2_cpd_array = np.array([
        [0, 0, 0, 0, 1, 0, 0, 0],
        [0, 0, 0, 1, 0, 0, 1, 0],
        [0, 1, 0, 0, 0, 0, 0, 0],
        [0, 0, 1, 0, 0, 1, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 1],
        [1, 0, 0, 0, 0, 0, 0, 0],
    ])
    cpd_u1 = TabularCPD("U1", 6, u1_cpd_array, evidence=["X", "D1", "D2"], evidence_card=[2, 2, 2])
    cpd_u2 = TabularCPD("U2", 6, u2_cpd_array, evidence=["X", "D1", "D2"], evidence_card=[2, 2, 2])
    macid.add_cpds(cpd_x, cpd_d1, cpd_d2, cpd_u1, cpd_u2)
    return macid
def two_agent_two_pne() -> MACID:
    """This macim is a simultaneous two player game and has a parameterisation
    that corresponds to the following normal form game - where the row player
    is agent 0, and the column player is agent 1:

        +----------+----------+----------+
        |          |  Act(0)  |  Act(1)  |
        +----------+----------+----------+
        |  Act(0)  |   1, 1   |   4, 2   |
        +----------+----------+----------+
        |  Act(1)  |   2, 4   |   3, 3   |
        +----------+----------+----------+
    """
    macid = MACID(
        [("D1", "U1"), ("D1", "U2"), ("D2", "U2"), ("D2", "U1")],
        agent_decisions={0: ["D1"], 1: ["D2"]},
        agent_utilities={0: ["U1"], 1: ["U2"]},
    )
    cpd_d1 = DecisionDomain("D1", [0, 1])
    cpd_d2 = DecisionDomain("D2", [0, 1])
    # Utilities take values 0..4; each one-hot column assigns the table's
    # payoff to one (D1, D2) pair, in the order (0,0), (0,1), (1,0), (1,1).
    cpd_u1 = TabularCPD(
        "U1",
        5,
        np.array([[0, 0, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 1, 0, 0]]),
        evidence=["D1", "D2"],
        evidence_card=[2, 2],
    )
    cpd_u2 = TabularCPD(
        "U2",
        5,
        np.array([[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]]),
        evidence=["D1", "D2"],
        evidence_card=[2, 2],
    )
    macid.add_cpds(cpd_d1, cpd_d2, cpd_u1, cpd_u2)
    return macid
def matching_pennies() -> MACID:
    """MACIM representation of the matching pennies game.

    The matching pennies game is a symmetric two-player game with payoffs
    corresponding to the following normal form game - the row player is
    agent 1 and the column player is agent 2:

        +----------+----------+----------+
        |          |  Heads   |  Tails   |
        +----------+----------+----------+
        |  Heads   |  +1, -1  |  -1, +1  |
        +----------+----------+----------+
        |  Tails   |  -1, +1  |  +1, -1  |
        +----------+----------+----------+

    This game has no pure NE, but has a mixed NE where each player chooses
    Heads or Tails with equal probability.
    """
    macid = MACID(
        [("D1", "U1"), ("D1", "U2"), ("D2", "U2"), ("D2", "U1")],
        agent_decisions={1: ["D1"], 2: ["D2"]},
        agent_utilities={1: ["U1"], 2: ["U2"]},
    )
    # "H" = Heads, "T" = Tails.
    d1_domain = ["H", "T"]
    d2_domain = ["H", "T"]
    # Zero-sum payoffs indexed as [d1, d2]; agent 2's matrix is the negation.
    agent1_payoff = np.array([[1, -1], [-1, 1]])
    agent2_payoff = np.array([[-1, 1], [1, -1]])
    macid.add_cpds(
        DecisionDomain("D1", d1_domain),
        DecisionDomain("D2", d2_domain),
        FunctionCPD(
            "U1",
            lambda d1, d2: agent1_payoff[d1_domain.index(d1), d2_domain.index(d2)]),  # type: ignore
        FunctionCPD(
            "U2",
            lambda d1, d2: agent2_payoff[d1_domain.index(d1), d2_domain.index(d2)]),  # type: ignore
    )
    return macid
def get_path_example() -> MACID:
    """A small single-agent MACID used for path examples: X1 and X2 feed the
    decision D, and the utility U depends on X2 and D."""
    edges = [
        ("X1", "X3"),
        ("X1", "D"),
        ("X2", "D"),
        ("X2", "U"),
        ("D", "U"),
    ]
    return MACID(edges, agent_decisions={1: ["D"]}, agent_utilities={1: ["U"]})
def macid_undir_paths2() -> MACID:
    """A single-agent MACID whose graph contains two undirected routes
    between B and E (B-C-D-E and B-F-E); used for undirected-path tests."""
    graph = [
        ("A", "B"),
        ("B", "C"),
        ("C", "D"),
        ("D", "E"),
        ("B", "F"),
        ("F", "E"),
    ]
    macid = MACID(graph, agent_decisions={1: ["D"]}, agent_utilities={1: ["E"]})
    return macid
def tree_doctor() -> MACID:
    """A two-agent MACID: agent 0 takes decisions PT and BP (utilities E, V);
    agent 1 takes decision TDoc (utilities Tree, Cost)."""
    edges = [
        ("PT", "E"),
        ("PT", "TS"),
        ("PT", "BP"),
        ("TS", "TDoc"),
        ("TS", "TDead"),
        ("TDead", "V"),
        ("TDead", "Tree"),
        ("TDoc", "TDead"),
        ("TDoc", "Cost"),
        ("TDoc", "BP"),
        ("BP", "V"),
    ]
    return MACID(
        edges,
        agent_decisions={0: ["PT", "BP"], 1: ["TDoc"]},
        agent_utilities={0: ["E", "V"], 1: ["Tree", "Cost"]},
    )
def get_basic_subgames3() -> MACID:
    """A four-agent MACID with one decision node Di and one utility node Ui
    per agent; used to exercise subgame identification."""
    edges = [
        ("D4", "U4"),
        ("D2", "U4"),
        ("D3", "U4"),
        ("D2", "U2"),
        ("D3", "U3"),
        ("D1", "U2"),
        ("D1", "U3"),
        ("D1", "U1"),
    ]
    # Each agent i in 1..4 owns decision "Di" and utility "Ui".
    decisions = {i: ["D%d" % i] for i in range(1, 5)}
    utilities = {i: ["U%d" % i] for i in range(1, 5)}
    return MACID(edges, agent_decisions=decisions, agent_utilities=utilities)
def get_basic_subgames() -> MACID:
    """A three-agent MACID: agent 0 owns decisions D11 and D12 (utility U11),
    agent 1 owns D2 (utilities U2, U22), and agent 2 owns D3 (utility U3);
    used to exercise subgame identification."""
    edges = [
        ("D11", "U11"),
        ("D11", "U2"),
        ("D11", "D12"),
        ("X1", "U11"),
        ("X1", "D11"),
        ("X1", "D2"),
        ("X1", "U3"),
        ("D2", "U2"),
        ("D2", "U3"),
        ("D2", "D3"),
        ("D3", "U2"),
        ("D3", "U3"),
        ("D12", "U3"),
        ("D12", "U22"),
        ("X2", "U22"),
        ("X2", "D12"),
    ]
    return MACID(
        edges,
        agent_decisions={0: ["D11", "D12"], 1: ["D2"], 2: ["D3"]},
        agent_utilities={0: ["U11"], 1: ["U2", "U22"], 2: ["U3"]},
    )
def prisoners_dilemma() -> MACID:
    """MACIM representation of the canonical prisoner's dilemma.

    The prisoner's dilemma is a simultaneous symmetric two-player game with
    payoffs corresponding to the following normal form game - the row player
    is agent 1 and the column player is agent 2:

        +----------+----------+----------+
        |          |Cooperate |  Defect  |
        +----------+----------+----------+
        |Cooperate |  -1, -1  |  -3, 0   |
        +----------+----------+----------+
        |  Defect  |   0, -3  |  -2, -2  |
        +----------+----------+----------+

    This game has one pure NE: (defect, defect)
    """
    macid = MACID(
        [("D1", "U1"), ("D1", "U2"), ("D2", "U2"), ("D2", "U1")],
        agent_decisions={1: ["D1"], 2: ["D2"]},
        agent_utilities={1: ["U1"], 2: ["U2"]},
    )
    # "c" = cooperate, "d" = defect.
    d1_domain = ["c", "d"]
    d2_domain = ["c", "d"]
    # Payoffs indexed as [d1, d2]; the game is symmetric, so agent 2's matrix
    # is the transpose of agent 1's.
    agent1_payoff = np.array([[-1, -3], [0, -2]])
    agent2_payoff = np.transpose(agent1_payoff)
    macid.add_cpds(
        DecisionDomain("D1", d1_domain),
        DecisionDomain("D2", d2_domain),
        FunctionCPD(
            "U1",
            lambda d1, d2: agent1_payoff[d1_domain.index(d1), d2_domain.index(d2)]),  # type: ignore
        FunctionCPD(
            "U2",
            lambda d1, d2: agent2_payoff[d1_domain.index(d1), d2_domain.index(d2)]),  # type: ignore
    )
    return macid
def two_agents_three_actions() -> MACID:
    """This macim is a representation of a game where two players must decide
    between three different actions simultaneously - the row player is agent 1
    and the column player is agent 2 - the normal form representation of the
    payoffs is as follows:

        +----------+----------+----------+----------+
        |          |    L     |    C     |    R     |
        +----------+----------+----------+----------+
        |    T     |   4, 3   |   5, 1   |   6, 2   |
        +----------+----------+----------+----------+
        |    M     |   2, 1   |   8, 4   |   3, 6   |
        +----------+----------+----------+----------+
        |    B     |   3, 0   |   9, 6   |   2, 8   |
        +----------+----------+----------+----------+

    - The game has one pure NE (T,L)
    """
    macid = MACID(
        [("D1", "U1"), ("D1", "U2"), ("D2", "U2"), ("D2", "U1")],
        agent_decisions={1: ["D1"], 2: ["D2"]},
        agent_utilities={1: ["U1"], 2: ["U2"]},
    )
    # Row player's actions (T, M, B) and column player's actions (L, C, R).
    d1_domain = ["T", "M", "B"]
    d2_domain = ["L", "C", "R"]
    cpd_d1 = DecisionDomain("D1", d1_domain)
    cpd_d2 = DecisionDomain("D2", d2_domain)
    # Payoffs indexed as [d1, d2], matching the table above.
    agent1_payoff = np.array([[4, 5, 6], [2, 8, 3], [3, 9, 2]])
    agent2_payoff = np.array([[3, 1, 2], [1, 4, 6], [0, 6, 8]])
    cpd_u1 = FunctionCPD(
        "U1",
        lambda d1, d2: agent1_payoff[d1_domain.index(d1), d2_domain.index(d2)])  # type: ignore
    cpd_u2 = FunctionCPD(
        "U2",
        lambda d1, d2: agent2_payoff[d1_domain.index(d1), d2_domain.index(d2)])  # type: ignore
    macid.add_cpds(cpd_d1, cpd_d2, cpd_u1, cpd_u2)
    return macid
def test_direct_effect(self) -> None:
    """direct_effect holds for a decision with an edge into a utility node,
    fails for one without, and raises KeyError for a node not in the MACID."""
    macid = MACID(
        [("D1", "U"), ("D2", "D1")],
        agent_decisions={1: ["D1", "D2"]},
        agent_utilities={1: ["U"]},
    )
    # D1 -> U is a direct edge into the utility node.
    self.assertTrue(direct_effect(macid, "D1"))
    # D2 only reaches U through D1, so no direct effect.
    self.assertFalse(direct_effect(macid, "D2"))
    # D3 does not exist in the graph.
    with self.assertRaises(KeyError):
        direct_effect(macid, "D3")
def umbrella() -> MACID:
    """A single-agent umbrella decision problem: W influences F; the decision
    UM observes F only, and the utility A depends on W and UM.

    NOTE(review): from the node names, W is presumably weather, F the
    forecast, and UM the take-umbrella decision - confirm against callers.
    """
    macid = MACID(
        [("W", "F"), ("W", "A"), ("F", "UM"), ("UM", "A")],
        agent_decisions={1: ["UM"]},
        agent_utilities={1: ["A"]},
    )
    # P(W): 0.6 / 0.4 prior over the two states of W.
    cpd_w = TabularCPD("W", 2, np.array([[0.6], [0.4]]))
    # P(F | W): F matches W with probability 0.8 / 0.7 respectively.
    cpd_f = TabularCPD("F", 2, np.array([[0.8, 0.3], [0.2, 0.7]]), evidence=["W"], evidence_card=[2])
    # A takes values 0..2, deterministic given (W, UM); columns are the
    # (W, UM) combinations (0,0), (0,1), (1,0), (1,1).
    cpd_a = TabularCPD("A", 3, np.array([[0, 1, 1, 0], [1, 0, 0, 0], [0, 0, 0, 1]]), evidence=["W", "UM"], evidence_card=[2, 2])
    # UM's decision domain is given in keyword form.
    macid.add_cpds(cpd_w, cpd_f, cpd_a, UM=[0, 1])
    return macid
def two_agent_one_pne() -> MACID:
    """This macim is a simultaneous two player game and has a parameterisation
    that corresponds to the following normal form game - where the row player
    is agent 1, and the column player is agent 2:

        +----------+----------+----------+
        |          |  Act(0)  |  Act(1)  |
        +----------+----------+----------+
        |  Act(0)  |   1, 2   |   3, 0   |
        +----------+----------+----------+
        |  Act(1)  |   0, 3   |   2, 2   |
        +----------+----------+----------+
    """
    macid = MACID(
        [("D1", "U1"), ("D1", "U2"), ("D2", "U2"), ("D2", "U1")],
        agent_decisions={1: ["D1"], 2: ["D2"]},
        agent_utilities={1: ["U1"], 2: ["U2"]},
    )
    cpd_d1 = DecisionDomain("D1", [0, 1])
    cpd_d2 = DecisionDomain("D2", [0, 1])
    # Payoff matrices indexed as [d1, d2], matching the table above.
    agent1_payoff = np.array([[1, 3], [0, 2]])
    agent2_payoff = np.array([[2, 0], [3, 2]])
    cpd_u1 = FunctionCPD("U1", lambda d1, d2: agent1_payoff[d1, d2])  # type: ignore
    cpd_u2 = FunctionCPD("U2", lambda d1, d2: agent2_payoff[d1, d2])  # type: ignore
    macid.add_cpds(cpd_d1, cpd_d2, cpd_u1, cpd_u2)
    return macid
def sequential() -> MACID:
    """A minimal sequential two-agent MACID: agent 0 moves first (D1),
    agent 1 observes D1 and then moves (D2); both utilities depend on both
    decisions."""
    return MACID(
        [
            ("D1", "U1"),
            ("D1", "U2"),
            ("D1", "D2"),
            ("D2", "U1"),
            ("D2", "U2"),
        ],
        agent_decisions={0: ["D1"], 1: ["D2"]},
        agent_utilities={0: ["U1"], 1: ["U2"]},
    )
def test_revealing_or_denying(self) -> None:
    """revealing_or_denying holds for D1 (which controls the collider X2 on
    the path X1 -> X2 -> D2), fails for D2, and raises KeyError when either
    the decision or a member of the effective set is not in the MACID."""
    macid = MACID(
        [("D1", "X2"), ("X1", "X2"), ("X2", "D2"), ("D2", "U1"), ("D2", "U2"), ("X1", "U2")],
        agent_decisions={1: ["D1"], 2: ["D2"]},
        agent_utilities={1: ["U1"], 2: ["U2"]},
    )
    effective_set = {"D2"}  # by direct effect
    self.assertTrue(revealing_or_denying(macid, "D1", effective_set))
    self.assertFalse(revealing_or_denying(macid, "D2", effective_set))
    # Unknown decision node.
    with self.assertRaises(KeyError):
        revealing_or_denying(macid, "D3", effective_set)
    # Effective set containing a node not in the graph.
    effective_set2 = {"A"}
    with self.assertRaises(KeyError):
        revealing_or_denying(macid, "D1", effective_set2)
def politician() -> MACID:
    """A two-agent MACID: D1 and the chance node T both feed I; agent 2's
    decision D2 observes I and R, and both utilities depend on D2 (T also
    directly affects U2)."""
    graph = [
        ("D1", "I"),
        ("T", "I"),
        ("T", "U2"),
        ("I", "D2"),
        ("R", "D2"),
        ("D2", "U1"),
        ("D2", "U2"),
    ]
    return MACID(
        graph,
        agent_decisions={1: ["D1"], 2: ["D2"]},
        agent_utilities={1: ["U1"], 2: ["U2"]},
    )
def forgetful_movie_star() -> MACID:
    """A two-agent MACID in which agent 1 takes two decisions, D11 and D12,
    each informed by S - but D12 has no edge from D11, so agent 1 does not
    recall its earlier decision."""
    edges = [
        ("S", "D11"),
        ("S", "D12"),
        ("D2", "U2"),
        ("D2", "U11"),
        ("D11", "U2"),
        ("D11", "U11"),
        ("D11", "U12"),
        ("D12", "U12"),
    ]
    return MACID(
        edges,
        agent_decisions={1: ["D11", "D12"], 2: ["D2"]},
        agent_utilities={1: ["U11", "U12"], 2: ["U2"]},
    )
def subgame_difference() -> MACID:
    """A sequential two-agent MACID with a chance node N observed by D1 (but
    not by D2); the "_A" utilities depend on N and D1 while the "_B"
    utilities depend on D1 and D2."""
    graph = [
        ("N", "D1"),
        ("N", "U1_A"),
        ("N", "U2_A"),
        ("D1", "U1_A"),
        ("D1", "U2_A"),
        ("D1", "U1_B"),
        ("D1", "U2_B"),
        ("D1", "D2"),
        ("D2", "U1_B"),
        ("D2", "U2_B"),
    ]
    return MACID(
        graph,
        agent_decisions={1: ["D1"], 2: ["D2"]},
        agent_utilities={1: ["U1_A", "U1_B"], 2: ["U2_A", "U2_B"]},
    )
def macid() -> MACID:
    """A basic two-agent sequential MACID fixture (D2 observes D1).

    Fix: agent_decisions and agent_utilities must each map an agent to a
    *list* of node names. The original passed the same nested
    {"D": [...], "U": [...]} dict to both arguments, which mislabels utility
    nodes as decisions (and vice versa) and matches no other constructor in
    this file.
    """
    return MACID(
        [("D1", "D2"), ("D1", "U1"), ("D1", "U2"), ("D2", "U2"), ("D2", "U1")],
        agent_decisions={0: ["D1"], 1: ["D2"]},
        agent_utilities={0: ["U1"], 1: ["U2"]},
    )
def triage() -> MACID:
    """A six-agent triage MACID. Each agent i has a private chance node Hi
    feeding its decision Di and utility Ui. Decisions happen in three stages
    (D1/D2, then D3/D4, then D5/D6): later decisions observe all earlier
    ones, and every decision affects every agent's utility."""
    macid = MACID(
        [
            # Stage 1: H1/H2 inform D1/D2; D1 and D2 affect U1-U4 and are
            # observed by the stage-2 decisions.
            ("H1", "D1"),
            ("H1", "U1"),
            ("H2", "D2"),
            ("H2", "U2"),
            ("D1", "U1"),
            ("D1", "U2"),
            ("D1", "D3"),
            ("D1", "D4"),
            ("D1", "U3"),
            ("D1", "U4"),
            ("D2", "U1"),
            ("D2", "U2"),
            ("D2", "D4"),
            ("D2", "D3"),
            ("D2", "U3"),
            ("D2", "U4"),
            # Stage 2: H3/H4 inform D3/D4; D3 and D4 affect U1-U4.
            ("H3", "D3"),
            ("H3", "U3"),
            ("H4", "D4"),
            ("H4", "U4"),
            ("D3", "U3"),
            ("D3", "U4"),
            ("D3", "U1"),
            ("D3", "U2"),
            ("D4", "U3"),
            ("D4", "U4"),
            ("D4", "U1"),
            ("D4", "U2"),
            # All stage-1/2 decisions also affect the stage-3 utilities.
            ("D3", "U5"),
            ("D3", "U6"),
            ("D4", "U5"),
            ("D4", "U6"),
            ("D1", "U5"),
            ("D1", "U6"),
            ("D2", "U5"),
            ("D2", "U6"),
            # Stage 3: H5/H6 inform D5/D6; D5 and D6 observe all earlier
            # decisions and affect every utility node.
            ("H5", "D5"),
            ("H5", "U5"),
            ("H6", "D6"),
            ("H6", "U6"),
            ("D1", "D5"),
            ("D1", "D6"),
            ("D2", "D5"),
            ("D2", "D6"),
            ("D3", "D5"),
            ("D3", "D6"),
            ("D4", "D5"),
            ("D4", "D6"),
            ("D5", "U3"),
            ("D5", "U4"),
            ("D5", "U1"),
            ("D5", "U2"),
            ("D5", "U5"),
            ("D5", "U6"),
            ("D6", "U3"),
            ("D6", "U4"),
            ("D6", "U1"),
            ("D6", "U2"),
            ("D6", "U5"),
            ("D6", "U6"),
        ],
        agent_decisions={
            1: ["D1"],
            2: ["D2"],
            3: ["D3"],
            4: ["D4"],
            5: ["D5"],
            6: ["D6"],
        },
        agent_utilities={
            1: ["U1"],
            2: ["U2"],
            3: ["U3"],
            4: ["U4"],
            5: ["U5"],
            6: ["U6"],
        },
    )
    return macid