Example #1
def get_sequential_cid() -> CID:
    """
    This CID is a subtle case of sufficient recall: the decision rule for D1 influences
    the expected utility of D2, but D2 can still be chosen without knowing D1, because
    every path from D1 to the utility node downstream of D2 (U2) passes through S2,
    which D2 observes.
    """
    cid = CID(
        [
            ("S1", "D1"),
            ("D1", "U1"),
            ("S1", "U1"),
            ("D1", "S2"),
            ("S2", "D2"),
            ("D2", "U2"),
            ("S2", "U2"),
        ],
        decisions=["D1", "D2"],
        utilities=["U1", "U2"],
    )

    cid.add_cpds(
        UniformRandomCPD("S1", [0, 1]),
        DecisionDomain("D1", [0, 1]),
        FunctionCPD("U1", lambda s1, d1: int(s1 == d1)),  # type: ignore
        FunctionCPD("S2", lambda d1: d1),  # type: ignore
        DecisionDomain("D2", [0, 1]),
        FunctionCPD("U2", lambda s2, d2: int(s2 == d2)),  # type: ignore
    )
    return cid
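The point made in the docstring can be checked without pycid at all. The sketch below (plain Python, using only the functional relationships defined above) enumerates every deterministic rule for D1 and shows that the rule d2(s2) = s2 is already optimal for D2, so D2 can be optimised without knowing how D1 is chosen.

from itertools import product

# All deterministic decision rules for D1 (a function of S1) and for D2 (a
# function of S2), for the binary domains used above.
d1_rules = [lambda s1, a=a, b=b: (a, b)[s1] for a, b in product([0, 1], repeat=2)]
d2_rules = [lambda s2, a=a, b=b: (a, b)[s2] for a, b in product([0, 1], repeat=2)]

for d1_rule in d1_rules:
    def expected_u2(d2_rule):
        # E[U2] under uniform S1, with S2 = D1 and U2 = int(S2 == D2)
        return sum(int(d1_rule(s1) == d2_rule(d1_rule(s1))) for s1 in (0, 1)) / 2
    # The rule "copy S2" is optimal no matter which rule D1 follows.
    assert max(expected_u2(r) for r in d2_rules) == expected_u2(lambda s2: s2)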
Example #2
def basic_different_dec_cardinality() -> MACID:
    """A basic MACIM where the cardinality of each agent's decision node
    is different. It has one subgame perfect NE.
    """
    macid = MACID(
        [("D1", "D2"), ("D1", "U1"), ("D1", "U2"), ("D2", "U2"), ("D2", "U1")],
        agent_decisions={
            0: ["D1"],
            1: ["D2"]
        },
        agent_utilities={
            0: ["U1"],
            1: ["U2"]
        },
    )

    cpd_d1 = DecisionDomain("D1", [0, 1])
    cpd_d2 = DecisionDomain("D2", [0, 1, 2])

    agent1_payoff = np.array([[3, 1, 0], [1, 2, 3]])
    agent2_payoff = np.array([[1, 2, 1], [1, 0, 3]])

    cpd_u1 = FunctionCPD("U1",
                         lambda d1, d2: agent1_payoff[d1, d2])  # type: ignore
    cpd_u2 = FunctionCPD("U2",
                         lambda d1, d2: agent2_payoff[d1, d2])  # type: ignore

    macid.add_cpds(cpd_d1, cpd_d2, cpd_u1, cpd_u2)

    return macid
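A standalone backward-induction check of the docstring's claim (plain Python and numpy only, reusing the payoff matrices above): agent 1 moves second and observes D1, so it best-responds row by row, and agent 0 then optimises against that response. Both best responses are unique, so the subgame perfect equilibrium is unique.

import numpy as np

agent1_payoff = np.array([[3, 1, 0], [1, 2, 3]])  # agent 0's payoff, indexed [d1, d2]
agent2_payoff = np.array([[1, 2, 1], [1, 0, 3]])  # agent 1's payoff, indexed [d1, d2]

# Agent 1 best-responds to each observed d1.
best_d2 = {d1: int(np.argmax(agent2_payoff[d1])) for d1 in (0, 1)}
# Agent 0 chooses d1 anticipating that response.
best_d1 = max((0, 1), key=lambda d1: agent1_payoff[d1, best_d2[d1]])
print(best_d1, best_d2[best_d1])  # 1 2 -- the unique subgame perfect outcome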
Example #3
def basic2agent_tie_break() -> MACID:
    macid = MACID(
        [("D1", "D2"), ("D1", "U1"), ("D1", "U2"), ("D2", "U2"), ("D2", "U1")],
        agent_decisions={
            0: ["D1"],
            1: ["D2"]
        },
        agent_utilities={
            0: ["U1"],
            1: ["U2"]
        },
    )

    cpd_d1 = DecisionDomain("D1", [0, 1])
    cpd_d2 = DecisionDomain("D2", [0, 1])
    cpd_u1 = TabularCPD(
        "U1",
        6,
        np.array([[0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 0, 0], [1, 0, 1, 0],
                  [0, 0, 0, 0], [0, 0, 0, 0]]),
        evidence=["D1", "D2"],
        evidence_card=[2, 2],
    )
    cpd_u2 = TabularCPD(
        "U2",
        6,
        np.array([[0, 0, 0, 0], [1, 0, 0, 0], [0, 0, 1, 1], [0, 0, 0, 0],
                  [0, 0, 0, 0], [0, 1, 0, 0]]),
        evidence=["D1", "D2"],
        evidence_card=[2, 2],
    )

    macid.add_cpds(cpd_d1, cpd_d2, cpd_u1, cpd_u2)

    return macid
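The one-hot TabularCPD arrays above are hard to read at a glance. The sketch below decodes them back into payoff matrices; it assumes pgmpy's column ordering for TabularCPD evidence (columns run over (D1, D2) assignments with the last evidence variable, D2, cycling fastest), which is how the arrays appear to be laid out.

import numpy as np

u1_table = np.array([[0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 0, 0], [1, 0, 1, 0],
                     [0, 0, 0, 0], [0, 0, 0, 0]])
u2_table = np.array([[0, 0, 0, 0], [1, 0, 0, 0], [0, 0, 1, 1], [0, 0, 0, 0],
                     [0, 0, 0, 0], [0, 1, 0, 0]])

# Each column is a point mass over the utility values 0..5, so argmax
# recovers the utility for the corresponding (D1, D2) assignment.
agent0_payoff = u1_table.argmax(axis=0).reshape(2, 2)  # [[3, 0], [3, 1]]
agent1_payoff = u2_table.argmax(axis=0).reshape(2, 2)  # [[1, 5], [2, 2]]

# Agent 1 receives the same payoff for both responses when D1 = 1,
# which is the tie the function name refers to.
print(agent0_payoff)
print(agent1_payoff)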
Example #4
def get_insufficient_recall_cid() -> CID:
    cid = CID([("A", "U"), ("B", "U")], decisions=["A", "B"], utilities=["U"])
    cid.add_cpds(
        DecisionDomain("A", [0, 1]),
        DecisionDomain("B", [0, 1]),
        FunctionCPD("U", lambda a, b: a * b),  # type: ignore
    )
    return cid
Example #5
def modified_taxi_competition() -> MACID:
    """Modifying the payoffs in the taxi competition example
    so that there is a tie break (if taxi 1 chooses to stop
    in front of the expensive hotel, taxi 2 is indifferent
    between their choices.)

    - There are now two SPNE

                              D1
        +----------+----------+----------+
        |  taxi 1  | expensive|  cheap   |
        +----------+----------+----------+
        |expensive |     2    |   3      |
    D2  +----------+----------+----------+
        | cheap    |     5    |   1      |
        +----------+----------+----------+

                              D1
        +----------+----------+----------+
        |  taxi 2  | expensive|  cheap   |
        +----------+----------+----------+
        |expensive |     2    |   5      |
    D2  +----------+----------+----------+
        | cheap    |     3    |   5      |
        +----------+----------+----------+

    """
    macid = MACID(
        [("D1", "D2"), ("D1", "U1"), ("D1", "U2"), ("D2", "U2"), ("D2", "U1")],
        agent_decisions={
            1: ["D1"],
            2: ["D2"]
        },
        agent_utilities={
            1: ["U1"],
            2: ["U2"]
        },
    )

    d1_domain = ["e", "c"]
    d2_domain = ["e", "c"]
    agent1_payoff = np.array([[2, 3], [5, 1]])
    agent2_payoff = np.array([[2, 5], [3, 5]])

    macid.add_cpds(
        DecisionDomain("D1", d1_domain),
        DecisionDomain("D2", d2_domain),
        FunctionCPD(
            "U1",
            lambda d1, d2: agent1_payoff[d2_domain.index(d2),
                                         d1_domain.index(d1)]),  # type: ignore
        FunctionCPD(
            "U2",
            lambda d1, d2: agent2_payoff[d2_domain.index(d2),
                                         d1_domain.index(d1)]),  # type: ignore
    )
    return macid
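A quick standalone check of the tie described in the docstring, using the payoff matrix defined above: taxi 2's two responses yield the same payoff when taxi 1 stops at the cheap hotel.

import numpy as np

agent2_payoff = np.array([[2, 5], [3, 5]])  # indexed [d2, d1], domain order ["e", "c"]
cheap = 1  # index of "c"
assert agent2_payoff[0, cheap] == agent2_payoff[1, cheap]  # 5 == 5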
Example #6
def taxi_competition() -> MACID:
    """MACIM representation of the Taxi Competition game.

    "Taxi Competition" is an example introduced in
    "Equilibrium Refinements for Multi-Agent Influence Diagrams: Theory and Practice"
    by Hammond, Fox, Everitt, Abate & Wooldridge, 2021:

                              D1
        +----------+----------+----------+
        |  taxi 1  | expensive|  cheap   |
        +----------+----------+----------+
        |expensive |     2    |   3      |
    D2  +----------+----------+----------+
        | cheap    |     5    |   1      |
        +----------+----------+----------+

                              D1
        +----------+----------+----------+
        |  taxi 2  | expensive|  cheap   |
        +----------+----------+----------+
        |expensive |     2    |   5      |
    D2  +----------+----------+----------+
        | cheap    |     3    |   1      |
        +----------+----------+----------+

    There are 3 pure strategy NE and 1 pure SPE.
    """
    macid = MACID(
        [("D1", "D2"), ("D1", "U1"), ("D1", "U2"), ("D2", "U2"), ("D2", "U1")],
        agent_decisions={
            1: ["D1"],
            2: ["D2"]
        },
        agent_utilities={
            1: ["U1"],
            2: ["U2"]
        },
    )

    d1_domain = ["e", "c"]
    d2_domain = ["e", "c"]
    agent1_payoff = np.array([[2, 3], [5, 1]])
    agent2_payoff = np.array([[2, 5], [3, 1]])

    macid.add_cpds(
        DecisionDomain("D1", d1_domain),
        DecisionDomain("D2", d2_domain),
        FunctionCPD(
            "U1",
            lambda d1, d2: agent1_payoff[d2_domain.index(d2),
                                         d1_domain.index(d1)]),  # type: ignore
        FunctionCPD(
            "U2",
            lambda d1, d2: agent2_payoff[d2_domain.index(d2),
                                         d1_domain.index(d1)]),  # type: ignore
    )
    return macid
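A standalone check of the claim that there are 3 pure strategy NE and 1 pure SPE (plain Python and numpy, no pycid calls): taxi 2 observes D1, so its pure strategies are maps from taxi 1's choice to a response, and the sketch enumerates every strategy profile of this sequential game.

from itertools import product

import numpy as np

domain = ["e", "c"]
agent1_payoff = np.array([[2, 3], [5, 1]])  # taxi 1's payoff, indexed [d2, d1]
agent2_payoff = np.array([[2, 5], [3, 1]])  # taxi 2's payoff, indexed [d2, d1]

def payoffs(d1, d2):
    i, j = domain.index(d2), domain.index(d1)
    return agent1_payoff[i, j], agent2_payoff[i, j]

ne, spe = [], []
for d1, responses in product(domain, product(domain, repeat=2)):
    policy = dict(zip(domain, responses))  # taxi 2's reply to "e" and to "c"
    u1, u2 = payoffs(d1, policy[d1])
    d1_best = all(u1 >= payoffs(alt, policy[alt])[0] for alt in domain)
    d2_best = all(u2 >= payoffs(d1, alt)[1] for alt in domain)
    if d1_best and d2_best:
        ne.append((d1, policy))
        # Subgame perfection also requires taxi 2 to best-respond off the path.
        if all(payoffs(x, policy[x])[1] >= payoffs(x, alt)[1]
               for x in domain for alt in domain):
            spe.append((d1, policy))

print(len(ne), len(spe))  # 3 pure NE, 1 pure SPE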
Example #7
def robot_warehouse() -> MACID:
    r"""
    Implementation of the AAMAS robot warehouse example.

    - Robot 1 collects packages, and can choose whether
    to hurry (D1)
    - Hurrying can be quicker (Q) but can lead to
    breakages (B)
    - Robot 2 tidies up, and can choose whether to
    repair breakages (D2); R records whether things
    end up in a state of repair
    - Conducting repairs can obstruct (O) robot 1
    - Robot 1 is rewarded for speed and lack of
    breakages (U1); robot 2 is rewarded for things
    being in a state of repair (U2)

    """
    macid = MACID(
        [
            ("D1", "Q"),
            ("D1", "B"),
            ("Q", "U1"),
            ("B", "U1"),
            ("B", "R"),
            ("B", "D2"),
            ("D2", "R"),
            ("D2", "O"),
            ("O", "U1"),
            ("R", "U2"),
        ],
        agent_decisions={
            1: ["D1"],
            2: ["D2"],
        },
        agent_utilities={
            1: ["U1"],
            2: ["U2"],
        },
    )

    macid.add_cpds(
        DecisionDomain("D1", domain=[0, 1]),
        DecisionDomain("D2", domain=[0, 1]),
        # Q copies the value of D1 with 90% probability
        StochasticFunctionCPD("Q", lambda d1: {d1: 0.9}, domain=[0, 1]),
        # B copies the value of D1 with 30% probability
        StochasticFunctionCPD("B", lambda d1: {d1: 0.3}, domain=[0, 1]),
        # R = not B or D2
        FunctionCPD("R", lambda b, d2: int(not b or d2)),
        # O copies the value of D2 with 60% probability
        StochasticFunctionCPD("O", lambda d2: {d2: 0.6}, domain=[0, 1]),
        # U1 = (Q and not O) - B
        FunctionCPD("U1", lambda q, b, o: int(q and not o) - int(b)),
        # U2 = R
        FunctionCPD("U2", lambda r: r),  # type: ignore
    )
    return macid
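For readers unfamiliar with the `{d1: 0.9}` notation used in the comments above: for a binary domain it means the named value gets probability 0.9 and the remaining 0.1 goes to the other value. The hypothetical `sample_q` helper below mimics that distribution in plain Python purely as an illustration; it is not how the library implements StochasticFunctionCPD.

import random

def sample_q(d1: int, rng: random.Random) -> int:
    # Return d1 with probability 0.9, and the other binary value otherwise.
    return d1 if rng.random() < 0.9 else 1 - d1

rng = random.Random(0)
samples = [sample_q(1, rng) for _ in range(10_000)]
print(sum(s == 1 for s in samples) / len(samples))  # close to 0.9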
Example #8
def battle_of_the_sexes() -> MACID:
    """MACIM representation of the battle of the sexes game.

    The battle of the sexes game (also known as Bach or Stravinsky)
    is a simultaneous two-player coordination game with payoffs
    corresponding to the following normal form game -
    the row player is Female and the column player is Male:

        +----------+----------+----------+
        |          |Opera     | Football |
        +----------+----------+----------+
        |  Opera   | 3, 2     |   0, 0   |
        +----------+----------+----------+
        | Football | 0, 0     | 2, 3     |
        +----------+----------+----------+

    This game has two pure NE: (Opera, Opera) and (Football, Football)
    """
    macid = MACID(
        [("D_F", "U_F"), ("D_F", "U_M"), ("D_M", "U_M"), ("D_M", "U_F")],
        agent_decisions={
            "M": ["D_F"],
            "F": ["D_M"]
        },
        agent_utilities={
            "M": ["U_F"],
            "F": ["U_M"]
        },
    )

    d_f_domain = ["O", "F"]
    d_m_domain = ["O", "F"]
    agent_f_payoff = np.array([[3, 0], [0, 2]])
    agent_m_payoff = np.array([[2, 0], [0, 3]])

    macid.add_cpds(
        DecisionDomain("D_F", d_f_domain),
        DecisionDomain("D_M", d_m_domain),
        FunctionCPD(
            "U_F",
            lambda d_f, d_m: agent_f_payoff[d_f_domain.index(
                d_f), d_m_domain.index(d_m)]  # type: ignore
        ),
        FunctionCPD(
            "U_M",
            lambda d_f, d_m: agent_m_payoff[d_f_domain.index(
                d_f), d_m_domain.index(d_m)]  # type: ignore
        ),
    )
    return macid
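A standalone check of the pure equilibria claim (plain Python and numpy, reusing the payoff matrices above): a profile is a pure NE when each entry is a best response to the other player's choice.

import numpy as np

moves = ["O", "F"]
female_payoff = np.array([[3, 0], [0, 2]])  # rows: Female's choice
male_payoff = np.array([[2, 0], [0, 3]])    # columns: Male's choice

pure_ne = [
    (moves[i], moves[j])
    for i in range(2)
    for j in range(2)
    if female_payoff[i, j] == female_payoff[:, j].max()
    and male_payoff[i, j] == male_payoff[i, :].max()
]
print(pure_ne)  # [('O', 'O'), ('F', 'F')]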
Example #9
def get_2dec_cid() -> CID:
    cid = CID(
        [("S1", "S2"), ("S1", "D1"), ("D1", "S2"), ("S2", "U"), ("S2", "D2"),
         ("D2", "U")],
        decisions=["D1", "D2"],
        utilities=["U"],
    )
    cpd_s1 = UniformRandomCPD("S1", [0, 1])
    cpd_d1 = DecisionDomain("D1", [0, 1])
    cpd_d2 = DecisionDomain("D2", [0, 1])
    cpd_s2 = FunctionCPD("S2", lambda s1, d1: int(s1 == d1))  # type: ignore
    cpd_u = FunctionCPD("U", lambda s2, d2: int(s2 == d2))  # type: ignore
    cid.add_cpds(cpd_s1, cpd_d1, cpd_s2, cpd_d2, cpd_u)
    return cid
Example #10
def signal() -> MACID:
    macid = MACID(
        [("X", "D1"), ("X", "U2"), ("X", "U1"), ("D1", "U2"), ("D1", "U1"),
         ("D1", "D2"), ("D2", "U1"), ("D2", "U2")],
        agent_decisions={
            0: ["D1"],
            1: ["D2"]
        },
        agent_utilities={
            0: ["U1"],
            1: ["U2"]
        },
    )
    cpd_x = TabularCPD("X", 2, np.array([[0.5], [0.5]]))
    cpd_d1 = DecisionDomain("D1", [0, 1])
    cpd_d2 = DecisionDomain("D1", [0, 1])

    u1_cpd_array = np.array([
        [0, 0, 0, 0, 1, 0, 0, 0],
        [0, 0, 0, 1, 0, 0, 1, 0],
        [0, 1, 0, 0, 0, 0, 0, 0],
        [0, 0, 1, 0, 0, 1, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 1],
        [1, 0, 0, 0, 0, 0, 0, 0],
    ])

    u2_cpd_array = np.array([
        [0, 0, 0, 0, 1, 0, 0, 0],
        [0, 0, 0, 1, 0, 0, 1, 0],
        [0, 1, 0, 0, 0, 0, 0, 0],
        [0, 0, 1, 0, 0, 1, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 1],
        [1, 0, 0, 0, 0, 0, 0, 0],
    ])

    cpd_u1 = TabularCPD("U1",
                        6,
                        u1_cpd_array,
                        evidence=["X", "D1", "D2"],
                        evidence_card=[2, 2, 2])
    cpd_u2 = TabularCPD("U2",
                        6,
                        u2_cpd_array,
                        evidence=["X", "D1", "D2"],
                        evidence_card=[2, 2, 2])

    macid.add_cpds(cpd_x, cpd_d1, cpd_d2, cpd_u1, cpd_u2)

    return macid
Example #11
def two_agent_two_pne() -> MACID:
    """This macim is a simultaneous two player game
    and has a parameterisation that
    corresponds to the following normal
    form game - where the row player is agent 0, and the
    column player is agent 1
        +----------+----------+----------+
        |          | Act(0)   | Act(1)   |
        +----------+----------+----------+
        | Act(0)   | 1, 1     | 4, 2     |
        +----------+----------+----------+
        | Act(1)   | 2, 4     | 3, 3     |
        +----------+----------+----------+
    """
    macid = MACID(
        [("D1", "U1"), ("D1", "U2"), ("D2", "U2"), ("D2", "U1")],
        agent_decisions={
            0: ["D1"],
            1: ["D2"]
        },
        agent_utilities={
            0: ["U1"],
            1: ["U2"]
        },
    )

    cpd_d1 = DecisionDomain("D1", [0, 1])
    cpd_d2 = DecisionDomain("D2", [0, 1])

    cpd_u1 = TabularCPD(
        "U1",
        5,
        np.array([[0, 0, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1],
                  [0, 1, 0, 0]]),
        evidence=["D1", "D2"],
        evidence_card=[2, 2],
    )
    cpd_u2 = TabularCPD(
        "U2",
        5,
        np.array([[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1],
                  [0, 0, 1, 0]]),
        evidence=["D1", "D2"],
        evidence_card=[2, 2],
    )

    macid.add_cpds(cpd_d1, cpd_d2, cpd_u1, cpd_u2)
    return macid
Example #12
def matching_pennies() -> MACID:
    """MACIM representation of the matching pennies game.

    The matching pennies game is a two-player zero-sum game
    with payoffs corresponding to the following normal form game -
    the row player is agent 1 and the column player is agent 2:

        +----------+----------+----------+
        |          |Heads     | Tails    |
        +----------+----------+----------+
        |  Heads   | +1, -1   | -1, +1   |
        +----------+----------+----------+
        |  Tails   | -1, +1   | +1, -1   |
        +----------+----------+----------+

    This game has no pure NE, but has a mixed NE where
    each player chooses Heads or Tails with equal probability.
    """
    macid = MACID(
        [("D1", "U1"), ("D1", "U2"), ("D2", "U2"), ("D2", "U1")],
        agent_decisions={
            1: ["D1"],
            2: ["D2"]
        },
        agent_utilities={
            1: ["U1"],
            2: ["U2"]
        },
    )

    d1_domain = ["H", "T"]
    d2_domain = ["H", "T"]
    agent1_payoff = np.array([[1, -1], [-1, 1]])
    agent2_payoff = np.array([[-1, 1], [1, -1]])

    macid.add_cpds(
        DecisionDomain("D1", d1_domain),
        DecisionDomain("D2", d2_domain),
        FunctionCPD(
            "U1",
            lambda d1, d2: agent1_payoff[d1_domain.index(d1),
                                         d2_domain.index(d2)]),  # type: ignore
        FunctionCPD(
            "U2",
            lambda d1, d2: agent2_payoff[d1_domain.index(d1),
                                         d2_domain.index(d2)]),  # type: ignore
    )
    return macid
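A standalone check of the mixed equilibrium claim (plain numpy, reusing the payoff matrices above): when the opponent plays Heads and Tails with equal probability, both of a player's actions yield the same expected payoff, so neither player can gain by deviating.

import numpy as np

agent1_payoff = np.array([[1, -1], [-1, 1]])
agent2_payoff = np.array([[-1, 1], [1, -1]])
mix = np.array([0.5, 0.5])

print(agent1_payoff @ mix)  # [0. 0.] -- agent 1 is indifferent between H and T
print(mix @ agent2_payoff)  # [0. 0.] -- agent 2 is indifferent between H and T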
Example #13
def get_introduced_bias() -> CID:

    cid = CID(
        [
            ("A", "X"),  # defining the graph's nodes and edges
            ("Z", "X"),
            ("Z", "Y"),
            ("X", "D"),
            ("X", "Y"),
            ("D", "U"),
            ("Y", "U"),
        ],
        decisions=["D"],
        utilities=["U"],
    )

    cpd_a = UniformRandomCPD("A", [0, 1])
    cpd_z = UniformRandomCPD("Z", [0, 1])
    cpd_x = FunctionCPD("X", lambda a, z: a * z)  # type: ignore
    cpd_d = DecisionDomain("D", [0, 1])
    cpd_y = FunctionCPD("Y", lambda x, z: x + z)  # type: ignore
    cpd_u = FunctionCPD("U", lambda d, y: -((d - y) ** 2))  # type: ignore

    cid.add_cpds(cpd_a, cpd_d, cpd_z, cpd_x, cpd_y, cpd_u)
    return cid
Example #14
def prisoners_dilemma() -> MACID:
    """MACIM representation of the canonical prisoner's dilemma.

    The prisoner's dilemma is a simultaneous symmetric two-player game
    with payoffs corresponding to the following normal form game -
    the row player is agent 1 and the column player is agent 2:

        +----------+----------+----------+
        |          |Cooperate | Defect   |
        +----------+----------+----------+
        |Cooperate | -1, -1   | -3, 0    |
        +----------+----------+----------+
        |  Defect  | 0, -3    | -2, -2   |
        +----------+----------+----------+

    This game has one pure NE: (defect, defect)
    """
    macid = MACID(
        [("D1", "U1"), ("D1", "U2"), ("D2", "U2"), ("D2", "U1")],
        agent_decisions={
            1: ["D1"],
            2: ["D2"]
        },
        agent_utilities={
            1: ["U1"],
            2: ["U2"]
        },
    )

    d1_domain = ["c", "d"]
    d2_domain = ["c", "d"]
    agent1_payoff = np.array([[-1, -3], [0, -2]])
    agent2_payoff = np.transpose(agent1_payoff)

    macid.add_cpds(
        DecisionDomain("D1", d1_domain),
        DecisionDomain("D2", d2_domain),
        FunctionCPD(
            "U1",
            lambda d1, d2: agent1_payoff[d1_domain.index(d1),
                                         d2_domain.index(d2)]),  # type: ignore
        FunctionCPD(
            "U2",
            lambda d1, d2: agent2_payoff[d1_domain.index(d1),
                                         d2_domain.index(d2)]),  # type: ignore
    )
    return macid
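A standalone check of the claim above (plain numpy, reusing the payoff matrices): defection strictly dominates cooperation for both agents, so (defect, defect) is the only pure NE.

import numpy as np

agent1_payoff = np.array([[-1, -3], [0, -2]])  # rows: agent 1's choice (c, d)
agent2_payoff = agent1_payoff.T                # columns: agent 2's choice (c, d)

assert (agent1_payoff[1] > agent1_payoff[0]).all()        # d beats c for agent 1
assert (agent2_payoff[:, 1] > agent2_payoff[:, 0]).all()  # d beats c for agent 2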
Example #15
 def make_decision(self, node: str, agent: AgentLabel = 0) -> None:
     """"Turn a chance or utility node into a decision node."""
     self.make_chance(node)
     self.agent_decisions[agent].append(node)
     self.decision_agent[node] = agent
     cpd = self.get_cpds(node)
     if cpd and not isinstance(cpd, DecisionDomain):
         self.add_cpds(DecisionDomain(node, cpd.domain))
Example #16
def two_agents_three_actions() -> MACID:
    """This macim is a representation of a
    game where two players must decide between
    threee different actions simultaneously
    - the row player is agent 1 and the
    column player is agent 2 - the normal form
    representation of the payoffs is as follows:
        +----------+----------+----------+----------+
        |          |  L       |     C    |     R    |
        +----------+----------+----------+----------+
        | T        | 4, 3     | 5, 1     | 6, 2     |
        +----------+----------+----------+----------+
        | M        | 2, 1     | 8, 4     |  3, 6    |
        +----------+----------+----------+----------+
        | B        | 3, 0     | 9, 6     |  2, 8    |
        +----------+----------+----------+----------+
    - The game has one pure NE (T,L)
    """
    macid = MACID(
        [("D1", "U1"), ("D1", "U2"), ("D2", "U2"), ("D2", "U1")],
        agent_decisions={
            1: ["D1"],
            2: ["D2"]
        },
        agent_utilities={
            1: ["U1"],
            2: ["U2"]
        },
    )

    d1_domain = ["T", "M", "B"]
    d2_domain = ["L", "C", "R"]
    cpd_d1 = DecisionDomain("D1", d1_domain)
    cpd_d2 = DecisionDomain("D2", d2_domain)

    agent1_payoff = np.array([[4, 5, 6], [2, 8, 3], [3, 9, 2]])
    agent2_payoff = np.array([[3, 1, 2], [1, 4, 6], [0, 6, 8]])

    cpd_u1 = FunctionCPD("U1", lambda d1, d2: agent1_payoff[d1_domain.index(
        d1), d2_domain.index(d2)])  # type: ignore
    cpd_u2 = FunctionCPD("U2", lambda d1, d2: agent2_payoff[d1_domain.index(
        d1), d2_domain.index(d2)])  # type: ignore

    macid.add_cpds(cpd_d1, cpd_d2, cpd_u1, cpd_u2)
    return macid
Example #17
def get_3node_cid() -> CID:
    cid = CID([("S", "D"), ("S", "U"), ("D", "U")],
              decisions=["D"],
              utilities=["U"])
    cpd_s = UniformRandomCPD("S", [-1, 1])
    cpd_u = FunctionCPD("U", lambda s, d: s * d)  # type: ignore
    cpd_d = DecisionDomain("D", [-1, 1])
    cid.add_cpds(cpd_d, cpd_s, cpd_u)
    return cid
Example #18
def _add_random_cpds(mb: MACIDBase) -> None:
    """
    Add CPDs to the random (MA)CID.
    """
    for node in mb.nodes:
        if node in mb.decisions:
            mb.add_cpds(DecisionDomain(node, [0, 1]))
        else:
            mb.add_cpds(RandomCPD(node))
Example #19
 def remove_all_decision_rules(self) -> None:
     """Remove the decision rules from all decisions in the (MA)CID"""
     for d in self.decisions:
         cpd = self.get_cpds(d)
         if cpd is None:
             raise ValueError(f"decision {d} has not yet been assigned a domain.")
         elif isinstance(cpd, DecisionDomain):
             continue
         else:
             self.add_cpds(DecisionDomain(d, self, cpd.domain))
Example #20
 def make_decision(self, node: str, agent: AgentLabel = 0) -> None:
     """ "Turn a chance or utility node into a decision node.
     - agent specifies which agent the decision node should belong to in a MACID.
     """
     self.make_chance(node)
     self.agent_decisions[agent].append(node)
     self.decision_agent[node] = agent
     cpd = self.get_cpds(node)
     if cpd and not isinstance(cpd, DecisionDomain):
         self.add_cpds(DecisionDomain(node, self, cpd.state_names[node]))
Example #21
def get_quantitative_voi_cid() -> CID:
    cid = CID([("S", "X"), ("X", "D"), ("D", "U"), ("S", "U")],
              decisions=["D"],
              utilities=["U"])
    cpd_s = UniformRandomCPD("S", [-1, 1])
    # X takes the value of S with probability 0.8
    cpd_x = StochasticFunctionCPD("X", lambda s: {s: 0.8}, domain=[-1, 1])
    cpd_d = DecisionDomain("D", [-1, 0, 1])
    cpd_u = FunctionCPD("U", lambda s, d: int(s) * int(d))  # type: ignore
    cid.add_cpds(cpd_s, cpd_x, cpd_d, cpd_u)
    return cid
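A worked calculation of what makes the value of information quantitative here, using only the definitions above (S uniform over {-1, 1}, X equal to S with probability 0.8, U = S * D): observing X and playing D = X earns 0.6 in expectation, while the best policy that ignores X earns 0, so observing X is worth 0.6.

# Best policy given X: play D = X, since P(S = X) = 0.8.
eu_observe_x = 0.8 * 1 + 0.2 * (-1)            # = 0.6
# Best policy ignoring X: any constant d gives E[U] = d * E[S] = 0.
eu_ignore_x = max(d * 0 for d in (-1, 0, 1))   # = 0
print(eu_observe_x - eu_ignore_x)              # 0.6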
Example #22
def two_agent_one_pne() -> MACID:
    """This macim is a simultaneous two player game
    and has a parameterisation that
    corresponds to the following normal
    form game - where the row player is agent 1, and the
    column player is agent 2
        +----------+----------+----------+
        |          | Act(0)   | Act(1)   |
        +----------+----------+----------+
        | Act(0)   | 1, 2     | 3, 0     |
        +----------+----------+----------+
        | Act(1)   | 0, 3     | 2, 2     |
        +----------+----------+----------+
    """
    macid = MACID(
        [("D1", "U1"), ("D1", "U2"), ("D2", "U2"), ("D2", "U1")],
        agent_decisions={
            1: ["D1"],
            2: ["D2"]
        },
        agent_utilities={
            1: ["U1"],
            2: ["U2"]
        },
    )

    cpd_d1 = DecisionDomain("D1", [0, 1])
    cpd_d2 = DecisionDomain("D2", [0, 1])

    agent1_payoff = np.array([[1, 3], [0, 2]])
    agent2_payoff = np.array([[2, 0], [3, 2]])

    cpd_u1 = FunctionCPD("U1",
                         lambda d1, d2: agent1_payoff[d1, d2])  # type: ignore
    cpd_u2 = FunctionCPD("U2",
                         lambda d1, d2: agent2_payoff[d1, d2])  # type: ignore

    macid.add_cpds(cpd_d1, cpd_d2, cpd_u1, cpd_u2)
    return macid
Example #23
def get_5node_cid_with_scaled_utility() -> CID:
    cid = CID(
        [("S1", "D"), ("S1", "U1"), ("S2", "D"), ("S2", "U2"), ("D", "U1"),
         ("D", "U2")],
        decisions=["D"],
        utilities=["U1", "U2"],
    )
    cpd_s1 = UniformRandomCPD("S1", [0, 1])
    cpd_s2 = UniformRandomCPD("S2", [0, 1])
    cpd_u1 = FunctionCPD("U1", lambda s1, d: 10 * int(s1 == d))  # type: ignore
    cpd_u2 = FunctionCPD("U2", lambda s2, d: 2 * int(s2 == d))  # type: ignore
    cpd_d = DecisionDomain("D", [0, 1])
    cid.add_cpds(cpd_d, cpd_s1, cpd_s2, cpd_u1, cpd_u2)
    return cid
Example #24
def umbrella() -> MACID:
    macid = MACID(
        [("W", "F"), ("W", "A"), ("F", "UM"), ("UM", "A")],
        agent_decisions={1: ["UM"]},
        agent_utilities={1: ["A"]},
    )

    cpd_w = TabularCPD("W", 2, np.array([[0.6], [0.4]]))
    cpd_f = TabularCPD("F",
                       2,
                       np.array([[0.8, 0.3], [0.2, 0.7]]),
                       evidence=["W"],
                       evidence_card=[2])
    cpd_um = DecisionDomain("UM", [0, 1])
    cpd_a = TabularCPD("A",
                       3,
                       np.array([[0, 1, 1, 0], [1, 0, 0, 0], [0, 0, 0, 1]]),
                       evidence=["W", "UM"],
                       evidence_card=[2, 2])
    macid.add_cpds(cpd_w, cpd_f, cpd_um, cpd_a)
    return macid
Example #25
 def to_tabular_cpd(self, variable: str, relationship: Union[Relationship, Sequence[Outcome]]) -> TabularCPD:
     if isinstance(relationship, Sequence):
         return DecisionDomain(variable, self.cbn, relationship)
     else:
         return super().to_tabular_cpd(variable, relationship)
Example #26
def get_minimal_cid() -> CID:
    cid = CID([("A", "B")], decisions=["A"], utilities=["B"])
    cpd_a = DecisionDomain("A", [0, 1])
    cpd_b = FunctionCPD("B", lambda a: a)  # type: ignore
    cid.add_cpds(cpd_a, cpd_b)
    return cid