Example #1
def obtain_conEmergence(qc_vs,
                        qr_vs,
                        m_vs,
                        Xsubs,
                        y_vs=[y, y],
                        reward_subs=fc_reward_subs,
                        b_v=3,
                        c_v=5,
                        f_v=1.2,
                        R=R,
                        T=T):
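    # Derive the symbolic separatrix condition for the emergence of
    # cooperation in a two-agent, two-state game (N=2, Nd=0 fixed below).
    # Parameter roles, inferred from usage: qc_vs/qr_vs are per-agent
    # environmental transition probabilities, m_vs the reward parameters
    # (mi, mj), and y_vs the discount factors (symbols or numeric values).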

    # %% R and T
    R = R.subs(reward_subs)
    T = T.subs(Nd, 0).subs(N, 2)
    R = R.subs(Nd, 0).subs(N, 2)

    R = R.subs({b: b_v, c: c_v, f: f_v, mi: m_vs[0], mj: m_vs[1]})
    T = T.subs({qci: qc_vs[0], qcj: qc_vs[1], qri: qr_vs[0], qrj: qr_vs[1]})

    # %%
    V0s = cld.obtain_VIs(R=R, T=T, X=Xg.subs(Xsubs), i=0, ys=y_vs)
    V1s = cld.obtain_VIs(R=R, T=T, X=Xg.subs(Xsubs), i=1, ys=y_vs)

    # %% next state-action values per agent, built from generic value symbols
    V00, V01 = sp.symbols("V^0_0 V^0_1")
    V0sGen = sp.Matrix([[V00, V01]])
    NextV0sa = cld.obtain_NextV0sa(V0sGen, T, Xg.subs(Xsubs))
    NextV0sa.simplify()

    V10, V11 = sp.symbols("V^1_0 V^1_1")
    V1sGen = sp.Matrix([[V10, V11]])
    NextV1sa = cld.obtain_NextV1sa(V1sGen, T, Xg.subs(Xsubs))
    NextV1sa.simplify()

    # %% Risa
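    # expected immediate rewards R^i(s, a); each agent's slice is reshaped
    # into a 2x2 (state x action) matrix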
    Risa = cld.obtain_Risa(R, T, Xg.subs(Xsubs))
    R0sa = sp.Matrix(Risa[0, :, :]).reshape(2, 2)
    R0sa.simplify()
    R1sa = sp.Matrix(Risa[1, :, :]).reshape(2, 2)
    R1sa.simplify()

    # %% Temporal difference error
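    # TD targets per state-action pair: (1 - gamma) * immediate reward
    # plus gamma * expected next value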
    td0sa = (1 - y_vs[0]) * R0sa + y_vs[0] * NextV0sa
    td0sa.simplify()
    td1sa = (1 - y_vs[1]) * R1sa + y_vs[1] * NextV1sa
    td1sa.simplify()

    # %% Ansatz
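    # in state 1 (prosperous), agent 0's TD advantage of action 0 over
    # action 1 is set equal to agent 1's advantage of action 1 over action 0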
    cond = sp.Eq(td0sa[1, 0] - td0sa[1, 1], td1sa[1, 1] - td1sa[1, 0])
    condition = cond.subs(V00, V0s[0]).subs(V01, V0s[1]).\
        subs(V10, V1s[0]).subs(V11, V1s[1])
    condition = condition.simplify()

    return condition
Example #2
def qplot(fig,
          xc,
          yc,
          size,
          m_v,
          y_v,
          Label="",
          plotTrajectories=False,
          test=False):

    figfrac = fig.get_figwidth() / fig.get_figheight()
    gs = gridspec.GridSpec(1, 2)
    wsp = 0.07  # horizontal spacing between the two panels
    gs.update(wspace=wsp,
              left=xc,
              bottom=yc,
              right=xc + 2 * (size + 0.5 * wsp) / figfrac,
              top=yc + size)
    ax1 = fig.add_subplot(gs[0, 0])
    ax2 = fig.add_subplot(gs[0, 1])

    # agent parameters
    alpha = 0.2
    beta = 100.0
    gammas = np.array([y_v, y_v])
    # init agents
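    # convert the symbolic transition tensor T and reward matrix R into
    # numeric float arrays expected by the detAC learning agents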
    agents = detAC(
        np.array(T.subs(q_subs).subs(paramsubsis).tolist()).astype(float),
        np.array(
            R.subs(reward_subs).subs(paramsubsis).subs({
                mi: m_v,
                mj: m_v
            }).tolist()).astype(float), alpha, beta, gammas)

    # plot quiver
    if test:
        pAs = [0.0, 0.3, 0.7, 1.0]  # for testing
    else:
        pAs = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
    ax1, ax2 = plot_quiver(agents, axes=[ax1, ax2], pAs=pAs, sf=0.22)

    # separatrix
    _plot_lamX(ax2, y_v, m_v, style={"color": "red", "ls": "--"})

    # Trajectories
    if plotTrajectories:
        XisaS = [0.25, 0.5, 0.75]
        for X0s0 in XisaS:
            for X1s0 in XisaS:
                X1 = obt_behavior(X000=X0s0, X100=X1s0, X010=X0s0, X110=X1s0)
                rt, fpr = plot_trajectory(agents,
                                          X1,
                                          axes=[ax1, ax2],
                                          Tmax=7500,
                                          color="k",
                                          alpha=0.75,
                                          lw=0.5,
                                          ms=0.5)
    # decorations
    for ax in [ax1, ax2]:
        ax.set_title("")
        ax.set_yticks([0, 1])
        ax.set_xticks([0, 1])
        ax.set_ylim(0, 1)
        ax.set_xlim(0, 1)
        ax.yaxis.labelpad = -10
        ax.xaxis.labelpad = -10
    ax1.set_xlabel(xlabD)
    ax1.set_ylabel(ylab)
    ax2.set_xlabel(xlabP)
    ax2.set_yticklabels(())

    ax1.annotate(r"de$\mathsf{g}$raded", (0.0, 1.0),
                 xycoords="axes fraction",
                 ha="left",
                 va="bottom")
    ax2.annotate(r"$\mathsf{p}$rosperous", (1.0, 1.0),
                 xycoords="axes fraction",
                 ha="right",
                 va="bottom")

    bbox_props = dict(boxstyle="round", fc="gray", alpha=0.9, lw=1)
    ax2.annotate(Label, (-wsp / 2, 1.0 + wsp),
                 xycoords="axes fraction",
                 color="w",
                 ha="center",
                 va="bottom",
                 bbox=bbox_props)

    return ax1, ax2
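# Minimal usage sketch (assumed, not from the source): requires matplotlib
# and the module-level objects (T, R, q_subs, paramsubsis, reward_subs);
# the geometry and parameter values below are illustrative only.
#
#   import matplotlib.pyplot as plt
#   fig = plt.figure(figsize=(9, 4))
#   ax1, ax2 = qplot(fig, xc=0.1, yc=0.15, size=0.7,
#                    m_v=0.8, y_v=0.99, Label="A")
#   fig.savefig("quiver_plot.png")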
Example #3
xlabP = u"$X^1_{\mathsf{pc}}$"
xlabD = u"$X^1_{\mathsf{gc}}$"

# %% parameters

ysym = mi  # the symbol for the y-axis
xsym = yi  # the symbol for the x-axis
xvs = np.linspace(0.94, 0.9999999, 101)  # numeric values for x-axis
reward_subs = fc_reward_subs  # specific reward substitution scheme
state = 1  # the environmental state (1=prosperous)

N_v, Nd_v, f_v, c_v, qc_v, qr_v = 2, 0, 1.2, 5, 0.02, 0.0001  # param. values
paramsubsis = {N: N_v, Nd: Nd_v, f: f_v, c: c_v, qc: qc_v, qr: qr_v}

# obtain solutions to critical curves
sol0, sol1, sol2 = obtain_con_sols_Game(R, T.subs(q_subs), ysym=ysym)

# %% analytical calculation with sympy

# behavior space: separatrix condition for the emergence of cooperation
conEm = obtain_conEmergence(qc_vs=[qc, qc],
                            m_vs=[m, m],
                            qr_vs=[qr, qr],
                            Xsubs={},
                            reward_subs=reward_subs)
# set cooperation in degraded state
X_1o0 = {Xg[0, 0, 0]: 1.0, Xg[1, 0, 0]: 1.0}
# solve the strategy separatrix for the discount factor gamma
gam_of = sp.solve(conEm, y)
# lambdify solution (completion assumed: taking the first solution branch
# with cooperation in the degraded state fixed via X_1o0)
lam_gam = sp.lambdify((Xg[0, 1, 0], Xg[1, 1, 0], qc, qr, m),
                      gam_of[0].subs(X_1o0))