# Transition matrices, one per state and identical across the three states:
# entry [a1, a2, s_] is the probability of reaching state s_ when the players
# choose the action profile (a1, a2).
transitionMatrices = [
    np.array([
        [[1, 0, 0],
         [1, 0, 0],
         [0, 0, 1]],

        [[1, 0, 0],
         [1, 0, 0],
         [0, 0, 1]],

        [[0, 1, 0],
         [0, 1, 0],
         [1, 0, 0]]
    ])
] * 3
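
# Quick sanity check (illustrative): each transition matrix has shape
# (actions of player 1, actions of player 2, states) and assigns a proper
# probability distribution over successor states to every action profile.
assert transitionMatrices[0].shape == (3, 3, 3)
assert np.allclose(transitionMatrices[0].sum(axis=-1), 1)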

equilibrium = dsSolve(payoffMatrices, transitionMatrices, 
                      discountFactors=0.95, plotPath=True)

print(np.round(equilibrium['strategies'],3))
# array([[[0.369 0.298 0.333]
#         [0.369 0.298 0.333]]
#
#        [[0.257 0.409 0.333]
#         [0.480 0.187 0.333]]
#
#        [[0.480 0.187 0.333]
#         [0.257 0.409 0.333]]])

print(np.round(equilibrium['stateValues'],3))
# array([[ 0.     0.   ]
#        [ 0.111 -0.111]
#        [-0.111  0.111]])
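
# For illustration, individual entries can be read off directly:
# equilibrium['strategies'][s, p] is the mixed action of player p in state s,
# and equilibrium['stateValues'][s, p] is that player's value in state s.
print(np.round(equilibrium['strategies'][1, 0], 3))   # [0.257 0.409 0.333]
print(np.round(equilibrium['stateValues'][1, 0], 3))  # 0.111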


# Single-player dynamic program: the stage payoff in state s under action a
# is the utility u of consumption C[s, a].
payoffMatrices = []
for s in range(num_k):
    payoffMatrix = np.nan * np.ones((1, nums_a[s]))
    for a in range(nums_a[s]):
        payoffMatrix[0,a] = u(C[s,a])
    payoffMatrices.append( payoffMatrix )

transitionMatrices = []
for s in range(num_k):
    transitionMatrix = np.zeros((nums_a[s], num_k))
    for a in range(nums_a[s]):
        for s_ in range(num_k):
            if a == s_:
                transitionMatrix[a,s_] = 1
    transitionMatrices.append( transitionMatrix )
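
# The loop above encodes deterministic transitions: choosing action a in any
# state moves the system to state a with probability one.  A sketch of an
# equivalent, more compact construction (each action targets one of the
# num_k states, so nums_a[s] <= num_k):
transitionMatrices = [np.eye(num_k)[:nums_a[s], :] for s in range(num_k)]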

equilibrium = dsSolve(
        payoffMatrices, transitionMatrices, beta, 
        showProgress=True, plotPath=True)

# Dynamic stochastic game with 13 states, 1 players and 109 actions.
# Initial value for homotopy continuation successfully found.
# ==================================================
# Start homotopy continuation
# Step 7006:   t = 29629.89,   s = 121765.69,   ds = 1000.00   
# Final Result:   max|y-y_|/ds = 0.0E+00,   max|H| = 4.9E-09
# Time elapsed = 0:01:37
# End homotopy continuation
# ==================================================
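
# In the progress log above, t is the homotopy parameter, s the arc length
# travelled along the solution path, and ds the current step size; max|H| is
# the residual of the homotopy system at the final point (interpretation based
# on standard homotopy-continuation conventions).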

policies = np.nan * np.ones(num_k)
values = np.nan * np.ones(num_k)
for s in range(num_k):
    # A sketch of the extraction: take the action with the largest probability
    # as the policy and read off the corresponding state value; np.nanargmax
    # guards against possible NaN padding when states differ in their number
    # of feasible actions.
    policies[s] = np.nanargmax(equilibrium['strategies'][s][0])
    values[s] = equilibrium['stateValues'][s][0]


# Random dynamic stochastic game: 2 players, 2 actions per player and state,
# random payoffs and transitions, and player-specific discount factors.
num_s = 3   # number of states (illustrative choice)
num_p = 2
(num_a_min, num_a_max) = (2, 2)
(delta_min, delta_max) = (0.9, 0.95)


nums_a = np.random.randint(low=num_a_min, high=num_a_max+1, size=(num_s,num_p))

payoffMatrices = [np.random.random((num_p, *nums_a[s,:])) for s in range(num_s)]

# Draw unnormalized transition weights, then rescale them so that, for every
# action profile, the probabilities over successor states sum to one.
transitionMatrices = [np.random.exponential(scale=1, size=(*nums_a[s,:], num_s)) for s in range(num_s)]
for s in range(num_s):
    for index, value in np.ndenumerate(np.sum(transitionMatrices[s], axis=-1)):
        transitionMatrices[s][index] *= 1/value
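
# Illustrative check: after normalization, every action profile's transition
# probabilities sum to one in each state.
for s in range(num_s):
    assert np.allclose(transitionMatrices[s].sum(axis=-1), 1.0)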

discountFactors = np.random.uniform(low=delta_min, high=delta_max, size=num_p)


equilibrium = dsSolve(payoffMatrices, transitionMatrices, discountFactors,
                      showProgress=True, plotPath=True)

print(np.round(equilibrium['strategies'], 3))
print(np.round(equilibrium['stateValues'], 3))


## ============================================================================
## end of file
## ============================================================================
# -*- coding: utf-8 -*-

import numpy as np
from dsGameSolver.gameSolver import dsSolve

# One-shot 2x2 game: payoffMatrices[0][p, a0, a1] is the payoff of player p
# when player 0 chooses action a0 and player 1 chooses action a1.
payoffMatrices = [np.array([[[5, 0], [4, 2]], [[5, 4], [0, 2]]])]
equilibrium = dsSolve(payoffMatrices)

print(np.round(equilibrium['strategies'], 3))
# array([[[0. 1.]
#         [0. 1.]]])

print(np.round(equilibrium['stateValues'], 3))
# array([[2. 2.]])
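
# Illustrative cross-check (a sketch): the state values reported above equal
# the expected stage payoffs under the equilibrium strategies.
payoffs = payoffMatrices[0]                   # payoffs[p, a0, a1]
strategies = equilibrium['strategies'][0]     # shape (players, actions)
expectedPayoffs = np.einsum('pij,i,j->p', payoffs, strategies[0], strategies[1])
print(np.round(expectedPayoffs, 3))           # matches the state values above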

## ============================================================================
## end of file
## ============================================================================