Example #1
def delete_dom_act_Pi(game, i):
    no_a_game = list(
        game.shape)  #will contain number of actions available for each player
    no_a_game.pop(
    )  #removes last element of list which simply was n: the number of payoffs in each action profile
    #as the first number is number of actions of last player etc., we turn the list around
    no_a_game = no_a_game[::-1]
    no_a_i = no_a_game.pop(
        i
    )  #remove the number of actions of player i from no_a_game and put it in no_a_i
    if no_a_i == 1:  #if a single action remains it cannot be dominated
        return game
    var_no = np.prod(no_a_game)  #size of the support of mu
    if var_no == 1:  #special case: the support of mu has a single element
        return del_dom_a_Pi_point_belief(game, i, no_a_i)
    temp_game = game
    f = [0.] * var_no  #dummy objective used below
    lb = [0.] * var_no
    ub = [1.] * var_no
    Aeq = [[1.] * var_no]
    beq = (1., )
    j = 0
    while j < no_a_i:
        u_action = payoffi_builder(game, j, i)
        A = []
        b = []
        k = 0
        while k < no_a_i:
            u_other_action = payoffi_builder(game, k, i)
            A.append(u_other_action - u_action)  #elementwise difference
            b.append(0.)
            k += 1
        p = LP(
            f, A=A, b=b, lb=lb, ub=ub, Aeq=Aeq, beq=beq
        )  #feasibility problem with a dummy objective: the constraints require action j to yield a weakly higher payoff than every other action against some belief; if no feasible solution exists, then action j is dominated
        p.iprint = -1
        r = p.minimize('pclp')
        if r.stopcase != 1:  #if no feasible solution was obtained then action is dominated...
            temp_game = np.delete(
                temp_game, j, n - i - 1
            )  #...and therefore action j is removed; recall that the axes list the players "in the wrong order" (n is the module-level number of players)
            count = 0
            z = -1
            while count <= j:  #locate the entry of the module-level undominated list that corresponds to action j, skipping actions removed earlier...
                z += 1
                if undominated[i][z] != -1:
                    count += 1
            undominated[i][z] = -1  #...and mark it as dominated
        j += 1
    return temp_game
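The LP above is only a feasibility test: action j of player i survives if some belief mu over the other players' action profiles makes j weakly better than every alternative. A minimal self-contained sketch of that same test for one two-player game (the payoff matrix U and the tested action are made up for illustration; OpenOpt and its 'pclp' solver are assumed to be available):

import numpy as np
from openopt import LP

# Row player's payoffs: rows are the player's own actions, columns are the opponent's actions.
U = np.array([[3., 0.],
              [2., 2.],
              [0., 3.]])
j = 1                              # action to test
var_no = U.shape[1]                # one belief weight per opponent action

f = [0.] * var_no                  # dummy objective: only feasibility matters
lb, ub = [0.] * var_no, [1.] * var_no
Aeq, beq = [[1.] * var_no], (1.,)  # belief weights sum to one
A = [U[k] - U[j] for k in range(U.shape[0])]  # u(k, mu) - u(j, mu) <= 0 for every k
b = [0.] * U.shape[0]

p = LP(f, A=A, b=b, lb=lb, ub=ub, Aeq=Aeq, beq=beq)
p.iprint = -1
r = p.minimize('pclp')
print('action %d is %s' % (j, 'undominated' if r.stopcase == 1 else 'dominated'))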
import numpy as np
from openopt import LP


def compressed_sensing(x1, trans):
    """L1 compressed sensing
    
    :Parameters:
        x1 : array-like, shape=(n_outputs,)
            input sparse vector
        trans : array-like, shape=(n_outputs, n_inputs)
            transformation matrix
    :Returns:
        decoded vector, shape=(n_inputs,)
    :RType:
        array-like
    """

    # obtain sizes of inputs and outputs
    (n_outputs, n_inputs) = trans.shape

    # objective to minimize: f x^T -> min
    f = np.zeros((n_inputs * 2), dtype=float)
    f[n_inputs:2 * n_inputs] = 1.0

    # constraint: a x^T == b
    a_eq = np.zeros((n_outputs, 2 * n_inputs), dtype=float)
    a_eq[:, 0:n_inputs] = trans

    b_eq = x1

    # constraint: -t <= x <= t
    a = np.zeros((2 * n_inputs, 2 * n_inputs), dtype=float)
    for i in range(n_inputs):
        a[i, i] = -1.0
        a[i, n_inputs + i] = -1.0
        a[n_inputs + i, i] = 1.0
        a[n_inputs + i, n_inputs + i] = -1.0

    b = np.zeros(n_inputs * 2)

    # solve linear programming
    prob = LP(f, Aeq=a_eq, beq=b_eq, A=a, b=b)
    result = prob.minimize('pclp') # glpk, lpSolve... if available

    # print result
#    print "x =", result.xf # arguments at mimimum
#    print "objective =", result.ff # value of objective

    return result.xf[0:n_inputs]
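A usage sketch for the function above (the sizes, sparsity level and random seed are arbitrary; exact recovery is only expected when the signal is sparse enough relative to the number of measurements):

import numpy as np

rng = np.random.RandomState(0)
n_inputs, n_outputs, n_nonzero = 50, 25, 3

x_true = np.zeros(n_inputs)                        # sparse signal to recover
support = rng.choice(n_inputs, n_nonzero, replace=False)
x_true[support] = rng.randn(n_nonzero)

trans = rng.randn(n_outputs, n_inputs)             # random measurement matrix
x1 = trans.dot(x_true)                             # compressed observations

x_hat = compressed_sensing(x1, trans)
print('max reconstruction error: %g' % np.abs(x_hat - x_true).max())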
import numpy as np
import FuncDesigner as fd
from openopt import LP


def compressed_sensing2(x1, trans):
    """L1 compressed sensing
    
    :Parameters:
        x1 : array-like, shape=(n_outputs,)
            input sparse vector
        trans : array-like, shape=(n_outputs, n_inputs)
            transformation matrix
    :Returns:
        decoded vector, shape=(n_inputs,)
    :RType:
        array-like
    """

    # obtain sizes of inputs and outputs
    (n_outputs, n_inputs) = trans.shape

    # define variable
    t = fd.oovar('t', size=n_inputs)
    x = fd.oovar('x', size=n_inputs)

    # objective to minimize: f x^T -> min
    objective = fd.sum(t)

    # init constraints
    constraints = []

    # equality constraint: a_eq x^T = b_eq
    constraints.append(fd.dot(trans, x) == x1)

    # inequality constraint: -t < x < t
    constraints.append(-t <= x)
    constraints.append(x <= t)

    # start_point
    start_point = {x:np.zeros(n_inputs), t:np.zeros(n_inputs)}

    # solve linear programming
    prob = LP(objective, start_point, constraints=constraints)
    result = prob.minimize('pclp') # glpk, lpSolve... if available

    # print result
#    print "x =", result.xf # arguments at mimimum
#    print "objective =", result.ff # value of objective

    return result.xf[x]
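compressed_sensing2 has the same signature and solves the same L1 problem as compressed_sensing above, so the usage sketch shown earlier applies to it unchanged; the only difference is that the constraints are stated symbolically with FuncDesigner oovars instead of being assembled into explicit matrices.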
Example #5
"""
LP example:
15x1 + 8x2 + 80x3 -> min        (1)
subject to
x1 + 2x2 + 3x3 <= 15            (2)
8x1 + 15x2 + 80x3 <= 80         (3)
8x1 + 80x2 + 15x3 <= 150        (4)
100x1 + 10x2 + x3 >= 800        (5)
80x1 + 8x2 + 15x3 = 750         (6)
x1 + 10x2 + 100x3 = 80          (7)
x1 >= 4                         (8)
-8 >= x2 >= -80                 (9)
"""

from numpy import *
from openopt import LP
f = array([15, 8, 80])
A = mat(
    '1 2 3; 8 15 80; 8 80 15; -100 -10 -1')  # numpy.ndarray is also allowed
b = [15, 80, 150, -800]  # numpy.ndarray, matrix etc are also allowed
Aeq = mat('80 8 15; 1 10 100')  # numpy.ndarray is also allowed
beq = (750, 80)

lb = [4, -80, -inf]
ub = [inf, -8, inf]
p = LP(f, A=A, Aeq=Aeq, b=b, beq=beq, lb=lb, ub=ub)
#or p = LP(f=f, A=A, Aeq=Aeq, b=b, beq=beq, lb=lb, ub=ub)

#r = p.minimize('glpk') # CVXOPT must be installed
#r = p.minimize('lpSolve') # lpsolve must be installed
r = p.minimize('pclp')
#search for max: r = p.maximize('glpk') # CVXOPT & glpk must be installed
#r = p.minimize('nlp:ralg', ftol=1e-7, xtol=1e-7, goal='min', plot=1)

print('objFunValue: %f' % r.ff)  # should print 204.48841578
print('x_opt: %s' % r.xf)  # should print [ 9.89355041 -8.          1.5010645 ]
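A quick numerical check of the reported optimum, reusing the arrays defined above (purely illustrative):

import numpy as np
x_opt = np.array([9.89355041, -8.0, 1.5010645])
print(np.dot(f, x_opt))                                          # objective value, ~204.4884
print(np.all(np.asarray(A).dot(x_opt) <= np.asarray(b) + 1e-6))  # A x <= b holds
print(np.allclose(np.asarray(Aeq).dot(x_opt), beq, atol=1e-4))   # Aeq x == beq holds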
Example #6
"""
Sparse LP example
for Nvariables = 25000
(hence Nconstraints = 75000)
glpk peak memory ~70 Mb,
time elapsed = 35.79, CPU time elapsed = 35.0
"""

from openopt import LP
from numpy import arange
from FuncDesigner import *
N = 25000
x, y, z = oovars(3)
startPoint = {
    x: 0,
    y: [0] * N,
    z: [0] * (2 * N)
}  # thus x from R, y from R^N, z from R^2N

objective = sum(x) + 2 * sum(y) + 3 * sum(z)

cons = [
    x < 100, x > -100, y < arange(N), y > -10 - arange(N), z < arange(2 * N),
    z > -100 - arange(2 * N), x + y > 2 - 3 * arange(N),
    x + z > 4 - 5 * arange(2 * N)
]

p = LP(objective, startPoint, constraints=cons)

solver = 'glpk'  # CVXOPT & glpk must be installed
r = p.minimize(solver)

print('objFunValue:%f' % r.ff)
Example #7
A  = []
b = []
player = 0
while player<len(no_action):
    action = 0
    while action < no_action[player]:
        for k in range(0,no_action[player]):
            A.append(multiply(udiff(player,action,k),aik_indicator(player,action)))
            b.append(0.)
        action = action + 1
    player = player +1
#print A,b
p = LP(neg_welfare, A=A,b=b,lb=lb,ub=ub,Aeq=Aeq,beq=beq)#minimize the negative welfare (i.e., maximize welfare) subject to the incentive constraints assembled above: no player can gain by deviating from a recommended action
p.iprint = -1
try:
    r = p.minimize('pclp')
    pminw = LP(welfare, A=A,b=b,lb=lb,ub=ub,Aeq=Aeq,beq=beq)
    pminw.iprint = -1
    rminw = pminw.minimize('pclp')

except Exception:
    print("Solver returns error. Probably, each player has a unique rationalizable action (check with rationalizability solver).")
    quit()
    
###formatting the result back into the same format as the game input
# first: rounding
outr = []
for item in r.xf:
    outr.append(round(item,3))

outrminw = []
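The snippet above depends on helpers that are not shown (no_action, udiff, aik_indicator, welfare, neg_welfare, lb, ub, Aeq, beq). As a guess at the surrounding context, here is a self-contained sketch of the same pattern for a 2x2 game: the LP variables are a distribution mu over action profiles, each inequality says that obeying a recommended action is at least as good as a deviation, and the two LPs bound total welfare from above and below (the payoff matrices are made up for illustration):

import numpy as np
from openopt import LP

# Hypothetical 2x2 game ("chicken"): u[i][a0, a1] is player i's payoff at profile (a0, a1).
u = [np.array([[4., 1.], [5., 0.]]),
     np.array([[4., 5.], [1., 0.]])]

welfare_vec = (u[0] + u[1]).ravel()      # total payoff at each of the four profiles
A, b = [], []
for a in range(2):                       # player 0: recommended row a, deviation k
    for k in range(2):
        row = np.zeros((2, 2))
        row[a, :] = u[0][k, :] - u[0][a, :]
        A.append(row.ravel())
        b.append(0.)
for a in range(2):                       # player 1: recommended column a, deviation k
    for k in range(2):
        row = np.zeros((2, 2))
        row[:, a] = u[1][:, k] - u[1][:, a]
        A.append(row.ravel())
        b.append(0.)

lb, ub = [0.] * 4, [1.] * 4
Aeq, beq = [[1.] * 4], (1.,)             # mu is a probability distribution

pmax = LP(-welfare_vec, A=A, b=b, lb=lb, ub=ub, Aeq=Aeq, beq=beq)
pmax.iprint = -1
pmin = LP(welfare_vec, A=A, b=b, lb=lb, ub=ub, Aeq=Aeq, beq=beq)
pmin.iprint = -1
print('max welfare: %f' % -pmax.minimize('pclp').ff)
print('min welfare: %f' % pmin.minimize('pclp').ff)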