Example 1
    def test_case1(self):
        # Small LP in two scalar variables: minimize -4x - 5y subject to
        # 2x + y <= 3, x + 2y <= 3, x >= 0, y >= 0.
        x = variable()
        y = variable()
        c1 = (2 * x + y <= 3)
        c2 = (x + 2 * y <= 3)
        c3 = (x >= 0)
        c4 = (y >= 0)
        lp1 = op(-4 * x - 5 * y, [c1, c2, c3, c4])
        # Exercise repr/str of the variable and of the problem before and
        # after solving.
        print(repr(x))
        print(str(x))
        print(repr(lp1))
        print(str(lp1))
        lp1.solve()
        print(repr(x))
        print(str(x))
        self.assertTrue(lp1.status == 'optimal')
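The optimum of this small LP is x = y = 1 with objective value -9 (the value test_case2 checks below). If one also wants the test to pin that down, a hedged extension of the method, not part of the original test, could read:

        # Hedged extension: check the known optimum x = y = 1 and the
        # objective value -9 reached by lp1.
        self.assertAlmostEqual(x.value[0], 1.0, places=4)
        self.assertAlmostEqual(y.value[0], 1.0, places=4)
        self.assertAlmostEqual(lp1.objective.value()[0], -9.0, places=4)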
Example 2
    def test_case2(self):
        # The same LP as test_case1 in matrix form, with the four
        # inequalities stacked into A*x <= b; the optimal value is -9.
        x = variable(2)
        A = matrix([[2., 1., -1., 0.], [1., 2., 0., -1.]])
        b = matrix([3., 3., 0., 0.])
        c = matrix([-4., -5.])
        ineq = (A * x <= b)
        lp2 = op(dot(c, x), ineq)
        lp2.solve()
        self.assertAlmostEqual(lp2.objective.value()[0], -9.0, places=4)
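After solving, the optimal point and the multiplier of the stacked inequality are available through the same attributes used in the other examples; a hedged continuation of the test, added here for illustration, might check them:

        # Hedged continuation: the optimal point is x = (1, 1) and the
        # stacked inequality carries a single 4-vector multiplier.
        self.assertTrue(lp2.status == 'optimal')
        self.assertAlmostEqual(x.value[0], 1.0, places=4)
        self.assertAlmostEqual(x.value[1], 1.0, places=4)
        self.assertEqual(len(ineq.multiplier.value), 4)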
Example 3
    def test_case3(self):
        # Norm and penalty approximation problems on random data.
        m, n = 500, 100
        setseed(100)
        A = normal(m, n)
        b = normal(m)

        # minimize ||A*x1 - b||_inf
        x1 = variable(n)
        lp1 = op(max(abs(A * x1 - b)))
        lp1.solve()
        self.assertTrue(lp1.status == 'optimal')

        # minimize ||A*x2 - b||_1
        x2 = variable(n)
        lp2 = op(sum(abs(A * x2 - b)))
        lp2.solve()
        self.assertTrue(lp2.status == 'optimal')

        # minimize a piecewise-linear penalty of the residuals A*x3 - b
        x3 = variable(n)
        lp3 = op(
            sum(max(0,
                    abs(A * x3 - b) - 0.75, 2 * abs(A * x3 - b) - 2.25)))
        lp3.solve()
        self.assertTrue(lp3.status == 'optimal')
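Since every x is feasible for all three problems and ||r||_1 <= m*||r||_inf for any residual r, the optimal 1-norm value can never exceed m times the optimal infinity-norm value. A hedged extra assertion, my addition rather than part of the original test, could encode that sanity check:

        # Hedged sanity check: optimal 1-norm value <= m * optimal inf-norm
        # value (allowing a small solver tolerance).
        self.assertTrue(
            lp2.objective.value()[0] <= m * lp1.objective.value()[0] + 1e-6)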
Example 4
    def test_exceptions(self):
        # Constructing a variable of length 0 must raise a TypeError.
        with self.assertRaises(TypeError):
            x = variable(0)
Example 5
# The robust LP example of section 10.5 (Examples).

from kvxopt import normal, uniform
from kvxopt.modeling import variable, dot, op, sum
from kvxopt.blas import nrm2

m, n = 500, 100
A = normal(m, n)
b = uniform(m)
c = normal(n)

x = variable(n)
op(dot(c, x), A * x + sum(abs(x)) <= b).solve()

x2 = variable(n)
y = variable(n)
op(dot(c, x2), [A * x2 + sum(y) <= b, -y <= x2, x2 <= y]).solve()

print("\nDifference between two solutions %e" % nrm2(x.value - x2.value))
Example 6
# The small LP of section 10.4 (Optimization problems).  

from kvxopt import matrix
from kvxopt.modeling import variable, op, dot

x = variable()  
y = variable()  
c1 = ( 2*x+y <= 3 )  
c2 = ( x+2*y <= 3 )  
c3 = ( x >= 0 )  
c4 = ( y >= 0 )  
lp1 = op(-4*x-5*y, [c1,c2,c3,c4])  
lp1.solve()  
print("\nstatus: %s" %lp1.status) 
print("optimal value: %f"  %lp1.objective.value()[0])
print("optimal x: %f" %x.value[0])
print("optimal y: %f" %y.value[0])  
print("optimal multiplier for 1st constraint: %f" %c1.multiplier.value[0])
print("optimal multiplier for 2nd constraint: %f" %c2.multiplier.value[0])
print("optimal multiplier for 3rd constraint: %f" %c3.multiplier.value[0])
print("optimal multiplier for 4th constraint: %f\n" %c4.multiplier.value[0])

x = variable(2)  
A = matrix([[2.,1.,-1.,0.], [1.,2.,0.,-1.]])  
b = matrix([3.,3.,0.,0.])  
c = matrix([-4.,-5.])  
ineq = ( A*x <= b )  
lp2 = op(dot(c,x), ineq)  
lp2.solve()  

print("\nstatus: %s" %lp2.status)  
Example 7
# The norm and penalty approximation problems of section 10.5 (Examples).

from kvxopt import normal, setseed
from kvxopt.modeling import variable, op, max, sum

setseed(0)
m, n = 500, 100
A = normal(m,n)
b = normal(m)

x1 = variable(n)
prob1=op(max(abs(A*x1+b)))
prob1.solve()

x2 = variable(n)
prob2=op(sum(abs(A*x2+b)))
prob2.solve()

x3 = variable(n)
prob3=op(sum(max(0, abs(A*x3+b)-0.75, 2*abs(A*x3+b)-2.25)))
prob3.solve()

try: import pylab
except ImportError: pass
else:
    pylab.subplot(311)
    pylab.hist(list(A*x1.value + b), m//5)
    pylab.subplot(312)
    pylab.hist(list(A*x2.value + b), m//5)
    pylab.subplot(313)
    pylab.hist(list(A*x3.value + b), m//5)
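The histograms only visualize the residual distributions. A hedged follow-up, added here for illustration, could also print the three optimal objective values for a numerical comparison:

# Hedged follow-up: print the optimal objective values of the three
# approximation problems.
print("inf-norm approximation: %e" % prob1.objective.value()[0])
print("1-norm approximation:   %e" % prob2.objective.value()[0])
print("penalty approximation:  %e" % prob3.objective.value()[0])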
Example 8
# The 1-norm support vector classifier of section 10.5 (Examples).

from kvxopt import normal
from kvxopt.modeling import variable, op, max, sum
from kvxopt.blas import nrm2

m, n = 500, 100
A = normal(m, n)

x = variable(A.size[1], 'x')
u = variable(A.size[0], 'u')
op(sum(abs(x)) + sum(u), [A * x >= 1 - u, u >= 0]).solve()

x2 = variable(A.size[1], 'x')
op(sum(abs(x2)) + sum(max(0, 1 - A * x2))).solve()

print("\nDifference between two solutions: %e" % nrm2(x.value - x2.value))