Example #1
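    # Note: these snippets assume the enclosing unittest.TestCase class and the
    # usual imports (numpy as np, optmod, optalg, unittest, and names imported
    # from optmod such as minimize, sin, cos, sum).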
    def test_solve_NLP_rosenbrock(self):

        N = 500

        x = optmod.VariableMatrix(name='x', shape=(N, 1))

        f = 0.
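        # Chained Rosenbrock objective:
        # sum_i 100*(x[i+1] - x[i]^2)^2 + (1 - x[i])^2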
        for i in range(N - 1):
            f = (f + 100 * (x[i + 1, 0] - x[i, 0] * x[i, 0]) *
                 (x[i + 1, 0] - x[i, 0] * x[i, 0]) +
                 (1 - x[i, 0]) * (1 - x[i, 0]))

        p = optmod.Problem(minimize(f))

        try:
            info = p.solve(solver=optalg.opt_solver.OptSolverIpopt(),
                           parameters={
                               'quiet': True,
                               'max_iter': 1500
                           })
        except ImportError:
            raise unittest.SkipTest('ipopt not available')

        self.assertEqual(info['status'], 'solved')
        self.assertAlmostEqual(f.get_value(), 0.)
        self.assertTrue(np.all(np.abs(x.get_value() - 1.) < 1e-10))
Example #2
    def test_get_variables(self):

        x = optmod.VariableMatrix(name='x', shape=(2, 3))
        y = optmod.VariableScalar(name='y')

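        # Elementwise expression over a 2x3 matrix and a scalar; its variable
        # set is the 6 entries of x plus y (7 in total)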
        f = optmod.sin(x * 3) + optmod.cos(y + 10.) * x

        variables = f.get_variables()
        self.assertEqual(len(variables), 7)

        self.assertSetEqual(
            f.get_variables(),
            set([x[i, j] for i in range(2) for j in range(3)] + [y]))
Example #3
    def test_solve_NLP_beam(self):

        N = 500
        h = 1. / N
        alpha = 350.

        t = optmod.VariableMatrix('t', shape=(N + 1, 1))
        x = optmod.VariableMatrix('x', shape=(N + 1, 1))
        u = optmod.VariableMatrix('u', shape=(N + 1, 1))

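        # Objective: trapezoidal discretization over N intervals of the
        # clnlbeam energy terms u^2 and alpha*cos(t) (same model as Example #9)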
        f = sum([
            0.5 * h * (u[i, 0] * u[i, 0] + u[i + 1, 0] * u[i + 1, 0]) +
            0.5 * alpha * h * (cos(t[i, 0]) + cos(t[i + 1, 0]))
            for i in range(N)
        ])

        constraints = []
        for i in range(N):
            constraints.append(x[i + 1, 0] - x[i, 0] - 0.5 * h *
                               (sin(t[i + 1, 0]) + sin(t[i, 0])) == 0)
            constraints.append(t[i + 1, 0] - t[i, 0] - 0.5 * h *
                               (u[i + 1, 0] - u[i, 0]) == 0)
        constraints.append(t <= 1)
        constraints.append(t >= -1)
        constraints.append(-0.05 <= x)
        constraints.append(x <= 0.05)

        p = optmod.Problem(minimize(f), constraints)

        try:
            info = p.solve(solver=optalg.opt_solver.OptSolverIpopt(),
                           parameters={'quiet': True})
        except ImportError:
            raise unittest.SkipTest('ipopt not available')

        self.assertEqual(info['status'], 'solved')
        self.assertAlmostEqual(f.get_value(), 350.)
Example #4
    def test_bad_array_construction(self):

        x = optmod.VariableMatrix('x', np.random.randn(4, 3))
        y = optmod.VariableScalar('y', 5)

        c0 = x == 1
        c1 = y == 0

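        # Valid inputs: an existing ConstraintArray (c0), a single Constraint
        # (c1), or a list of Constraints ([c1]); anything else should raise
        # TypeError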
        c = optmod.constraint.ConstraintArray(c0)
        c = optmod.constraint.ConstraintArray(c1)
        c = optmod.constraint.ConstraintArray([c1])

        self.assertRaises(TypeError, optmod.constraint.ConstraintArray, [x, y])
        self.assertRaises(TypeError, optmod.constraint.ConstraintArray,
                          [c0, c1])
        self.assertRaises(TypeError, optmod.constraint.ConstraintArray,
                          ['foo'])
Example #5
    def test_matrix_get_fast_evaluator(self):

        xval = np.random.randn(4, 3)
        x = optmod.VariableMatrix(name='x', value=xval)
        y = optmod.VariableScalar(name='y', value=10.)

        self.assertTupleEqual(x.shape, (4, 3))

        f = optmod.sin(3 * x + 10.) * optmod.cos(y - optmod.sum(x * y))

        self.assertTupleEqual(f.shape, (4, 3))

        variables = list(f.get_variables())
        self.assertEqual(len(variables), 13)

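        # Build a fast evaluator bound to this particular variable ordering;
        # e.eval() expects values in the same order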
        e = f.get_fast_evaluator(variables)

        val = e.get_value()
        self.assertTrue(isinstance(val, np.matrix))

        self.assertTupleEqual(val.shape, (4, 3))
        self.assertTrue(np.all(val == 0))

        e.eval(np.array([v.get_value() for v in variables]))

        val = e.get_value()
        val1 = np.sin(3 * xval + 10.) * np.cos(10. - np.sum(xval * 10.))

        self.assertTupleEqual(val.shape, (4, 3))
        self.assertLess(np.linalg.norm(val - val1), 1e-10)

        x = np.array([v.get_value() for v in variables])
        e.eval(x)

        self.assertLess(np.max(np.abs(e.get_value() - f.get_value())), 1e-10)

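        # Timing comparison: the fast evaluator is expected to be at least
        # ~400x faster than repeatedly calling f.get_value()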
        t0 = time.time()
        for i in range(500):
            f.get_value()
        t1 = time.time()
        for i in range(500):
            e.eval(x)
        t2 = time.time()
        self.assertGreater((t1 - t0) / (t2 - t1), 400.)
Example #6
    def test_solve_NLP_unconstrained(self):

        n = 5

        x = optmod.VariableMatrix(name='x',
                                  value=[1.3, 0.7, 0.8, 1.9, 1.2],
                                  shape=(5, 1))

        f = 0.
        for i in range(0, n - 1):
            f = (f + 100 * (x[i + 1, 0] - x[i, 0] * x[i, 0]) *
                 (x[i + 1, 0] - x[i, 0] * x[i, 0]) +
                 (1. - x[i, 0]) * (1. - x[i, 0]))

        # Problem
        p = optmod.Problem(minimize(f))

        # std prob
        std_prob = p.__get_std_problem__()
        self.assertListEqual(std_prob.properties,
                             ['nonlinear', 'continuous', 'optimization'])

        try:
            info = p.solve(solver=optalg.opt_solver.OptSolverIpopt(),
                           parameters={'quiet': True},
                           fast_evaluator=True)
        except ImportError:
            raise unittest.SkipTest('ipopt not available')

        self.assertEqual(info['status'], 'solved')
        self.assertAlmostEqual(f.get_value(), 0, places=4)
        self.assertLess(norm(x.get_value() - np.ones((5, 1)), np.inf), 1e-2)

        x.set_value(np.matrix([1.3, 0.7, 0.8, 1.9, 1.2]).T)

        info = p.solve(solver=optalg.opt_solver.OptSolverIpopt(),
                       parameters={'quiet': True},
                       fast_evaluator=False)

        self.assertEqual(info['status'], 'solved')
        self.assertAlmostEqual(f.get_value(), 0, places=4)
        self.assertLess(norm(x.get_value() - np.ones((5, 1)), np.inf), 1e-2)
Example #7
    def test_sum(self):

        r = np.random.randn(3, 2)

        x = optmod.VariableScalar('x', value=4.)
        y = optmod.VariableMatrix('y', value=r)

        self.assertTupleEqual(y.shape, (3, 2))
        self.assertTrue(np.all(y.get_value() == r))

        # scalar
        f = optmod.sum(x)
        self.assertTrue(f is x)

        self.assertTrue(optmod.sum(x, axis=0) is x)
        self.assertRaises(Exception, optmod.sum, x, 1)

        # matrix
        f = optmod.sum(y)
        self.assertTrue(isinstance(f, optmod.expression.Expression))
        self.assertTrue(f.is_function())
        self.assertEqual(len(f.arguments), 6)
        self.assertEqual(
            str(f), 'y[0,0] + y[0,1] + y[1,0] + y[1,1] + y[2,0] + y[2,1]')

        # matrix, axis=0
        f = optmod.sum(y, axis=0)
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        self.assertTupleEqual(f.shape, (1, 2))
        self.assertEqual(str(f), ('[[ y[0,0] + y[1,0] + y[2,0],' +
                                  ' y[0,1] + y[1,1] + y[2,1] ]]\n'))

        # matrix, axis=1
        f = optmod.sum(y, axis=1)
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        self.assertTupleEqual(f.shape, (3, 1))
        self.assertEqual(
            str(f), ('[[ y[0,0] + y[0,1] ],\n' + ' [ y[1,0] + y[1,1] ],\n' +
                     ' [ y[2,0] + y[2,1] ]]\n'))

        self.assertRaises(Exception, optmod.sum, x, 2)
Example #8
    def test_flatten_to_list(self):

        x = optmod.VariableMatrix('x', np.random.randn(4, 3))

        c = x == 1
        self.assertTrue(isinstance(c, optmod.constraint.ConstraintArray))

        self.assertTupleEqual(c.shape, (4, 3))

        cf = c.flatten()
        self.assertTrue(isinstance(cf, optmod.constraint.ConstraintArray))
        self.assertTupleEqual(cf.shape, (12, ))

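        # Flattening is row-major: constraint (i, j) ends up at index i*3 + j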
        cfl = cf.tolist()
        self.assertTrue(isinstance(cfl, list))
        self.assertEqual(len(cfl), 12)
        for i in range(4):
            for j in range(3):
                self.assertTrue(
                    isinstance(cfl[i * 3 + j], optmod.constraint.Constraint))
                self.assertTrue(cfl[i * 3 + j].lhs is x[i, j])
Example #9
# Ported from https://github.com/JuliaOpt/JuMP.jl/blob/master/examples/clnlbeam.jl

import optmod
import optalg
from optmod import sum, cos, sin, minimize

N = 1000
h = 1./N
alpha = 350.

t = optmod.VariableMatrix('t', shape=(N+1,1))
x = optmod.VariableMatrix('x', shape=(N+1,1))
u = optmod.VariableMatrix('u', shape=(N+1,1))

f = sum([0.5*h*(u[i,0]*u[i,0]+u[i+1,0]*u[i+1,0]) +
         0.5*alpha*h*(cos(t[i,0]) + cos(t[i+1,0]))
         for i in range(N)])

constraints = []
for i in range(N):
    constraints.append(x[i+1,0] - x[i,0] - 0.5*h*(sin(t[i+1,0])+sin(t[i,0])) == 0)
    constraints.append(t[i+1,0] - t[i,0] - 0.5*h*(u[i+1,0] - u[i,0]) == 0)
constraints.append(t <= 1)
constraints.append(t >= -1)
constraints.append(-0.05 <= x)
constraints.append(x <= 0.05)

p = optmod.Problem(minimize(f), constraints)

info = p.solve(solver=optalg.opt_solver.OptSolverIpopt(), fast_evaluator=True)
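
# Report the result; a minimal sketch assuming the attributes used in the
# tests above (info['status'] and f.get_value())
print(info['status'])
print(f.get_value())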