Example 1
    def test_opt(self):
        # Optimization Object
        optProb = Optimization('Paraboloid', objfunc)

        # Design Variables
        optProb.addVarGroup('x', 1, type='c', lower=-50.0, upper=50.0, value=0.0)
        optProb.addVarGroup('y', 1, type='c', lower=-50.0, upper=50.0, value=0.0)
        optProb.finalizeDesignVariables()

        # Objective
        optProb.addObj('obj')

        # Equality Constraint
        optProb.addConGroup('con', 1, lower=-15.0, upper=-15.0, wrt=['x', 'y'], linear=True, jac=con_jac)

        # Check optimization problem:
        print(optProb)


        # Optimizer
        try:
            opt = SNOPT(optOptions={'Major feasibility tolerance': 1e-1})
        except Exception:
            raise unittest.SkipTest('Optimizer not available: SNOPT')

        sol = opt(optProb, sens=sens)

        # Check Solution 7.166667, -7.833333
        self.assertAlmostEqual(sol.variables['x'][0].value, 7.166667, places=6)
        self.assertAlmostEqual(sol.variables['y'][0].value, -7.833333, places=6)
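
This test references objfunc, sens, and con_jac, which are defined at module level but not shown. A minimal sketch consistent with the equality bound of -15 and the asserted optimum (7.166667, -7.833333) is the classic paraboloid; the exact objective is an assumption, not code recovered from the original test:

import numpy as np

def objfunc(xdict):
    x, y = xdict['x'], xdict['y']
    funcs = {'obj': (x - 3.0)**2 + x * y + (y + 4.0)**2 - 3.0}
    return funcs, False

def sens(xdict, funcs):
    x, y = xdict['x'], xdict['y']
    funcsSens = {'obj': {'x': 2.0 * (x - 3.0) + y, 'y': x + 2.0 * (y + 4.0)}}
    return funcsSens, False

# Jacobian of the linear constraint con = -x + y, one 1x1 block per variable
con_jac = {'x': -np.ones((1, 1)), 'y': np.ones((1, 1))}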
Example 2
    def test_autorefine(self):
        # Optimization Object
        optProb = Optimization("TP109 Constraint Problem", objfunc)

        # Design Variables (Removed infinite bounds for ALPSO)
        lower = [0.0, 0.0, -0.55, -0.55, 196, 196, 196, -400, -400]
        upper = [2000, 2000, 0.55, 0.55, 252, 252, 252, 800, 800]
        value = [0, 0, 0, 0, 0, 0, 0, 0, 0]
        optProb.addVarGroup("xvars", 9, lower=lower, upper=upper, value=value)

        # Constraints
        lower = [0, 0, 0, 0, 0, 0, 0, 0]
        upper = [None, None, 0, 0, 0, 0, 0, 0]
        if not USE_LINEAR:
            lower.extend([0, 0])
            upper.extend([None, None])

        optProb.addConGroup("con", len(lower), lower=lower, upper=upper)

        # And the 2 linear constraints
        if USE_LINEAR:
            jac = np.zeros((1, 9))
            jac[0, 3] = 1.0
            jac[0, 2] = -1.0
            optProb.addConGroup("lin_con",
                                1,
                                lower=-0.55,
                                upper=0.55,
                                wrt=["xvars"],
                                jac={"xvars": jac},
                                linear=True)

        # Objective
        optProb.addObj("obj")

        # Check optimization problem:
        # optProb.printSparsity()

        # Global Optimizer: ALPSO
        try:
            opt1 = OPT("ALPSO")
        except Error:
            raise unittest.SkipTest("Optimizer not available:", "ALPSO")

        # Get first Solution
        sol1 = opt1(optProb)

        # Now run the previous solution with SNOPT
        try:
            opt2 = OPT("SNOPT")
        except Error:
            raise unittest.SkipTest("Optimizer not available:", "SNOPT")

        sol2 = opt2(sol1)

        # Check Solution
        assert_allclose(sol2.objectives["obj"].value,
                        0.536206927538e04,
                        atol=1e-2,
                        rtol=1e-2)
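Example 3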
    def test_sens(self):
        termcomp = TerminateComp(max_sens=3)
        optProb = Optimization("Paraboloid", termcomp.objfunc)

        optProb.addVarGroup("x", 1, type="c", lower=-50.0, upper=50.0, value=0.0)
        optProb.addVarGroup("y", 1, type="c", lower=-50.0, upper=50.0, value=0.0)
        optProb.finalizeDesignVariables()

        optProb.addObj("obj")

        optProb.addConGroup("con", 1, lower=-15.0, upper=-15.0, wrt=["x", "y"], linear=True, jac=con_jac)

        test_name = "SNOPT_user_termination_sens"
        optOptions = {
            "Print file": "{}.out".format(test_name),
            "Summary file": "{}_summary.out".format(test_name),
        }
        try:
            opt = SNOPT(options=optOptions)
        except Error:
            raise unittest.SkipTest("Optimizer not available: SNOPT")

        sol = opt(optProb, sens=termcomp.sens)

        self.assertEqual(termcomp.sens_count, 4)

        # Exit code for user requested termination.
        self.assertEqual(sol.optInform["value"], 71)
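
The termination tests rely on a TerminateComp helper that is not shown. A minimal sketch, assuming the pyOptSparse convention that a fail flag of 2 requests user termination (which SNOPT reports as exit code 71), could look like this; the paraboloid objective is the same assumption as in the sketch above:

class TerminateComp:
    """Paraboloid callbacks that request termination after a call budget."""

    def __init__(self, max_obj=1000, max_sens=1000):
        self.max_obj = max_obj
        self.max_sens = max_sens
        self.obj_count = 0
        self.sens_count = 0

    def objfunc(self, xdict):
        x, y = xdict["x"], xdict["y"]
        funcs = {"obj": (x - 3.0)**2 + x * y + (y + 4.0)**2 - 3.0}
        fail = 2 if self.obj_count >= self.max_obj else False  # 2 = user termination
        self.obj_count += 1
        return funcs, fail

    def sens(self, xdict, funcs):
        x, y = xdict["x"], xdict["y"]
        funcsSens = {"obj": {"x": 2.0 * (x - 3.0) + y, "y": x + 2.0 * (y + 4.0)}}
        fail = 2 if self.sens_count >= self.max_sens else False
        self.sens_count += 1
        return funcsSens, fail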
Example 4
    def optimize(self, optName, tol, optOptions={}, storeHistory=False, hotStart=None):
        self.nf = 0  # number of function evaluations
        self.ng = 0  # number of gradient evaluations
        # Optimization Object
        optProb = Optimization("HS15 Constraint Problem", self.objfunc)

        # Design Variables
        lower = [-5.0, -5.0]
        upper = [0.5, 5.0]
        value = [-2, 1.0]
        optProb.addVarGroup("xvars", 2, lower=lower, upper=upper, value=value)

        # Constraints
        lower = [1.0, 0.0]
        upper = [None, None]
        optProb.addConGroup("con", 2, lower=lower, upper=upper)

        # Objective
        optProb.addObj("obj")

        # Check optimization problem:
        print(optProb)

        # Optimizer
        try:
            opt = OPT(optName, options=optOptions)
        except Error:
            raise unittest.SkipTest("Optimizer not available:", optName)

        # Solution
        if storeHistory is True:
            self.histFileName = "%s_hs015_Hist.hst" % (optName.lower())
        elif isinstance(storeHistory, str):
            self.histFileName = storeHistory
        else:
            self.histFileName = None

        sol = opt(optProb, sens=self.sens, storeHistory=self.histFileName, hotStart=hotStart)

        # Test printing solution to screen
        print(sol)

        # Check Solution
        self.fStar1 = 306.5
        self.fStar2 = 360.379767

        self.xStar1 = (0.5, 2.0)
        self.xStar2 = (-0.79212322, -1.26242985)

        dv = sol.getDVs()
        sol_xvars = [sol.variables["xvars"][i].value for i in range(2)]
        assert_allclose(sol_xvars, dv["xvars"], atol=tol, rtol=tol)
        # we check either optimum via try/except
        try:
            assert_allclose(sol.objectives["obj"].value, self.fStar1, atol=tol, rtol=tol)
            assert_allclose(dv["xvars"], self.xStar1, atol=tol, rtol=tol)
        except AssertionError:
            assert_allclose(sol.objectives["obj"].value, self.fStar2, atol=tol, rtol=tol)
            assert_allclose(dv["xvars"], self.xStar2, atol=tol, rtol=tol)
def large_sparse(optimizer="SNOPT", optOptions=None):
    opt_options = {} if optOptions is None else optOptions

    # Optimization Object
    optProb = Optimization("large and sparse", objfunc)

    # Design Variables
    optProb.addVar("x", lower=-100, upper=150, value=0)
    optProb.addVarGroup("y", N, lower=-10 - arange(N), upper=arange(N), value=0)
    optProb.addVarGroup("z", 2 * N, upper=arange(2 * N), lower=-100 - arange(2 * N), value=0)
    # Constraints
    optProb.addCon("con1", upper=100, wrt=["x"])
    optProb.addCon("con2", upper=100)
    optProb.addCon("con3", lower=4, wrt=["x", "z"])
    optProb.addConGroup(
        "lincon",
        N,
        lower=2 - 3 * arange(N),
        linear=True,
        wrt=["x", "y"],
        jac={"x": np.ones((N, 1)), "y": sparse.spdiags(np.ones(N), 0, N, N)},
    )
    optProb.addObj("obj")
    # Optimizer
    opt = OPT(optimizer, options=opt_options)
    optProb.printSparsity()

    return opt, optProb
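
A hypothetical driver for the factory above; sens stands for the module-level gradient callback that pairs with objfunc (neither is shown in this snippet):

opt, optProb = large_sparse(optimizer="SNOPT")
sol = opt(optProb, sens=sens)  # solve, then inspect the objective
print(sol.objectives["obj"].value)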
Example 6
    def optimize(self, optName, tol, optOptions={}, storeHistory=False):
        # Optimization Object
        optProb = Optimization("large and sparse", objfunc)

        # Design Variables
        optProb.addVar("x", lower=-100, upper=150, value=0)
        optProb.addVarGroup("y", N, lower=-10 - arange(N), upper=arange(N), value=0)
        optProb.addVarGroup("z", 2 * N, upper=arange(2 * N), lower=-100 - arange(2 * N), value=0)

        # Constraints
        optProb.addCon("con1", upper=100, wrt=["x"])
        optProb.addCon("con2", upper=100)
        optProb.addCon("con3", lower=4, wrt=["x", "z"])
        optProb.addConGroup(
            "lincon",
            N,
            lower=2 - 3 * arange(N),
            linear=True,
            wrt=["x", "y"],
            jac={"x": np.ones((N, 1)), "y": sparse.spdiags(np.ones(N), 0, N, N)},
        )
        optProb.addObj("obj")

        # Optimizer
        try:
            opt = OPT(optName, options=optOptions)
        except Error:
            raise unittest.SkipTest("Optimizer not available:", optName)

        sol = opt(optProb, sens=sens)

        # Check Solution
        assert_allclose(sol.objectives["obj"].value, 10.0, atol=tol, rtol=tol)

        assert_allclose(sol.variables["x"][0].value, 2.0, atol=tol, rtol=tol)
Example 7
    def optimize(self, optName, optOptions={}, storeHistory=False, places=5):
        # Optimization Object
        optProb = Optimization('HS071 Constraint Problem', objfunc)

        # Design Variables
        x0 = [1.0, 5.0, 5.0, 1.0]
        optProb.addVarGroup('xvars', 4, lower=1, upper=5, value=x0)

        # Constraints
        optProb.addConGroup('con', 2, lower=[25, 40], upper=[None, 40])

        # Objective
        optProb.addObj('obj')

        # Optimizer
        try:
            opt = OPT(optName, options=optOptions)
        except Exception:
            raise unittest.SkipTest('Optimizer not available:', optName)

        sol = opt(optProb, sens=sens)

        # Check Solution
        self.assertAlmostEqual(sol.objectives['obj'].value, 17.0140172, places=places)

        self.assertAlmostEqual(sol.variables['xvars'][0].value, 1.0, places=places)
        self.assertAlmostEqual(sol.variables['xvars'][1].value, 4.743, places=places)
        self.assertAlmostEqual(sol.variables['xvars'][2].value, 3.82115, places=places)
        self.assertAlmostEqual(sol.variables['xvars'][3].value, 1.37941, places=places)

        if hasattr(sol, 'lambdaStar'):
            self.assertAlmostEqual(sol.lambdaStar['con'][0], 0.55229366, places=places)
            self.assertAlmostEqual(sol.lambdaStar['con'][1], -0.16146857, places=places)
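
Here objfunc and sens come from the enclosing module. A minimal sketch of the standard HS071 objective and constraints, consistent with the asserted optimum f* = 17.0140172 (the matching analytic sens callback is omitted, and the exact code is an assumption):

def objfunc(xdict):
    x = xdict['xvars']
    funcs = {
        'obj': x[0] * x[3] * (x[0] + x[1] + x[2]) + x[2],
        'con': [x[0] * x[1] * x[2] * x[3],
                x[0]**2 + x[1]**2 + x[2]**2 + x[3]**2],
    }
    return funcs, False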
Example 8
    def test_opt(self):
        # Optimization Object
        optProb = Optimization("Paraboloid", objfunc)

        # Design Variables
        optProb.addVarGroup("x", 1, varType="c", lower=-50.0, upper=50.0, value=0.0)
        optProb.addVarGroup("y", 1, varType="c", lower=-50.0, upper=50.0, value=0.0)

        # Objective
        optProb.addObj("obj")

        # Equality Constraint
        optProb.addConGroup("con", 1, lower=-15.0, upper=-15.0, wrt=["x", "y"], linear=True, jac=con_jac)

        # Check optimization problem:
        print(optProb)
        test_name = "bugfix_SNOPT_test_opt"
        optOptions = {
            "Major feasibility tolerance": 1e-1,
            "Print file": "{}.out".format(test_name),
            "Summary file": "{}_summary.out".format(test_name),
        }

        # Optimizer
        try:
            opt = SNOPT(options=optOptions)
        except Error:
            raise unittest.SkipTest("Optimizer not available: SNOPT")

        sol = opt(optProb, sens=sens)

        # Check Solution 7.166667, -7.833333
        tol = 1e-6
        assert_allclose(sol.variables["x"][0].value, 7.166667, atol=tol, rtol=tol)
        assert_allclose(sol.variables["y"][0].value, -7.833333, atol=tol, rtol=tol)
Example 9
    def test_autorefine(self):
        # Optimization Object
        optProb = Optimization('TP109 Constraint Problem', objfunc)

        # Design Variables (Removed infinite bounds for ALPSO)
        lower = [0.0, 0.0, -0.55, -0.55, 196, 196, 196, -400, -400]
        upper = [2000, 2000, 0.55, 0.55, 252, 252, 252, 800, 800]
        value = [0, 0, 0, 0, 0, 0, 0, 0, 0]
        optProb.addVarGroup('xvars', 9, lower=lower, upper=upper, value=value)

        # Constraints
        lower = [0, 0, 0, 0, 0, 0, 0, 0]
        upper = [None, None, 0, 0, 0, 0, 0, 0]
        if not USE_LINEAR:
            lower.extend([0, 0])
            upper.extend([None, None])

        optProb.addConGroup('con', len(lower), lower=lower, upper=upper)

        # And the 2 linear constraints
        if USE_LINEAR:
            jac = numpy.zeros((1, 9))
            jac[0, 3] = 1.0
            jac[0, 2] = -1.0
            optProb.addConGroup('lin_con',
                                1,
                                lower=-0.55,
                                upper=0.55,
                                wrt=['xvars'],
                                jac={'xvars': jac},
                                linear=True)

        # Objective
        optProb.addObj('obj')

        # Check optimization problem:
        # optProb.printSparsity()

        # Global Optimizer: ALPSO
        try:
            opt1 = OPT('ALPSO')
        except Exception:
            raise unittest.SkipTest('Optimizer not available:', 'ALPSO')

        # Get first Solution
        sol1 = opt1(optProb)

        # Now run the previous solution with SNOPT
        try:
            opt2 = OPT('SNOPT')
        except Exception:
            raise unittest.SkipTest('Optimizer not available:', 'SNOPT')

        sol2 = opt2(sol1)

        # Check Solution
        self.assertAlmostEqual(sol2.objectives['obj'].value,
                               0.536206927538e+04,
                               places=2)
Example 10
    def optimize(self, optName, optOptions={}, storeHistory=False, places=5):
        # Optimization Object
        optProb = Optimization('HS15 Constraint Problem', self.objfunc)

        # Design Variables
        lower = [-5.0, -5.0]
        upper = [0.5, 5.0]
        value = [-2, 1.0]
        optProb.addVarGroup('xvars', 2, lower=lower, upper=upper, value=value)

        # Constraints
        lower = [1.0, 0.0]
        upper = [None, None]
        optProb.addConGroup('con', 2, lower=lower, upper=upper)

        # Objective
        optProb.addObj('obj')

        # Check optimization problem:
        # print(optProb)

        # Optimizer
        try:
            opt = OPT(optName, options=optOptions)
        except Exception:
            raise unittest.SkipTest('Optimizer not available:', optName)

        # Solution
        if storeHistory:
            histFileName = '%s_hs015_Hist.hst' % (optName.lower())
        else:
            histFileName = None

        sol = opt(optProb, sens=self.sens, storeHistory=histFileName)

        # Test printing solution to screen
        print(sol)

        # Check Solution
        fobj = sol.objectives['obj'].value
        diff = np.min(np.abs([fobj - 306.5, fobj - 360.379767]))
        self.assertAlmostEqual(diff, 0.0, places=places)

        xstar1 = (0.5, 2.0)
        xstar2 = (-0.79212322, -1.26242985)
        x1 = sol.variables['xvars'][0].value
        x2 = sol.variables['xvars'][1].value

        dv = sol.getDVs()
        self.assertAlmostEqual(x1, dv['xvars'][0], places=10)
        self.assertAlmostEqual(x2, dv['xvars'][1], places=10)

        diff = np.min(np.abs([xstar1[0] - x1, xstar2[0] - x1]))
        self.assertAlmostEqual(diff, 0.0, places=places)

        diff = np.min(np.abs([xstar1[1] - x2, xstar2[1] - x2]))
        self.assertAlmostEqual(diff, 0.0, places=places)
Example 11
    def __call__(self, optimizer, options=None):
        """ Run optimization """
        system = self._system
        variables = self._variables

        opt_prob = OptProblem('Optimization', self.obj_func)
        for dv_name in variables['dv'].keys():
            dv = variables['dv'][dv_name]
            dv_id = dv['ID']
            value = dv['value']
            lower = dv['lower']
            upper = dv['upper']
            size = system.vec['u'](dv_id).shape[0]
            opt_prob.addVarGroup(dv_name, size, value=value,
                                 lower=lower, upper=upper)
        opt_prob.finalizeDesignVariables()
        for func_name in variables['func'].keys():
            func = variables['func'][func_name]
            func_id = func['ID']
            lower = func['lower']
            upper = func['upper']
            linear = func['linear']
            get_jacs = func['get_jacs']
            size = system.vec['u'](func_id).shape[0]
            if lower is None and upper is None:
                opt_prob.addObj(func_name)
            else:
                if func['get_jacs'] is None:
                    opt_prob.addConGroup(func_name, size,
                                         lower=lower, upper=upper)
                else:
                    jacs_var = get_jacs()

                    dv_names = []
                    jacs = {}
                    for dv_var in jacs_var:
                        dv_id = self._system.get_id(dv_var)
                        dv_name = self._get_name(dv_id)
                        dv_names.append(dv_name)
                        jacs[dv_name] = jacs_var[dv_var]

                    opt_prob.addConGroup(func_name, size,
                                         wrt=dv_names,
                                         jac=jacs, linear=linear,
                                         lower=lower, upper=upper)

        if options is None:
            options = {}

        opt = Optimizer(optimizer, options=options)
        opt.setOption('Iterations limit', int(1e6))
        #opt.setOption('Verify level', 3)
        sol = opt(opt_prob, sens=self.sens_func, storeHistory='hist.hst')
        print(sol)
Example 12
def pyopt_truss(truss, optimizer='snopt', options={}):
    '''
    Take the given problem and optimize it with the given optimizer
    from the pyOptSparse library of optimizers.
    '''
    # Import the optimization problem
    from pyoptsparse import Optimization, OPT

    class pyOptWrapper:
        def __init__(self, truss):
            self.truss = truss

        def objcon(self, x):
            fail, obj, con = self.truss.evalObjCon(x['x'])
            funcs = {'objective': obj, 'con': con}
            return funcs, fail

        def gobjcon(self, x, funcs):
            g = np.zeros(x['x'].shape)
            A = np.zeros((1, x['x'].shape[0]))
            fail = self.truss.evalObjConGradient(x['x'], g, A)
            sens = {'objective': {'x': g}, 'con': {'x': A}}
            return sens, fail

    # Set the design variables
    wrap = pyOptWrapper(truss)
    prob = Optimization('Truss', wrap.objcon)

    # Determine the initial variable values and their lower/upper
    # bounds in the design problem
    n = len(truss.conn)
    x0 = np.zeros(n)
    lower = np.zeros(n)
    upper = np.zeros(n)
    truss.getVarsAndBounds(x0, lower, upper)

    # Set the variable bounds and initial values
    prob.addVarGroup('x', n, value=x0, lower=lower, upper=upper)

    # Set the constraints
    prob.addConGroup('con', 1, lower=0.0, upper=0.0)

    # Add the objective
    prob.addObj('objective')

    # Optimize the problem
    try:
        opt = OPT(optimizer, options=options)
        sol = opt(prob, sens=wrap.gobjcon)
    except Exception:
        opt = None
        sol = None

    return opt, prob, sol
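
A hypothetical call, assuming a truss object that implements the evalObjCon, evalObjConGradient, and getVarsAndBounds interface used by the wrapper:

opt, prob, sol = pyopt_truss(truss, optimizer='snopt',
                             options={'Major feasibility tolerance': 1e-6})
if sol is not None:
    print(sol.getDVs()['x'])  # optimized design variables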
Example 13
    def optimize(
        self,
        optName,
        tol,
        optOptions={},
        storeHistory=False,
        setDV=None,
        xScale=1.0,
        objScale=1.0,
        conScale=1.0,
        offset=0.0,
        check_solution=True,
    ):
        # Optimization Object
        optProb = Optimization("HS071 Constraint Problem", self.objfunc)

        # Design Variables
        x0 = [1.0, 5.0, 5.0, 1.0]
        optProb.addVarGroup("xvars", 4, lower=1, upper=5, value=x0, scale=xScale, offset=offset)

        # Constraints
        optProb.addConGroup("con", 2, lower=[25, 40], upper=[None, 40], scale=conScale)

        # Objective
        optProb.addObj("obj", scale=objScale)

        # Optimizer
        try:
            opt = OPT(optName, options=optOptions)
        except Error:
            raise unittest.SkipTest("Optimizer not available:", optName)

        if isinstance(setDV, str):
            optProb.setDVsFromHistory(setDV)
        elif isinstance(setDV, dict):
            optProb.setDVs(setDV)
            outDV = optProb.getDVs()
            assert_allclose(setDV["xvars"], outDV["xvars"])

        sol = opt(optProb, sens=self.sens, storeHistory=storeHistory)

        # Check Solution
        if check_solution:
            self.fStar = 17.0140172
            self.xStar = (1.0, 4.743, 3.82115, 1.37941)
            self.lambdaStar = (0.55229366, -0.16146857)
            assert_allclose(sol.objectives["obj"].value, self.fStar, atol=tol, rtol=tol)
            assert_allclose(sol.xStar["xvars"], self.xStar, atol=tol, rtol=tol)

            if hasattr(sol, "lambdaStar"):
                assert_allclose(sol.lambdaStar["con"], self.lambdaStar, atol=tol, rtol=tol)
        return sol
Example 14
    def optimize(self, optName, tol, optOptions={}):
        # Optimization Object
        optProb = Optimization("TP109 Constraint Problem", objfunc)

        # Design Variables
        lower = [0.0, 0.0, -0.55, -0.55, 196, 196, 196, -400, -400]
        upper = [None, None, 0.55, 0.55, 252, 252, 252, 800, 800]
        value = [0, 0, 0, 0, 0, 0, 0, 0, 0]
        optProb.addVarGroup("xvars", 9, lower=lower, upper=upper, value=value)

        # Constraints
        lower = [0, 0, 0, 0, 0, 0, 0, 0]
        upper = [None, None, 0, 0, 0, 0, 0, 0]
        if not USE_LINEAR:
            lower.extend([0, 0])
            upper.extend([None, None])

        optProb.addConGroup("con", len(lower), lower=lower, upper=upper)

        # And the 2 linear constraints
        if USE_LINEAR:
            jac = np.zeros((1, 9))
            jac[0, 3] = 1.0
            jac[0, 2] = -1.0
            optProb.addConGroup("lin_con",
                                1,
                                lower=-0.55,
                                upper=0.55,
                                wrt=["xvars"],
                                jac={"xvars": jac},
                                linear=True)

        # Objective
        optProb.addObj("obj")

        # Check optimization problem:
        # optProb.printSparsity()

        # Optimizer
        try:
            opt = OPT(optName, options=optOptions)
        except Error:
            raise unittest.SkipTest("Optimizer not available:", optName)

        # Solution
        sol = opt(optProb, sens="CS")

        # Check Solution
        assert_allclose(sol.objectives["obj"].value,
                        0.536206927538e04,
                        atol=tol,
                        rtol=tol)
Example 15
    def optimize(self, optName, optOptions={}, places=2):
        # Optimization Object
        optProb = Optimization('TP109 Constraint Problem', objfunc)

        # Design Variables
        lower = [0.0, 0.0, -0.55, -0.55, 196, 196, 196, -400, -400]
        upper = [None, None, 0.55, 0.55, 252, 252, 252, 800, 800]
        value = [0, 0, 0, 0, 0, 0, 0, 0, 0]
        optProb.addVarGroup('xvars', 9, lower=lower, upper=upper, value=value)

        # Constraints
        lower = [0, 0, 0, 0, 0, 0, 0, 0]
        upper = [None, None, 0, 0, 0, 0, 0, 0]
        if not USE_LINEAR:
            lower.extend([0, 0])
            upper.extend([None, None])

        optProb.addConGroup('con', len(lower), lower=lower, upper=upper)

        # And the 2 linear constraints
        if USE_LINEAR:
            jac = numpy.zeros((1, 9))
            jac[0, 3] = 1.0
            jac[0, 2] = -1.0
            optProb.addConGroup('lin_con',
                                1,
                                lower=-0.55,
                                upper=0.55,
                                wrt=['xvars'],
                                jac={'xvars': jac},
                                linear=True)

        # Objective
        optProb.addObj('obj')

        # Check optimization problem:
        # optProb.printSparsity()

        # Optimizer
        try:
            opt = OPT(optName, options=optOptions)
        except Exception:
            raise unittest.SkipTest('Optimizer not available:', optName)

        # Solution
        sol = opt(optProb, sens='CS')

        # Check Solution
        self.assertAlmostEqual(sol.objectives['obj'].value,
                               0.536206927538e+04,
                               places=places)
Example 16
    def optimize(self, optName, optOptions={}, storeHistory=False, places=5):
        # Optimization Object
        optProb = Optimization('large and sparse', objfunc)

        # Design Variables
        optProb.addVar('x', lower=-100, upper=150, value=0)
        optProb.addVarGroup('y',
                            N,
                            lower=-10 - arange(N),
                            upper=arange(N),
                            value=0)
        optProb.addVarGroup('z',
                            2 * N,
                            upper=arange(2 * N),
                            lower=-100 - arange(2 * N),
                            value=0)

        # Constraints
        optProb.addCon('con1', upper=100, wrt=['x'])
        optProb.addCon('con2', upper=100)
        optProb.addCon('con3', lower=4, wrt=['x', 'z'])
        optProb.addConGroup('lincon',
                            N,
                            lower=2 - 3 * arange(N),
                            linear=True,
                            wrt=['x', 'y'],
                            jac={
                                'x': numpy.ones((N, 1)),
                                'y': sparse.spdiags(numpy.ones(N), 0, N, N)
                            })
        optProb.addObj('obj')

        # Optimizer
        try:
            opt = OPT(optName, options=optOptions)
        except Exception:
            raise unittest.SkipTest('Optimizer not available:', optName)

        sol = opt(optProb, sens=sens)

        # Check Solution
        self.assertAlmostEqual(sol.objectives['obj'].value,
                               10.0,
                               places=places)

        self.assertAlmostEqual(sol.variables['x'][0].value, 2.0, places=places)
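Example 17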
    def test_obj(self):
        termcomp = TerminateComp(max_obj=2)
        optProb = Optimization("Paraboloid", termcomp.objfunc)

        optProb.addVarGroup("x",
                            1,
                            varType="c",
                            lower=-50.0,
                            upper=50.0,
                            value=0.0)
        optProb.addVarGroup("y",
                            1,
                            varType="c",
                            lower=-50.0,
                            upper=50.0,
                            value=0.0)

        optProb.addObj("obj")

        optProb.addConGroup("con",
                            1,
                            lower=-15.0,
                            upper=-15.0,
                            wrt=["x", "y"],
                            linear=True,
                            jac=con_jac)

        test_name = "SNOPT_user_termination_obj"
        optOptions = {
            "Print file": f"{test_name}.out",
            "Summary file": f"{test_name}_summary.out",
        }
        try:
            opt = SNOPT(options=optOptions)
        except Error as e:
            if "There was an error importing" in e.message:
                raise unittest.SkipTest("Optimizer not available: SNOPT")
            raise e

        sol = opt(optProb, sens=termcomp.sens)

        self.assertEqual(termcomp.obj_count, 3)

        # Exit code for user requested termination.
        self.assertEqual(sol.optInform["value"], 71)
Example 18
    def test_opt_bug_print_2con(self):
        # Optimization Object
        optProb = Optimization("Paraboloid", objfunc_2con)

        # Design Variables
        optProb.addVarGroup("x", 1, varType="c", lower=-50.0, upper=50.0, value=0.0)
        optProb.addVarGroup("y", 1, varType="c", lower=-50.0, upper=50.0, value=0.0)

        # Objective
        optProb.addObj("obj")

        con_jac2 = {}
        con_jac2["x"] = -np.ones((2, 1))
        con_jac2["y"] = np.ones((2, 1))

        con_jac3 = {}
        con_jac3["x"] = -np.ones((3, 1))
        con_jac3["y"] = np.ones((3, 1))

        # Equality Constraint
        optProb.addConGroup("con", 2, lower=-15.0, upper=-15.0, wrt=["x", "y"], linear=True, jac=con_jac2)
        optProb.addConGroup("con2", 3, lower=-15.0, upper=-15.0, wrt=["x", "y"], linear=True, jac=con_jac3)

        # Check optimization problem:
        print(optProb)

        test_name = "bugfix_SNOPT_bug_print_2con"
        optOptions = {
            "Major feasibility tolerance": 1e-1,
            "Print file": f"{test_name}.out",
            "Summary file": f"{test_name}_summary.out",
        }

        # Optimizer
        try:
            opt = SNOPT(options=optOptions)
        except Error as e:
            if "There was an error importing" in e.message:
                raise unittest.SkipTest("Optimizer not available: SNOPT")
            raise e

        sol = opt(optProb, sens=sens)

        print(sol)
Example 19
    def optimize(self, optName, optOptions={}, storeHistory=False):
        # Optimization Object
        optProb = Optimization('HS15 Constraint Problem', self.objfunc)

        # Design Variables
        lower = [-5, -5]
        upper = [0.5, 5]
        value = [-2, 1]
        optProb.addVarGroup('xvars', 2, lower=lower, upper=upper, value=value)

        # Constraints
        lower = [1, 0]
        upper = [None, None]
        optProb.addConGroup('con', 2, lower=lower, upper=upper)

        # Objective
        optProb.addObj('obj')

        # Check optimization problem:
        # print(optProb)

        # Optimizer
        try:
            opt = OPT(optName, options=optOptions)
        except Exception:
            raise unittest.SkipTest('Optimizer not available:', optName)

        # Solution
        if storeHistory:
            histFileName = '%s_hs015_Hist.hst' % (optName.lower())
        else:
            histFileName = None

        sol = opt(optProb, sens=self.sens, storeHistory=histFileName)

        # Check Solution
        self.assertAlmostEqual(sol.objectives['obj'].value, 306.5)

        self.assertAlmostEqual(sol.variables['xvars'][0].value, 0.5)
        self.assertAlmostEqual(sol.variables['xvars'][1].value, 2.0)
Example 20
    def test_obj(self):
        termcomp = TerminateComp(max_obj=2)
        optProb = Optimization('Paraboloid', termcomp.objfunc)

        optProb.addVarGroup('x',
                            1,
                            type='c',
                            lower=-50.0,
                            upper=50.0,
                            value=0.0)
        optProb.addVarGroup('y',
                            1,
                            type='c',
                            lower=-50.0,
                            upper=50.0,
                            value=0.0)
        optProb.finalizeDesignVariables()

        optProb.addObj('obj')

        optProb.addConGroup('con',
                            1,
                            lower=-15.0,
                            upper=-15.0,
                            wrt=['x', 'y'],
                            linear=True,
                            jac=con_jac)

        try:
            opt = SNOPT()
        except Exception:
            raise unittest.SkipTest('Optimizer not available: SNOPT')

        sol = opt(optProb, sens=termcomp.sens)

        self.assertEqual(termcomp.obj_count, 3)

        # Exit code for user requested termination.
        self.assertEqual(sol.optInform['value'][0], 71)
Example 21
    def optimize(self, x0, alg='IPOPT', options={}):
        opt_options = {}
        opt_options.update(options)
        def objfun(xdict):
            x, fail = self.set_vars(xdict)
            funcs = {
                'obj': self.obj(x),
                'llcon': self.lifting_line_const(x),
                'wcon': self.enough_lift_const(x)
            }
            return funcs, fail
        optProb = Optimization('llOpt', objfun)
        ub = self.get_vars(self.bounds.ub, dic=True)
        lb = self.get_vars(self.bounds.lb, dic=True)
        x0 = self.get_vars(x0, dic=True)
        optProb.addVar('V', upper=ub['V'], lower=lb['V'], value=x0['V'])
        optProb.addVar('b', upper=ub['b'], lower=lb['b'], value=x0['b'])
        optProb.addVarGroup('c', self.N_th, upper=ub['c'], lower=lb['c'], value=x0['c'])
        optProb.addVarGroup('al', self.N_th, upper=ub['al'], lower=lb['al'], value=x0['al'])
        optProb.addVarGroup('A', self.N_A, upper=ub['A'], lower=lb['A'], value=x0['A'])
        optProb.addObj('obj')
        optProb.addConGroup('llcon', self.N_th, lower=0., upper=0.)
        optProb.addCon('wcon', lower=0., upper=0.)

        if alg== "IPOPT":
            opt = OPT(alg, options=options)
            sol = opt(optProb, sens='FD')
        else:
            raise NotImplementedError(f"No routine for algorithm {alg}")

        D = dict(
            al=[a.value for a in sol.variables['al']],
            c=[a.value for a in sol.variables['c']],
            A=[a.value for a in sol.variables['A']],
            b=sol.variables['b'][0].value,
            V=sol.variables['V'][0].value,
        )
        x = self.set_vars(D)[0]
        return x, sol
Example 22
def large_sparse(optimizer='SNOPT', optOptions=None):
    opt_options = {} if optOptions is None else optOptions

    # Optimization Object
    optProb = Optimization('large and sparse', objfunc)

    # Design Variables
    optProb.addVar('x', lower=-100, upper=150, value=0)
    optProb.addVarGroup('y',
                        N,
                        lower=-10 - arange(N),
                        upper=arange(N),
                        value=0)
    optProb.addVarGroup('z',
                        2 * N,
                        upper=arange(2 * N),
                        lower=-100 - arange(2 * N),
                        value=0)
    # Constraints
    optProb.addCon('con1', upper=100, wrt=['x'])
    optProb.addCon('con2', upper=100)
    optProb.addCon('con3', lower=4, wrt=['x', 'z'])
    optProb.addConGroup('lincon',
                        N,
                        lower=2 - 3 * arange(N),
                        linear=True,
                        wrt=['x', 'y'],
                        jac={
                            'x': numpy.ones((N, 1)),
                            'y': sparse.spdiags(numpy.ones(N), 0, N, N)
                        })
    optProb.addObj('obj')
    # Optimizer
    opt = OPT(optimizer, options=opt_options)
    optProb.printSparsity()

    return opt, optProb
Example 23
    def test_opt_bug_print_2con(self):
        # Optimization Object
        optProb = Optimization('Paraboloid', objfunc_2con)

        # Design Variables
        optProb.addVarGroup('x', 1, type='c', lower=-50.0, upper=50.0, value=0.0)
        optProb.addVarGroup('y', 1, type='c', lower=-50.0, upper=50.0, value=0.0)
        optProb.finalizeDesignVariables()

        # Objective
        optProb.addObj('obj')

        con_jac2 = {}
        con_jac2['x'] = -np.ones((2, 1))
        con_jac2['y'] = np.ones((2, 1))

        con_jac3 = {}
        con_jac3['x'] = -np.ones((3, 1))
        con_jac3['y'] = np.ones((3, 1))

        # Equality Constraint
        optProb.addConGroup('con', 2, lower=-15.0, upper=-15.0, wrt=['x', 'y'], linear=True, jac=con_jac2)
        optProb.addConGroup('con2', 3, lower=-15.0, upper=-15.0, wrt=['x', 'y'], linear=True, jac=con_jac3)

        # Check optimization problem:
        print(optProb)

        # Optimizer
        try:
            opt = SNOPT(optOptions={'Major feasibility tolerance': 1e-1})
        except Exception:
            raise unittest.SkipTest('Optimizer not available: SNOPT')

        sol = opt(optProb, sens=sens)

        print(sol)
Example 24
    def __call__(self, optimizer, options=None):
        """ Run optimization """
        system = self._system
        variables = self._variables

        opt_prob = OptProblem('Optimization', self.obj_func)
        for dv_name in variables['dv'].keys():
            dv = variables['dv'][dv_name]
            dv_id = dv['ID']
            if dv['value'] is not None:
                value = dv['value']
            else:
                value = system.vec['u'](dv_id)
            scale = dv['scale']
            lower = dv['lower']
            upper = dv['upper']
            size = system.vec['u'](dv_id).shape[0]
            opt_prob.addVarGroup(dv_name, size, value=value, scale=scale,
                                 lower=lower, upper=upper)
        opt_prob.finalizeDesignVariables()
        for func_name in variables['func'].keys():
            func = variables['func'][func_name]
            func_id = func['ID']
            lower = func['lower']
            upper = func['upper']
            linear = func['linear']
            get_jacs = func['get_jacs']
            sys = func['sys']
            size = system.vec['u'](func_id).shape[0]
            if lower is None and upper is None:
                opt_prob.addObj(func_name)
            else:
                if get_jacs is not None:
                    jacs_var = get_jacs()

                    dv_names = []
                    jacs = {}
                    for dv_var in jacs_var:
                        dv_id = self._system.get_id(dv_var)
                        dv_name = self._get_name(dv_id)
                        dv_names.append(dv_name)
                        jacs[dv_name] = jacs_var[dv_var]

                    opt_prob.addConGroup(func_name, size,
                                         wrt=dv_names,
                                         jac=jacs, linear=linear,
                                         lower=lower, upper=upper)
                elif sys is not None:
                    dv_names = []
                    for dv_name in variables['dv'].keys():
                        dv_id = variables['dv'][dv_name]['ID']
                        if dv_id in sys.vec['u']:
                            dv_names.append(dv_name)
                    opt_prob.addConGroup(func_name, size,
                                         wrt=dv_names,
                                         lower=lower, upper=upper)
                else:
                    opt_prob.addConGroup(func_name, size,
                                         lower=lower, upper=upper)

        if options is None:
            options = {}

        opt = Optimizer(optimizer, options=options)
        opt.setOption('Iterations limit', int(1e6))
        #opt.setOption('Verify level', 3)
        sol = opt(opt_prob, sens=self.sens_func, storeHistory='hist.hst')
        print(sol)

        try:
            exit_status = sol.optInform['value']
            self.exit_flag = 1
            if exit_status > 2: # bad
                self.exit_flag = 0
        except KeyError: #nothing is here, so something bad happened!
            self.exit_flag = 0
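Example 25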
    def run(self, problem):
        """pyOpt execution. Note that pyOpt controls the execution, and the
        individual optimizers (i.e., SNOPT) control the iteration.

        Args
        ----
        problem : `Problem`
            Our parent `Problem`.
        """

        self.pyopt_solution = None
        rel = problem.root._probdata.relevance

        # Metadata Setup
        self.metadata = create_local_meta(None, self.options['optimizer'])
        self.iter_count = 0
        update_local_meta(self.metadata, (self.iter_count,))

        # Initial Run
        with problem.root._dircontext:
            problem.root.solve_nonlinear(metadata=self.metadata)

        opt_prob = Optimization(self.options['title'], self._objfunc)

        # Add all parameters
        param_meta = self.get_desvar_metadata()
        self.indep_list = indep_list = list(param_meta)
        param_vals = self.get_desvars()

        for name, meta in iteritems(param_meta):
            opt_prob.addVarGroup(name, meta['size'], type='c',
                                 value=param_vals[name],
                                 lower=meta['lower'], upper=meta['upper'])

        opt_prob.finalizeDesignVariables()

        # Figure out parameter subsparsity for paramcomp index connections.
        # sub_param_conns is empty unless there are some index conns.
        # full_param_conns gets filled with the connections to the entire
        # parameter so that those params can be filtered out of the sparse
        # set if the full path is also relevant
        sub_param_conns = {}
        full_param_conns = {}
        for name in indep_list:
            pathname = problem.root.unknowns.metadata(name)['pathname']
            sub_param_conns[name] = {}
            full_param_conns[name] = set()
            for target, info in iteritems(problem.root.connections):
                src, indices = info
                if src == pathname:
                    if indices is not None:
                        # Need to map the connection indices onto the desvar
                        # indices if both are declared.
                        dv_idx = param_meta[name].get('indices')
                        indices = set(indices)
                        if dv_idx is not None:
                            indices.intersection_update(dv_idx)
                            ldv_idx = list(dv_idx)
                            mapped_idx = [ldv_idx.index(item) for item in indices]
                            sub_param_conns[name][target] = mapped_idx
                        else:
                            sub_param_conns[name][target] = indices
                    else:
                        full_param_conns[name].add(target)

        # Add all objectives
        objs = self.get_objectives()
        self.quantities = list(objs)
        self.sparsity = OrderedDict()
        self.sub_sparsity = OrderedDict()
        for name in objs:
            opt_prob.addObj(name)
            self.sparsity[name] = self.indep_list

        # Calculate and save gradient for any linear constraints.
        lcons = self.get_constraints(lintype='linear').keys()
        if len(lcons) > 0:
            self.lin_jacs = problem.calc_gradient(indep_list, lcons,
                                                  return_format='dict')
            #print("Linear Gradient")
            #print(self.lin_jacs)

        # Add all equality constraints
        econs = self.get_constraints(ctype='eq', lintype='nonlinear')
        con_meta = self.get_constraint_metadata()
        self.quantities += list(econs)

        for name in self.get_constraints(ctype='eq'):
            meta = con_meta[name]
            size = meta['size']
            lower = upper = meta['equals']

            # Sparsify Jacobian via relevance
            rels = rel.relevant[name]
            wrt = rels.intersection(indep_list)
            self.sparsity[name] = wrt

            if meta['linear']:
                opt_prob.addConGroup(name, size, lower=lower, upper=upper,
                                     linear=True, wrt=wrt,
                                     jac=self.lin_jacs[name])
            else:

                jac = self._build_sparse(name, wrt, size, param_vals,
                                         sub_param_conns, full_param_conns, rels)
                opt_prob.addConGroup(name, size, lower=lower, upper=upper,
                                     wrt=wrt, jac=jac)

        # Add all inequality constraints
        incons = self.get_constraints(ctype='ineq', lintype='nonlinear')
        self.quantities += list(incons)

        for name in self.get_constraints(ctype='ineq'):
            meta = con_meta[name]
            size = meta['size']

            # Bounds - double sided is supported
            lower = meta['lower']
            upper = meta['upper']

            # Sparsify Jacobian via relevance
            rels = rel.relevant[name]
            wrt = rels.intersection(indep_list)
            self.sparsity[name] = wrt

            if meta['linear']:
                opt_prob.addConGroup(name, size, upper=upper, lower=lower,
                                     linear=True, wrt=wrt,
                                     jac=self.lin_jacs[name])
            else:

                jac = self._build_sparse(name, wrt, size, param_vals,
                                         sub_param_conns, full_param_conns, rels)
                opt_prob.addConGroup(name, size, upper=upper, lower=lower,
                                     wrt=wrt, jac=jac)

        # Instantiate the requested optimizer
        optimizer = self.options['optimizer']
        try:
            exec('from pyoptsparse import %s' % optimizer)
        except ImportError:
            msg = "Optimizer %s is not available in this installation." % \
                   optimizer
            raise ImportError(msg)

        optname = vars()[optimizer]
        opt = optname()

        #Set optimization options
        for option, value in self.opt_settings.items():
            opt.setOption(option, value)

        self._problem = problem

        # Execute the optimization problem
        if self.options['pyopt_diff']:
            # Use pyOpt's internal finite difference
            fd_step = problem.root.fd_options['step_size']
            sol = opt(opt_prob, sens='FD', sensStep=fd_step, storeHistory=self.hist_file)
        else:
            # Use OpenMDAO's differentiator for the gradient
            sol = opt(opt_prob, sens=self._gradfunc, storeHistory=self.hist_file)

        self._problem = None

        # Print results
        if self.options['print_results']:
            print(sol)

        # Pull optimal parameters back into framework and re-run, so that
        # framework is left in the right final state
        dv_dict = sol.getDVs()
        for name in indep_list:
            val = dv_dict[name]
            self.set_desvar(name, val)

        with self.root._dircontext:
            self.root.solve_nonlinear(metadata=self.metadata)

        # Save the most recent solution.
        self.pyopt_solution = sol
        try:
            exit_status = sol.optInform['value']
            self.exit_flag = 1
            if exit_status > 2: # bad
                self.exit_flag = 0
        except KeyError: #nothing is here, so something bad happened!
            self.exit_flag = 0
Example 26
 y0 = yt
 area = 2
 xlow = points[0]-spacing*area
 xupp = points[-1]+spacing*area
 ylow = points[0]-spacing*area
 yupp = points[-1]+spacing*area
 
 optProb = Optimization('VAWT_Power', obj_func)
 optProb.addObj('obj')
 
 n = np.size(x0)
 optProb.addVarGroup('xvars', n, 'c', lower=xlow, upper=xupp, value=x0)
 optProb.addVarGroup('yvars', n, 'c', lower=ylow, upper=yupp, value=y0)
 
 num_cons_sep = (n-1)*n//2  # integer division: one separation constraint per pair
 optProb.addConGroup('sep', num_cons_sep, lower=0, upper=None)
 
 opt = SNOPT()
 opt.setOption('Scale option', 0)
 res = opt(optProb, sens=None)
 print(res)
 
 pow = np.array(-1*res.fStar)
 xf = res.xStar['xvars']
 yf = res.xStar['yvars']
 power_turb = funcs['power_turb']
 veleff = funcs['veleff']
 
 print('The power is:', pow, 'kJ')
 print('The isolated power is:', power_iso*np.size(xt), 'kJ')
 print('The x-locations:', xf)
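Example 27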
def optimize(func, x0, lb, ub, optimizer, A=[], b=[], Aeq=[], beq=[], args=[]):

    global fcalls  # keep track of function calls myself, seems to be an error in pyopt
    fcalls = 1

    # evaluate initial point to get size information and determine if gradients are included
    out = func(x0, *args)
    if len(out) == 4:
        gradients = True
        f, c, _, _ = out
    else:
        gradients = False
        f, c = out

    nx = len(x0)
    nc = len(c)
    nlin = len(b)
    nleq = len(beq)
    if hasattr(f, "__len__"):
        nf = len(f)  # multiobjective
    else:
        nf = 1


    def objcon(xdict):

        global fcalls
        fcalls += 1

        x = xdict['x']
        outputs = {}

        if gradients:
            f, c, df, dc = func(x, *args)
            # these gradients aren't directly used in this function but we will save them for later
            outputs['g-obj'] = df
            outputs['g-con'] = dc
            outputs['g-x'] = x
        else:
            f, c = func(x, *args)

        outputs['con'] = c

        if nf == 1:
            outputs['obj'] = f
        else:  # multiobjective
            for i in range(nf):
                outputs['obj%d' % i] = f[i]

        fail = False

        return outputs, fail


    def grad(xdict, fdict):

        # check if this was the x-location we just evaluated from func (should never happen)
        if not np.array_equal(xdict['x'], fdict['g-x']):
            f, c, df, dc = func(xdict['x'], *args)
            global fcalls
            fcalls += 1
        else:
            df = fdict['g-obj']
            dc = fdict['g-con']

        # populate gradients (the multiobjective optimizers don't use gradients so no change needed here)
        gout = {}
        gout['obj'] = {}
        gout['obj']['x'] = df
        gout['con'] = {}
        gout['con']['x'] = dc

        fail = False

        return gout, fail




    # setup problem
    optProb = Optimization('optimization', objcon)

    if nf == 1:
        optProb.addObj('obj')
    else:  # multiobjective
        for i in range(nf):
            optProb.addObj('obj%d' % i)

    optProb.addVarGroup('x', nx, lower=lb, upper=ub, value=x0)

    # add nonlinear constraints
    if nc > 0:
        optProb.addConGroup('con', nc, upper=0.0)

    # add linear inequality constraints
    if nlin > 0:
        optProb.addConGroup('linear-ineq', nlin, upper=b, linear=True, jac={'x': A})

    # add linear equality constraints
    if nleq > 0:
        optProb.addConGroup('linear-eq', nleq, upper=beq, lower=beq, linear=True, jac={'x': Aeq})

    # check if gradients defined
    if gradients:
        sens = grad
    else:
        sens = 'FDR'  # forward diff with relative step size

    with warnings.catch_warnings():  # FIXME: ignore the FutureWarning until fixed
        warnings.simplefilter("ignore")

        # run optimization
        sol = optimizer(optProb, sens=sens)


    # save solution
    xstar = sol.xStar['x']
    fstar = sol.fStar

    info = {}
    info['fcalls'] = fcalls
    info['time'] = sol.optTime
    if sol.optInform:
        info['code'] = sol.optInform

    # FIXME: bug in how output of NLPQLP is returned
    if optimizer.name == 'NLPQLP':
        xtemp = xstar
        xstar = np.zeros(nx)
        for i in range(nx):
            xstar[i] = xtemp[i, 0]

    # FIXME: because of bug exists in all except SNOPT, also none return cstar
    # if optimizer.name != 'SNOPT':
    if gradients:
        fstar, cstar, _, _ = func(xstar, *args)
    else:
        fstar, cstar = func(xstar, *args)

    # FIXME: handle multiobjective NSGA2
    if nf > 1 and optimizer.name == 'NSGA-II':
        xstar = []
        fstar = []
        cstar = []
        with open('nsga2_final_pop.out') as f:
            # skip first two lines
            f.readline()
            f.readline()
            for line in f:
                values = line.split()
                rank = values[nx + nc + nf + 1]
                if rank == "1":
                    fstar.append(np.array(values[:nf]).astype(float))
                    cstar.append(np.array(values[nf:nf+nc]).astype(float))
                    xstar.append(np.array(values[nf+nc:nf+nc+nx]).astype(float))

        xstar = np.array(xstar)
        fstar = np.array(fstar)
        cstar = -np.array(cstar)  # negative sign because of nsga definition

    if nc > 0:
        info['max-c-vio'] = max(np.amax(cstar), 0.0)

    return xstar, fstar, info
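
A hypothetical call of this wrapper, assuming pyoptsparse's SLSQP is installed and a user function returning (f, c) with constraints posed as c <= 0:

import numpy as np
from pyoptsparse import SLSQP

def quad(x):
    f = x[0]**2 + x[1]**2      # objective
    c = [1.0 - x[0] - x[1]]    # feasible when x0 + x1 >= 1
    return f, c

xstar, fstar, info = optimize(quad, np.array([2.0, 2.0]),
                              lb=[-5.0, -5.0], ub=[5.0, 5.0],
                              optimizer=SLSQP())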
Example 28
    def run(self):
        """
        Execute pyOptSparse.

        Note that pyOpt controls the execution, and the individual optimizers
        (e.g., SNOPT) control the iteration.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        problem = self._problem()
        model = problem.model
        relevant = model._relevant
        self.pyopt_solution = None
        self._total_jac = None
        self.iter_count = 0
        fwd = problem._mode == 'fwd'
        optimizer = self.options['optimizer']
        self._quantities = []

        self._check_for_missing_objective()

        # Only need initial run if we have linear constraints or if we are using an optimizer that
        # doesn't perform one initially.
        con_meta = self._cons
        model_ran = False
        if optimizer in run_required or np.any(
            [con['linear'] for con in self._cons.values()]):
            with RecordingDebugging(self._get_name(), self.iter_count,
                                    self) as rec:
                # Initial Run
                model.run_solve_nonlinear()
                rec.abs = 0.0
                rec.rel = 0.0
                model_ran = True
            self.iter_count += 1

        # compute dynamic simul deriv coloring or just sparsity if option is set
        if c_mod._use_total_sparsity:
            coloring = None
            if self._coloring_info['coloring'] is None and self._coloring_info[
                    'dynamic']:
                coloring = c_mod.dynamic_total_coloring(
                    self,
                    run_model=not model_ran,
                    fname=self._get_total_coloring_fname())

            if coloring is not None:
                # if the improvement wasn't large enough, don't use coloring
                pct = coloring._solves_info()[-1]
                info = self._coloring_info
                if info['min_improve_pct'] > pct:
                    info['coloring'] = info['static'] = None
                    simple_warning(
                        "%s: Coloring was deactivated.  Improvement of %.1f%% was less "
                        "than min allowed (%.1f%%)." %
                        (self.msginfo, pct, info['min_improve_pct']))

        comm = None if isinstance(problem.comm, FakeComm) else problem.comm
        opt_prob = Optimization(self.options['title'],
                                weak_method_wrapper(self, '_objfunc'),
                                comm=comm)

        # Add all design variables
        param_meta = self._designvars
        self._indep_list = indep_list = list(param_meta)
        param_vals = self.get_design_var_values()

        for name, meta in param_meta.items():
            opt_prob.addVarGroup(name,
                                 meta['size'],
                                 type='c',
                                 value=param_vals[name],
                                 lower=meta['lower'],
                                 upper=meta['upper'])

        opt_prob.finalizeDesignVariables()

        # Add all objectives
        objs = self.get_objective_values()
        for name in objs:
            opt_prob.addObj(name)
            self._quantities.append(name)

        # Calculate and save derivatives for any linear constraints.
        lcons = [key for (key, con) in con_meta.items() if con['linear']]
        if len(lcons) > 0:
            _lin_jacs = self._compute_totals(of=lcons,
                                             wrt=indep_list,
                                             return_format='dict')
            # convert all of our linear constraint jacs to COO format. Otherwise pyoptsparse will
            # do it for us and we'll end up with a fully dense COO matrix and very slow evaluation
            # of linear constraints!
            to_remove = []
            for jacdct in _lin_jacs.values():
                for n, subjac in jacdct.items():
                    if isinstance(subjac, np.ndarray):
                        # we can safely use coo_matrix to automatically convert the ndarray
                        # since our linear constraint jacs are constant, so zeros won't become
                        # nonzero during the optimization.
                        mat = coo_matrix(subjac)
                        if mat.row.size > 0:
                            # convert to 'coo' format here to avoid an emphatic warning
                            # by pyoptsparse.
                            jacdct[n] = {
                                'coo': [mat.row, mat.col, mat.data],
                                'shape': mat.shape
                            }

        # Add all equality constraints
        for name, meta in con_meta.items():
            if meta['equals'] is None:
                continue
            size = meta['size']
            lower = upper = meta['equals']
            if fwd:
                wrt = [v for v in indep_list if name in relevant[v]]
            else:
                rels = relevant[name]
                wrt = [v for v in indep_list if v in rels]

            if meta['linear']:
                jac = {w: _lin_jacs[name][w] for w in wrt}
                opt_prob.addConGroup(name,
                                     size,
                                     lower=lower,
                                     upper=upper,
                                     linear=True,
                                     wrt=wrt,
                                     jac=jac)
            else:
                if name in self._res_jacs:
                    resjac = self._res_jacs[name]
                    jac = {n: resjac[n] for n in wrt}
                else:
                    jac = None
                opt_prob.addConGroup(name,
                                     size,
                                     lower=lower,
                                     upper=upper,
                                     wrt=wrt,
                                     jac=jac)
                self._quantities.append(name)

        # Add all inequality constraints
        for name, meta in con_meta.items():
            if meta['equals'] is not None:
                continue
            size = meta['size']

            # Bounds - double sided is supported
            lower = meta['lower']
            upper = meta['upper']

            if fwd:
                wrt = [v for v in indep_list if name in relevant[v]]
            else:
                rels = relevant[name]
                wrt = [v for v in indep_list if v in rels]

            if meta['linear']:
                jac = {w: _lin_jacs[name][w] for w in wrt}
                opt_prob.addConGroup(name,
                                     size,
                                     upper=upper,
                                     lower=lower,
                                     linear=True,
                                     wrt=wrt,
                                     jac=jac)
            else:
                if name in self._res_jacs:
                    resjac = self._res_jacs[name]
                    jac = {n: resjac[n] for n in wrt}
                else:
                    jac = None
                opt_prob.addConGroup(name,
                                     size,
                                     upper=upper,
                                     lower=lower,
                                     wrt=wrt,
                                     jac=jac)
                self._quantities.append(name)

        # Instantiate the requested optimizer
        try:
            _tmp = __import__('pyoptsparse', globals(), locals(), [optimizer],
                              0)
            opt = getattr(_tmp, optimizer)()

        except Exception as err:
            # Change whatever pyopt gives us to an ImportError with a readable
            # message, chaining the original exception so its traceback survives.
            msg = "Optimizer %s is not available in this installation." % optimizer
            raise ImportError(msg) from err

        # Process any default optimizer-specific settings.
        if optimizer in DEFAULT_OPT_SETTINGS:
            for name, value in DEFAULT_OPT_SETTINGS[optimizer].items():
                if name not in self.opt_settings:
                    self.opt_settings[name] = value

        # Set optimization options
        for option, value in self.opt_settings.items():
            opt.setOption(option, value)

        # Execute the optimization problem
        if self.options['gradient method'] == 'pyopt_fd':

            # Use pyOpt's internal finite difference
            # TODO: Need to get this from OpenMDAO
            # fd_step = problem.model.deriv_options['step_size']
            fd_step = 1e-6
            sol = opt(opt_prob,
                      sens='FD',
                      sensStep=fd_step,
                      storeHistory=self.hist_file,
                      hotStart=self.hotstart_file)

        elif self.options['gradient method'] == 'snopt_fd':
            if self.options['optimizer'] == 'SNOPT':

                # Use SNOPT's internal finite difference
                # TODO: Need to get this from OpenMDAO
                # fd_step = problem.model.deriv_options['step_size']
                fd_step = 1e-6
                sol = opt(opt_prob,
                          sens=None,
                          sensStep=fd_step,
                          storeHistory=self.hist_file,
                          hotStart=self.hotstart_file)

            else:
                raise Exception(
                    "SNOPT's internal finite difference can only be used with SNOPT"
                )
        else:

            # Use OpenMDAO's differentiator for the gradient
            sol = opt(opt_prob,
                      sens=weak_method_wrapper(self, '_gradfunc'),
                      storeHistory=self.hist_file,
                      hotStart=self.hotstart_file)

        # Print results
        if self.options['print_results']:
            print(sol)

        # Pull optimal parameters back into framework and re-run, so that
        # framework is left in the right final state
        dv_dict = sol.getDVs()
        for name in indep_list:
            self.set_design_var(name, dv_dict[name])

        with RecordingDebugging(self._get_name(), self.iter_count,
                                self) as rec:
            model.run_solve_nonlinear()
            rec.abs = 0.0
            rec.rel = 0.0
        self.iter_count += 1

        # Save the most recent solution.
        self.pyopt_solution = sol

        try:
            exit_status = sol.optInform['value']
            self.fail = False

            # These are various failed statuses.
            if optimizer == 'IPOPT':
                if exit_status not in {0, 1}:
                    self.fail = True
            elif exit_status > 2:
                self.fail = True

        except KeyError:
            # optimizers other than pySNOPT may not populate this dict
            pass

        # revert signal handler to cached version
        sigusr = self.options['user_terminate_signal']
        if sigusr is not None:
            signal.signal(sigusr, self._signal_cache)
            self._signal_cache = None  # to prevent memory leak test from failing

        return self.fail
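The COO conversion in the driver above is what keeps linear-constraint evaluation sparse. As a minimal standalone sketch (array contents illustrative), this is the dict layout pyoptsparse accepts for a constant linear-constraint Jacobian:

# Hedged sketch: build the {'coo': [rows, cols, data], 'shape': ...} dict.
import numpy as np
from scipy.sparse import coo_matrix

subjac = np.array([[1.0, 0.0, -1.0],
                   [0.0, 2.0, 0.0]])          # constant linear jac (dense)
mat = coo_matrix(subjac)                      # sparsify once, up front
jac = {'coo': [mat.row, mat.col, mat.data],   # row indices, col indices, values
       'shape': mat.shape}                    # (ncon, ndv)
# opt_prob.addConGroup('lin_con', 2, lower=0.0, upper=0.0,
#                      linear=True, wrt=['x'], jac={'x': jac})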
Esempio n. 29
    def run(self):
        """
        Execute pyOptSparse.

        Note that pyOpt controls the execution, and the individual optimizers
        (e.g., SNOPT) control the iteration.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        problem = self._problem
        model = problem.model
        relevant = model._relevant
        self.pyopt_solution = None
        self._total_jac = None
        self.iter_count = 0
        fwd = problem._mode == 'fwd'
        optimizer = self.options['optimizer']

        # Only need initial run if we have linear constraints or if we are using an optimizer that
        # doesn't perform one initially.
        con_meta = self._cons
        model_ran = False
        if optimizer in run_required or np.any([con['linear'] for con in itervalues(self._cons)]):
            with RecordingDebugging(self._get_name(), self.iter_count, self) as rec:
                # Initial Run
                model.run_solve_nonlinear()
                rec.abs = 0.0
                rec.rel = 0.0
                model_ran = True
            self.iter_count += 1

        # compute dynamic simul deriv coloring or just sparsity if option is set
        if coloring_mod._use_sparsity:
            if self.options['dynamic_simul_derivs']:
                coloring_mod.dynamic_simul_coloring(self, run_model=not model_ran,
                                                    do_sparsity=True)
            elif self.options['dynamic_derivs_sparsity']:
                coloring_mod.dynamic_sparsity(self)

        opt_prob = Optimization(self.options['title'], self._objfunc)

        # Add all design variables
        param_meta = self._designvars
        self._indep_list = indep_list = list(param_meta)
        param_vals = self.get_design_var_values()

        for name, meta in iteritems(param_meta):
            opt_prob.addVarGroup(name, meta['size'], type='c',
                                 value=param_vals[name],
                                 lower=meta['lower'], upper=meta['upper'])

        opt_prob.finalizeDesignVariables()

        # Add all objectives
        objs = self.get_objective_values()
        for name in objs:
            opt_prob.addObj(name)
            self._quantities.append(name)

        # Calculate and save derivatives for any linear constraints.
        lcons = [key for (key, con) in iteritems(con_meta) if con['linear']]
        if len(lcons) > 0:
            _lin_jacs = self._compute_totals(of=lcons, wrt=indep_list, return_format='dict')
            # convert all of our linear constraint jacs to COO format. Otherwise pyoptsparse will
            # do it for us and we'll end up with a fully dense COO matrix and very slow evaluation
            # of linear constraints!
            for jacdct in itervalues(_lin_jacs):
                for n, subjac in iteritems(jacdct):
                    if isinstance(subjac, np.ndarray):
                        # we can safely use coo_matrix to automatically convert the ndarray
                        # since our linear constraint jacs are constant, so zeros won't become
                        # nonzero during the optimization.
                        mat = coo_matrix(subjac)
                        if mat.row.size > 0:
                            # convert to 'coo' format here to avoid an emphatic warning
                            # by pyoptsparse.
                            jacdct[n] = {'coo': [mat.row, mat.col, mat.data], 'shape': mat.shape}

        # Add all equality constraints
        for name, meta in iteritems(con_meta):
            if meta['equals'] is None:
                continue
            size = meta['size']
            lower = upper = meta['equals']
            if fwd:
                wrt = [v for v in indep_list if name in relevant[v]]
            else:
                rels = relevant[name]
                wrt = [v for v in indep_list if v in rels]

            if meta['linear']:
                jac = {w: _lin_jacs[name][w] for w in wrt}
                opt_prob.addConGroup(name, size, lower=lower, upper=upper,
                                     linear=True, wrt=wrt, jac=jac)
            else:
                if name in self._res_jacs:
                    resjac = self._res_jacs[name]
                    jac = {n: resjac[n] for n in wrt}
                else:
                    jac = None
                opt_prob.addConGroup(name, size, lower=lower, upper=upper, wrt=wrt, jac=jac)
                self._quantities.append(name)

        # Add all inequality constraints
        for name, meta in iteritems(con_meta):
            if meta['equals'] is not None:
                continue
            size = meta['size']

            # Bounds - double sided is supported
            lower = meta['lower']
            upper = meta['upper']

            if fwd:
                wrt = [v for v in indep_list if name in relevant[v]]
            else:
                rels = relevant[name]
                wrt = [v for v in indep_list if v in rels]

            if meta['linear']:
                jac = {w: _lin_jacs[name][w] for w in wrt}
                opt_prob.addConGroup(name, size, upper=upper, lower=lower,
                                     linear=True, wrt=wrt, jac=jac)
            else:
                if name in self._res_jacs:
                    resjac = self._res_jacs[name]
                    jac = {n: resjac[n] for n in wrt}
                else:
                    jac = None
                opt_prob.addConGroup(name, size, upper=upper, lower=lower, wrt=wrt, jac=jac)
                self._quantities.append(name)

        # Instantiate the requested optimizer
        try:
            _tmp = __import__('pyoptsparse', globals(), locals(), [optimizer], 0)
            opt = getattr(_tmp, optimizer)()

        except Exception as err:
            # Change whatever pyopt gives us to an ImportError, give it a readable message,
            # but raise with the original traceback.
            msg = "Optimizer %s is not available in this installation." % optimizer
            reraise(ImportError, ImportError(msg), sys.exc_info()[2])

        # Set optimization options
        for option, value in self.opt_settings.items():
            opt.setOption(option, value)

        # Execute the optimization problem
        if self.options['gradient method'] == 'pyopt_fd':

            # Use pyOpt's internal finite difference
            # TODO: Need to get this from OpenMDAO
            # fd_step = problem.root.deriv_options['step_size']
            fd_step = 1e-6
            sol = opt(opt_prob, sens='FD', sensStep=fd_step, storeHistory=self.hist_file,
                      hotStart=self.hotstart_file)

        elif self.options['gradient method'] == 'snopt_fd':
            if self.options['optimizer'] == 'SNOPT':

                # Use SNOPT's internal finite difference
                # TODO: Need to get this from OpenMDAO
                # fd_step = problem.root.deriv_options['step_size']
                fd_step = 1e-6
                sol = opt(opt_prob, sens=None, sensStep=fd_step, storeHistory=self.hist_file,
                          hotStart=self.hotstart_file)

            else:
                msg = "SNOPT's internal finite difference can only be used with SNOPT"
                raise Exception(msg)
        else:

            # Use OpenMDAO's differentiator for the gradient
            sol = opt(opt_prob, sens=self._gradfunc, storeHistory=self.hist_file,
                      hotStart=self.hotstart_file)

        # Print results
        if self.options['print_results']:
            print(sol)

        # Pull optimal parameters back into framework and re-run, so that
        # framework is left in the right final state
        dv_dict = sol.getDVs()
        for name in indep_list:
            self.set_design_var(name, dv_dict[name])

        with RecordingDebugging(self._get_name(), self.iter_count, self) as rec:
            model.run_solve_nonlinear()
            rec.abs = 0.0
            rec.rel = 0.0
        self.iter_count += 1

        # Save the most recent solution.
        self.pyopt_solution = sol
        try:
            exit_status = sol.optInform['value']
            self.fail = False

            # These are various failed statuses.
            if exit_status > 2:
                self.fail = True

        except KeyError:
            # optimizers other than pySNOPT may not populate this dict
            pass

        return self.fail
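The __import__ call used by both drivers fetches an optimizer class from pyoptsparse by its string name. A hedged equivalent with importlib (helper name is illustrative, not part of either driver):

import importlib

def load_optimizer(name):
    # Import pyoptsparse and look the optimizer class up by name, turning
    # any failure into a readable ImportError.
    try:
        mod = importlib.import_module('pyoptsparse')
        return getattr(mod, name)()
    except Exception as err:
        raise ImportError(
            "Optimizer %s is not available in this installation." % name) from err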
Esempio n. 30
    def run(self, problem):
        """pyOpt execution. Note that pyOpt controls the execution, and the
        individual optimizers (e.g., SNOPT) control the iteration.

        Args
        ----
        problem : `Problem`
            Our parent `Problem`.
        """

        self.pyopt_solution = None
        rel = problem.root._probdata.relevance

        # Metadata Setup
        self.metadata = create_local_meta(None, self.options['optimizer'])
        self.iter_count = 0
        update_local_meta(self.metadata, (self.iter_count,))

        # Initial Run
        problem.root.solve_nonlinear(metadata=self.metadata)

        opt_prob = Optimization(self.options['title'], self._objfunc)

        # Add all parameters
        param_meta = self.get_desvar_metadata()
        self.indep_list = indep_list = list(iterkeys(param_meta))
        param_vals = self.get_desvars()

        for name, meta in iteritems(param_meta):
            opt_prob.addVarGroup(name, meta['size'], type='c',
                                 value=param_vals[name],
                                 lower=meta['lower'], upper=meta['upper'])

        opt_prob.finalizeDesignVariables()

        # Add all objectives
        objs = self.get_objectives()
        self.quantities = list(iterkeys(objs))
        self.sparsity = OrderedDict()
        for name in objs:
            opt_prob.addObj(name)
            self.sparsity[name] = self.indep_list

        # Calculate and save gradient for any linear constraints.
        lcons = self.get_constraints(lintype='linear').keys()
        if len(lcons) > 0:
            self.lin_jacs = problem.calc_gradient(indep_list, lcons,
                                                  return_format='dict')
            #print("Linear Gradient")
            #print(self.lin_jacs)

        # Add all equality constraints
        econs = self.get_constraints(ctype='eq', lintype='nonlinear')
        con_meta = self.get_constraint_metadata()
        self.quantities += list(iterkeys(econs))

        for name in self.get_constraints(ctype='eq'):
            size = con_meta[name]['size']
            lower = upper = con_meta[name]['equals']

            # Sparsify Jacobian via relevance
            wrt = rel.relevant[name].intersection(indep_list)
            self.sparsity[name] = wrt

            if con_meta[name]['linear'] is True:
                opt_prob.addConGroup(name, size, lower=lower, upper=upper,
                                     linear=True, wrt=wrt,
                                     jac=self.lin_jacs[name])
            else:
                opt_prob.addConGroup(name, size, lower=lower, upper=upper,
                                     wrt=wrt)

        # Add all inequality constraints
        incons = self.get_constraints(ctype='ineq', lintype='nonlinear')
        self.quantities += list(iterkeys(incons))

        for name in self.get_constraints(ctype='ineq'):
            size = con_meta[name]['size']

            # Bounds - double sided is supported
            lower = con_meta[name]['lower']
            upper = con_meta[name]['upper']

            # Sparsify Jacobian via relevance
            wrt = rel.relevant[name].intersection(indep_list)
            self.sparsity[name] = wrt

            if con_meta[name]['linear'] is True:
                opt_prob.addConGroup(name, size, upper=upper, lower=lower,
                                     linear=True, wrt=wrt,
                                     jac=self.lin_jacs[name])
            else:
                opt_prob.addConGroup(name, size, upper=upper, lower=lower,
                                     wrt=wrt)

        # Instantiate the requested optimizer
        optimizer = self.options['optimizer']
        try:
            # Look the optimizer class up by name; 'exec' plus vars() is
            # unreliable inside a function on Python 3.
            _tmp = __import__('pyoptsparse', globals(), locals(), [optimizer], 0)
            opt = getattr(_tmp, optimizer)()
        except Exception:
            msg = "Optimizer %s is not available in this installation." % \
                   optimizer
            raise ImportError(msg)

        # Set optimization options
        for option, value in self.opt_settings.items():
            opt.setOption(option, value)

        self._problem = problem

        # Execute the optimization problem
        if self.options['pyopt_diff'] is True:
            # Use pyOpt's internal finite difference
            fd_step = problem.root.fd_options['step_size']
            sol = opt(opt_prob, sens='FD', sensStep=fd_step, storeHistory=self.hist_file)
        else:
            # Use OpenMDAO's differentiator for the gradient
            sol = opt(opt_prob, sens=self._gradfunc, storeHistory=self.hist_file)

        self._problem = None

        # Print results
        if self.options['print_results'] is True:
            print(sol)

        # Pull optimal parameters back into framework and re-run, so that
        # framework is left in the right final state
        dv_dict = sol.getDVs()
        for name in indep_list:
            val = dv_dict[name]
            self.set_desvar(name, val)

        self.root.solve_nonlinear(metadata=self.metadata)

        # Save the most recent solution.
        self.pyopt_solution = sol
        try:
            exit_status = sol.optInform['value']
            self.exit_flag = 1
            if exit_status > 2: # bad
                self.exit_flag = 0
        except KeyError: #nothing is here, so something bad happened!
            self.exit_flag = 0
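Both drivers above reduce sol.optInform to a pass/fail flag. A hedged standalone sketch of that rule (SNOPT-style codes, where values above 2 mean failure; some optimizers never populate the dict):

def inform_failed(sol):
    # sol.optInform is a dict like {'value': code, 'text': message}.
    try:
        return sol.optInform['value'] > 2
    except KeyError:
        return False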
Esempio n. 31
    offset = offset_start
    rotate = rotate_start
    input = {'dx': dx, 'dy': dy, 'offset': offset, 'rotate': rotate}
    funcs, _ = obj_func_grid(input)
    AEPstart = funcs['obj']
    print(AEPstart)
    nCalls = 0
    """Optimization"""
    optProb = Optimization('Wind_Farm_AEP', obj_func_grid)
    optProb.addObj('obj')
    optProb.addVar('dx', type='c', lower=0., upper=None, value=dx)
    optProb.addVar('dy', type='c', lower=0., upper=None, value=dy)
    optProb.addVar('offset', type='c', lower=None, upper=None, value=offset)
    optProb.addVar('rotate', type='c', lower=None, upper=None, value=rotate)
    num_cons_sep = (nTurbs-1)*nTurbs//2
    optProb.addConGroup('sep', num_cons_sep, lower=0., upper=None)
    optProb.addConGroup('bound', nTurbs, lower=0., upper=None)
    opt = SNOPT()
    opt.setOption('Scale option', 0)
    opt.setOption('Iterations limit', 1000000)
    opt.setOption('Summary file', 'summary_grid.out')
    opt.setOption('Major optimality tolerance', 1.e-5)
    opt.setOption('Major feasibility tolerance', 1.e-6)
    res = opt(optProb)
    dx_f = res.xStar['dx']
    dy_f = res.xStar['dy']
    o_f = res.xStar['offset']
    r_f = res.xStar['rotate']
    # print('dx_f: ', dx_f)
    # print('dy_f: ', dy_f)
    # print('offset_f: ', o_f)
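The 'sep' constraint group sizes above count unordered turbine pairs. A quick sketch of that formula, kept integer with floor division:

def n_separation_cons(n_turbs):
    # Unordered pairs among n turbines: n*(n-1)/2.
    return n_turbs * (n_turbs - 1) // 2

assert n_separation_cons(4) == 6   # (0,1) (0,2) (0,3) (1,2) (1,3) (2,3)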
Esempio n. 32
    # option to use actuator cylinder or not (use a correction factor method)
    useAC = True
    useAC = False

    if optimize:
        # optimization setup
        optProb = Optimization('VAWT_Power', obj_func)
        optProb.addObj('obj')

        n = np.size(x0)
        optProb.addVarGroup('xvars', n, 'c', lower=xlow, upper=xupp, value=x0)
        optProb.addVarGroup('yvars', n, 'c', lower=ylow, upper=yupp, value=y0)

        num_cons_sep = (n - 1) * n // 2
        optProb.addConGroup('sep', num_cons_sep, lower=0, upper=None)
        num_cons_obs = nobs * nwind
        optProb.addConGroup('SPL', num_cons_obs, lower=0, upper=SPLlim / 10.)

        opt = SNOPT()
        opt.setOption('Scale option', 0)
        if rotdir_spec == 'cn':
            opt.setOption(
                'Print file',
                basepath + path.sep + 'optimization_results/SNOPT_print_SPL' +
                str(SPLlim) + '_turb' + str(n) + '_counterrot.out')
            opt.setOption(
                'Summary file', basepath + path.sep +
                'optimization_results/SNOPT_summary_SPL' + str(SPLlim) +
                '_turb' + str(n) + '_counterrot.out')
        elif rotdir_spec == 'co':
Esempio n. 33
    
    fail = False
    return funcsSens, fail

# Optimization Object
optProb = Optimization('HS071 Constraint Problem', objfunc)

# Design Variables
x0 = [1.0, 5.0, 5.0, 1.0]
optProb.addVarGroup('x0', 1, lower=1, upper=5, value=x0[0])
optProb.addVarGroup('x1', 1, lower=1, upper=5, value=x0[1])
optProb.addVarGroup('x2', 1, lower=1, upper=5, value=x0[2])
optProb.addVarGroup('x3', 1, lower=1, upper=5, value=x0[3])

# Constraints
optProb.addConGroup('con1', 1, lower=[25], upper=[1e19])
optProb.addConGroup('con2', 1, lower=[40], upper=[40])

# Objective
optProb.addObj('obj')

# Check optimization problem:
print(optProb)

# Optimizer
opt = OPT(args.opt, options=optOptions)

# Solution
sol = opt(optProb, sens=sens)

# Check Solution
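For reference, a hedged sketch of an objfunc matching the split variable groups above; the HS071 expressions agree with the TestHS71 class later on this page, with the two constraints split into 'con1' and 'con2':

def objfunc(xdict):
    # Reassemble the four scalar groups into one vector.
    x = [xdict['x0'][0], xdict['x1'][0], xdict['x2'][0], xdict['x3'][0]]
    funcs = {
        'obj': x[0] * x[3] * (x[0] + x[1] + x[2]) + x[2],
        'con1': [x[0] * x[1] * x[2] * x[3]],
        'con2': [x[0] ** 2 + x[1] ** 2 + x[2] ** 2 + x[3] ** 2],
    }
    fail = False
    return funcs, fail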
Esempio n. 34
def create_pyopt(analysis, optimizer='snopt', options=None):
    '''
    Take the given problem and optimize it with the given optimizer
    from the pyOptSparse library of optimizers.
    '''
    # Avoid a mutable default argument: setOutputFile mutates this dict.
    if options is None:
        options = {}
    # Import the optimization problem
    from pyoptsparse import Optimization, OPT
    from scipy import sparse

    class pyOptWrapper:
        optimizer = None
        options = {}
        opt = None
        prob = None

        def __init__(self, analysis):
            self.xcurr = None
            self.analysis = analysis

        def objcon(self, x):
            # Copy the design variable values
            self.xcurr = np.array(x['x'])
            fail, obj, con = self.analysis.evalObjCon(x['x'])
            funcs = {'objective': obj, 'con': con}
            return funcs, fail

        def gobjcon(self, x, funcs):
            g = np.zeros(x['x'].shape)
            A = np.zeros((1, x['x'].shape[0]))
            fail = self.analysis.evalObjConGradient(x['x'], g, A)
            sens = {'objective': {'x': g}, 'con': {'x': A}}
            return sens, fail

        # Thin wrapper methods to make this look somewhat like ParOpt
        def optimize(self):
            self.opt = OPT(self.optimizer, options=self.options)
            self.sol = self.opt(self.prob, sens=self.gobjcon)
            return

        def setOutputFile(self, fname):
            if self.optimizer == 'snopt':
                self.options['Print file'] = fname
                self.options['Summary file'] = fname + '_summary'
                self.options['Minor feasibility tolerance'] = 1e-10

                # Ensure that we don't stop for iterations
                self.options['Major iterations limit'] = 5000
                self.options['Minor iterations limit'] = 100000000
                self.options['Iterations limit'] = 100000000
            elif self.optimizer == 'ipopt':
                self.options['bound_relax_factor'] = 0.0
                self.options['linear_solver'] = 'ma27'
                self.options['output_file'] = fname
                self.options['max_iter'] = 5000
            return

        def setInitBarrierParameter(self, *args):
            return

        def getOptimizedPoint(self):
            x = np.array(self.xcurr)
            return x

    # Set the design variables
    wrap = pyOptWrapper(analysis)
    prob = Optimization('topo', wrap.objcon)

    # Record the number of elements/materials/design vars
    num_materials = analysis.nmats
    num_elements = analysis.nelems
    num_design_vars = analysis.num_design_vars

    # Add the linear constraint
    n = num_design_vars

    # Create the sparse matrix for the design variable weights
    rowp = [0]
    cols = []
    data = []
    nrows = num_elements
    ncols = num_design_vars

    nblock = num_materials + 1
    for i in range(num_elements):
        data.append(1.0)
        cols.append(i * nblock)
        for j in range(i * nblock + 1, (i + 1) * nblock):
            data.append(-1.0)
            cols.append(j)
        rowp.append(len(cols))

    Asparse = {'csr': [rowp, cols, data], 'shape': [nrows, ncols]}

    lower = np.zeros(num_elements)
    upper = np.zeros(num_elements)
    prob.addConGroup('lincon',
                     num_elements,
                     lower=lower,
                     upper=upper,
                     linear=True,
                     wrt=['x'],
                     jac={'x': Asparse})

    # Determine the initial variable values and their lower/upper
    # bounds in the design problem
    x0 = np.zeros(n)
    lb = np.zeros(n)
    ub = np.zeros(n)
    analysis.getVarsAndBounds(x0, lb, ub)

    # Set the variable bounds and initial values
    prob.addVarGroup('x', n, value=x0, lower=lb, upper=ub)

    # Set the constraints
    prob.addConGroup('con', 1, lower=0.0, upper=0.0)

    # Add the objective
    prob.addObj('objective')

    # Set the values into the wrapper
    wrap.optimizer = optimizer
    wrap.options = options
    wrap.prob = prob

    return wrap
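The hand-built rowp/cols/data arrays above are ordinary CSR storage. A hedged sketch building the same dict from a scipy matrix (matrix contents mirror the nblock pattern for two elements and two materials):

import numpy as np
from scipy import sparse

A = sparse.csr_matrix(np.array([[1.0, -1.0, -1.0, 0.0, 0.0, 0.0],
                                [0.0, 0.0, 0.0, 1.0, -1.0, -1.0]]))
Asparse = {'csr': [A.indptr, A.indices, A.data],  # row pointers, columns, values
           'shape': list(A.shape)}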
Esempio n. 35
class TestHS71(OptTest):
    # Optimization problem definition
    name = "hs071"
    DVs = {"xvars"}
    cons = {"con"}
    objs = {"obj"}
    fStar = 17.0140172
    xStar = {"xvars": (1.0, 4.743, 3.82115, 1.37941)}
    lambdaStar = {"con": (0.55229366, -0.16146857)}

    # Tolerances
    tol = {
        "SNOPT": 1e-6,
        "IPOPT": 1e-6,
        "NLPQLP": 1e-6,
        "SLSQP": 1e-6,
        "CONMIN": 1e-3,
        "PSQP": 1e-6,
        "ParOpt": 1e-6,
    }
    optOptions = {
        "CONMIN": {
            "DELFUN": 1e-10,
            "DABFUN": 1e-10,
        }
    }

    def objfunc(self, xdict):
        x = xdict["xvars"]
        funcs = {}
        funcs["obj"] = x[0] * x[3] * (x[0] + x[1] + x[2]) + x[2]
        funcs["con"] = [x[0] * x[1] * x[2] * x[3], x[0] * x[0] + x[1] * x[1] + x[2] * x[2] + x[3] * x[3]]
        fail = False
        return funcs, fail

    def sens(self, xdict, funcs):
        x = xdict["xvars"]
        funcsSens = {}
        funcsSens["obj"] = {
            "xvars": np.array(
                [x[0] * x[3] + x[3] * (x[0] + x[1] + x[2]), x[0] * x[3], x[0] * x[3] + 1.0, x[0] * (x[0] + x[1] + x[2])]
            )
        }
        jac = [
            [x[1] * x[2] * x[3], x[0] * x[2] * x[3], x[0] * x[1] * x[3], x[0] * x[1] * x[2]],
            [2.0 * x[0], 2.0 * x[1], 2.0 * x[2], 2.0 * x[3]],
        ]
        funcsSens["con"] = {"xvars": jac}
        fail = False
        return funcsSens, fail

    def setup_optProb(self, xScale=1.0, objScale=1.0, conScale=1.0, offset=0.0):
        # Optimization Object
        self.optProb = Optimization("HS071 Constraint Problem", self.objfunc, sens=self.sens)

        # Design Variables
        x0 = [1.0, 5.0, 5.0, 1.0]
        self.optProb.addVarGroup("xvars", 4, lower=1, upper=5, value=x0, scale=xScale, offset=offset)

        # Constraints
        self.optProb.addConGroup("con", 2, lower=[25, 40], upper=[None, 40], scale=conScale)

        # Objective
        self.optProb.addObj("obj", scale=objScale)

    def test_slsqp_setDV(self):
        """
        Test that setDV works as expected, even with scaling/offset
        """
        self.optName = "SLSQP"
        histFileName = "hs071_SLSQP_setDV.hst"
        newDV = {"xvars": np.array([1, 4, 4, 1])}
        self.setup_optProb(xScale=1.5, conScale=1.2, objScale=32, offset=1.5)
        sol = self.optimize(setDV=newDV, storeHistory=histFileName)
        self.assert_solution_allclose(sol, self.tol["SLSQP"])
        # Verify the history file
        hist = History(histFileName, flag="r")
        init = hist.getValues(names="xvars", callCounters="0", scale=False)
        x_init = init["xvars"][0]
        assert_allclose(x_init, newDV["xvars"], atol=1e-5, rtol=1e-5)

    def test_snopt_setDVFromHist(self):
        """
        Test that setDVFromHistory works as expected, even with scaling/offset
        """
        self.optName = "SNOPT"
        histFileName = "hs071_SNOPT_setDVFromHist.hst"
        self.setup_optProb(xScale=1.5, conScale=1.2, objScale=32, offset=1.5)
        sol = self.optimize(storeHistory=histFileName)
        self.assert_solution_allclose(sol, self.tol["SNOPT"])
        hist = History(histFileName, flag="r")
        first = hist.getValues(names="xvars", callCounters="last", scale=False)
        x_final = first["xvars"][0]
        self.setup_optProb(xScale=0.5, conScale=4.8, objScale=0.1, offset=1.5)
        sol = self.optimize(setDV=histFileName, storeHistory=histFileName)
        self.assert_solution_allclose(sol, self.tol["SNOPT"])
        # Verify the history file
        hist = History(histFileName, flag="r")
        second = hist.getValues(names="xvars", scale=False)
        x_init = second["xvars"][0]
        assert_allclose(x_init, x_final, atol=1e-5, rtol=1e-5)
        # assert that this only took one major iteration
        # since we restarted from the optimum
        self.assertEqual(second["xvars"].shape, (1, 4))

    def test_slsqp_scaling_offset_optProb(self):
        """
        Test that scaling and offset works as expected
        Also test optProb stored in the history file is correct
        """
        self.optName = "SLSQP"
        histFileName = "hs071_SLSQP_scaling_offset.hst"
        objScale = 4.2
        xScale = [2, 3, 4, 5]
        conScale = [0.6, 1.7]
        offset = [1, -2, 40, 2.5]
        self.setup_optProb(objScale=objScale, xScale=xScale, conScale=conScale, offset=offset)
        sol = self.optimize(storeHistory=histFileName)
        self.assert_solution_allclose(sol, self.tol["SLSQP"])
        # now we retrieve the history file, and check the scale=True option is indeed
        # scaling things correctly
        hist = History(histFileName, flag="r")
        orig_values = hist.getValues(callCounters="0", scale=False)
        optProb = hist.getOptProb()

        # check that the scales are stored properly
        for i, var in enumerate(optProb.variables["xvars"]):
            assert_allclose(xScale[i], var.scale, atol=1e-12, rtol=1e-12)
            assert_allclose(offset[i], var.offset, atol=1e-12, rtol=1e-12)
        for con in optProb.constraints:
            assert_allclose(conScale, optProb.constraints[con].scale, atol=1e-12, rtol=1e-12)
        for obj in optProb.objectives:
            assert_allclose(objScale, optProb.objectives[obj].scale, atol=1e-12, rtol=1e-12)

        # verify the scale option in getValues
        scaled_values = hist.getValues(callCounters="0", scale=True, stack=False)
        x = orig_values["xvars"][0]
        x_scaled = scaled_values["xvars"][0]
        assert_allclose(x_scaled, (x - offset) * xScale, atol=1e-12, rtol=1e-12)

        # now do the same but with stack=True
        stacked_values = hist.getValues(callCounters="0", scale=True, stack=True)
        x_scaled = stacked_values["xuser"][0]
        assert_allclose(x_scaled, (x - offset) * xScale, atol=1e-12, rtol=1e-12)

        # now we test objective and constraint scaling in getValues
        obj_orig = orig_values["obj"][0]
        obj_scaled = scaled_values["obj"][0]
        assert_allclose(obj_scaled, obj_orig * objScale, atol=1e-12, rtol=1e-12)
        con_orig = orig_values["con"][0]
        con_scaled = scaled_values["con"][0]
        assert_allclose(con_scaled, con_orig * conScale, atol=1e-12, rtol=1e-12)

    def test_snopt_informs(self):
        self.optName = "SNOPT"
        self.setup_optProb()
        sol = self.optimize(optOptions={"Major iterations limit": 1})
        self.assert_inform_equal(sol, 32)

    def test_slsqp_informs(self):
        self.optName = "SLSQP"
        self.setup_optProb()
        # now we set max iteration to 1 and verify that we get a different inform
        sol = self.optimize(optOptions={"MAXIT": 1})
        self.assert_inform_equal(sol, 9)

    def test_nlpqlp_informs(self):
        self.optName = "NLPQLP"
        self.setup_optProb()
        sol = self.optimize(optOptions={"maxIt": 1})
        self.assert_inform_equal(sol, 1)

    def test_ipopt_informs(self):
        self.optName = "IPOPT"
        self.setup_optProb()
        # Test that the inform is -1 when iterations are too limited.
        sol = self.optimize(optOptions={"max_iter": 1})
        self.assert_inform_equal(sol, -1)
        # Test that the inform is -4 when max_cpu_time are too limited.
        sol = self.optimize(optOptions={"max_cpu_time": 0.001})
        self.assert_inform_equal(sol, -4)

    def test_psqp_informs(self):
        self.optName = "PSQP"
        self.setup_optProb()
        sol = self.optimize(optOptions={"MIT": 1})
        self.assert_inform_equal(sol, 11)

    @parameterized.expand(["SNOPT", "IPOPT", "SLSQP", "PSQP", "CONMIN", "NLPQLP", "ParOpt"])
    def test_optimization(self, optName):
        self.optName = optName
        self.setup_optProb()
        optOptions = self.optOptions.pop(optName, None)
        sol = self.optimize(optOptions=optOptions)
        # Check Solution
        self.assert_solution_allclose(sol, self.tol[optName])
        # Check informs
        self.assert_inform_equal(sol)
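A hedged sketch of the History access pattern the tests above rely on (file name illustrative; requires a history file written by a previous run):

from pyoptsparse import History

hist = History('hs071_SLSQP_scaling_offset.hst', flag='r')            # read-only
init = hist.getValues(names='xvars', callCounters='0', scale=False)   # first call
x_init = init['xvars'][0]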
Esempio n. 36
    yaw = np.ones((nwind,nturb))*0.
    rpm = np.ones(nwind*nturb)*rpm_max

    # Optimization
    optProb = Optimization('Wind_Farm_APP', obj_func)
    optProb.addObj('obj')

    # Design Variables (scaled to 1)
    nrpm = nturb*nwind
    optProb.addVarGroup('xvars', nturb, 'c', lower=xlow/100., upper=xupp/100., value=turbineX/100.) # x positions
    optProb.addVarGroup('yvars', nturb, 'c', lower=ylow/100., upper=yupp/100., value=turbineY/100.) # y positions
    optProb.addVarGroup('rpm', nrpm, 'c', lower=rpmlow/10., upper=rpmupp/10., value=rpm/10.) # rpm values

    # Constraints (scaled to 1)
    num_cons_sep = (nturb-1)*nturb//2
    optProb.addConGroup('sep', num_cons_sep, lower=0., upper=None) # separation between turbines
    num_cons_spl = nwind*nobs
    optProb.addConGroup('SPL', num_cons_spl, lower=0., upper=SPLlim/10.) # SPL limit

    opt = SNOPT()
    opt.setOption('Scale option', 0)
    opt.setOption('Iterations limit', 1000000)
    res = opt(optProb)
    # Printing optimization results (SNOPT format)
    print(res)

    # Final results (scaled back to original values)
    pow = np.array(-1*res.fStar)*1e4
    xf = res.xStar['xvars']*100.
    yf = res.xStar['yvars']*100.
    rpmf = res.xStar['rpm']*10.
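The example above scales the design variables by hand, dividing bounds and values down before addVarGroup and multiplying the optimum back up afterwards. pyoptsparse's own scale argument (exercised in the TestHS71 class above, where the optimizer sees (x - offset) * scale) does the same bookkeeping internally; a hedged sketch with a toy objective:

import numpy as np
from pyoptsparse import Optimization

def obj_func(xdict):
    # Toy objective over the design variables (illustrative only).
    return {'obj': np.sum(xdict['xvars'] ** 2)}, False

optProb = Optimization('scaled_demo', obj_func)
# scale=0.01 means the optimizer works with x/100 internally; results are
# reported back in original units.
optProb.addVarGroup('xvars', 3, 'c', lower=0., upper=2000.,
                    value=500., scale=0.01)
optProb.addObj('obj')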
Esempio n. 37
    def execute(self):
        """pyOpt execution. Note that pyOpt controls the execution, and the
        individual optimizers control the iteration."""

        self.pyOpt_solution = None

        self.run_iteration()

        opt_prob = Optimization(self.title, self.objfunc)

        # Add all parameters
        self.param_type = {}
        self.nparam = self.total_parameters()
        param_list = []
        for name, param in self.get_parameters().iteritems():

            # We need to identify Enums, Lists, Dicts
            metadata = param.get_metadata()[1]
            values = param.evaluate()

            # Assuming uniform enumerated, discrete, or continuous for now.
            val = values[0]
            choices = []
            if 'values' in metadata and \
               isinstance(metadata['values'], (list, tuple, array, set)):
                vartype = 'd'
                choices = metadata['values']
            elif isinstance(val, bool):
                vartype = 'd'
                choices = [True, False]
            elif isinstance(val, (int, int32, int64)):
                vartype = 'i'
            elif isinstance(val, (float, float32, float64)):
                vartype = 'c'
            else:
                msg = 'Only continuous, discrete, or enumerated variables' \
                      ' are supported. %s is %s.' % (name, type(val))
                self.raise_exception(msg, ValueError)
            self.param_type[name] = vartype

            lower_bounds = param.get_low()
            upper_bounds = param.get_high()
            opt_prob.addVarGroup(name, len(values), type=vartype,
                                 lower=lower_bounds, upper=upper_bounds,
                                 value=values, choices=choices)
            param_list.append(name)

        # Add all objectives
        for name, obj in self.get_objectives().iteritems():
            name = '%s.out0' % obj.pcomp_name
            opt_prob.addObj(name)

        # Calculate and save gradient for any linear constraints.
        lcons = self.get_constraints(linear=True).values() + \
                self.get_2sided_constraints(linear=True).values()
        if len(lcons) > 0:
            lcon_names = ['%s.out0' % obj.pcomp_name for obj in lcons]
            self.lin_jacs = self.workflow.calc_gradient(param_list, lcon_names,
                                                   return_format='dict')

        # Add all equality constraints
        nlcons = []
        for name, con in self.get_eq_constraints().iteritems():
            size = con.size
            lower = zeros((size))
            upper = zeros((size))
            name = '%s.out0' % con.pcomp_name
            if con.linear is True:
                opt_prob.addConGroup(name, size, lower=lower, upper=upper,
                                     linear=True, wrt=param_list,
                                     jac=self.lin_jacs[name])
            else:
                opt_prob.addConGroup(name, size, lower=lower, upper=upper)
                nlcons.append(name)

        # Add all inequality constraints
        for name, con in self.get_ineq_constraints().iteritems():
            size = con.size
            upper = zeros((size))
            name = '%s.out0' % con.pcomp_name
            if con.linear is True:
                opt_prob.addConGroup(name, size, upper=upper, linear=True,
                wrt=param_list, jac=self.lin_jacs[name])
            else:
                opt_prob.addConGroup(name, size, upper=upper)
                nlcons.append(name)

        # Add all double_sided constraints
        for name, con in self.get_2sided_constraints().iteritems():
            size = con.size
            upper = con.high * ones((size))
            lower = con.low * ones((size))
            name = '%s.out0' % con.pcomp_name
            if con.linear is True:
                opt_prob.addConGroup(name, size, upper=upper, lower=lower,
                                     linear=True, wrt=param_list,
                                     jac=self.lin_jacs[name])
            else:
                opt_prob.addConGroup(name, size, upper=upper, lower=lower)
                nlcons.append(name)

        self.objs = self.list_objective_targets()
        self.nlcons = nlcons

        # Instantiate the requested optimizer
        optimizer = self.optimizer
        try:
            exec('from pyoptsparse import %s' % optimizer)
        except ImportError:
            msg = "Optimizer %s is not available in this installation." % \
                   optimizer
            self.raise_exception(msg, ImportError)

        optname = vars()[optimizer]
        opt = optname()

        # Set optimization options
        for option, value in self.options.iteritems():
            opt.setOption(option, value)

        # Execute the optimization problem
        if self.pyopt_diff:
            # Use pyOpt's internal finite difference
            sol = opt(opt_prob, sens='FD', sensStep=self.gradient_options.fd_step)
        else:
            # Use OpenMDAO's differentiator for the gradient
            sol = opt(opt_prob, sens=self.gradfunc)

        # Print results
        if self.print_results:
            print(sol)

        # Pull optimal parameters back into framework and re-run, so that
        # framework is left in the right final state
        dv_dict = sol.getDVs()
        param_types = self.param_type
        for name, param in self.get_parameters().iteritems():
            val = dv_dict[name]
            if param_types[name] == 'i':
                val = int(round(val))

            self.set_parameter_by_name(name, val)

        self.run_iteration()

        # Save the most recent solution.
        self.pyOpt_solution = sol
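A hedged standalone sketch of the type-detection rule above; bool must be tested before int because bool subclasses int:

import numpy as np

def var_type(val, choices=None):
    # Map a parameter value to a pyOptSparse variable type.
    if choices:                                       # enumerated values
        return 'd'
    if isinstance(val, bool):                         # bool before int!
        return 'd'
    if isinstance(val, (int, np.int32, np.int64)):    # integers
        return 'i'
    if isinstance(val, (float, np.float32, np.float64)):
        return 'c'                                    # continuous
    raise ValueError('Only continuous, discrete, or enumerated '
                     'variables are supported.')

assert (var_type(True), var_type(3), var_type(1.5)) == ('d', 'i', 'c')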
Esempio n. 38
    def execute(self):
        """pyOpt execution. Note that pyOpt controls the execution, and the
        individual optimizers control the iteration."""

        self.pyOpt_solution = None

        self.run_iteration()

        opt_prob = Optimization(self.title, self.objfunc)

        # Add all parameters
        self.param_type = {}
        self.nparam = self.total_parameters()
        param_list = []

        #need a counter for lb and ub arrays
        i_param = 0

        for name, param in self.get_parameters().iteritems():

            # We need to identify Enums, Lists, Dicts
            metadata = param.get_metadata()[1]
            values = param.evaluate()

            # Assuming uniform enumerated, discrete, or continuous for now.
            val = values[0]
            n_vals = len(values)
            choices = []
            if 'values' in metadata and \
               isinstance(metadata['values'], (list, tuple, array, set)):
                vartype = 'd'
                choices = metadata['values']
            elif isinstance(val, bool):
                vartype = 'd'
                choices = [True, False]
            elif isinstance(val, (int, int32, int64)):
                vartype = 'i'
            elif isinstance(val, (float, float32, float64)):
                vartype = 'c'
            else:
                msg = 'Only continuous, discrete, or enumerated variables' \
                      ' are supported. %s is %s.' % (name, type(val))
                self.raise_exception(msg, ValueError)
            self.param_type[name] = vartype

            if self.n_x is None:
                lower_bounds = param.get_low()
                upper_bounds = param.get_high()
            else:
                lower_bounds = self.lb[i_param:i_param + n_vals]
                upper_bounds = self.ub[i_param:i_param + n_vals]

            i_param += n_vals
            opt_prob.addVarGroup(name,
                                 n_vals,
                                 type=vartype,
                                 lower=lower_bounds,
                                 upper=upper_bounds,
                                 value=values,
                                 choices=choices)
            param_list.append(name)
        # Add all objectives
        for name, obj in self.get_objectives().iteritems():
            name = '%s.out0' % obj.pcomp_name
            opt_prob.addObj(name)

        # Calculate and save gradient for any linear constraints.
        lcons = self.get_constraints(linear=True).values() + \
                self.get_2sided_constraints(linear=True).values()
        if len(lcons) > 0:
            lcon_names = ['%s.out0' % obj.pcomp_name for obj in lcons]
            self.lin_jacs = self.workflow.calc_gradient(param_list,
                                                        lcon_names,
                                                        return_format='dict')
            #print "Linear Gradient"
            #print self.lin_jacs

        # Add all equality constraints
        nlcons = []
        for name, con in self.get_eq_constraints().iteritems():
            size = con.size
            lower = zeros((size))
            upper = zeros((size))
            name = '%s.out0' % con.pcomp_name
            if con.linear is True:
                opt_prob.addConGroup(name,
                                     size,
                                     lower=lower,
                                     upper=upper,
                                     linear=True,
                                     wrt=param_list,
                                     jac=self.lin_jacs[name])
            else:
                opt_prob.addConGroup(name, size, lower=lower, upper=upper)
                nlcons.append(name)

        # Add all inequality constraints
        for name, con in self.get_ineq_constraints().iteritems():
            size = con.size
            upper = zeros((size))
            name = '%s.out0' % con.pcomp_name
            if con.linear is True:
                opt_prob.addConGroup(name,
                                     size,
                                     upper=upper,
                                     linear=True,
                                     wrt=param_list,
                                     jac=self.lin_jacs[name])
            else:
                opt_prob.addConGroup(name, size, upper=upper)
                nlcons.append(name)

        # Add all double_sided constraints
        for name, con in self.get_2sided_constraints().iteritems():
            size = con.size
            upper = con.high * ones((size))
            lower = con.low * ones((size))
            name = '%s.out0' % con.pcomp_name
            if con.linear is True:
                opt_prob.addConGroup(name,
                                     size,
                                     upper=upper,
                                     lower=lower,
                                     linear=True,
                                     wrt=param_list,
                                     jac=self.lin_jacs[name])
            else:
                opt_prob.addConGroup(name, size, upper=upper, lower=lower)
                nlcons.append(name)

        self.objs = self.list_objective_targets()
        self.nlcons = nlcons

        # Instantiate the requested optimizer
        optimizer = self.optimizer
        try:
            exec('from pyoptsparse import %s' % optimizer)
        except ImportError:
            msg = "Optimizer %s is not available in this installation." % \
                   optimizer
            self.raise_exception(msg, ImportError)

        optname = vars()[optimizer]
        opt = optname()

        # Set optimization options
        for option, value in self.options.iteritems():
            opt.setOption(option, value)

        # Execute the optimization problem
        if self.pyopt_diff:
            # Use pyOpt's internal finite difference
            sol = opt(opt_prob,
                      sens='FD',
                      sensStep=self.gradient_options.fd_step)
        else:
            # Use OpenMDAO's differentiator for the gradient
            sol = opt(opt_prob, sens=self.gradfunc)

        # Print results
        if self.print_results:
            print(sol)

        # Pull optimal parameters back into framework and re-run, so that
        # framework is left in the right final state
        dv_dict = sol.getDVs()
        param_types = self.param_type
        for name, param in self.get_parameters().iteritems():
            val = dv_dict[name]
            if param_types[name] == 'i':
                val = int(round(val))

            self.set_parameter_by_name(name, val)

        self.run_iteration()

        # Save the most recent solution.
        self.pyOpt_solution = sol
        try:
            exit_status = sol.optInform['value']
            self.exit_flag = 1
            if exit_status > 2:  # bad
                self.exit_flag = 0
        except KeyError:  #nothing is here, so something bad happened!
            self.exit_flag = 0
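The post-run block above rounds integer-typed parameters while copying the optimum back into the framework; as a hedged helper (name illustrative):

def extract_dvs(sol, param_types):
    # Copy optimal DVs from a pyOptSparse Solution, rounding any parameter
    # declared with the integer type 'i'.
    out = {}
    for name, val in sol.getDVs().items():
        if param_types.get(name) == 'i':
            val = int(round(val))
        out[name] = val
    return out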
Esempio n. 39
    def __call__(self, optimizer, options=None):
        """ Run optimization """
        system = self._system
        variables = self._variables

        opt_prob = OptProblem('Optimization', self.obj_func)
        for dv_name in variables['dv'].keys():
            dv = variables['dv'][dv_name]
            dv_id = dv['ID']
            value = dv['value']
            lower = dv['lower']
            upper = dv['upper']
            size = system.vec['u'](dv_id).shape[0]
            opt_prob.addVarGroup(dv_name,
                                 size,
                                 value=value,
                                 lower=lower,
                                 upper=upper)
        opt_prob.finalizeDesignVariables()
        for func_name in variables['func'].keys():
            func = variables['func'][func_name]
            func_id = func['ID']
            lower = func['lower']
            upper = func['upper']
            linear = func['linear']
            get_jacs = func['get_jacs']
            size = system.vec['u'](func_id).shape[0]
            if lower is None and upper is None:
                opt_prob.addObj(func_name)
            else:
                if get_jacs is None:
                    opt_prob.addConGroup(func_name,
                                         size,
                                         lower=lower,
                                         upper=upper)
                else:
                    jacs_var = get_jacs()

                    dv_names = []
                    jacs = {}
                    for dv_var in jacs_var:
                        dv_id = self._system.get_id(dv_var)
                        dv_name = self._get_name(dv_id)
                        dv_names.append(dv_name)
                        jacs[dv_name] = jacs_var[dv_var]

                    opt_prob.addConGroup(func_name,
                                         size,
                                         wrt=dv_names,
                                         jac=jacs,
                                         linear=linear,
                                         lower=lower,
                                         upper=upper)

        if options is None:
            options = {}

        opt = Optimizer(optimizer, options=options)
        opt.setOption('Iterations limit', int(1e6))
        #opt.setOption('Verify level', 3)
        sol = opt(opt_prob, sens=self.sens_func, storeHistory='hist.hst')
        print(sol)
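The loop above decides objective versus constraint from the bounds alone; stated as a tiny standalone rule:

def classify(lower, upper):
    # A function with no bounds at all is the objective; anything bounded
    # becomes a constraint group.
    return 'objective' if lower is None and upper is None else 'constraint'

assert classify(None, None) == 'objective'
assert classify(0.0, None) == 'constraint'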