def traj_optim_static(paths, tree):
    path, envs, modes, mnps = paths
    guard_index = [0]
    n = len(modes)
    v_init = np.zeros((n, 3))
    for i in range(1, n):
        if not np.all(modes[i] == modes[i - 1]):
            guard_index.append(i)
        elif len(envs[i]) != 0:
            if not envs[i][0].is_same(envs[i - 1][0]):
                guard_index.append(i)
        elif not (mnps[i][0].is_same(mnps[i - 1][0])
                  and mnps[i][1].is_same(mnps[i - 1][1])):
            # manipulator change
            guard_index.append(i)
        # Body-frame displacement between consecutive waypoints seeds v_init
        g_v = np.identity(3)
        g_v[0:2, 0:2] = config2trans(path[i - 1])[0:2, 0:2]
        v_init[i - 1] = np.dot(g_v.T,
                               np.array(path[i]) - np.array(path[i - 1]))
    #guard_index.append(len(modes)-1)
    guard_index = np.unique(guard_index)

    Gs = dict()
    hs = dict()
    As = dict()
    bs = dict()
    for i in range(len(path)):
        G, h, A, b = contact_mode_constraints(path[i], mnps[i], envs[i],
                                              modes[i], tree.world,
                                              tree.mnp_mu, tree.env_mu,
                                              tree.mnp_fn_max)
        gid = np.any(G[:, 0:3], axis=1)
        aid = np.any(A[:, 0:3], axis=1)
        Gs[i] = G[gid, 0:3]
        hs[i] = h[gid].flatten()
        As[i] = A[aid, 0:3]
        bs[i] = b[aid].flatten()

    modeconstraints = (Gs, hs, As, bs)
    q_goal = np.array(tree.x_goal)

    opt_prob = Optimization('Trajectory Optimization', obj_fun)
    x_init = np.hstack((np.array(path).flatten(), v_init.flatten()))
    cs = constraints(x_init, path, Gs, hs, As, bs, guard_index)

    opt_prob.addVarGroup('x', n * 6, 'c', value=x_init, lower=-10, upper=10)
    opt_prob.addObj('f')
    opt_prob.addConGroup('g', len(cs), 'i', lower=0.0, upper=10000.0)
    print(opt_prob)
    slsqp = SLSQP()
    #slsqp.setOption('IPRINT', -1)
    slsqp(opt_prob,
          sens_type='FD',
          goal=q_goal,
          path=path,
          modecons=modeconstraints,
          guard_index=guard_index)
    print(opt_prob.solution(0))
    qs = [opt_prob.solution(0)._variables[i].value for i in range(n * 3)]
    return qs
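All of these examples feed pyOpt callbacks with the same contract: return the objective value, a list of constraint values, and a fail flag. The obj_fun and constraints helpers referenced above are defined elsewhere in that project; the following is only a minimal self-contained sketch of the expected signature (the quadratic objective is illustrative, not the author's):

import numpy as np

def obj_fun(x, **kwargs):
    # pyOpt contract: return (f, g, fail); fail = 0 on success, 1 on error.
    # Illustrative quadratic objective; the project's real obj_fun uses the
    # goal/path/modecons kwargs passed to slsqp above.
    f = float(np.sum(np.asarray(x)**2))
    g = []  # constraint values, ordered to match addConGroup
    fail = 0
    return f, g, fail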
Example #2
def solveOpt(int_domain, J, a, model, u0, sign):
    '''
    INPUT:
        int_domain = integration grid passed to np.trapz
        J = cost values sampled on int_domain
        a = current state, forwarded to model.pf as x
        model = object exposing pf(), W, VaR, alpha and M
        u0 = initial guess for the allocation vector u
        sign = +1 to minimise the integral, -1 to maximise it
    OUTPUT:
        opt_prob = the solved pyOpt Optimization instance
    '''
    def objfun(u, **kwargs):
        '''objfun defines the objective function and the constraints (equality
        and inequality) of the optimization problem'''
        # 1) extract parameters
        int_domain = kwargs['int_domain']
        J = kwargs['J']
        x = kwargs['a']
        model = kwargs['model']
        sign = kwargs['sign']
        # 2) define objective function
        funz = np.trapz(x=int_domain, y=J * model.pf(int_domain, u, x))
        g = [0] * 2
        # 3) budget constraint
        g[0] = u.sum() - 1
        # 4) VaR constraint
        W = model.W
        sigmaMax = model.VaR / norm.ppf(1 - model.alpha)
        g[1] = -sigmaMax + np.sqrt(W.dot(u).dot(u))
        fail = 0
        return sign * funz, g, fail

    opt_prob = Optimization('ODAA problem', objfun)
    opt_prob.addObj('funz')
    opt_prob.addCon('budget const', 'e')
    opt_prob.addCon('VaR const', 'i')
    slsqp = SLSQP()  # instantiate Optimizer
    slsqp.setOption('IPRINT', -1)
    opt_prob.addVarGroup('u',
                         model.M,
                         'c',
                         lower=np.zeros(model.M),
                         upper=np.ones(model.M),
                         value=u0)
    #print opt_prob # print optimization problem
    slsqp(opt_prob,
          sens_type='FD',
          int_domain=int_domain,
          J=J,
          a=a,
          model=model,
          sign=sign)
    #print opt_prob.solution(0) # print solution
    return opt_prob
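A possible call site for this example (hypothetical names: model must expose pf(), W, VaR, alpha and M, and int_domain, J and a come from the surrounding ODAA setup). Reading the optimal u back mirrors the _variables pattern used elsewhere on this page:

import numpy as np

u0 = np.ones(model.M) / model.M  # start from an equal-weight allocation
prob = solveOpt(int_domain, J, a, model, u0, sign=-1)  # sign=-1 maximises
sol = prob.solution(0)
u_opt = np.array([sol._variables[i].value for i in range(model.M)])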
Example #3
    def __init__(self, nlpy_model, **kwargs):
        """
        :parameters:

            :nlpy_model: nonlinear problem from the NLPModel class or
                         from AmplModel class
        """

        if nlpy_model.__module__ == 'nlpy.model.amplpy':
            print('AMPL model')
            print("The AMPL interface does not handle complex values")
            print("Choose 'FD' or 'opt_prob.grad_func' for sensitivity")

        # Initialize model.
        Optimization.__init__(self, nlpy_model.name,
                              lambda x: (self.nlpy_model.obj(x),
                                         self.nlpy_model.cons(x).tolist(), 0),
                              var_set={}, obj_set={}, con_set={},
                              use_groups=False, **kwargs)

        self.nlpy_model = nlpy_model

        self.addObj('f')

        # Assigning lower and upper bounds on variables
        for i in range(0,self.nlpy_model.n):

            if i in self.nlpy_model.lowerB:
                self.addVar("x"+ "%d"%(i+1), lower=self.nlpy_model.Lvar[i],
                        upper=numpy.inf, value=self.nlpy_model.x0[i])
            elif i in self.nlpy_model.upperB:
                self.addVar("x"+ "%d"%(i+1), lower=-numpy.inf,
                        upper=self.nlpy_model.Uvar[i], value=self.nlpy_model.x0[i])
            elif i in self.nlpy_model.rangeB:
                self.addVar("x"+ "%d"%(i+1), lower=self.nlpy_model.Lvar[i],
                        upper=self.nlpy_model.Uvar[i], value=self.nlpy_model.x0[i])
            elif i in self.nlpy_model.freeB:
                self.addVar("x"+ "%d"%(i+1), value=self.nlpy_model.x0[i],
                            lower=-numpy.inf, upper=numpy.inf)

        # Assigning lower and upper bounds on constraints
        for i in range(0,nlpy_model.m):

            if i in nlpy_model.lowerC:
                self.addCon("g"+"%d"%(i+1), 'i', lower=nlpy_model.Lcon[i],
                            upper=numpy.inf)
            elif i in nlpy_model.upperC:
                self.addCon("g"+"%d"%(i+1), 'i', lower=-numpy.inf,
                            upper=nlpy_model.Ucon[i])
            elif i in nlpy_model.rangeC:
                self.addCon("g"+"%d"%(i+1), 'i', lower=nlpy_model.Lcon[i],
                            upper=nlpy_model.Ucon[i])
            elif i in nlpy_model.equalC:
                self.addCon("g"+"%d"%(i+1), 'e', equal=nlpy_model.Lcon[i])
Example #4
def translateProgramToPyOpt(dfovecProgram):
    # Currently only handles inequality constraints
    def objfunc(x):
        f = dfovecProgram.objective(x)

        g = []
        if dfovecProgram.hasInequalityConstraints():
            g = dfovecProgram.inequalityConstraints(x)

        fail = 0
        return f, g, fail

    opt_prob = Optimization('Dfovec problem', objfunc)
    for i in range(len(dfovecProgram.x0)):
        opt_prob.addVar('x' + str(i),
                        lower=-1000.0,
                        upper=1000.0,
                        value=dfovecProgram.x0[i])
    opt_prob.addObj('f')

    numIneq = dfovecProgram.getNumInequalityConstraints()
    opt_prob.addConGroup('g',
                         numIneq,
                         type='i',
                         lower=[-10000] * numIneq,
                         upper=[0] * numIneq)

    print(opt_prob)
    return opt_prob
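The returned problem can then be handed to any pyOpt optimizer. A sketch using SLSQP with finite-difference gradients, as in the other examples on this page (dfovecProgram is assumed to be given):

from pyOpt import SLSQP

opt_prob = translateProgramToPyOpt(dfovecProgram)
slsqp = SLSQP()
slsqp.setOption('IPRINT', -1)
slsqp(opt_prob, sens_type='FD')
print(opt_prob.solution(0))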
Example #5
def otimiza(obitos, x0):
    a, b, c, d = x0
    partial_func = partial(problema, obitos=obitos)
    update_wrapper(partial_func, problema)

    # Instantiate Optimization Problem
    opt_prob = Optimization('Rosenbrock Unconstrained Problem', partial_func)
    # opt_prob.addVarGroup('x', 2, 'c', lower=-1e10, upper=0.5, value=-3.0)
    # opt_prob.addVar('x1', 'c', lower=-10, upper=10, value=-3.0)
    # opt_prob.addVar('x2', 'c', lower=-10, upper=10, value=-4.0)
    opt_prob.addVar('A', 'c', lower=0, upper=2000, value=a)
    opt_prob.addVar('B', 'c', lower=0, upper=2000, value=b)
    opt_prob.addVar('C', 'c', lower=0, upper=2, value=c)
    opt_prob.addVar('D', 'c', lower=0, upper=1, value=d)
    # opt_prob.addCon('C', type='i', lower=0, upper=5, equal=c)
    # opt_prob.addCon('D', type='i', lower=0, upper=1, equal=d)
    opt_prob.addObj('f')
    # print(opt_prob)

    # from pyOpt.pySLSQP.pySLSQP import SLSQP
    # sopt = SLSQP()
    # sopt.setOption('IPRINT', -1)

    from pyOpt.pySOLVOPT.pySOLVOPT import SOLVOPT
    sopt = SOLVOPT()
    sopt.setOption('iprint', -1)

    [fstr, xstr, inform] = sopt(opt_prob, sens_type='FD')
    solution = getlastsolution(opt_prob)
    print(solution)
    return xstr, solution
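A hypothetical driver: obitos is the observed series that problema fits against, and x0 seeds (A, B, C, D) inside the bounds declared above.

obitos = [2, 5, 9, 14, 22, 31, 45, 60]  # made-up observations
xstr, solution = otimiza(obitos, x0=(100.0, 50.0, 0.5, 0.1))
print(xstr)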
def solveOpt(int_domain, J, a, model, u0, sign):
    '''
    INPUT:
        int_domain = 
        J = 
        a = 
        model = 
        u0 =
        sign = 
    OUTPUT:
        opt_prob = 
    '''
    def objfun(u, **kwargs):
        '''objfun defines the optimization problem using the pyOpt syntax'''
        # 1) extract parameters
        int_domain = kwargs['int_domain']
        J = kwargs['J']
        x = kwargs['a']
        model = kwargs['model']
        sign = kwargs['sign']
        # 2) define objective function and constraints
        funz = np.trapz(x=int_domain, y=J * model.pf(int_domain, u, x))
        g = []
        fail = 0
        return sign * funz, g, fail

    opt_prob = Optimization('ODAA problem', objfun)
    opt_prob.addObj('funz')
    solver = SLSQP()  # choose the solver
    solver.setOption('IPRINT', -1)
    opt_prob.addVar('u', 'c', lower=-1, upper=1, value=u0)
    #print opt_prob # print optimization problem
    solver(opt_prob, int_domain=int_domain, J=J, a=a, model=model, sign=sign)
    #print opt_prob.solution(0) # print solution
    return opt_prob
Example #7
    def infill(self, points, method='error'):
        ## We'll be making non-permanent modifications to self.X and self.y here, so let's make a copy just in case
        initX = np.copy(self.X)
        inity = np.copy(self.y)

        ## This array will hold the new values we add
        returnValues = np.zeros([points, self.k], dtype=float)

        for i in range(points):
            opt_prob1 = Optimization('InFillPSO',
                                     self.errorObjective_normalized)
            for k in range(self.k):
                opt_prob1.addVar('{0}'.format(k),
                                 'c',
                                 lower=0,
                                 upper=1,
                                 value=.5)

            pso1 = ALPSO()
            pso1.setOption('SwarmSize', 100)
            pso1.setOption('maxOuterIter', 100)
            pso1.setOption('stopCriteria', 1)
            pso1(opt_prob1)

            newpoint = np.zeros(self.k)

            for j in range(self.k):
                newpoint[j] = opt_prob1.solution(0)._variables[j].value
            returnValues[i][:] = self.inversenormX(newpoint)
            self.addPoint(returnValues[i],
                          self.predict(returnValues[i]),
                          norm=True)
            self.updateModel()
            del (opt_prob1)
            del (pso1)
        self.X = np.copy(initX)
        self.y = np.copy(inity)
        self.n = len(self.X)
        self.updateModel()
        return returnValues
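A hedged usage sketch, assuming this method belongs to a surrogate-model class instance that already holds training data in X and y:

# Hypothetical: `surrogate` is an instance of the surrogate-model class this
# method belongs to; three new error-maximising sample points are returned.
new_samples = surrogate.infill(points=3)
print(new_samples)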
    def configure(self, building):

        # Get building and optimization setup properties
        self.building = deepcopy(building)
        (self.T, self.states, self.actions, self.disturbances,
         self.controlLim, self.actionLim, self.comfort, self.occ,
         self.nvars, self.ncons) = self.building.getConfiguration()

        # Define Box Constraints (min/max values) for the control parameters
        boxConstraints = []
        for ii in range(self.nvars):
            boxConstraints.append(self.controlLim)

        # Link to the python function calculating the cost and the constraints
        self.opt_prob = Optimization('SLSQP Constrained Problem',
                                     self.wrapSimulation)

        # Set up box constraints in pyOpt
        for ii in range(self.nvars):
            self.opt_prob.addVar('x' + str(ii + 1),
                                 'c',
                                 lower=boxConstraints[ii][0],
                                 upper=boxConstraints[ii][1],
                                 value=self.building.policy[0, ii])

        # Set up the cost function in pyOpt
        self.opt_prob.addObj('f')

        # Set up inequality constraints in pyOpt
        for ii in range(self.ncons):
            self.opt_prob.addCon('g' + str(ii + 1), 'i')

        # Print the Optimization setup
        print("----------------------------------------")
        print("----------------------------------------")
        print("SLSQP Optimization setup:")
        print(self.opt_prob)
Example #9
def get_pyopt_optimization(f, g_f, con, g_con, x0, T):
    opt_prob = Optimization('stoc planner', obj_fun(f, con))
    opt_prob.addObj('f')
    opt_prob.addVarGroup('flat_plan', 
                         x0.size, 
                         type='c', 
                         value = x0,
                         lower = 0.,
                         upper = 1.0)
    opt_prob.addConGroup('g', T, 'e')
    
#     opt = SLSQP()
#     opt = pySNOPT.SNOPT()
#     opt = PSQP()
#     opt = CONMIN()
    opt = ALGENCAN()
    
    return opt_prob, opt
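A possible driver for the returned pair; f, g_f, con, g_con, x0 and T are assumed to be supplied by the planner (the gradient callbacks are accepted but unused above):

opt_prob, opt = get_pyopt_optimization(f, g_f, con, g_con, x0, T)
opt(opt_prob, sens_type='FD')  # ALGENCAN with finite-difference gradients
print(opt_prob.solution(0))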
Example #11
    g_con[1][0] = x[1] * x[2] * x[3]
    g_con[1][1] = x[0] * x[2] * x[3]
    g_con[1][2] = x[0] * x[1] * x[3]
    g_con[1][3] = x[0] * x[1] * x[2]

    fail = 0

    return g_obj, g_con, fail


# =============================================================================
#
# =============================================================================

# Instantiate Optimization Problem
opt_prob = Optimization('HS 071', objfunc)
opt_prob.addVar('x1', 'c', value=1., lower=1., upper=5.)
opt_prob.addVar('x2', 'c', value=5., lower=1., upper=5.)
opt_prob.addVar('x3', 'c', value=5., lower=1., upper=5.)
opt_prob.addVar('x4', 'c', value=1., lower=1., upper=5.)
opt_prob.addObj('f')
opt_prob.addCon('g1', 'e', equal=40.)
opt_prob.addCon('g2', 'i', lower=25., upper=numpy.inf)

print(opt_prob)

# Instantiate Optimizer (IPOPT)
ipopt = IPOPT()

# Solve Problem with Optimizer Using Finite Differences
ipopt.setOption('output_file', 'ipopt.out')
#==============================================================================
# Start Matlab engine
#==============================================================================
eng = matlab.engine.start_matlab()
#Go to directory where matlab file is
eng.cd('..')
eng.cd('SMA_temperature_strain_driven')

# =============================================================================
# 
# =============================================================================
chord = 1.
x_hinge = 0.75
safety = 0.005*chord

opt_prob = Optimization('Static model optimization',objfunc)
#xs_n
opt_prob.addVar('x1', 'c', lower = x_hinge/2. , upper = x_hinge - safety, value = 6.817445e-001)
#ys_n
opt_prob.addVar('x2', 'c', lower = -.9, upper = -0., value = -5.216475e-001)
#xs_p
opt_prob.addVar('x3', 'c', lower = x_hinge + safety, upper = chord - safety, value = 9.029895e-001)
#ys_p
opt_prob.addVar('x4', 'c', lower = 0., upper = .9, value = 8.726738e-001)
#xl_n
opt_prob.addVar('x5', 'c',  lower = x_hinge/2., upper = x_hinge - safety, value = 6.958111e-001)
#yl_n
opt_prob.addVar('x6', 'c', lower = -.9, upper = 0.9, value = -4.593744e-001)
#xl_p
opt_prob.addVar('x7', 'c', lower = x_hinge + safety, upper = chord - safety, value =  8.187166e-001)
#yl_p
def main():
    ###########################################
    # Define some values
    ###########################################
    n_blades = 2
    n_elements = 10
    radius = unit_conversion.in2m(9.6) / 2
    root_cutout = 0.1 * radius
    dy = float(radius - root_cutout) / n_elements
    dr = float(1) / n_elements
    y = root_cutout + dy * np.arange(1, n_elements + 1)
    r = y / radius
    pitch = 0.0
    airfoils = (('SDA1075_494p', 0.0, 1.0), )
    allowable_Re = [
        1000000., 500000., 250000., 100000., 90000., 80000., 70000., 60000.,
        50000., 40000., 30000., 20000., 10000.
    ]
    vehicle_weight = 12.455
    max_chord = 0.3
    max_chord_tip = 5.
    alt = 0
    tip_loss = True
    mach_corr = False

    # Forward flight parameters
    v_inf = 4.  # m/s
    alpha0 = 0.0454  # Starting guess for trimmed alpha in radians
    n_azi_elements = 5

    # Mission times
    time_in_hover = 300.  # Time in seconds
    time_in_ff = 500.
    mission_time = [time_in_hover, time_in_ff]

    Cl_tables = {}
    Cd_tables = {}
    Clmax = {}
    # Get lookup tables
    if any(airfoil[0] != 'simple' for airfoil in airfoils):
        for airfoil in airfoils:
            Cl_table, Cd_table, Clmax_af = aero_coeffs.create_Cl_Cd_table(
                airfoil[0])

            Cl_tables[airfoil[0]] = Cl_table
            Cd_tables[airfoil[0]] = Cd_table
            Clmax[airfoil[0]] = Clmax_af

    # Create list of Cl functions. One for each Reynolds number. Cl_tables (and Cd_tables) will be empty for the
    # 'simple' case, therefore this will be skipped for the simple case. For the full table lookup case this will be
    # skipped because allowable_Re will be empty.
    Cl_funs = {}
    Cd_funs = {}
    lift_curve_info_dict = {}
    if Cl_tables and allowable_Re:
        Cl_funs = dict(
            zip(allowable_Re, [
                aero_coeffs.get_Cl_fun(Re, Cl_tables[airfoils[0][0]],
                                       Clmax[airfoils[0][0]][Re])
                for Re in allowable_Re
            ]))
        Cd_funs = dict(
            zip(allowable_Re, [
                aero_coeffs.get_Cd_fun(Re, Cd_tables[airfoils[0][0]])
                for Re in allowable_Re
            ]))
        lift_curve_info_dict = aero_coeffs.create_liftCurveInfoDict(
            allowable_Re, Cl_tables[airfoils[0][0]])

    ###########################################
    # Set design variable bounds
    ###########################################
    # Hover opt 500 gen, 1000 pop, 12.455 N weight, 9.6 in prop
    chord = np.array([
        0.11923604, 0.2168746, 0.31540216, 0.39822882, 0.42919, 0.35039799,
        0.3457828, 0.28567224, 0.23418368, 0.13502483
    ])
    twist = np.array([
        0.45316866, 0.38457724, 0.38225075, 0.34671967, 0.33151445, 0.28719111,
        0.25679667, 0.25099005, 0.19400679, 0.10926302
    ])
    omega = 3811.03596674 * 2 * np.pi / 60
    original = (omega, chord, twist)

    dtwist = np.array(
        [twist[i + 1] - twist[i] for i in range(len(twist) - 1)])
    dchord = np.array(
        [chord[i + 1] - chord[i] for i in range(len(chord) - 1)])
    twist0 = twist[0]
    chord0 = chord[0]

    omega_start = omega

    dtwist_start = dtwist
    dchord_start = dchord
    twist0_start = twist0
    chord0_start = chord0

    omega_lower = 2000 * 2 * np.pi / 60
    omega_upper = 8000.0 * 2 * np.pi / 60

    twist0_lower = 0. * 2 * np.pi / 360
    twist0_upper = 60. * 2 * np.pi / 360

    chord0_upper = 0.1198
    chord0_lower = 0.05

    dtwist_lower = -10.0 * 2 * np.pi / 360
    dtwist_upper = 10.0 * 2 * np.pi / 360
    dchord_lower = -0.1
    dchord_upper = 0.1

    opt_prob = Optimization('Mission Simulator', objfun)
    opt_prob.addVar('omega_h',
                    'c',
                    value=omega_start,
                    lower=omega_lower,
                    upper=omega_upper)
    opt_prob.addVar('twist0',
                    'c',
                    value=twist0_start,
                    lower=twist0_lower,
                    upper=twist0_upper)
    opt_prob.addVar('chord0',
                    'c',
                    value=chord0_start,
                    lower=chord0_lower,
                    upper=chord0_upper)
    opt_prob.addVarGroup('dtwist',
                         n_elements - 1,
                         'c',
                         value=dtwist_start,
                         lower=dtwist_lower,
                         upper=dtwist_upper)
    opt_prob.addVarGroup('dchord',
                         n_elements - 1,
                         'c',
                         value=dchord_start,
                         lower=dchord_lower,
                         upper=dchord_upper)
    opt_prob.addObj('f')
    opt_prob.addCon('thrust', 'i')
    opt_prob.addCon('c_tip', 'i')
    opt_prob.addConGroup('c_lower', n_elements, 'i')
    opt_prob.addConGroup('c_upper', n_elements, 'i')
    print(opt_prob)

    slsqp = SLSQP()
    slsqp.setOption('IPRINT', 1)
    slsqp.setOption('MAXIT', 1000)
    slsqp.setOption('ACC', 1e-8)
    fstr, xstr, inform = slsqp(opt_prob,
                               sens_type='FD',
                               n_blades=n_blades,
                               radius=radius,
                               dy=dy,
                               dr=dr,
                               y=y,
                               r=r,
                               pitch=pitch,
                               airfoils=airfoils,
                               vehicle_weight=vehicle_weight,
                               max_chord=max_chord,
                               tip_loss=tip_loss,
                               mach_corr=mach_corr,
                               Cl_funs=Cl_funs,
                               Cd_funs=Cd_funs,
                               Cl_tables=Cl_tables,
                               Cd_tables=Cd_tables,
                               allowable_Re=allowable_Re,
                               alt=alt,
                               v_inf=v_inf,
                               alpha0=alpha0,
                               mission_time=mission_time,
                               n_azi_elements=n_azi_elements,
                               lift_curve_info_dict=lift_curve_info_dict,
                               max_chord_tip=max_chord_tip)
    print(opt_prob.solution(0))

    # pop_size = 300
    # max_gen = 500
    # opt_method = 'nograd'
    # nsga2 = NSGA2()
    # nsga2.setOption('PrintOut', 2)
    # nsga2.setOption('PopSize', pop_size)
    # nsga2.setOption('maxGen', max_gen)
    # nsga2.setOption('pCross_real', 0.85)
    # nsga2.setOption('xinit', 1)
    # fstr, xstr, inform = nsga2(opt_prob, n_blades=n_blades, radius=radius, dy=dy, dr=dr, y=y, r=r, pitch=pitch,
    #                            airfoils=airfoils, vehicle_weight=vehicle_weight, max_chord=max_chord, tip_loss=tip_loss,
    #                            mach_corr=mach_corr, Cl_funs=Cl_funs, Cd_funs=Cd_funs, Cl_tables=Cl_tables,
    #                            Cd_tables=Cd_tables, allowable_Re=allowable_Re, opt_method=opt_method, alt=alt,
    #                            v_inf=v_inf, alpha0=alpha0, mission_time=mission_time, n_azi_elements=n_azi_elements,
    #                            pop_size=pop_size, max_gen=max_gen, lift_curve_info_dict=lift_curve_info_dict,
    #                            max_chord_tip=max_chord_tip)
    # print opt_prob.solution(0)

    # opt_method = 'nograd'
    # xstart_alpso = np.concatenate((np.array([omega_start, twist0_start, chord0_start]), dtwist_start, dchord_start))
    # alpso = ALPSO()
    # alpso.setOption('xinit', 0)
    # alpso.setOption('SwarmSize', 200)
    # alpso.setOption('maxOuterIter', 100)
    # alpso.setOption('stopCriteria', 0)
    # fstr, xstr, inform = alpso(opt_prob, xstart=xstart_alpso,  n_blades=n_blades, n_elements=n_elements,
    #                            root_cutout=root_cutout, radius=radius, dy=dy, dr=dr, y=y, r=r, pitch=pitch,
    #                            airfoils=airfoils, thrust=thrust, max_chord=max_chord, tip_loss=tip_loss,
    #                            mach_corr=mach_corr, Cl_funs=Cl_funs, Cd_funs=Cd_funs, Cl_tables=Cl_tables,
    #                            Cd_tables=Cd_tables, allowable_Re=allowable_Re, opt_method=opt_method)
    # print opt_prob.solution(0)

    def get_performance(o, c, t):
        chord_meters = c * radius
        prop = propeller.Propeller(t,
                                   chord_meters,
                                   radius,
                                   n_blades,
                                   r,
                                   y,
                                   dr,
                                   dy,
                                   airfoils=airfoils,
                                   Cl_tables=Cl_tables,
                                   Cd_tables=Cd_tables)
        quad = quadrotor.Quadrotor(prop, vehicle_weight)

        ff_kwargs = {
            'propeller': prop,
            'pitch': pitch,
            'n_azi_elements': n_azi_elements,
            'allowable_Re': allowable_Re,
            'Cl_funs': Cl_funs,
            'Cd_funs': Cd_funs,
            'tip_loss': tip_loss,
            'mach_corr': mach_corr,
            'alt': alt,
            'lift_curve_info_dict': lift_curve_info_dict
        }
        trim0 = np.array([alpha0, o])
        alpha_trim, omega_trim, converged = trim.trim(quad, v_inf, trim0,
                                                      ff_kwargs)
        T_ff, H_ff, P_ff = bemt.bemt_forward_flight(
            quad,
            pitch,
            omega_trim,
            alpha_trim,
            v_inf,
            n_azi_elements,
            alt=alt,
            tip_loss=tip_loss,
            mach_corr=mach_corr,
            allowable_Re=allowable_Re,
            Cl_funs=Cl_funs,
            Cd_funs=Cd_funs,
            lift_curve_info_dict=lift_curve_info_dict)

        dT_h, P_h = bemt.bemt_axial(prop,
                                    pitch,
                                    o,
                                    allowable_Re=allowable_Re,
                                    Cl_funs=Cl_funs,
                                    Cd_funs=Cd_funs,
                                    tip_loss=tip_loss,
                                    mach_corr=mach_corr,
                                    alt=alt)
        return sum(dT_h), P_h, T_ff, P_ff, alpha_trim, omega_trim

    omega = xstr[0]
    twist0 = xstr[1]
    chord0 = xstr[2]
    dtwist = xstr[3:3 + len(r) - 1]
    dchord = xstr[3 + len(r) - 1:]

    twist = calc_twist_dist(twist0, dtwist)
    chord = calc_chord_dist(chord0, dchord)

    print "chord = " + repr(chord)
    print "twist = " + repr(twist)

    # twist_base = calc_twist_dist(twist0_base, dtwist_base)
    # chord_base = calc_chord_dist(chord0_base, dchord_base)

    perf_opt = get_performance(omega, chord, twist)
    perf_orig = get_performance(original[0], original[1], original[2])

    print "omega_orig = " + str(original[0])
    print "Hover Thrust of original = " + str(perf_orig[0])
    print "Hover Power of original = " + str(perf_orig[1])
    print "FF Thrust of original = " + str(perf_orig[2])
    print "FF Power of original = " + str(perf_orig[3])
    print "Trim original (alpha, omega) = (%f, %f)" % (perf_orig[4],
                                                       perf_orig[5])

    print "omega = " + str(omega * 60 / 2 / np.pi)
    print "Hover Thrust of optimized = " + str(perf_opt[0])
    print "Hover Power of optimized = " + str(perf_opt[1])
    print "FF Thrust of optimized = " + str(perf_opt[2])
    print "FF Power of optimized = " + str(perf_opt[3])
    print "Trim optimized (alpha, omega) = (%f, %f)" % (perf_opt[4],
                                                        perf_opt[5])
    # print "Omega base = " + str(omega_start*60/2/np.pi)
    # print "Thrust of base = " + str(sum(perf_base[0]))
    # print "Power of base = " + str(sum(perf_base[1]))
    #
    plt.figure(1)
    plt.plot(r, original[1], '-b')
    plt.plot(r, chord, '-r')
    plt.xlabel('radial location')
    plt.ylabel('c/R')
    plt.legend(['start', 'opt'])

    plt.figure(2)
    plt.plot(r, original[2] * 180 / np.pi, '-b')
    plt.plot(r, twist * 180 / np.pi, '-r')
    plt.xlabel('radial location')
    plt.ylabel('twist')
    plt.legend(['start', 'opt'])

    plt.show()
Example #14
    f = x0**2 + x1**2
    g = [0.0]*2
    g[0] = 3 - x0
    g[1] = 2 - x1
    
    fail = 0
    
    return f,g,fail
    

# =============================================================================
# 
# ============================================================================= 

# Instantiate Optimization Problem
opt_prob = Optimization('TOY Constrained Problem',objfunc,use_groups=True)
opt_prob.addVarGroup('a',2,'c',value=1.0, lower=0.0, upper=10)
opt_prob.delVarGroup('a')
opt_prob.addVar('x','c',value=1.0, lower=0.0, upper=10)
opt_prob.addVarGroup('y',2,'c',value=1.0, lower=0.0, upper=10)
opt_prob.delVarGroup('y')
opt_prob.addVarGroup('z',1,'c',value=1.0, lower=0.0, upper=10)
opt_prob.addVarGroup('b',5,'c',value=3.0, lower=0.0, upper=10)
opt_prob.delVarGroup('b')
opt_prob.addObj('f')
opt_prob.addCon('g1','i')
opt_prob.addCon('g2','i')
print(opt_prob)

# Instantiate Optimizer (SLSQP) & Solve Problem
slsqp = SLSQP()
Example #15
def theor_variogram(experimental_sv, Sb=(0.01,400), Rb=(2,20), Nb=(0,400),
                    ab=(0,2), vb=(0,1000), candidate_sv=None,
                    candidate_sv_tag=None):
    '''
    Fitting of theoretical variogram
    Parameters
    ----------
        **experimental_sv** -- Experimental semivariogram ''[x,2]'', lag and \
            semivariogram \n
        **Sb** -- Boundaries on Sill of semivariogram ``(min,max)`` \n
        **Rb** -- Boundaries on Range of semivariogram ``(min,max)`` \n
        **Nb** -- Boundaries on Nugget of semivariogram ``(min,max)`` \n
        **ab** -- Boundaries on Power of semivariogram ``(min,max)`` (only \
            valid for power semivariogram) \n
        **vb** -- Boundaries on Shape parameter of semivariogram ``(min,max)``\
            (only valid for matérn type) \n
    
    Returns
    -------
        **xopt** -- Vector with optimal semivariogram parameters ``[5]`` \n
        **ModOpt** -- Pointer to optimal vector location \n
        **candidate_sv** -- Array with pointer to functions in variogram_fit \
        module
    '''                      
    
    if candidate_sv is None:
        # Array with functions to be called from the Variograms library
        candidate_sv = [variogram_fit.exponential_sv, 
                        variogram_fit.gaussian_sv]
     
    if candidate_sv_tag is None:
        # Names of functions for display only
        candidate_sv_tag = ['Exponential', 'Gaussian']
    
    # Initial seed for variogram fit
    sr = random.uniform(Sb[0], Sb[1])
    rr = random.uniform(Rb[0], Rb[1])
    nr = random.uniform(Nb[0], Nb[1])
    ar = random.uniform(ab[0], ab[1])
    vr = random.uniform(vb[0], vb[1])
    
    Var = []
    Res = []
    Mdl = [] 
    
    # Wrapper of minimisation function (RMSE) for semivariogram fitting
    def _opt_fun(x,*args):
        F, g, fail = variogram_fit.fit_function(x, experimental_sv, 
                                                j,candidate_sv)
        if F == ERROR_CODE:
            fail = 1

        else:
            Var.append(x)
            Res.append(F)
            Mdl.append(j)
        return F, g, fail
    
    # Optimisation starts to minimise differences between experimental and 
    # theoretical semivariograms
    for j in range(len(candidate_sv)):
        VarProb = Optimization('Variogram Fitting: ' + candidate_sv_tag[j], 
                               _opt_fun)
        VarProb.addObj('RMSE')
        VarProb.addVar('Sill', 'c', lower=Sb[0], upper=Sb[1], value=sr)
        VarProb.addVar('Range', 'c', lower=Rb[0], upper=Rb[1], value=rr)
        VarProb.addVar('Nugget', 'c', lower=Nb[0], upper=Nb[1], value=nr)
        VarProb.addVar('Exponent (a)', 'c', lower=ab[0], upper=ab[1], value=ar)
        VarProb.addVar('Rank (v)', 'c', lower=vb[0], upper=vb[1], value=vr)
        
        args = (experimental_sv, j, candidate_sv, Var, Res, Mdl)
        optmz = ALHSO()
        optmz.setOption('fileout',0)
        optmz(VarProb)

    # Get pointer to best semivariogram
    k = np.argmin(Res)
    xopt = Var[k]
    ModOpt = Mdl[k]
    
    return xopt, ModOpt, candidate_sv
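A hedged usage sketch with a synthetic experimental semivariogram ([lag, semivariance] rows, as the docstring describes), assuming the variogram_fit module and ERROR_CODE constant this function depends on are importable:

import numpy as np

lags = np.linspace(1.0, 20.0, 25)
semivar = 30.0 * (1.0 - np.exp(-lags / 5.0))  # synthetic exponential shape
experimental_sv = np.column_stack([lags, semivar])

xopt, ModOpt, candidates = theor_variogram(experimental_sv)
print('Best model:', candidates[ModOpt].__name__, 'parameters:', xopt)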
Example #16
def getlastsolution(prob: Optimization):
    new_index = prob.firstavailableindex(prob.getSolSet())
    return prob.getSol(new_index - 1)
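pyOpt appends each optimizer run to the problem's solution set, so the most recent run sits just before the first free index. A quick sketch, assuming an already-solved problem:

# opt_prob has been solved at least once by some optimizer.
last = getlastsolution(opt_prob)
print(last)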
Example #17
    f = -x[0] * x[1] * x[2]
    g = [0.0] * 2
    g[0] = x[0] + 2. * x[1] + 2. * x[2] - 72.0
    g[1] = -x[0] - 2. * x[1] - 2. * x[2]

    time.sleep(0.5)

    fail = 0
    return f, g, fail

# =============================================================================
#
# =============================================================================

# Instantiate Optimization Problem
opt_prob = Optimization('TP37 Constrained Problem', objfunc)
opt_prob.addVar('x1', 'c', lower=0.0, upper=42.0, value=10.0)
opt_prob.addVar('x2', 'c', lower=0.0, upper=42.0, value=10.0)
opt_prob.addVar('x3', 'c', lower=0.0, upper=42.0, value=10.0)
opt_prob.addObj('f')
opt_prob.addCon('g1', 'i')
opt_prob.addCon('g2', 'i')

# Instantiate Optimizer (SLSQP)
slsqp = SLSQP()
slsqp.setOption('IPRINT', -1)

# Solve Problem (Without Parallel Gradient)
slsqp(opt_prob, sens_type='CS')

# end
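Here sens_type='CS' requests complex-step gradients, so the objective must be complex-safe; the result stays attached to the problem object and can be inspected afterwards:

# Retrieve and display the stored solution of the run above.
print(opt_prob.solution(0))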
Example #18
        g=[]
        fail=1

    os.chdir('../..')
    
    CASECOUNT+=1
        
    return f,g,fail
    

# =============================================================================
# 
# =============================================================================

# Instantiate Optimization Problem 
opt_prob = Optimization('Minimize Drag',objfunc)
opt_prob.addVar('x1','c',lower=1.,upper=25.,value=12.0)
opt_prob.addVar('x2','c',lower=1.,upper=30.,value=15.0)
opt_prob.addVar('x3','c',lower=1.,upper=30.,value=15.0)
opt_prob.addVar('x4','c',lower=1.,upper=25.,value=12.0)
opt_prob.addVar('x5','c',lower=1.,upper=25.,value=12.0)
opt_prob.addVar('x6','c',lower=1.,upper=13.,value=6.0)
opt_prob.addObj('f')
#opt_prob.addCon('g','i')
print(opt_prob)

mkdirCommand='mkdir -p workDir'
subprocess.call(mkdirCommand,shell=True)

# Instantiate Optimizer (NSGA2) & Solve Problem
nsga2 = NSGA2()
Example #19
def variogram_fit(SVExp,
                  Sb=(0.01, 400),
                  Rb=(2, 20),
                  Nb=(0, 400),
                  ab=(0, 2),
                  vb=(0, 1000)):
    # Array with functions to be called from the Variograms library
    VarFunArr = [
        VariogramFit.SVExponential, VariogramFit.SVGaussian,
        VariogramFit.SVSpherical, VariogramFit.SVCubic,
        VariogramFit.SVPentaspherical, VariogramFit.SVSinehole,
        VariogramFit.SVPower, VariogramFit.SVMatern
    ]

    # Names of functions for display only
    optFunNam = [
        'Exponential', 'Gaussian', 'Spherical', 'Cubic', 'Pentaspherical',
        'Sinehole', 'Power', 'Matern'
    ]

    # Boundaries semivariogram parameters
    #Sb = (0.01,400) # Limit for the sill
    #Rb = (2,20) # Limit for the range
    #Nb = (0,400) # Limit for the Nugget effect
    #ab = (0,2) # Limit for a in power variogram
    #vb = (0,1000) # Limit for Matern v parameters

    # Initial seed for variogram fit
    sr = random.uniform(Sb[0], Sb[1])
    rr = random.uniform(Rb[0], Rb[1])
    nr = random.uniform(Nb[0], Nb[1])
    ar = random.uniform(ab[0], ab[1])
    vr = random.uniform(vb[0], vb[1])

    Var = []
    Res = []
    Mdl = []

    # Wrapper of minimisation function (RMSE) for semivariogram fitting
    def OptFun(x, *args):
        F, g, fail = VariogramFit.optFunMaster(x, SVExp, j, VarFunArr)
        if F == 9999:
            fail = 1
        else:
            Var.append(x)
            Res.append(F)
            Mdl.append(j)
        return F, g, fail

    print('Initialising Variogram fit')
    print('')

    # Optimisation starts to minimise differences between experimental and
    # theoretical semivariograms
    for j in range(len(VarFunArr)):

        print('Variogram Fitting ' + optFunNam[j])
        print('')

        VarProb = Optimization('Variogram Fitting: ' + optFunNam[j], OptFun)
        VarProb.addObj('RMSE')
        VarProb.addVar('Sill', 'c', lower=Sb[0], upper=Sb[1], value=sr)
        VarProb.addVar('Range', 'c', lower=Rb[0], upper=Rb[1], value=rr)
        VarProb.addVar('Nugget', 'c', lower=Nb[0], upper=Nb[1], value=nr)
        VarProb.addVar('Exponent (a)', 'c', lower=ab[0], upper=ab[1], value=ar)
        VarProb.addVar('Rank (v)', 'c', lower=vb[0], upper=vb[1], value=vr)

        args = (SVExp, j, VarFunArr, Var, Res, Mdl)
        optmz = ALHSO()
        optmz(VarProb)

        print(VarProb.solution(0))
        print('')

    # Get position of best semivariogram
    k = numpy.argmin(Res)
    xopt = Var[k]
    ModOpt = Mdl[k]
    del Var
    del Res
    del Mdl

    print('Theoretical variogram fit - Done!')
    print('')
    return xopt, ModOpt, VarFunArr
Example #20
    g_con[2][0] = 4.*x[0] + 2
    g_con[2][1] = 2.*x[1] - 1
    g_con[2][2] = 2.*x[2]
    g_con[2][3] = -1.
    
    fail = 0
    
    return g_obj,g_con,fail
    

# =============================================================================
# 
# =============================================================================

# Instantiate Optimization Problem
opt_prob = Optimization('Constrained Rosen-Suzuki',objfunc)
opt_prob.addVar('x1','c',value=1.5)
opt_prob.addVar('x2','c',value=1.5)
opt_prob.addVar('x3','c',value=1.5)
opt_prob.addVar('x4','c',value=1.5)
opt_prob.addObj('f')
opt_prob.addCon('g1','i')
opt_prob.addCon('g2','i')
opt_prob.addCon('g3','i')
print(opt_prob)

# Instantiate Optimizer (CONMIN)
conmin = CONMIN()

# Solve Problem with Optimizer Using Finite Differences
conmin(opt_prob,sens_type='FD')
#==============================================================================
# Start Matlab engine
#==============================================================================
eng = matlab.engine.start_matlab()
#Go to directory where matlab file is
eng.cd('..')
eng.cd('SMA_temperature_strain_driven')

# =============================================================================
# 
# =============================================================================
chord = 1.
x_hinge = 0.75
safety = 0.005*chord

opt_prob = Optimization('Static model optimization', objfunc)
#l_s
opt_prob.addVar('x1', 'c', lower = 0.1 , upper = 0.6, value = 0.2)
#l_l
opt_prob.addVar('x2', 'c', lower = 0.1, upper = 0.6, value = 0.2)
#R
opt_prob.addVar('x5', 'c',  lower = 0.001, upper = 0.03, value = 0.02)
# #yl_n
# opt_prob.addVar('x6', 'c', lower = -.9, upper = 0.9, value = -4.593744e-001)
# #xl_p
# opt_prob.addVar('x7', 'c', lower = x_hinge + safety, upper = chord - safety, value =  8.187166e-001)
# #yl_p
# opt_prob.addVar('x8', 'c', lower = -.9, upper = 0., value = -5.719241e-001)

opt_prob.addObj('f')
#opt_prob.addCon('g', 'i')
Example #22
def variogram_fit(SVExp, Sb=(0.01,400), Rb=(2,20), Nb=(0,400),
                  ab=(0,2), vb=(0,1000)):
    # Array with functions to be called from the Variograms library
    VarFunArr = [VariogramFit.SVExponential, VariogramFit.SVGaussian, 
                 VariogramFit.SVSpherical, VariogramFit.SVCubic,
                 VariogramFit.SVPentaspherical, VariogramFit.SVSinehole, 
                 VariogramFit.SVPower, VariogramFit.SVMatern]
    
    # Names of functions for display only
    optFunNam = ['Exponential','Gaussian','Spherical','Cubic',
                 'Pentaspherical','Sinehole','Power','Matern']
    
    # Boundaries semivariogram parameters
    #Sb = (0.01,400) # Limit for the sill
    #Rb = (2,20) # Limit for the range
    #Nb = (0,400) # Limit for the Nugget effect
    #ab = (0,2) # Limit for a in power variogram
    #vb = (0,1000) # Limit for Matern v parameters
    
    # Initial seed for variogram fit
    sr = random.uniform(Sb[0],Sb[1])
    rr = random.uniform(Rb[0],Rb[1])
    nr = random.uniform(Nb[0],Nb[1])
    ar = random.uniform(ab[0],ab[1])
    vr = random.uniform(vb[0],vb[1])
    
    Var = []
    Res = []
    Mdl = [] 
    
    # Wrapper of minimisation function (RMSE) for semivariogram fitting
    def OptFun(x,*args):
        F, g, fail = VariogramFit.optFunMaster(x,SVExp,j,VarFunArr)
        if F == 9999:
            fail = 1
        else:
            Var.append(x)
            Res.append(F)
            Mdl.append(j)
        return F, g, fail
        
    print('Initialising Variogram fit')
    print('')
    
    # Optimisation starts to minimise differences between experimental and 
    # theoretical semivariograms
    for j in range(len(VarFunArr)):

        print('Variogram Fitting ' + optFunNam[j])
        print('')
        
        VarProb = Optimization('Variogram Fitting: ' + optFunNam[j], OptFun)
        VarProb.addObj('RMSE')
        VarProb.addVar('Sill','c',lower=Sb[0],upper=Sb[1],value=sr)
        VarProb.addVar('Range','c',lower=Rb[0],upper=Rb[1],value=rr)
        VarProb.addVar('Nugget','c',lower=Nb[0],upper=Nb[1],value=nr)
        VarProb.addVar('Exponent (a)','c',lower=ab[0],upper=ab[1],value=ar)
        VarProb.addVar('Rank (v)','c',lower=vb[0],upper=vb[1],value=vr)
        
        args = (SVExp, j, VarFunArr, Var, Res, Mdl)
        optmz = ALHSO()
        optmz(VarProb)
    
        print(VarProb.solution(0))
        print('')
    
    # Get position of best semivariogram
    k = numpy.argmin(Res)
    xopt = Var[k]
    ModOpt = Mdl[k]
    del Var
    del Res
    del Mdl
    
    print('Theoretical variogram fit - Done!')
    print('')
    return xopt, ModOpt, VarFunArr
#==============================================================================
# Start Matlab engine
#==============================================================================
eng = matlab.engine.start_matlab()
#Go to directory where matlab file is
eng.cd('..')
eng.cd('SMA_temperature_strain_driven')

# =============================================================================
# 
# =============================================================================
chord = 1.
x_hinge = 0.75
safety = 0.05*chord

opt_prob = Optimization('Static model optimization',objfunc)
#xs_n
opt_prob.addVar('x1', 'c', lower = x_hinge/2. , upper = x_hinge - safety, value = 7.407724e-001)
#ys_n
opt_prob.addVar('x2', 'c', lower = -.9, upper = -0., value = -3.680615e-001)
#xs_p
opt_prob.addVar('x3', 'c', lower = x_hinge + safety, upper = chord - safety, value = 9.933211e-001)
#ys_p
opt_prob.addVar('x4', 'c', lower = 0., upper = .9, value = 6.004423e-001)
#xl_n
opt_prob.addVar('x5', 'c',  lower = x_hinge/2., upper = x_hinge - safety, value = 7.290939e-001)
#yl_n
opt_prob.addVar('x6', 'c', lower = -.9, upper = 0.9, value = -7.584186e-001)
#xl_p
opt_prob.addVar('x7', 'c', lower = x_hinge + safety, upper = chord - safety, value =  7.550874e-001)
#yl_p
def objfunc(xdict):
    x = xdict['x']
    y = xdict['y']

    ff = [
        (x - 0.0)**2 + (y - 0.0)**2,
        (x - 1.0)**2 + (y - 1.0)**2,
    ]
    gg = []
    fail = False

    return ff, gg, fail


# Instantiate Optimization Problem
optProb = Optimization('Rosenbrock function', objfunc, use_groups=True)
optProb.addVar('x', 'c', value=0, lower=-600, upper=600)
optProb.addVar('y', 'c', value=0, lower=-600, upper=600)

optProb.addObj('obj1')
optProb.addObj('obj2')

# 300 generations will find x=(0,0), 200 or less will find x=(1,1)
options = {
    'maxGen': 200,
}
opt = NSGA2(options=options)
opt.setOption('PrintOut', 0)
opt(optProb)

print(optProb.getSol(0))
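Per the comment above, the generation budget decides which minimum the front collapses to; a sketch of the longer run (hedged: it assumes repeated runs append solutions in order, as the getSolSet pattern elsewhere suggests):

opt300 = NSGA2(options={'maxGen': 300})
opt300.setOption('PrintOut', 0)
opt300(optProb)
print(optProb.getSol(1))  # assumption: the second run is stored at index 1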
    g[0] = x[0] - 1.0
    g[1] = 1.333333333 - x[1]
    g[2] = 2.666666666 - x[2]

    time.sleep(0.005)

    fail = 0
    return f, g, fail


# =============================================================================
#
# =============================================================================

# Instantiate Optimization Problem
opt_prob = Optimization('MIDACO Toy Problem', objfunc)
opt_prob.addVar('x1', 'c', lower=1.0, upper=4.0, value=1.0)
opt_prob.addVar('x2', 'c', lower=1.0, upper=4.0, value=1.0)
opt_prob.addVar('x3', 'c', lower=1.0, upper=4.0, value=1.0)
opt_prob.addVar('x4', 'c', lower=1.0, upper=4.0, value=1.0)
opt_prob.addObj('f')
opt_prob.addCon('g1', 'e')
opt_prob.addCon('g2', 'i')
opt_prob.addCon('g3', 'i')

# Solve Problem (No-Parallelization)
midaco_none = MIDACO()
midaco_none.setOption('IPRINT', -1)
midaco_none.setOption('MAXEVAL', 50000)
midaco_none(opt_prob)
if myrank == 0:
Example #26
	f = x[0]**2 + x[1]**2
	g = [0.0]*2
	g[0] = 3 - x[0]
	g[1] = 2 - x[1]
	
	fail = 0
	
	return f,g,fail
	

# =============================================================================
# 
# =============================================================================

# Instantiate Optimization Problem
opt_prob = Optimization('TOY Constrained Problem',objfunc)
opt_prob.addVar('x1','c',value=1.0,lower=0.0,upper=10.0)
opt_prob.addVar('x2','c',value=1.0,lower=0.0,upper=10.0)
opt_prob.addObj('f')
opt_prob.addCon('g1','i')
opt_prob.addCon('g2','i')
print(opt_prob)

# Instantiate Optimizer (SLSQP) & Solve Problem Storing History
slsqp = SLSQP()
slsqp.setOption('IFILE','slsqp1.out')
slsqp(opt_prob,store_hst=True)
print(opt_prob.solution(0))

# Solve Problem Using Stored History (Warm Start)
slsqp.setOption('IFILE','slsqp2.out')
Example #27
# =============================================================================
# 
# =============================================================================
def objfunc(x):
    
    f = 100*(x[1]-x[0]**2)**2+(1-x[0])**2
    g = []
    
    fail = 0
    return f,g, fail
    

# =============================================================================
# 
# ============================================================================= 
opt_prob = Optimization('Rosenbrock Unconstrained Problem',objfunc)
opt_prob.addVar('x1','c',lower=-10.0,upper=10.0,value=-3.0)
opt_prob.addVar('x2','c',lower=-10.0,upper=10.0,value=-4.0)
opt_prob.addObj('f')
print(opt_prob)

# Instantiate Optimizer (PSQP) & Solve Problem
psqp = PSQP()
psqp.setOption('IPRINT',0)
psqp(opt_prob,sens_type='FD')
print(opt_prob.solution(0))

# Instantiate Optimizer (SLSQP) & Solve Problem
slsqp = SLSQP()
slsqp.setOption('IPRINT',-1)
slsqp(opt_prob,sens_type='FD')
Example #28
    g = [0.0]*2
    g[0] = x[0] + 2.*x[1] + 2.*x[2] - 72.0
    g[1] = -x[0] - 2.*x[1] - 2.*x[2]
    
    time.sleep(0.5)
    
    fail = 0
    return f,g, fail
    

# =============================================================================
# 
# =============================================================================

# Instantiate Optimization Problem 
opt_prob = Optimization('TP37 Constrained Problem',objfunc)
opt_prob.addVar('x1','c',lower=0.0,upper=42.0,value=10.0)
opt_prob.addVar('x2','c',lower=0.0,upper=42.0,value=10.0)
opt_prob.addVar('x3','c',lower=0.0,upper=42.0,value=10.0)
opt_prob.addObj('f')
opt_prob.addCon('g1','i')
opt_prob.addCon('g2','i')

# Solve Problem (No-Parallelization)
nlpqlp_none = NLPQLP()
nlpqlp_none.setOption('IPRINT',0)
nlpqlp_none(opt_prob)
if myrank == 0:
    print(opt_prob.solution(0))
#end
Example #29
def solveOpt(int_domain,J,x,model,u0):
    def objfun(u,**kwargs):
        # 1) extract parameters
        int_domain = kwargs['int_domain'] 
        J = kwargs['J'] 
        x = kwargs['x'] 
        model = kwargs['model'] 
        # 2) define objective function
        f = np.trapz(x=int_domain, y=J * model.pf(int_domain,u,x))
        g = [0]*2
        # 3) budget constraint
        g[0] = u.sum() - 1
        # 4) VaR constraint
        W = model.W
        sigmaMax = model.VaR / norm.ppf(1-model.alpha)
        g[1] = -sigmaMax + np.sqrt(W.dot(u).dot(u))
        fail = 0
        return f,g,fail
    opt_prob = Optimization('test problem',objfun)
    opt_prob.addObj('f')
    opt_prob.addCon('budget const','e')    
    opt_prob.addCon('VaR const','i')
    opt_prob.addVarGroup('u',model.M,'c',lower=np.zeros(model.M),
                         upper=np.ones(model.M),value=u0)
    print(opt_prob)
    slsqp = SLSQP()
    slsqp.setOption('IPRINT',-1)
    slsqp(opt_prob,sens_type='FD',int_domain=int_domain,J=J,x=x,model=model)
    print(opt_prob.solution(0))
    

      
    
    
Example #30
# =============================================================================
#
# =============================================================================
def objfunc(x):

    f = 100 * (x[1] - x[0]**2)**2 + (1 - x[0])**2
    g = []

    fail = 0
    return f, g, fail


# =============================================================================
#
# =============================================================================
opt_prob = Optimization('Rosenbrock Unconstrained Problem', objfunc)
opt_prob.addVar('x1', 'c', lower=-10.0, upper=10.0, value=-3.0)
opt_prob.addVar('x2', 'c', lower=-10.0, upper=10.0, value=-4.0)
opt_prob.addObj('f')
print(opt_prob)

# Instantiate Optimizer (PSQP) & Solve Problem
psqp = PSQP()
psqp.setOption('IPRINT', 0)
psqp(opt_prob, sens_type='FD')
print(opt_prob.solution(0))

# Instantiate Optimizer (SLSQP) & Solve Problem
slsqp = SLSQP()
slsqp.setOption('IPRINT', -1)
slsqp(opt_prob, sens_type='FD')
Example #31
        dareadx = 0.5 * (h1 + h2) * dedx + 0.5 * e * (dh1dx + dh2dx)

        return dareadx


################################################################################

dp = crm_togw()

design_problem = PyOptOptimization(dp.comm,
                                   dp.eval_objcon,
                                   dp.eval_objcon_grad,
                                   number_of_steps=3)

opt_prob = Optimization('crm_togw', design_problem.eval_obj_con)

opt_prob.addObj('TOGW')
opt_prob.addCon('cruise_lift', type='e')
opt_prob.addCon('maneuver_lift', type='e')
opt_prob.addCon('area', type='e')
opt_prob.addCon('ksfailure', type='i')

for i in range(187 - 3):
    opt_prob.addCon('Smoothness %i a' % i, type='i')
    opt_prob.addCon('Smoothness %i b' % i, type='i')

variables = dp.model.get_variables()

for i, var in enumerate(variables):
    print('i', i)
Example #32
	f = x[0]**2 + x[1]**2
	g = [0.0]*2
	g[0] = 3 - x[0]
	g[1] = 2 - x[1]
	
	fail = 0
	
	return f,g,fail
	

# =============================================================================
# 
# =============================================================================

# Instantiate Optimization Problem
opt_prob = Optimization('TOY Constrained Problem',objfunc)
opt_prob.addVar('x1','c',value=1.0,lower=0.0,upper=10.0)
opt_prob.addVar('x2','c',value=1.0,lower=0.0,upper=10.0)
opt_prob.addObj('f')
opt_prob.addCon('g1','i')
opt_prob.addCon('g2','i')
print(opt_prob)

# Instantiate Optimizer (SLSQP) & Solve Problem Storing History
slsqp = SLSQP()
slsqp.setOption('IFILE','slsqp1.out')
slsqp(opt_prob,store_hst=True)
print(opt_prob.solution(0))

# Solve Problem Using Stored History (Warm Start)
slsqp.setOption('IFILE','slsqp2.out')
Example #33
from pyOpt import SOLVOPT
from pyOpt import KSOPT
from pyOpt import NSGA2
from pyOpt import SDPEN


def objfun(x):

    f = 100 * (x[1] - x[0]**2)**2 + (1 - x[0])**2
    g = []
    fail = 0

    return f, g, fail


opt_prob = Optimization('Rosenbrock Unconstrained Problem', objfun)
opt_prob.addVar('x1', 'c', lower=-10.0, upper=10.0, value=0.0)
opt_prob.addVar('x2', 'c', lower=-10.0, upper=10.0, value=0.0)
opt_prob.addObj('f')
print(opt_prob)

# Instantiate optimizer (PSQP) and solve problem
psqp = PSQP()
psqp.setOption('IPRINT', 0)
psqp(opt_prob, sens_type='FD')
print(opt_prob.solution(0))

# Instantiate optimizer (SLSQP) and solve problem
slsqp = SLSQP()
slsqp.setOption('IPRINT', -1)
slsqp(opt_prob, sens_type='FD')
Example #34
    g_con[2][0] = 4. * x[0] + 2
    g_con[2][1] = 2. * x[1] - 1
    g_con[2][2] = 2. * x[2]
    g_con[2][3] = -1.

    fail = 0

    return g_obj, g_con, fail


# =============================================================================
#
# =============================================================================

# Instantiate Optimization Problem
opt_prob = Optimization('Constrained Rosen-Suzuki', objfunc)
opt_prob.addVar('x1', 'c', value=1.5)
opt_prob.addVar('x2', 'c', value=1.5)
opt_prob.addVar('x3', 'c', value=1.5)
opt_prob.addVar('x4', 'c', value=1.5)
opt_prob.addObj('f')
opt_prob.addCon('g1', 'i')
opt_prob.addCon('g2', 'i')
opt_prob.addCon('g3', 'i')
print(opt_prob)

# Instantiate Optimizer (CONMIN)
conmin = CONMIN()

# Solve Problem with Optimizer Using Finite Differences
conmin(opt_prob, sens_type='FD')
#print 'i: %d' % i
        g[g_index] = num - divisor * mult_const * float(
            CONS_RHS_MAX_2_php[j - 1])

        num = 0
        for i in range(len(DM_php)):
            num = num + DM_php[i] * float(NUTRIENTS_php[j - 2][i]) * x[i]
        g[g_index + 1] = -(num - divisor * mult_const *
                           float(CONS_LHS_MIN_2_php[j - 1]))

    #time.sleep(0.5)
    fail = 0
    return f, g, fail

opt_prob = Optimization('TP37 Constrained Problem', objfunc)

for i in range(1, len(CONS_LHS_MIN_1_php) + 1):
    x_value = 'x' + str(i)
    opt_prob.addVar(x_value,
                    'c',
                    lower=float(CONS_LHS_MIN_1_php[i - 1]),
                    upper=float(CONS_RHS_MAX_1_php[i - 1]),
                    value=1)
opt_prob.addObj('f')

for i in range(1, len(NUTRIENTS_php) * 2 + 1):
    g_value = 'g' + str(i)
    opt_prob.addCon(g_value, 'i')

# Instantiate Optimizer (SLSQP)
Example #36
    
    f = -x[0]*x[1]*x[2]
    g = [0.0]*2
    g[0] = x[0] + 2.*x[1] + 2.*x[2] - 72.0
    g[1] = -x[0] - 2.*x[1] - 2.*x[2]
    
    fail = 0
    return f,g, fail
    

# =============================================================================
# 
# =============================================================================

# Instantiate Optimization Problem 
opt_prob = Optimization('TP37 Constrained Problem',objfunc)
opt_prob.addVar('x1','c',lower=0.0,upper=42.0,value=10.0)
opt_prob.addVar('x2','c',lower=0.0,upper=42.0,value=10.0)
opt_prob.addVar('x3','c',lower=0.0,upper=42.0,value=10.0)
opt_prob.addObj('f')
opt_prob.addCon('g1','i')
opt_prob.addCon('g2','i')
print(opt_prob)

# Instantiate Optimizer (PSQP) & Solve Problem
psqp = PSQP()
psqp.setOption('IPRINT',0)
psqp(opt_prob,sens_type='FD')
print(opt_prob.solution(0))

# Instantiate Optimizer (SLSQP) & Solve Problem
Example #37
    for i in range(5):
        f += -(c[i]*exp(-(1/pi)*((x[0]-a[i])**2 + (x[1]-b[i])**2))*cos(pi*((x[0]-a[i])**2 + (x[1]-b[i])**2)))
    #end
    
    g = [0.0]*1
    g[0] = 20.04895 - (x[0]+2.0)**2 - (x[1]+1.0)**2
    
    fail = 0
    
    return f,g,fail
    

# =============================================================================
# 
# =============================================================================
opt_prob = Optimization('Langermann Function 11',objfunc)
opt_prob.addVar('x1','c',lower=-2.0,upper=10.0,value=8.0)
opt_prob.addVar('x2','c',lower=-2.0,upper=10.0,value=8.0)
opt_prob.addObj('f')
opt_prob.addCon('g','i')
print(opt_prob)

# Global Optimization
nsga2 = NSGA2()
nsga2(opt_prob)
print(opt_prob.solution(0))

# Local Optimization Refinement
slsqp = SLSQP()
slsqp(opt_prob.solution(0))
print(opt_prob.solution(0).solution(0))
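The refined optimum lives on the nested solution object; a sketch of reading it back, using the _variables access pattern from the other examples on this page:

refined = opt_prob.solution(0).solution(0)
x_best = [refined._variables[i].value for i in range(2)]
print('refined x =', x_best)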
Example #39
    a3 = kwargs['a3']
    
    f = a1*(x[1]-x[0]**2.)**2. + (a2-x[0])**2.
    g = [0.0]*2
    g[0] = x[0]**2. + x[1]**2.0 - a3
    
    fail = 0
    return f,g, fail
    

# =============================================================================
# 
# =============================================================================

# Instantiate Optimization Problem 
opt_prob = Optimization('Rosenbrock Constrained Problem',objfunc)
opt_prob.addVar('x1','c',lower=0.0,upper=1.0,value=0.5)
opt_prob.addVar('x2','c',lower=0.0,upper=1.0,value=0.5)
opt_prob.addObj('f')
opt_prob.addCon('g1','i')
print(opt_prob)

# Arguments to pass into objfunc
a1 = 100.0
a2 = 1.0
a3 = 1.0

# Instantiate Optimizer (SLSQP) & Solve Problem
slsqp = SLSQP()
slsqp.setOption('IPRINT',-1)
slsqp(opt_prob,sens_type='FD',a12=[a1,a2],a3=a3)
Example #41

def runoptimizer():
    opt_prob = Optimization('TP37 Constrained Problem',objfun)
    opt_prob.addObj('LL')
    opt_prob.addVar('x1','c',lower=0.01,upper=10.0,value=1.0)
    opt_prob.addVar('x2','c',lower=0.01,upper=10.0,value=1.0)
    opt_prob.addVar('x3','c',lower=0.01,upper=10.0,value=1.0)
    opt_prob.addVar('x4','c',lower=0.01,upper=10.0,value=1.0)

    opt_prob.addConGroup('g', 4, 'i')

    # sanity check
    print(opt_prob)
    print(objfun([1.0, 1.0, 1.0, 1.0]))

    # other optimization methods can be used here - we use sequential least squares programming
    slsqp = SLSQP() 
    [fstr, xstr, inform] = slsqp(opt_prob)

    print(opt_prob.solution(0))
    return [v.value for v in opt_prob.solution(0).getVarSet().values()]
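# Hypothetical usage of the helper above: it returns the optimal variable
# values pulled from the stored solution (SLSQP itself returns fstr, xstr,
# inform).
x_opt = runoptimizer()
print(x_opt)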
Example #42

 try:
     maxiter = comm.bcast(maxiter, root=0)
     zvariable = comm.bcast(zvariable, root=0)
     mymodelstruct = comm.bcast(mymodelstruct, root=0)
     isdrude = comm.bcast(isdrude, root=0)
     n = comm.bcast(n, root=0)
     myinputdata = comm.bcast(myinputdata, root=0)
     z = comm.bcast(z, root=0)
     pathwithoutsample = comm.bcast(pathwithoutsample, root=0)
     pathwithsample = comm.bcast(pathwithsample, root=0)
 except Exception:
     print("No parallelization")

 ## Optimization for the PyOpt ALPSO (particle swarm) case, without parallelization (also works with parallelization)
 if algo>1:
     interm2=0  ## Intermediate variable with a function similar to interm
     opt_prob = Optimization('Dielectric modeling based on TDS pulse fitting',objfunc)
     if zvariable==1:
         opt_prob.addVar('thickness','c',lower=lb[0],upper=up[0],value=drudeinput[0])
         interm2=interm2+1
     if mymodelstruct==2:       #in case of TDCMT
         opt_prob.addVar('w0 tdcmt','c',lower=lb[0+interm2],upper=up[0+interm2],value=drudeinput[0+interm2])
         opt_prob.addVar('tau0','c',lower=lb[1+interm2],upper=up[1+interm2],value=drudeinput[1+interm2])
         opt_prob.addVar('tau1','c',lower=lb[2+interm2],upper=up[2+interm2],value=drudeinput[2+interm2])
         opt_prob.addVar('tau2','c',lower=lb[3+interm2],upper=up[3+interm2],value=drudeinput[3+interm2])
         opt_prob.addVar('delta theta','c',lower=lb[4+interm2],upper=up[4+interm2],value=drudeinput[4+interm2])
         interm2=interm2+5
 
 
     opt_prob.addVar('eps inf','c',lower=lb[0+interm2],upper=up[0+interm2],value=drudeinput[0+interm2])
     if isdrude==1:
         opt_prob.addVar('omega p','c',lower=lb[1+interm2],upper=up[1+interm2],value=drudeinput[1+interm2])
Example #43

    def train(self, optimizer='pso'):
        #Define the optimization problem for training the kriging model
        opt_prob = Optimization('Surrogate Test', self.fittingObjective)
        for i in range(self.k):
            opt_prob.addVar('theta%d' % i,
                            'c',
                            lower=1e-3,
                            upper=1e2,
                            value=.1)
        for i in range(self.k):
            opt_prob.addVar('pl%d' % i, 'c', lower=1.5, upper=2, value=2)
        opt_prob.addVar('lambda', 'c', lower=1e-5, upper=1, value=1)
        opt_prob.addObj('f')
        opt_prob.addCon('g1', 'i')

        if optimizer == 'pso':
            optimizer = ALPSO()
            optimizer.setOption('SwarmSize', 150)
            optimizer.setOption('maxOuterIter', 150)
            optimizer.setOption('stopCriteria', 1)
            optimizer.setOption('filename', '{0}Results.log'.format(self.name))

        if optimizer == 'ga':
            optimizer = NSGA2()
            optimizer.setOption('PopSize', (4 * 50))

        while True:
            try:
                self.trainingOptimizer(optimizer, opt_prob)
            except Exception as e:
                print(e)
                print('Error training model; restarting the optimizer with a larger population')
                # The original tested `optimizer == 'ga'` twice, but by this
                # point `optimizer` has been rebound to an instance, so neither
                # branch could ever fire; test the instance type instead.
                if isinstance(optimizer, ALPSO):
                    optimizer.setOption('SwarmSize', 200)
                    optimizer.setOption('maxOuterIter', 100)
                    optimizer.setOption('stopCriteria', 1)
                elif isinstance(optimizer, NSGA2):
                    optimizer.setOption('PopSize', 400)
            else:
                break
Example #44

def gradfunc(x, f, g):
    # Reconstructed header for a user-supplied sensitivity function, assuming
    # pyOpt's usual (x, f, g) -> (g_obj, g_con, fail) signature; g_obj and the
    # earlier rows of g_con are assigned in the portion lost above.
    g_con[2][0] = 4.0 * x[0] + 2
    g_con[2][1] = 2.0 * x[1] - 1
    g_con[2][2] = 2.0 * x[2]
    g_con[2][3] = -1.0

    fail = 0

    return g_obj, g_con, fail


# =============================================================================
#
# =============================================================================

# Instantiate Optimization Problem
opt_prob = Optimization("Constrained Rosen-Suzuki", objfunc)
opt_prob.addVar("x1", "c", value=1.5)
opt_prob.addVar("x2", "c", value=1.5)
opt_prob.addVar("x3", "c", value=1.5)
opt_prob.addVar("x4", "c", value=1.5)
opt_prob.addObj("f")
opt_prob.addCon("g1", "i")
opt_prob.addCon("g2", "i")
opt_prob.addCon("g3", "i")
print(opt_prob)

# Instantiate Optimizer (CONMIN)
conmin = CONMIN()

# Solve Problem with Optimizer Using Finite Differences
conmin(opt_prob, sens_type="FD")
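# pyOpt also accepts a user-supplied sensitivity function in place of finite
# differences; a hedged second solve reusing the gradfunc fragment above
# (stored under solution index 1):
conmin(opt_prob, sens_type=gradfunc)
print(opt_prob.solution(1))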
    def execute(self):
        """pyOpt execution. Note that pyOpt controls the execution, and the
        individual optimizers control the iteration."""

        self.pyOpt_solution = None

        opt_prob = Optimization(self.title, self.objfunc, var_set={}, obj_set={}, con_set={})

        # Add all parameters
        self.param_type = {}
        for name, param in self.get_parameters().iteritems():

            # We need to identify Enums, Lists, Dicts
            metadata = param.get_metadata()[0][1]
            values = param.evaluate()

            # Assuming uniform enumerated, discrete, or continuous for now.
            val = values[0]
            choices = []
            if "values" in metadata and isinstance(metadata["values"], (list, tuple, array, set)):
                vartype = "d"
                choices = metadata["values"]
            elif isinstance(val, bool):
                vartype = "d"
                choices = [True, False]
            elif isinstance(val, (int, int32, int64)):
                vartype = "i"
            elif isinstance(val, (float, float32, float64)):
                vartype = "c"
            else:
                msg = "Only continuous, discrete, or enumerated variables" " are supported. %s is %s." % (
                    name,
                    type(val),
                )
                self.raise_exception(msg, ValueError)
            self.param_type[name] = vartype

            names = param.names
            lower_bounds = param.get_low()
            upper_bounds = param.get_high()
            for i in range(param.size):
                opt_prob.addVar(
                    names[i], vartype, lower=lower_bounds[i], upper=upper_bounds[i], value=values[i], choices=choices
                )
        # Add all objectives
        for name in self.get_objectives():
            opt_prob.addObj(name)

        # Add all equality constraints
        for name in self.get_eq_constraints():
            opt_prob.addCon(name, type="e")

        # Add all inequality constraints
        for name in self.get_ineq_constraints():
            opt_prob.addCon(name, type="i")

        # Instantiate the requested optimizer
        optimizer = self.optimizer
        try:
            exec("from pyOpt import %s" % optimizer)
        except ImportError:
            msg = "Optimizer %s is not available in this installation." % optimizer
            self.raise_exception(msg, ImportError)

        optname = vars()[optimizer]
        opt = optname()

        # Set optimization options
        for option, value in self.options.iteritems():
            opt.setOption(option, value)

        # Execute the optimization problem
        if self.pyopt_diff:
            # Use pyOpt's internal finite difference
            opt(opt_prob, sens_type="FD")
        else:
            # Use OpenMDAO's differentiator for the gradient
            opt(opt_prob, sens_type=self.gradfunc)

        # Print results
        if self.print_results:
            print(opt_prob.solution(0))

        # Pull optimal parameters back into framework and re-run, so that
        # framework is left in the right final state
        dvals = []
        for i in range(0, len(opt_prob.solution(0)._variables)):
            dvals.append(opt_prob.solution(0)._variables[i].value)

        # Integer parameters come back as floats, so we need to round them
        # and turn them into python integers before setting.
        if "i" in self.param_type.values():
            for j, param in enumerate(self.get_parameters().keys()):
                if self.param_type[param] == "i":
                    dvals[j] = int(round(dvals[j]))

        self.set_parameters(dvals)
        self.run_iteration()
        self.record_case()

        # Save the most recent solution.
        self.pyOpt_solution = opt_prob.solution(0)
Example #46

def objfunc(x):
    # Reconstructed loop header: a, b, c are the Langermann coefficient
    # vectors defined in the portion lost above (exp, cos, pi from math).
    f = 0.0
    for i in range(5):
        f += -(c[i] * exp(-(1 / pi) * ((x[0] - a[i])**2 +
                          (x[1] - b[i])**2)) * cos(pi * ((x[0] - a[i])**2 +
                                                         (x[1] - b[i])**2)))

    g = [0.0] * 1
    g[0] = 20.04895 - (x[0] + 2.0)**2 - (x[1] + 1.0)**2

    fail = 0

    return f, g, fail


# =============================================================================
#
# =============================================================================
opt_prob = Optimization('Langermann Function 11', objfunc)
opt_prob.addVar('x1', 'c', lower=-2.0, upper=10.0, value=8.0)
opt_prob.addVar('x2', 'c', lower=-2.0, upper=10.0, value=8.0)
opt_prob.addObj('f')
opt_prob.addCon('g', 'i')
print(opt_prob)

# Global Optimization
nsga2 = NSGA2()
nsga2(opt_prob)
print(opt_prob.solution(0))

# Local Optimization Refinement
slsqp = SLSQP()
slsqp(opt_prob.solution(0))
print(opt_prob.solution(0).solution(0))
Example #47

def objfunc(xdict):
    # Reconstructed header: with use_groups=True (set below) the variables
    # arrive keyed by group name; 'x' and 'z' are the groups that survive the
    # delVarGroup calls. Scalar groups may need an extra [0] index.
    x0 = xdict['x']
    x1 = xdict['z']

    f = x0**2 + x1**2
    g = [0.0] * 2
    g[0] = 3 - x0
    g[1] = 2 - x1

    fail = 0

    return f, g, fail


# =============================================================================
#
# =============================================================================

# Instantiate Optimization Problem
opt_prob = Optimization('TOY Constraint Problem', objfunc, use_groups=True)
opt_prob.addVarGroup('a', 2, 'c', value=1.0, lower=0.0, upper=10)
opt_prob.delVarGroup('a')
opt_prob.addVar('x', 'c', value=1.0, lower=0.0, upper=10)
opt_prob.addVarGroup('y', 2, 'c', value=1.0, lower=0.0, upper=10)
opt_prob.delVarGroup('y')
opt_prob.addVarGroup('z', 1, 'c', value=1.0, lower=0.0, upper=10)
opt_prob.addVarGroup('b', 5, 'c', value=3.0, lower=0.0, upper=10)
opt_prob.delVarGroup('b')
opt_prob.addObj('f')
opt_prob.addCon('g1', 'i')
opt_prob.addCon('g2', 'i')
print(opt_prob)

# Instantiate Optimizer (PSQP) & Solve Problem
slsqp = SLSQP()
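# (The example is cut off here; a minimal completion in the usual pattern:)
slsqp(opt_prob, sens_type='FD')
print(opt_prob.solution(0))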
Example #48

def objfunc(x):
    # Reconstructed header: coordinates, N, and y1 come from the portion lost
    # above. Note that coordinates[:][1] is just coordinates[1]; the [:]
    # slice is a no-op copy.
    y2 = coordinates[:][1]

    f = 0
    for i in range(N-1):
        f += abs(y1[i]*100 - y2[i]*100)**2

    g = []

    fail = 0
    return f, g, fail


# =============================================================================
#
# =============================================================================
opt_prob = Optimization('CST Parameterization', objfunc)
opt_prob.addVar('x1', 'c', lower=-2.0, upper=2.0, value=-1.0)
opt_prob.addVar('x2', 'c', lower=-2.0, upper=2.0, value=-1.0)
opt_prob.addVar('x3', 'c', lower=-2.0, upper=2.0, value=-1.0)
opt_prob.addVar('x4', 'c', lower=-2.0, upper=2.0, value=-1.0)
opt_prob.addVar('x5','c', lower=-2.0, upper=2.0, value=1.0)
opt_prob.addVar('x6','c', lower=-2.0, upper=2.0, value=1.0)
opt_prob.addVar('x7','c', lower=-2.0, upper=2.0, value=1.0)
opt_prob.addVar('x8','c', lower=-2.0, upper=2.0, value=1.0)
opt_prob.addObj('f')
print(opt_prob)

# Instantiate Optimizer (SLSQP) & Solve Problem
slsqp = SLSQP()
slsqp.setOption('IPRINT',-1)
slsqp(opt_prob, sens_type='FD')
Example #49

# =============================================================================
#
# =============================================================================
def objfunc(x):

    f = 100 * (x[1] - x[0]**2)**2 + (1 - x[0])**2
    g = []

    fail = 0
    return f, g, fail


# =============================================================================
#
# =============================================================================
opt_prob = Optimization('Rosenbrock Unconstrained Problem', objfunc)
# List of references in the queue, with their priority order
opt_prob.addVar('x1', 'c', lower=-10.0, upper=10.0, value=-3.0)
# The processing capacity
opt_prob.addVar('x2', 'c', lower=-10.0, upper=10.0, value=-4.0)
opt_prob.addObj('f')
# Constraints (described in prose only; see the sketch below):
# - Each queue has a given processing capacity, expressed in number of workers
# - Each worker can process 35 references per week
# - The notice period to change the number of workers by one step is 8 weeks
# - The maximum total capacity increase is 20% of nominal capacity
# - Fast-lane capacity is fixed at 10% of planned capacity
# - The cost added to the objective for revising one date is
#   1000 * number of overdue days

print(opt_prob)
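# None of the constraints listed above are registered on this toy problem; a
# hedged sketch of how two of them could be written as pyOpt inequality rows
# (g <= 0). Here nominal_capacity, planned_capacity, and the reading of x as
# per-queue worker counts are assumptions, not from the original:
def queue_constraints(x, nominal_capacity, planned_capacity):
    g = [0.0] * 2
    # total capacity may exceed nominal capacity by at most 20%
    g[0] = sum(x) - 1.2 * nominal_capacity
    # fast-lane capacity at most 10% of planned capacity (an 'e'-type
    # constraint would pin it exactly)
    g[1] = x[-1] - 0.1 * planned_capacity
    return g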
    
def objfunc_3(x):
    # Returns two objectives packed in a tuple (defined but unused below;
    # opt_prob is built from objfunc_1 and objfunc_2 instead).
    f1 = x[0] - x[1]
    f2 = x[0] + x[1]
    f = (f1, f2)
    fail = 0
    g = []
    return f, (g, g), (fail, fail)
# =============================================================================
# 
# =============================================================================
chord = 1.
x_hinge = 0.75
safety = 0.005*chord

opt_prob = Optimization('main', (objfunc_1, objfunc_2))
opt_prob.addObj("f1")
opt_prob.addObj("f2")
# xs_n
opt_prob.addVar('x1', 'c', lower=-1, upper=1, value=6.817445e-001)
# ys_n
opt_prob.addVar('x2', 'c', lower=-1, upper=1, value=-5.216475e-001)

#opt_prob.addObj('2', objfunc_2)
print(opt_prob)

# Global Optimization
nsga2 = NSGA2()
nsga2.setOption('PopSize', 10)
nsga2.setOption('maxGen', 10)
nsga2(opt_prob)
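# (The fragment ends at the NSGA2 call; inspecting the stored result would
# follow the usual pattern:)
print(opt_prob.solution(0))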
    def execute(self):
        """pyOpt execution. Note that pyOpt controls the execution, and the
        individual optimizers control the iteration."""
        
        self.pyOpt_solution = None
    
        opt_prob = Optimization(self.title, self.objfunc, var_set={}, 
                                obj_set={}, con_set={})
        
        # Add all parameters
        for name, param in self.get_parameters().iteritems():
            
            val = param.evaluate()
            
            # We need to identify Enums, Lists, Dicts
            metadata = param.get_metadata()[0][1]          
            
            # enumerated, discrete or continuous
            choices = []
            if ('values' in metadata and \
               isinstance(metadata['values'],(list, tuple, array, set))):
                vartype = 'd'
                choices = metadata['values']
            elif isinstance(val, bool):
                vartype = 'd'
                choices = [True, False]
            elif isinstance(val, (int, int32, int64)):
                vartype = 'i'
            elif isinstance(val, (float, float32, float64)):
                vartype = 'c'
            else:
                msg = 'Only continuous, discrete, or enumerated variables ' + \
                      'are supported. %s is %s.' % (name, type(val))
                self.raise_exception(msg, ValueError)
            
            opt_prob.addVar(name, vartype, lower=param.low, upper=param.high, 
                            value=val, choices=choices)

        # Add all objectives
        for name in self.get_objectives().keys():
            opt_prob.addObj(name)
            
        # Add all equality constraints
        for name in self.get_eq_constraints().keys():
            opt_prob.addCon(name, type='e')
        
        # Add all inequality constraints
        for name in self.get_ineq_constraints().keys():
            opt_prob.addCon(name, type='i')

        # Instantiate the requested optimizer
        optimizer = self.optimizer
        try:
            exec('from pyOpt import %s' % optimizer)
        except ImportError:
            msg = "Optimizer %s is not avialable in this installation." % \
                   optimizer
            self.raise_exception(msg, ImportError)
            
        optname = vars()[optimizer]
        opt = optname()
        
        # Set optimization options
        for option, value in self.options.iteritems():
            opt.setOption(option, value)

        # Execute the optimization problem
        if self.differentiator:
            # Use OpenMDAO's differentiator for the gradient
            opt(opt_prob, sens_type=self.gradfunc)
        else:
            # Use pyOpt's internal finite difference
            opt(opt_prob, sens_type='FD')
        
        # Print results
        if self.print_results:
            print(opt_prob.solution(0))
        
        # Pull optimal parameters back into framework and re-run, so that
        # framework is left in the right final state
        dvals = []
        for i in range(0, len(opt_prob.solution(0)._variables)):
            dvals.append(opt_prob.solution(0)._variables[i].value)
        self.set_parameters(dvals)
        self.run_iteration()
        
        # Save the most recent solution.
        self.pyOpt_solution = opt_prob.solution(0)
if __name__ == "__main__":

    print "running deterministic optimization "

    # Physical problem
    rho = 0.2836  # lb/in^3
    L = 5.0  # in
    P = 25000.0  # lb
    E = 30.0e6  # psi
    ys = 36260.0  # psi
    fs = 1.5
    dtruss = TwoBarTruss(rho, L, P, E, ys, fs)

    # Optimization Problem
    optproblem = TwoBarTrussOpt(MPI.COMM_WORLD, dtruss)
    opt_prob = Optimization(args.logfile, optproblem.evalObjCon)

    # Add functions
    opt_prob.addObj('weight')
    opt_prob.addCon('buckling-bar1', type='i')
    opt_prob.addCon('failure-bar1', type='i')
    opt_prob.addCon('failure-bar2', type='i')

    # Add variables
    opt_prob.addVar('area-1', type='c', value=1.0, lower=1.0e-3, upper=2.0)
    opt_prob.addVar('area-2', type='c', value=1.0, lower=1.0e-3, upper=2.0)
    opt_prob.addVar('height', type='c', value=4.0, lower=4.0, upper=10.0)

    # Optimization algorithm
    if args.algorithm == 'ALGENCAN':
        opt = ALGENCAN()
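    # (The other algorithm branches are lost above; a minimal completion that
    # runs whichever optimizer was instantiated:)
    opt(opt_prob)
    print(opt_prob.solution(0))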
    def optimizeTrajectory(self, plot_func=None):
        # use non-linear optimization to find parameters for minimal
        # condition number trajectory

        self.plot_func = plot_func

        if self.config['showOptimizationGraph']:
            self.initGraph()

        ## describe optimization problem with pyOpt classes

        from pyOpt import Optimization
        from pyOpt import ALPSO, SLSQP

        # Instantiate Optimization Problem
        opt_prob = Optimization('Trajectory optimization', self.objective_func)
        opt_prob.addObj('f')

        # add variables, define bounds
        # w_f - pulsation
        opt_prob.addVar('wf', 'c', value=self.wf_init, lower=self.wf_min, upper=self.wf_max)

        # q - offsets
        for i in range(self.dofs):
            opt_prob.addVar('q_%d'%i,'c', value=self.qinit[i], lower=self.qmin[i], upper=self.qmax[i])
        # a, b - sin/cos params
        for i in range(self.dofs):
            for j in range(self.nf[0]):
                opt_prob.addVar('a{}_{}'.format(i,j), 'c', value=self.ainit[i][j], lower=self.amin, upper=self.amax)
        for i in range(self.dofs):
            for j in range(self.nf[0]):
                opt_prob.addVar('b{}_{}'.format(i,j), 'c', value=self.binit[i][j], lower=self.bmin, upper=self.bmax)

        # add constraint vars (constraint functions are in obfunc)
        if self.config['minVelocityConstraint']:
            opt_prob.addConGroup('g', self.dofs*5, 'i')
        else:
            opt_prob.addConGroup('g', self.dofs*4, 'i')
        #print opt_prob

        initial = [v.value for v in list(opt_prob._variables.values())]

        if self.config['useGlobalOptimization']:
            ### optimize using pyOpt (global)
            opt = ALPSO()  # Augmented Lagrangian particle swarm optimization
            opt.setOption('stopCriteria', 0)
            opt.setOption('maxInnerIter', 3)
            opt.setOption('maxOuterIter', self.config['globalOptIterations'])
            opt.setOption('printInnerIters', 1)
            opt.setOption('printOuterIters', 1)
            opt.setOption('SwarmSize', 30)
            opt.setOption('xinit', 1)
            #TODO: how to properly limit max number of function calls?
            # no. func calls = (SwarmSize * maxInnerIter) * maxOuterIter + SwarmSize
            # (with the settings above: 30 * 3 * maxOuterIter + 30 evaluations)
            self.iter_max = opt.getOption('SwarmSize') * opt.getOption('maxInnerIter') * opt.getOption('maxOuterIter') + opt.getOption('SwarmSize')

            # run first (global) optimization
            try:
                #reuse history
                opt(opt_prob, store_hst=False, hot_start=True, xstart=initial)
            except NameError:
                opt(opt_prob, store_hst=False, xstart=initial)
            print(opt_prob.solution(0))

        ### pyOpt local

        # after using global optimization, get more exact solution with
        # gradient based method init optimizer (only local)
        opt2 = SLSQP()   # sequential least squares programming
        opt2.setOption('MAXIT', self.config['localOptIterations'])
        if self.config['verbose']:
            opt2.setOption('IPRINT', 0)
        # TODO: the number of function calls depends on the number of variables
        # and on the iterations used to approximate the gradient; iterations are
        # probably steps along the gradient. How to get a proper call count?
        self.iter_max = "(unknown)"

        if self.config['useGlobalOptimization']:
            if self.last_best_sol is not None:
                #use best constrained solution
                for i in range(len(opt_prob._variables)):
                    opt_prob._variables[i].value = self.last_best_sol[i]
            else:
                #reuse previous solution
                for i in range(len(opt_prob._variables)):
                    opt_prob._variables[i].value = opt_prob.solution(0).getVar(i).value

            opt2(opt_prob, store_hst=False, sens_step=0.1)
        else:
            try:
                #reuse history
                opt2(opt_prob, store_hst=True, hot_start=True, sens_step=0.1)
            except NameError:
                opt2(opt_prob, store_hst=True, sens_step=0.1)

        local_sol = opt_prob.solution(0)
        if not self.config['useGlobalOptimization']:
            print(local_sol)
        local_sol_vec = np.array([local_sol.getVar(x).value for x in range(0,len(local_sol._variables))])

        if self.last_best_sol is not None:
            local_sol_vec = self.last_best_sol
            print("using last best constrained solution instead of given solver solution.")

        sol_wf, sol_q, sol_a, sol_b = self.vecToParams(local_sol_vec)

        print("testing final solution")
        self.iter_cnt = 0
        self.objective_func(local_sol_vec)
        print("\n")

        self.trajectory.initWithParams(sol_a, sol_b, sol_q, self.nf, sol_wf)

        if self.config['showOptimizationGraph']:
            plt.ioff()

        return self.trajectory