def main():
    """NSGA2 Optimization of 2D Rosenbrock Function"""
    # Problem setup
    opt_prob = pyOpt.Optimization('2D Rosenbrock', rosenbrock)
    opt_prob.addObj('f')
    opt_prob.addVar('x1', 'c', lower=0.0, upper=5.0, value=4.0)
    opt_prob.addVar('x2', 'c', lower=-5.0, upper=5.0, value=4.0)
    opt_prob.addCon('g1', 'i')

    print(opt_prob)

    nsga2 = pyOpt.NSGA2()
    nsga2.setOption('maxGen', 250)
    nsga2.setOption('pMut_real', 0.4)
    # nsga2.setOption('PrintOut',2) #Control output files

    import time
    tt = time.time()
    # Run the problem
    nsga2(opt_prob)
    print "Elapsed time: ", time.time() - tt, "seconds"
    print "Function Evaluations: ", count

    # Print the solution
    print(opt_prob.solution(0))
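
This main() assumes a rosenbrock callback and a global count evaluation counter defined elsewhere in the original script. A minimal sketch of that callback in the (f, g, fail) form pyOpt expects, with an assumed constraint for the 'g1' slot, might look like:

count = 0  # assumed global evaluation counter referenced by main()

def rosenbrock(x):
    """Hypothetical objective callback; pyOpt expects (f, g, fail)."""
    global count
    count += 1
    f = 100.0 * (x[1] - x[0]**2)**2 + (1.0 - x[0])**2
    g = [0.0]                          # one slot for the 'g1' inequality
    g[0] = x[0]**2 + x[1]**2 - 25.0    # illustrative constraint, g1 <= 0
    fail = 0
    return f, g, fail
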
def setupSNOPT(distributionClass, subspaceClass, X0, Optfun):
    print('SNOPT')

    opt_prob = pyOpt.Optimization('Trial UQ', Optfun)
    curvar = 1
    upperLimit = distributionClass.stdmax
    for index in range(distributionClass.dim):
        if subspaceClass.important_index[index] == True:
            opt_prob.addVar('var' + str(curvar),
                            'c',
                            lower=0.001,
                            upper=upperLimit[index],
                            value=X0[curvar - 1])
            curvar = curvar + 1

    opt_prob.addObj('Objective Mass')

    opt_prob.addCon('g1', 'i')
    opt_prob.addCon('g2', 'i')

    print(opt_prob)
    snopt = pyOpt.pySNOPT.SNOPT()

    snopt.setOption('Major feasibility tolerance', value=5e-6)
    snopt.setOption('Major optimality tolerance', value=1e-5)
    snopt.setOption('Minor feasibility tolerance', value=5e-6)
    snopt.setOption('Major iterations limit', 500)

    exitVal = snopt(opt_prob, sens_type='FD')
    print(exitVal)
Example #3
def PL_identify(D, lamb, ini_sol, alg_name='SLSQP'):
    """  Idenify the Probability Law (PL)

    Parameters
    ---------------
    D : mxn matrix
        D_ij is the entropy value calculated using candidate PL i and data in
        window j.
    lamb : float
        detection threshold
    ini_sol : list with m elements
        initial solution
    alg_name : str {'SLSQP', 'ALGENCAN'}
        algorithm name

    Returns
    --------------
    fstr, xstr, inform, opt_prob : tuple
        Optimal objective value, optimal solution vector, solver exit
        information, and the solved pyOpt.Optimization problem.

    Examples
    ----------------
    >>> D = [[0.2, 0.3], [0.4, 1.4]]
    >>> ini_sol = [1, 1]
    >>> fstr, xstr, inform, opt_prob = PL_identify(D, 0.5, ini_sol, 'SLSQP')
    >>> print('opt_prob', opt_prob)
    >>> print('inform', inform)
    >>> print('xstr', xstr)
    >>> print('fstr', fstr)
    """

    m = len(D)
    n = len(D[0])

    def objfunc(x):
        f = sum(x)
        g = [0.0] * n
        for j in range(n):
            # Sum over the m candidate PLs; the original snippet referenced a
            # stale loop index `i` here, read as standing for this sum.
            g[j] = sum((D[i][j] - lamb) * x[i] for i in range(m)) + 0.01

        fail = 0
        return f, g, fail

    opt_prob = pyOpt.Optimization("PL Identification Problem", objfunc)
    opt_prob.addObj('f')
    for i in range(m):
        # opt_prob.addVar('x%d'%(i), 'c', lower=0.0,
        # upper=1.0, value = ini_sol[i])
        opt_prob.addVar('x%d' % (i),
                        'c',
                        lower=0.0,
                        upper=1.0,
                        value=ini_sol[i])

        # opt_prob.addVar('x%d'%(i), 'i', lower=0.0,
        # upper=1.0, value = ini_sol[i])
    opt_prob.addConGroup('ineq', n, type='i')

    alg = getattr(pyOpt, alg_name)()
    [fstr, xstr, inform] = alg(opt_prob)
    return fstr, xstr, inform, opt_prob
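
A quick pure-Python check of the constraint convention assumed in the objfunc fix above (pyOpt treats an 'i' constraint as satisfied when g <= 0), using the data from the docstring example:

D = [[0.2, 0.3], [0.4, 1.4]]
lamb, x = 0.5, [1, 1]
for j in range(2):
    g_j = sum((D[i][j] - lamb) * x[i] for i in range(2)) + 0.01
    print(j, g_j)  # j=0: -0.39 (satisfied); j=1: 0.71 (violated at x = [1, 1])
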
Example #4
    def getMinVal(self, xbounds, ybounds, diam):
        xmin = xbounds[0]
        xmax = xbounds[1]
        ymin = ybounds[0]
        ymax = ybounds[1]

        line = 'min(phi(x)), {}<=x1<={}, {}<=x2<={}'.format(
            xmin, xmax, ymin, ymax)
        opt_prob = pyOpt.Optimization(line, self.objfunc)
        opt_prob.addObj('phi')
        opt_prob.addVar('x1',
                        'c',
                        lower=xmin,
                        upper=xmax,
                        value=(xmax + xmin) / 2.0)
        opt_prob.addVar('x2',
                        'c',
                        lower=ymin,
                        upper=ymax,
                        value=(ymax + ymin) / 2.0)

        midaco = pyOpt.MIDACO()  # note: MIDACO, despite the original nsga2 name
        midaco.setOption('IPRINT', -1)
        midaco(opt_prob)

        # MINPHI(x) = MINMAX(g1(x), g2(x), g3(x), g4(x))
        return opt_prob.solution(0)._objectives[0].value
Example #5
def solvopt_problem(px, py, pz, lx, ly, lz,
                    scx, scy, scz, radius):

  import pyOpt

  def objfunc(x):

    f = function_for_distance_fast (x[0], x[1], x[2], lx, ly, lz, \
        scx, scy, scz, radius)

    g = [0.0]

    minr2 = -1.0 * f
    dis = (px - x[0]) * (px - x[0]) + \
        (py - x[1]) * (py - x[1]) + \
        (pz - x[2]) * (pz - x[2])

    maxcon = -minr2 + dis

    g[0] = max(0, maxcon)

    fail = 0
    return f,g, fail

  opt_prob = pyOpt.Optimization('TP37 Constrained Problem',objfunc)

  opt_prob.addObj('f')

  opt_prob.addVar('x1','c',lower=float('-inf'),upper=float('inf'),value=px)
  opt_prob.addVar('x2','c',lower=float('-inf'),upper=float('inf'),value=py)
  opt_prob.addVar('x3','c',lower=float('-inf'),upper=float('inf'),value=pz)

  opt_prob.addConGroup('g',1,'i')

  #print opt_prob

  solvopt = pyOpt.SOLVOPT()
  #solvopt = pyOpt.PSQP()
  solvopt.setOption('ftol', 1.0e-3)
  solvopt.setOption('iprint', -1)
  [fstr, xstr, inform] = solvopt(opt_prob,sens_type='FD')

  if len(xstr) < 3:
    print "error in solvopt"
    exit()

  f_px = float(xstr[0])
  f_py = float(xstr[1])
  f_pz = float(xstr[2])

  #print opt_prob.solution(0)

  return float(fstr), f_px, f_py, f_pz
Example #6
def main():

    tinitial = time.time()

    # Inputs
    inputs = np.array(
        [.5, 0.5, 0.5, 0.5, 0.5, 0.5, 1.4, 0.5, 0.5, 0.5, 0.5, 0.5, .7])

    # build the vehicle
    vehicle = define_vehicle(inputs)

    # define the mission
    mission = define_mission(vehicle, inputs)

    # Have the optimizer call the wrapper
    mywrap = lambda inputs: wrap(inputs, vehicle, mission)

    opt_prob = pyOpt.Optimization('Fb', mywrap)
    opt_prob.addObj('Battery')
    opt_prob.addVar('x1', 'c', lower=0.06, upper=0.5, value=inputs[0])
    opt_prob.addVar('x2', 'c', lower=1e-5, upper=3.0, value=inputs[1])
    opt_prob.addVar('x3', 'c', lower=1e-5, upper=3.0, value=inputs[2])
    opt_prob.addVar('x4', 'c', lower=1e-5, upper=3.0, value=inputs[3])
    opt_prob.addVar('x5', 'c', lower=1e-5, upper=3.0, value=inputs[4])
    opt_prob.addVar('x6', 'c', lower=1e-5, upper=3.0, value=inputs[5])
    opt_prob.addVar('x7', 'c', lower=1e-5, upper=3.0, value=inputs[6])
    opt_prob.addVar('x8', 'c', lower=1e-5, upper=3.0, value=inputs[7])
    opt_prob.addVar('x9', 'c', lower=1e-5, upper=3.0, value=inputs[8])
    opt_prob.addVar('x10', 'c', lower=1e-5, upper=3.0, value=inputs[9])
    opt_prob.addVar('x11', 'c', lower=1e-5, upper=3.0, value=inputs[10])
    opt_prob.addVar('x12', 'c', lower=1e-5, upper=3.0, value=inputs[11])
    opt_prob.addVar('x13', 'c', lower=1e-5, upper=3.0, value=inputs[12])
    opt_prob.addConGroup('g', 2, 'i')

    opt = pyOpt.pySNOPT.SNOPT()

    print(opt_prob)
    outputs = opt(opt_prob, sens_type='FD', sens_mode='pgc')

    if myrank == 0:

        vehicle, mission, results = run_plane(outputs[1])

        deltat = time.time() - tinitial
        print('Time Elapsed')
        print(deltat)
        # Plot results
        post_process(vehicle, mission, results)

    return
Example #7
    def max_trimmed_efficiency(self,
                               dvs,
                               ref_origin_x=None,
                               ref_origin_z=None):

        new_dvs = copy.copy(dvs)

        def objfunc(x):

            new_dvs[self.desvar.bf_index] = x[0]
            new_dvs[self.desvar.el_index] = x[1]

            # minus sign: minimize -L/D to maximize efficiency
            f = -self.lift(new_dvs) / self.drag(new_dvs)

            g = [self.moment_y(new_dvs, ref_origin_x, ref_origin_z)]

            fail = 0

            return f, g, fail

        opt_prob = pyOpt.Optimization('Max Trimmed Efficiency', objfunc)

        opt_prob.addObj('Efficiency')
        opt_prob.addCon('Trim', type='e', equal=0.0)

        opt_prob.addVar('bf',
                        'c',
                        lower=self.desvar.bf_bound[0],
                        upper=self.desvar.bf_bound[1],
                        value=0.0)
        opt_prob.addVar('el',
                        'c',
                        lower=self.desvar.el_bound[0],
                        upper=self.desvar.el_bound[1],
                        value=0.0)

        opt = pyOpt.SLSQP()
        opt.setOption('IPRINT', -1)
        opt.setOption('ACC', 1e-5)

        [Y_min, X_min, Info] = opt(opt_prob, sens_type='FD')  # could improve sens_type

        return -Y_min[0]
Example #8
    def optimize(self):
        #self._prepare_opt()
        self._opt_prob = pyOpt.Optimization('Optimal Excitation Trajectory',
                                            self._obj_func)
        self._add_vars2prob()
        self._add_obj2prob()
        self._add_const2prob()

        # print(self._opt_prob)
        #x = np.random.random((self._dyn.rbt_def.dof * (2*self._order+1)))
        #print(self._obj_func(x))

        # PSQP
        # slsqp = pyOpt.pyPSQP.PSQP()
        # slsqp.setOption('MIT', 2)
        # slsqp.setOption('IPRINT', 2)

        # COBYLA
        #slsqp = pyOpt.pyCOBYLA.COBYLA()

        # Genetic Algorithm
        #slsqp = pyOpt.pyNSGA2.NSGA2()

        # SLSQP
        slsqp = pyOpt.pySLSQP.SLSQP()
        slsqp.setOption('IPRINT', 0)
        # slsqp.setOption('MAXIT', 300)
        #slsqp.setOption('ACC', 0.00001)

        # SOLVOPT
        # slsqp = pyOpt.pySOLVOPT.SOLVOPT()
        # slsqp.setOption('maxit', 5)

        #[fstr, xstr, inform] = slsqp(self._opt_prob, sens_type='FD')
        [fstr, xstr, inform] = slsqp(self._opt_prob)

        self.f_result = fstr
        self.x_result = xstr

        print('Condition number: {}'.format(fstr[0]))
        print('x: {}'.format(xstr))
        #print('inform: ', inform)

        print(self._opt_prob.solution(0))
Example #9
def PMPOpt():
    problem = getProblem()
    problem['h'] = 1
    guess = problem['sol{}'.format(problem['h'])]

    opt = pyOpt.Optimization('Optimal PDG',
                             lambda c: PMPCostScalar(c, problem))

    opt.addVar('lambdaX', 'c', lower=0, upper=.25, value=guess[0])
    opt.addVar('lambdaY', 'c', lower=0, upper=.25, value=guess[1])
    opt.addVar('lambdaU', 'c', lower=0, upper=3, value=guess[2])
    opt.addVar('lambdaV', 'c', lower=-3, upper=0, value=guess[3])
    opt.addVar('lambdaM', 'c', lower=-1, upper=0, value=guess[4])
    # opt.addVar('lambdaMu', 'c', lower=-10, upper=0, value=guess[5])
    opt.addVar('tf',
               'c',
               lower=guess[6] - 1,
               upper=guess[6] + 1,
               value=guess[6])

    opt.addCon('xf', 'e')
    opt.addCon('yf', 'e')
    opt.addCon('vf', 'e')
    opt.addCon('uf', 'e')
    opt.addCon('lambdaMf', 'e')

    opt.addObj('J')

    # optimizer = pyOpt.ALGENCAN()
    # optimizer.setOption('epsfeas',1e-2)
    # optimizer.setOption('epsopt',1e-1)

    optimizer = pyOpt.SLSQP()
    optimizer.setOption('ACC', 1e-1)

    fopt, copt, info = optimizer(opt, sens_step=1e-5)

    print(opt.solution(0))
    print(info)

    return copt, problem
Example #10
    def optimizeTrajectory(self):
        # type: () -> PulsedTrajectory
        # use non-linear optimization to find parameters for minimal
        # condition number trajectory

        # Instantiate Optimization Problem
        opt_prob = pyOpt.Optimization('Trajectory optimization',
                                      self.objectiveFunc)
        opt_prob.addObj('f')
        self.opt_prob = opt_prob

        self.addVarsAndConstraints(opt_prob)
        sol_vec = self.runOptimizer(opt_prob)

        sol_wf, sol_q, sol_a, sol_b = self.vecToParams(sol_vec)
        self.trajectory.initWithParams(sol_a, sol_b, sol_q, self.nf, sol_wf)

        if self.config['showOptimizationGraph']:
            plt.ioff()

        return self.trajectory
Example #11
    def Solve(self, problem):
        objective_function = lambda x: [problem.Calculate(x), 0, 0]
        lb, ub = problem.GetBounds()

        opt_prob = pyOpt.Optimization('Problem', objective_function)
        opt_prob.addObj('f')
        for i in range(problem.GetDimension()):
            opt_prob.addVar('x' + str(i),
                            'c',
                            lower=lb[i],
                            upper=ub[i],
                            value=(lb[i] + ub[i]) / 2.)
        midaco_none = MIDACO(pll_type=None)  # assumes: from pyOpt import MIDACO
        midaco_none.setOption('IPRINT', -1)
        midaco_none.setOption('ISEED', 100)
        midaco_none.setOption('MAXEVAL', self.max_iters)
        midaco_none.setOption('FOCUS', -4)
        fstr, xstr, inform = midaco_none(opt_prob)

        n_evals = problem.GetCalculationsStatistics()
        return xstr, fstr[0], n_evals
Example #12
    def max_variance(self, XB, number=1, exclude=None):
        if exclude is None:  # avoid a shared mutable default argument
            exclude = []

        assert XB.shape[0] == self.ndim, 'wrong dimension'
        
        # seed X_min outside the bounds so the while loop runs at least once
        X_min = [0.0] * len(XB)
        X_min[0] = XB[0][0] - 1.0

        while not is_in(X_min,XB):

            obj = self.objective
            prob = pyOpt.Optimization('Variance Maximization',obj)
                    
            for ix in range(XB.shape[0]):
                prob.addVar('X%i'%ix,'c',lower=XB[ix,0],upper=XB[ix,1],value=0.)
                
            prob.addObj('Estimated Variance')

            opt_ALPSO = pyOpt.ALPSO(pll_type=None)
            #opt_ALPSO = pyOpt.ALPSO(pll_type='MP',args=[1.0])
            opt_ALPSO.setOption('fileout',0)
            opt_ALPSO.setOption('maxOuterIter',10)
            opt_ALPSO.setOption('stopCriteria',1)       
#            opt_ALPSO.setOption('SwarmSize',self.ndim*100)
            opt_ALPSO.setOption('SwarmSize',self.ndim*20)

            opt_SLSQP = pyOpt.SLSQP()
            opt_SLSQP.setOption('IPRINT',-1)
            opt_SLSQP.setOption('ACC',1e-5)

            vec = []
            for index in range(number*10):
                print(index + 1, ' so far: ', len(vec))
                [YI_min,X_min,Info] = opt_ALPSO(prob)
                [YI_min,X_min,Info] = opt_SLSQP(prob.solution(index),sens_type='FD')
                if not is_already_in(X_min,vec+exclude) and is_in(X_min,XB): vec.append(X_min.tolist())
                if len(vec) >= number: break

        if len(vec) == 1: return vec[0]
        return vec
Example #13
# Instantiate Optimization Problem


def objfunc(x):
    f = dadi.Inference._object_func(x,
                                    data,
                                    func_ex,
                                    pts_l,
                                    lower_bound=lower_bound,
                                    upper_bound=upper_bound)
    g = []
    fail = 0
    return f, g, fail


opt_prob = pyOpt.Optimization('dadi optimization', objfunc)
opt_prob.addVar('nu1_0',
                'c',
                lower=lower_bound[0],
                upper=upper_bound[0],
                value=p1[0])
opt_prob.addVar('nu2_0',
                'c',
                lower=lower_bound[1],
                upper=upper_bound[1],
                value=p1[1])
opt_prob.addVar('nu1',
                'c',
                lower=lower_bound[2],
                upper=upper_bound[2],
                value=p1[2])
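
The snippet above is cut off and leans on names (data, func_ex, pts_l, lower_bound, upper_bound, p1) defined earlier in the original script. A plausible setup, assuming a standard dadi workflow (the model choice and all values below are illustrative, not from the original, whose model appears custom with parameters nu1_0, nu2_0, nu1, ...):

import dadi

data = dadi.Spectrum.from_file('example.fs')  # observed site-frequency spectrum
func_ex = dadi.Numerics.make_extrap_log_func(dadi.Demographics2D.split_mig)
pts_l = [40, 50, 60]                          # extrapolation grid sizes
lower_bound = [1e-2, 1e-2, 0.0, 0.0]          # bounds for (nu1, nu2, T, m)
upper_bound = [100.0, 100.0, 3.0, 10.0]
p1 = [1.0, 1.0, 0.1, 1.0]                     # initial parameter guess
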
Example #14
    def __call__(self):
        """Overloaded call function to optimize cost directly
        """
        def objfun(x, param={}):
            """The objective function, calculates costs and constraints

            Args:
                x(list[50]): A list of model DOF,

            Keyword Args:
                param(dict): Dictionary of parameters passed to the function.
                    ['bayes']: The bayesian object

            Return:
                (float): f, The cost
                (list): g, list of constraints
                (bool): fail, failure flag
            """

            if len(x) > 50: pdb.set_trace()

            bayes = param['bayes']

            model_dict = bayes.models
            opt_key = bayes.opt_key
            model = model_dict[opt_key]

            x = np.dot(model.get_scaling(), x[:50])

            n_end = model.get_option('spline_end')
            n_dof = model.shape()

            # Update the model with the new data
            new_model = model.update_dof(x)
            model_dict[opt_key] = new_model

            new_bayes = bayes.update(models=model_dict)
            initial_data = new_bayes.get_data()

            log_like = 0
            log_like += new_bayes.model_log_like()
            log_like += new_bayes.sim_log_like(initial_data)

            n_dof = new_bayes.shape()[1]
            model_indep = new_model.get_t()

            g = np.zeros(n_dof + 2)
            g[:n_dof] = -model.derivative(n=2)(model_indep[:-n_end])
            g[n_dof] = model.derivative(n=1)(model_indep[n_end])
            g[n_dof + 1] = -model(model_indep[-n_end])

            return float(-log_like), g, False

        # end

        def gradfun(x, f, g, param={}, *args, **kwargs):
            """Function to calculate the gradients directly
            """

            bayes = param['bayes']
            model_dict = bayes.models
            opt_key = bayes.opt_key
            model = model_dict[opt_key]

            f1, g1, fail = objfun(x, param)

            step = 1E-6

            dg = []
            df = []

            old_dof = copy.deepcopy(x)
            for i in range(x.shape[0]):
                x[i] += old_dof[i] * step
                f2, g2, fail = objfun(x, param)
                df.append((f2 - f1) / (old_dof[i] * step))
                dg.append((g2 - g1) / (old_dof[i] * step))
                x[i] -= old_dof[i] * step

            return df, np.array(dg).T, False

        opt_prob = pyOpt.Optimization('Cost Optimization', objfun)

        opt_model = self.models[self.opt_key]
        ndof = opt_model.shape()

        scaled_dof = np.dot(np.linalg.inv(opt_model.get_scaling()),
                            opt_model.get_dof())

        opt_prob.addObj('cost')
        opt_prob.addVarGroup('dof',
                             ndof,
                             'c',
                             lower=0.5,
                             upper=1.5,
                             value=scaled_dof)
        opt_prob.addConGroup('Convexity', ndof, 'i')
        opt_prob.addCon('Monotonicity', 'i')
        opt_prob.addCon('Positive', 'i')

        optimizer = pyOpt.SLSQP()
        optimizer.setOption('IPRINT', 0)
        optimizer.setOption('MAXIT', 100)

        # optimizer = pyOpt.PSQP()
        # optimizer.setOption('IPRINT', 2)
        # optimizer.setOption('XMAX', 1e16)

        # optimizer = pyOpt.ALPSO()
        # optimizer.setOption('xinit', 1)
        # optimizer.setOption('fileout', 2)
        # optimizer.setOption('stopCriteria', 0)
        # optimizer.setOption('Scaling', 0)

        # optimizer = pyOpt.CONMIN()
        # optimizer.setOption('IPRINT', 4)

        [fstr, xstr, inform] = optimizer(
            opt_prob,
            sens_type='fd',
            # sens_step=1.0E-4,
            param={'bayes': self})

        print(opt_prob.solution(0))
        model_dict = self.models
        opt_key = self.opt_key
        model = model_dict[opt_key]

        new_model = model.update_dof(np.dot(model.get_scaling(), xstr))
        model_dict[opt_key] = new_model

        new_bayes = self.update(models=model_dict)

        sens_matrix = new_bayes.get_senns()

        return new_bayes, (None, None), sens_matrix
Example #15
    def Additive_Solve(self,
                       problem,
                       num_fidelity_levels=2,
                       num_samples=10,
                       max_iterations=10,
                       tolerance=1e-6,
                       opt_type='basic',
                       num_starts=3,
                       print_output=True):
        """Solves a multifidelity problem using an additive corrections
    
        Assumptions:
        N/A
    
        Source:
        N/A
    
        Inputs:
        problem             [nexus()]
        num_fidelity_levels [int]
        num_samples         [int]
        max_iterations      [int]
        tolerance           [float]
        opt_type            [str]
        num_starts          [int]
        print_output        [bool]
        
        Outputs:
        (fOpt,xOpt)  [tuple]
    
        Properties Used:
        N/A
        """

        if not print_output:
            devnull = open(os.devnull, 'w')
            sys.stdout = devnull

        if num_fidelity_levels != 2:
            raise NotImplementedError(
                'Additive corrections are only implemented for 2 fidelity levels.'
            )

        # History writing
        f_out = open('add_hist.txt', 'w')
        import datetime
        f_out.write(str(datetime.datetime.now()) + '\n')

        inp = problem.optimization_problem.inputs
        obj = problem.optimization_problem.objective
        con = problem.optimization_problem.constraints

        # Set inputs
        nam = inp[:, 0]  # Names
        ini = inp[:, 1]  # Initials
        bnd = inp[:, 2]  # Bounds
        scl = inp[:, 3]  # Scale
        typ = inp[:, 4]  # Type

        (x, scaled_constraints, x_low_bound, x_up_bound, con_up_edge,
         con_low_edge) = self.scale_vals(inp, con, ini, bnd, scl)

        # Get initial set of samples
        x_samples = latin_hypercube_sampling(len(x),
                                             num_samples,
                                             bounds=(x_low_bound, x_up_bound),
                                             criterion='center')

        # Initialize objective and constraint variables
        f = np.zeros([num_fidelity_levels, num_samples])
        g = np.zeros(
            [num_fidelity_levels, num_samples,
             len(scaled_constraints)])

        for level in range(1, num_fidelity_levels + 1):
            problem.fidelity_level = level
            for ii, x in enumerate(x_samples):
                res = self.evaluate_model(problem, x, scaled_constraints)
                f[level - 1, ii] = res[0]  # objective value
                g[level - 1, ii, :] = res[1]  # constraints vector

        converged = False

        for kk in range(max_iterations):
            # Build objective surrogate
            f_diff = f[1, :] - f[0, :]
            f_additive_surrogate_base = gaussian_process.GaussianProcessRegressor()
            f_additive_surrogate = f_additive_surrogate_base.fit(x_samples, f_diff)

            # Build constraint surrogate
            g_diff = g[1, :] - g[0, :]
            g_additive_surrogate_base = gaussian_process.GaussianProcessRegressor()
            g_additive_surrogate = g_additive_surrogate_base.fit(x_samples, g_diff)

            # Optimize corrected model

            # Choose method ---------------
            if opt_type == 'basic':  # Next point determined by surrogate optimum
                problem.fidelity_level = 1
                x_eval = latin_hypercube_sampling(len(x),
                                                  1,
                                                  bounds=(x_low_bound,
                                                          x_up_bound),
                                                  criterion='random')[0]

                if self.local_optimizer == 'SNOPT':
                    opt_prob = pyOpt.Optimization('SUAVE',self.evaluate_corrected_model, \
                                              obj_surrogate=f_additive_surrogate,cons_surrogate=g_additive_surrogate)

                    # Set up opt_prob
                    self.initialize_opt_vals(opt_prob, obj, inp, x_low_bound,
                                             x_up_bound, con_low_edge,
                                             con_up_edge, nam, con, x_eval)

                    opt = pyOpt.pySNOPT.SNOPT()

                    outputs = opt(opt_prob, sens_type='FD',problem=problem, \
                                  obj_surrogate=f_additive_surrogate,cons_surrogate=g_additive_surrogate)#, sens_step = sense_step)
                    fOpt = outputs[0][0]
                    xOpt = outputs[1]

                elif self.local_optimizer == 'SLSQP':

                    x0, constraints = self.initialize_opt_vals_SLSQP(
                        obj, inp, x_low_bound, x_up_bound, con_low_edge,
                        con_up_edge, nam, con, x_eval, problem,
                        g_additive_surrogate)

                    res = minimize(self.evaluate_corrected_model,
                                   x0,
                                   constraints=constraints,
                                   args=(problem, f_additive_surrogate,
                                         g_additive_surrogate),
                                   options={
                                       'ftol': 1e-6,
                                       'disp': True
                                   })
                    fOpt = res['fun']
                    xOpt = res['x']

                else:
                    raise NotImplementedError

            elif opt_type == 'MEI':  # Next point determined by maximum expected improvement
                fstar = np.min(f[1, :])
                problem.fidelity_level = 1

                if self.global_optimizer == 'ALPSO':
                    opt_prob = pyOpt.Optimization('SUAVE',self.evaluate_expected_improvement, \
                                              obj_surrogate=f_additive_surrogate,cons_surrogate=g_additive_surrogate,fstar=fstar)

                    # Set up opt_prob
                    self.initialize_opt_vals(opt_prob, obj, inp, x_low_bound,
                                             x_up_bound, con_low_edge,
                                             con_up_edge, nam, con, None)

                    # Use a global optimizer
                    opt = pyOpt.pyALPSO.ALPSO()
                    opt.setOption('maxOuterIter', value=20)
                    opt.setOption('seed', value=1.)

                    outputs = opt(opt_prob,problem=problem, \
                                  obj_surrogate=f_additive_surrogate,cons_surrogate=g_additive_surrogate,fstar=fstar,cons=con)#, sens_step = sense_step)
                    fOpt = np.nan
                    imOpt = outputs[0]
                    xOpt = outputs[1]

                elif self.global_optimizer == 'SHGO':

                    xb, shgo_cons = self.initialize_opt_vals_SHGO(
                        obj, inp, x_low_bound, x_up_bound, con_low_edge,
                        con_up_edge, nam, con, problem, g_additive_surrogate)

                    #self.global_optimizer = 'SHGO'
                    options = {}
                    #options['maxfev'] = 1
                    #self.expected_improvement_carpet(x_low_bound, x_up_bound, problem, f_additive_surrogate, g_additive_surrogate, fstar)
                    res = shgo(self.evaluate_expected_improvement,
                               xb,
                               iters=2,
                               args=(problem, f_additive_surrogate,
                                     g_additive_surrogate, fstar),
                               constraints=shgo_cons,
                               options=options)
                    #self.global_optimizer = 'ALPSO'

                    fOpt = np.nan
                    imOpt = res['fun']
                    xOpt = res['x']

                else:
                    raise NotImplementedError

            # ---------------------------------

            complete_flag = False
            if np.any(np.isnan(xOpt)):
                complete_flag = True
            else:

                # Add new samples and check objective and constraint values
                f = np.hstack((f, np.zeros((num_fidelity_levels, 1))))
                g = np.hstack((g, np.zeros(
                    (num_fidelity_levels, 1, len(con)))))
                x_samples = np.vstack((x_samples, xOpt))
                for level in range(1, num_fidelity_levels + 1):
                    problem.fidelity_level = level
                    res = self.evaluate_model(problem, xOpt,
                                              scaled_constraints)
                    f[level - 1][-1] = res[0]
                    g[level - 1][-1] = res[1]

                # History writing
                f_out.write('Iteration: ' + str(kk + 1) + '\n')
                f_out.write('x0       : ' + str(xOpt[0]) + '\n')
                f_out.write('x1       : ' + str(xOpt[1]) + '\n')
                if opt_type == 'basic':
                    f_out.write('expd hi  : ' + str(fOpt) + '\n')
                elif opt_type == 'MEI':
                    f_out.write('expd imp : ' + str(imOpt) + '\n')
                f_out.write('low obj : ' + str(f[0][-1]) + '\n')
                f_out.write('hi  obj : ' + str(f[1][-1]) + '\n')
            if kk == (max_iterations - 1) or complete_flag:  # reached max iterations
                f_diff = f[1, :] - f[0, :]
                if opt_type == 'basic':  # If basic setting f already has the expected optimum
                    problem.fidelity_level = 2
                    fOpt = self.evaluate_model(problem, xOpt,
                                               scaled_constraints)[0][0]
                elif opt_type == 'MEI':  # If MEI, find the optimum of the final surrogate

                    min_ind = np.argmin(f[1])
                    x_eval = x_samples[min_ind]

                    if self.local_optimizer == 'SNOPT':
                        opt_prob = pyOpt.Optimization('SUAVE',self.evaluate_corrected_model, \
                                                      obj_surrogate=f_additive_surrogate,cons_surrogate=g_additive_surrogate)

                        # Set up opt_prob
                        self.initialize_opt_vals(opt_prob, obj, inp,
                                                 x_low_bound, x_up_bound,
                                                 con_low_edge, con_up_edge,
                                                 nam, con, x_eval)

                        fOpt, xOpt = self.run_objective_optimization(
                            opt_prob, problem, f_additive_surrogate,
                            g_additive_surrogate)

                    elif self.local_optimizer == 'SLSQP':
                        problem.fidelity_level = 1
                        x0, constraints = self.initialize_opt_vals_SLSQP(
                            obj, inp, x_low_bound, x_up_bound, con_low_edge,
                            con_up_edge, nam, con, x_eval, problem,
                            g_additive_surrogate)

                        res = minimize(self.evaluate_corrected_model,
                                       x0,
                                       constraints=constraints,
                                       args=(problem, f_additive_surrogate,
                                             g_additive_surrogate),
                                       options={
                                           'ftol': 1e-6,
                                           'disp': True
                                       })
                        fOpt = res['fun']
                        xOpt = res['x']

                    problem.fidelity_level = 2
                    fOpt = self.evaluate_model(problem, xOpt,
                                               scaled_constraints)[0][0]

                    f_out.write('x0_opt  : ' + str(xOpt[0]) + '\n')
                    f_out.write('x1_opt  : ' + str(xOpt[1]) + '\n')
                    f_out.write('final opt : ' + str(fOpt) + '\n')

                print('Iteration Limit Reached')
                break

            if np.abs(fOpt - f[1][-1]) < tolerance:  # converged within tolerance
                print('Convergence reached')
                f_out.write('Convergence reached')
                f_diff = f[1, :] - f[0, :]
                converged = True
                if opt_type == 'MEI':

                    problem.fidelity_level = 1
                    min_ind = np.argmin(f[1])
                    x_eval = x_samples[min_ind]

                    if self.local_optimizer == 'SNOPT':

                        opt_prob = pyOpt.Optimization('SUAVE',self.evaluate_corrected_model, \
                                                      obj_surrogate=f_additive_surrogate,cons_surrogate=g_additive_surrogate)

                        self.initialize_opt_vals(opt_prob, obj, inp,
                                                 x_low_bound, x_up_bound,
                                                 con_low_edge, con_up_edge,
                                                 nam, con, x_eval)

                        opt = pyOpt.pySNOPT.SNOPT()


                        outputs = opt(opt_prob, sens_type='FD',problem=problem, \
                                      obj_surrogate=f_additive_surrogate,cons_surrogate=g_additive_surrogate)#, sens_step = sense_step)
                        fOpt = outputs[0][0]
                        xOpt = outputs[1]

                    elif self.local_optimizer == 'SLSQP':

                        x0, constraints = self.initialize_opt_vals_SLSQP(
                            obj, inp, x_low_bound, x_up_bound, con_low_edge,
                            con_up_edge, nam, con, x_eval, problem,
                            g_additive_surrogate)

                        res = minimize(self.evaluate_corrected_model,
                                       x0,
                                       constraints=constraints,
                                       args=(problem, f_additive_surrogate,
                                             g_additive_surrogate),
                                       options={
                                           'ftol': 1e-6,
                                           'disp': True
                                       })
                        fOpt = res['fun']
                        xOpt = res['x']

                    else:
                        raise NotImplementedError

                    problem.fidelity_level = 2
                    fOpt = self.evaluate_model(problem, xOpt,
                                               scaled_constraints)[0][0]

                    f_out.write('x0_opt  : ' + str(xOpt[0]) + '\n')
                    f_out.write('x1_opt  : ' + str(xOpt[1]) + '\n')
                    f_out.write('final opt : ' + str(fOpt) + '\n')
                break

            fOpt = f[1][-1] * 1.

        if not converged:
            print('Iteration Limit reached')
            f_out.write('Maximum iteration limit reached')

        # Save sample data
        np.save('x_samples.npy', x_samples)
        np.save('f_data.npy', f)
        f_out.close()
        print(fOpt, xOpt)
        if not print_output:
            sys.stdout = sys.__stdout__
        return (fOpt, xOpt)
Example #16
    def pyopt_problem(self,
                      constraints=None,
                      bounds=None,
                      name="Problem",
                      ignore_model_errors=False):
        '''Return a pyopt problem class that can be used with the PyOpt package,
        http://www.pyopt.org/
        '''
        import pyOpt
        from . import optimization  # the body references optimization.constraints below

        constraints = optimization.constraints.canonicalise(constraints)

        def obj(x):
            ''' Evaluates the functional for the given controls values. '''

            fail = False
            if not ignore_model_errors:
                j = self(x)
            else:
                try:
                    j = self(x)
                except:
                    fail = True

            if constraints is not None:
                # Not sure how to do this in parallel, FIXME
                g = np.concatenate(constraints.function(x))
            else:
                g = [0]  # SNOPT fails if no constraints are given; add a dummy one

            return j, g, fail

        def grad(x, f, g):
            ''' Evaluates the gradient for the control values.
            f is the associated functional value and g are the values
            of the constraints. '''

            fail = False
            if not ignore_model_errors:
                dj = self.derivative(x, forget=False)
            else:
                try:
                    dj = self.derivative(x, forget=False)
                except:
                    fail = True

            if constraints is not None:
                gJac = np.concatenate(
                    [gather(c.jacobian(x)) for c in constraints])
            else:
                gJac = np.zeros(len(x))  # Jacobian of the dummy constraint

            info("j = %f\t\t|dJ| = %f" % (f[0], np.linalg.norm(dj)))
            return np.array([dj]), gJac, fail

        # Instantiate the optimization problem
        opt_prob = pyOpt.Optimization(name, obj)
        opt_prob.addObj('J')

        # Compute bounds
        m = self.get_controls()
        n = len(m)

        if bounds is not None:
            bounds_arr = [None, None]
            for i in range(2):
                if isinstance(bounds[i], float) or isinstance(bounds[i], int):
                    bounds_arr[i] = np.ones(n) * bounds[i]
                else:
                    bounds_arr[i] = np.array(bounds[i])
            lb, ub = bounds_arr

        else:
            mx = np.finfo(np.double).max
            ub = mx * np.ones(n)

            mn = np.finfo(np.double).min
            lb = mn * np.ones(n)

        # Add controls
        opt_prob.addVarGroup("variables",
                             n,
                             type='c',
                             value=m,
                             lower=lb,
                             upper=ub)

        # Add constraints
        if constraints is not None:
            for i, c in enumerate(constraints):
                if isinstance(c, optimization.constraints.EqualityConstraint):
                    opt_prob.addConGroup(str(i) + 'th constraint',
                                         c._get_constraint_dim(),
                                         type='e',
                                         equal=0.0)
                elif isinstance(c,
                                optimization.constraints.InequalityConstraint):
                    opt_prob.addConGroup(str(i) + 'th constraint',
                                         c._get_constraint_dim(),
                                         type='i',
                                         lower=0.0,
                                         upper=np.inf)

        return opt_prob, grad
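
A sketch of a typical call site for the returned pair, assuming a reduced-functional object rf exposing this method and SNOPT being installed (all names here are illustrative):

opt_prob, grad = rf.pyopt_problem(bounds=(0.0, 1.0), name="Example")
snopt = pyOpt.pySNOPT.SNOPT()
[fstr, xstr, inform] = snopt(opt_prob, sens_type=grad)
print(opt_prob.solution(0))
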
Example #17
import pyOpt
from pyOpt import SNOPT

def objfunc(x):

  f = (x[0]+x[1])

  g = [0.0]

  #print g

  g[0] = x[0]*x[0] + x[1]*x[1] 

  fail = 0
  return f,g, fail

opt_prob = pyOpt.Optimization('2-D example wiki Constrained Problem',objfunc)

opt_prob.addObj('f')

opt_prob.addVar('x1','c',lower=0.0,upper=float('inf'),value=2.0)
opt_prob.addVar('x2','c',lower=0.0,upper=float('inf'),value=2.0)

opt_prob.addCon('g', type='i', lower=1, upper=2)

snopt = SNOPT()
[fstr, xstr, inform] = snopt(opt_prob, sens_type='FD')

#print opt_prob
print(opt_prob.solution(0))
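
This problem minimizes x1 + x2 over x >= 0 subject to 1 <= x1^2 + x2^2 <= 2, whose optimum sits at a corner such as (1, 0) with f = 1. A cross-check with SciPy's SLSQP (an illustration added here, not part of the original example):

import numpy as np
from scipy.optimize import minimize

cons = ({'type': 'ineq', 'fun': lambda x: x[0]**2 + x[1]**2 - 1.0},  # ring, inner
        {'type': 'ineq', 'fun': lambda x: 2.0 - x[0]**2 - x[1]**2})  # ring, outer
res = minimize(lambda x: x[0] + x[1], x0=np.array([2.0, 0.5]),
               bounds=[(0.0, None), (0.0, None)], constraints=cons,
               method='SLSQP')
print(res.x, res.fun)  # typically lands near (1, 0) with f close to 1
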
Example #18
#=======================================================================================================
#x :[t1,t2,t3,t4,psi]
def objfunc(x):
    # compute the Jacobian at iteration k: Jacobian(t1, t2, t3, t4, psi)
    Jac = Jacobian(x[0], x[1], x[2], x[3], x[4])
    f = -sqrt((Jac * Jac.transpose()).det())
    #constraints
    g = [0.0] * 1

    fail = 0

    return f, g, fail


opt_prob = pyOpt.Optimization('TP37 Constrained Problem', objfunc)
opt_prob.addObj('f')
opt_prob.addVar('x1', 'c', lower=-3.14, upper=3.14, value=0)
opt_prob.addVar('x2', 'c', lower=-3.14, upper=3.14, value=0)
opt_prob.addVar('x3', 'c', lower=-3.14, upper=3.14, value=0)
opt_prob.addVar('x4', 'c', lower=-3.14, upper=3.14, value=0)
opt_prob.addVar('x5', 'c', lower=-3.14, upper=3.14, value=0)
slsqp = pyOpt.SLSQP()
slsqp.setOption('IPRINT', -1)
#[fstr, xstr, inform] = slsqp(opt_prob,sens_type='FD')
#print opt_prob.solution(0)
#======================================================================================================
q_1 = Matrix([[0], [0], [0], [0], [0], [0], [3.14], [0]])  #initial posture
pose_goal = Matrix([[5], [2], [1], [1], [1.5], [1]])  #pose goal

q_computed = IK(pose_goal[0:3, 0:1], pose_goal[3:6, 0:1], q_1,
Example #19
    def identifyFeasibleStdFromFeasibleBase(self, xBase):
        self.xBase_feas = xBase

        # formulate problem as objective function
        if self.idf.opt['optInFeasibleParamSpace']:
            opt = pyOpt.Optimization('Constrained OLS',
                                     self.minimizeSolToCADFeasible)
        else:
            opt = pyOpt.Optimization('Constrained OLS',
                                     self.minimizeSolToCADStd)
        opt.addObj('u')
        '''
        x_cons = self.mapStdToConsistent(self.idf.model.xStd[self.start_param:self.idf.model.num_model_params])
        test = self.mapConsistentToStd(x_cons)
        print(test - self.idf.model.xStd[self.start_param:self.idf.model.num_model_params])
        '''

        self.addVarsAndConstraints(opt)

        # set previous sol as starting point (as primal should be already within constraints for
        # most solvers to perform well)
        if self.idf.opt['optInFeasibleParamSpace']:
            x_cons = self.mapStdToConsistent(
                self.idf.model.xStd[self.start_param:self.idf.model.
                                    num_model_params])
            for i in range(len(opt.getVarSet())):
                #atm, we have 16*no_link + n_dof vars
                if i < len(x_cons):
                    opt.getVar(i).value = x_cons[i]
                else:
                    j = i - len(x_cons)
                    opt.getVar(i).value = self.model.xStd[
                        self.idf.model.num_model_params + j]
        else:
            for i in range(len(opt.getVarSet())):
                opt.getVar(i).value = self.model.xStd[i + self.start_link *
                                                      self.per_link]

        if self.idf.opt['verbose']:
            print(opt)

        if self.idf.opt['nlOptSolver'] == 'IPOPT':
            # not necessarily deterministic
            if self.idf.opt['verbose']:
                print('Using IPOPT')
            solver = pyOpt.IPOPT()
            #solver.setOption('linear_solver', 'ma97')  #mumps or hsl: ma27, ma57, ma77, ma86, ma97 or mkl: pardiso
            #for details, see http://www.gams.com/latest/docs/solvers/ipopt/index.html#IPOPTlinear_solver
            solver.setOption('max_iter', self.idf.opt['nlOptMaxIterations'])
            solver.setOption('print_level', 3)  #0 none ... 5 max

            #don't start too far away from initial values (boundaries push even if starting inside feasible set)
            solver.setOption('bound_push', 0.0000001)
            solver.setOption('bound_frac', 0.0000001)
            #don't relax bounds
            solver.setOption('bound_relax_factor', 0.0)  #1e-16)

        elif self.idf.opt['nlOptSolver'] == 'SLSQP':
            # solve optimization problem
            if self.idf.opt['verbose']:
                print('Using SLSQP')
            solver = pyOpt.SLSQP(disp_opts=True)
            solver.setOption('MAXIT', self.idf.opt['nlOptMaxIterations'])
            if self.idf.opt['verbose']:
                solver.setOption('IPRINT', 0)

        elif self.idf.opt['nlOptSolver'] == 'PSQP':
            # solve optimization problem
            if self.idf.opt['verbose']:
                print('Using PSQP')
            solver = pyOpt.PSQP(disp_opts=True)
            solver.setOption('MIT', self.idf.opt['nlOptMaxIterations'])
            if self.idf.opt['verbose']:
                solver.setOption('IPRINT', 0)

        elif self.idf.opt['nlOptSolver'] == 'ALPSO':
            if self.idf.opt['verbose']:
                print('Using ALPSO')
            solver = pyOpt.ALPSO(disp_opts=True)
            solver.setOption('stopCriteria', 0)
            solver.setOption('dynInnerIter', 1)  # dynamic inner iter number
            solver.setOption('maxInnerIter', 5)
            solver.setOption('maxOuterIter',
                             self.idf.opt['nlOptMaxIterations'])
            solver.setOption('printInnerIters', 0)
            solver.setOption('printOuterIters', 0)
            solver.setOption('SwarmSize', 100)
            solver.setOption('xinit', 1)

        elif self.idf.opt['nlOptSolver'] == 'NSGA2':
            if self.idf.opt['verbose']:
                print('Using NSGA2')
            solver = pyOpt.NSGA2(disp_opts=True)
            solver.setOption('PopSize',
                             100)  # Population Size (a Multiple of 4)
            solver.setOption('maxGen',
                             self.idf.opt['nlOptMaxIterations'])  # max generations
            solver.setOption('PrintOut', 0)  # output to files (0-None, 1-Subset, 2-All)
            solver.setOption('xinit', 1)  # use given solution (1) instead of random population (0)
            #solver.setOption('seed', sr.random())   # Random Number Seed 0..1 (0 - Auto based on time clock)
            #pCross_real    0.6     Probability of Crossover of Real Variable (0.6-1.0)
            solver.setOption('pMut_real', 0.5)  # probability of mutation of real variables (1/nreal)
            #eta_c  10.0    # Distribution Index for Crossover (5-20) must be > 0
            #eta_m  20.0    # Distribution Index for Mutation (5-50) must be > 0
            #pCross_bin     0.0     # Probability of Crossover of Binary Variable (0.6-1.0)
            #pMut_real      0.0     # Probability of Mutation of Binary Variables (1/nbits)
        else:
            print('Solver unknown')

        self.opt_prob = opt
        solver(opt)  #run optimizer

        # set best solution again (is often different than final solver solution)
        if self.last_best_x is not None:
            for i in range(len(opt.getVarSet())):
                opt.getVar(i).value = self.last_best_x[i]
        else:
            self.last_best_x = self.model.xStd[self.start_param:]

        sol = opt.solution(0)
        if self.idf.opt['verbose']:
            print(sol)

        if self.idf.opt['optInFeasibleParamSpace'] and len(
                self.last_best_x) > len(self.model.xStd[self.start_param:]):
            # we get consistent parameterized params as solution
            x_std = self.mapConsistentToStd(self.last_best_x)
            self.model.xStd[self.start_param:self.idf.model.
                            num_model_params] = x_std
        else:
            # we get std vars as solution
            self.model.xStd[self.start_param:] = self.last_best_x
Example #20
def pySNOPT(project, x0=None, xb=None, its=100, accu=1e-12, grads=True):

    # handle input cases
    if x0 is None: x0 = []
    if xb is None: xb = []

    # function handles
    func = obj_f
    f_eqcons = con_ceq
    f_ieqcons = con_cieq

    # gradient handles
    if project.config.get('GRADIENT_METHOD', 'NONE') == 'NONE':
        fprime = None
        fprime_eqcons = None
        fprime_ieqcons = None
    else:
        fprime = obj_df
        fprime_eqcons = con_dceq
        fprime_ieqcons = con_dcieq

    # number of design variables
    dv_size = project.config['DEFINITION_DV']['SIZE']
    n_dv = sum(dv_size)
    project.n_dv = n_dv

    # Initial guess
    if not x0: x0 = [0.0] * n_dv

    # prescale x0
    dv_scales = project.config['DEFINITION_DV']['SCALE'] * 1
    k = 0
    for i, dv_scl in enumerate(dv_scales):
        dv_scales[i] = 1000.
        for j in range(dv_size[i]):
            x0[k] = x0[k] / dv_scl
            k = k + 1

    # scale accuracy
    obj = project.config['OPT_OBJECTIVE']
    obj_scale = []
    for this_obj in obj.keys():
        obj_scale = obj_scale + [obj[this_obj]['SCALE']]
    obj_scale = [100.]  # hard-coded override of the config scale

    ieq_cons = project.config['OPT_CONSTRAINT']['INEQUALITY']
    ieq_cons_scale = []
    for this_con in ieq_cons.keys():
        ieq_cons_scale = ieq_cons_scale + [ieq_cons[this_con]['SCALE']]
    ieq_cons_scale = [100., 100., 1000.]  # hard-coded override of the config scales

    if len(project.config['OPT_CONSTRAINT']['EQUALITY']) > 0:
        raise NotImplementedError(
            'Equality constraints have not been implemented for SU2 <-> SNOPT')

    # Only scale the accuracy for single-objective problems:
    if len(obj.keys()) == 1:
        accu = accu * obj_scale[0]

    # scale accuracy
    eps = 1.0e-06

    # ----------------------------
    #
    # SNOPT Specific Values
    #
    # ----------------------------

    def snopt_func_base(xs, project):
        # the 's' suffix marks SNOPT-scaled values; others are direct SU2 inputs/outputs
        x = xs * 1
        for i, val in enumerate(xs):
            x[i] = x[i] / dv_scales[i]
        f = func(x, project)
        fs = f * obj_scale[0]
        g = np.hstack([f_ieqcons(x, project), f_eqcons(x, project)])
        gs = g * ieq_cons_scale
        fail = 0
        #f *= obj_scale
        return fs, gs.tolist(), fail

    snopt_func_final = lambda x: snopt_func_base(x, project)

    opt_prob = pyOpt.Optimization('SUAVE', snopt_func_final)
    opt_prob.addObj('Objective')
    for i, val in enumerate(x0):
        var_name = 'x' + str(i)
        opt_prob.addVar(var_name,
                        'c',
                        lower=xb[i][0] * dv_scales[i],
                        upper=xb[i][1] * dv_scales[i],
                        value=x0[i])  # final value already scaled

    for con in project.config['OPT_CONSTRAINT']['INEQUALITY']:
        bound = project.config['OPT_CONSTRAINT']['INEQUALITY'][con]['VALUE']
        if project.config['OPT_CONSTRAINT']['INEQUALITY'][con]['SIGN'] == '>':
            opt_prob.addCon(con, type='i', lower=0., upper=np.inf)
        else:
            #opt_prob.addCon(con, type='i', lower=-np.inf, upper=0.)
            # no sign change needed due to the ineq handling in this module
            opt_prob.addCon(con, type='i', lower=0., upper=np.inf)

    for con in project.config['OPT_CONSTRAINT']['EQUALITY']:
        opt_prob.addCon(con, type='e', equal=bound)

    opt = pyOpt.pySNOPT.SNOPT()

    def grad_function_base(xs, fs, gs, project):
        # the 's' suffix marks SNOPT-scaled values; others are direct SU2 inputs/outputs
        x = xs * 1
        for i, val in enumerate(x):
            x[i] = x[i] / dv_scales[i]
        g_obj = fprime(x, project)
        g_obj_s = g_obj * 1
        for i, val in enumerate(dv_scales):
            g_obj_s[i] = g_obj[i] * obj_scale[0] / dv_scales[i]
        g_con = np.vstack(
            [fprime_ieqcons(x, project),
             fprime_eqcons(x, project)])
        g_con_s = g_con * 1
        for i, val in enumerate(dv_scales):
            g_con_s[:, i] = g_con_s[:, i] * np.atleast_2d(
                ieq_cons_scale) / dv_scales[i]
        fail = 0
        return g_obj_s.tolist(), g_con_s.tolist(), fail

    grad_function_final = lambda x, f, g: grad_function_base(x, f, g, project)

    opt.setOption('Function precision', accu)
    opt.setOption('Verify level', 0)
    opt.setOption('Major optimality tolerance', eps)
    opt.setOption('Major iterations limit', its)
    outputs = opt(opt_prob, sens_type=grad_function_final)

    print('Ran SNOPT')
    print(outputs)
    return outputs
Example #21
def opt_run(theta, m):
    ## Run a parameterized optimization problem
    # Usage
    #   fs, ds = opt_run(theta, m)
    # Arguments
    #   theta = estimated parameter vector
    #   m     = sample count
    # Returns
    #   fs    = optimum value
    #   ds    = optimum point

    That = gen_That(theta, m)

    def objfunc(x):
        # f = objective value
        # g = [-gc_stress, -gc_disp]
        f = obj(x)
        g = [0] * 2
        try:
            gc_stress, _, _ = pma(
                func=lambda u: fcn_g_stress(u, d=x, theta=theta),
                gradFunc=lambda u: grad_g_stress(u, d=x, theta=theta),
                u0=u0,
                pf=pf_stress,
                tfmJac=lambda u: dUdT(u, theta=theta),
                That=That,
                C=Con)
            g[0] = -gc_stress

            gc_disp, _, _ = pma(
                func=lambda u: fcn_g_disp(u, d=x, theta=theta),
                gradFunc=lambda u: grad_g_disp(u, d=x, theta=theta),
                u0=u0,
                pf=pf_disp,
                tfmJac=lambda u: dUdT(u, theta=theta),
                That=That,
                C=Con)
            g[1] = -gc_disp

            fail = 0
        except ValueError:
            fail = 1

        return f, g, fail

    def gradfunc(x, f, g):
        grad_obj = [0] * 2
        grad_obj[:] = objGrad(x)
        grad_con = np.zeros((2, 2))
        try:
            _, mpp_stress, _ = pma(
                func=lambda u: fcn_g_stress(u, d=x, theta=theta),
                gradFunc=lambda u: grad_g_stress(u, d=x, theta=theta),
                u0=u0,
                pf=pf_stress,
                tfmJac=lambda u: dUdT(u, theta=theta),
                That=That,
                C=Con)
            grad_con[0] = -sens_g_stress(U=mpp_stress, d=x, theta=theta)

            _, mpp_disp, _ = pma(
                func=lambda u: fcn_g_disp(u, d=x, theta=theta),
                gradFunc=lambda u: grad_g_disp(u, d=x, theta=theta),
                u0=u0,
                pf=pf_disp,
                tfmJac=lambda u: dUdT(u, theta=theta),
                That=That,
                C=Con)
            grad_con[1] = -sens_g_disp(U=mpp_disp, d=x, theta=theta)

            fail = 0
        except ValueError:
            fail = 1

        return grad_obj, grad_con, fail

    opt_prob = pyOpt.Optimization("Cantilever Beam", objfunc)
    opt_prob.addObj("f")
    opt_prob.addVar("x1", "c", lower=2.0, upper=4.0, value=3.0)
    opt_prob.addVar("x2", "c", lower=2.0, upper=4.0, value=3.0)
    opt_prob.addCon("g1", "i")
    opt_prob.addCon("g2", "i")

    slsqp = pyOpt.SLSQP()
    slsqp.setOption("IPRINT", -1)
    [fstr, xstr, inform] = slsqp(opt_prob, sens_type=gradfunc)

    ds = [0] * 2
    ds[0] = opt_prob.solution(0)._variables[0].value
    ds[1] = opt_prob.solution(0)._variables[1].value

    fs = opt_prob.solution(0)._objectives[0].value

    return fs, ds
def get_pyOpt_options(method, rd, lb, ub, tol, max_iter, **kwargs):
    """Get options for pyOpt module

    See `<http://www.pyopt.org>`

    method : str
        Which optimization algorithm to use (currently ignored; SLSQP is
        always chosen).
    rd : :py:class:`dolfin_adjoint.ReducedFunctional`
        The reduced functional
    lb : list 
        Lower bound on the control
    ub : list
        Upper bound on the control
    tol : float
        Tolerance
    max_iter : int
        Maximum number of iterations

    *Returns*

    opt : tuple
        The optimization solver and the options, (solver, options)

    """

    def obj(x):

        f, fail = rd(x, True)

        g = []

        return f, g, fail

    def grad(x, f, g):
        fail = False
        try:
            dj = rd.derivative()
        except:
            fail = True

        # Constraints gradient
        gJac = np.zeros(len(x))

        # logger.info("j = %f\t\t|dJ| = %f" % (f[0], np.linalg.norm(dj)))
        return np.array([dj]), gJac, fail

    # Create problem
    opt_prob = pyOpt.Optimization("Problem", obj)

    # Assign objective
    opt_prob.addObj("J")

    # Assign design variables (bounds)
    opt_prob.addVarGroup(
        "variables", kwargs["nvar"], type="c", value=kwargs["m"], lower=lb, upper=ub
    )

    opt = pyOpt.pySLSQP.SLSQP()
    opt.setOption("ACC", tol)
    opt.setOption("MAXIT", max_iter)
    opt.setOption("IPRINT", -1)
    opt.setOption("IFILE", "")

    options = {"opt_problem": opt_prob, "sens_type": grad}

    return opt, options
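
A sketch of how the returned pair might be consumed; rd, lb, ub, and initial_control are assumed to come from the calling context:

opt, options = get_pyOpt_options('SLSQP', rd, lb, ub, tol=1e-6, max_iter=100,
                                 nvar=len(lb), m=initial_control)
[fstr, xstr, inform] = opt(options["opt_problem"], sens_type=options["sens_type"])
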
  def __init__(self, mrobot):

  ## "Arbitrary" parameters
    self.N_s = 15 # nb of samples for discretization
    self.n_knot = 4 # nb of non-zero-length intervals of the knot series
    self.t_init = 0.0
    self.Tc = 0.5
    self.Tp = 1.1
    tstep = (self.Tp-self.t_init)/(self.N_s-1)
    Tc_idx = int(round(self.Tc/tstep))
    self.detection_radius = 2.0

  ## Mobile Robot object
    self.mrob = mrobot

  ## Other parameters
    self.d = self.mrob.l+2 # B-spline order (integer | d > l+1)
    self.n_ctrlpts = self.n_knot + self.d - 1 # nb of ctrl points

  ## Constraint values...
  ## ...for equality constraints:
    # initial state
    self.q_init = np.matrix([[0.0], [0.0], [np.pi/2]])
    # final state
    self.q_fin = np.matrix([[2.0], [5.0], [np.pi/2]])
    # initial control input
    self.u_init = np.matrix([[0.0], [0.0]])
    # final control input
    self.u_fin = np.matrix([[0.0], [0.0]])
  ## ...for inequality constraints:
    # Control boundary
    self.u_abs_max = self.mrob.u_abs_max
    # Obstacles (Q_occupied)
    # TODO: Obstacles random generation
    self.obst_map =                          np.matrix([0.25, 2.5, 0.2])
    self.obst_map = np.append(self.obst_map, np.matrix([2.3,  2.5, 0.5]),
        axis = 0)
    self.obst_map = np.append(self.obst_map, np.matrix([1.25, 3,   0.1]),
        axis = 0)
    self.obst_map = np.append(self.obst_map, np.matrix([0.3,  1.0,   0.1]),
        axis = 0)
    self.obst_map = np.append(self.obst_map, np.matrix([-0.5, 1.5, 0.3]),
        axis = 0)
#    self.obst_map = np.append(self.obst_map, np.matrix([1.6, 4.3, 0.2]),
#        axis = 0)
    
    # max distance within Tp
    self.D = self.Tp * self.u_abs_max[0,0]

    # Ctrl pts init
    C = np.array(np.zeros((self.n_ctrlpts,self.mrob.u_dim)))
    C_lower = np.array([-10, -1]*self.n_ctrlpts)
    C_upper = np.array([+10, +6]*self.n_ctrlpts)
    
    self.detected_obst_idxs = []

    # final trajectory
    self.C_ref = []

  ## Generate initial b-spline knots
    self.knots = self._gen_knots(self.t_init, self.Tp)
    self.mtime = np.linspace(self.t_init, self.Tp, self.N_s)

  ## Optimization results
    self.unsatisf_eq_values = []
    self.unsatisf_ieq_values = []

  ## Plot initialization
    plt.ion()
    self.fig = plt.figure()
    self.fig_speed = plt.subplots(2)
    ax = self.fig.gca()

    # creating obstacles' circles
    circ = []
    for r in range(self.obst_map.shape[0]):
      # external dashed circles
      circ = circ + \
          [plt.Circle((self.obst_map[r,0], self.obst_map[r,1]),
          self.obst_map[r,2]+self.mrob.rho,color='k',ls = 'dashed',fill=False)]
      # internal continuous circles
      circ = circ + \
          [plt.Circle((self.obst_map[r,0], self.obst_map[r,1]),
          self.obst_map[r,2], color='k', fill=False)]

    # adding circles to axis
    [ax.add_artist(c) for c in circ]

    # plot curve and its control points
    self.rejected_path,self.plt_ctrl_pts,self.plt_curve,self.plt_dot_curve,self.seg_pts = ax.plot(
            0.0, 0.0, 'm:',
            0.0, 0.0, '*',
            0.0, 0.0, 'b-',
            0.0, 0.0, 'g.',
            0.0, 0.0, 'rd')
    
    # formatting figure
    plt.xlabel('x(m)')
    plt.ylabel('y(m)')
    plt.title('Generated trajectory')
    ax.axis('equal')

    axarray = self.fig_speed[0].axes
    self.plt_linspeed, = axarray[0].plot(0.0, 0.0)
    self.plt_angspeed, = axarray[1].plot(0.0, 0.0)
    axarray[0].set_ylabel('v(m/s)')
    axarray[0].set_title('Linear speed')
    axarray[1].set_xlabel('time(s)')
    axarray[1].set_ylabel('w(rad/s)')
    axarray[1].set_title('Angular speed')
    axarray[0].grid()
    axarray[1].grid()

    final_z = self.mrob.phi0(self.q_fin)
    self.last_q = self.q_init
    self.last_u = self.u_init
    last_z = self.mrob.phi0(self.last_q)
    self.all_dz = []
    self.all_rejected_z = []
    self.itcount = 0

    usepyopt = False

    # loop while the remaining straight-line distance to the goal exceeds
    # the maximum distance coverable during Tp
    while LA.norm(last_z - final_z) > self.D:

        self.detected_obst_idxs = self._detected_obst_idx(last_z)
#        print('No of detected obst: {}'.format(len(self.detected_obst_idxs)))
#        print('Detected obst: {}'.format(self.obst_map[self.detected_obst_idxs,:]))

        # initialize ctrl points (straight line towards final z)
        direc = final_z - last_z
        direc = direc/LA.norm(direc)
        C[:,0] = np.array(np.linspace(last_z[0,0],
                last_z[0,0]+self.D*direc[0,0], self.n_ctrlpts)).T
        C[:,1] = np.array(np.linspace(last_z[1,0],
                last_z[1,0]+self.D*direc[1,0], self.n_ctrlpts)).T

        tic = time.time()
        if usepyopt:
            # Define the optimization problem
            self.opt_prob = pyOpt.Optimization(
                    'Faster path with obstacles', # name of the problem
                    self._obj_func) # objective function (criterion, eq. and ineq. constraints)
    
            self.opt_prob.addObj('J')
    
            self.opt_prob.addVarGroup( # minimization arguments
                    'C',
                    self.mrob.u_dim*self.n_ctrlpts, # dimension
                    'c', # continuous
                    lower=list(C_lower),
                    value=list(np.squeeze(C.reshape(1,self.n_ctrlpts*self.mrob.u_dim))),
                    upper=list(C_upper))
    
            self.opt_prob.addConGroup( # equality constraints
                    'ec',
                    self.mrob.q_dim + self.mrob.u_dim, # dimension
                    'e') # equalities

            self.opt_prob.addConGroup( # inequality constraints
                    'ic',
                    self.N_s*self.mrob.u_dim +
                            self.N_s*len(self.detected_obst_idxs), # dimension
                    'i') # inequalities
    
            # solve constrained optimization
#            solver = pyOpt.SLSQP(pll_type='POA')
#            solver.setOption('ACC', 1e-6)
#            solver.setOption('MAXIT', 30)
            solver = pyOpt.ALGENCAN(pll_type='POA')
            solver.setOption('epsfeas', 1e-1)
            solver.setOption('epsopt', 9e-1)
    
            [J, C_aux, information] = solver(self.opt_prob) 
            C_aux = np.array(C_aux)
#            if information.value != 0 and information.value != 9:
            usepyopt = False 

        else:
            # solve constrained optimization
            outp = fmin_slsqp(self._criteria,
                                np.squeeze(C.reshape(1,self.n_ctrlpts*self.mrob.u_dim)),
                                eqcons=(),
                                f_eqcons=self._feqcons,
                                ieqcons=(),
                                f_ieqcons=self._fieqcons,
                                iprint=1,
                                iter=50,
                                acc=1e-6,
                                full_output=True,
                                callback=self._plot_update)
            C_aux = outp[0]
            imode = outp[3]
            print('SLSQP exit mode: {}\n'.format(imode))
            if imode != 0 and imode != 9:
                usepyopt = True
                continue
        

        print('Elapsed time for iteration {}: {}'.format(self.itcount, time.time()-tic))

        print('No. of equality constraints unsatisfied: {}'.format(len(self.unsatisf_eq_values)))
        print('Mean and std of equality constraint violations: ({},{})'.format(np.mean(self.unsatisf_eq_values), np.std(self.unsatisf_eq_values)))
        print('No. of inequality constraints unsatisfied: {}'.format(len(self.unsatisf_ieq_values)))
        print('Mean and std of inequality constraint violations: ({},{})'.format(np.mean(self.unsatisf_ieq_values), np.std(self.unsatisf_ieq_values)))

        # test if opt went well
        # if yes
        C = C_aux
        # if no
        # continue

        C = C.reshape(self.n_ctrlpts, self.mrob.u_dim)
        # store ctrl points and [z dz ddz](t)
        self.C_ref += [C]
        dz = self._comb_bsp(self.mtime[0:Tc_idx], C, 0).T
        for dev in range(1,self.mrob.l+1):
            dz = np.append(dz,self._comb_bsp(
                    self.mtime[0:Tc_idx], C, dev).T,axis=0)
        self.all_dz += [dz]

        rejected_z = self._comb_bsp(self.mtime[Tc_idx:], C, 0).T
        self.all_rejected_z += [np.append(np.append(dz[0:2,:], rejected_z, axis=1), np.fliplr(rejected_z), axis=1)]
        
        # update needed values
        self.knots = self.knots + self.Tc
        self.mtime = [tk+self.Tc for tk in self.mtime]
        last_z = self.all_dz[-1][0:self.mrob.u_dim,-1]
#        print('Last z: {}', last_z)
#        print(self.all_dz[-1][:,-1].reshape(
#                self.mrob.l+1, self.mrob.u_dim).T)
        self.last_q = self.mrob.phi1(self.all_dz[-1][:,-1].reshape(
                self.mrob.l+1, self.mrob.u_dim).T)
        self.last_u = self.mrob.phi2(self.all_dz[-1][:,-1].reshape(
                self.mrob.l+1, self.mrob.u_dim).T)

        #raw_input('pause')  # debug breakpoint
        self.itcount += 1
    #endwhile
    
    self.detected_obst_idxs = self._detected_obst_idx(last_z)
    print(self.detected_obst_idxs)

    # initialize ctrl points (straight line towards final z)
    C[:,0] = np.array(np.linspace(last_z[0,0],
            final_z[0,0], self.n_ctrlpts)).T
    C[:,1] = np.array(np.linspace(last_z[1,0],
            final_z[1,0], self.n_ctrlpts)).T
    x_aux = np.append(np.asarray([self.Tp]), np.squeeze(C.reshape(1,self.n_ctrlpts*self.mrob.u_dim)), axis=0)

    print(x_aux)
    self._lstep_plot_update(x_aux)

    tic = time.time()

    while True:
        if False:
            # Define the optimization problem
            self.opt_prob = pyOpt.Optimization(
                    'Faster path with obstacles', # name of the problem
                    self._lstep_obj_func) # objective function (criterion, eq. and ineq. constraints)
        
            self.opt_prob.addObj('J')
        
            self.opt_prob.addVarGroup( # minimization arguments
                    'x',
                    self.mrob.u_dim*self.n_ctrlpts+1, # dimension
                    'c', # continuous
                    lower=[0.0]+list(C_lower),
                    value=[self.Tp]+list(np.squeeze(C.reshape(1,self.n_ctrlpts*self.mrob.u_dim))),
                    upper=[1e10]+list(C_upper))
        
            self.opt_prob.addConGroup( # equality constraints
                    'ec',
                    2*self.mrob.q_dim + 2*self.mrob.u_dim, # dimension
                    'e') # equalities

            self.opt_prob.addConGroup( # inequality constraints
                    'ic',
                    self.N_s*self.mrob.u_dim +
                            self.N_s*len(self.detected_obst_idxs), # dimension
                    'i') # inequalities
        
            # solve constrained optimization
#            solver = pyOpt.SLSQP(pll_type='POA')
#            solver.setOption('ACC', 1e-6)
#            solver.setOption('MAXIT', 30)
            solver = pyOpt.ALGENCAN(pll_type='POA')
            solver.setOption('epsfeas', 5e-1)
            solver.setOption('epsopt', 9e-1)
        
            [J, x_aux, information] = solver(self.opt_prob) 
            x_aux = np.array(x_aux)
            break

        else:
            # solve constrained optimization
            outp = fmin_slsqp(self._lstep_criteria,
                                x_aux,
                                eqcons=(),
                                f_eqcons=self._lstep_feqcons,
                                ieqcons=(),
                                f_ieqcons=self._lstep_fieqcons,
                                iprint=1,
                                iter=30,
                                acc=1e-6,
                                full_output=True,
                                callback=self._lstep_plot_update)

            x_aux = outp[0]
            imode = outp[3]
            print('SLSQP exit mode: {}\n'.format(imode))
            if imode != 0 and imode != 9:
                usepyopt = True
                continue
            break

    print('Elapsed time for iteration {} (last): {}'.format(self.itcount, time.time()-tic))
    print('No. of equality constraints unsatisfied: {}'.format(len(self.unsatisf_eq_values)))
    print('Mean and std of equality constraint violations: ({},{})'.format(np.mean(self.unsatisf_eq_values), np.std(self.unsatisf_eq_values)))
    print('No. of inequality constraints unsatisfied: {}'.format(len(self.unsatisf_ieq_values)))
    print('Mean and std of inequality constraint violations: ({},{})'.format(np.mean(self.unsatisf_ieq_values), np.std(self.unsatisf_ieq_values)))

    # test if opt went well
    # if yes
    dt_final = x_aux[0]
    print(x_aux)
    self.t_final = self.mtime[0] + dt_final
    C = x_aux[1:].reshape(self.n_ctrlpts, self.mrob.u_dim)
    # if no
    # continue

    print('Final time: {}'.format(self.t_final))

    # store ctrl points and [z dz ddz](t)
    self.C_ref += [C]

    self.mtime = np.linspace(self.mtime[0], self.t_final, self.N_s)

    dz = self._comb_bsp(self.mtime, C, 0).T
    for dev in range(1,self.mrob.l+1):
        dz = np.append(dz,self._comb_bsp(
                self.mtime, C, dev).T,axis=0)
    self.all_dz += [dz]
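# _gen_knots above is not shown in this excerpt. For a clamped B-spline of
# order d with n_knot nonzero-length knot intervals, a common choice is a
# uniform knot vector with the end knots repeated d times, which yields
# n_ctrlpts + d knots for the n_ctrlpts = n_knot + d - 1 control points used
# above; a sketch under that assumption:
import numpy as np

def gen_clamped_knots(t_init, t_fin, d, n_knot):
    # interior breakpoints, uniformly spaced over [t_init, t_fin]
    interior = np.linspace(t_init, t_fin, n_knot + 1)
    # repeat each boundary knot d - 1 extra times to clamp the spline ends
    return np.concatenate(([t_init] * (d - 1), interior, [t_fin] * (d - 1)))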
Example #24
def SNOPTrun(vars0, mission, includeDrag, flagFORCE=1):
    import pyOpt
    import constraints
    import costFunction
    import numpy as np

    # THESE FUNCTIONS ARE USED BY SNOPT
    # Define the functions SNOPT optimizer will call

    def objectiveFunction(inVars, mission, includeDrag):
        x = costFunction.ObjectiveMass(inVars, mission)
        eq = constraints.equality(inVars, mission, 'real', includeDrag)
        ineq = constraints.inequality(inVars, mission, 'real')

        g = np.concatenate((eq, ineq), 1)
        fail = 0
        return x, g, fail

    def sensitivityFunction(inVars, f, g, mission, includeDrag):
        x = costFunction.fprimeObjectiveMass(inVars, mission)
        eq = constraints.fprimeequality(inVars, mission, '2d', includeDrag)
        ineq = constraints.fprimeinequality(inVars, mission, '2d')

        g = np.concatenate((eq, ineq), 0)
        fail = 0
        return x, g, fail

    numEquality = len(constraints.equality(vars0, mission))
    numInequality = len(constraints.inequality(vars0, mission))

    # Find the upper and lower bounds
    #boundsCase = constraints.bounds(mission)
    lb, ub = constraints.getXLXU(vars0, mission)

    #TJC
    opt_prob = pyOpt.Optimization(
        'Trajectory Optimization',
        lambda x: objectiveFunction(x, mission, includeDrag))
    opt_prob.addObj('Objective Mass')

    # Specify all of the variables
    #print 'Setting up variables in a hackish way.  MUST CHANGE!!!'
    for curVar in range(len(vars0)):
        opt_prob.addVar('var' + str(curVar),
                        'c',
                        value=vars0[curVar],
                        lower=lb[curVar],
                        upper=ub[curVar])

    # Now add in equality constraints
    for curCon in range(numEquality):
        opt_prob.addCon('g' + str(curCon), 'e')

    # Now add in inequality constraints
    for curCon in range(numEquality, numEquality + numInequality):
        opt_prob.addCon('g' + str(curCon), 'i')

    # Confirm that everything is correct
    #print opt_prob

    # Set up the optimizer
    snopt = pyOpt.pySNOPT.SNOPT()
    snopt.setOption('Major feasibility tolerance', value=5e-6)
    snopt.setOption('Major optimality tolerance', value=1e-5)
    snopt.setOption('Minor feasibility tolerance', value=5e-6)
    snopt.setOption('Major iterations limit', 500)
    print 'Using SNOPT'

    # Optimize and save results
    sens2 = lambda x, f, g: sensitivityFunction(x, f, g, mission, includeDrag)

    # by default will try complex step first...if fails...then finite difference
    exitVal = snopt(opt_prob, sens_type=sens2)

    infoOpt = exitVal[2]['text']
    if infoOpt != 'finished successfully' and flagFORCE == 1:
        print 'Failed to finish successfully with CS .... trying FD'
        exitVal = snopt(opt_prob, sens_type='FD')

    return exitVal
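# The 'complex step first ... then finite difference' fallback above refers
# to pyOpt's built-in sensitivity types ('CS' and 'FD'). For reference, the
# complex-step approximation is df/dx_i = Im(f(x + i*h*e_i))/h, accurate to
# machine precision for analytic f; a standalone sketch:
import numpy as np

def complex_step_grad(f, x, h=1e-20):
    # perturb each coordinate along the imaginary axis and read off Im(f)/h
    g = np.zeros(len(x))
    for i in range(len(x)):
        xp = np.asarray(x, dtype=complex).copy()
        xp[i] += 1j * h
        g[i] = f(xp).imag / h
    return g

# e.g. complex_step_grad(lambda z: (z ** 2).sum(), [1.0, 2.0]) -> [2.0, 4.0]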
Example #25
import sys
print sys.executable

import matplotlib.pyplot as plt
import numpy as np

from anypytools.abcutils import AnyPyProcess
from SensitivityStudyDorsi import run_model, objfunc
app = AnyPyProcess()

import pyOpt
from pyOpt import SNOPT
delta = 25 * 5e-5
AnyBody = 0.0004748338
Factor = 2000

opt_prob = pyOpt.Optimization('Joint Strength Analysis2', objfunc)
opt_prob.addObj('f')

opt_prob.addVar('tarmin', 'c', lower=0.1, upper=0.8, value=0.5)
opt_prob.addVar('edlrmin', 'c', lower=0.1, upper=0.8, value=0.5)
opt_prob.addVar('ehlrmin', 'c', lower=0.1, upper=0.8, value=0.5)
opt_prob.addVar('tarmax', 'c', lower=0.7, upper=1.6, value=1.2)
opt_prob.addVar('edlrmax', 'c', lower=0.7, upper=1.6, value=1.2)
opt_prob.addVar('ehlrmax', 'c', lower=0.7, upper=1.6, value=1.2)
opt_prob.addVar('LocalStrengthFactorDorsiFlexors',
                'c',
                lower=0.1,
                upper=10,
                value=1)
opt_prob.addVar('x1', 'c', lower=0.0, upper=np.inf, value=0)
opt_prob.addVar('x2', 'c', lower=0.0, upper=np.inf, value=0)
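# The snippet is truncated here; given the imports above, a plausible
# continuation (assumed, not part of the original source) would be:
#
#   snopt = SNOPT()
#   exitVal = snopt(opt_prob, sens_type='FD')
#   print opt_prob.solution(0)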
Example #26
def converge_opt(segment):
    """Interfaces the mission to an optimization algorithm

    Assumptions:
    N/A

    Source:
    N/A

    Inputs:
    state.unknowns                     [Data]
    segment                            [Data]
    segment.algorithm                  [string]

    Outputs:
    state.unknowns                     [Any]

    Properties Used:
    N/A
    """

    # pack up the array
    unknowns = segment.state.unknowns.pack_array()

    # Have the optimizer call the wrapper
    obj = lambda unknowns: get_objective(unknowns, segment)
    econ = lambda unknowns: get_econstraints(unknowns, segment)
    iecon = lambda unknowns: get_ieconstraints(unknowns, segment)

    # Setup the bnds of the problem
    bnds = make_bnds(unknowns, segment)

    # Solve the problem, based on chosen algorithm
    if segment.algorithm == 'SLSQP':
        unknowns = opt.fmin_slsqp(obj,
                                  unknowns,
                                  f_eqcons=econ,
                                  f_ieqcons=iecon,
                                  bounds=bnds,
                                  iter=2000)

    elif segment.algorithm == 'SNOPT':

        # SNOPT imports
        import pyOpt
        import pyOpt.pySNOPT

        # Have the optimizer call the wrapper
        obj_pyopt = lambda unknowns: get_problem_pyopt(unknowns, segment)

        opt_prob = pyOpt.Optimization('SUAVE', obj_pyopt)
        opt_prob.addObj(segment.objective)

        for ii in range(0, len(unknowns)):
            lbd = (bnds[ii][0])
            ubd = (bnds[ii][1])
            vartype = 'c'
            opt_prob.addVar(str(ii),
                            vartype,
                            lower=lbd,
                            upper=ubd,
                            value=unknowns[ii])

        # Setup constraints
        segment_points = segment.state.numerics.number_control_points
        for ii in range(0, 2 * segment_points):
            opt_prob.addCon(str(ii), type='e', equal=0.)
        for ii in range(0, 5 * segment_points - 1):
            opt_prob.addCon(str(ii + segment_points * 2),
                            type='i',
                            lower=0.,
                            upper=np.inf)

        print(opt_prob)

        snopt = pyOpt.pySNOPT.SNOPT()
        outputs = snopt(opt_prob)

        print(outputs)
        print(opt_prob.solution(0))

    return
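# get_problem_pyopt above is defined elsewhere in this module; like every
# pyOpt objective callback it must return (f, g, fail), with g packing the
# equality residuals followed by the inequality values in the same order as
# the addCon calls. A sketch of that contract built from the helpers already
# referenced above (whether SUAVE's real wrapper packs g exactly this way is
# an assumption):
import numpy as np

def get_problem_pyopt_sketch(unknowns, segment):
    obj = get_objective(unknowns, segment)        # scalar objective value
    g_eq = get_econstraints(unknowns, segment)    # equality residuals, == 0
    g_ieq = get_ieconstraints(unknowns, segment)  # inequality values, >= 0
    g = np.hstack((g_eq, g_ieq))                  # constraints, in addCon order
    fail = 0
    return obj, g, fail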
Example #27
    if curIter == 0:
        mission = LDP.processAllMissionData(mission)
        vars0 = IC.CreateInitialGuess(mission)
    else:
        mission, vars0 = postProcessing.UpdateMissionAndInitialGuess(mission)
        #vars0 = IC.UpdateInitialGuess(mission)

    numEquality = len(constraints.equality(vars0, mission))
    numInequality = len(constraints.inequality(vars0, mission))

    # Find the upper and lower bounds
    #boundsCase = constraints.bounds(mission)
    lb, ub = constraints.getXLXU(vars0, mission)

    #TJC
    opt_prob = pyOpt.Optimization('Trajectory Optimization',
                                  lambda x: objectiveFunction(x, mission))
    opt_prob.addObj('Objective Mass')

    # Specify all of the variables
    print 'Setting up variables in a hackish way.  MUST CHANGE!!!'
    for curVar in range(len(vars0)):
        opt_prob.addVar('var' + str(curVar),
                        'c',
                        value=vars0[curVar],
                        lower=lb[curVar],
                        upper=ub[curVar])

    # Now add in equality constraints
    for curCon in range(numEquality):
        opt_prob.addCon('g' + str(curCon), 'e')
Example #28
                _, mpp_disp, _ = pma(func=lambda u: fcn_g_disp(u, d=x),
                                     gradFunc=lambda u: grad_g_disp(u, d=x),
                                     u0=u0,
                                     pf=pf_disp_target,
                                     tfmJac=lambda u: dUdT(u),
                                     That=That)
                grad_con[1] = -sens_g_disp(U=mpp_disp, d=x)

                fail = 0
            except ValueError:
                fail = 1

            return grad_obj, grad_con, fail

        opt_prob = pyOpt.Optimization("Cantilever Beam", objfunc)
        opt_prob.addObj("f")
        opt_prob.addVar("x1", "c", lower=1.0, upper=4.0, value=3.0)
        opt_prob.addVar("x2", "c", lower=1.0, upper=4.0, value=3.0)
        opt_prob.addCon("g1", "i")
        opt_prob.addCon("g2", "i")

        print(opt_prob)

        slsqp = pyOpt.SLSQP()
        slsqp.setOption("IPRINT", -1)
        [fstr, xstr, inform] = slsqp(opt_prob, sens_type=gradfunc)

        print(opt_prob.solution(0))

        ds = [0] * 2
Example #29
def pyopt_surrogate_setup(surrogate_function, inputs, constraints):
    """ sets up a surrogate problem so it can be run by pyOpt. Makes the problem to be run

        Assumptions:
        None

        Source:
        N/A

        Inputs:
        surrogate_function [nexus()]
        inputs             [array]
        constraints        [array]

        Outputs:
        opt_problem        [pyOpt problem]

        Properties Used:
        None
    """

    #taken from initial optimization problem that you set up
    ini = inputs[:, 1]  # values
    bnd = inputs[:, 2]  # Bounds
    scl = inputs[:, 3]  # Scaling
    input_units = inputs[:, -1] * 1.0
    constraint_scale = constraints[:, 3]
    constraint_units = constraints[:, -1] * 1.0

    import pyOpt  #use pyOpt to set up the problem
    opt_problem = pyOpt.Optimization('surrogate', surrogate_function)

    #constraints
    bnd_constraints = helper_functions.scale_const_bnds(constraints)
    scaled_constraints = helper_functions.scale_const_values(
        constraints, bnd_constraints)
    constraints_out = scaled_constraints * constraint_units
    scaled_inputs = ini / scl
    x = scaled_inputs  #*input_units

    print 'x_setup=', x
    #bound the input variables
    for j in range(len(inputs[:, 1])):
        lbd = bnd[j][0] / (scl[j])  #*input_units[j]
        ubd = bnd[j][1] / (scl[j])  #*input_units[j]
        opt_problem.addVar('x%i' % j, 'c', lower=lbd, upper=ubd, value=x[j])

    #put in the constraints
    for j in range(len(constraints[:, 0])):
        edge = constraints_out[j]
        if constraints[j][1] == '<':
            opt_problem.addCon('g%i' % j, type='i', upper=edge)
        elif constraints[j][1] == '>':
            opt_problem.addCon('g%i' % j, lower=edge, upper=np.inf)

        elif constraints[j][1] == '=':
            opt_problem.addCon('g%i' % j, type='e', equal=edge)

    opt_problem.addObj('f')

    return opt_problem
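# A minimal sketch of driving the problem returned above with pyOpt's SLSQP;
# surrogate_function, inputs and constraints are whatever the caller
# assembled (assumed here, not defined in this snippet):
#
#   opt_problem = pyopt_surrogate_setup(surrogate_function, inputs, constraints)
#   slsqp = pyOpt.SLSQP()
#   slsqp.setOption('ACC', 1e-6)
#   [fstr, xstr, inform] = slsqp(opt_problem, sens_type='FD')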
Example #30
def Opt():
    problem = {}
    # Problem Solution Info #
    order = 5 # order should be kept relatively low (<= 6); if more accuracy is required, increase the number of partitions
    N = order-1
    problem['N'] = N
    problem['nConstraint'] = 10
    problem['nDivisions'] = 1 # number of segments, each fitted with its own polynomial of the specified order 
    # Problem Info #
    isp = 290
    
    problem['x0'] = -3200
    problem['xf'] = 0
    
    problem['y0'] = 2000
    problem['yf'] = 0
    
    problem['u0'] = 625
    problem['uf'] = 0
    
    problem['v0'] = -270
    problem['vf'] = 0
    
    problem['udotf'] = 0
    
    problem['m0'] =  8500
    problem['ve'] = isp*9.81
    thrust = 600000
    problem['Tmax'] = thrust
    problem['Tmin'] = thrust*0.1 # 10% throttle
    
    V0 = (problem['u0']**2 + problem['v0']**2)**0.5
    fpa0 = np.arcsin(problem['v0']/V0)
    problem['mu0'] = np.pi+fpa0
    problem['mudotmax'] = 40*np.pi/180 # 40 deg/s
    problem['mudotmin'] = -problem['mudotmax']
    
    # Initial Guess
    tf = 12
    x = np.linspace(problem['x0'],problem['xf'],order+1)
    y = np.linspace(problem['y0'],problem['yf'],order+1)
    # tau = -cos(pi*np.arange(0,N+2)/(N+1))
    # t = (tau+1)*0.5*tf
    # x = interp1d(np.linspace(0,tf,order+1),x)(t)
    # y = interp1d(np.linspace(0,tf,order+1),y)(t)
    c0 = np.hstack((x[1:-1],y[1:-1],tf))
    
    # Form D
    problem['D'] = ChebyshevDiff(order)

    opt = pyOpt.Optimization('Flat Pseudospectral PDG',lambda c: Cost(c,problem))
    
    
    # Add the design variables
    for i,xi in enumerate(x[1:-1]):
        opt.addVar('x{}'.format(i+1), 'c', lower = problem['x0'], upper = problem['xf'], value = xi)
    
    for i,xi in enumerate(y[1:-1]):
        opt.addVar('y{}'.format(i+1), 'c', lower = problem['yf'], upper = problem['y0'], value = xi)
    
    opt.addVar('tf','c', lower = 5, upper = 50, value=tf)
    
    # Add the objective and constraints
    opt.addObj('J')
    
    for i in range(1,7):
        opt.addCon('g{}'.format(i),'e')
          
          
    for i in range(1,4*problem['nConstraint'] + 0*order):
        opt.addCon('h{}'.format(i),'i')
        
    # optimizer = pyOpt.COBYLA()
    # optimizer = pyOpt.ALPSO()
    optimizer = pyOpt.ALGENCAN()
    # optimizer = pyOpt.SLSQP()
    # optimizer = pyOpt.SDPEN()
    # optimizer = pyOpt.PSQP()
    # optimizer = pyOpt.SOLVOPT()
    
    
    sens_type = 'CS' # differencing type, options ['FD', 'CS']
    # optimizer.setOption('MAXIT',100) #SLSQP option
    # optimizer.setOption('MIT',200) # PSQP
    # fopt,copt,info = optimizer(opt,sens_type=sens_type)
    fopt,copt,info = optimizer(opt)

    print info
    print opt.solution(0)

    t,x,y,u,v,udot,vdot,m,T,mu,mudot = Parse(copt,problem)


    plt.figure()
    plt.plot(x,y)
    plt.title('Positions')
    
    plt.figure()
    plt.plot(u,v)
    plt.title('Velocities')
    
    plt.figure()
    plt.plot(udot,vdot)
    plt.title('Accelerations')
    
    plt.figure()
    plt.plot(t,m)
    plt.title('Mass')
    
    plt.figure()
    plt.plot(t,mu*180/np.pi)
    plt.title('Thrust Angle')
    
    plt.figure()
    plt.plot(t,T)
    plt.title('Thrust')
    
    plt.figure()
    plt.plot(t,x)
    
    plt.figure()
    plt.plot(t,y)
    
    plt.show()
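# ChebyshevDiff above is not shown in this excerpt. A common implementation
# is the Chebyshev differentiation matrix on the N+1 Gauss-Lobatto points
# (Trefethen, "Spectral Methods in MATLAB", cheb.m); a sketch under that
# assumption (note the points run from +1 down to -1, so a sign flip like
# the commented-out tau above may be needed):
import numpy as np

def chebyshev_diff(N):
    """Differentiation matrix on the Chebyshev-Gauss-Lobatto points."""
    if N == 0:
        return np.zeros((1, 1))
    x = np.cos(np.pi * np.arange(N + 1) / N)         # Gauss-Lobatto points
    c = np.hstack(([2.0], np.ones(N - 1), [2.0])) * (-1.0) ** np.arange(N + 1)
    X = np.tile(x, (N + 1, 1)).T
    dX = X - X.T
    D = np.outer(c, 1.0 / c) / (dX + np.eye(N + 1))  # off-diagonal entries
    D -= np.diag(D.sum(axis=1))                      # negative row sums on diagonal
    return D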