Example #1
def alpso_wrapper(opt_prob):
    alpso = ALPSO()
    alpso.setOption('SwarmSize', 10)  # default 40
    alpso.setOption('maxOuterIter', 5)  # default 200
    # alpso.setOption('rinit', 1.)  # penalty factor
    alpso.setOption('fileout', 0)
    alpso.setOption('stopCriteria', 0)
    return alpso(opt_prob)
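A minimal usage sketch (not part of the original snippet): the sphere objective and problem setup below are illustrative and assume Optimization and ALPSO are imported from pyOpt in the same module.

def sphere(x):
    # pyOpt objective signature: return objective value, constraint list, fail flag
    f = x[0]**2 + x[1]**2
    g = []
    fail = 0
    return f, g, fail

opt_prob = Optimization('Sphere Test', sphere)
opt_prob.addVar('x1', 'c', lower=-5.0, upper=5.0, value=1.0)
opt_prob.addVar('x2', 'c', lower=-5.0, upper=5.0, value=1.0)
opt_prob.addObj('f')

alpso_wrapper(opt_prob)
print(opt_prob.solution(0))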
Example #2
    def infill(self, points, method='error'):
        ## We'll be making non-permanent modifications to self.X and self.y here, so let's make a copy just in case
        initX = np.copy(self.X)
        inity = np.copy(self.y)

        ## This array will hold the new values we add
        returnValues = np.zeros([points, self.k], dtype=float)

        for i in range(points):
            opt_prob1 = Optimization('InFillPSO',
                                     self.errorObjective_normalized)
            for k in range(self.k):
                opt_prob1.addVar('{0}'.format(k),
                                 'c',
                                 lower=0,
                                 upper=1,
                                 value=.5)

            pso1 = ALPSO()
            pso1.setOption('SwarmSize', 100)
            pso1.setOption('maxOuterIter', 100)
            pso1.setOption('stopCriteria', 1)
            pso1(opt_prob1)

            newpoint = np.zeros(self.k)

            for j in range(self.k):
                newpoint[j] = opt_prob1.solution(0)._variables[j].value
            returnValues[i][:] = self.inversenormX(newpoint)
            self.addPoint(returnValues[i],
                          self.predict(returnValues[i]),
                          norm=True)
            self.updateModel()
            del opt_prob1
            del pso1
        self.X = np.copy(initX)
        self.y = np.copy(inity)
        self.n = len(self.X)
        self.updateModel()
        return returnValues
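A hypothetical call of the infill routine above, assuming `model` is an already-built surrogate instance that exposes this method along with the X, y, k attributes and helpers it uses:

new_points = model.infill(2)  # propose 2 new sample locations by maximizing predicted error
print(new_points.shape)       # (2, model.k), already mapped back to the original variable ranges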
Example #3
    def train(self, optimizer='pso'):
        #Define the optimization problem for training the kriging model
        opt_prob = Optimization('Surrogate Test', self.fittingObjective)
        for i in range(self.k):
            opt_prob.addVar('theta%d' % i,
                            'c',
                            lower=1e-3,
                            upper=1e2,
                            value=.1)
        for i in range(self.k):
            opt_prob.addVar('pl%d' % i, 'c', lower=1.5, upper=2, value=2)
        opt_prob.addVar('lambda', 'c', lower=1e-5, upper=1, value=1)
        opt_prob.addObj('f')
        opt_prob.addCon('g1', 'i')

        if optimizer == 'pso':
            optimizer = ALPSO()
            optimizer.setOption('SwarmSize', 150)
            optimizer.setOption('maxOuterIter', 150)
            optimizer.setOption('stopCriteria', 1)
            optimizer.setOption('filename', '{0}Results.log'.format(self.name))

        if optimizer == 'ga':
            optimizer = NSGA2()
            optimizer.setOption('PopSize', (4 * 50))

        while True:
            try:
                self.trainingOptimizer(optimizer, opt_prob)
            except Exception as e:
                print(e)
                print('Error training model, restarting the optimizer with a larger population')
                # 'optimizer' now holds the optimizer object, so check its type
                # rather than the original string flag
                if isinstance(optimizer, ALPSO):
                    optimizer.setOption('SwarmSize', 200)
                    optimizer.setOption('maxOuterIter', 100)
                    optimizer.setOption('stopCriteria', 1)
                if isinstance(optimizer, NSGA2):
                    optimizer.setOption('PopSize', 400)
            else:
                break
Example #4

# =============================================================================
# 
# =============================================================================

# Instantiate Optimization Problem 
opt_prob = Optimization('G08 Global Constrained Problem',objfunc)
opt_prob.addVar('x1','c',lower=1e-6,upper=10.0,value=10.0)
opt_prob.addVar('x2','c',lower=1e-6,upper=10.0,value=10.0)
opt_prob.addObj('f')
opt_prob.addCon('g1','i')
opt_prob.addCon('g2','i')

# Solve Problem (No-Parallelization)
alpso_none = ALPSO()
alpso_none.setOption('fileout',0)
alpso_none(opt_prob)
if myrank == 0:
    print(opt_prob.solution(0))
#end

# Solve Problem (SPM-Parallelization)
alpso_spm = ALPSO(pll_type='SPM')
alpso_spm.setOption('fileout',0)
alpso_spm(opt_prob)
print(opt_prob.solution(1))

# Solve Problem (DPM-Parallelization)
alpso_dpm = ALPSO(pll_type='DPM')
alpso_dpm.setOption('fileout',0)
Example #5
def optimALPSO(opt_prob, swarmsize, maxiter, algo):
    if algo == 3:
        alpso_none = ALPSO(pll_type='SPM')
    else:
        alpso_none = ALPSO()
    alpso_none.setOption('fileout',1)
    alpso_none.setOption('filename',"test.out")
    alpso_none.setOption('SwarmSize',swarmsize)
    alpso_none.setOption('maxInnerIter',6)
    alpso_none.setOption('etol',1e-5)
    alpso_none.setOption('rtol',1e-10)
    alpso_none.setOption('atol',1e-10)
    alpso_none.setOption('vcrazy',1e-4)
    alpso_none.setOption('dt',1e0)
    alpso_none.setOption('maxOuterIter',maxiter)
    alpso_none.setOption('stopCriteria',0)  # Stopping Criteria Flag (0 - maxIters, 1 - convergence)
    alpso_none.setOption('printInnerIters',1)
    alpso_none.setOption('printOuterIters',1)
    alpso_none.setOption('HoodSize',int(swarmsize/100))
    return alpso_none(opt_prob)
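A hypothetical call, assuming opt_prob was built as in the other examples (variables, objective 'f', and constraints already added); any algo value other than 3 runs the serial optimizer:

result = optimALPSO(opt_prob, swarmsize=200, maxiter=50, algo=1)
print(opt_prob.solution(0))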
Example #6
    def optimizeTrajectory(self, plot_func=None):
        # use non-linear optimization to find parameters for minimal
        # condition number trajectory

        self.plot_func = plot_func

        if self.config['showOptimizationGraph']:
            self.initGraph()

        ## describe optimization problem with pyOpt classes

        from pyOpt import Optimization
        from pyOpt import ALPSO, SLSQP

        # Instantiate Optimization Problem
        opt_prob = Optimization('Trajectory optimization', self.objective_func)
        opt_prob.addObj('f')

        # add variables, define bounds
        # w_f - pulsation
        opt_prob.addVar('wf', 'c', value=self.wf_init, lower=self.wf_min, upper=self.wf_max)

        # q - offsets
        for i in range(self.dofs):
            opt_prob.addVar('q_%d'%i,'c', value=self.qinit[i], lower=self.qmin[i], upper=self.qmax[i])
        # a, b - sin/cos params
        for i in range(self.dofs):
            for j in range(self.nf[0]):
                opt_prob.addVar('a{}_{}'.format(i,j), 'c', value=self.ainit[i][j], lower=self.amin, upper=self.amax)
        for i in range(self.dofs):
            for j in range(self.nf[0]):
                opt_prob.addVar('b{}_{}'.format(i,j), 'c', value=self.binit[i][j], lower=self.bmin, upper=self.bmax)

        # add constraint vars (constraint functions are in obfunc)
        if self.config['minVelocityConstraint']:
            opt_prob.addConGroup('g', self.dofs*5, 'i')
        else:
            opt_prob.addConGroup('g', self.dofs*4, 'i')
        #print opt_prob

        initial = [v.value for v in list(opt_prob._variables.values())]

        if self.config['useGlobalOptimization']:
            ### optimize using pyOpt (global)
            opt = ALPSO()  #augmented lagrange particle swarm optimization
            opt.setOption('stopCriteria', 0)
            opt.setOption('maxInnerIter', 3)
            opt.setOption('maxOuterIter', self.config['globalOptIterations'])
            opt.setOption('printInnerIters', 1)
            opt.setOption('printOuterIters', 1)
            opt.setOption('SwarmSize', 30)
            opt.setOption('xinit', 1)
            #TODO: how to properly limit max number of function calls?
            # no. func calls = (SwarmSize * inner) * outer + SwarmSize
            self.iter_max = opt.getOption('SwarmSize') * opt.getOption('maxInnerIter') * opt.getOption('maxOuterIter') + opt.getOption('SwarmSize')
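            # Worked example with assumed numbers: SwarmSize=30, maxInnerIter=3 and,
            # say, globalOptIterations=10 give 30 * 3 * 10 + 30 = 930 evaluations.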

            # run first (global) optimization
            try:
                #reuse history
                opt(opt_prob, store_hst=False, hot_start=True, xstart=initial)
            except NameError:
                opt(opt_prob, store_hst=False, xstart=initial)
            print(opt_prob.solution(0))

        ### pyOpt local

        # after the global optimization, refine the solution with a
        # gradient-based method; initialize the (local-only) optimizer
        opt2 = SLSQP()   #sequential least squares
        opt2.setOption('MAXIT', self.config['localOptIterations'])
        if self.config['verbose']:
            opt2.setOption('IPRINT', 0)
        # TODO: the number of function calls depends on the number of variables and the iterations used to approximate the gradient
        # (iterations are probably steps along the gradient). How to get the proper number of function calls?
        self.iter_max = "(unknown)"

        if self.config['useGlobalOptimization']:
            if self.last_best_sol is not None:
                #use best constrained solution
                for i in range(len(opt_prob._variables)):
                    opt_prob._variables[i].value = self.last_best_sol[i]
            else:
                #reuse previous solution
                for i in range(len(opt_prob._variables)):
                    opt_prob._variables[i].value = opt_prob.solution(0).getVar(i).value

            opt2(opt_prob, store_hst=False, sens_step=0.1)
        else:
            try:
                #reuse history
                opt2(opt_prob, store_hst=True, hot_start=True, sens_step=0.1)
            except NameError:
                opt2(opt_prob, store_hst=True, sens_step=0.1)

        local_sol = opt_prob.solution(0)
        if not self.config['useGlobalOptimization']:
            print(local_sol)
        local_sol_vec = np.array([local_sol.getVar(x).value for x in range(0,len(local_sol._variables))])

        if self.last_best_sol is not None:
            local_sol_vec = self.last_best_sol
            print("using last best constrained solution instead of given solver solution.")

        sol_wf, sol_q, sol_a, sol_b = self.vecToParams(local_sol_vec)

        print("testing final solution")
        self.iter_cnt = 0
        self.objective_func(local_sol_vec)
        print("\n")

        self.trajectory.initWithParams(sol_a, sol_b, sol_q, self.nf, sol_wf)

        if self.config['showOptimizationGraph']:
            plt.ioff()

        return self.trajectory
Example #7
                         minstep=1e-8,
                         debug=1,
                         phip=0.5,
                         phig=0.5,
                         maxiter=maxiter)
    ##############################################################################
    ##############################################################################
    # solving the problem with pyOpt
    ##############################################################################
    ##############################################################################
swarmsize = comm.bcast(swarmsize, root=0)
maxiter = comm.bcast(maxiter, root=0)

if algo == 2:
    # Solve Problem (No-Parallelization)
    alpso_none = ALPSO()  #pll_type='SPM')
    alpso_none.setOption('fileout', 1)
    alpso_none.setOption('SwarmSize', swarmsize)
    alpso_none.setOption('maxInnerIter', 6)
    alpso_none.setOption('etol', 1e-5)
    alpso_none.setOption('rtol', 1e-10)
    alpso_none.setOption('atol', 1e-10)
    alpso_none.setOption('vcrazy', 1e-4)
    alpso_none.setOption('dt', 1e0)
    alpso_none.setOption('maxOuterIter', maxiter)
    alpso_none.setOption('stopCriteria', 0)  # Stopping Criteria Flag (0 - maxIters, 1 - convergence)
    alpso_none.setOption('printInnerIters', 1)
    alpso_none.setOption('printOuterIters', 1)
    alpso_none.setOption('HoodSize', int(swarmsize / 100))

# =============================================================================
#
# =============================================================================

# Instantiate Optimization Problem
opt_prob = Optimization('G08 Global Constrained Problem', objfunc)
opt_prob.addVar('x1', 'c', lower=1e-6, upper=10.0, value=10.0)
opt_prob.addVar('x2', 'c', lower=1e-6, upper=10.0, value=10.0)
opt_prob.addObj('f')
opt_prob.addCon('g1', 'i')
opt_prob.addCon('g2', 'i')

# Solve Problem (No-Parallelization)
alpso_none = ALPSO()
alpso_none.setOption('fileout', 0)
alpso_none(opt_prob)
if myrank == 0:
    print(opt_prob.solution(0))
# end

# Solve Problem (SPM-Parallelization)
alpso_spm = ALPSO(pll_type='SPM')
alpso_spm.setOption('fileout', 0)
alpso_spm(opt_prob)
print(opt_prob.solution(1))

# Solve Problem (DPM-Parallelization)
alpso_dpm = ALPSO(pll_type='DPM')
alpso_dpm.setOption('fileout', 0)
Example #9

# =============================================================================
#
# =============================================================================

# Instantiate Optimization Problem
opt_prob = Optimization('G08 Global Constrained Problem', objfunc)
opt_prob.addVar('x1', 'c', lower=1e-6, upper=10.0, value=10.0)
opt_prob.addVar('x2', 'c', lower=1e-6, upper=10.0, value=10.0)
opt_prob.addObj('f')
opt_prob.addCon('g1', 'i')
opt_prob.addCon('g2', 'i')

# Solve Problem (No-Parallelization)
alpso_none = ALPSO()
alpso_none.setOption('fileout', 0)
alpso_none(opt_prob)
if myrank == 0:
    print(opt_prob.solution(0))

# Solve Problem (SPM-Parallelization)
alpso_spm = ALPSO(pll_type='SPM')
alpso_spm.setOption('fileout', 0)
alpso_spm(opt_prob)
print(opt_prob.solution(1))

# Solve Problem (POA-Parallelization)
alpso_poa = ALPSO(pll_type='POA')
alpso_poa.setOption('fileout', 0)
alpso_poa(opt_prob)
Example #10
for i in range(x.shape[1]):
    opt_prob.addVar('theta%d' % i, 'c', lower=.05, upper=20, value=.2)
for i in range(x.shape[1]):
    opt_prob.addVar('pl%d' % i, 'c', lower=1, upper=2, value=1.75)

opt_prob.addObj('f')
opt_prob.addCon('g1', 'i')

#print out the problem
print(opt_prob)

#Run the GA
# nsga = NSGA2(PopSize=300, maxGen=500, pMut_real=.1)
# nsga(opt_prob)
#
pso = ALPSO()
pso.setOption('SwarmSize', 30)
pso.setOption('maxOuterIter', 100)
pso.setOption('stopCriteria', 1)
# pso.setOption('dt',1)
pso(opt_prob)

#print the best solution
print(opt_prob.solution(0))

# Update the model variables to the best solution found by the optimizer
a.update([
    opt_prob.solution(0)._variables[0].value,
    opt_prob.solution(0)._variables[1].value,
    opt_prob.solution(0)._variables[2].value,
    opt_prob.solution(0)._variables[3].value,