Example #1
def objfunc(x):
    # (objective evaluation truncated in this snippet; pyOpt objectives
    # return the objective f, constraint values g, and a fail flag)
    return f, g, fail


opt_prob = pyOpt.Optimization('Minimize Chi**2', objfunc)

opt_prob.addVar('Amp', 'c', lower=0.0, upper=42.0, value=1.0)
opt_prob.addVar('LifeTime', 'c', lower=0.0, upper=42.0, value=2.0)
opt_prob.addVar('Offset', 'c', lower=0.0, upper=42.0, value=3.0)
opt_prob.addVar('ScatterAmp', 'c', lower=0.0, upper=42.0, value=4.0)

opt_prob.addObj('Minimize Chi**2', value=0.0, optimum=0.0)

print opt_prob

slsqp = pyOpt.SLSQP()

slsqp.setOption('IPRINT', -1)
[fstr, xstr, inform] = slsqp(opt_prob, disp_opts=True, sens_type='FD')

print opt_prob.solution(0)

# ------------------------------------------------
Plot = False

if Plot:
    from Display import ForkDisplay
    print "\nPlotting: Aligned Data:"
    ForkDisplay(Time,
                AlignedRaw,
                Title="Aligned Raw Data")  # (remaining arguments truncated in this snippet)
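
For reference, the truncated objfunc above follows pyOpt's standard callback convention: it receives the design vector and returns the objective value f, a list of constraint values g, and a fail flag. A minimal self-contained sketch, with a made-up quadratic standing in for the chi-squared evaluation:

import pyOpt

def objfunc(x):
    # hypothetical objective standing in for the truncated chi**2 fit
    f = (x[0] - 1.0) ** 2 + (x[1] - 2.0) ** 2
    g = []    # no constraints in this sketch
    fail = 0  # set to 1 to tell pyOpt the evaluation failed
    return f, g, fail

opt_prob = pyOpt.Optimization('sketch', objfunc)
opt_prob.addVar('x1', 'c', lower=0.0, upper=42.0, value=1.0)
opt_prob.addVar('x2', 'c', lower=0.0, upper=42.0, value=2.0)
opt_prob.addObj('f')
slsqp = pyOpt.SLSQP()
slsqp(opt_prob, sens_type='FD')  # finite-difference gradients
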
Example #2
# equality constraint (assumed from the parallel inequality block below)
Con_Names = The_Project.config_current['OPT_CONSTR']['EQUALITY'].keys()
for name in Con_Names:
    The_Problem.addCon(name, 'e')

# inequality constraint
Con_Names = The_Project.config_current['OPT_CONSTR']['INEQUALITY'].keys()
for name in Con_Names:
    The_Problem.addCon(name, 'i')

print The_Problem

# -------------------------------------------------------------------
#  Run Optimizer
# -------------------------------------------------------------------

# SLSQP
The_Optimizer = pyOpt.SLSQP()
The_Optimizer.setOption('IPRINT', -1)
The_Optimizer.setOption('MAXIT', its)
The_Optimizer.setOption('ACC', 1e-8)

# CONMIN
#The_Optimizer = pyOpt.CONMIN()
#The_Optimizer.setOption('IPRINT',1)
#The_Optimizer.setOption('ITMAX',its)
#The_Optimizer.setOption('DABFUN',1e-6)
#The_Optimizer.setOption('DELFUN',1e-6)

# Call the Optimizer
The_Optimizer(The_Problem, sens_type=the_Gradient)

print The_Problem.solution(0)
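
pyOpt's sens_type argument accepts either a string ('FD' for finite differences, 'CS' for complex step) or a user-supplied callback, which is presumably what the_Gradient is here. A minimal sketch of such a callback for a two-variable, one-constraint problem, following the (df, dg, fail) convention also used by gradientPyopt in Example #6:

import numpy as np

def the_Gradient(x, f, g):
    # hypothetical analytic sensitivities: df is 1 x nvar, dg is ncon x nvar
    df = np.array([[2.0 * x[0], 2.0 * x[1]]])  # gradient of a toy quadratic
    dg = np.array([[1.0, 1.0]])                # gradient of one linear constraint
    fail = 0
    return df, dg, fail
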
Example #3
# Assumed imports for this snippet; para, objfunc, SENSE and FILTERMATRIX
# are helpers from the surrounding module.
import time

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors

import pyOpt


def main():
    # Default input parameters
    p=para()
    nelx = p.nelx
    nely = p.nely
    nelz = p.nelz
    volfrac = p.volfrac
    rmin = p.rmin
    penal = p.penal
    ft = p.ft # ft==0 -> sens, ft==1 -> dens
    # Max and min stiffness
    Emin = 1e-3
    Emax = 1.0
    # Set loop counter and gradient vectors
    loop = 0
    change = 1
    # Allocate design variables (as array), initialize and allocate sens.
    x = volfrac * np.ones(nely * nelx * nelz, dtype=float)

    # Initialize plot and plot the initial design
    # plot a cross section perpendicular to z direction
    plt.ion()  # Ensure that redrawing is possible
    # Writing to locals() inside a function does not create variables in
    # CPython, so keep the per-slice figure and image handles in lists.
    figs, ims = [], []
    for i in range(nelz):
        fig, ax = plt.subplots()
        im = ax.imshow(-x[nelx*nely*i:nelx*nely*(i+1)].reshape((nelx, nely)).T,
                       cmap='gray', interpolation='none',
                       norm=colors.Normalize(vmin=-1, vmax=0))
        figs.append(fig)
        ims.append(im)
        fig.show()
    plt.show()

    t1 = 0

    while change > 0.01 and loop < 2000:

        t1old=t1
        t1 = time.clock()

        xold = x.copy()
        loop = loop + 1
        #use pyOpt to solve the problem
        opt_prob = pyOpt.Optimization('3d', objfunc)
        #Assigning Design Variables
        for i in range(nelx * nely * nelz):
            # unique variable names keep the printed problem readable
            opt_prob.addVar('x%d' % i, 'c', lower=Emin, upper=Emax, value=x[i])
        #Assigning Objective:
        opt_prob.addObj('f')
        #Assigning Constraints:
        opt_prob.addCon('g', 'e')

        t2=time.clock()

        opt = pyOpt.SLSQP()
        #new x and obj
        optsol = opt(opt_prob,sens_type=SENSE)
        # Filter design variables
        # Finalize assembly and convert to csc format
        HH = FILTERMATRIX(nelx, nely, nelz, rmin)
        H = HH.assembly()
        Hs = H.sum(1)
        if ft == 0:
            x = optsol[1]
        elif ft == 1:
            x = np.asarray(H * optsol[1][np.newaxis].T / Hs)[:, 0]

        t3=time.clock()

        obj = optsol[0][0]
        vol = x.sum()/(nelx * nely * nelz)
        #Compute the change by the inf. norm
        change = np.linalg.norm(x.reshape(nelx * nely * nelz, 1) - xold.reshape(nelx * nely * nelz, 1), np.inf)

        t4=time.clock()

        # Plot to screen
        for i in range(nelz):
            ims[i].set_array(-x[nelx*nely*i:nelx*nely*(i+1)].reshape((nelx, nely)).T)
            figs[i].canvas.draw()

        t5=time.clock()

        # Write iteration history to screen (req. Python 2.6 or newer)
        print("it.: {0} , obj.: {1:.3f} Vol.: {2:.3f}, ch.: {3:.3f}".format( \
            loop, obj, vol, change))
        print("t1-t1old.: {0:.5f} , t2-t1.: {1:.5f} , t3-t2.: {2:.5f}".format( \
            t1-t1old, t2-t1, t3-t2, t4-t3, t5-t4))
        print("t4-t3.: {0:.5f} , t4-t4.: {1:.5f} \n".format( \
            t4-t3, t5-t4))
    # Make sure the plot stays and that the shell remains
    plt.show()
    raw_input("Press any key...")
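
FILTERMATRIX is external to this snippet, but the H * x / Hs step above is the standard density filter from topology optimization: each element density is replaced by a distance-weighted average of its neighbours within radius rmin. A minimal 1-D sketch of the assumed weighting scheme:

import numpy as np

def density_filter_1d(x, rmin):
    # assumed filter weights: H_ij = max(0, rmin - |i - j|), unit element spacing
    idx = np.arange(len(x))
    H = np.maximum(0.0, rmin - np.abs(idx[:, None] - idx[None, :]))
    Hs = H.sum(axis=1)        # row sums, matching Hs = H.sum(1) above
    return H.dot(x) / Hs      # filtered densities
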
Example #4
    def runOptimizer(self, opt_prob):
        # type: (pyOpt.Optimization) -> np._ArrayLike[float]
        ''' call global followed by local optimizer, return solution '''

        import pyOpt

        initial = [v.value for v in list(opt_prob.getVarSet().values())]

        if self.config['useGlobalOptimization']:
            ### optimize using pyOpt (global)
            sr = random.SystemRandom()
            if self.config['globalSolver'] == 'NSGA2':
                if parallel:
                    opt = pyOpt.NSGA2(pll_type='POA')  # genetic algorithm
                else:
                    opt = pyOpt.NSGA2()
                if self.config['globalOptSize'] % 4:
                    raise IOError(
                        "globalOptSize needs to be a multiple of 4 for NSGA2")
                opt.setOption('PopSize', self.config['globalOptSize']
                              )  # Population Size (a Multiple of 4)
                opt.setOption('maxGen', self.config['globalOptIterations']
                              )  # Maximum Number of Generations
                opt.setOption(
                    'PrintOut', 0
                )  # Flag to Turn On Output to files (0-None, 1-Subset, 2-All)
                opt.setOption(
                    'xinit', 1
                )  # Use Initial Solution Flag (0 - random population, 1 - use given solution)
                opt.setOption('seed', sr.random(
                ))  # Random Number Seed 0..1 (0 - Auto based on time clock)
                #pCross_real    0.6     Probability of Crossover of Real Variable (0.6-1.0)
                opt.setOption(
                    'pMut_real',
                    0.5)  # Probability of Mutation of Real Variables (1/nreal)
                #eta_c  10.0    # Distribution Index for Crossover (5-20) must be > 0
                #eta_m  20.0    # Distribution Index for Mutation (5-50) must be > 0
                #pCross_bin     0.0     # Probability of Crossover of Binary Variable (0.6-1.0)
                #pMut_bin      0.0     # Probability of Mutation of Binary Variables (1/nbits)
                self.iter_max = self.config['globalOptSize'] * self.config[
                    'globalOptIterations']
            elif self.config['globalSolver'] == 'ALPSO':
                if parallel:
                    opt = pyOpt.ALPSO(
                        pll_type='SPM'
                    )  #augmented lagrange particle swarm optimization
                else:
                    opt = pyOpt.ALPSO(
                    )  #augmented lagrange particle swarm optimization
                opt.setOption('stopCriteria', 0)  # stop at max iters
                opt.setOption('dynInnerIter', 1)  # dynamic inner iter number
                opt.setOption('maxInnerIter', 5)
                opt.setOption('maxOuterIter',
                              self.config['globalOptIterations'])
                opt.setOption('printInnerIters', 1)
                opt.setOption('printOuterIters', 1)
                opt.setOption('SwarmSize', self.config['globalOptSize'])
                opt.setOption('xinit', 1)
                opt.setOption('seed',
                              sr.random() *
                              self.mpi_size)  #(self.mpi_rank+1)/self.mpi_size)
                #opt.setOption('vcrazy', 1e-2)
                #TODO: how to properly limit max number of function calls?
                # no. func calls = (SwarmSize * inner) * outer + SwarmSize
                self.iter_max = opt.getOption('SwarmSize') * opt.getOption('maxInnerIter') * \
                    opt.getOption('maxOuterIter') + opt.getOption('SwarmSize')
                self.iter_max = self.iter_max // self.mpi_size
            else:
                print("Solver {} not defined".format(
                    self.config['globalSolver']))
                sys.exit(1)

            # run global optimization

            #try:
            #reuse history
            #    opt(opt_prob, store_hst=False, hot_start=True) #, xstart=initial)
            #except NameError:

            if self.config['verbose']:
                print('Running global optimization with {}'.format(
                    self.config['globalSolver']))
            self.is_global = True
            opt(opt_prob, store_hst=False)  #, xstart=initial)

            if self.mpi_rank == 0:
                print(opt_prob.solution(0))

            self.gather_solutions()

        ### pyOpt local
        if self.config['useLocalOptimization']:
            print("Runnning local gradient based solver")

            # TODO: run local optimization for e.g. the three last best results (global solutions
            # could be more or less optimal within their local minima)

            # after using global optimization, refine solution with gradient based method init
            # optimizer (more or less local)
            if self.config['localSolver'] == 'SLSQP':
                opt2 = pyOpt.SLSQP()  #sequential least squares
                opt2.setOption('MAXIT', self.config['localOptIterations'])
                if self.config['verbose']:
                    opt2.setOption('IPRINT', 0)
            elif self.config['localSolver'] == 'IPOPT':
                opt2 = pyOpt.IPOPT()
                opt2.setOption(
                    'linear_solver', 'ma57'
                )  #mumps or hsl: ma27, ma57, ma77, ma86, ma97 or mkl: pardiso
                opt2.setOption('max_iter', self.config['localOptIterations'])
                if self.config['verbose']:
                    opt2.setOption('print_level', 4)  #0 none ... 5 max
                else:
                    opt2.setOption('print_level', 0)  #0 none ... 5 max
            elif self.config['localSolver'] == 'PSQP':
                opt2 = pyOpt.PSQP()
                opt2.setOption(
                    'MIT', self.config['localOptIterations'])  # max iterations
                #opt2.setOption('MFV', ??)  # max function evaluations
            elif self.config['localSolver'] == 'COBYLA':
                if parallel:
                    opt2 = pyOpt.COBYLA(pll_type='POA')
                else:
                    opt2 = pyOpt.COBYLA()
                opt2.setOption(
                    'MAXFUN',
                    self.config['localOptIterations'])  # max iterations
                opt2.setOption('RHOBEG', 0.1)  # initial step size
                if self.config['verbose']:
                    opt2.setOption('IPRINT', 2)
            else:
                print("Solver {} not defined".format(
                    self.config['localSolver']))
                sys.exit(1)

            self.iter_max = self.local_iter_max

            # use best constrained solution from last run (might be better than what solver thinks)
            if len(self.last_best_sol) > 0:
                for i in range(len(opt_prob.getVarSet())):
                    opt_prob.getVar(i).value = self.last_best_sol[i]

            if self.config['verbose']:
                print('Running local optimization with {}'.format(
                    self.config['localSolver']))
            self.is_global = False
            if self.config['localSolver'] in ['COBYLA', 'CONMIN']:
                opt2(opt_prob, store_hst=False)
            else:
                if parallel:
                    opt2(opt_prob,
                         sens_step=0.1,
                         sens_mode='pgc',
                         store_hst=False)
                else:
                    opt2(opt_prob, sens_step=0.1, store_hst=False)

            self.gather_solutions()

        if self.mpi_rank == 0:
            sol = opt_prob.solution(0)
            print(sol)
            #sol_vec = np.array([sol.getVar(x).value for x in range(0,len(sol.getVarSet()))])

            if len(self.last_best_sol) > 0:
                print(
                    "using last best constrained solution instead of given solver solution."
                )

                print("testing final solution")
                #self.iter_cnt = 0
                self.objectiveFunc(self.last_best_sol, test=True)
                print("\n")
                return self.last_best_sol
            else:
                print("No feasible solution found!")
                sys.exit(-1)
        else:
            # parallel sub-processes, close
            sys.exit(0)
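
Stripped of the class machinery, the global-then-local pattern this method implements can be reproduced in a few lines of plain pyOpt; a sketch with a toy objective (solution indices and option names follow the calls used above):

import pyOpt

def objfunc(x):
    f = (x[0] - 1.0) ** 2 + (x[1] + 2.0) ** 2  # toy quadratic bowl
    return f, [], 0

prob = pyOpt.Optimization('global then local', objfunc)
prob.addVar('x1', 'c', lower=-10.0, upper=10.0, value=0.0)
prob.addVar('x2', 'c', lower=-10.0, upper=10.0, value=0.0)
prob.addObj('f')

alpso = pyOpt.ALPSO()              # global stage: particle swarm
alpso(prob, store_hst=False)

sol = prob.solution(0)             # copy the global best back as the start point
for i in range(len(sol.getVarSet())):
    prob.getVar(i).value = sol.getVar(i).value

slsqp = pyOpt.SLSQP()              # local stage: gradient-based refinement
slsqp(prob, sens_type='FD')
print(prob.solution(1))
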
Example #5
import math

import pyOpt


def PL_identify(D, lamb, T, ini_sol):
    """  Identify the Probability Law (PL)

    Parameters
    ---------------
    D : mxn matrix
        D_ij is the entropy value calculated using candidate PL i and data in
        window j
    lamb : float
        detection threshold
    T : float
        parameter in logistic function. The unit step function is approximated
        by L(x) = 1 / ( 1 + exp(-x / T))
    ini_sol : mxn matrix
        initial solution

    Returns
    --------------
    None. The SLSQP solution is printed to stdout.
    """

    L = lambda x: 1.0 / ( 1 + math.exp(-1.0 * x / T))
    # m, n = D.shape
    m = len(D)
    n = len(D[0])
    def objfunc(x):
        f = sum(L(sum(x[(n * i):(n * (i + 1))])) for i in range(m))
        # print('f', f)
        cn = (m * n + n)
        g = [0.0] * cn
        for j in range(n):
            g[j] = -1 * sum(x[i * n + j] for i in range(m)) + 1

        for i in range(m):
            for j in range(n):
                g[n + i * n + j] = D[i][j] * x[i * n + j] - lamb

        fail = 0
        # print('g, ', g)
        return f, g, fail

    opt_prob = pyOpt.Optimization("PL Identification Problem", objfunc)
    opt_prob.addObj('f')
    for i in range(m):
        for j in range(n):
            opt_prob.addVar('x%d_%d'%(i,j), 'c', lower=0.0,
                    upper=1.0, value = ini_sol[i][j])
            # opt_prob.addVar('x%d%d'%(i,j), 'i', lower=0,
                    # upper=1, value=0)
    opt_prob.addConGroup('eq', n, type='e')
    opt_prob.addConGroup('ineq', m * n, type='i')
    print('opt_prob: ', opt_prob)
    # return
    slsqp = pyOpt.SLSQP()
    [fstr, xstr, inform] = slsqp(opt_prob, sens_type='FD')

    # alg  = pyOpt.ALGENCAN ()
    # [fstr, xstr, inform] = alg(opt_prob)

    print('fstr', fstr)
    print('inform', inform)
    print('xstr', xstr)

    print(opt_prob.solution(0))
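
A hypothetical toy call, just to illustrate the expected shapes (two candidate PLs, three windows; the numbers are made up):

D = [[0.1, 0.9, 0.2],
     [0.8, 0.2, 0.7]]
ini_sol = [[1.0, 0.0, 1.0],
           [0.0, 1.0, 0.0]]
PL_identify(D, lamb=0.5, T=0.1, ini_sol=ini_sol)
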
Example #6
    def optimize_pyopt(self, solver, sens, options):
        """Deprecated -- only supported in Python 2.7"""

        # pyOpt requires empty options to be specified as {}, not None
        if options is None: options = {}

        eqConstr = ('SLSQP' not in solver and 'CONMIN' not in solver
                    and 'COBYLA' not in solver and 'FILTERSD' not in solver
                    and 'SDPEN' not in solver)

        # Organize constraints
        indLinEq = []
        indLinIneq = []
        indNonlinEq = []
        indNonlinIneq = []
        for k, bnd in enumerate(self.lin_bounds):
            if bnd[0] == bnd[1] and eqConstr: indLinEq.append(k)
            else: indLinIneq.append(k)
        indLinEq = np.array(indLinEq)
        indLinIneq = np.array(indLinIneq)
        for k, bnd in enumerate(self.nonlin_bounds):
            if bnd[0] == bnd[1] and eqConstr: indNonlinEq.append(k)
            else: indNonlinIneq.append(k)
        indNonlinEq = np.array(indNonlinEq)
        indNonlinIneq = np.array(indNonlinIneq)

        # pyOpt objective
        def objectivePyopt(xIn):

            x = xIn[:self.nvar]

            f = self.objective(x)
            g = np.zeros(0, dtype=float)
            if len(indLinEq) > 0:
                g = np.r_[g, np.dot(self.lin_mat[indLinEq, :], x)]
            if len(indNonlinEq) > 0:
                g = np.r_[
                    g,
                    self.eval_nonlin_constraints(x, 'constr', indNonlinEq)]
            if len(indLinIneq) > 0:
                g = np.r_[g, np.dot(self.lin_mat[indLinIneq, :], x)]
            if len(indNonlinIneq) > 0:
                g = np.r_[
                    g,
                    self.eval_nonlin_constraints(x, 'constr', indNonlinIneq)]

            fail = 0
            if hasattr(f, '__iter__'):
                f = f[0]
            if f >= self.inf or np.any(g >= self.inf): fail = 1

            return f, g.tolist(), fail

        # pyOpt gradient
        def gradientPyopt(xIn, f, g):

            x = xIn[:self.nvar]

            df = self.gradient(x)
            dg = np.zeros((0, self.nvar), dtype=float)
            if len(indLinEq) > 0:
                dg = np.r_[dg, self.lin_mat[indLinEq, :]]
            if len(indNonlinEq) > 0:
                dg = np.r_[dg,
                           self.eval_nonlin_constraints(x, 'jac', indNonlinEq)]
            if len(indLinIneq) > 0:
                dg = np.r_[dg, self.lin_mat[indLinIneq, :]]
            if len(indNonlinIneq) > 0:
                dg = np.r_[
                    dg,
                    self.eval_nonlin_constraints(x, 'jac', indNonlinIneq)]

            fail = 0
            if hasattr(f, '__iter__'):
                f = f[0]
            if f >= self.inf or np.any(g >= self.inf): fail = 1

            return df.reshape(1, -1), dg, fail

        # Instantiate optimization problem
        optProb = pyOpt.Optimization('pyopt', objectivePyopt)

        # Add objective
        optProb.addObj('objective')

        # Add variables
        optProb.addVarGroup('var',
                            self.nvar,
                            type='c',
                            value=self.var_init,
                            lower=self.var_bounds[:, 0],
                            upper=self.var_bounds[:, 1])
        # Add constraints
        if len(indLinEq) > 0:
            optProb.addConGroup('lin-equality',
                                len(indLinEq),
                                type='e',
                                equal=self.lin_bounds[indLinEq, 0])
        if len(indNonlinEq) > 0:
            optProb.addConGroup('nonlin-equality',
                                len(indNonlinEq),
                                type='e',
                                equal=self.nonlin_bounds[indNonlinEq, 0])
        if len(indLinIneq) > 0:
            optProb.addConGroup('lin-inequality',
                                len(indLinIneq),
                                type='i',
                                lower=self.lin_bounds[indLinIneq, 0],
                                upper=self.lin_bounds[indLinIneq, 1])
        if len(indNonlinIneq) > 0:
            optProb.addConGroup('nonlin-inequality',
                                len(indNonlinIneq),
                                type='i',
                                lower=self.nonlin_bounds[indNonlinIneq, 0],
                                upper=self.nonlin_bounds[indNonlinIneq, 1])

        # Setup solver
        if 'SNOPT' in solver:
            optimizer = pyOpt.pySNOPT.SNOPT(options=options)
        elif 'SLSQP' in solver:
            optimizer = pyOpt.SLSQP(options=options)
        elif 'CONMIN' in solver:
            optimizer = pyOpt.CONMIN(options=options)
        elif 'ALGENCAN' in solver:
            optimizer = pyOpt.ALGENCAN(options=options)
        elif 'ALPSO' in solver:
            optimizer = pyOpt.ALPSO(options=options)
        elif 'ALHSO' in solver:
            optimizer = pyOpt.ALHSO(options=options)
        elif 'COBYLA' in solver:
            optimizer = pyOpt.COBYLA(options=options)
        elif 'FILTERSD' in solver:
            optimizer = pyOpt.FILTERSD(options=options)
        elif 'KOPT' in solver:
            optimizer = pyOpt.KOPT(options=options)
        elif 'MIDACO' in solver:
            optimizer = pyOpt.MIDACO(options=options)
        elif 'KSQP' in solver:
            optimizer = pyOpt.KSQP(options=options)
        elif 'SDPEN' in solver:
            optimizer = pyOpt.SDPEN(options=options)
        elif 'SOLVOPT' in solver:
            optimizer = pyOpt.SOLVOPT(options=options)
        else:
            raise ValueError('Unknown pyOpt solver: {}'.format(solver))

        # Run optimization
        if sens == 'finite-difference':
            optimizer(optProb, sens_type='FD')
        else:
            optimizer(optProb, sens_type=gradientPyopt)

        # Extract solution
        j = len(optProb._solutions) - 1
        xStar = np.zeros(self.nvar)
        for k in range(self.nvar):
            xStar[k] = optProb._solutions[j].getVar(k).value

        return xStar, objectivePyopt(xStar)[0]
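
A hypothetical call, assuming problem is an instance of the surrounding class with objective, gradient and bounds already configured (MAXIT and ACC are standard pyOpt SLSQP options):

x_star, f_star = problem.optimize_pyopt(solver='SLSQP',
                                        sens='finite-difference',
                                        options={'MAXIT': 200, 'ACC': 1e-8})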