def __init__(self, pll_type=None, *args, **kwargs):
    '''
    FILTERSD Optimizer Class Initialization

    **Keyword arguments:**

    - pll_type -> STR: Parallel Implementation (None, 'POA'-Parallel Objective Analysis), *Default* = None

    Documentation last updated:  Feb. 16, 2010 - Peter W. Jansen
    '''
    if pll_type is None:
        self.poa = False
    elif pll_type.upper() == 'POA':
        self.poa = True
    else:
        raise ValueError("pll_type must be either None or 'POA'")

    name = 'FILTERSD'
    category = 'Local Optimizer'
    def_opts = {
        'rho': [float, 100.0],  # Initial trust region radius
        'htol': [float, 1e-6],  # Tolerance allowed in sum h of constraint feasibilities
        'rgtol': [float, 1e-5],  # Tolerance allowed in reduced gradient l2 norm
        'maxit': [int, 1000],  # Maximum number of major iterations allowed
        'maxgr': [int, 100000],  # Upper limit on the number of gradient calls
        'ubd': [float, 1e5],  # Upper bound on the allowed constraint violation
        'dchk': [int, 0],  # Derivative check flag (0 - no check, 1 - check)
        'dtol': [float, 1e-8],  # Derivative check tolerance
        'iprint': [int, 1],  # Verbosity of printing (0 - None, 1 - Iter, 2 - Debug)
        'iout': [int, 6],  # Output Unit Number
        'ifile': [str, 'FILTERSD.out'],  # Output File Name
    }
    informs = {
        -1: 'ws not large enough',
        -2: 'lws not large enough',
        -3: 'inconsistency during derivative check',
        0: 'successful run',
        1: 'unbounded NLP (f <= fmin at an htol-feasible point)',
        2: 'bounds on x are inconsistent',
        3: 'local minimum of feasibility problem and h > htol (nonlinear constraints are locally inconsistent)',
        4: 'initial point x has h > ubd (reset ubd or x and re-enter)',
        5: 'maxit major iterations have been carried out',
        6: 'termination with rho <= htol',
        7: 'not enough workspace in ws or lws (see message)',
        8: 'insufficient space for filter (increase mxf and re-enter)',
        9: 'unexpected fail in LCP solver',
        10: 'unexpected fail in LCP solver',
    }
    Optimizer.__init__(self, name, category, def_opts, informs, *args, **kwargs)
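All of these wrappers share the same skeleton: def_opts maps each option name to a [type, default] pair and informs maps the solver's exit codes to messages. The sketch below assumes the pyOpt-style Optimization/addVar/addObj/addCon/setOption API these classes are written against; the toy problem is illustrative only.

# Minimal usage sketch (assumes the pyOpt-style API; problem is illustrative).
from pyOpt import Optimization, FILTERSD

def objfunc(x):
    f = (x[0] - 1.0)**2 + (x[1] - 2.0)**2  # objective value
    g = [x[0] + x[1] - 3.0]                # inequality constraint, g <= 0
    fail = 0
    return f, g, fail

opt_prob = Optimization('FILTERSD example', objfunc)
opt_prob.addVar('x1', 'c', lower=-5.0, upper=5.0, value=0.0)
opt_prob.addVar('x2', 'c', lower=-5.0, upper=5.0, value=0.0)
opt_prob.addObj('f')
opt_prob.addCon('g1', 'i')

opt = FILTERSD()               # options start at the def_opts defaults above
opt.setOption('maxit', 500)    # override the [int, 1000] default
opt.setOption('iprint', 0)     # silence iteration output
opt(opt_prob, sens_type='FD')  # finite-difference gradients
print(opt_prob.solution(0))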
def __init__(self, *args, **kwargs):
    '''
    HSO Optimizer Class Initialization

    Documentation last updated:  October. 22, 2008 - Ruben E. Perez
    '''
    name = 'HSO'
    category = 'Global Optimizer'
    def_opts = {
        'hms': [int, 10],  # Memory Size [4,10]
        'dbw': [float, 0.01],  # Variable Bandwidth
        'hmcr': [float, 0.96],  # Probability rate of choosing from memory
        'par': [float, 0.6],  # Pitch adjustment rate
        'maxiter': [int, 10000],  # Maximum Number of Iterations
        'printout': [int, 0],  # Flag to Turn On Information Output
        'xinit': [int, 0],  # Initial Position Flag (0 - no position, 1 - position given)
        'seed': [float, 0],  # Random Number Seed (0 - Auto-Seed based on time clock)
    }
    informs = {}
    Optimizer.__init__(self, name, category, def_opts, informs, *args, **kwargs)
def __init__(self, pll_type=None, *args, **kwargs):
    '''
    KSOPT Optimizer Class Initialization

    **Keyword arguments:**

    - pll_type -> STR: Parallel Implementation (None, 'POA'-Parallel Objective Analysis), *Default* = None

    Documentation last updated:  Feb. 16, 2010 - Peter W. Jansen
    '''
    if pll_type is None:
        self.poa = False
    elif pll_type.upper() == 'POA':
        self.poa = True
    else:
        raise ValueError("pll_type must be either None or 'POA'")

    name = 'KSOPT'
    category = 'Local Optimizer'
    def_opts = {
        'ITMAX': [int, 400],  # Maximum Number of Iterations
        'RDFUN': [float, 1e-4],  # Objective Convergence Relative Tolerance
        'RHOMIN': [float, 5.0],  # Initial KS multiplier
        'RHOMAX': [float, 100.0],  # Final KS multiplier
        'IPRINT': [int, 2],  # Print Control (0 - None, 1 - Final, 2 - Iters)
        'IOUT': [int, 6],  # Output Unit Number
        'IFILE': [str, 'KSOPT.out'],  # Output File Name
    }
    informs = {}
    Optimizer.__init__(self, name, category, def_opts, informs, *args, **kwargs)
def __init__(self, pll_type=None, *args, **kwargs):
    '''
    PSQP Optimizer Class Initialization

    **Keyword arguments:**

    - pll_type -> STR: Parallel Implementation (None, 'POA'-Parallel Objective Analysis), *Default* = None

    Documentation last updated:  Feb. 16, 2010 - Peter W. Jansen
    '''
    if pll_type is None:
        self.poa = False
    elif pll_type.upper() == 'POA':
        self.poa = True
    else:
        raise ValueError("pll_type must be either None or 'POA'")

    name = 'PSQP'
    category = 'Local Optimizer'
    def_opts = {
        'XMAX': [float, 1e16],  # Maximum Stepsize
        'TOLX': [float, 1e-16],  # Variable Change Tolerance
        'TOLC': [float, 1e-6],  # Constraint Violation Tolerance
        'TOLG': [float, 1e-6],  # Lagrangian Gradient Tolerance
        'RPF': [float, 1e-4],  # Penalty Coefficient
        'MIT': [int, 1000],  # Maximum Number of Iterations
        'MFV': [int, 2000],  # Maximum Number of Function Evaluations
        'MET': [int, 2],  # Variable Metric Update (1 - BFGS, 2 - Hoshino)
        'MEC': [int, 2],  # Negative Curvature Correction (1 - None, 2 - Powell's Correction)
        'IPRINT': [int, 2],  # Output Level (0 - None, 1 - Final, 2 - Iter)
        'IOUT': [int, 6],  # Output Unit Number
        'IFILE': [str, 'PSQP.out'],  # Output File Name
    }
    informs = {
        1: 'Change in design variable was less than or equal to tolerance',
        2: 'Change in objective function was less than or equal to tolerance',
        3: 'Objective function less than or equal to tolerance',
        4: 'Maximum constraint value is less than or equal to tolerance',
        11: 'Maximum number of iterations exceeded',
        12: 'Maximum number of function evaluations exceeded',
        13: 'Maximum number of gradient evaluations exceeded',
        -6: 'Termination criterion not satisfied, but obtained point is acceptable',
        # < 0 : 'Method failed',
    }
    Optimizer.__init__(self, name, category, def_opts, informs, *args, **kwargs)
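Since the informs dictionary is the only documentation of PSQP's exit codes, a small self-contained helper can turn a returned code into a readable message. The helper name describe_inform is ours, not part of the wrapper; it simply mirrors the mapping above.

# Self-contained lookup mirroring the PSQP informs mapping above.
PSQP_INFORMS = {
    1: 'Change in design variable was less than or equal to tolerance',
    2: 'Change in objective function was less than or equal to tolerance',
    3: 'Objective function less than or equal to tolerance',
    4: 'Maximum constraint value is less than or equal to tolerance',
    11: 'Maximum number of iterations exceeded',
    12: 'Maximum number of function evaluations exceeded',
    13: 'Maximum number of gradient evaluations exceeded',
    -6: 'Termination criterion not satisfied, but obtained point is acceptable',
}

def describe_inform(code):
    """Return a human-readable message for a PSQP termination code."""
    if code in PSQP_INFORMS:
        return PSQP_INFORMS[code]
    if code < 0:
        return 'Method failed (code {0})'.format(code)
    return 'Unknown PSQP termination code {0}'.format(code)

print(describe_inform(11))  # -> 'Maximum number of iterations exceeded'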
def __init__(self, pll_type=None, *args, **kwargs):
    '''
    FSQP Optimizer Class Initialization

    **Keyword arguments:**

    - pll_type -> STR: Parallel Implementation (None, 'POA'-Parallel Objective Analysis), *Default* = None

    Documentation last updated:  Feb. 16, 2010 - Peter W. Jansen
    '''
    if pll_type is None:
        self.poa = False
    elif pll_type.upper() == 'POA':
        self.poa = True
    else:
        raise ValueError("pll_type must be either None or 'POA'")

    name = 'FSQP'
    category = 'Local Optimizer'
    def_opts = {
        'mode': [int, 100],  # FSQP Mode (See Manual)
        'iprint': [int, 2],  # Output Level (0 - None, 1 - Final, 2 - Major, 3 - Major Details)
        'miter': [int, 500],  # Maximum Number of Iterations
        'bigbnd': [float, 1e10],  # Plus Infinity Value
        'epstol': [float, 1e-8],  # Convergence Tolerance
        'epseqn': [float, 0.0],  # Equality Constraints Tolerance
        'iout': [int, 6],  # Output Unit Number
        'ifile': [str, 'FSQP.out'],  # Output File Name
    }
    informs = {
        0: 'Normal termination of execution',
        1: 'User-provided initial guess is infeasible for linear constraints, unable to generate a point satisfying all these constraints',
        2: 'User-provided initial guess is infeasible for nonlinear inequality constraints and linear constraints, unable to generate a point satisfying all these constraints',
        3: 'The maximum number of iterations has been reached before a solution is obtained',
        4: 'The line search fails to find a new iterate',
        5: 'Failure of the QP solver in attempting to construct d0, a more robust QP solver may succeed',
        6: 'Failure of the QP solver in attempting to construct d1, a more robust QP solver may succeed',
        7: 'Input data are not consistent, check print out error messages',
        8: 'Two consecutive iterates are numerically equivalent before a stopping criterion is satisfied',
        9: 'One of the penalty parameters exceeded bigbnd, the algorithm is having trouble satisfying a nonlinear equality constraint',
    }
    Optimizer.__init__(self, name, category, def_opts, informs, *args, **kwargs)
def __init__(self, pll_type=None, *args, **kwargs): """ ALHSO Optimizer Class Initialization **Keyword arguments:** - pll_type -> STR: Parallel Implementation (None, 'POA'-Parallel Objective Analysis), *Default* = None Documentation last updated: Feb. 16, 2010 - Peter W. Jansen """ # if pll_type == None: self.poa = False elif pll_type.upper() == "POA": self.poa = True else: raise ValueError("pll_type must be either None or 'POA'") # end # name = "ALHSO" category = "Global Optimizer" def_opts = { "hms": [int, 5], # Memory Size [1,50] "hmcr": [float, 0.95], # Probability rate of choosing from memory [0.7,0.99] "par": [float, 0.65], # Pitch adjustment rate [0.1,0.99] "dbw": [int, 2000], # Variable Bandwidth Quantization "maxoutiter": [int, 2e3], # Maximum Number of Outer Loop Iterations (Major Iterations) "maxinniter": [int, 2e2], # Maximum Number of Inner Loop Iterations (Minor Iterations) "stopcriteria": [int, 1], # Stopping Criteria Flag "stopiters": [ int, 10, ], # Consecutively Number of Outer Iterations for which the Stopping Criteria must be Satisfied "etol": [float, 1e-6], # Absolute Tolerance for Equality constraints "itol": [float, 1e-6], # Absolute Tolerance for Inequality constraints "atol": [float, 1e-6], # Absolute Tolerance for Objective Function "rtol": [float, 1e-6], # Relative Tolerance for Objective Function "prtoutiter": [int, 0], # Number of Iterations Before Print Outer Loop Information "prtinniter": [int, 0], # Number of Iterations Before Print Inner Loop Information "xinit": [int, 0], # Initial Position Flag (0 - no position, 1 - position given) "rinit": [float, 1.0], # Initial Penalty Factor "fileout": [int, 1], # Flag to Turn On Output to filename "filename": [ str, "ALHSO.out", ], # We could probably remove fileout flag if filename or fileinstance is given "seed": [float, 0], # Random Number Seed (0 - Auto-Seed based on time clock) "scaling": [int, 1], # Design Variables Scaling Flag (0 - no scaling, 1 - scaling between [-1,1]) } informs = {} Optimizer.__init__(self, name, category, def_opts, informs, *args, **kwargs)
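The def_opts entries double as the tuning knobs of the augmented-Lagrangian harmony search. Below is a hedged sketch of overriding a few of them through setOption; the values are illustrative, not recommendations.

# Sketch of overriding ALHSO defaults (illustrative values only).
from pyOpt import ALHSO

hso = ALHSO()
hso.setOption('hms', 20)        # larger harmony memory, inside the documented [1,50] range
hso.setOption('hmcr', 0.90)     # probability of choosing from memory [0.7,0.99]
hso.setOption('par', 0.35)      # pitch adjustment rate [0.1,0.99]
hso.setOption('stopiters', 15)  # consecutive outer iterations meeting the stopping criteria
hso.setOption('fileout', 1)
hso.setOption('filename', 'ALHSO_run.out')
hso.setOption('seed', 1.0)      # fixed seed for a repeatable run (0 = time-based seed)
# hso(opt_prob) would then run the augmented-Lagrangian harmony search on a
# previously assembled pyOpt Optimization problem.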
def __init__(self, pll_type=None, *args, **kwargs):
    '''
    SOLVOPT Optimizer Class Initialization

    **Keyword arguments:**

    - pll_type -> STR: Parallel Implementation (None, 'POA'-Parallel Objective Analysis), *Default* = None

    Documentation last updated:  Feb. 16, 2010 - Peter W. Jansen
    '''
    if pll_type is None:
        self.poa = False
    elif pll_type.upper() == 'POA':
        self.poa = True
    else:
        raise ValueError("pll_type must be either None or 'POA'")

    name = 'SOLVOPT'
    category = 'Local Optimizer'
    def_opts = {
        'xtol': [float, 1e-4],  # Variables Tolerance
        'ftol': [float, 1e-6],  # Objective Tolerance
        'maxit': [int, 15000],  # Maximum Number of Iterations
        'iprint': [int, 1],  # Output Level (-1 - None, 0 - Final, N - each Nth iter)
        'gtol': [float, 1e-8],  # Constraints Tolerance
        'spcdil': [float, 2.5],  # Space Dilation
        'iout': [int, 6],  # Output Unit Number
        'ifile': [str, 'SOLVOPT.out'],  # Output File Name
    }
    informs = {
        1: 'Normal termination.',
        -2: 'Improper space dimension.',
        -3: 'Objective equals infinity.',
        -4: 'Gradient equals zero or infinity.',
        -5: 'Objective equals infinity.',
        -6: 'Gradient equals zero or infinity.',
        -7: 'Objective function is unbounded.',
        -8: 'Gradient zero at the point, but stopping criteria are not fulfilled.',
        -9: 'Iterations limit exceeded.',
        -11: 'Premature stop is possible. Try to re-run the routine from the obtained point.',
        -12: 'Result may not provide the optimum. The function apparently has many extremum points.',
        -13: 'Result may be inaccurate in the coordinates. The function is flat at the optimum.',
        -14: 'Result may be inaccurate in a function value. The function is extremely steep at the optimum.',
    }
    Optimizer.__init__(self, name, category, def_opts, informs, *args, **kwargs)
def __init__(self, pll_type=None, *args, **kwargs):
    '''
    SLSQP Optimizer Class Initialization

    **Keyword arguments:**

    - pll_type -> STR: Parallel Implementation (None, 'POA'-Parallel Objective Analysis), *Default* = None

    Documentation last updated:  Feb. 16, 2010 - Peter W. Jansen
    '''
    if pll_type is None:
        self.poa = False
    elif pll_type.upper() == 'POA':
        self.poa = True
    else:
        raise ValueError("pll_type must be either None or 'POA'")

    name = 'SLSQP'
    category = 'Local Optimizer'
    def_opts = {
        # SLSQP Options
        'ACC': [float, 1e-6],  # Convergence Accuracy
        'MAXIT': [int, 50],  # Maximum Iterations
        'IPRINT': [int, 1],  # Output Level (<0 - None, 0 - Screen, 1 - File)
        'IOUT': [int, 6],  # Output Unit Number
        'IFILE': [str, 'SLSQP.out'],  # Output File Name
    }
    informs = {
        -1: "Gradient evaluation required (g & a)",
        0: "Optimization terminated successfully.",
        1: "Function evaluation required (f & c)",
        2: "More equality constraints than independent variables",
        3: "More than 3*n iterations in LSQ subproblem",
        4: "Inequality constraints incompatible",
        5: "Singular matrix E in LSQ subproblem",
        6: "Singular matrix C in LSQ subproblem",
        7: "Rank-deficient equality constraint subproblem HFTI",
        8: "Positive directional derivative for linesearch",
        9: "Iteration limit exceeded",
    }
    Optimizer.__init__(self, name, category, def_opts, informs, *args, **kwargs)
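SLSQP is gradient-based, so a sensitivity mode has to be supplied when the optimizer is called. The sketch below assumes pyOpt's convention that sens_type may also be a user callable returning objective and constraint gradients (shaped objectives x variables and constraints x variables); that convention and the toy problem are assumptions, not part of this wrapper.

# Sketch of SLSQP with user-supplied gradients (assumed pyOpt sens_type-callable convention).
from pyOpt import Optimization, SLSQP

def objfunc(x):
    f = x[0]**2 + x[1]**2
    g = [1.0 - x[0] - x[1]]  # inequality g <= 0, i.e. x1 + x2 >= 1
    return f, g, 0

def gradfunc(x, f, g):
    df = [[2.0 * x[0], 2.0 * x[1]]]  # objective gradient (1 objective x 2 variables)
    dg = [[-1.0, -1.0]]              # constraint gradient (1 constraint x 2 variables)
    return df, dg, 0

opt_prob = Optimization('SLSQP example', objfunc)
opt_prob.addVar('x1', 'c', lower=-10.0, upper=10.0, value=2.0)
opt_prob.addVar('x2', 'c', lower=-10.0, upper=10.0, value=2.0)
opt_prob.addObj('f')
opt_prob.addCon('g1', 'i')

slsqp = SLSQP()
slsqp.setOption('ACC', 1e-8)    # tighten the convergence accuracy
slsqp.setOption('MAXIT', 100)
slsqp.setOption('IPRINT', -1)   # no screen or file output
slsqp(opt_prob, sens_type=gradfunc)
print(opt_prob.solution(0))     # expected minimum near (0.5, 0.5)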
def __init__(self, pll_type=None, *args, **kwargs):
    '''
    MMFD Optimizer Class Initialization

    **Keyword arguments:**

    - pll_type -> STR: Parallel Implementation (None, 'POA'-Parallel Objective Analysis), *Default* = None

    Documentation last updated:  Feb. 16, 2010 - Peter W. Jansen
    '''
    if pll_type is None:
        self.poa = False
    elif pll_type.upper() == 'POA':
        self.poa = True
    else:
        raise ValueError("pll_type must be either None or 'POA'")

    name = 'MMFD'
    category = 'Local Optimizer'
    def_opts = {
        'IOPT': [int, 0],  # Feasible Directions Approach (0 - MMFD, 1 - MFD)
        'IONED': [int, 0],  # One-Dimensional Search Method (0,1,2,3)
        'CT': [float, -3e-2],  # Constraint Tolerance
        'CTMIN': [float, 4e-3],  # Active Constraint Tolerance
        'DABOBJ': [float, 1e-3],  # Objective Absolute Tolerance (DABOBJ*abs(f(x)))
        'DELOBJ': [float, 1e-3],  # Objective Relative Tolerance
        'THETAZ': [float, 1e-1],  # Push-Off Factor
        'PMLT': [float, 1e1],  # Penalty multiplier for equality constraints
        'ITMAX': [int, 400],  # Maximum Number of Iterations
        'ITRMOP': [int, 3],  # Consecutive Iterations required for Convergence
        'IPRINT': [int, 2],  # Print Control (0 - None, 1 - Final, 2 - Iters)
        'IFILE': [str, 'MMFD.out'],  # Output File Name
    }
    informs = {}
    Optimizer.__init__(self, name, category, def_opts, informs, *args, **kwargs)
def __init__(self, pll_type=None, *args, **kwargs):
    '''
    ALGENCAN Optimizer Class Initialization

    **Keyword arguments:**

    - pll_type -> STR: Parallel Implementation (None, 'POA'-Parallel Objective Analysis), *Default* = None

    Documentation last updated:  Feb. 16, 2010 - Peter W. Jansen
    '''
    if pll_type is None:
        self.poa = False
    elif pll_type.upper() == 'POA':
        self.poa = True
    else:
        raise ValueError("pll_type must be either None or 'POA'")

    name = 'ALGENCAN'
    category = 'Local Optimizer'
    def_opts = {
        # ALGENCAN Options
        'epsfeas': [float, 1.0e-8],  # Feasibility Convergence Accuracy
        'epsopt': [float, 1.0e-8],  # Optimality Convergence Accuracy
        'efacc': [float, 1.0e-4],  # Feasibility Level for Newton-KKT Acceleration
        'eoacc': [float, 1.0e-4],  # Optimality Level for Newton-KKT Acceleration
        'checkder': [bool, False],  # Check Derivatives Flag
        'iprint': [int, 10],  # Print Flag (0 - None)
        'ifile': [str, 'ALGENCAN.out'],  # Output File Name
        'ncomp': [int, 6],  # Print Precision
    }
    informs = {
        0: "Solution was found.",
        1: "Stationary or infeasible point was found.",
        2: "Penalty parameter is too large; the problem may be infeasible or badly scaled.",
        3: "Maximum number of iterations reached.",
    }
    Optimizer.__init__(self, name, category, def_opts, informs, *args, **kwargs)
def __init__(self, pll_type=None, *args, **kwargs):
    '''
    NSGA2 Optimizer Class Initialization

    **Keyword arguments:**

    - pll_type -> STR: Parallel Implementation (None, 'POA'-Parallel Objective Analysis), *Default* = None

    Documentation last updated:  Feb. 16, 2010 - Peter W. Jansen
    '''
    if pll_type is None:
        self.poa = False
    elif pll_type.upper() == 'POA':
        self.poa = True
    else:
        raise ValueError("pll_type must be either None or 'POA'")

    name = 'NSGA-II'
    category = 'Global Optimizer'
    def_opts = {
        'PopSize': [int, 100],  # Population Size
        'maxGen': [int, 150],  # Maximum Number of Generations
        'pCross_real': [float, 0.6],  # Crossover Probability (real variables)
        'pMut_real': [float, 0.2],  # Mutation Probability (real variables)
        'eta_c': [float, 10],  # Crossover Distribution Index
        'eta_m': [float, 20],  # Mutation Distribution Index
        'pCross_bin': [float, 0],  # Crossover Probability (binary variables)
        'pMut_bin': [float, 0],  # Mutation Probability (binary variables)
        'PrintOut': [int, 1],  # Flag to Turn On Output to filename (levels 0, 1, 2)
        'seed': [float, 0],  # Random Number Seed (0 - Auto-Seed based on time clock)
        'xinit': [int, 0],  # Use Initial Solution Flag (0 - random population, 1 - use given solution)
    }
    informs = {}
    Optimizer.__init__(self, name, category, def_opts, informs, *args, **kwargs)
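NSGA-II is the only multi-objective optimizer in this set; with several addObj entries the objective function is expected to return a list of objective values. Below is a minimal two-objective sketch assuming the surrounding pyOpt API; the note about PopSize being a multiple of 4 is an assumption about the underlying NSGA-II code.

# Two-objective setup sketch for NSGA-II (assumes the pyOpt-style API).
from pyOpt import Optimization, NSGA2

def objfunc(x):
    f = [x[0]**2, (x[0] - 2.0)**2]  # two competing objectives (Schaffer N.1)
    g = []                          # unconstrained
    return f, g, 0

opt_prob = Optimization('Schaffer N.1', objfunc)
opt_prob.addVar('x1', 'c', lower=-10.0, upper=10.0, value=1.0)
opt_prob.addObj('f1')
opt_prob.addObj('f2')

nsga2 = NSGA2()
nsga2.setOption('PopSize', 60)   # assumed to need a multiple of 4 for the tournament selection
nsga2.setOption('maxGen', 100)
nsga2.setOption('PrintOut', 0)   # no output files
nsga2(opt_prob)                  # derivative-free, so no sens_type is passed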
def __init__(self, pll_type=None, *args, **kwargs):
    '''
    GCMMA Optimizer Class Initialization

    **Keyword arguments:**

    - pll_type -> STR: Parallel Implementation (None, 'POA'-Parallel Objective Analysis), *Default* = None

    Documentation last updated:  Feb. 16, 2010 - Peter W. Jansen
    '''
    if pll_type is None:
        self.poa = False
    elif pll_type.upper() == 'POA':
        self.poa = True
    else:
        raise ValueError("pll_type must be either None or 'POA'")

    name = 'GCMMA'
    category = 'Local Optimizer'
    def_opts = {
        # GCMMA Options
        'MAXIT': [int, 1000],  # Maximum Iterations
        'INNMAX': [int, 10],  # Maximum Inner Iterations
        'GEPS': [float, 1e-6],  # Dual Objective Gradient Tolerance
        'DABOBJ': [float, 1e-6],  # Objective Absolute Tolerance
        'DELOBJ': [float, 1e-6],  # Objective Relative Tolerance
        'ITRM': [int, 2],  # Consecutive Iterations required for Convergence
        'IPRINT': [int, 1],  # Output Level (<0 - None, 0 - Screen, 1 - File)
        'IOUT': [int, 6],  # Output Unit Number
        'IFILE': [str, 'GCMMA.out'],  # Output File Name
    }
    informs = {
        0: 'The optimality conditions are satisfied.',
        1: 'The algorithm has been stopped after MAXIT iterations.',
    }
    Optimizer.__init__(self, name, category, def_opts, informs, *args, **kwargs)
def __init__(self, pll_type=None, *args, **kwargs):
    '''
    MMA Optimizer Class Initialization

    **Keyword arguments:**

    - pll_type -> STR: Parallel Implementation (None, 'POA'-Parallel Objective Analysis), *Default* = None

    Documentation last updated:  Feb. 16, 2010 - Peter W. Jansen
    '''
    if pll_type is None:
        self.poa = False
    elif pll_type.upper() == 'POA':
        self.poa = True
    else:
        raise ValueError("pll_type must be either None or 'POA'")

    name = 'MMA'
    category = 'Local Optimizer'
    def_opts = {
        # MMA Options
        'MAXIT': [int, 1000],  # Maximum Iterations
        'GEPS': [float, 1e-6],  # Dual Objective Gradient Tolerance
        'DABOBJ': [float, 1e-6],  # Objective Absolute Tolerance
        'DELOBJ': [float, 1e-6],  # Objective Relative Tolerance
        'ITRM': [int, 2],  # Consecutive Iterations required for Convergence
        'IPRINT': [int, 1],  # Output Level (<0 - None, 0 - Screen, 1 - File)
        'IOUT': [int, 6],  # Output Unit Number
        'IFILE': [str, 'MMA.out'],  # Output File Name
    }
    informs = {
        0: 'The optimality conditions are satisfied.',
        1: 'The algorithm has been stopped after MAXIT iterations.',
    }
    Optimizer.__init__(self, name, category, def_opts, informs, *args, **kwargs)
def __init__(self, pll_type=None, *args, **kwargs):
    '''
    COBYLA Optimizer Class Initialization

    **Keyword arguments:**

    - pll_type -> STR: Parallel Implementation (None, 'POA'-Parallel Objective Analysis), *Default* = None

    Documentation last updated:  Feb. 16, 2010 - Peter W. Jansen
    '''
    if pll_type is None:
        self.poa = False
    elif pll_type.upper() == 'POA':
        self.poa = True
    else:
        raise ValueError("pll_type must be either None or 'POA'")

    name = 'COBYLA'
    category = 'Local Optimizer'
    def_opts = {
        'RHOBEG': [float, 0.5],  # Initial Variables Change
        'RHOEND': [float, 1.0e-6],  # Convergence Accuracy
        'IPRINT': [int, 2],  # Print Flag (0 - None, 1 - Final, 2,3 - Iteration)
        'MAXFUN': [int, 3500],  # Maximum Function Evaluations
        'IOUT': [int, 6],  # Output Unit Number
        'IFILE': [str, 'COBYLA.out'],  # Output File Name
    }
    informs = {
        0: 'Normal return',
        1: 'Max. number of function evaluations reached',
        2: 'Rounding errors are becoming damaging',
    }
    Optimizer.__init__(self, name, category, def_opts, informs, *args, **kwargs)
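COBYLA builds linear models of the objective and constraints from function values only, so no sensitivity type is passed when it is called. A short sketch of the options that matter most for it (values illustrative; opt_prob is a previously assembled problem):

# Sketch of driving the derivative-free COBYLA wrapper.
from pyOpt import COBYLA

cobyla = COBYLA()
cobyla.setOption('RHOBEG', 0.25)  # initial size of the variable changes
cobyla.setOption('RHOEND', 1e-7)  # final accuracy / stopping size
cobyla.setOption('MAXFUN', 2000)  # cap on function evaluations
cobyla.setOption('IPRINT', 1)     # print the final result only
# cobyla(opt_prob) evaluates only objective/constraint values; gradients are never
# requested, which makes it suitable for noisy or non-differentiable models.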
def __init__(self, pll_type=None, *args, **kwargs):
    '''
    SDPEN Optimizer Class Initialization

    **Keyword arguments:**

    - pll_type -> STR: Parallel Implementation (None, 'POA'-Parallel Objective Analysis), *Default* = None

    Documentation last updated:  August. 09, 2012 - Ruben E. Perez
    '''
    if pll_type is None:
        self.poa = False
    elif pll_type.upper() == 'POA':
        self.poa = True
    else:
        raise ValueError("pll_type must be either None or 'POA'")

    name = 'SDPEN'
    category = 'Local Optimizer'
    def_opts = {
        # SDPEN Options
        'alfa_stop': [float, 1e-6],  # Convergence Tolerance
        'nf_max': [int, 5000],  # Maximum Number of Function Evaluations
        'iprint': [int, 0],  # Output Level (<0 - None, 0 - Final, 1 - Iters, 2 - Full)
        'iout': [int, 6],  # Output Unit Number
        'ifile': [str, 'SDPEN.out'],  # Output File Name
    }
    informs = {
        1: 'finished successfully',
        2: 'maximum number of evaluations reached',
    }
    Optimizer.__init__(self, name, category, def_opts, informs, *args, **kwargs)
def __init__(self, pll_type=None, *args, **kwargs):
    '''
    CONMIN Optimizer Class Initialization

    **Keyword arguments:**

    - pll_type -> STR: Parallel Implementation (None, 'POA'-Parallel Objective Analysis), *Default* = None

    Documentation last updated:  Feb. 16, 2010 - Peter W. Jansen
    '''
    if pll_type is None:
        self.poa = False
    elif pll_type.upper() == 'POA':
        self.poa = True
    else:
        raise ValueError("pll_type must be either None or 'POA'")

    name = 'CONMIN'
    category = 'Local Optimizer'
    def_opts = {
        'ITMAX': [int, 10000],  # Maximum Number of Iterations
        'DELFUN': [float, 1e-6],  # Objective Relative Tolerance
        'DABFUN': [float, 1e-6],  # Objective Absolute Tolerance
        'ITRM': [int, 2],  # Consecutive Iterations required for Convergence
        'NFEASCT': [int, 20],  #
        'IPRINT': [int, 2],  # Print Control (0 - None, 1 - Final, 2,3,4,5 - Debug)
        'IOUT': [int, 6],  # Output Unit Number
        'IFILE': [str, 'CONMIN.out'],  # Output File Name
    }
    informs = {}
    Optimizer.__init__(self, name, category, def_opts, informs, *args, **kwargs)
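Every local wrapper above accepts the same pll_type keyword; passing 'POA' only sets self.poa = True here, and the parallel objective analysis itself happens at solve time under MPI. A sketch under that assumption (mpi4py installed and the script launched with something like mpirun -np 4 python script.py):

# Sketch of the shared pll_type handling, using CONMIN as the example.
from pyOpt import CONMIN

conmin_serial = CONMIN()                  # self.poa = False, plain serial evaluation
conmin_parallel = CONMIN(pll_type='POA')  # self.poa = True, parallel objective analysis

conmin_parallel.setOption('ITMAX', 200)
conmin_parallel.setOption('DELFUN', 1e-7)  # relative objective tolerance
conmin_parallel.setOption('IPRINT', 1)
# conmin_parallel(opt_prob, sens_type='FD') is assumed to distribute the
# finite-difference objective evaluations across the MPI ranks.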
def RunCalibration(ConceptualModel, Paths, Basic_inputs, SpatialVarFun, SpatialVarArgs,
                   OF, OF_args, Q_obs, OptimizationArgs, printError=None):
    """
    =======================================================================
    RunCalibration(ConceptualModel, Paths, Basic_inputs, SpatialVarFun,
                   SpatialVarArgs, OF, OF_args, Q_obs, OptimizationArgs,
                   printError=None)
    =======================================================================
    This function calibrates the conceptual distributed hydrological model.

    Inputs:
    ----------
    1-Paths:
        1-PrecPath: [String] path to the folder containing the precipitation rasters
        2-Evap_Path: [String] path to the folder containing the evapotranspiration rasters
        3-TempPath: [String] path to the folder containing the temperature rasters
        4-FlowAccPath: [String] path to the flow accumulation raster of the catchment
            (it should include the raster name and extension)
        5-FlowDPath: [String] path to the flow direction raster of the catchment
            (it should include the raster name and extension)
    2-Basic_inputs:
        1-p2: [List] list of unoptimized parameters
            p2[0] = tfac, 1 for hourly, 0.25 for 15 min time step and 24 for daily time step
            p2[1] = catchment area in km2
        2-init_st: [list] initial values for the state variables [sp,sm,uz,lz,wc] in mm
        3-UB: [Numeric] upper bound of the values of the parameters
        4-LB: [Numeric] lower bound of the values of the parameters
    3-Q_obs: [Numeric] observed values of discharge
    4-lumpedParNo: [int] number of lumped parameters, you have to enter the value of the
        lumped parameter at the end of the list, default is 0 (no lumped parameters)
    5-lumpedParPos: [List] list of order or position of the lumped parameter among all
        the parameters of the lumped model (order starts from 0 to the length of the model
        parameters), default is [] (empty), the following order of parameters is used for
        the lumped HBV model used
        [ltt, utt, rfcf, sfcf, ttm, cfmax, cwh, cfr, fc, beta, e_corr, etf, lp,
         c_flux, k, k1, alpha, perc, pcorr, Kmuskingum, Xmuskingum]
    6-OF: [function] objective function to calculate the performance of the model and
        to be used in the calibration
    7-OF_args: [list] other arguments needed by the objective function

    Outputs:
    ----------
    1- res: the result returned by the ALHSO optimization engine (best objective
        value and corresponding parameter set)

    Example:
    ----------
    PrecPath = "meteodata/4000/calib/prec"
    Evap_Path = "meteodata/4000/calib/evap"
    TempPath = "meteodata/4000/calib/temp"
    FlowAccPath = "GIS/4000/acc4000.tif"
    FlowDPath = "GIS/4000/fd4000.tif"
    p2 = [1, 227.31]
    """
    ### inputs validation
    # data type
    assert len(Paths) == 5, "Paths should include 5 folder paths, " + str(len(Paths)) + " paths are only provided"

    PrecPath = Paths[0]
    Evap_Path = Paths[1]
    TempPath = Paths[2]
    # DemPath = Paths[3]
    FlowAccPath = Paths[3]
    FlowDPath = Paths[4]

    assert type(PrecPath) == str, "PrecPath input should be string type"
    assert type(Evap_Path) == str, "Evap_Path input should be string type"
    assert type(TempPath) == str, "TempPath input should be string type"
    # assert type(DemPath) == str, "DemPath input should be string type"
    assert type(FlowAccPath) == str, "FlowAccPath input should be string type"
    assert type(FlowDPath) == str, "FlowDPath input should be string type"

    # input values
    # dem_ext = DemPath[-4:]
    # assert dem_ext == ".tif", "please add the extension at the end of the DEM raster path input"
    acc_ext = FlowAccPath[-4:]
    assert acc_ext == ".tif", "please add the extension at the end of the flow accumulation raster path input"
    fd_ext = FlowDPath[-4:]
    assert fd_ext == ".tif", "please add the extension at the end of the flow direction raster path input"

    # check whether the paths exist or not
    assert os.path.exists(PrecPath), PrecPath + " you have provided does not exist"
    assert os.path.exists(Evap_Path), Evap_Path + " path you have provided does not exist"
    assert os.path.exists(TempPath), TempPath + " path you have provided does not exist"
    # assert os.path.exists(DemPath), DemPath + " you have provided does not exist"
    assert os.path.exists(FlowAccPath), FlowAccPath + " you have provided does not exist"
    assert os.path.exists(FlowDPath), FlowDPath + " you have provided does not exist"

    # check whether the folders contain the rasters or not
    assert len(os.listdir(PrecPath)) > 0, PrecPath + " folder you have provided is empty"
    assert len(os.listdir(Evap_Path)) > 0, Evap_Path + " folder you have provided is empty"
    assert len(os.listdir(TempPath)) > 0, TempPath + " folder you have provided is empty"

    # basic inputs
    # check if all inputs are included
    assert all(key in Basic_inputs for key in ["p2", "init_st", "UB", "LB", "snow"]), \
        "Basic_inputs should contain ['p2','init_st','UB','LB','snow']"

    p2 = Basic_inputs['p2']
    init_st = Basic_inputs["init_st"]
    UB = Basic_inputs['UB']
    LB = Basic_inputs['LB']
    snow = Basic_inputs['snow']
    assert len(UB) == len(LB), "length of UB should be the same as LB"

    # check the objective function
    assert callable(OF), "the objective function OF should be a callable"
    if OF_args is None:
        OF_args = []

    # read data
    ### meteorological data
    prec = GIS.ReadRastersFolder(PrecPath)
    evap = GIS.ReadRastersFolder(Evap_Path)
    temp = GIS.ReadRastersFolder(TempPath)
    print("meteorological data are read successfully")

    #### GIS data
    # dem = gdal.Open(DemPath)
    acc = gdal.Open(FlowAccPath)
    fd = gdal.Open(FlowDPath)
    print("GIS data are read successfully")

    ### optimization
    # get arguments
    store_history = OptimizationArgs[0]
    history_fname = OptimizationArgs[1]
    # check the optimization arguments
    assert store_history in (0, 1), "store_history should be 0 or 1"
    assert type(history_fname) == str, "history_fname should be of type string"
    assert history_fname[-4:] == ".txt", "history_fname should be a txt file, please change the extension or add .txt at the end of the history_fname"

    print('Calibration starts')

    ### calculate the objective function
    def opt_fun(par):
        try:
            # parameters
            klb = float(par[-2])
            kub = float(par[-1])
            par = par[:-2]
            par_dist = SpatialVarFun(par, *SpatialVarArgs, kub=kub, klb=klb)
            # run the model
            _, q_out, q_uz_routed, q_lz_trans = Wrapper.Dist_model(
                ConceptualModel, acc, fd, prec, evap, temp, par_dist, p2, snow, init_st)
            # calculate the performance of the model
            try:
                error = OF(Q_obs, q_out, q_uz_routed, q_lz_trans, *OF_args)
            except TypeError:  # number of inputs is less than what the function needs
                raise TypeError("the objective function you have entered needs more inputs, please enter them in a list as OF_args")
            # print the error
            if printError != 0:
                print(error)
                print(par)
            fail = 0
        except Exception:
            error = np.nan
            fail = 1
        return error, [], fail

    ### define the optimization components
    opt_prob = Optimization('HBV Calibration', opt_fun)
    for i in range(len(LB)):
        opt_prob.addVar('x{0}'.format(i), type='c', lower=LB[i], upper=UB[i])
    print(opt_prob)

    opt_engine = ALHSO(etol=0.0001, atol=0.0001, rtol=0.0001, stopiters=10,
                       hmcr=0.5, par=0.5)

    Optimizer.__init__(
        opt_engine,
        def_options={
            'hms': [int, 9],  # Memory Size [1,50]
            'hmcr': [float, 0.95],  # Probability rate of choosing from memory [0.7,0.99]
            'par': [float, 0.99],  # Pitch adjustment rate [0.1,0.99]
            'dbw': [int, 2000],  # Variable Bandwidth Quantization
            'maxoutiter': [int, 2e3],  # Maximum Number of Outer Loop Iterations (Major Iterations)
            'maxinniter': [int, 2e2],  # Maximum Number of Inner Loop Iterations (Minor Iterations)
            'stopcriteria': [int, 1],  # Stopping Criteria Flag
            'stopiters': [int, 20],  # Consecutive number of Outer Iterations for which the Stopping Criteria must be Satisfied
            'etol': [float, 0.0001],  # Absolute Tolerance for Equality constraints
            'itol': [float, 0.0001],  # Absolute Tolerance for Inequality constraints
            'atol': [float, 0.0001],  # Absolute Tolerance for Objective Function
            'rtol': [float, 0.0001],  # Relative Tolerance for Objective Function
            'prtoutiter': [int, 0],  # Number of Iterations Before Print Outer Loop Information
            'prtinniter': [int, 0],  # Number of Iterations Before Print Inner Loop Information
            'xinit': [int, 0],  # Initial Position Flag (0 - no position, 1 - position given)
            'rinit': [float, 1.0],  # Initial Penalty Factor
            'fileout': [int, store_history],  # Flag to Turn On Output to filename
            'filename': [str, 'parameters.txt'],  # Output file for the stored history
            'seed': [float, 0.5],  # Random Number Seed (0 - Auto-Seed based on time clock)
            'scaling': [int, 1],  # Design Variables Scaling Flag (0 - no scaling, 1 - scaling between [-1,1])
        })

    res = opt_engine(opt_prob)

    return res
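A call sketch for RunCalibration follows. Every name on the right-hand side (ConceptualModel, SpatialVarFun, SpatialVarArgs, RMSE, Q_obs, UB, LB) is a placeholder for objects the surrounding package provides, and the paths are the ones from the docstring example; this illustrates the argument shapes rather than being runnable as-is.

# Hypothetical call sketch; all right-hand-side names are placeholders.
Paths = ["meteodata/4000/calib/prec",   # precipitation rasters folder
         "meteodata/4000/calib/evap",   # evapotranspiration rasters folder
         "meteodata/4000/calib/temp",   # temperature rasters folder
         "GIS/4000/acc4000.tif",        # flow accumulation raster
         "GIS/4000/fd4000.tif"]         # flow direction raster

Basic_inputs = {
    "p2": [1, 227.31],          # [time factor, catchment area in km2], from the docstring example
    "init_st": [0, 5, 5, 5, 0], # illustrative initial states [sp, sm, uz, lz, wc] in mm
    "UB": UB,                   # upper bounds of the calibrated parameters (placeholder)
    "LB": LB,                   # lower bounds, same length as UB (placeholder)
    "snow": 0,                  # snow routine off
}

OptimizationArgs = [1, "calib_history.txt"]  # [store_history flag, history file (.txt)]

res = RunCalibration(ConceptualModel, Paths, Basic_inputs, SpatialVarFun,
                     SpatialVarArgs, RMSE, [], Q_obs, OptimizationArgs,
                     printError=0)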
def __init__(self, pll_type=None, *args, **kwargs): ''' NLPQL Optimizer Class Initialization **Keyword arguments:** - pll_type -> STR: Parallel Implementation (None, 'POA'-Parallel Objective Analysis), *Default* = None Documentation last updated: Feb. 16, 2010 - Peter W. Jansen ''' # if (pll_type == None): self.poa = False elif (pll_type.upper() == 'POA'): self.poa = True else: raise ValueError("pll_type must be either None or 'POA'") # end # name = 'NLPQL' category = 'Local Optimizer' def_opts = { # NLPQL Options 'Accurancy': [float, 1e-6], # Convergence Accurancy 'ScaleBound': [float, 1e30], # 'maxFun': [int, 20], # Maximum Number of Function Calls During Line Search 'maxIt': [int, 500], # Maximum Number of Iterations 'iPrint': [ int, 2 ], # Output Level (0 - None, 1 - Final, 2 - Major, 3 - Major/Minor, 4 - Full) 'mode': [int, 0], # NLPQL Mode (0 - Normal Execution, 1 to 18 - See Manual) 'iout': [int, 6], # Output Unit Number 'lmerit': [ bool, True ], # Merit Function Type (True - L2 Augmented Penalty, False - L1 Penalty) 'lql': [bool, False ], # QP Subproblem Solver (True - Quasi-Newton, False - Cholesky) 'iFile': [str, 'NLPQL.out'], # Output File Name } informs = { -2: 'Compute gradient values w.r.t. the variables stored in' \ ' first column of X, and store them in DF and DG.' \ ' Only derivatives for active constraints ACTIVE(J)=.TRUE. need to be computed.', -1: 'Compute objective fn and all constraint values subject' \ 'the variables found in the first L columns of X, and store them in F and G.', 0: 'The optimality conditions are satisfied.', 1: ' The algorithm has been stopped after MAXIT iterations.', 2: ' The algorithm computed an uphill search direction.', 3: ' Underflow occurred when determining a new approximation matrix' \ 'for the Hessian of the Lagrangian.', 4: 'The line search could not be terminated successfully.', 5: 'Length of a working array is too short.' \ ' More detailed error information is obtained with IPRINT>0', 6: 'There are false dimensions, for example M>MMAX, N>=NMAX, or MNN2<>M+N+N+2.', 7: 'The search direction is close to zero, but the current iterate is still infeasible.', 8: 'The starting point violates a lower or upper bound.', 9: 'Wrong input parameter, i.e., MODE, LDL decomposition in D and C' \ ' (in case of MODE=1), IPRINT, IOUT', 10: 'Internal inconsistency of the quadratic subproblem, division by zero.', 100: 'The solution of the quadratic programming subproblem has been' \ ' terminated with an error message and IFAIL is set to IFQL+100,' \ ' where IFQL denotes the index of an inconsistent constraint.', } Optimizer.__init__(self, name, category, def_opts, informs, *args, **kwargs)
def __init__(self, pll_type=None, *args, **kwargs): ''' SNOPT Optimizer Class Initialization **Keyword arguments:** - pll_type -> STR: Parallel Implementation (None, 'POA'-Parallel Objective Analysis), *Default* = None Documentation last updated: Feb. 16, 2010 - Peter W. Jansen ''' # if (pll_type == None): self.poa = False elif (pll_type.upper() == 'POA'): self.poa = True else: raise ValueError("pll_type must be either None or 'POA'") # name = 'SNOPT' category = 'Local Optimizer' def_opts = { # SNOPT Printing Options 'Major print level': [int, 1], # Majors Print (1 - line major iteration log) 'Minor print level': [int, 1], # Minors Print (1 - line minor iteration log) 'Print file': [str, 'SNOPT_print.out' ], # Print File Name (specified by subroutine snInit) 'iPrint': [int, 18], # Print File Output Unit (override internally in snopt?) 'Summary file': [str, 'SNOPT_summary.out' ], # Summary File Name (specified by subroutine snInit) 'iSumm': [int, 19], # Summary File Output Unit (override internally in snopt?) 'Print frequency': [int, 100], # Minors Log Frequency on Print File 'Summary frequency': [int, 100], # Minors Log Frequency on Summary File 'Solution': [str, 'Yes'], # Print Solution on the Print File 'Suppress options listing': [type(None), None ], # (options are normally listed) 'System information': [str, 'No'], # Print System Information on the Print File # SNOPT Problem Specification Options 'Problem Type': [ str, 'Minimize' ], # ('Maximize': alternative over Minimize, 'Feasible point': alternative over Minimize or Maximize) 'Objective row': [int, 1], # (has precedence over ObjRow (snOptA)) 'Infinite bound': [float, 1.0e+20], # Infinite Bound Value # SNOPT Convergence Tolerances Options 'Major feasibility tolerance': [float, 1.0e-6], # Target Nonlinear Constraint Violation 'Major optimality tolerance': [float, 1.0e-6 ], # Target Complementarity Gap 'Minor feasibility tolerance': [float, 1.0e-6 ], # For Satisfying the QP Bounds # SNOPT Derivative Checking Options 'Verify level': [int, 0], # Gradients Check Flag # SNOPT Scaling Options 'Scale option': [int, 1], # Scaling (1 - linear constraints and variables) 'Scale tolerance': [float, 0.9], # Scaling Tolerance 'Scale Print': [type(None), None], # Default: scales are not printed # SNOPT Other Tolerances Options 'Crash tolerance': [float, 0.1], # 'Linesearch tolerance': [float, 0.9], # smaller for more accurate search 'Pivot tolerance': [float, 3.7e-11], # epsilon^(2/3) # SNOPT QP subproblems Options 'QPSolver': [str, 'Cholesky'], # Default: Cholesky 'Crash option': [int, 3], # (3 - first basis is essentially triangular) 'Elastic mode': [str, 'No' ], # (start with elastic mode until necessary) 'Elastic weight': [float, 1.0e+4], # (used only during elastic mode) 'Iterations limit': [int, 10000], # (or 20*ncons if that is more) 'Partial price': [int, 1], # (10 for large LPs) # SNOPT SQP method Options 'Start': [ str, 'Cold' ], # has precedence over argument start, ('Warm': alternative to a cold start) 'Major iterations limit': [int, 1000], # or ncons if that is more 'Minor iterations limit': [int, 500], # or 3*ncons if that is more 'Major step limit': [float, 2.0], # 'Superbasics limit': [int, None], # (n1 + 1, n1 = number of nonlinear variables) 'Derivative level': [int, 3], # (NOT ALLOWED IN snOptA) 'Derivative option': [int, 1], # (ONLY FOR snOptA) 'Derivative linesearch': [type(None), None], # 'Nonderivative linesearch': [type(None), None], # 'Function precision': [float, 3.0e-13 ], # epsilon^0.8 (almost full accuracy) 'Difference 
interval': [float, 5.5e-7], # Function precision^(1/2) 'Central difference interval': [float, 6.7e-5 ], # Function precision^(1/3) 'New superbasics limit': [int, 99], # controls early termination of QPs 'Objective row': [int, 1], # row number of objective in F(x) 'Penalty parameter': [float, 0.0], # initial penalty parameter 'Proximal point method': [int, 1], # (1 - satisfies linear constraints near x0) 'Reduced Hessian dimension': [int, 2000], # (or Superbasics limit if that is less) 'Violation limit': [int, 10.0], # (unscaled constraint violation limit) 'Unbounded step size': [float, 1.0e+18], # 'Unbounded objective': [float, 1.0e+15], # # SNOPT Hessian approximation Options 'Hessian full memory': [type(None), None], # default if n1 <= 75 'Hessian limited memory': [type(None), None], # default if n1 > 75 'Hessian frequency': [int, 999999], # for full Hessian (never reset) 'Hessian updates': [int, 10], # for limited memory Hessian 'Hessian flush': [int, 999999], # no flushing # SNOPT Frequencies Options 'Check frequency': [int, 60], # test row residuals ||Ax - sk|| 'Expand frequency': [int, 10000], # for anti-cycling procedure 'Factorization frequency': [int, 50], # 100 for LPs 'Save frequency': [int, 100], # save basis map # SNOPT LUSOL Options 'LU factor tolerance': [float, 3.99], # for NP (100.0 for LP) 'LU update tolerance': [float, 3.99], # for NP ( 10.0 for LP) 'LU singularity tolerance': [float, 3.2e-11], # 'LU partial pivoting': [type(None), None ], # default threshold pivoting strategy 'LU rook pivoting': [type(None), None], # threshold rook pivoting 'LU complete pivoting': [type(None), None], # threshold complete pivoting # SNOPT Basis files Options 'Old basis file': [int, 0], # input basis map 'New basis file': [int, 0], # output basis map 'Backup basis file': [int, 0], # output extra basis map 'Insert file': [int, 0], # input in industry format 'Punch file': [int, 0], # output Insert data 'Load file': [int, 0], # input names and values 'Dump file': [int, 0], # output Load data 'Solution file': [int, 0], # different from printed solution # SNOPT Partitions of cw, iw, rw Options 'Total character workspace': [int, 500], # lencw: 500 'Total integer workspace': [int, None], # leniw: 500 + 100 * (m+n) 'Total real workspace': [int, None], # lenrw: 500 + 200 * (m+n) 'User character workspace': [int, 500], # 'User integer workspace': [int, 500], # 'User real workspace': [int, 500], # #SNOPT Miscellaneous Options 'Debug level': [int, 0], # (0 - Normal, 1 - for developers) 'Timing level': [int, 3], # (3 - print cpu times) } informs = { 0: 'finished successfully', 1: 'optimality conditions satisfied', 2: 'feasible point found', 3: 'requested accuracy could not be achieved', 4: 'weak QP minimizer', 10: 'the problem appears to be infeasible', 11: 'infeasible linear constraints', 12: 'infeasible linear equalities', 13: 'nonlinear infeasibilities minimized', 14: 'infeasibilities minimized', 15: 'infeasible linear constraints in QP subproblem', 20: 'the problem appears to be unbounded', 21: 'unbounded objective', 22: 'constraint violation limit reached', 30: 'resource limit error', 31: 'iteration limit reached', 32: 'major iteration limit reached', 33: 'the superbasics limit is too small', 40: 'terminated after numerical difficulties', 41: 'current point cannot be improved', 42: 'singular basis', 43: 'cannot satisfy the general constraints', 44: 'ill-conditioned null-space basis', 50: 'error in the user-supplied functions', 51: 'incorrect objective derivatives', 52: 'incorrect constraint 
derivatives', 53: 'the QP Hessian is indefinite', 54: 'incorrect second derivatives', 55: 'incorrect derivatives', 60: 'undefined user-supplied functions', 61: 'undefined function at the first feasible point', 62: 'undefined function at the initial point', 63: 'unable to proceed into undefined region', 70: 'user requested termination', 71: 'terminated during function evaluation', 72: 'terminated during constraint evaluation', 73: 'terminated during objective evaluation', 74: 'terminated from monitor routine', 80: 'insufficient storage allocated', 81: 'work arrays must have at least 500 elements', 82: 'not enough character storage', 83: 'not enough integer storage', 84: 'not enough real storage', 90: 'input arguments out of range', 91: 'invalid input argument', 92: 'basis file dimensions do not match this problem', 93: 'the QP Hessian is indefinite', 100: 'finished successfully', 101: 'SPECS file read', 102: 'Jacobian structure estimated', 103: 'MPS file read', 104: 'memory requirements estimated', 105: 'user-supplied derivatives appear to be correct', 106: 'no derivatives were checked', 107: 'some SPECS keywords were not recognized', 110: 'errors while processing MPS data', 111: 'no MPS file specified', 112: 'problem-size estimates too small', 113: 'fatal error in the MPS file', 120: 'errors while estimating Jacobian structure', 121: 'cannot find Jacobian structure at given point', 130: 'fatal errors while reading the SP', 131: 'no SPECS file (iSpecs le 0 or iSpecs gt 99)', 132: 'End-of-file while looking for a BEGIN', 133: 'End-of-file while reading SPECS file', 134: 'ENDRUN found before any valid SPECS', 140: 'system error', 141: 'wrong no of basic variables', 142: 'error in basis package', 142: 'Problem dimensions are too large' } self.set_options = [] Optimizer.__init__(self, name, category, def_opts, informs, *args, **kwargs)
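# The SNOPT block above (and every wrapper in this file) declares its options as
# name -> [type, default] pairs and its exit statuses as an integer -> message
# 'informs' table. The helpers below are a minimal, self-contained sketch of how
# such tables can be consumed; they are illustrative only and are not part of
# the Optimizer base class API.

def check_options(def_opts, user_opts):
    """Merge user overrides into the declared defaults, enforcing the declared types."""
    merged = {}
    for key, (expected_type, default) in def_opts.items():
        value = user_opts.get(key, default)
        # options typed as NoneType act as presence-only flags with no value
        if expected_type is type(None) or value is None:
            merged[key] = value
            continue
        if not isinstance(value, expected_type):
            raise TypeError("option '%s' expects %s, got %s"
                            % (key, expected_type.__name__, type(value).__name__))
        merged[key] = value
    return merged


def describe_inform(informs, code):
    """Translate an optimizer exit code into its message."""
    return informs.get(code, 'unknown inform code %d' % code)


if __name__ == '__main__':
    # a small subset of the SNOPT defaults listed above
    snopt_def_opts = {
        'Major print level': [int, 1],
        'Major feasibility tolerance': [float, 1.0e-6],
        'Print file': [str, 'SNOPT_print.out'],
    }
    snopt_informs = {0: 'finished successfully',
                     1: 'optimality conditions satisfied',
                     32: 'major iteration limit reached'}
    print(check_options(snopt_def_opts, {'Major feasibility tolerance': 1.0e-8}))
    print(describe_inform(snopt_informs, 32))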
def __init__(self, pll_type=None, *args, **kwargs): ''' ALPSO Optimizer Class Initialization **Keyword arguments:** - pll_type -> STR: ALPSO Parallel Implementation (None, SPM- Static, DPM- Dynamic, POA-Parallel Analysis), *Default* = None Documentation last updated: February. 2, 2011 - Ruben E. Perez ''' if (pll_type == None): try: from . import alpso as alpso except: raise ImportError( 'pyALPSO: ALPSO shared library failed to import') name = 'ALPSO' self.alpso = alpso elif (pll_type.upper() == 'SPM'): try: from . import alpso_spm from mpi4py import MPI except: raise ImportError( 'pyALPSO: ALPSO SPM shared library failed to import') name = 'ALPSO - SPM' self.alpso = alpso_spm elif (pll_type.upper() == 'DPM'): #if not 'alpso_dpm' in sys.modules: # raise ImportError('pyALPSO: ALPSO DPM shared library failed to import') # try: from . import alpso_dpm from mpi4py import MPI except: raise ImportError( 'pyALPSO: ALPSO DPM shared library failed to import') name = 'ALPSO - DPM' self.alpso = alpso_dpm elif (pll_type.upper() == 'POA'): try: from . import alpso_poa from mpi4py import MPI except: raise ImportError( 'pyALPSO: ALPSO POA shared library failed to import') name = 'ALPSO - POA' self.alpso = alpso_poa else: raise ValueError( "pll_type must be either None,'SPM', 'DPM' or 'POA'") category = 'Global Optimizer' def_opts = { # Number of Particles (Depends on Problem dimensions) 'SwarmSize': [int, 40], # Maximum Number of Outer Loop Iterations (Major Iterations) 'maxOuterIter': [int, 200], # Maximum Number of Inner Loop Iterations (Minor Iterations) 'maxInnerIter': [int, 6], # Minimum Number of Inner Loop Iterations (Dynamic Inner # Iterations) 'minInnerIter': [int, 6], # Dynamic Number of Inner Iterations Flag 'dynInnerIter': [int, 0], # Stopping Criteria Flag (0 - maxIters, 1 - convergence) 'stopCriteria': [int, 1], # Consecutively Number of Iterations for which the Stopping # Criteria must be Satisfied 'stopIters': [int, 5], # Absolute Tolerance for Equality constraints 'etol': [float, 1e-3], # Absolute Tolerance for Inequality constraints 'itol': [float, 1e-3], #'ltol':[float,1e-2], # Absolute Tolerance for Lagrange Multipliers # Relative Tolerance for Lagrange Multipliers 'rtol': [float, 1e-2], # Absolute Tolerance for Lagrange Function 'atol': [float, 1e-2], # Relative Tolerance in Distance of All Particles to Terminate # (GCPSO) 'dtol': [float, 1e-1], # Number of Iterations Before Print Outer Loop Information 'printOuterIters': [int, 0], # Number of Iterations Before Print Inner Loop Information 'printInnerIters': [int, 0], # Initial Penalty Factor 'rinit': [float, 1.0], # Initial Position Flag (0 - no position, 1 - position given) 'xinit': [int, 0], # Initial Velocity of Particles in Normalized [-1,1] Design Space 'vinit': [float, 1.0], # Maximum Velocity of Particles in Normalized [-1,1] Design Space 'vmax': [float, 2.0], # Cognitive Parameter 'c1': [float, 2.0], # Social Parameter 'c2': [float, 1.0], # Initial Inertia Weight 'w1': [float, 0.99], # Final Inertia Weight 'w2': [float, 0.55], # Number of Consecutive Successes in Finding New Best Position # of Best Particle Before Search Radius will be Increased # (GCPSO) 'ns': [int, 15], # Number of Consecutive Failures in Finding New Best Position # of Best Particle Before Search Radius will be Increased # (GCPSO) 'nf': [int, 5], 'dt': [float, 1.0], # Time step # Craziness Velocity (Added to Particle Velocity After Updating # the Penalty Factors and Langangian Multipliers) 'vcrazy': [float, 1e-4], # Flag to Turn On Output to filename 
'fileout': [int, 1], # We could probably remove fileout flag if filename or # fileinstance is given 'filename': [str, 'ALPSO.out'], # Random Number Seed (0 - Auto-Seed based on time clock) 'seed': [float, 0], # Number of Neighbours of Each Particle 'HoodSize': [int, 40], # Neighbourhood Model (dl/slring - Double/Single Link Ring, # wheel - Wheel, Spatial - based on spatial distance, sfrac - # Spatial Fraction) 'HoodModel': [str, 'gbest'], # Selfless Neighbourhood Model (0 - Include Particle i in NH i, # 1 - Don't Include Particle i) 'HoodSelf': [int, 1], # Design Variables Scaling Flag (0 - no scaling, 1 - scaling # between [-1,1]) 'Scaling': [int, 1], } informs = {} Optimizer.__init__(self, name, category, def_opts, informs, *args, **kwargs) if (self.name in ('ALPSO - SPM', 'ALPSO - DPM', 'ALPSO - POA')): self.myrank = MPI.COMM_WORLD.Get_rank() else: self.myrank = 0
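# The ALPSO wrapper above selects one of four implementations based on pll_type
# and, for the MPI-based modes, records the calling process rank so that only
# the root rank prints and writes files. The function below is an illustrative
# sketch of that dispatch/rank-guard pattern; the mode names and the requirement
# that parallel modes import mpi4py mirror the code above, but the helper itself
# is not part of the wrapper.

def resolve_alpso_mode(pll_type):
    """Return (mode name, MPI rank) for a given pll_type; rank is 0 in serial mode."""
    if pll_type is None:
        return 'ALPSO', 0
    mode = pll_type.upper()
    if mode not in ('SPM', 'DPM', 'POA'):
        raise ValueError("pll_type must be either None,'SPM', 'DPM' or 'POA'")
    try:
        from mpi4py import MPI
    except ImportError:
        raise ImportError('pyALPSO: the %s mode requires mpi4py' % mode)
    return 'ALPSO - %s' % mode, MPI.COMM_WORLD.Get_rank()


if __name__ == '__main__':
    name, rank = resolve_alpso_mode(None)   # serial case: no MPI needed
    if rank == 0:
        print(name)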
def __init__(self, pll_type=None, *args, **kwargs): """IPOPT Optimizer Class Initialization. Documentation last updated: Feb. 16, 2010 - Peter W. Jansen """ name = 'IPOPT' category = 'Local Optimizer' def_opts = { # IPOPT Printing Options # Print Control (0 - None, 1 - Final,2,3,4,5 - Debug) 'IPRINT': [int, 2], 'IOUT': [int, 6], # Output Unit Number 'output_file': [str, 'IPOPT.out'], # Output File Name # Output options 'print_level': [int, 5], # Output verbosity level # Print all options set by the user 'print_user_options': [str, 'no'], # Switch to print all algorithmic options 'print_options_documentation': [str, 'no'], 'file_print_level': [int, 5], # Verbosity level for output file 'option_file_name': [str, ''], # File name of options file # Termination options 'tol': [float, 1e-8], # relative convergence tolerance 'max_iter': [int, 3000], # Maximum number of iterations 'max_cpu_time': [float, 1e+6], # Maximum number of CPU seconds. # Desired threshold for the dual infeasibility 'dual_inf_tol': [float, 1], # Desired threshold for the constraint violation 'constr_viol_tol': [float, 1e-4], # Desired threshold for the complementarity conditions 'compl_inf_tol': [float, 1e-4], # "Acceptable" convergence tolerance (relative) 'acceptable_tol': [float, 1e-6], # Number of "acceptable" iterates before triggering termination 'acceptable_iter': [int, 15], # "Acceptance" threshold for the constraint violation 'acceptable_constr_viol_tol': [float, 1e-2], # "Acceptance" threshold for the dual infeasibility. 'acceptable_dual_inf_tol': [float, 1e+10], # "Acceptance" threshold for the complementarity conditions 'acceptable_compl_inf_tol': [float, 1e-2], # "Acceptance" stopping criterion based on objective function change 'acceptable_obj_change_tol': [float, 1e+20], # Threshold for maximal value of primal iterates 'diverging_iterates_tol': [float, 1e+20], # NLP scaling options # Scaling factor for the objective function 'obj_scaling_factor': [float, 1], # Select the technique use for scaling the NLP ('none', # 'user-scaling', 'gradient-based', 'equilibration-based') 'nlp_scaling_method': [str, 'gradient-based'], # Maximum gradient after NLP scaling 'nlp_scaling_max_gradient': [float, 100], # Minimum value of gradient-based scaling values 'nlp_scaling_min_value': [float, 1e-8], # NLP options # Factor for initial relaxation of the bounds 'bound_relax_factor': [float, 1e-8], # Indicates whether final points should be projected into original # bounds 'honor_original_bounds': [str, 'yes'], # Indicates whether it is desired to check for Nan/Inf in derivative # matrices 'check_derivatives_for_naninf': [str, 'no'], # any bound less or equal this value will be considered -inf (i.e. # not lower bounded) 'nlp_lower_bound_inf': [float, -1e+19], # any bound greater or this value will be considered +inf (i.e. 
not # upper bounded) 'nlp_upper_bound_inf': [float, 1e+19], # Determines how fixed variables should be handled # ('make_parameter', 'make_constraint', 'relax_bounds') 'fixed_variable_treatment': [str, 'make_parameter'], # Indicates whether all equality constraints are linear 'jac_c_constant': [str, 'no'], # Indicates whether all inequality constraints are linear 'jac_d_constant': [str, 'no'], # Indicates whether the problem is a quadratic problem 'hessian_constant': [str, 'no'], # Initialization options # Desired minimum relative distance from the initial point to bound 'bound_frac': [float, 0.01], # Desired minimum absolute distance from the initial point to bound 'bound_push': [float, 0.01], # Desired minimum relative distance from the initial slack to bound 'slack_bound_frac': [float, 0.01], # Desired minimum absolute distance from the initial slack to bound 'slack_bound_push': [float, 0.01], # Initial value for the bound multipliers 'bound_mult_init_val': [float, 1], # Maximum allowed least-square guess of constraint multipliers 'constr_mult_init_max': [float, 1000], # Initialization method for bound multipliers ('constant', # 'mu_based') 'bound_mult_init_method': [str, 'constant'], # Barrier parameter options # Indicates if we want to do Mehrotra's algorithm 'mehrotra_algorithm': [str, 'no'], # Update strategy for barrier parameter ('monotone', 'adaptive') 'mu_strategy': [str, 'monotone'], # Oracle for a new barrier parameter in the adaptive strategy # ('probing', 'loqo', 'quality-function') 'mu_oracle': [str, 'quality-function'], # Maximum number of search steps during direct search procedure # determining the optimal centering parameter 'quality_function_max_section_steps': [int, 8], # Oracle for the barrier parameter when switching to fixed mode # ('probing', 'loqo', 'quality-function', 'average_compl') 'fixed_mu_oracle': [str, 'average_compl'], 'mu_init': [float, 0.1], # Initial value for the barrier parameter # Factor for initialization of maximum value for barrier parameter 'mu_max_fact': [float, 1000], 'mu_max': [float, 1e+5], # Maximum value for barrier parameter 'mu_min': [float, 1e-11], # Minimum value for barrier parameter 'mu_target': [float, 0], # Desired value of complementarity # Factor for mu in barrier stop test. 'barrier_tol_factor': [float, 10], # Determines linear decrease rate of barrier parameter 'mu_linear_decrease_factor': [float, 0.2], # Determines superlinear decrease rate of barrier parameter (between # 1 and 2) 'mu_superlinear_decrease_power': [float, 1.5], # Multiplier updates # Method to determine the step size for constraint multiplier # ('primal', 'bound-mult', 'min', 'max', 'full', 'min-dual-infeas', # 'safer-min-dual-infeas', 'primal-and-full', 'dual-and-full', # 'acceptor') 'alpha_for_y': [str, 'primal'], # Tolerance for switching to full equality multiplier steps 'alpha_for_y_tol': [float, 10], # Tells the algorithm to recalculate the equality and inequality # multipliers as least square estimates 'recalc_y': [str, 'no'], # Feasibility threshold for recomputation of multipliers 'recalc_y_feas_tol': [float, 1e-6], # Line search options # Maximum number of second order correction trial steps at each # iteration 'max_soc': [int, 4], # Number of shortened iterations that trigger the watchdog 'watchdog_shortened_iter_trigger': [int, 10], # Maximum number of watchdog iterations 'watchdog_trial_iter_max': [int, 3], # Always accept the first trial step 'accept_every_trial_step': [str, 'no'], # The type of corrector steps that should be taken (unsupported!) 
# ('none', 'affine', 'primal-dual') 'corrector_type': [str, 'none'], 'dependency_detector': [str, 'none'], # Which linear solver should be used to detect linearly dependent equality constraints (experimental) # ('ma28, 'mumps', 'none') # Warm start options # Warm-start for initial point 'warm_start_init_point': [str, 'no'], # same as bound_push for the regular initializer 'warm_start_bound_push': [float, 0.001], # same as bound_frac for the regular initializer 'warm_start_bound_frac': [float, 0.001], # same as slack_bound_frac for the regular initializer 'warm_start_slack_bound_frac': [float, 0.001], # same as slack_bound_push for the regular initializer 'warm_start_slack_bound_push': [float, 0.001], # same as mult_bound_push for the regular initializer. 'warm_start_mult_bound_push': [float, 0.001], # Maximum initial value for the equality multipliers 'warm_start_mult_init_max': [float, 1e+6], # Restoration Phase # Enable heuristics to quickly detect an infeasible problem 'expect_infeasible_problem': [str, 'no'], # Threshold for disabling "expect_infeasible_problem" option 'expect_infeasible_problem_ctol': [float, 0.001], # Multiplier threshold for activating "expect_infeasible_problem" # option 'expect_infeasible_problem_ytol': [float, 1e+8], # Tells algorithm to switch to restoration phase in first iteration 'start_with_resto': [str, 'no'], # Required reduction in primal-dual error in the soft restoration # phase 'soft_resto_pderror_reduction_factor': [float, 0.9999], # Required reduction of infeasibility before leaving restoration # phase 'required_infeasibility_reduction': [float, 0.9], # Threshold for resetting bound multipliers after the restoration # phase 'bound_mult_reset_threshold': [float, 1000], # Threshold for resetting equality and inequality multipliers after # restoration phase 'constr_mult_reset_threshold': [float, 0], # Determines if the original objective function should be evaluated # at restoration phase trial points 'evaluate_orig_obj_at_resto_trial': [str, 'yes'], # Linear Solver 'linear_solver': [str, 'ma57'], # Method for scaling the linear system ('none', 'mc19', # 'slack-based') 'linear_system_scaling': [str, 'mc19'], # Flag indicating that linear scaling is only done if it seems # required 'linear_scaling_on_demand': [str, 'yes'], # Maximum number of iterative refinement steps per linear system # solve 'max_refinement_steps': [float, 10], # Minimum number of iterative refinement steps per linear system # solve 'min_refinement_steps': [float, 1], # Hessian Perturbation # Maximum value of regularization parameter for handling negative # curvature 'max_hessian_perturbation': [float, 1e+20], # Smallest perturbation of the Hessian block 'min_hessian_perturbation': [float, 1e-20], # Size of first x-s perturbation tried 'first_hessian_perturbation': [float, 0.0001], # Increase factor for x-s perturbation for very first perturbation 'perturb_inc_fact_first': [float, 100], # Increase factor for x-s perturbation 'perturb_inc_fact': [float, 8], # Decrease factor for x-s perturbation 'perturb_dec_fact': [float, 0.333333], # Size of the regularization for rank-deficient constraint Jacobians 'jacobian_regularization_value': [float, 1e-8], # Quasi-Newton # Indicates what Hessian information is to be used ('exact', # 'limited-memory') 'hessian_approximation': [str, 'exact'], # Quasi-Newton update formula for the limited memory approximation # ('bfgs', 'sr1') 'limited_memory_update_type': [str, 'bfgs'], # Maximum size of the history for the limited quasi-Newton Hessian # 
approximation 'limited_memory_max_history': [int, 6], # Threshold for successive iterations where update is skipped 'limited_memory_max_skipping': [int, 2], # Initialization strategy for the limited memory quasi-Newton # approximation ('scalar1', 'scalar2', 'scalar3', 'scalar4', # 'constant') 'limited_memory_initialization': [str, 'scalar1'], # Value for B0 in low-rank update 'limited_memory_init_val': [float, 1], # Upper bound on value for B0 in low-rank update 'limited_memory_init_val_max': [float, 1e+8], # Lower bound on value for B0 in low-rank update 'limited_memory_init_val_min': [float, 1e-8], # Determines if the quasi-Newton updates should be special during # the restoration phase 'limited_memory_special_for_resto': [str, 'no'], # Derivative Test options # Enable derivative checker ('none', 'first-order', 'second-order', # 'only-second-order') 'derivative_test': [str, 'none'], # Size of the finite difference perturbation in derivative test 'derivative_test_perturbation': [float, 1e-8], # Threshold for indicating wrong derivative 'derivative_test_tol': [float, 0.0001], # Indicates whether information for all estimated derivatives should # be printed 'derivative_test_print_all': [str, 'no'], # Index of first quantity to be checked by derivative checker 'derivative_test_first_index': [int, -2], # Maximal perturbation of an evaluation point 'point_perturbation_radius': [float, 10], # MA57 Linear Solver options # Pivot tolerance for the linear solver MA57 'ma57_pivtol': [float, 1e-8], # Maximum pivot tolerance for the linear solver MA57 'ma57_pivtolmax': [float, 0.0001], # Safety factor for work space memory allocation for the linear # solver MA57 'ma57_pre_alloc': [float, 1.05], 'ma57_pivot_order': [int, 5], # Controls pivot order in MA57 # Controls MA57 automatic scaling 'ma57_automatic_scaling': [str, 'yes'], # Controls block size used by Level 3 BLAS in MA57BD 'ma57_block_size': [int, 16], 'ma57_node_amalgamation': [int, 16], # Node amalgamation parameter 'ma57_small_pivot_flag': [int, 0], } informs = { 0: 'Solve Succeeded', 1: 'Solved To Acceptable Level', 2: 'Infeasible Problem Detected', 3: 'Search Direction Becomes Too Small', 4: 'Diverging Iterates', 5: 'User Requested Stop', 6: 'Feasible Point Found', -1: 'Maximum Iterations Exceeded', -2: 'Restoration Failed', -3: 'Error In Step Computation', -4: 'Maximum CpuTime Exceeded', -10: 'Not Enough Degrees Of Freedom', -11: 'Invalid Problem Definition', -12: 'Invalid Option', -13: 'Invalid Number Detected', -100: 'Unrecoverable Exception', -101: 'NonIpopt Exception Thrown', -102: 'Insufficient Memory', -199: 'Internal Error' } self.set_options = [] Optimizer.__init__(self, name, category, def_opts, informs, *args, **kwargs)
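# IPOPT itself consumes flat name -> value settings, while the wrapper above
# stores them as name -> [type, default] pairs together with a few wrapper-only
# keys ('IPRINT', 'IOUT'). The helper below is a minimal sketch, under that
# assumption, of turning the table into the flat mapping a solver would see;
# the real translation happens inside the Optimizer base class and the IPOPT
# bindings, not in this function.

def flatten_ipopt_options(def_opts, overrides=None, wrapper_only=('IPRINT', 'IOUT')):
    """Collapse [type, default] entries to plain values, applying user overrides."""
    overrides = overrides or {}
    flat = {}
    for key, (_opt_type, default) in def_opts.items():
        if key in wrapper_only:
            continue
        flat[key] = overrides.get(key, default)
    return flat


if __name__ == '__main__':
    # a small subset of the IPOPT defaults listed above
    ipopt_def_opts = {
        'IPRINT': [int, 2],
        'tol': [float, 1e-8],
        'max_iter': [int, 3000],
        'linear_solver': [str, 'ma57'],
    }
    print(flatten_ipopt_options(ipopt_def_opts, {'max_iter': 500}))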
def __init__(self, pll_type=None, *args, **kwargs): ''' MIDACO Optimizer Class Initialization **Keyword arguments:** - pll_type -> STR: Parallel Implementation (None, 'POA'-Parallel Objective Analysis), *Default* = None Documentation last updated: Feb. 16, 2010 - Peter W. Jansen ''' # if (pll_type == None): self.poa = False self.spm = False elif (pll_type.upper() == 'POA'): self.poa = True self.spm = False elif (pll_type.upper() == 'SPM'): self.poa = False self.spm = True else: raise ValueError("pll_type must be either None, 'POA' or 'SPM'") #end # name = 'MIDACO' category = 'Global Optimizer' def_opts = { # MIDACO Options 'ACC':[float,0], # Accuracy for constraint violation (0 - default) 'ISEED':[int,0], # Seed for random number generator (e.g. ISEED = 0,1,2,3,...) 'FSTOP':[int,0], # Objective Function Stopping Value (0 - disabled) 'AUTOSTOP':[int,0], # Automatic stopping criteria (0 - disable, 1 to 500 - from local to global) 'ORACLE':[float,0], # Oracle parameter for constrained problems (0 - Use internal default) 'FOCUS':[int,0], # Focus of MIDACO search process around best solution (0 - Use internal default) 'ANTS':[int,0], # Number of iterates (ants) per generation (0 - Use internal default) 'KERNEL':[int,0], # Size of the solution archive (0 - Use internal default) 'CHARACTER':[int,0], # Internal custom parameters (0 - Use internal default, 1 - IP problems, 2 - NLP problems, 3 - MINLP problems) 'MAXEVAL':[int,10000], # Maximum function evaluations 'MAXTIME':[int,86400], # Maximum time limit, in seconds 'IPRINT':[int,1], # Output Level (<0 - None, 0 - Screen, 1 - File(s)) 'PRINTEVAL':[int,1000], # Print-Frequency for current best solution 'IOUT1':[int,36], # History output unit number 'IOUT2':[int,37], # Best solution output unit number 'IFILE1':[str,'MIDACO_HIST.out'], # History output file name 'IFILE2':[str,'MIDACO_BEST.out'], # Best output file name 'LKEY':[str,'MIDACO_LIMITED_VERSION___[CREATIVE_COMMONS_BY-NC-ND_LICENSE]'], } informs = { 1 : 'Feasible solution, MIDACO was stopped by the user submitting ISTOP=1', 2 : 'Infeasible solution, MIDACO was stopped by the user submitting ISTOP=1', 3 : 'Feasible solution, MIDACO stopped automatically using AUTOSTOP option', 4 : 'Infeasible solution, MIDACO stopped automatically using AUTOSTOP option', 5 : 'Feasible solution, MIDACO stopped automatically by FSTOP', 51 : 'WARNING: Some X(i) is greater/lower than +/- 1.0D+12 (try to avoid huge values!)', 52 : 'WARNING: Some XL(i) is greater/lower than +/- 1.0D+12 (try to avoid huge values!)', 53 : 'WARNING: Some XU(i) is greater/lower than +/- 1.0D+12 (try to avoid huge values!)', 61 : 'WARNING: Some X(i) should be discrete (e.g. 1.000) , but is continuous (e.g. 1.234)', 62 : 'WARNING: Some XL(i) should be discrete (e.g. 1.000) , but is continuous (e.g. 1.234)', 63 : 'WARNING: Some XU(i) should be discrete (e.g. 1.000) , but is continuous (e.g. 
1.234)', 71 : 'WARNING: Some XL(i) = XU(I) (fixed variable)', 81 : 'WARNING: F(X) has value NaN for starting point X (sure your problem is correct?)', 82 : 'WARNING: Some G(X) has value NaN for starting point X (sure your problem is correct?)', 91 : 'WARNING: FSTOP is greater/lower than +/- 1.0D+8', 92 : 'WARNING: ORACLE is greater/lower than +/- 1.0D+8', 101 : 'ERROR: L <= 0 or L > 1.0D+6', 102 : 'ERROR: N <= 0 or N > 1.0D+6', 103 : 'ERROR: NINT < 0', 104 : 'ERROR: NINT > N', 105 : 'ERROR: M < 0 or M > 1.0D+6', 106 : 'ERROR: ME < 0', 107 : 'ERROR: ME > M', 201 : 'ERROR: some X(i) has type NaN', 202 : 'ERROR: some XL(i) has type NaN', 203 : 'ERROR: some XU(i) has type NaN', 204 : 'ERROR: some X(i) < XL(i)', 205 : 'ERROR: some X(i) > XU(i)', 206 : 'ERROR: some XL(i) > XU(i)', 301 : 'ERROR: ACC < 0 or ACC > 1.0D+6', 302 : 'ERROR: ISEED < 0 or ISEED > 1.0D+12', 303 : 'ERROR: FSTOP greater/lower than +/- 1.0D+12', 304 : 'ERROR: AUTOSTOP < 0 or AUTOSTOP > 1.0D+6', 305 : 'ERROR: ORACLE greater/lower than +/- 1.0D+12', 306 : 'ERROR: |FOCUS| < 1 or FOCUS > 1.0D+12', 307 : 'ERROR: ANTS < 0 or ANTS > 1.0D+8', 308 : 'ERROR: KERNEL < 0 or KERNEL > 100', 309 : 'ERROR: ANTS < KERNEL', 310 : 'ERROR: ANTS > 0 but KERNEL = 0', 311 : 'ERROR: KERNEL > 0 but ANTS = 0', 312 : 'ERROR: CHARACTER < 0 or CHARACTER > 1000', 313 : 'ERROR: some MIDACO parameters has type NaN', 401 : 'ERROR: ISTOP < 0 or ISTOP > 1', 501 : 'ERROR: Double precision work space size LRW is too small (see below LRW), RW must be at least of size LRW = 200*N+2*M+1000', 601 : 'ERROR: Integer work space size LIW is too small (see below LIW), IW must be at least of size LIW = 2*N+L+100', 701 : 'ERROR: Input check failed! MIDACO must be called initially with IFAIL = 0', 801 : 'ERROR: L > LMAX (user must specifiy LMAX below in the MIDACO source code)', 802 : 'ERROR: L*M+1 > LXM (user must specifiy LXM below in the MIDACO source code)', 900 : 'ERROR: Invalid or corrupted LICENSE_KEY', 999 : 'ERROR: N > 4. The free test version is limited up to 4 variables', } Optimizer.__init__(self, name, category, def_opts, informs, *args, **kwargs)
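# The MIDACO informs above encode their own severity in the message text:
# codes 1-5 report how the run stopped, 51-92 carry a 'WARNING:' prefix and
# 101-999 carry an 'ERROR:' prefix. The classifier below is an illustrative
# helper based on that observation; it is not part of the wrapper.

def classify_midaco_inform(informs, code):
    """Return ('status'|'warning'|'error', message) for a MIDACO inform code."""
    message = informs.get(code, 'unknown inform code %d' % code)
    if message.startswith('ERROR'):
        return 'error', message
    if message.startswith('WARNING'):
        return 'warning', message
    return 'status', message


if __name__ == '__main__':
    # a few entries copied from the table above
    midaco_informs = {
        1: 'Feasible solution, MIDACO was stopped by the user submitting ISTOP=1',
        71: 'WARNING: Some XL(i) = XU(I) (fixed variable)',
        101: 'ERROR: L <= 0 or L > 1.0D+6',
    }
    for code in (1, 71, 101):
        print(code, *classify_midaco_inform(midaco_informs, code))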
def __init__(self, pll_type=None, *args, **kwargs):
    '''
    NLPQL Optimizer Class Initialization

    **Keyword arguments:**

    - pll_type -> STR: Parallel Implementation (None, 'POA'-Parallel Objective Analysis), *Default* = None

    Documentation last updated: Feb. 16, 2010 - Peter W. Jansen
    '''
    #
    if (pll_type == None):
        self.poa = False
    elif (pll_type.upper() == 'POA'):
        self.poa = True
    else:
        raise ValueError("pll_type must be either None or 'POA'")
    #end

    #
    name = 'NLPQL'
    category = 'Local Optimizer'
    def_opts = {
        # NLPQL Options
        'Accurancy': [float, 1e-6],   # Convergence Accuracy (key spelling follows the wrapper)
        'ScaleBound': [float, 1e30],  #
        'maxFun': [int, 20],          # Maximum Number of Function Calls During Line Search
        'maxIt': [int, 500],          # Maximum Number of Iterations
        'iPrint': [int, 2],           # Output Level (0 - None, 1 - Final, 2 - Major, 3 - Major/Minor, 4 - Full)
        'mode': [int, 0],             # NLPQL Mode (0 - Normal Execution, 1 to 18 - See Manual)
        'iout': [int, 6],             # Output Unit Number
        'lmerit': [bool, True],       # Merit Function Type (True - L2 Augmented Penalty, False - L1 Penalty)
        'lql': [bool, False],         # QP Subproblem Solver (True - Quasi-Newton, False - Cholesky)
        'iFile': [str, 'NLPQL.out'],  # Output File Name
    }
    informs = {
        -2: 'Compute gradient values w.r.t. the variables stored in the'
            ' first column of X, and store them in DF and DG.'
            ' Only derivatives for active constraints ACTIVE(J)=.TRUE. need to be computed.',
        -1: 'Compute objective function and all constraint values subject to'
            ' the variables found in the first L columns of X, and store them in F and G.',
        0: 'The optimality conditions are satisfied.',
        1: 'The algorithm has been stopped after MAXIT iterations.',
        2: 'The algorithm computed an uphill search direction.',
        3: 'Underflow occurred when determining a new approximation matrix'
           ' for the Hessian of the Lagrangian.',
        4: 'The line search could not be terminated successfully.',
        5: 'Length of a working array is too short.'
           ' More detailed error information is obtained with IPRINT>0.',
        6: 'There are false dimensions, for example M>MMAX, N>=NMAX, or MNN2<>M+N+N+2.',
        7: 'The search direction is close to zero, but the current iterate is still infeasible.',
        8: 'The starting point violates a lower or upper bound.',
        9: 'Wrong input parameter, i.e., MODE, LDL decomposition in D and C'
           ' (in case of MODE=1), IPRINT, IOUT.',
        10: 'Internal inconsistency of the quadratic subproblem, division by zero.',
        100: 'The solution of the quadratic programming subproblem has been'
             ' terminated with an error message and IFAIL is set to IFQL+100,'
             ' where IFQL denotes the index of an inconsistent constraint.',
    }
    Optimizer.__init__(self, name, category, def_opts, informs, *args, **kwargs)
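
# --- Inform-code lookup sketch (illustrative only; not part of the original pyOpt source) ---
# The informs dictionary above maps NLPQL's IFAIL return code to a readable message.
# Assuming the Optimizer base class keeps that mapping on the instance as
# `self.informs` (an assumption, not confirmed in this file), a post-run status
# check could be written as the hypothetical helper below.
def _describe_nlpql_exit(optimizer, ifail):
    # Codes not listed above (e.g. IFAIL = 100 + IFQL for an inconsistent QP
    # constraint other than the generic 100 entry) fall back to a plain message
    # instead of raising a KeyError.
    return optimizer.informs.get(ifail, 'unrecognized NLPQL exit code: %d' % ifail)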