class Comp(Component):
    x = Float(iotype='in')
    y = Float(iotype='in')
    indct = Dict(iotype='in')
    outdct = Dict(iotype='out')
    cont = Slot(A)
    contlist = List(Slot(A), iotype='in')

    def get_cont(self, i):
        return self.contlist[i]

    def get_attrib(self, name):
        return getattr(self, name)
class Broadcaster(Component):
    """Takes inputs and passes them directly to outputs
    to be broadcast out to other components."""

    names = List(Str, iotype="in",
                 desc="Names of the variables you want to broadcast from "
                      "this component.")
    types = Dict({'default': Float}, iotype="in",
                 desc="Name/type pairs describing the variable types of each "
                      "broadcast variable; the 'default' name is used if no "
                      "other type is set explicitly.")

    def __init__(self, names, types=None):
        """names: list of str
            Names of the variables you would like the broadcaster to create
            for you. All inputs will be named with an '_in' suffix added;
            outputs will follow the name given.

        types: dict (optional)
            Name/type pairs describing which types you would like to
            broadcast. If given, the name 'default' indicates the default
            variable type to use.
        """
        super(Broadcaster, self).__init__()
        self._vars = []
        if types is not None:
            self.types = types
        self.names = names

    def _types_changed(self, old, new):
        if self.names:
            self._names_changed(self.names, self.names)

    def _names_changed(self, old, new):
        """Create inputs and outputs when `names` is changed."""
        for in_var, out_var in self._vars:
            if self.parent:
                self.parent.disconnect('.'.join([self.name, in_var]))
                self.parent.disconnect('.'.join([self.name, out_var]))
            self.remove_trait(in_var)
            self.remove_trait(out_var)
        self._vars = []
        for name in new:
            if name in self.types:
                ttype = self.types[name]
            elif 'default' in self.types:
                ttype = self.types['default']
            else:
                self.raise_exception(
                    'No type was provided for "%s" and no "default" type '
                    'was provided. Specify at least one of these.' % name,
                    ValueError)

            in_var = "%s_in" % name
            out_var = name
            # Use the type looked up above for both the input and the output.
            self.add_trait(in_var, ttype(iotype="in", low=-9e99, high=9e99))
            self.add_trait(out_var, ttype(iotype="out"))
            self._vars.append((in_var, out_var))

    def execute(self, *args, **kwargs):
        for in_var, out_var in self._vars:
            setattr(self, out_var, getattr(self, in_var))
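# A minimal sketch of how the Broadcaster above is typically wired up. The
# assembly and the `SomeComp` consumers (each assumed to have a Float input
# 'x') are hypothetical stand-ins, not taken from the code above.
from openmdao.main.api import Assembly, set_as_top

asm = set_as_top(Assembly())
asm.add('bcast', Broadcaster(['span']))  # creates 'span_in' input, 'span' output
asm.add('c1', SomeComp())
asm.add('c2', SomeComp())

# One upstream value fans out to any number of consumers.
asm.connect('bcast.span', 'c1.x')
asm.connect('bcast.span', 'c2.x')
asm.bcast.span_in = 32.0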
class Dummy(Component):
    x = Array([[-1, 1], [-2, 2]], iotype='in', shape=(2, 2))
    xlist = List([1, 2], iotype='in')
    xdict = Dict({'a': 'b'}, iotype='in')

    def execute(self):
        self.y = self.x
class OptProblem(ArchitectureAssembly):
    """Class for specifying test problems for optimization
    algorithms and architectures."""

    solution = Dict({}, iotype="in",
                    desc="Dictionary of expected values for all des_vars "
                         "and coupling_vars.")

    def check_solution(self, strict=False):
        """Return a dictionary of errors (actual - expected) for all
        des_vars, coupling_vars, and objectives.

        strict: bool (optional)
            If True, an error is raised for any des_var, coupling_var, or
            objective for which no solution is provided. If False, missing
            items are ignored. Defaults to False.
        """
        error = {}

        for k, v in self.get_parameters().iteritems():
            try:
                sol = self.solution[k]
            except KeyError:
                if strict:
                    self.raise_exception(
                        "No solution was given for the des_var %s" % str(k),
                        ValueError)
            else:
                error[k] = v.evaluate() - sol

        for k, v in self.list_coupling_vars().iteritems():
            try:
                sol = self.solution[k]
            except KeyError:
                if strict:
                    self.raise_exception(
                        "No solution was given for the coupling_var %s"
                        % str(k), ValueError)
            else:
                error[k] = (v.indep.evaluate() - sol,
                            v.dep.evaluate() - sol)

        for k, v in self.get_objectives().iteritems():
            try:
                sol = self.solution[k]
            except KeyError:
                if strict:
                    self.raise_exception(
                        "No solution was given for the objective %s"
                        % str(k), ValueError)
            else:
                error[k] = v.evaluate() - sol

        return error
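# Sketch of check_solution() in use. `SellarProblem` is a hypothetical
# OptProblem subclass; the names and values in `solution` are illustrative.
prob = SellarProblem()
prob.solution = {'x1': 0.0, 'z1': 1.977, 'obj': 3.183}
prob.run()

# With strict=True, any des_var/coupling_var/objective without an entry in
# `solution` raises ValueError instead of being skipped.
errors = prob.check_solution(strict=True)
for name, err in errors.iteritems():
    # Scalar entries shown; coupling_vars produce (indep, dep) tuples.
    assert abs(err) < 1e-3, '%s is off by %s' % (name, err)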
def test_list_and_dict_slot_attributes(self):
    top = Assembly()
    top.add('sock', Slot(MyClass, desc='Stuff0'))
    top.add('list_sock', List(Slot(MyClass), iotype='in', desc='Stuff'))
    top.add('dict_sock', Dict(key_trait=Str, value_trait=Slot(MyClass),
                              iotype='in', desc='Stuff2'))

    attrs = top.get_attributes(io_only=False)
    slot_attrs = attrs['Slots']
    self.assertTrue({'name': 'list_sock', 'containertype': 'list',
                     'filled': [], 'klass': 'MyClass',
                     'desc': 'Stuff'} in slot_attrs)
    self.assertTrue({'name': 'dict_sock', 'containertype': 'dict',
                     'filled': {}, 'klass': 'MyClass',
                     'desc': 'Stuff2'} in slot_attrs)
    self.assertTrue({'name': 'sock', 'containertype': 'singleton',
                     'filled': None, 'klass': 'MyClass',
                     'desc': 'Stuff0'} in slot_attrs)

    # Now fill some slots.
    top.list_sock.append(MyClass())
    top.list_sock.append(MyClass())
    top.dict_sock['Testing'] = MyClass()
    top.sock = MyClass()

    # Note: only tested with one item in the dict because it is not ordered,
    # and hash order will vary on different platforms.
    attrs = top.get_attributes(io_only=False)
    slot_attrs = attrs['Slots']
    self.assertTrue({'name': 'list_sock', 'containertype': 'list',
                     'filled': ['MyClass', 'MyClass'], 'klass': 'MyClass',
                     'desc': 'Stuff'} in slot_attrs)

    # Need some special checking for the dict slot
    # since we get back a MyClass instance.
    dict_slots = filter(lambda x: x["name"] == "dict_sock", slot_attrs)
    self.assertEqual(len(dict_slots), 1)
    dict_slot = dict_slots[0]
    self.assertEqual(dict_slot["containertype"], "dict")
    self.assertEqual(dict_slot["klass"], "MyClass")
    self.assertEqual(dict_slot["desc"], "Stuff2")
    self.assertEqual(dict_slot["filled"], {'Testing': 'MyClass'})

    self.assertTrue({'name': 'sock', 'containertype': 'singleton',
                     'filled': 'MyClass', 'klass': 'MyClass',
                     'desc': 'Stuff0'} in slot_attrs)
class dummy_comp(Component):
    x = Float(0.0, iotype='in')
    e = Enum(0, [0, 1, 2, 3], iotype='in')
    d = Dict(value={'e': 2.71, 'pi': 3.14159}, value_trait=Float,
             key_trait=Str, iotype='in')
    X = Array([0, 1, 2, 3], iotype='in')
    Xfixed = Array([0, 1, 2, 3], iotype='in', shape=(4,))
    Y = Array([[0, 1], [2, 3]], iotype='in')
    Y2 = Array([[5], [8]], iotype='in')
    Y3 = Array([[1]], iotype='in')
    Z = List([1, 2, 3, 4], iotype='in')

    def execute(self):
        return
def setUp(self):
    self.top = top = set_as_top(Assembly())
    driver = top.add('driver', SimpleCaseIterDriver())
    top.add('comp1', ExecComp(exprs=['z=x+y']))
    top.add('comp2', ExecComp(exprs=['z=x+1']))
    top.comp1.add('a_dict', Dict({}, iotype='in'))
    top.comp1.add('a_list', List([], iotype='in'))
    top.connect('comp1.z', 'comp2.x')
    driver.workflow.add(['comp1', 'comp2'])

    # now create some Cases
    outputs = ['comp1.z', 'comp2.z']
    cases = []
    for i in range(10):
        inputs = [('comp1.x', i), ('comp1.y', i * 2),
                  ('comp1.a_dict', {'a': 'b'}),
                  ('comp1.a_list', ['a', 'b'])]
        cases.append(Case(inputs=inputs, outputs=outputs,
                          label='case%s' % i))
    driver.iterator = ListCaseIterator(cases)
def setUp(self):
    self.top = top = set_as_top(Assembly())
    driver = top.add('driver', SimpleCaseIterDriver())
    top.add('comp1', ExecComp(exprs=['z=x+y']))
    top.add('comp2', ExecComp(exprs=['z=x+1']))
    top.comp1.add('a_dict', Dict({}, iotype='in'))
    top.comp1.add('a_list', List([], iotype='in'))
    top.connect('comp1.z', 'comp2.x')
    driver.workflow.add(['comp1', 'comp2'])

    # now create some Cases
    outputs = ['comp1.z', 'comp2.z']
    cases = []
    for i in range(10):
        i = float(i)
        inputs = [('comp1.x', i), ('comp1.y', i * 2),
                  ('comp1.a_dict', {'a': 'b'}),
                  ('comp1.a_list', ['a', 'b'])]
        cases.append(Case(inputs=inputs, outputs=outputs))

    Case.set_vartree_inputs(driver, cases)
    driver.add_responses(outputs)
class pyOptSparseDriver(Driver):
    """ Driver wrapper for pyOpt. """

    implements(IHasParameters, IHasConstraints, IHasObjective, IOptimizer,
               IHas2SidedConstraints)

    optimizer = Enum('ALPSO', _check_imports(), iotype='in',
                     desc='Name of the optimizer to use')
    title = Str('Optimization using pyOpt', iotype='in',
                desc='Title of this optimization run')
    options = Dict(iotype='in',
                   desc='Dictionary of optimization parameters')
    print_results = Bool(True, iotype='in',
                         desc='Print pyOpt results if True')
    pyopt_diff = Bool(False, iotype='in',
                      desc='Set to True to let pyOpt calculate the gradient')
    exit_flag = Int(0, iotype="out", desc="0 for fail, 1 for ok")

    def __init__(self, n_x=None):
        """Initialize pyOpt.

        n_x: int (optional)
            Number of design variables.
        """
        super(pyOptSparseDriver, self).__init__()

        # Create lb and ub inputs so external components can set the bounds.
        self.n_x = None
        if n_x is not None:
            shape = (n_x,)
            self.n_x = n_x
            self.add('lb', Array(np.zeros(shape), iotype="in", shape=shape,
                                 desc="Lower bounds for the design variables,"
                                      " which will override values given in"
                                      " add_parameter."))
            self.add('ub', Array(np.zeros(shape), iotype="in", shape=shape,
                                 desc="Upper bounds for the design variables,"
                                      " which will override values given in"
                                      " add_parameter."))

        self.pyOpt_solution = None
        self.param_type = {}
        self.nparam = None

        self.objs = None
        self.nlcons = None
        self.lin_jacs = {}

    def execute(self):
        """pyOpt execution. Note that pyOpt controls the execution, and the
        individual optimizers control the iteration."""
        self.pyOpt_solution = None

        self.run_iteration()

        opt_prob = Optimization(self.title, self.objfunc)

        # Add all parameters
        self.param_type = {}
        self.nparam = self.total_parameters()
        param_list = []
        i_param = 0  # counter for the lb and ub arrays
        for name, param in self.get_parameters().iteritems():

            # We need to identify Enums, Lists, Dicts
            metadata = param.get_metadata()[1]
            values = param.evaluate()

            # Assuming uniform enumerated, discrete, or continuous for now.
            val = values[0]
            n_vals = len(values)
            choices = []
            if 'values' in metadata and \
               isinstance(metadata['values'], (list, tuple, ndarray, set)):
                vartype = 'd'
                choices = metadata['values']
            elif isinstance(val, bool):
                vartype = 'd'
                choices = [True, False]
            elif isinstance(val, (int, int32, int64)):
                vartype = 'i'
            elif isinstance(val, (float, float32, float64)):
                vartype = 'c'
            else:
                msg = 'Only continuous, discrete, or enumerated variables' \
                      ' are supported. %s is %s.' % (name, type(val))
                self.raise_exception(msg, ValueError)
            self.param_type[name] = vartype

            if self.n_x is None:
                lower_bounds = param.get_low()
                upper_bounds = param.get_high()
            else:
                lower_bounds = self.lb[i_param:i_param + n_vals]
                upper_bounds = self.ub[i_param:i_param + n_vals]
                i_param += n_vals

            opt_prob.addVarGroup(name, n_vals, type=vartype,
                                 lower=lower_bounds, upper=upper_bounds,
                                 value=values, choices=choices)
            param_list.append(name)

        # Add all objectives
        for name, obj in self.get_objectives().iteritems():
            name = '%s.out0' % obj.pcomp_name
            opt_prob.addObj(name)

        # Calculate and save gradient for any linear constraints.
        lcons = self.get_constraints(linear=True).values() + \
                self.get_2sided_constraints(linear=True).values()
        if len(lcons) > 0:
            lcon_names = ['%s.out0' % obj.pcomp_name for obj in lcons]
            self.lin_jacs = self.workflow.calc_gradient(param_list,
                                                        lcon_names,
                                                        return_format='dict')
            #print "Linear Gradient"
            #print self.lin_jacs

        # Add all equality constraints
        nlcons = []
        for name, con in self.get_eq_constraints().iteritems():
            size = con.size
            lower = zeros(size)
            upper = zeros(size)
            name = '%s.out0' % con.pcomp_name
            if con.linear is True:
                opt_prob.addConGroup(name, size, lower=lower, upper=upper,
                                     linear=True, wrt=param_list,
                                     jac=self.lin_jacs[name])
            else:
                opt_prob.addConGroup(name, size, lower=lower, upper=upper)
                nlcons.append(name)

        # Add all inequality constraints
        for name, con in self.get_ineq_constraints().iteritems():
            size = con.size
            upper = zeros(size)
            name = '%s.out0' % con.pcomp_name
            if con.linear is True:
                opt_prob.addConGroup(name, size, upper=upper, linear=True,
                                     wrt=param_list, jac=self.lin_jacs[name])
            else:
                opt_prob.addConGroup(name, size, upper=upper)
                nlcons.append(name)

        # Add all double-sided constraints
        for name, con in self.get_2sided_constraints().iteritems():
            size = con.size
            upper = con.high * ones(size)
            lower = con.low * ones(size)
            name = '%s.out0' % con.pcomp_name
            if con.linear is True:
                opt_prob.addConGroup(name, size, upper=upper, lower=lower,
                                     linear=True, wrt=param_list,
                                     jac=self.lin_jacs[name])
            else:
                opt_prob.addConGroup(name, size, upper=upper, lower=lower)
                nlcons.append(name)

        self.objs = self.list_objective_targets()
        self.nlcons = nlcons

        # Instantiate the requested optimizer
        optimizer = self.optimizer
        try:
            exec('from pyoptsparse import %s' % optimizer)
        except ImportError:
            msg = "Optimizer %s is not available in this installation." % \
                  optimizer
            self.raise_exception(msg, ImportError)

        optname = vars()[optimizer]
        opt = optname()

        # Set optimization options
        for option, value in self.options.iteritems():
            opt.setOption(option, value)

        # Execute the optimization problem
        if self.pyopt_diff:
            # Use pyOpt's internal finite difference
            sol = opt(opt_prob, sens='FD',
                      sensStep=self.gradient_options.fd_step)
        else:
            # Use OpenMDAO's differentiator for the gradient
            sol = opt(opt_prob, sens=self.gradfunc)

        # Print results
        if self.print_results:
            print sol

        # Pull optimal parameters back into framework and re-run, so that
        # the framework is left in the right final state.
        dv_dict = sol.getDVs()
        param_types = self.param_type
        for name, param in self.get_parameters().iteritems():
            val = dv_dict[name]
            if param_types[name] == 'i':
                val = int(round(val))
            self.set_parameter_by_name(name, val)
        self.run_iteration()

        # Save the most recent solution.
        self.pyOpt_solution = sol
        try:
            exit_status = sol.optInform['value']
            self.exit_flag = 1
            if exit_status > 2:  # bad
                self.exit_flag = 0
        except KeyError:
            # Nothing is here, so something bad happened!
            self.exit_flag = 0

    def objfunc(self, dv_dict):
        """Function that evaluates and returns the objective function and
        constraints. This function is passed to pyOpt's Optimization object
        and is called from its optimizers.

        dv_dict: dict
            Dictionary of design variable values.

        Returns

        func_dict: dict
            Dictionary of all functional variables evaluated at the design
            point.

        fail: int
            0 for successful function evaluation
            1 for unsuccessful function evaluation
        """
        fail = 1
        func_dict = {}

        try:
            # Integer parameters come back as floats, so we need to round
            # them and turn them into python integers before setting.
            param_types = self.param_type
            for name, param in self.get_parameters().iteritems():
                val = dv_dict[name]
                if param_types[name] == 'i':
                    val = int(round(val))
                self.set_parameter_by_name(name, val)

            # Execute the model
            #print "Setting DV"
            #print dv_dict
            self.run_iteration()

            # Get the objective function evaluations
            for key, obj in self.get_objectives().iteritems():
                name = '%s.out0' % obj.pcomp_name
                func_dict[name] = array(obj.evaluate())

            # Get the constraint evaluations
            for key, con in self.get_constraints().iteritems():
                name = '%s.out0' % con.pcomp_name
                func_dict[name] = array(con.evaluate(self.parent))

            # Get the double-sided constraint evaluations
            for key, con in self.get_2sided_constraints().iteritems():
                name = '%s.out0' % con.pcomp_name
                func_dict[name] = array(con.evaluate(self.parent))

            fail = 0

        except Exception as msg:
            # Exceptions seem to be swallowed by the C code, so this
            # should give the user more info than the dreaded "segfault".
            print "Exception: %s" % str(msg)
            print 70 * "="
            import traceback
            traceback.print_exc()
            print 70 * "="

        #print "Functions calculated"
        #print func_dict
        return func_dict, fail

    def gradfunc(self, dv_dict, func_dict):
        """Function that evaluates and returns the gradient of the objective
        function and constraints. This function is passed to pyOpt's
        Optimization object and is called from its optimizers.

        dv_dict: dict
            Dictionary of design variable values.

        func_dict: dict
            Dictionary of all functional variables evaluated at the design
            point.

        Returns

        sens_dict: dict
            Dictionary of dictionaries for the gradient of each dv/func
            pair.

        fail: int
            0 for successful function evaluation
            1 for unsuccessful function evaluation
        """
        fail = 1
        sens_dict = {}

        try:
            sens_dict = self.workflow.calc_gradient(dv_dict.keys(),
                                                    self.objs + self.nlcons,
                                                    return_format='dict')
            #for key, value in self.lin_jacs.iteritems():
            #    sens_dict[key] = value
            fail = 0

        except Exception as msg:
            # Exceptions seem to be swallowed by the C code, so this
            # should give the user more info than the dreaded "segfault".
            print "Exception: %s" % str(msg)
            print 70 * "="
            import traceback
            traceback.print_exc()
            print 70 * "="

        #print "Derivatives calculated"
        #print dv_dict
        #print sens_dict
        return sens_dict, fail
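# Rough usage sketch for the driver above. `Paraboloid` (inputs x, y and
# output f_xy) is a hypothetical component; 'SLSQP' and its 'ACC' option are
# only valid if that optimizer is present in the pyoptsparse installation.
from openmdao.main.api import Assembly, set_as_top

class OptAsm(Assembly):
    def configure(self):
        self.add('comp', Paraboloid())
        driver = self.add('driver', pyOptSparseDriver())
        driver.optimizer = 'SLSQP'
        driver.options = {'ACC': 1e-6}  # forwarded to opt.setOption()
        driver.add_parameter('comp.x', low=-50.0, high=50.0)
        driver.add_parameter('comp.y', low=-50.0, high=50.0)
        driver.add_objective('comp.f_xy')
        driver.workflow.add('comp')

top = set_as_top(OptAsm())
top.run()
print top.driver.exit_flag  # 1 for ok, 0 for fail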
class IPOPTdriver(Driver):
    """ Driver wrapper for the C version of IPOPT. """

    implements(IHasParameters, IHasConstraints, IHasObjective, IOptimizer)

    # Control parameters for IPOPT. Specifically list the most common;
    # leave the rest for the dictionary "options".
    print_level = Enum(5, range(13), iotype='in',
                       desc='Print information during the IPOPT solution. '
                            'Higher values are more verbose. Use 0 for no '
                            'output.')
    tol = Float(1.0e-8, iotype='in', low=0.0,
                desc='Convergence tolerance. The algorithm terminates if the '
                     'scaled NLP error becomes smaller than this value and '
                     'if additional conditions (see the Ipopt manual) are '
                     'met.')
    max_iter = Int(3000, iotype='in', low=0,
                   desc='Maximum number of iterations.')
    max_cpu_time = Float(1.0e6, iotype='in', low=0.0,
                         desc='Limit on CPU seconds.')
    constr_viol_tol = Float(0.0001, iotype='in', low=0.0,
                            desc='Absolute tolerance on constraint '
                                 'violation.')
    obj_scaling_factor = Float(1.0, iotype='in',
                               desc='Scaling factor for the objective '
                                    'function.')

    # Previous default of 'ma27' isn't always available.
    linear_solver = Enum('mumps',
                         ['ma27', 'ma57', 'ma77', 'pardiso',
                          'wsmp', 'mumps', 'custom'],
                         iotype='in', desc='Linear algebra package used.')

    status = Enum(IpoptReturnStatus.Undefined,
                  [IpoptReturnStatus.Solve_Succeeded,
                   IpoptReturnStatus.Solved_To_Acceptable_Level,
                   IpoptReturnStatus.Infeasible_Problem_Detected,
                   IpoptReturnStatus.Search_Direction_Becomes_Too_Small,
                   IpoptReturnStatus.Diverging_Iterates,
                   IpoptReturnStatus.User_Requested_Stop,
                   IpoptReturnStatus.Feasible_Point_Found,
                   IpoptReturnStatus.Maximum_Iterations_Exceeded,
                   IpoptReturnStatus.Restoration_Failed,
                   IpoptReturnStatus.Error_In_Step_Computation,
                   IpoptReturnStatus.Maximum_CpuTime_Exceeded,
                   IpoptReturnStatus.Not_Enough_Degrees_Of_Freedom,
                   IpoptReturnStatus.Invalid_Problem_Definition,
                   IpoptReturnStatus.Invalid_Option,
                   IpoptReturnStatus.Invalid_Number_Detected,
                   IpoptReturnStatus.Unrecoverable_Exception,
                   IpoptReturnStatus.NonIpopt_Exception_Thrown,
                   IpoptReturnStatus.Insufficient_Memory,
                   IpoptReturnStatus.Internal_Error,
                   IpoptReturnStatus.Undefined],
                  iotype='out',
                  desc='Ipopt return code indicating the status of the '
                       'optimization result.')

    # Available options can be listed by 'ipopt --print-options'.
    options = Dict({
        # This would just turn off the copyright banner:
        #   self.nlp.str_option("sb", 'yes')
        # To suppress all output, set the following to 'yes'.
        'suppress_all_output': 'no',

        # Output.
        #'print_level': 5,  # Made into variable.
        'output_file': "", 'file_print_level': 5,
        'print_user_options': "no", 'print_options_documentation': "no",
        'print_timing_statistics': "no",
        'option_file_name': "",  # Normal default is 'ipopt.opt'.
        'replace_bounds': "no", 'skip_finalize_solution_call': "no",
        'print_info_string': "no", 'inf_pr_output': "original",
        'print_frequency_iter': 1, 'print_frequency_time': 0.,

        # Convergence.
        #'tol': 1e-08,           # Made into variable.
        's_max': 100.0,
        #'max_iter': 3000,       # Made into variable.
        #'max_cpu_time': 1e+06,  # Made into variable.
        'dual_inf_tol': 1.0,
        #'constr_viol_tol': 0.0001,  # Made into variable.
        'compl_inf_tol': 0.0001, 'acceptable_tol': 1e-06,
        'acceptable_iter': 15, 'acceptable_dual_inf_tol': 1e+10,
        'acceptable_constr_viol_tol': 0.01, 'acceptable_compl_inf_tol': 0.01,
        'acceptable_obj_change_tol': 1e+20, 'diverging_iterates_tol': 1e+20,
        'mu_target': 0.0,

        # NLP Scaling.
        'nlp_scaling_method': "gradient-based",
        #'obj_scaling_factor': 1.,  # Made into variable.
        'nlp_scaling_max_gradient': 100.0,
        'nlp_scaling_obj_target_gradient': 0.0,
        'nlp_scaling_constr_target_gradient': 0.0,
        'nlp_scaling_min_value': 1e-08,

        # NLP.
        'nlp_lower_bound_inf': -1e+19, 'nlp_upper_bound_inf': 1e+19,
        'fixed_variable_treatment': "make_parameter",
        'dependency_detector': "none", 'dependency_detection_with_rhs': "no",
        'num_linear_variables': 0, 'kappa_d': 1e-05,
        'bound_relax_factor': 1e-08, 'honor_original_bounds': "yes",
        'check_derivatives_for_naninf': "no",
        'jac_c_constant': "no", 'jac_d_constant': "no",
        'hessian_constant': "no",

        # Initialization.
        'bound_push': 0.01, 'bound_frac': 0.01,
        'slack_bound_push': 0.01, 'slack_bound_frac': 0.01,
        'constr_mult_init_max': 1000.0, 'bound_mult_init_val': 1.0,
        'bound_mult_init_method': "constant",
        'least_square_init_primal': "no", 'least_square_init_duals': "no",

        # Barrier parameter update.
        'mu_max_fact': 1000.0, 'mu_max': 100000.0, 'mu_min': 1e-11,
        'adaptive_mu_globalization': "obj-constr-filter",
        'adaptive_mu_kkterror_red_iters': 4,
        'adaptive_mu_kkterror_red_fact': 0.9999,
        'filter_margin_fact': 1e-05, 'filter_max_margin': 1.0,
        'adaptive_mu_restore_previous_iterate': "no",
        'adaptive_mu_monotone_init_factor': 0.8,
        'adaptive_mu_kkt_norm_type': "2-norm-squared",
        'mu_strategy': "monotone", 'mu_oracle': "quality-function",
        'fixed_mu_oracle': "average_compl", 'mu_init': 0.1,
        'barrier_tol_factor': 10.0, 'mu_linear_decrease_factor': 0.2,
        'mu_superlinear_decrease_power': 1.5,
        'mu_allow_fast_monotone_decrease': "yes",
        'tau_min': 0.99, 'sigma_max': 100.0, 'sigma_min': 1e-06,
        'quality_function_norm_type': "2-norm-squared",
        'quality_function_centrality': "none",
        'quality_function_balancing_term': "none",
        'quality_function_max_section_steps': 8,
        'quality_function_section_sigma_tol': 0.01,
        'quality_function_section_qf_tol': 0.0,

        # Line Search.
        'line_search_method': "filter", 'alpha_red_factor': 0.5,
        'accept_every_trial_step': "no", 'accept_after_max_steps': -1,
        'alpha_for_y': "primal", 'alpha_for_y_tol': 10.0,
        'tiny_step_tol': 2.22045e-15, 'tiny_step_y_tol': 0.01,
        'watchdog_shortened_iter_trigger': 10, 'watchdog_trial_iter_max': 3,
        'theta_max_fact': 10000.0, 'theta_min_fact': 0.0001,
        'eta_phi': 1e-08, 'delta': 1.0, 's_phi': 2.3, 's_theta': 1.1,
        'gamma_phi': 1e-08, 'gamma_theta': 1e-05, 'alpha_min_frac': 0.05,
        'max_soc': 4, 'kappa_soc': 0.99, 'obj_max_inc': 5.0,
        'max_filter_resets': 5, 'filter_reset_trigger': 5,
        'corrector_type': "none", 'skip_corr_if_neg_curv': "yes",
        'skip_corr_in_monotone_mode': "yes",
        'corrector_compl_avrg_red_fact': 1.0,
        'nu_init': 1e-06, 'nu_inc': 0.0001, 'rho': 0.1,
        'kappa_sigma': 1e+10, 'recalc_y': "no", 'recalc_y_feas_tol': 1e-06,
        'slack_move': 1.81899e-12,
        'constraint_violation_norm_type': "1-norm",

        # Warm Start.
        'warm_start_init_point': "no", 'warm_start_same_structure': "no",
        'warm_start_bound_push': 0.001, 'warm_start_bound_frac': 0.001,
        'warm_start_slack_bound_push': 0.001,
        'warm_start_slack_bound_frac': 0.001,
        'warm_start_mult_bound_push': 0.001,
        'warm_start_mult_init_max': 1e+06,
        'warm_start_entire_iterate': "no",

        # Linear Solver.
        #'linear_solver': "mumps",  # Made into variable.
        'linear_system_scaling': "none",  # Had been "mc19", but not always available.
        'linear_scaling_on_demand': "yes",

        # Step Calculation.
        'mehrotra_algorithm': "no", 'fast_step_computation': "no",
        'min_refinement_steps': 1, 'max_refinement_steps': 10,
        'residual_ratio_max': 1e-10, 'residual_ratio_singular': 1e-05,
        'residual_improvement_factor': 1.0, 'neg_curv_test_tol': 1.0,
        'max_hessian_perturbation': 1e+20,
        'min_hessian_perturbation': 1e-20,
        'perturb_inc_fact_first': 100.0, 'perturb_inc_fact': 8.0,
        'perturb_dec_fact': 0.333333, 'first_hessian_perturbation': 0.0001,
        'jacobian_regularization_value': 1e-08,
        'jacobian_regularization_exponent': 0.25,
        'perturb_always_cd': "no",

        # Restoration Phase.
        'expect_infeasible_problem': "no",
        'expect_infeasible_problem_ctol': 0.001,
        'expect_infeasible_problem_ytol': 1e+08,
        'start_with_resto': "no",
        'soft_resto_pderror_reduction_factor': 0.9999,
        'max_soft_resto_iters': 10,
        'required_infeasibility_reduction': 0.9,
        'max_resto_iter': 3000000,
        'evaluate_orig_obj_at_resto_trial': "yes",
        'resto_penalty_parameter': 1000.0, 'resto_proximity_weight': 1.0,
        'bound_mult_reset_threshold': 1000.0,
        'constr_mult_reset_threshold': 0.0,
        'resto_failure_feasibility_threshold': 0.0,

        # Derivative Checker.
        'derivative_test': "none", 'derivative_test_first_index': -2,
        'derivative_test_perturbation': 1e-08,
        'derivative_test_tol': 0.0001, 'derivative_test_print_all': "no",
        'jacobian_approximation': "exact", 'findiff_perturbation': 1e-07,
        'point_perturbation_radius': 10.0,

        # Hessian Approximation.
        'limited_memory_aug_solver': "sherman-morrison",
        'limited_memory_max_history': 6,
        'limited_memory_update_type': "bfgs",
        'limited_memory_initialization': "scalar1",
        'limited_memory_init_val': 1.0,
        'limited_memory_init_val_max': 1e+08,
        'limited_memory_init_val_min': 1e-08,
        'limited_memory_max_skipping': 2,
        'limited_memory_special_for_resto': "no",
        #'hessian_approximation': "exact",  # Forced to "limited-memory".
        'hessian_approximation_space': "nonlinear-variables",

        # MA27 Linear Solver.
        'ma27_pivtol': 1e-08, 'ma27_pivtolmax': 0.0001,
        'ma27_liw_init_factor': 5.0, 'ma27_la_init_factor': 5.0,
        'ma27_meminc_factor': 10.0, 'ma27_skip_inertia_check': "no",
        'ma27_ignore_singularity': "no",

        # MA57 Linear Solver.
        'ma57_pivtol': 1e-08, 'ma57_pivtolmax': 0.0001,
        'ma57_pre_alloc': 1.05, 'ma57_pivot_order': 5,
        'ma57_automatic_scaling': "yes",  # ipopt default is "no".
        'ma57_block_size': 16, 'ma57_node_amalgamation': 16,
        'ma57_small_pivot_flag': 0,

        # Pardiso Linear Solver.
        'pardiso_matching_strategy': "complete+2x2",
        'pardiso_redo_symbolic_fact_only_if_inertia_wrong': "no",
        'pardiso_repeated_perturbation_means_singular': "no",
        'pardiso_msglvl': 0, 'pardiso_skip_inertia_check': "no",
        'pardiso_max_iter': 500, 'pardiso_iter_relative_tol': 1e-06,
        'pardiso_iter_coarse_size': 5000, 'pardiso_iter_max_levels': 10000,
        'pardiso_iter_dropping_factor': 0.5,
        'pardiso_iter_dropping_schur': 0.1,
        'pardiso_iter_max_row_fill': 10000000,
        'pardiso_iter_inverse_norm_factor': 5e+06,
        'pardiso_iterative': "no", 'pardiso_max_droptol_corrections': 4,

        # Mumps Linear Solver.
        'mumps_pivtol': 1e-06, 'mumps_pivtolmax': 0.1,
        'mumps_mem_percent': 1000, 'mumps_permuting_scaling': 7,
        'mumps_pivot_order': 7, 'mumps_scaling': 77, 'mumps_dep_tol': -1.0,

        # MA28 Linear Solver.
        'ma28_pivtol': 0.01,

        # Uncategorized.
        'warm_start_target_mu': 0.0,
        },
        iotype='in',
        desc='Dictionary of additional optimization parameters')

    def __init__(self):
        super(IPOPTdriver, self).__init__()

        self.iter_count = 0

        # Define the IPOPTdriver's private variables.
        # Note: these are all resized in _config_ipopt.
        self.design_vals = zeros(0, 'd')
        self.nlp = None
        self.num_params = 0
        self.num_eq_constraints = 0
        self.num_constraints = 0

        self._prev_parameters = None
        self._saved_j = None

        self.inputs = None
        self.obj = None
        self.con = None

    def set_option(self, name, value):
        """Set one of the options in the large dict of options."""
        if name in self.options:
            self.options[name] = value
        else:
            self.raise_exception('%s is not a valid option for Ipopt' % name,
                                 ValueError)

    def start_iteration(self):
        """Perform initial setup before the iteration loop begins."""

        # Initial run to make sure the workflow executes.
        super(IPOPTdriver, self).run_iteration()

        self._prev_parameters = None
        self._saved_j = None

        self._config_ipopt()

        # get the initial values of the parameters
        self.design_vals = self.eval_parameters(self.parent)

        x_L = self.get_lower_bounds()
        x_U = self.get_upper_bounds()

        # Ipopt treats equality and inequality constraints together.
        # For the equality constraints, both g_L and g_U are set to zero.
        # For the inequality constraints, g_L is set to -(largest float)
        # and g_U is set to zero.
        g_L = zeros(self.num_constraints, 'd')
        g_U = zeros(self.num_constraints, 'd')
        g_L[self.num_eq_constraints:] = -sys.float_info.max

        # number of nonzeros in the Jacobian; constraints assumed dense
        nnzj = self.num_params * self.num_constraints

        # number of nonzeros in the Hessian
        nnzh = self.num_params * (self.num_params + 1) / 2

        try:
            self.nlp = pyipopt.create(
                self.num_params, x_L, x_U,
                self.num_constraints, g_L, g_U,
                nnzj, nnzh,
                eval_f, eval_grad_f, eval_g, eval_jac_g,
                eval_h,  # Placeholder, should not be called.
                functools.partial(apply_new, driver=self))

            self.nlp.set_intermediate_callback(intermediate_callback)

        except Exception as err:
            self._logger.error(str(err))
            raise

        # Set optimization options
        self.nlp.int_option('print_level', self.print_level)
        self.nlp.num_option('tol', self.tol)
        self.nlp.int_option('max_iter', self.max_iter)
        self.nlp.num_option('max_cpu_time', self.max_cpu_time)
        self.nlp.num_option('constr_viol_tol', self.constr_viol_tol)
        self.nlp.num_option('obj_scaling_factor', self.obj_scaling_factor)
        self.nlp.str_option('linear_solver', self.linear_solver)

        # Set optimization options set via the options dict
        for option, value in self.options.iteritems():
            if isinstance(value, int):
                self.nlp.int_option(option, value)
            elif isinstance(value, str):
                self.nlp.str_option(option, value)
            elif isinstance(value, float):
                self.nlp.num_option(option, value)
            else:
                self.raise_exception("Cannot handle option '%s' of type '%s'"
                                     % (option, type(value)), ValueError)

        # Ipopt does the Hessian calculation, so we do not have to.
        self.nlp.str_option("hessian_approximation", "limited-memory")

    def continue_iteration(self):
        """Returns True if iteration should continue. Gets info from the
        optimizer to see if it is done iterating.
        """
        return self.iter_count == 0

    def pre_iteration(self):
        """Checks for RunStopped and evaluates the objective."""
        super(IPOPTdriver, self).pre_iteration()
        if self._stop:
            self.raise_exception('Stop requested', RunStopped)

    def run_iteration(self):
        """ The IPOPT driver iteration. """
        try:
            (self.design_vals,
             _zl, _zu,
             _lambda,  # lambda is a 'recent' addition
             self.obj,
             self.status) = self.nlp.solve(self.design_vals,
                                           self)  # so we can check for stops
        except Exception as err:
            self._logger.error(str(err))
            raise

        # update the model if necessary
        if self._prev_parameters is None or \
           not (self.design_vals == self._prev_parameters).all():
            self.set_parameters(self.design_vals)
            super(IPOPTdriver, self).run_iteration()

        # release storage
        self._prev_parameters = None
        self._saved_j = None

    def _config_ipopt(self):
        """Set up arrays, perform some validation, and make sure that the
        array sizes are consistent.
        """
        # size arrays based on number of parameters
        self.num_params = self.total_parameters()
        self.design_vals = zeros(self.num_params, 'd')

        if self.num_params < 1:
            self.raise_exception('no parameters specified', RuntimeError)

        # size constraint-related arrays
        self.num_eq_constraints = self.total_eq_constraints()
        self.num_constraints = self.num_eq_constraints + \
                               self.total_ineq_constraints()

        self.inputs = self.list_param_group_targets()
        self.obj = self.list_objective_targets()
        self.con = self.list_constraint_targets()

    def _recalc_j(self):
        """Update _saved_j."""
        self._saved_j = self.workflow.calc_gradient(self.inputs,
                                                    self.obj + self.con)
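# Small illustration of the options plumbing defined above; the option
# names come straight from the driver's own defaults dict.
driver = IPOPTdriver()

# The most common controls are plain traits...
driver.tol = 1e-6
driver.max_iter = 500

# ...everything else goes through the validated options dict.
driver.set_option('mu_strategy', 'adaptive')
driver.set_option('acceptable_iter', 10)

# A name Ipopt does not know is rejected up front:
#   driver.set_option('no_such_option', 1)  # raises ValueError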
class MetaModel(Component):
    """ Class that creates a reduced-order model for a tuple of outputs from
    a tuple of inputs. Accepts surrogate models that adhere to ISurrogate.
    Multiple surrogate models can be used. Training inputs and outputs
    should be provided in the params and responses variable trees.

    For a Float variable, the training data is an array of length m.
    """

    default_surrogate = Slot(ISurrogate, allow_none=True,
                             desc="This surrogate will be used for all "
                                  "outputs that don't have a specific "
                                  "surrogate assigned to them in their "
                                  "sur_<name> slot.")

    surrogates = Dict(key_trait=Str,
                      value_trait=Slot(ISurrogate),
                      desc='surrogates for output variables')

    params = VarTree(VariableTree(), iotype='in')

    responses = VarTree(VariableTree(), iotype='in')

    recorder = Slot(ICaseRecorder, desc='Records training cases')

    warm_restart = Bool(False, iotype='in',
                        desc="When set to False (default), the metamodel "
                             "retrains with the new dataset whenever the "
                             "training data values are changed. When set to "
                             "True, the new data is appended to the old "
                             "data and all of the data is used to train.")

    def __init__(self, params=None, responses=None):
        super(MetaModel, self).__init__()

        if params is None or not isinstance(params, tuple):
            msg = "Metamodel params argument needs to be a tuple of " \
                  "variable names."
            self.raise_exception(msg, ValueError)

        if responses is None or not isinstance(responses, tuple):
            msg = "Metamodel responses argument needs to be a tuple of " \
                  "variable names."
            self.raise_exception(msg, ValueError)

        # Inputs and Outputs created immediately.
        input_tree = self.get('params')
        self._param_data = []
        for name in params:
            self.add(name, Float(0.0, iotype='in', desc='metamodel param'))
            input_tree.add(name, List([], desc='training param'))

        output_tree = self.get('responses')
        self._response_data = {}
        for name in responses:
            self.add(name, Float(0.0, iotype='out',
                                 desc='metamodel response'))
            output_tree.add(name, List([], desc='training response'))
            self._response_data[name] = []
            self.surrogates[name] = None

        self._surrogate_input_names = params
        self._surrogate_output_names = responses

        self._train = True

        # keeps track of which sur_<name> slots are full
        self._surrogate_overrides = set()

        # need to maintain a separate copy of the default surrogate for
        # each sur_* that doesn't have a surrogate defined
        self._default_surrogate_copies = {}

        # Special callback for whenever anything changes in the surrogates
        # Dict items.
        self.on_trait_change(self._surrogate_updated, "surrogates_items")

    def _input_updated(self, name, fullpath=None):
        """Set _train if anything changes in our inputs so that training
        occurs on the next execution."""
        if fullpath is not None:
            if fullpath.startswith('params.') or \
               fullpath.startswith('responses.'):
                self._train = True
        super(MetaModel, self)._input_updated(name.split('.', 1)[0])

    def check_config(self, strict=False):
        """Called as part of pre_execute. Does some simple error checking."""
        super(MetaModel, self).check_config(strict=strict)

        # Either there are no surrogates set and no default surrogate (just
        # do passthrough), or all outputs must have surrogates assigned,
        # either explicitly or through the default surrogate.
        if self.default_surrogate is None:
            no_sur = []
            for name in self._surrogate_output_names:
                if name not in self.surrogates or \
                   self.surrogates[name] is None:
                    no_sur.append(name)
            if len(no_sur) > 0:
                self.raise_exception("No default surrogate model is defined"
                                     " and the following outputs do not"
                                     " have a surrogate model: %s. Either"
                                     " specify default_surrogate, or specify"
                                     " a surrogate model for all outputs."
                                     % no_sur, RuntimeError)

    def execute(self):
        """If the training flag is set, train the metamodel. Otherwise,
        predict outputs.
        """
        # Train first
        if self._train:

            input_data = self._param_data
            if self.warm_restart is False:
                input_data = []
                base = 0
            else:
                base = len(input_data)

            for name in self._surrogate_input_names:
                train_name = "params.%s" % name
                val = self.get(train_name)
                num_sample = len(val)

                for j in xrange(base, base + num_sample):
                    if j > len(input_data) - 1:
                        input_data.append([])
                    input_data[j].append(val[j - base])

            # Surrogate models take an (m, n) list of lists:
            #   m = number of training samples
            #   n = number of inputs
            #
            # TODO - Why not numpy array instead?
            for name in self._surrogate_output_names:
                train_name = "responses.%s" % name
                output_data = self._response_data[name]
                if self.warm_restart is False:
                    output_data = []
                output_data.extend(self.get(train_name))
                surrogate = self._get_surrogate(name)
                if surrogate is not None:
                    surrogate.train(input_data, output_data)

            self._train = False

        # Now predict for the current inputs
        inputs = []
        for name in self._surrogate_input_names:
            val = self.get(name)
            inputs.append(val)

        for name in self._surrogate_output_names:
            surrogate = self._get_surrogate(name)
            if surrogate is not None:
                setattr(self, name, surrogate.predict(inputs))

    def _get_surrogate(self, name):
        """Return the designated surrogate for the given output."""
        surrogate = self.surrogates.get(name)
        if surrogate is None and self.default_surrogate is not None:
            surrogate = self._default_surrogate_copies.get(name)
        return surrogate

    def _default_surrogate_changed(self, old_obj, new_obj):
        """Callback whenever the default_surrogate model is changed."""
        if old_obj:
            old_obj.on_trait_change(self._def_surrogate_trait_modified,
                                    remove=True)
        if new_obj:
            new_obj.on_trait_change(self._def_surrogate_trait_modified)

        # Due to the way "add" works, the container will always remove the
        # old one before it adds the new one, so this method actually gets
        # called twice on a replace. Only do this update when the new one
        # gets set.
        for name in self._surrogate_output_names:
            if name not in self._surrogate_overrides:
                surrogate = deepcopy(self.default_surrogate)
                self._default_surrogate_copies[name] = surrogate
                self._update_var_for_surrogate(surrogate, name)

        self.config_changed()
        self._train = True

    def _def_surrogate_trait_modified(self, surrogate, name, old, new):
        # A trait inside of the default_surrogate was changed, so we need
        # to replace all of the default copies.
        for name in self._default_surrogate_copies:
            surr_copy = deepcopy(self.default_surrogate)
            self._default_surrogate_copies[name] = surr_copy

    def _surrogate_updated(self, obj, name, old, new):
        """Called when the self.surrogates Dict is updated."""
        all_changes = new.changed.keys() + new.added.keys() + \
                      new.removed.keys()

        for varname in all_changes:
            surr = self.surrogates.get(varname)
            if surr is None:
                if self.default_surrogate:
                    def_surr = deepcopy(self.default_surrogate)
                    self._default_surrogate_copies[varname] = def_surr
                    self._update_var_for_surrogate(def_surr, varname)
                if varname in self._surrogate_overrides:
                    self._surrogate_overrides.remove(varname)
            else:
                self._surrogate_overrides.add(varname)
                if name in self._default_surrogate_copies:
                    del self._default_surrogate_copies[name]
                self._update_var_for_surrogate(surr, varname)

        self.config_changed()
        self._train = True

    def _update_var_for_surrogate(self, surrogate, varname):
        """Different surrogates have different types of output values, so
        create the appropriate type of output Variable based on the return
        value of get_uncertain_value on the surrogate.

        Presently, this just adds the UncertainVariable for Kriging.
        """
        # TODO - ISurrogate should have a get_datatype or
        # get_uncertainty_type
        val = surrogate.get_uncertain_value(1.0)

        if has_interface(val, IUncertainVariable):
            ttype = UncertainDistVar
        #elif isinstance(val, int_types):
        #    ttype = Int
        elif isinstance(val, real_types):
            ttype = Float
        else:
            self.raise_exception("value type of '%s' is not a supported"
                                 " surrogate return value"
                                 % val.__class__.__name__)

        self.add(varname, ttype(default_value=val, iotype='out',
                                desc=self.trait(varname).desc))
        setattr(self, varname, val)
        return
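# Sketch of the intended MetaModel workflow. `ConstSurrogate` is invented
# here for illustration; any class implementing ISurrogate (train, predict,
# get_uncertain_value) would do.
from openmdao.main.interfaces import implements, ISurrogate

class ConstSurrogate(object):
    """Toy surrogate: always predicts the mean of the training responses."""
    implements(ISurrogate)

    def get_uncertain_value(self, value):
        return value

    def train(self, X, Y):
        self.mean = sum(Y) / float(len(Y))

    def predict(self, x):
        return self.mean

mm = MetaModel(params=('x',), responses=('y',))
mm.default_surrogate = ConstSurrogate()

# Load training data into the params/responses trees...
mm.params.x = [0.0, 1.0, 2.0]
mm.responses.y = [0.0, 2.0, 4.0]

# ...then the first run() trains, and subsequent runs predict.
mm.x = 1.5
mm.run()
print mm.y  # 2.0 with this toy surrogate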
def test_add_trait_w_subtrait(self):
    obj = Container()
    obj.add('lst', List([1, 2, 3], iotype='in'))
    obj.add('dct', Dict({}, iotype='in'))
class Ext(Container):
    resources = Dict(iotype='in')
class MetaModelBase(Component):
    """ Base class for the functionality of a meta model. Should be
    subclassed."""

    # pylint: disable-msg=E1101
    model = Slot(IComponent, allow_none=True,
                 desc='Slot for the Component or Assembly being '
                      'encapsulated.')
    includes = List(Str, iotype='in',
                    desc='A list of names of variables to be included '
                         'in the public interface.')
    excludes = List(Str, iotype='in',
                    desc='A list of names of variables to be excluded '
                         'from the public interface.')

    default_surrogate = Slot(ISurrogate, allow_none=True,
                             desc="This surrogate will be used for all "
                                  "outputs that don't have a specific "
                                  "surrogate assigned to them in their "
                                  "sur_<name> slot.")

    surrogates = Dict(key_trait=Str,
                      value_trait=Slot(ISurrogate),
                      desc='surrogates for output variables')

    report_errors = Bool(True, iotype="in",
                         desc="If True, the metamodel will report errors "
                              "raised by the component. If False, the "
                              "metamodel will swallow the errors but log "
                              "that they happened and exclude the case "
                              "from the training set.")

    recorder = Slot(ICaseRecorder, desc='Records training cases')

    # when fired, the next execution will train the metamodel
    train_next = Event(desc='Train metamodel on next execution')

    # when fired, the next execution will reset all training data
    reset_training_data = Event(desc='Reset training data on next execution')

    def __init__(self):
        super(MetaModelBase, self).__init__()
        self._surrogate_input_names = None
        self._surrogate_output_names = None

        # keeps track of which sur_<name> slots are full
        self._surrogate_overrides = set()

        self._training_data = {}
        self._training_input_history = []

        # dict of constant training input indices and their values
        self._const_inputs = {}

        self._train = False
        self._new_train_data = False
        self._failed_training_msgs = []

        # need to maintain a separate copy of the default surrogate for
        # each sur_* that doesn't have a surrogate defined
        self._default_surrogate_copies = {}

        # The following line will work for classes that inherit from
        # MetaModel as long as they declare their traits in the class body
        # and not in the __init__ function. If they need to create traits
        # dynamically during initialization, they'll have to provide the
        # value of _mm_class_traitnames.
        self._mm_class_traitnames = set(self.traits(iotype=not_none).keys())

        self.on_trait_change(self._surrogate_updated, "surrogates_items")

    def _train_next_fired(self):
        self._train = True
        self._new_train_data = True

    def _reset_training_data_fired(self):
        self._training_input_history = []
        self._const_inputs = {}
        self._failed_training_msgs = []

        # remove output history from training_data
        for name in self._training_data:
            self._training_data[name] = []

    def _warm_start_data_changed(self, oldval, newval):
        self.reset_training_data = True

        # build list of inputs
        for case in newval:
            if self.recorder:
                self.recorder.record(case)
            inputs = []
            for inp_name in self.surrogate_input_names():
                var_name = '.'.join([self.name, inp_name])
                try:
                    inp_val = case[var_name]
                except KeyError:
                    pass
                    #self.raise_exception('The variable "%s" was not '
                    #                     'found as an input in one of the '
                    #                     'cases provided for '
                    #                     'warm_start_data.' % var_name,
                    #                     ValueError)
                else:
                    if inp_val is not None:
                        inputs.append(inp_val)
            self._training_input_history.append(inputs)

            for output_name in self.surrogate_output_names():
                # grab value from case data
                var_name = '.'.join([self.name, output_name])
                try:
                    val = case.get_output(var_name)
                except KeyError:
                    self.raise_exception('The output "%s" was not found '
                                         'in one of the cases provided for '
                                         'warm_start_data' % var_name,
                                         ValueError)
                else:  # save to training output history
                    self._training_data[output_name].append(val)

        self._new_train_data = True

    def child_run_finished(self, childname, outs=None):
        pass

    def check_config(self):
        """Called as part of pre_execute."""

        # 1. model must be set
        if self.model is None:
            self.raise_exception("MetaModel object must have a model!",
                                 RuntimeError)

        # 2. can't have both includes and excludes
        if self.excludes and self.includes:
            self.raise_exception("includes and excludes are mutually "
                                 "exclusive", RuntimeError)

        # 3. the includes and excludes must match actual inputs and
        #    outputs of the model
        input_names = self.surrogate_input_names()
        output_names = self.surrogate_output_names()
        input_and_output_names = input_names + output_names
        for include in self.includes:
            if include not in input_and_output_names:
                self.raise_exception('The include "%s" is not one of the '
                                     'model inputs or outputs ' % include,
                                     ValueError)
        for exclude in self.excludes:
            if exclude not in input_and_output_names:
                self.raise_exception('The exclude "%s" is not one of the '
                                     'model inputs or outputs ' % exclude,
                                     ValueError)

        # 4. Either there are no surrogates set and no default surrogate
        #    (just do passthrough), or all outputs must have surrogates
        #    assigned either explicitly or through the default surrogate.
        if self.default_surrogate is None:
            no_sur = []
            for name in self.surrogate_output_names():
                if not self.surrogates[name]:
                    no_sur.append(name)
            if len(no_sur) > 0 and \
               len(no_sur) != len(self._surrogate_output_names):
                self.raise_exception("No default surrogate model is defined"
                                     " and the following outputs do not"
                                     " have a surrogate model: %s. Either"
                                     " specify default_surrogate, or specify"
                                     " a surrogate model for all outputs."
                                     % no_sur, RuntimeError)

        # 5. All the explicitly set surrogates[] should match actual
        #    outputs of the model.
        for surrogate_name in self.surrogates.keys():
            if surrogate_name not in output_names:
                self.raise_exception('The surrogate "%s" does not match one '
                                     'of the model outputs '
                                     % surrogate_name, ValueError)

    def execute(self):
        """If the training flag is set, train the metamodel. Otherwise,
        predict outputs.
        """
        if self._train:
            try:
                inputs = self.update_model_inputs()
                self.model.run(force=True)
            except Exception as err:
                if self.report_errors:
                    raise err
                else:
                    self._failed_training_msgs.append(str(err))
            else:  # if no exceptions are generated, save the data
                self._training_input_history.append(inputs)
                self.update_outputs_from_model()
                case_outputs = []
                for name, output_history in self._training_data.items():
                    case_outputs.append(('.'.join([self.name, name]),
                                         output_history[-1]))

                # Save the case, making sure to add our name to the local
                # input name since this Case is scoped to our parent
                # Assembly.
                case_inputs = [('.'.join([self.name, name]), val)
                               for name, val
                               in zip(self.surrogate_input_names(), inputs)]
                if self.recorder:
                    self.recorder.record(Case(inputs=case_inputs,
                                              outputs=case_outputs))
            self._train = False
        else:
            # If no surrogates are defined, just run the model and get the
            # outputs.
            if self.default_surrogate is None and \
               not self._surrogate_overrides:
                inputs = self.update_model_inputs()
                self.model.run()
                self.update_outputs_from_model()
                return

            if self._new_train_data:
                if len(self._training_input_history) < 2:
                    self.raise_exception("ERROR: need at least 2 training "
                                         "points!", RuntimeError)

                # figure out if we have any constant training inputs
                tcases = self._training_input_history
                in_hist = tcases[0][:]
                # start off assuming every input is constant
                idxlist = range(len(in_hist))
                self._const_inputs = dict(zip(idxlist, in_hist))
                for i in idxlist:
                    val = in_hist[i]
                    for case in range(1, len(tcases)):
                        if val != tcases[case][i]:
                            del self._const_inputs[i]
                            break

                if len(self._const_inputs) == len(in_hist):
                    self.raise_exception("ERROR: all training inputs are "
                                         "constant.", RuntimeError)
                elif len(self._const_inputs) > 0:
                    # Some inputs are constant, so we have to remove them
                    # from the training set.
                    training_input_history = []
                    for inputs in self._training_input_history:
                        training_input_history.append(
                            [val for i, val in enumerate(inputs)
                             if i not in self._const_inputs])
                else:
                    training_input_history = self._training_input_history

                for name, output_history in self._training_data.items():
                    surrogate = self._get_surrogate(name)
                    if surrogate is not None:
                        surrogate.train(training_input_history,
                                        output_history)

                self._new_train_data = False

            inputs = []
            for i, name in enumerate(self.surrogate_input_names()):
                val = self.get(name)
                cval = self._const_inputs.get(i, _missing)
                if cval is _missing:
                    inputs.append(val)
                elif val != cval:
                    self.raise_exception("ERROR: training input '%s' was a"
                                         " constant value of (%s) but the"
                                         " value has changed to (%s)."
                                         % (name, cval, val), ValueError)

            for name in self._training_data:
                surrogate = self._get_surrogate(name)
                # copy output to boundary
                if surrogate is None:
                    self._set_output(name, self.model.get(name))
                else:
                    self._set_output(name, surrogate.predict(inputs))
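# The interface rules enforced in check_config() look like this in use.
# `MyMetaModel` and `SomeComponent` are hypothetical stand-ins for a
# concrete subclass and the model it wraps.
mm = MyMetaModel()
mm.model = SomeComponent()

# Expose only selected variables; 'includes' and 'excludes' are mutually
# exclusive, so setting both raises RuntimeError at check_config().
mm.includes = ['x', 'f']

mm.train_next = True  # fire event: the next run() trains
mm.run()

mm.reset_training_data = True  # fire event: discard accumulated history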
class CaseIteratorDriver(Driver):
    """ A base class for Drivers that run sets of cases. Concurrent
    evaluation is supported, with the various evaluations executed across
    servers obtained from the :class:`ResourceAllocationManager`.
    """

    implements(IHasParameters, IHasResponses)

    sequential = Bool(True, iotype='in',
                      desc='If True, evaluate cases sequentially.')

    reload_model = Bool(True, iotype='in',
                        desc='If True, reload the model between executions.')

    error_policy = Enum(values=('ABORT', 'RETRY'), iotype='in',
                        desc='If ABORT, any error stops the evaluation of'
                             ' the whole set of cases.')

    max_retries = Int(1, low=0, iotype='in',
                      desc='Maximum number of times to retry a failed case.')

    extra_resources = Dict(iotype='in',
                           desc='Extra resource requirements (unusual).')

    ignore_egg_requirements = Bool(False, iotype='in',
                                   desc='If True, no distribution or orphan'
                                        ' requirements will be included in'
                                        ' the generated egg.')

    def __init__(self, *args, **kwargs):
        super(CaseIteratorDriver, self).__init__(*args, **kwargs)
        self._iter = None  # Set to None when iterator is empty.
        self._replicants = 0
        self._abort_exc = None  # Set if error_policy == ABORT.

        self._egg_file = None
        self._egg_required_distributions = None
        self._egg_orphan_modules = None

        self._reply_q = None      # Replies from server threads.
        self._server_lock = None  # Lock for server data.

        # Various per-server data keyed by server name.
        self._servers = {}

        self._seq_server = _ServerData(None)

        self._todo = []   # Cases grabbed during server startup.
        self._rerun = []  # Cases that failed and should be retried.
        self._generation = 0  # Used to keep worker names unique.

        # var wasn't showing up in parent depgraph without this
        self.error_policy = 'ABORT'

    def set_inputs(self, generator):
        """ Set case inputs from generator values. """
        inputs = array([vals for vals in generator])
        start = 0
        for path, param in self.get_parameters().items():
            size = param.size
            if size == 1:
                values = inputs[:, start]
            else:
                end = start + size
                values = inputs[:, start:end].reshape((-1,) + param.shape)
            start += size
            if isinstance(path, tuple):
                path = path[0]  # Use first target of ParameterGroup.
            path = make_legal_path(path)
            self.set('case_inputs.' + path, list(values))

    def execute(self):
        """ Runs all cases and records results in `recorder`.
        Uses :meth:`setup` and :meth:`resume` with default arguments.
        """
        self._setup()

        try:
            if self.sequential:
                self._logger.info('Start sequential evaluation.')
                server = self._servers[None] = self._seq_server
                server.top = self.parent
                while self._iter is not None:
                    try:
                        case = self._iter.next()
                        self._todo.append(case)
                        server.exception = None
                        server.case = None
                        server.state = _LOADING  # 'server' already loaded.
                        while self._server_ready(server):
                            pass
                    except StopIteration:
                        if not self._rerun:
                            self._iter = None
                            break
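# set_inputs() consumes one tuple per case and slices it column-by-column
# across the parameters in the order they were added. A sketch, assuming a
# driver with scalar parameters 'comp.x' and 'comp.y' already added:
import itertools

driver.set_inputs(itertools.product([0.0, 1.0], [10.0, 20.0]))
# -> case_inputs.comp.x = [0.0, 0.0, 1.0, 1.0]
#    case_inputs.comp.y = [10.0, 20.0, 10.0, 20.0]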
class ExternalCode(Component): """ Run an external code as a component. The component can be configured to run the code on a remote server. See :meth:`execute`. Default stdin is the 'null' device, default stdout is the console, and default stderr is ``error.out``. """ STDOUT = shellproc.STDOUT DEV_NULL = shellproc.DEV_NULL # pylint: disable-msg=E1101 command = List(Str, desc='The command to be executed.') env_vars = Dict({}, iotype='in', desc='Environment variables required by the command.') resources = Dict({}, iotype='in', desc='Resources required to run this component.') poll_delay = Float(0., low=0., units='s', iotype='in', desc='Delay between polling for command completion.' ' A value of zero will use an internally computed' ' default.') timeout = Float(0., low=0., iotype='in', units='s', desc='Maximum time to wait for command completion.' ' A value of zero implies an infinite wait.') timed_out = Bool(False, iotype='out', desc='True if the command timed-out.') return_code = Int(0, iotype='out', desc='Return code from the command.') def __init__(self): super(ExternalCode, self).__init__() self.check_external_outputs = True self.stdin = self.DEV_NULL self.stdout = None self.stderr = "error.out" self._process = None self._server = None # This gets used by remote server. def get_access_controller(self): #pragma no cover """ Return :class:`AccessController` for this object. """ return _AccessController() @rbac(('owner', 'user')) def set(self, path, value, index=None, force=False): """ Don't allow setting of 'command' or 'resources' by a remote client. """ if path in ('command', 'resources', 'get_access_controller') \ and remote_access(): self.raise_exception('%r may not be set() remotely' % path, RuntimeError) return super(ExternalCode, self).set(path, value, index, force) def execute(self): """ Runs the specified command. 1. Checks that all external input files exist. 2. Runs the command. 3. Checks that all external output files exist. If a subclass generates outputs (such as postprocessing results), then it should set attribute ``check_external_outputs`` False and call :meth:`check_files` itself. If `resources` have been specified, an appropriate server is allocated and the command is run on that server. Otherwise the command is run locally. When running remotely, the following resources are set: ================ ===================================== Key Value ================ ===================================== job_name self.get_pathname() ---------------- ------------------------------------- remote_command self.command (first item) ---------------- ------------------------------------- args self.command (2nd through last items) ---------------- ------------------------------------- job_environment self.env_vars ---------------- ------------------------------------- input_path self.stdin ---------------- ------------------------------------- output_path self.stdout ---------------- ------------------------------------- error_path self.stderr (if != STDOUT) ---------------- ------------------------------------- join_files If self.stderr == STDOUT ---------------- ------------------------------------- wallclock_time self.timeout (if non-zero) ================ ===================================== .. note:: Input files to be sent to the remote server are defined by :class:`FileMetadata` entries in the `external_files` list with `input` True. Similarly, output files to be retrieved from the remote server are defined by entries with `output` True. .. 
warning:: Any file **not** labeled with `binary` True will undergo newline translation if the local and remote machines have different newline representations. Newline translation will corrupt a file which is binary but hasn't been labeled as such. """ self.return_code = -12345678 self.timed_out = False if not self.command: self.raise_exception('Empty command list', ValueError) self.check_files(inputs=True) return_code = None error_msg = '' try: if self.resources: return_code, error_msg = self._execute_remote() else: return_code, error_msg = self._execute_local() if return_code is None: if self._stop: self.raise_exception('Run stopped', RunStopped) else: self.timed_out = True self.raise_exception('Timed out', RunInterrupted) elif return_code: if isinstance(self.stderr, str): if os.path.exists(self.stderr): stderrfile = open(self.stderr, 'r') error_desc = stderrfile.read() stderrfile.close() err_fragment = "\nError Output:\n%s" % error_desc else: err_fragment = "\n[stderr %r missing]" % self.stderr else: err_fragment = error_msg self.raise_exception('return_code = %d%s' \ % (return_code, err_fragment), RuntimeError) if self.check_external_outputs: self.check_files(inputs=False) finally: self.return_code = -999999 if return_code is None else return_code def check_files(self, inputs): """ Check that all 'specific' input or output external files exist. If an external file path specifies a pattern, it is *not* checked. inputs: bool If True, check inputs; otherwise outputs. """ # External files. for metadata in self.external_files: path = metadata.path for ch in ('*?['): if ch in path: break else: if inputs: if not metadata.get('input', False): continue else: if not metadata.get('output', False): continue if not os.path.exists(path): iotype = 'input' if inputs else 'output' self.raise_exception('missing %s file %r' % (iotype, path), RuntimeError) # Stdin, stdout, stderr. if inputs and self.stdin and self.stdin != self.DEV_NULL: if not os.path.exists(self.stdin): self.raise_exception('missing stdin file %r' % self.stdin, RuntimeError) if not inputs and self.stdout and self.stdout != self.DEV_NULL: if not os.path.exists(self.stdout): self.raise_exception('missing stdout file %r' % self.stdout, RuntimeError) if not inputs and self.stderr \ and self.stderr != self.DEV_NULL \ and self.stderr != self.STDOUT \ and (not self.resources or \ not self.resources.get('join_files')): if not os.path.exists(self.stderr): self.raise_exception('missing stderr file %r' % self.stderr, RuntimeError) # File variables. if inputs: for pathname, obj in self.items(iotype='in', recurse=True): if isinstance(obj, FileRef): path = self.get_metadata(pathname, 'local_path') if path and not os.path.exists(path): self.raise_exception("missing 'in' file %r" % path, RuntimeError) else: for pathname, obj in self.items(iotype='out', recurse=True): if isinstance(obj, FileRef): if not os.path.exists(obj.path): self.raise_exception( "missing 'out' file %r" % obj.path, RuntimeError) def _execute_local(self): """ Run command. 
""" self._logger.info('executing %s...', self.command) start_time = time.time() # check to make sure command exists if isinstance(self.command, basestring): program_to_execute = self.command else: program_to_execute = self.command[0] command_full_path = find_executable(program_to_execute) if not command_full_path: self.raise_exception( "The command to be executed, '%s', cannot be found" % program_to_execute, ValueError) self._process = \ shellproc.ShellProc(self.command, self.stdin, self.stdout, self.stderr, self.env_vars) self._logger.debug('PID = %d', self._process.pid) try: return_code, error_msg = \ self._process.wait(self.poll_delay, self.timeout) finally: self._process.close_files() self._process = None et = time.time() - start_time if et >= 60: #pragma no cover self._logger.info('elapsed time: %.1f sec.', et) return (return_code, error_msg) def _execute_remote(self): """ Allocate a server based on required resources, send inputs, run command, and retrieve results. """ rdesc = self.resources.copy() # Allocate server. self._server, server_info = RAM.allocate(rdesc) if self._server is None: self.raise_exception('Server allocation failed :-(', RuntimeError) if self._logger.level == logging.NOTSET: # By default avoid lots of protocol messages. self._server.set_log_level(logging.DEBUG) else: self._server.set_log_level(self._logger.level) return_code = -88888888 error_msg = '' try: # Create resource description for command. rdesc['job_name'] = self.get_pathname() or self.__class__.__name__ rdesc['remote_command'] = self.command[0] if len(self.command) > 1: rdesc['args'] = self.command[1:] if self.env_vars: rdesc['job_environment'] = self.env_vars if not self.stdin: self.raise_exception( 'Remote execution requires stdin of' ' DEV_NULL or filename, got %r' % self.stdin, ValueError) if self.stdin != self.DEV_NULL: rdesc['input_path'] = self.stdin if self.stdout: rdesc['output_path'] = self.stdout else: rdesc['output_path'] = '%s.stdout' % self.command[0] if self.stderr: if self.stderr == self.STDOUT: rdesc['join_files'] = True else: rdesc['error_path'] = self.stderr else: rdesc['error_path'] = '%s.stderr' % self.command[0] if self.timeout: if 'resource_limits' in rdesc: limits = rdesc['resource_limits'].copy() else: limits = {} limits['wallclock_time'] = self.timeout rdesc['resource_limits'] = limits # Send inputs. patterns = [] textfiles = [] for metadata in self.external_files: if metadata.get('input', False): patterns.append(metadata.path) if not metadata.binary: textfiles.append(metadata.path) for pathname, obj in self.items(iotype='in', recurse=True): if isinstance(obj, FileRef): local_path = self.get_metadata(pathname, 'local_path') if local_path: patterns.append(local_path) if not obj.binary: textfiles.append(local_path) if self.stdin and self.stdin != self.DEV_NULL: patterns.append(self.stdin) textfiles.append(self.stdin) if patterns: self._send_inputs(patterns, textfiles) else: self._logger.debug('No input files') # Run command. self._logger.info('executing %s...', self.command) start_time = time.time() return_code, error_msg = \ self._server.execute_command(rdesc) et = time.time() - start_time if et >= 60: #pragma no cover self._logger.info('elapsed time: %.1f sec.', et) # Retrieve results. 
            patterns = []
            textfiles = []
            for metadata in self.external_files:
                if metadata.get('output', False):
                    patterns.append(metadata.path)
                    if not metadata.binary:
                        textfiles.append(metadata.path)
            for pathname, obj in self.items(iotype='out', recurse=True):
                if isinstance(obj, FileRef):
                    patterns.append(obj.path)
                    if not obj.binary:
                        textfiles.append(obj.path)
            patterns.append(rdesc['output_path'])
            textfiles.append(rdesc['output_path'])
            if self.stderr != self.STDOUT:
                patterns.append(rdesc['error_path'])
                textfiles.append(rdesc['error_path'])
            self._retrieve_results(patterns, textfiles)

            # Echo stdout if not redirected.
            if not self.stdout:
                name = rdesc['output_path']
                if os.path.exists(name):
                    with open(name, 'rU') as inp:
                        sys.stdout.write(inp.read())
                    os.remove(name)
                else:
                    sys.stdout.write('\n[No stdout available]\n')

            # Echo stderr if not redirected.
            if not self.stderr:
                name = rdesc['error_path']
                if os.path.exists(name):
                    with open(name, 'rU') as inp:
                        sys.stderr.write(inp.read())
                    os.remove(name)
                else:
                    sys.stderr.write('\n[No stderr available]\n')
        finally:
            RAM.release(self._server)
            self._server = None

        return (return_code, error_msg)

    def _send_inputs(self, patterns, textfiles):
        """ Sends input files matching `patterns`. """
        self._logger.info('sending inputs...')
        start_time = time.time()

        filename = 'inputs.zip'
        pfiles, pbytes = pack_zipfile(patterns, filename, self._logger)
        try:
            filexfer(None, filename, self._server, filename, 'b', False)
            ufiles, ubytes = self._server.unpack_zipfile(filename,
                                                         textfiles=textfiles)
        finally:
            os.remove(filename)
            self._server.remove(filename)

        # Difficult to force file transfer error.
        if ufiles != pfiles or ubytes != pbytes:  #pragma no cover
            msg = 'Inputs xfer error: %d:%d vs. %d:%d' \
                  % (ufiles, ubytes, pfiles, pbytes)
            self.raise_exception(msg, RuntimeError)

        et = time.time() - start_time
        if et >= 60:  #pragma no cover
            self._logger.info('elapsed time: %.1f sec.', et)

    def _retrieve_results(self, patterns, textfiles):
        """ Retrieves result files matching `patterns`. """
        self._logger.info('retrieving results...')
        start_time = time.time()

        filename = 'outputs.zip'
        pfiles, pbytes = self._server.pack_zipfile(patterns, filename)
        filexfer(self._server, filename, None, filename, 'b', False)

        # Valid, but empty, file causes unpack_zipfile() problems.
        try:
            if os.path.getsize(filename) > 0:
                ufiles, ubytes = unpack_zipfile(filename, logger=self._logger,
                                                textfiles=textfiles)
            else:
                ufiles, ubytes = 0, 0
        finally:
            os.remove(filename)
            self._server.remove(filename)

        # Difficult to force file transfer error.
        if ufiles != pfiles or ubytes != pbytes:  #pragma no cover
            msg = 'Results xfer error: %d:%d vs. %d:%d' \
                  % (ufiles, ubytes, pfiles, pbytes)
            self.raise_exception(msg, RuntimeError)

        et = time.time() - start_time
        if et >= 60:  #pragma no cover
            self._logger.info('elapsed time: %.1f sec.', et)

    def stop(self):
        """ Stop the external code. """
        self._stop = True
        if self._process:
            self._process.terminate()

    def copy_inputs(self, inputs_dir, patterns):
        """ Copy inputs from `inputs_dir` that match `patterns`.

        inputs_dir: string
            Directory to copy files from. Relative paths are evaluated from
            the component's execution directory.

        patterns: list or string
            One or more :mod:`glob` patterns to match against.

        This can be useful for resetting problem state.
""" self._logger.info('copying initial inputs from %s...', inputs_dir) with self.dir_context: if not os.path.exists(inputs_dir): self.raise_exception("inputs_dir '%s' does not exist" \ % inputs_dir, RuntimeError) self._copy(inputs_dir, patterns) def copy_results(self, results_dir, patterns): """ Copy files from `results_dir` that match `patterns`. results_dir: string Directory to copy files from. Relative paths are evaluated from the component's execution directory. patterns: list or string One or more :mod:`glob` patterns to match against. This can be useful for workflow debugging when the external code takes a long time to execute. """ self._logger.info('copying precomputed results from %s...', results_dir) with self.dir_context: if not os.path.exists(results_dir): self.raise_exception("results_dir '%s' does not exist" \ % results_dir, RuntimeError) self._copy(results_dir, patterns) def _copy(self, directory, patterns): """ Copy files from `directory` that match `patterns` to the current directory and ensure they are writable. directory: string Directory to copy files from. patterns: list or string One or more :mod:`glob` patterns to match against. """ if isinstance(patterns, basestring): patterns = [patterns] for pattern in patterns: pattern = os.path.join(directory, pattern) for src_path in sorted(glob.glob(pattern)): dst_path = os.path.basename(src_path) self._logger.debug(' %s', src_path) shutil.copy(src_path, dst_path) # Ensure writable. mode = os.stat(dst_path).st_mode mode |= stat.S_IWUSR os.chmod(dst_path, mode)
class MyComponent(Component): arr = Dict(key_trait=Int, value_trait=Float(units='MW'), iotype='in') def execute(self): pass
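# Behavior sketch (hedged, editor's addition): a keyed/typed Dict like the
# one above validates both keys and values on assignment, per standard
# Traits semantics; this is assumed behavior, not taken from this file.
comp = MyComponent()
comp.arr = {1: 60.0, 2: 120.5}   # OK: Int keys, Float values (units='MW')
try:
    comp.arr = {'one': 60.0}     # should fail key_trait validation
except Exception as err:
    print 'rejected as expected: %s' % err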
class pyOptDriver(Driver): """ Driver wrapper for pyOpt. """ implements(IHasParameters, IHasConstraints, IHasObjective, IOptimizer) optimizer = Enum('ALPSO', _check_imports(), iotype='in', desc='Name of optimizers to use') title = Str('Optimization using pyOpt', iotype='in', desc='Title of this optimization run') options = Dict(iotype='in', desc='Dictionary of optimization parameters') print_results = Bool(True, iotype='in', desc='Print pyOpt results if True') pyopt_diff = Bool(False, iotype='in', desc='Set to True to let pyOpt calculate the gradient') store_hst = Bool(False, iotype='in', desc='Store optimization history if True') hot_start = Bool(False, iotype='in', desc='resume optimization run using stored history if True') def __init__(self): """Initialize pyopt - not much needed.""" super(pyOptDriver, self).__init__() self.pyOpt_solution = None self.param_type = {} self.nparam = None self.inputs = None self.objs = None self.cons = None def execute(self): """pyOpt execution. Note that pyOpt controls the execution, and the individual optimizers control the iteration.""" self.pyOpt_solution = None self.run_iteration() opt_prob = Optimization(self.title, self.objfunc, var_set={}, obj_set={}, con_set={}) # Add all parameters self.param_type = {} self.nparam = self.total_parameters() for name, param in self.get_parameters().iteritems(): # We need to identify Enums, Lists, Dicts metadata = param.get_metadata()[1] values = param.evaluate() # Assuming uniform enumerated, discrete, or continuous for now. val = values[0] choices = [] if 'values' in metadata and \ isinstance(metadata['values'], (list, tuple, array, set)): vartype = 'd' choices = metadata['values'] elif isinstance(val, bool): vartype = 'd' choices = [True, False] elif isinstance(val, (int, int32, int64)): vartype = 'i' elif isinstance(val, (float, float32, float64)): vartype = 'c' else: msg = 'Only continuous, discrete, or enumerated variables' \ ' are supported. %s is %s.' % (name, type(val)) self.raise_exception(msg, ValueError) self.param_type[name] = vartype names = param.names lower_bounds = param.get_low() upper_bounds = param.get_high() for i in range(param.size): opt_prob.addVar(names[i], vartype, lower=lower_bounds[i], upper=upper_bounds[i], value=values[i], choices=choices) # Add all objectives for name in self.get_objectives(): opt_prob.addObj(name) # Add all equality constraints for name, con in self.get_eq_constraints().items(): if con.size > 1: for i in range(con.size): opt_prob.addCon('%s [%s]' % (name, i), type='e') else: opt_prob.addCon(name, type='e') # Add all inequality constraints for name, con in self.get_ineq_constraints().items(): if con.size > 1: for i in range(con.size): opt_prob.addCon('%s [%s]' % (name, i), type='i') else: opt_prob.addCon(name, type='i') self.inputs = self.list_param_group_targets() self.objs = self.list_objective_targets() self.cons = self.list_constraint_targets() # Instantiate the requested optimizer optimizer = self.optimizer try: exec('from pyOpt import %s' % optimizer) except ImportError: msg = "Optimizer %s is not available in this installation." 
            self.raise_exception(msg % optimizer, ImportError)

        optname = vars()[optimizer]
        opt = optname()

        # Set optimization options
        for option, value in self.options.iteritems():
            opt.setOption(option, value)

        # Execute the optimization problem
        if self.pyopt_diff:
            # Use pyOpt's internal finite difference
            opt(opt_prob, sens_type='FD',
                sens_step=self.gradient_options.fd_step,
                store_hst=self.store_hst, hot_start=self.hot_start)
        else:
            # Use OpenMDAO's differentiator for the gradient
            opt(opt_prob, sens_type=self.gradfunc,
                store_hst=self.store_hst, hot_start=self.hot_start)

        # Print results
        if self.print_results:
            print opt_prob.solution(0)

        # Pull optimal parameters back into framework and re-run, so that
        # framework is left in the right final state
        dvals = []
        for i in range(0, len(opt_prob.solution(0)._variables)):
            dvals.append(opt_prob.solution(0)._variables[i].value)

        # Integer parameters come back as floats, so we need to round them
        # and turn them into python integers before setting.
        if 'i' in self.param_type.values():
            for j, param in enumerate(self.get_parameters().keys()):
                if self.param_type[param] == 'i':
                    dvals[j] = int(round(dvals[j]))

        self.set_parameters(dvals)
        self.run_iteration()

        # Save the most recent solution.
        self.pyOpt_solution = opt_prob.solution(0)

    def objfunc(self, x, *args, **kwargs):
        """ Function that evaluates and returns the objective function and
        constraints. This function is passed to pyOpt's Optimization object
        and is called from its optimizers.

        x: array
            Design variables

        args and kwargs are also passed in, but aren't used.

        Returns

        f: array
            Objective function evaluated at design variables

        g: array
            Constraints evaluated at design variables

        fail: int
            0 for successful function evaluation
            1 for unsuccessful function evaluation
        """
        fail = 1
        f = []
        g = []

        try:
            # Note: Sometimes pyOpt sends us an x array that is larger than
            # the number of parameters. In the pyOpt examples, they just take
            # the first n entries as the parameters, so we do too.

            # Integer parameters come back as floats, so we need to round them
            # and turn them into python integers before setting.
            param_types = self.param_type
            if 'i' in param_types.values():
                j = 0
                for name, param in self.get_parameters().iteritems():
                    size = param.size
                    if param_types[name] == 'i':
                        self.set_parameter_by_name(name,
                                                   int(round(x[j:j + size])))
                    else:
                        self.set_parameter_by_name(name, x[j:j + size])
                    j += size
            else:
                self.set_parameters(x[0:self.nparam])

            # Execute the model
            self.run_iteration()

            # Get the objective function evaluations
            f = array(self.eval_objectives())

            # Get the constraint evaluations
            g = array(self.eval_constraints(self.parent))

            fail = 0

        except Exception as msg:
            # Exceptions seem to be swallowed by the C code, so this
            # should give the user more info than the dreaded "segfault"
            print "Exception: %s" % str(msg)
            print 70 * "="
            import traceback
            traceback.print_exc()
            print 70 * "="

        return f, g, fail

    def gradfunc(self, x, f, g, *args, **kwargs):
        """ Function that evaluates and returns the gradient of the objective
        function and constraints. This function is passed to pyOpt's
        Optimization object and is called from its optimizers.

        x: array
            Design variables

        f: array
            Objective function evaluated at design variables
            Note: unneeded in OpenMDAO, so unused

        g: array
            Constraints evaluated at design variables
            Note: unneeded in OpenMDAO, so unused

        args and kwargs are also passed in, but aren't used.
Returns d_obj: array Gradient of the objective d_con: array Gradient of the constraints fail: int 0 for successful function evaluation 1 for unsuccessful function evaluation """ fail = 1 df = [] dg = [] try: J = self.workflow.calc_gradient(self.inputs, self.objs + self.cons) nobj = len(self.objs) df = J[0:nobj, :] dg = J[nobj:, :] fail = 0 except Exception as msg: # Exceptions seem to be swallowed by the C code, so this # should give the user more info than the dreaded "segfault" print "Exception: %s" % str(msg) print 70*"=" import traceback traceback.print_exc() print 70*"=" return df, dg, fail
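# Usage sketch (hedged, editor's addition): wiring the driver above into an
# assembly. The pyopt_driver import path and the Paraboloid example component
# are assumptions; 'SLSQP'/'IPRINT' are just one optimizer/option pair pyOpt
# understands. The add_parameter/add_objective/add_constraint calls come from
# the IHasParameters/IHasObjective/IHasConstraints interfaces the driver
# implements above.
from openmdao.main.api import Assembly, set_as_top
from openmdao.examples.simple.paraboloid import Paraboloid  # assumed example
from pyopt_driver.pyopt_driver import pyOptDriver           # assumed path

top = set_as_top(Assembly())
top.add('comp', Paraboloid())
top.add('driver', pyOptDriver())
top.driver.workflow.add('comp')

top.driver.optimizer = 'SLSQP'        # must be reported by _check_imports()
top.driver.options = {'IPRINT': -1}   # forwarded to opt.setOption()
top.driver.print_results = False

top.driver.add_parameter('comp.x', low=-50., high=50.)
top.driver.add_parameter('comp.y', low=-50., high=50.)
top.driver.add_objective('comp.f_xy')
top.driver.add_constraint('comp.x - comp.y >= 15.0')

top.run()
print top.driver.pyOpt_solution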
class ExtCode(Component): """ Just a component with resources. """ resources = Dict({}, iotype='in', desc='Resources required to run this component.')
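# Usage sketch (hedged, editor's addition): filling the resources dictionary
# above. 'resource_limits' and 'job_environment' mirror keys consumed by
# _execute_remote() earlier; 'min_cpus' is an assumed allocator key, and the
# values are illustrative only.
ext = ExtCode()
ext.resources = {
    'min_cpus': 4,
    'resource_limits': {'wallclock_time': 600},   # seconds
    'job_environment': {'OMP_NUM_THREADS': '4'},
}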
def test_list_and_dict_slot_attributes(self): top = Assembly() top.add('sock', Slot(MyClass, iotype='in', desc='Stuff0')) top.add('list_sock', List(Slot(MyClass), iotype='in', desc='Stuff')) top.add( 'dict_sock', Dict(key_trait=Str, value_trait=Slot(MyClass), iotype='in', desc='Stuff2')) attrs = top.get_attributes(io_only=False) slot_attrs = attrs['Slots'] self.assertTrue({ 'name': 'list_sock', 'containertype': 'list', 'filled': [], 'klass': 'MyClass', 'desc': 'Stuff' } in slot_attrs) self.assertTrue({ 'name': 'dict_sock', 'containertype': 'dict', 'filled': [], 'klass': 'MyClass', 'desc': 'Stuff2' } in slot_attrs) self.assertTrue({ 'name': 'sock', 'containertype': 'singleton', 'filled': None, 'klass': 'MyClass', 'desc': 'Stuff0' } in slot_attrs) # Now fill some slots. top.list_sock.append(MyClass()) top.list_sock.append(MyClass()) top.dict_sock['Testing'] = MyClass() top.sock = MyClass() # Note, only tested with one item in the dict because it is not ordered, # and hash order will vary on different platforms. attrs = top.get_attributes(io_only=False) slot_attrs = attrs['Slots'] self.assertTrue({ 'name': 'list_sock', 'containertype': 'list', 'filled': ['MyClass', 'MyClass'], 'klass': 'MyClass', 'desc': 'Stuff' } in slot_attrs) self.assertTrue({ 'name': 'dict_sock', 'containertype': 'dict', 'filled': ['Testing'], 'klass': 'MyClass', 'desc': 'Stuff2' } in slot_attrs) self.assertTrue({ 'name': 'sock', 'containertype': 'singleton', 'filled': 'MyClass', 'klass': 'MyClass', 'desc': 'Stuff0' } in slot_attrs)