class OptLatinHypercube(Container):
    """IDOEgenerator which provides a Latin hypercube DOE sample set.
    The Morris-Mitchell sampling criterion of the DOE is optimized
    using an evolutionary algorithm.
    """
    implements(IDOEgenerator)

    num_samples = Int(20, desc="Number of sample points in the DOE sample set.")
    num_parameters = Int(2, desc="Number of parameters, or dimensions, for the DOE.")
    population = Int(20, desc="Size of the population used in the evolutionary optimization.")
    generations = Int(2, desc="Number of generations the optimization will evolve over.")
    norm_method = Enum(["1-norm", "2-norm"],
                       desc="Vector norm calculation method. "
                            "'1-norm' is faster but less accurate.")

    def __init__(self, num_samples=None, population=None, generations=None):
        super(OptLatinHypercube, self).__init__()
        self.qs = [1, 2, 5, 10, 20, 50, 100]  # list of q values to try for Phi_q optimization
        if num_samples is not None:
            self.num_samples = num_samples
        if population is not None:
            self.population = population
        if generations is not None:
            self.generations = generations

    def __iter__(self):
        """Return an iterator over our sets of input values."""
        return self._get_input_values()

    def _get_input_values(self):
        rand_doe = rand_latin_hypercube(self.num_samples, self.num_parameters)
        best_lhc = LHC_indivudal(rand_doe, q=1, p=_norm_map[self.norm_method])

        for q in self.qs:
            lh = LHC_indivudal(rand_doe, q, _norm_map[self.norm_method])
            lh_opt = _mmlhs(lh, self.population, self.generations)
            if lh_opt.mmphi() < best_lhc.mmphi():
                best_lhc = lh_opt

        for row in best_lhc:
            yield row
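# --- Usage sketch (illustrative, not from the original source) ---
# A minimal, hedged example of driving OptLatinHypercube directly. It assumes the
# module-level helpers referenced above (rand_latin_hypercube, LHC_indivudal,
# _mmlhs, _norm_map) are importable alongside the class.
def _opt_lhc_usage_sketch():
    """Collect the optimized DOE into a (num_samples x num_parameters) array."""
    import numpy
    gen = OptLatinHypercube(num_samples=10, population=5, generations=2)
    gen.num_parameters = 3
    points = numpy.array([row for row in gen])  # each row lies in [0, 1)**3
    return points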
class NeuralNet(Container):
    """ Surrogate model based on an artificial neural network using the ffnet
    package (http://ffnet.sourceforge.net/, GPL license).
    This class follows the ISurrogate interface.
    """

    implements(ISurrogate)

    n_hidden_nodes = Int(4, iotype='in',
                         desc='Number of hidden nodes in hidden layer of network')

    def __init__(self, n_hidden_nodes=4):
        """Initializes neural net surrogate model.

        n_hidden_nodes: int
            number of hidden nodes
        """
        self.n_hidden_nodes = n_hidden_nodes

    def get_uncertain_value(self, value):
        """Returns the value itself. The neural network does not provide an
        uncertainty estimate of its own.
        """
        return value

    def train(self, X, Y):
        """ Trains the neural network based on the given set of inputs
        and outputs.
        """
        inp = array(X)
        targ = array(Y)

        n_inputs = len(inp[0])

        # 1 output node because the surrogate model has only 1 output
        self._nn_surr = ffnet(mlgraph((n_inputs, self.n_hidden_nodes, 1)))

        # Start the training
        self._nn_surr.train_cg(inp, targ, disp=False)

        # Not sure why all these commented-out lines are here. -- KTM
        #self._nn_surr.train_genetic(inp, targ, individuals=10*n_inputs, generations=500)
        #self._nn_surr.train_tnc(inp, targ, maxfun=5000)
        #self._nn_surr.train_momentum(inp, targ, momentum=1)
        #self._nn_surr.train_rprop(inp, targ)
        #self._nn_surr.train_bfgs(inp, targ)

    def predict(self, X):
        """ Calculates a predicted value of the response based on the weights
        determined by the current neural network.
        """
        output = self._nn_surr(X)
        return output[0]
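# --- Usage sketch (illustrative, not from the original source) ---
# Trains the surrogate above on a toy 1-D function and queries it. Assumes the
# ffnet package is installed; prediction accuracy depends on the training data
# and the number of hidden nodes.
def _neuralnet_usage_sketch():
    import numpy
    x = [[xi] for xi in numpy.linspace(0.0, 1.0, 20)]
    y = [[numpy.sin(xi[0])] for xi in x]
    surr = NeuralNet(n_hidden_nodes=4)
    surr.train(x, y)
    return surr.predict([0.5])  # scalar prediction, roughly sin(0.5)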
class GeoWithDerivatives(BoxParametricGeometry): '''Adds derivative functions to the famous box geometry.''' implements(IParametricGeometry, IStaticGeometry) def apply_deriv(self, arg, result): pass def apply_derivT(self, arg, result): pass def provideJ(self): pass
class ListCaseIterator(object): """An iterator that returns :class:`Case` objects from a passed-in iterator of cases. This can be useful for runtime-generated cases from an optimizer, etc. """ implements(ICaseIterator) def __init__(self, cases): self._cases = cases def __getitem__(self, num): return self._cases[num]
class DumpCaseRecorder(object): """Dumps cases in a "pretty" form to a file-like object called "out" (defaults to ``sys.stdout``). If out is None, cases will be ignored. """ implements(ICaseRecorder) def __init__(self, out=sys.stdout): self.out = out def record(self, case): """Dump the given Case in a "pretty" form.""" if self.out: # if self.out is None, just do nothing self.out.write(str(case))
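# --- Usage sketch (illustrative, not from the original source) ---
# Any object with a write() method can stand in for sys.stdout, e.g. an
# in-memory buffer. `case` is assumed to be a populated Case object.
def _dump_recorder_sketch(case):
    from StringIO import StringIO  # Python 2, matching the code above
    buf = StringIO()
    rec = DumpCaseRecorder(out=buf)
    rec.record(case)               # writes str(case) into the buffer
    return buf.getvalue()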
class GeoMACHGeometry(object):
    '''A wrapper for a GeoMACH object that represents a specific instance of a
    geometry at a designated set of parameters. Parameters are not modifiable.
    This object is able to provide data for visualization.
    '''

    if USE_OPENDMAO:
        implements(IStaticGeometry)

    def get_visualization_data(self, wv):
        '''Fills the given WV_Wrapper object with data for faces,
        edges, colors, etc.

        wv: WV_Wrapper object
        '''
        if self._model is None:
            return []

        t0 = time.time()

        xyzs = self._model.oml0.P0[:, :3]
        tris = self.tris

        mins = numpy.min(xyzs, axis=0)
        maxs = numpy.max(xyzs, axis=0)

        box = [mins[0], mins[1], mins[2], maxs[0], maxs[1], maxs[2]]

        print 'xyz shape = %s' % list(xyzs.shape)
        #xyzs = xyzs.astype(numpy.float32).flatten(order='C')

        print 'len(tris) = ', len(tris)

        # wv.set_face_data(xyzs.astype(numpy.float32).flatten(order='C'),
        #                  tris.astype(numpy.int32).flatten(order='C'),
        #                  bbox=box, name="oml_surf")

        for i, tri in enumerate(tris):
            min_idx = int(numpy.min(tri))
            max_idx = int(numpy.max(tri))
            #print "type = %s" % type(tri[0,0])
            #print 'i: %d  len(tri): %d' % (i, len(tri))
            new_a = xyzs[min_idx:max_idx + 1]
            new_tri = tri - min_idx
            wv.set_face_data(new_a.astype(numpy.float32).flatten(order='C'),
                             new_tri.astype(numpy.int32).flatten(order='C'),
                             bbox=box, name="oml_surf%d" % i)
class ListCaseRecorder(object): """Stores cases in a list.""" implements(ICaseRecorder) def __init__(self): self.cases = [] self._cfg_map = {} def __len__(self): return len(self.cases) def startup(self): """ Nothing needed for a list case.""" pass def register(self, driver, inputs, outputs): """Register names for later record call from `driver`.""" self._cfg_map[driver] = (inputs, outputs) def record_constants(self, constants): """Record constant data - currently ignored.""" pass def record(self, driver, inputs, outputs, exc, case_uuid, parent_uuid): """Store the case in our internal list.""" in_names, out_names = self._cfg_map[driver] self.cases.append( Case(zip(in_names, inputs), zip(out_names, outputs), exc, case_uuid, parent_uuid)) def close(self): """Does nothing.""" return def get_iterator(self): '''Return ListCaseIterator that uses our current list.''' return ListCaseIterator(self.cases) def get_attributes(self, io_only=True): """ We need a custom get_attributes because we aren't using Traits to manage our changeable settings. This is unfortunate and should be changed to something that automates this somehow.""" attrs = {} attrs['type'] = type(self).__name__ return attrs
class DumbClass(object): implements(IImplicitComponent) def __init__(self, depgraph, name, inputs=('a','b'), outputs=('c','d'), states=(), resids=()): self.name = name self.dep = depgraph self._inputs = inputs[:] self._outputs = outputs[:] self._states = states[:] self._resids = resids[:] def get_pathname(self): return self.name def get(self, name): return getattr(self, name, None) def run(self, *args, **kwargs): self.dep.child_run_finished(self.name) def get_invalidation_type(self): return 'full' def list_inputs(self): return self._inputs def list_outputs(self): return self._outputs def list_states(self): return self._states def list_residuals(self): return self._resids def contains(self, name): return name in self._inputs or name in self._outputs or hasattr(self, name) def invalidate_deps(self, vnames=None): return None def _get_required_compnames(self): return [] def list_deriv_vars(self): return self._inputs, self._outputs
class ListCaseRecorder(object): """Stores cases in a list.""" implements(ICaseRecorder) def __init__(self): self.cases = [] def __len__(self): return len(self.cases) def record(self, case): """Store the case in our internal list.""" self.cases.append(case) def get_iterator(self): return ListCaseIterator(self.cases)
class MockSurrogate(Container): implements(ISurrogate, IMultiFiSurrogate) def __init__(self): super(MockSurrogate, self).__init__() def train(self, X, Y): pass def train_multifi(self, X, Y): pass def get_uncertain_value(self, value): return 0.0 def predict(self, x): return 0.0
class DumbClass(object): implements(IImplicitComponent) def __init__(self, depgraph, name, inputs=('a', 'b'), outputs=('c', 'd'), states=(), resids=()): self.name = name self._depgraph = depgraph self._inputs = inputs[:] self._outputs = outputs[:] self._states = states[:] self._resids = resids[:] def get_pathname(self): return self.name def get(self, name): return getattr(self, name, None) def list_inputs(self): return self._inputs def list_outputs(self): return self._outputs def list_states(self): return self._states def list_residuals(self): return self._resids def contains(self, name): return name in self._inputs or name in self._outputs or hasattr( self, name) def _get_required_compnames(self): return [] def list_deriv_vars(self): return self._inputs, self._outputs
class Iterator(object): """ Just keeps returning `case` until told to stop. """ implements(ICaseIterator) def __init__(self, case): super(Iterator, self).__init__() self.case = case self.stop = False def __iter__(self): return self._next_case() def _next_case(self): """ Generator which just returns copies of the given case. """ while not self.stop: yield copy.copy(self.case) raise StopIteration
class ListCaseIterator(list): """An iterator that returns :class:`Case` objects from a passed-in iterator of cases. This can be useful for runtime-generated cases from an optimizer, etc. """ implements(ICaseIterator) def __init__(self, cases): super(ListCaseIterator, self).__init__(cases) def get_attributes(self, io_only=True): """ We need a custom get_attributes because we aren't using Traits to manage our changeable settings. This is unfortunate, and should be changed to something that automates this somehow.""" attrs = {} attrs['type'] = type(self).__name__ return attrs
class UncertainDistribution(object): """Base class for uncertain variables.""" implements(IUncertainVariable) default_val_method = 'expected' def __init__(self, valmethod=None): self.valmethod = valmethod def getvalue(self): if self.valmethod: return getattr(self, self.valmethod)() return getattr(self, self.default_val_method)() def sample(self): raise NotImplementedError('The %s class has no sample() method' % self.__class__.__name__) def expected(self): raise NotImplementedError('The %s class has no expected() method' % self.__class__.__name__)
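# --- Illustrative subclass (hypothetical, not part of the original source) ---
# A concrete distribution only needs to override sample() and expected();
# getvalue() then dispatches to either valmethod or default_val_method.
class _ExampleGaussian(UncertainDistribution):
    """Hypothetical Gaussian uncertain variable, shown purely for illustration."""

    def __init__(self, mu=0.0, sigma=1.0, valmethod=None):
        super(_ExampleGaussian, self).__init__(valmethod=valmethod)
        self.mu = mu
        self.sigma = sigma

    def sample(self):
        import random
        return random.gauss(self.mu, self.sigma)

    def expected(self):
        return self.mu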
class Variable(TraitType): """An OpenMDAO-specific trait type that serves as a common base class for framework visible inputs and outputs. """ implements(IVariable) def __init__(self, default_value=NoDefaultSpecified, **metadata): if 'vartypename' not in metadata: metadata['vartypename'] = self.__class__.__name__ super(Variable, self).__init__(default_value=default_value, **metadata) def get_attribute(self, name, value, trait, meta): """Return the attribute dictionary for this variable. This dict is used by the GUI to populate the edit UI. The basic functionality that most variables need is provided here; you can overload this for special cases, like lists and dictionaries, or custom datatypes. name: str Name of variable. value: object The value of the variable. trait: CTrait The variable's trait. meta: dict Dictionary of metadata for this variable. """ attr = {} attr['name'] = name attr['type'] = type(value).__name__ attr['value'] = value for field in meta: if field not in gui_excludes: attr[field] = meta[field] return attr, None
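# Illustrative result (an assumption about typical metadata, not from the source):
# for a framework input named 'x' with value 3.0 and metadata such as
# {'units': 'm', 'desc': 'length'}, get_attribute returns roughly
#     ({'name': 'x', 'type': 'float', 'value': 3.0, 'units': 'm', 'desc': 'length'}, None)
# where every metadata field not listed in gui_excludes is copied through unchanged.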
class MyDriver(SimpleDriver): implements(ISolver) def execute(self): # Direct uvec setting uvec = self._system.vec['u'] #print uvec.keys() # Only can interact with the var that is in our node for num in [1.0, 2.0, 3.0]: if 'comp1.x' in uvec: uvec['comp1.x'] = num #print "SETTING", 'comp1.x', uvec['comp1.x'] if 'comp2.x' in uvec: uvec['comp2.x'] = num #print "SETTING", 'comp2.x', uvec['comp2.x'] self.run_iteration() def requires_derivs(self): return False
class Uniform(Container): """ DOEgenerator that performs a space-filling Design of Experiments with uniform distributions on all design variables. Plugs into the DOEgenerator socket on a DOEdriver.""" implements(IDOEgenerator) # pylint: disable-msg=E1101 num_parameters = Int(0, iotype="in", desc="Number of independent " "parameters in the DOE.") num_samples = Int(0, iotype="in", desc="Number of total samples in " "the DOE.") def __init__(self, num_samples=None, *args, **kwargs): super(Uniform, self).__init__(*args, **kwargs) self.num = 0 if num_samples is not None: self.num_samples = num_samples def __iter__(self): """Return an iterator over our sets of input values""" if self.num_samples < 2: raise ValueError( "Uniform distributions must have at least 2 samples. num_samples is set to less than 2." ) return self def next(self): if self.num < self.num_samples: self.num = self.num + 1 return random.uniform(0, 1, self.num_parameters) else: raise StopIteration()
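# --- Usage sketch (illustrative, not from the original source) ---
# Collects the generator's output into a num_samples x num_parameters array of
# uniform [0, 1) samples.
def _uniform_doe_sketch():
    import numpy
    gen = Uniform(num_samples=5)
    gen.num_parameters = 3
    return numpy.array([row for row in gen])  # shape (5, 3)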
class FullFactorial(HasTraits): """ DOEgenerator that performs a full-factorial Design of Experiments. Plugs into the DOEgenerator socket on a DOEdriver.""" implements(IDOEgenerator) # pylint: disable-msg=E1101 num_parameters = Int(0, iotype="in", desc="Number of independent " "parameters in the DOE.") num_levels = Int(0, iotype="in", desc="Number of levels of values for " "each parameter.") def __init__(self, num_levels=0, *args, **kwargs): super(FullFactorial, self).__init__(*args, **kwargs) self.num_levels = num_levels def __iter__(self): """Return an iterator over our sets of input values.""" return product(*[linspace(0., 1., self.num_levels) for i in range(self.num_parameters)])
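# --- Worked sketch (illustrative, not from the original source) ---
# With two parameters and three levels the iterator yields the 3**2 = 9 grid
# points of {0.0, 0.5, 1.0} x {0.0, 0.5, 1.0}.
def _full_factorial_sketch():
    ff = FullFactorial(num_levels=3)
    ff.num_parameters = 2
    return list(ff)  # [(0.0, 0.0), (0.0, 0.5), ..., (1.0, 1.0)], 9 tuples in total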
class ListCaseRecorder(object): """Stores cases in a list.""" implements(ICaseRecorder) def __init__(self): self.cases = [] def __len__(self): return len(self.cases) def record(self, case): """Store the case in our internal list.""" self.cases.append(case) def close(self): """Does nothing.""" return def get_iterator(self): '''Return ListCaseIterator that uses our current list.''' return ListCaseIterator(self.cases)
class ListCaseRecorder(object): """Stores cases in a list.""" implements(ICaseRecorder) def __init__(self): self.cases = [] self._cfg_map = {} def __len__(self): return len(self.cases) def startup(self): """ Nothing needed for a list case.""" pass def register(self, driver, inputs, outputs): """Register names for later record call from `driver`.""" self._cfg_map[driver] = (inputs, outputs) def record_constants(self, constants): """Record constant data - currently ignored.""" pass def record(self, driver, inputs, outputs, exc, case_uuid, parent_uuid): """Store the case in our internal list.""" in_names, out_names = self._cfg_map[driver] self.cases.append(Case(zip(in_names, inputs), zip(out_names, outputs), exc, case_uuid, parent_uuid)) def close(self): """Does nothing.""" return def get_iterator(self): '''Return ListCaseIterator that uses our current list.''' return ListCaseIterator(self.cases)
class LatinHypercube(Container): """IDOEgenerator which provides a Latin hypercube DOE sample set. """ implements(IDOEgenerator) num_samples = Int(20, desc="Number of sample points in the DOE sample set.") num_parameters = Int( 2, desc="Number of parameters, or dimensions, for the DOE.") seed = Int(None, iotype="in", desc="Random seed for the optimizer. Set to a specific value " "for repeatable results; otherwise leave as None for truly " "random seeding.") def __init__( self, num_samples=None, ): super(LatinHypercube, self).__init__() if num_samples is not None: self.num_samples = num_samples def __iter__(self): """Return an iterator over our sets of input values.""" if self.seed is not None: seed(self.seed) return self._get_input_values() def _get_input_values(self): rand_doe = rand_latin_hypercube(self.num_samples, self.num_parameters) for row in rand_doe: yield row
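# --- Hedged sketch of a plain random Latin hypercube (illustrative only) ---
# The real rand_latin_hypercube helper is defined elsewhere and may differ in
# detail; the usual construction is that each column is a random permutation of
# the n strata, jittered within each stratum.
def _rand_latin_hypercube_sketch(n, k):
    import numpy
    samples = numpy.empty((n, k))
    for j in range(k):
        perm = numpy.random.permutation(n)
        samples[:, j] = (perm + numpy.random.random(n)) / float(n)
    return samples  # n points in [0, 1)**k, exactly one per stratum in each column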
class ListCaseRecorder(object): """Stores cases in a list.""" implements(ICaseRecorder) def __init__(self): self.cases = [] def __len__(self): return len(self.cases) def startup(self): """ Nothing needed for a list case.""" pass def record(self, case): """Store the case in our internal list.""" self.cases.append(case) def close(self): """Does nothing.""" return def get_iterator(self): '''Return ListCaseIterator that uses our current list.''' return ListCaseIterator(self.cases) def get_attributes(self, io_only=True): """ We need a custom get_attributes because we aren't using Traits to manage our changeable settings. This is unfortunate and should be changed to something that automates this somehow.""" attrs = {} attrs['type'] = type(self).__name__ return attrs
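# --- Usage sketch (illustrative, not from the original source) ---
# The list recorder and list iterator pair up directly; `cases` is assumed to be
# an iterable of populated Case objects.
def _list_recorder_sketch(cases):
    rec = ListCaseRecorder()
    for case in cases:
        rec.record(case)
    return [c for c in rec.get_iterator()]  # the same Case objects back out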
class pyOptSparseDriver(Driver): """ Driver wrapper for pyOpt. """ implements(IHasParameters, IHasConstraints, IHasObjective, IOptimizer, IHas2SidedConstraints) optimizer = Enum('ALPSO', _check_imports(), iotype='in', desc='Name of optimizers to use') title = Str('Optimization using pyOpt', iotype='in', desc='Title of this optimization run') options = Dict(iotype='in', desc='Dictionary of optimization parameters') print_results = Bool(True, iotype='in', desc='Print pyOpt results if True') pyopt_diff = Bool(False, iotype='in', desc='Set to True to let pyOpt calculate the gradient') exit_flag = Int(0, iotype="out", desc="0 for fail, 1 for ok") def __init__(self, n_x=None): """Initialize pyopt n_x: number of design variables""" super(pyOptSparseDriver, self).__init__() #create lb and ub inputs so external components can set the bounds self.n_x = None if n_x is not None: shape = (n_x, ) self.n_x = n_x self.add( 'lb', Array( np.zeros(shape), iotype="in", desc= "lower bounds for the design variables, which will override values given in the add_parameter", shape=shape)) self.add( 'ub', Array( np.zeros(shape), iotype="in", desc= "upper bounds for the design variables, which will override values given in the add_parameter", shape=shape)) self.pyOpt_solution = None self.param_type = {} self.nparam = None self.objs = None self.nlcons = None self.lin_jacs = {} def execute(self): """pyOpt execution. Note that pyOpt controls the execution, and the individual optimizers control the iteration.""" self.pyOpt_solution = None self.run_iteration() opt_prob = Optimization(self.title, self.objfunc) # Add all parameters self.param_type = {} self.nparam = self.total_parameters() param_list = [] #need a counter for lb and ub arrays i_param = 0 for name, param in self.get_parameters().iteritems(): # We need to identify Enums, Lists, Dicts metadata = param.get_metadata()[1] values = param.evaluate() # Assuming uniform enumerated, discrete, or continuous for now. val = values[0] n_vals = len(values) choices = [] if 'values' in metadata and \ isinstance(metadata['values'], (list, tuple, array, set)): vartype = 'd' choices = metadata['values'] elif isinstance(val, bool): vartype = 'd' choices = [True, False] elif isinstance(val, (int, int32, int64)): vartype = 'i' elif isinstance(val, (float, float32, float64)): vartype = 'c' else: msg = 'Only continuous, discrete, or enumerated variables' \ ' are supported. %s is %s.' % (name, type(val)) self.raise_exception(msg, ValueError) self.param_type[name] = vartype if self.n_x is None: lower_bounds = param.get_low() upper_bounds = param.get_high() else: lower_bounds = self.lb[i_param:i_param + n_vals] upper_bounds = self.ub[i_param:i_param + n_vals] i_param += n_vals opt_prob.addVarGroup(name, n_vals, type=vartype, lower=lower_bounds, upper=upper_bounds, value=values, choices=choices) param_list.append(name) # Add all objectives for name, obj in self.get_objectives().iteritems(): name = '%s.out0' % obj.pcomp_name opt_prob.addObj(name) # Calculate and save gradient for any linear constraints. 
lcons = self.get_constraints(linear=True).values() + \ self.get_2sided_constraints(linear=True).values() if len(lcons) > 0: lcon_names = ['%s.out0' % obj.pcomp_name for obj in lcons] self.lin_jacs = self.workflow.calc_gradient(param_list, lcon_names, return_format='dict') #print "Linear Gradient" #print self.lin_jacs # Add all equality constraints nlcons = [] for name, con in self.get_eq_constraints().iteritems(): size = con.size lower = zeros((size)) upper = zeros((size)) name = '%s.out0' % con.pcomp_name if con.linear is True: opt_prob.addConGroup(name, size, lower=lower, upper=upper, linear=True, wrt=param_list, jac=self.lin_jacs[name]) else: opt_prob.addConGroup(name, size, lower=lower, upper=upper) nlcons.append(name) # Add all inequality constraints for name, con in self.get_ineq_constraints().iteritems(): size = con.size upper = zeros((size)) name = '%s.out0' % con.pcomp_name if con.linear is True: opt_prob.addConGroup(name, size, upper=upper, linear=True, wrt=param_list, jac=self.lin_jacs[name]) else: opt_prob.addConGroup(name, size, upper=upper) nlcons.append(name) # Add all double_sided constraints for name, con in self.get_2sided_constraints().iteritems(): size = con.size upper = con.high * ones((size)) lower = con.low * ones((size)) name = '%s.out0' % con.pcomp_name if con.linear is True: opt_prob.addConGroup(name, size, upper=upper, lower=lower, linear=True, wrt=param_list, jac=self.lin_jacs[name]) else: opt_prob.addConGroup(name, size, upper=upper, lower=lower) nlcons.append(name) self.objs = self.list_objective_targets() self.nlcons = nlcons # Instantiate the requested optimizer optimizer = self.optimizer try: exec('from pyoptsparse import %s' % optimizer) except ImportError: msg = "Optimizer %s is not available in this installation." % \ optimizer self.raise_exception(msg, ImportError) optname = vars()[optimizer] opt = optname() # Set optimization options for option, value in self.options.iteritems(): opt.setOption(option, value) # Execute the optimization problem if self.pyopt_diff: # Use pyOpt's internal finite difference sol = opt(opt_prob, sens='FD', sensStep=self.gradient_options.fd_step) else: # Use OpenMDAO's differentiator for the gradient sol = opt(opt_prob, sens=self.gradfunc) # Print results if self.print_results: print sol # Pull optimal parameters back into framework and re-run, so that # framework is left in the right final state dv_dict = sol.getDVs() param_types = self.param_type for name, param in self.get_parameters().iteritems(): val = dv_dict[name] if param_types[name] == 'i': val = int(round(val)) self.set_parameter_by_name(name, val) self.run_iteration() # Save the most recent solution. self.pyOpt_solution = sol try: exit_status = sol.optInform['value'] self.exit_flag = 1 if exit_status > 2: # bad self.exit_flag = 0 except KeyError: #nothing is here, so something bad happened! self.exit_flag = 0 def objfunc(self, dv_dict): """ Function that evaluates and returns the objective function and constraints. This function is passed to pyOpt's Optimization object and is called from its optimizers. dv_dict: dict Dictionary of design variable values Returns func_dict: dict Dictionary of all functional variables evaluated at design point fail: int 0 for successful function evaluation 1 for unsuccessful function evaluation """ fail = 1 func_dict = {} try: # Integer parameters come back as floats, so we need to round them # and turn them into python integers before setting. 
param_types = self.param_type for name, param in self.get_parameters().iteritems(): val = dv_dict[name] if param_types[name] == 'i': val = int(round(val)) self.set_parameter_by_name(name, val) # Execute the model #print "Setting DV" #print dv_dict self.run_iteration() # Get the objective function evaluations for key, obj in self.get_objectives().iteritems(): name = '%s.out0' % obj.pcomp_name func_dict[name] = array(obj.evaluate()) # Get the constraint evaluations for key, con in self.get_constraints().iteritems(): name = '%s.out0' % con.pcomp_name func_dict[name] = array(con.evaluate(self.parent)) # Get the double-sided constraint evaluations for key, con in self.get_2sided_constraints().iteritems(): name = '%s.out0' % con.pcomp_name func_dict[name] = array(con.evaluate(self.parent)) fail = 0 except Exception as msg: # Exceptions seem to be swallowed by the C code, so this # should give the user more info than the dreaded "segfault" print "Exception: %s" % str(msg) print 70 * "=" import traceback traceback.print_exc() print 70 * "=" #print "Functions calculated" #print func_dict return func_dict, fail def gradfunc(self, dv_dict, func_dict): """ Function that evaluates and returns the gradient of the objective function and constraints. This function is passed to pyOpt's Optimization object and is called from its optimizers. dv_dict: dict Dictionary of design variable values func_dict: dict Dictionary of all functional variables evaluated at design point Returns sens_dict: dict Dictionary of dictionaries for gradient of each dv/func pair fail: int 0 for successful function evaluation 1 for unsuccessful function evaluation """ fail = 1 sens_dict = {} try: sens_dict = self.workflow.calc_gradient(dv_dict.keys(), self.objs + self.nlcons, return_format='dict') #for key, value in self.lin_jacs.iteritems(): # sens_dict[key] = value fail = 0 except Exception as msg: # Exceptions seem to be swallowed by the C code, so this # should give the user more info than the dreaded "segfault" print "Exception: %s" % str(msg) print 70 * "=" import traceback traceback.print_exc() print 70 * "=" #print "Derivatives calculated" #print dv_dict #print sens_dict return sens_dict, fail
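# --- Shape sketch of the callback contract (illustrative names, an assumption) ---
# pyOptSparse passes design variables in a dict keyed by the VarGroup names added
# above and expects functions back keyed by the '<pcomp_name>.out0' names that were
# registered as objectives and constraints, e.g.:
#     dv_dict   = {'comp.x': array([0.5, 1.2])}
#     func_dict = {'_pseudo_0.out0': array([3.1]),          # objective value
#                  '_pseudo_1.out0': array([0.0, -0.2])}    # constraint group
# gradfunc returns the nested dict produced by calc_gradient(return_format='dict'),
# e.g. sens_dict['_pseudo_0.out0']['comp.x'] -> a 1 x 2 Jacobian block.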
class SLSQPdriver(DriverUsesDerivatives): """Minimize a function using the Sequential Least SQuares Programming (SLSQP) method. SLSQP is a gradient optimizer that can handle both equality and inequality constraints. Note: Constraints should be added using the OpenMDAO convention (positive = violated). """ implements(IHasParameters, IHasConstraints, IHasObjective) # pylint: disable-msg=E1101 accuracy = Float(1.0e-6, iotype='in', desc = 'Convergence accuracy') maxiter = Int(50, iotype='in', desc = 'Maximum number of iterations.') iprint = Enum(0, [0, 1, 2, 3], iotype='in', desc = 'Controls the frequency of output: 0 (no output),1,2,3.') iout = Int(6, iotype='in', desc = 'Fortran output unit. Leave this at 6 for STDOUT.') output_filename = Str('slsqp.out', iotype='in', desc = 'Name of output file (if iout not 6).') error_code = Int(0, iotype='out', desc = 'Error code returned from SLSQP.') def __init__(self, *args, **kwargs): super(SLSQPdriver, self).__init__(*args, **kwargs) self.error_messages = { -1 : "Gradient evaluation required (g & a)", 1 : "Function evaluation required (f & c)", 2 : "More equality constraints than independent variables", 3 : "More than 3*n iterations in LSQ subproblem", 4 : "Inequality constraints incompatible", 5 : "Singular matrix E in LSQ subproblem", 6 : "Singular matrix C in LSQ subproblem", 7 : "Rank-deficient equality constraint subproblem HFTI", 8 : "Positive directional derivative for linesearch", 9 : "Iteration limit exceeded", } self.x = zeros(0,'d') self.x_lower_bounds = zeros(0,'d') self.x_upper_bounds = zeros(0,'d') # We auto-fill the slot because the gradient is required # in this implementation self.differentiator = FiniteDifference() def start_iteration(self): """Perform initial setup before iteration loop begins.""" if not self.differentiator: msg = 'A differentiator must be socketed for this driver.' 
self.raise_exception(msg, RuntimeError) self.nparam = len(self.get_parameters().values()) self.ncon = len(self.get_constraints()) self.neqcon = len(self.get_eq_constraints()) # get the initial values of the parameters self.x = zeros(self.nparam,'d') params = self.get_parameters().values() for i, val in enumerate(params): self.x[i] = val.evaluate(self.parent) # create lower and upper bounds arrays self.x_lower_bounds = zeros(self.nparam) self.x_upper_bounds = zeros(self.nparam) for i, param in enumerate(params): self.x_lower_bounds[i] = param.low self.x_upper_bounds[i] = param.high self.ff = 0 self.nfunc = 0 self.ngrad = 0 self._continue = True def run_iteration(self): """ Note: slsqp controls the looping.""" n = self.nparam m = self.ncon meq = self.neqcon la = max(m,1) self.gg = zeros([la], 'd') df = zeros([n+1], 'd') dg = zeros([la, n+1], 'd') mineq = m - meq + 2*(n+1) lsq = (n+1)*((n+1)+1) + meq*((n+1)+1) + mineq*((n+1)+1) lsi = ((n+1)-meq+1)*(mineq+2) + 2*mineq lsei = ((n+1)+mineq)*((n+1)-meq) + 2*meq + (n+1) slsqpb = (n+1)*(n/2) + 2*m + 3*n + 3*(n+1) + 1 lw = lsq + lsi + lsei + slsqpb + n + m w = zeros([lw], 'd') ljw = max(mineq,(n+1)-meq) jw = zeros([ljw], 'i') try: dg, self.error_code, self.nfunc, self.ngrad = \ slsqp(self.ncon, self.neqcon, la, self.nparam, \ self.x, self.x_lower_bounds, self.x_upper_bounds, \ self.ff, self.gg, df, dg, self.accuracy, self.maxiter, \ self.iprint-1, self.iout, self.output_filename, \ self.error_code, w, lw, jw, ljw, \ self.nfunc, self.ngrad, \ self._func, self._grad) #slsqp(m,meq,la,n,xx,xl,xu,ff,gg,df,dg,acc,maxit,iprint, # iout,ifile,mode,w,lw,jw,ljw,nfunc,ngrad,slfunc,slgrad) except Exception, err: self._logger.error(str(err)) raise if self.iprint > 0 : closeunit(self.iout) # Log any errors if self.error_code != 0 : self._logger.warning(self.error_messages[self.error_code]) # Iteration is complete self._continue = False
class SpecialDriver(Driver): implements(IHasParameters) def execute(self): self.set_parameters([1.0])
class NEWSUMTdriver(DriverUsesDerivatives): """ Driver wrapper of Fortran version of NEWSUMT. .. todo:: Check to see if this itmax variable is needed. NEWSUMT might handle it for us. """ implements(IHasParameters, IHasIneqConstraints, IHasObjective) itmax = Int(10, iotype='in', desc='Maximum number of iterations before \ termination.') default_fd_stepsize = Float(0.01, iotype='in', desc='Default finite ' \ 'difference stepsize. Parameters with ' \ 'specified values override this.') ilin = Array(dtype=numpy_int, default_value=zeros(0, 'i4'), iotype='in', desc='Array designating whether each constraint is linear.') # Control parameters for NEWSUMT. # NEWSUMT has quite a few parameters to give the user control over aspects # of the solution. epsgsn = Float(0.001, iotype='in', desc='Convergence criteria \ of the golden section algorithm used for the \ one dimensional minimization.') epsodm = Float(0.001, iotype='in', desc='Convergence criteria \ of the unconstrained minimization.') epsrsf = Float(0.001, iotype='in', desc='Convergence criteria \ for the overall process.') g0 = Float(0.1, iotype='in', desc='Initial value of the transition \ parameter.') ra = Float(1.0, iotype='in', desc='Penalty multiplier. Required if mflag=1') racut = Float(0.1, iotype='in', desc='Penalty multiplier decrease ratio. \ Required if mflag=1.') ramin = Float(1.0e-13, iotype='in', desc='Lower bound of \ penalty multiplier. \ Required if mflag=1.') stepmx = Float(2.0, iotype='in', desc='Maximum bound imposed on the \ initial step size of the one-dimensional \ minimization.') jprint = Int(0, iotype='in', desc='Print information during NEWSUMT \ solution. Higher values are more verbose. If 0,\ print initial and final designs only.', high=3, low=-1) lobj = Int(0, iotype='in', desc='Set to 1 if linear objective function.') maxgsn = Int(20, iotype='in', desc='Maximum allowable number of golden \ section iterations used for 1D minimization.') maxodm = Int(6, iotype='in', desc='Maximum allowable number of one \ dimensional minimizations.') maxrsf = Int(15, iotype='in', desc='Maximum allowable number of \ unconstrained minimizations.') mflag = Int(0, iotype='in', desc='Flag for penalty multiplier. \ If 0, initial value computed by NEWSUMT. \ If 1, initial value set by ra.') def __init__(self, *args, **kwargs): super(NEWSUMTdriver, self).__init__(*args, **kwargs) self.iter_count = 0 # Save data from common blocks into the driver self.contrl = _contrl() self.countr = _countr() # define the NEWSUMTdriver's private variables # note, these are all resized in config_newsumt # basic stuff self.design_vals = zeros(0, 'd') self.constraint_vals = [] # temp storage self.__design_vals_tmp = zeros(0, 'd') self._ddobj = zeros(0) self._dg = zeros(0) self._dh = zeros(0) self._dobj = zeros(0) self._g = zeros(0) self._gb = zeros(0) self._g1 = zeros(0) self._g2 = zeros(0) self._g3 = zeros(0) self._s = zeros(0) self._sn = zeros(0) self._x = zeros(0) self._iik = zeros(0, dtype=int) self._lower_bounds = zeros(0) self._upper_bounds = zeros(0) self._iside = zeros(0) self.fdcv = zeros(0) # Just defined here. Set elsewhere self.n1 = self.n2 = self.n3 = self.n4 = 0 # Ready inputs for NEWSUMT self._obj = 0.0 self._objmin = 0.0 self.isdone = False self.resume = False self.uses_Hessians = False def start_iteration(self): """Perform the optimization.""" # Flag used to figure out if we are starting a new finite difference self.baseline_point = True # set newsumt array sizes and more... 
        self._config_newsumt()

        self.iter_count = 0

        # get the values of the parameters
        # check if any min/max constraints are violated by initial values
        for i, val in enumerate(self.get_parameters().values()):

            value = val.evaluate(self.parent)
            self.design_vals[i] = value
            # next line is specific to NEWSUMT
            self.__design_vals_tmp[i] = value

        # Call the interruptible version of SUMT in a loop that we manage
        self.isdone = False
        self.resume = False

    def continue_iteration(self):
        """Returns True if iteration should continue."""
        return not self.isdone and self.iter_count < self.itmax

    def pre_iteration(self):
        """Checks for RunStopped and evaluates objective."""
        super(NEWSUMTdriver, self).pre_iteration()
        if self._stop:
            self.raise_exception('Stop requested', RunStopped)

    def run_iteration(self):
        """ The NEWSUMT driver iteration."""

        self._load_common_blocks()

        try:
            (fmin, self._obj, self._objmin, self.design_vals,
             self.__design_vals_tmp, self.isdone, self.resume) = \
              newsumtinterruptible.newsuminterruptible(user_function,
                   self._lower_bounds, self._upper_bounds,
                   self._ddobj, self._dg, self._dh, self._dobj,
                   self.fdcv, self._g, self._gb, self._g1, self._g2, self._g3,
                   self._obj, self._objmin,
                   self._s, self._sn, self.design_vals, self.__design_vals_tmp,
                   self._iik, self.ilin, self._iside,
                   self.n1, self.n2, self.n3, self.n4,
                   self.isdone, self.resume, analys_extra_args=(self,))

        except Exception, err:
            self._logger.error(str(err))
            raise

        self._save_common_blocks()

        self.iter_count += 1

        # Update the parameters and run one final time with what it gave us.
        # This update is needed because I observed that the last callback to
        # user_function is the final leg of a finite difference, so the model
        # is not in sync with the final design variables.
        if not self.continue_iteration():
            dvals = [float(val) for val in self.design_vals]
            self.set_parameters(dvals)
            super(NEWSUMTdriver, self).run_iteration()
class CSVCaseIterator(object): """An iterator that returns :class:`Case` objects from a passed-in iterator of cases. This can be useful for runtime-generated cases from an optimizer, etc. Current limitations: Quote character in the input CSV file should be ``'`` or ``"``. Other choices don't seem to get identified by csv.Sniffer. All string data must be contained inside of quotes. This includes field headers. """ implements(ICaseIterator) def __init__(self, filename='cases.csv', headers=None): self.data = [] self.headers = headers self.label_field = None #Open Input file self.filename = filename @property def filename(self): """Get the name of the CSV file.""" return self._filename @filename.setter def filename(self, name): """Set the CSV file name.""" self._filename = name with open(self.filename, 'r') as infile: # Sniff out the dialect #infile.seek(1) dialect = csv.Sniffer().sniff(infile.readline()) infile.seek(0) reader = csv.reader(infile, dialect, quoting=csv.QUOTE_NONNUMERIC) self.data = [] for row in reader: self.data.append(row) if self.headers is None: self.need_fieldnames = True else: self.need_fieldnames = False if 'label' in self.headers.values(): for key, value in self.headers.iteritems(): if value == 'label': self.label_field = key del self.headers[key] break def __iter__(self): return self._next_case() def _next_case(self): """ Generator which returns Cases one at a time. """ # Default case label for external csv files that don't have labels. label = "External Case" retries = max_retries = 0 parent_uuid = msg = "" retries_field = None if self.headers is None: input_fields = {} else: input_fields = self.headers output_fields = {} for row in self.data: # Get fieldnames from file if self.need_fieldnames: # OpenMDAO-style CSV file if row[1] == '/INPUTS': input_fields, output_fields = self._parse_fieldnames(row) self.label_field = 0 retries_field = row.index('/METADATA') + 1 max_retries_field = retries_field + 1 parent_uuid_field = retries_field + 2 msg_field = retries_field + 3 # Read headers from file elif self.headers is None: for i, field in enumerate(row): if field == 'label': self.label_field = i else: input_fields[i] = field self.need_fieldnames = False continue if self.label_field is not None: label = row[self.label_field] if retries_field is not None: retries = row[retries_field] max_retries = row[max_retries_field] parent_uuid = row[parent_uuid_field] msg = row[msg_field] # For some reason, default for these in a case is None if not retries: retries = None if not max_retries: max_retries = None inputs = [] for i, field in input_fields.iteritems(): inputs.append((field, row[i])) outputs = [] for i, field in output_fields.iteritems(): outputs.append((field, row[i])) yield Case(inputs=inputs, outputs=outputs, label=label, \ retries=retries, max_retries=max_retries, \ parent_uuid=parent_uuid, msg=msg) def _parse_fieldnames(self, row): ''' Parse our input and output fieldname dictionaries ''' input_fields = {} output_fields = {} # This file was generated by a CSVCaseRecorder if row[1] == '/INPUTS': in_start = 2 out_start = row.index('/OUTPUTS') + 1 out_end = row.index('/METADATA') if in_start < out_start - 1: for i in range(in_start, out_start - 1): input_fields[i] = row[i] if out_start < len(row) - 1: for i in range(out_start, out_end): output_fields[i] = row[i] # This file was generated externally else: pass return input_fields, output_fields def get_attributes(self, io_only=True): """ We need a custom get_attributes because we aren't using Traits to manage our changeable 
settings. This is unfortunate, and should be changed to something that automates this somehow.""" attrs = {} attrs['type'] = type(self).__name__ variables = [] attr = {} attr['name'] = "filename" attr['type'] = type(self.filename).__name__ attr['value'] = str(self.filename) attr['connected'] = '' attr['desc'] = 'Name of the CSV file to be iterated.' variables.append(attr) attr = {} attr['name'] = "headers" attr['type'] = type(self.headers).__name__ attr['value'] = str(self.headers) attr['connected'] = '' attr[ 'desc'] = 'Optional dictionary of header labels, where the key is the column number.' variables.append(attr) attrs["Inputs"] = variables return attrs
class SLSQPdriver(Driver): """Minimize a function using the Sequential Least SQuares Programming (SLSQP) method. SLSQP is a gradient optimizer that can handle both equality and inequality constraints. Note: Constraints should be added using the OpenMDAO convention (positive = violated). """ implements(IHasParameters, IHasConstraints, IHasObjective, IOptimizer) # pylint: disable=E1101 accuracy = Float(1.0e-6, iotype='in', desc='Convergence accuracy') maxiter = Int(50, iotype='in', desc='Maximum number of iterations.') iprint = Enum( 0, [0, 1, 2, 3], iotype='in', desc='Controls the frequency of output: 0 (no output),1,2,3.') iout = Int(6, iotype='in', desc='Fortran output unit. Leave this at 6 for STDOUT.') output_filename = Str('slsqp.out', iotype='in', desc='Name of output file (if iout not 6).') error_code = Int(0, iotype='out', desc='Error code returned from SLSQP.') def __init__(self): super(SLSQPdriver, self).__init__() self.error_messages = { -1: "Gradient evaluation required (g & a)", 1: "Function evaluation required (f & c)", 2: "More equality constraints than independent variables", 3: "More than 3*n iterations in LSQ subproblem", 4: "Inequality constraints incompatible", 5: "Singular matrix E in LSQ subproblem", 6: "Singular matrix C in LSQ subproblem", 7: "Rank-deficient equality constraint subproblem HFTI", 8: "Positive directional derivative for linesearch", 9: "Iteration limit exceeded", } self.x = zeros(0, 'd') self.x_lower_bounds = zeros(0, 'd') self.x_upper_bounds = zeros(0, 'd') self.inputs = None self.obj = None self.con = None self.nparam = None self.ncon = None self.neqcon = None self.ff = 0 self.nfunc = 0 self.ngrad = 0 self._continue = None def start_iteration(self): """Perform initial setup before iteration loop begins.""" # Inital run to make sure the workflow executes super(SLSQPdriver, self).run_iteration() self.inputs = self.list_param_group_targets() self.obj = self.list_objective_targets() self.con = self.list_constraint_targets() self.nparam = self.total_parameters() self.ncon = self.total_constraints() self.neqcon = self.total_eq_constraints() self.x = self.eval_parameters(self.parent) self.x_lower_bounds = self.get_lower_bounds() self.x_upper_bounds = self.get_upper_bounds() self.ff = 0 self.nfunc = 0 self.ngrad = 0 self._continue = True def run_iteration(self): """ Note: slsqp controls the looping.""" n = self.nparam m = self.ncon meq = self.neqcon la = max(m, 1) gg = zeros([la], 'd') df = zeros([n + 1], 'd') dg = zeros([la, n + 1], 'd') mineq = m - meq + 2 * (n + 1) lsq = (n + 1) * ((n + 1) + 1) + meq * ((n + 1) + 1) + mineq * ( (n + 1) + 1) lsi = ((n + 1) - meq + 1) * (mineq + 2) + 2 * mineq lsei = ((n + 1) + mineq) * ((n + 1) - meq) + 2 * meq + (n + 1) slsqpb = (n + 1) * (n / 2) + 2 * m + 3 * n + 3 * (n + 1) + 1 lw = lsq + lsi + lsei + slsqpb + n + m w = zeros([lw], 'd') ljw = max(mineq, (n + 1) - meq) jw = zeros([ljw], 'i') try: dg, self.error_code, self.nfunc, self.ngrad = \ slsqp(self.ncon, self.neqcon, la, self.nparam, self.x, self.x_lower_bounds, self.x_upper_bounds, self.ff, gg, df, dg, self.accuracy, self.maxiter, self.iprint-1, self.iout, self.output_filename, self.error_code, w, lw, jw, ljw, self.nfunc, self.ngrad, self._func, self._grad) #slsqp(m,meq,la,n,xx,xl,xu,ff,gg,df,dg,acc,maxit,iprint, # iout,ifile,mode,w,lw,jw,ljw,nfunc,ngrad,slfunc,slgrad) except Exception as err: self._logger.error(str(err)) raise if self.iprint > 0: closeunit(self.iout) # Log any errors if self.error_code != 0: 
self._logger.warning(self.error_messages[self.error_code]) # Iteration is complete self._continue = False def _func(self, m, me, la, n, f, g, xnew): """ Return ndarrays containing the function and constraint evaluations. Note: m, me, la, n, f, and g are unused inputs.""" self.set_parameters(xnew) super(SLSQPdriver, self).run_iteration() f = self.eval_objective() if isnan(f): msg = "Numerical overflow in the objective." self.raise_exception(msg, RuntimeError) # Constraints. Note that SLSQP defines positive as satisfied. if self.ncon > 0: g = -1. * array(self.eval_constraints(self.parent)) if self.iprint > 0: pyflush(self.iout) return f, g def _grad(self, m, me, la, n, f, g, df, dg, xnew): """ Return ndarrays containing the gradients of the objective and constraints. Note: m, me, la, n, f, and g are unused inputs.""" J = self.workflow.calc_gradient(self.inputs, self.obj + self.con) #print "gradient", J df[0:self.nparam] = J[0, :].ravel() if self.ncon > 0: dg[0:self.ncon, 0:self.nparam] = -J[1:1 + self.ncon, :] return df, dg def requires_derivs(self): """SLSQP requires derivatives.""" return True
class CSVCaseRecorder(object): """Stores cases in a csv file. Defaults to cases.csv.""" implements(ICaseRecorder) def __init__(self, filename='cases.csv', append=False, delimiter=',', quotechar='"'): self.delimiter = delimiter self.quotechar = quotechar self.append = append self.outfile = None self.csv_writer = None self._header_size = 0 #Open output file self._write_headers = False self.filename = filename @property def filename(self): """Get the name of the CSV file.""" return self._filename @filename.setter def filename(self, name): """Set the CSV file name.""" self._filename = name if self.append: self.outfile = open(self.filename, 'a') else: self.outfile = open(self.filename, 'w') # Whenever we start a new CSV file, we need to insert a line # of headers. These won't be available until the first # case is passed to self.record. self._write_headers = True self.csv_writer = csv.writer(self.outfile, delimiter=self.delimiter, quotechar=self.quotechar, quoting=csv.QUOTE_NONNUMERIC) def record(self, case): """Store the case in a csv file. The format for a line of data follows: Field 1 - label Field 2 - [Empty] Field 3 - Input 1 ... Field i+2 - Input i Field i+3 - [Empty] Field i+4 - Output 1 ... Field i+j+4 - Output j Field i+j+5 - [Empty] Field i+j+6 - retries Field i+j+7 - max_retries Field i+j+8 - parent_uuid Field i+j+9 - msg """ if self.outfile is None: raise RuntimeError('Attempt to record on closed recorder') if self._write_headers: headers = ['label', '/INPUTS'] headers.extend(case.keys(iotype='in', flatten=True)) headers.append('/OUTPUTS') headers.extend(case.keys(iotype='out', flatten=True)) headers.extend( ['/METADATA', 'retries', 'max_retries', 'parent_uuid', 'msg']) self.csv_writer.writerow(headers) self._write_headers = False self._header_size = len(headers) data = [case.label] for iotype in ['in', 'out']: data.append('') data.extend(case.values(iotype=iotype, flatten=True)) data.extend( ['', case.retries, case.max_retries, case.parent_uuid, case.msg]) if self._header_size != len(data): raise RuntimeError( "number of data points doesn't match header size in CSV recorder" ) self.csv_writer.writerow(data) def close(self): """Closes the file.""" if self.csv_writer is not None: if not isinstance(self.outfile, (StringIO.StringIO, cStringIO.OutputType)): # Closing a StringIO deletes its contents. self.outfile.close() self.outfile = None self.csv_writer = None def get_iterator(self): '''Return CSVCaseIterator that points to our current file''' # I think we can safely close the oufile if someone is # requesting the iterator self.close() return CSVCaseIterator(self.filename) def get_attributes(self, io_only=True): """ We need a custom get_attributes because we aren't using Traits to manage our changeable settings. This is unfortunate, and should be changed to something that automates this somehow.""" attrs = {} attrs['type'] = type(self).__name__ variables = [] attr = {} attr['name'] = "filename" attr['type'] = type(self.filename).__name__ attr['value'] = str(self.filename) attr['connected'] = '' attr['desc'] = 'Name of the CSV file to be output.' variables.append(attr) attr = {} attr['name'] = "append" attr['type'] = type(self.append).__name__ attr['value'] = str(self.append) attr['connected'] = '' attr['desc'] = 'Set to True to append to the existing CSV file.' variables.append(attr) attr = {} attr['name'] = "delimiter" attr['type'] = type(self.delimiter).__name__ attr['value'] = str(self.delimiter) attr['connected'] = '' attr['desc'] = 'CSV delimiter. Default is ",".' 
variables.append(attr) attrs["Inputs"] = variables return attrs
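# --- Round-trip usage sketch (illustrative, not from the original source) ---
# Write cases out with the recorder, then read them back through the matching
# iterator; `cases` is assumed to be an iterable of populated Case objects.
def _csv_roundtrip_sketch(cases):
    rec = CSVCaseRecorder(filename='cases.csv')
    for case in cases:
        rec.record(case)
    # get_iterator() closes the file and re-opens it for reading
    return [c for c in rec.get_iterator()]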
class MPICaseDriver(Driver): """ A Driver that runs each parameter set concurrently in the specified number of processes. """ implements(IHasParameters, IHasResponses) def get_req_cpus(self): # None means there is no max procs. It will use as many as it's given req = self.workflow.get_req_cpus() return (req[0], None) def execute(self): """ Run each parameter set. """ color = self._color[self.mpi.rank] if color == MPI.UNDEFINED or self.mpi.comm == MPI.COMM_NULL: return # Prepare parameters and responses. case_paths = {} inputs = [] values = [] for path in self.get_parameters(): if isinstance(path, tuple): for target in path: inputs.append(target) path = path[0] else: inputs.append(path) val = self.case_inputs.get(make_legal_path(path)) values.append(val) if not inputs: return length = len(values[0]) for path in self.get_responses(): case_paths[path] = make_legal_path(path) sizes, offsets = evenly_distrib_idxs(self._num_parallel_subs, length) start = offsets[color] end = start + sizes[color] self.init_responses(length) # Run each parameter set. for i in range(start, end): # Set inputs. for j, path in enumerate(inputs): self.set_parameter_by_name(path, values[j][i]) # Run workflow. with MPIContext(): self.run_iteration() # Get outputs. for path in self.get_responses(): cpath = case_paths[path] self.case_outputs.get(cpath)[i] = self.parent.get(path) if self._num_parallel_subs > 1: # Now, collect the results back from all parallel processes for path in self.get_responses(): path = case_paths[path] vals = self.case_outputs.get(path) if self._resp_comm != MPI.COMM_NULL: allvals = self._resp_comm.gather(vals, root=0) if self._resp_comm.rank == 0: for i in range(self._num_parallel_subs): vals[offsets[i]:offsets[i] + sizes[i]] = allvals[i][offsets[i]:offsets[i] + sizes[i]] junk = self.mpi.comm.bcast(vals, root=0) else: vals = self.mpi.comm.bcast(None, root=0) else: vals = self.mpi.comm.bcast(vals, root=0) self.case_outputs.set(path, vals) def setup_communicators(self, comm): self.mpi.comm = comm size = comm.size rank = comm.rank mincpu, maxcpu = self.workflow.get_req_cpus() self._num_parallel_subs = size / mincpu leftover = size % mincpu color = [] resp_color = [] undefs = [MPI.UNDEFINED] * mincpu for i in range(self._num_parallel_subs): color.extend([i] * mincpu) resp_color.extend([0] + undefs[1:]) # TODO: give leftover procs to subsystems if they can utilize them if leftover: color.extend([MPI.UNDEFINED] * leftover) resp_color.extend([MPI.UNDEFINED] * leftover) sub_comm = comm.Split(color[rank]) self._color = color # if we weren't given enough procs to run parallel workflows, # just run serial if self._num_parallel_subs == 1: self.workflow.setup_communicators(comm) self._resp_comm = MPI.COMM_NULL return if mincpu > comm.size: raise RuntimeError( "subsystem %s requested %d processors but got %s" % (self.name, mincpu, comm.size)) self.workflow.setup_communicators(sub_comm) # Now set up a special comm for just the MPICaseDrivers that have 0 rank # sub_comm. The responses are duplicated in each proc of the sub_comm, # so we just want the first one in order to avoid unnecessary data # passing. Later we'll broadcast the fully assembled case_outputs # vartree to all procs. self._resp_comm = comm.Split(resp_color[rank])