Example #1
class fuzzy_mean(Component):

    fuzzy_inputs = Dict(
        {},
        iotype='in',
        desc='input dict of fuzzy inputs {key:fuzzyVal, key2:fuzzyVal2, ...}')
    output_key = Str('', iotype='in', desc='key for output value')
    fuzzy_output = Dict({},
                        iotype='out',
                        desc='output {output_key:fuzzy average}')

    def execute(self):
        pass
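
# Illustrative usage sketch (added; not part of the original snippet). It assumes
# each fuzzyVal is a [x_values, y_values] membership-function pair; execute()
# above is still a stub, so fuzzy_output stays empty until it is implemented.
comp = fuzzy_mean()
comp.fuzzy_inputs = {'weight': [[0.0, 1.0, 2.0], [0.0, 1.0, 0.0]],
                     'cost': [[1.0, 2.0, 3.0], [0.0, 1.0, 0.0]]}
comp.output_key = 'mean'
comp.run()
print comp.fuzzy_output  # expected form once implemented: {'mean': <fuzzy average>}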
Example #2
class CrossSectionStructureVT(VariableTree):
    """
    Container for a cross-sectional definition of the
    internal structure of a blade.
    """
    s = Float()
    regions = List(desc='List of names of regions in the cross section')
    webs = List(desc='List of names of webs in the cross section')
    materials = Dict(desc='Dictionary of MaterialProps vartrees')
    airfoil = VarTree(AirfoilShape(), desc='Cross sectional shape')
    DPs = List(desc='Region division points (nregion + 1)')

    def add_region(self, name):

        self.add(name, VarTree(Region()))
        self.regions.append(name)
        return getattr(self, name)

    def add_web(self, name):

        self.add(name, VarTree(Region()))
        self.webs.append(name)
        return getattr(self, name)

    def add_material(self, name, material):

        if name in self.materials:
            return

        self.materials[name] = material
        return self.materials[name]
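
# Illustrative usage sketch (added). Region, MaterialProps and AirfoilShape are
# assumed to be importable from the surrounding fusedwind code; the s0/s1 curve
# fraction attributes on Region are assumptions based on how they are used later.
cs = CrossSectionStructureVT()
cs.s = 0.25                         # spanwise position of this cross section
panel = cs.add_region('REGION01')   # adds a Region vartree and records its name
panel.s0, panel.s1 = -1.0, -0.5
web = cs.add_web('WEB01')
cs.add_material('uniax', MaterialProps())  # stored once; adding the same name again is a no-op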
Example #3
class Broadcaster(Component):
    """Takes inputs and passes them directly to outputs
    to be broadcast out to other components."""

    names = List(
        Str,
        iotype="in",
        desc="Names of the variables you want to broadcast from this component."
    )
    types = Dict(
        {'default': Float},
        iotype="in",
        desc=
        "Name/type pairs describing the variable types of each broadcast variable; "
        "'default' name is used if no other type is set explicitly.")

    def __init__(self, names, types=None):
        """names: ListSrt, list of the variable names you would like the broadcaster to create for you. All inputs will be named with an '_in' added. Outputs will follow the name given.
        types: Dict, dictionary of the name/type pairs describing which types you would like to broadcast. If given, the name 'default' indicates the default variable type to use."""

        super(Broadcaster, self).__init__()
        self._vars = []
        if types is not None:
            self.types = types
        self.names = names

    def _types_changed(self, old, new):
        if self.names:
            self._names_changed(self.names, self.names)

    # Create inputs and outputs whenever names changes.
    def _names_changed(self, old, new):
        for in_var, out_var in self._vars:
            if self.parent:
                self.parent.disconnect('.'.join([self.name, in_var]))
                self.parent.disconnect('.'.join([self.name, out_var]))
            self.remove_trait(in_var)
            self.remove_trait(out_var)
        self._vars = []
        for name in new:
            if name in self.types:
                traits = self.types[name]
            elif 'default' in self.types:
                traits = self.types['default']
            else:
                self.raise_exception(
                    'No type was provided for "%s" and no "default" type was provided. '
                    'Specify at least one of these.' % name, ValueError)

            in_var = "%s_in" % name
            out_var = name
            self.add_trait(in_var, Float(iotype="in", low=-9e99, high=9e99))
            self.add_trait(out_var, Float(iotype="out"))

            self._vars.append((in_var, out_var))

    def execute(self, *args, **kwargs):
        for in_var, out_var in self._vars:
            setattr(self, out_var, getattr(self, in_var))
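
# Illustrative usage sketch (added) showing the naming convention the docstring
# describes: inputs get an '_in' suffix, outputs keep the plain name.
b = Broadcaster(['wing_area', 'span'])   # Float is the default broadcast type
b.wing_area_in = 12.5
b.execute()
print b.wing_area                        # -> 12.5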
Example #4
class Dummy(Component):

    x = Array([[-1, 1], [-2, 2]], iotype='in', shape=(2, 2))
    xlist = List([1, 2], iotype='in')
    xdict = Dict({'a': 'b'}, iotype='in')

    def execute(self):
        self.y = self.x
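
# Illustrative sketch (added): the typed inputs above coerce and validate values;
# the shape=(2, 2) constraint on x should reject differently shaped arrays.
d = Dummy()
d.x = [[0, 0], [1, 1]]   # coerced to a 2x2 array
d.xdict['c'] = 'd'
d.execute()
print d.y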
Example #5
class dummy_comp(Component):

    x = Float(0.0, iotype='in')
    e = Enum(0, [0, 1, 2, 3], iotype='in')
    d = Dict(value={'e': 2.71, "pi": 3.14159}, value_trait=Float, key_trait=Str, iotype='in')
    X = Array([0, 1, 2, 3], iotype='in')
    Y = Array([[0, 1], [2, 3]], iotype='in')
    Y2 = Array([[5], [8]], iotype='in')
    Y3 = Array([[1]], iotype='in')
    Z = List([1, 2, 3, 4], iotype='in')

    def execute(self):
        return 
Example #6
class InputDict(VariableTree):
    """ input pair to add to dictionary for input to fuzzy system"""
    input_dict = Dict({}, desc='Input Dictionary (usually from another output)')
    input_keys = List([], desc = 'Keys for items in input_dict to pull as inputs')
Example #7
class Postprocess_Fuzzy_Outputs(Component):
    """
    Takes in some outputs from the fuzzy systems and creates crisp values by which 
    to find optimal solutions. Uses alpha cuts at the given alpha_val level.
    """
    
    
    # set up interface to the framework
    #inputs are outputs from systems
    fuzzSys_in_1 = VarTree(FuzzyMF(), iotype='in', desc='fuzzy system output')
    fuzzSys_in_2 = VarTree(FuzzyMF(), iotype='in', desc='fuzzy system output')
    fuzzSys_in_3 = VarTree(FuzzyMF(), iotype='in', desc='fuzzy system output')
    fuzzSys_in_4 = VarTree(FuzzyMF(), iotype='in', desc='fuzzy system output')
    fuzzSys_in_5 = VarTree(FuzzyMF(), iotype='in', desc='fuzzy system output')
    fuzzSys_in_6 = VarTree(FuzzyMF(), iotype='in', desc='fuzzy system output')
    fuzzSys_in_7 = VarTree(FuzzyMF(), iotype='in', desc='fuzzy system output')
    fuzzSys_in_8 = VarTree(FuzzyMF(), iotype='in', desc='fuzzy system output')
    fuzzSys_in_9 = VarTree(FuzzyMF(), iotype='in', desc='fuzzy system output')

    alpha_val = Float(0.7, iotype='in', desc='alpha-cut to perform range post processing at')
    goalVals = Dict({'sys_phi'  :6.7, 
                     'sys_FoM'  :0.775, 
                     'sys_LoD'  :12.5, 
                     'sys_etaP' :0.875, 
                     'sys_Pin'  :3500.0,
                     'sys_GWT'  :10000,
                     'sys_VH'   :325}, iotype='in', desc='crisp goals for each output')

    PLOTMODE = Int(0, iotype='in', desc='Flag for plotting, 1 for plot')
    printResults = Int(1, iotype='in', desc='print results each iteration?')

    passthrough = Int(0, iotype='in', low=0, high=1, desc='catch flag for incompatible options')
    incompatCount = Int(0, iotype='in', desc='count of incompatible options')

    runFlag_in = Int(0, iotype='in', desc='test')
    runFlag_out = Int(0, iotype='out', desc='test')

    ranges_out = Dict({}, iotype='out', desc='alpha cuts for each fuzzy input')
    response_1     = Float(0.0, iotype='out', desc='crisp measure 1 to perform optimization')
    response_1_r   = Float(0.0, iotype='out', desc='range for crisp measure 1')
    response_1_POS = Float(0.0, iotype='out', desc='fuzzy POS measure (dominance to crisp goal)')

    response_2     = Float(0.0, iotype='out', desc='crisp measure 2 to perform optimization')
    response_2_r   = Float(0.0, iotype='out', desc='range for crisp measure 2')
    response_2_POS = Float(0.0, iotype='out', desc='fuzzy POS measure (dominance to crisp goal)')   

    response_3     = Float(0.0, iotype='out', desc='crisp measure 3 to perform optimization')
    response_3_r   = Float(0.0, iotype='out', desc='range for crisp measure 3')
    response_3_POS = Float(0.0, iotype='out', desc='fuzzy POS measure (dominance to crisp goal)')

    response_4     = Float(0.0, iotype='out', desc='crisp measure 4 to perform optimization')	
    response_4_r   = Float(0.0, iotype='out', desc='range for crisp measure 4')
    response_4_POS = Float(0.0, iotype='out', desc='fuzzy POS measure (dominance to crisp goal)')

    response_5     = Float(0.0, iotype='out', desc='crisp measure 5 to perform optimization')
    response_5_r   = Float(0.0, iotype='out', desc='range for crisp measure 5')
    response_5_POS = Float(0.0, iotype='out', desc='fuzzy POS measure (dominance to crisp goal)')

    response_6     = Float(0.0, iotype='out', desc='crisp measure 6 to perform optimization')
    response_6_r   = Float(0.0, iotype='out', desc='range for crisp measure 6')
    response_6_POS = Float(0.0, iotype='out', desc='fuzzy POS measure (dominance to crisp goal)')

    response_7     = Float(0.0, iotype='out', desc='crisp measure 7 to perform optimization')
    response_7_r   = Float(0.0, iotype='out', desc='range for crisp measure 7')
    response_7_POS = Float(0.0, iotype='out', desc='fuzzy POS measure (dominance to crisp goal)')

    response_8     = Float(0.0, iotype='out', desc='crisp measure 8 to perform optimization')
    response_8_r   = Float(0.0, iotype='out', desc='range for crisp measure 8')
    response_8_POS = Float(0.0, iotype='out', desc='fuzzy POS measure (dominance to crisp goal)')

    response_9     = Float(0.0, iotype='out', desc='crisp measure 9 to perform optimization')
    response_9_r   = Float(0.0, iotype='out', desc='range for crisp measure 9')
    response_9_POS = Float(0.0, iotype='out', desc='fuzzy POS measure (dominance to crisp goal)')

    fuzzyPOS       = Float(0.0, iotype='out', desc='Fuzzy Measure for POS (product of all POS measures)')

    def execute(self):
        """
        Translate fuzzy inputs to crisp values to optimize system.
        """      
        inputs = [self.fuzzSys_in_1, self.fuzzSys_in_2, self.fuzzSys_in_3, 
        		  self.fuzzSys_in_4, self.fuzzSys_in_5, self.fuzzSys_in_6,
                  self.fuzzSys_in_7, self.fuzzSys_in_8, self.fuzzSys_in_9]
        outs  = [self.response_1, self.response_2, self.response_3,
                 self.response_4, self.response_5, self.response_6,
                 self.response_7, self.response_8, self.response_9]
        outs_r  = [self.response_1_r, self.response_2_r, self.response_3_r, 
                   self.response_4_r, self.response_5_r, self.response_6_r,
                   self.response_7_r, self.response_8_r, self.response_9_r]
        
        if self.passthrough == 1:
            if self.printResults == 1: print "Incompatible combo found..."
            self.response_1     = 0.0 #phi
            self.response_1_r   = 0.0 
            self.response_1_POS = -1.0

            self.response_2     = 0.0 #FoM
            self.response_2_r   = 0.0
            self.response_2_POS = -1.0

            self.response_3     = 0.0 #LoD
            self.response_3_r   = 0.0
            self.response_3_POS = -1.0

            self.response_4     = 0.0 #etaP
            self.response_4_r   = 0.0
            self.response_4_POS = -1.0

            self.response_5     = 0.0 #GWT
            self.response_5_r   = 0.0
            self.response_5_POS = -1.0

            self.response_6     = -99999.0 #P
            self.response_6_r   = 0.0
            self.response_6_POS = 0.0 

            self.response_7     = 0.0 #VH
            self.response_7_r   = 0.0
            self.response_7_POS = -1.0

            return None

        else:
            #get alpha cuts for crisp responses and ranges 
            for i in range(len(inputs)):
                
                if inputs[i].mf_key != '':
                    if len(inputs[i].mf_dict[inputs[i].mf_key]) == 1: #crisp value
                        self.ranges_out[inputs[i].mf_key] = [inputs[i].mf_dict[inputs[i].mf_key][0], inputs[i].mf_dict[inputs[i].mf_key][0]]
                    elif len(inputs[i].mf_dict[inputs[i].mf_key]) == 2: #fuzzy function
                        self.ranges_out[inputs[i].mf_key] = fuzzyOps.alpha_cut(self.alpha_val, inputs[i].mf_dict[inputs[i].mf_key])

                    #capture results for crisp measures
                    if self.ranges_out[inputs[i].mf_key] is not None:
                        y = self.ranges_out[inputs[i].mf_key]
                    else:
                        y = [0.0, 0.0]

                    if inputs[i].mf_key == 'sys_phi': 
                        self.response_1 = fuzz.defuzz(inputs[i].mf_dict[inputs[i].mf_key][0],np.array(inputs[i].mf_dict[inputs[i].mf_key][1]),'centroid')
                        self.response_1 = self.response_1 * math.exp(-self.incompatCount)**0.5
                        self.response_1_r = max(y) - min(y)
                        self.response_1_POS = fuzzyOps.fuzzyPOS(inputs[i].mf_dict[inputs[i].mf_key][0],np.array(inputs[i].mf_dict[inputs[i].mf_key][1]), self.goalVals['sys_phi'])
                   
                    if inputs[i].mf_key == 'sys_FoM': 
                        self.response_2 = fuzz.defuzz(inputs[i].mf_dict[inputs[i].mf_key][0],np.array(inputs[i].mf_dict[inputs[i].mf_key][1]),'centroid')
                        self.response_2 = self.response_2 * math.exp(-self.incompatCount)**0.5
                        self.response_2_r = max(y) - min(y)
                        #if self.response_2 < 0.6:  
                        #    self.response_2_POS = fuzzyOps.fuzzyPOS(inputs[i].mf_dict[inputs[i].mf_key][0],np.array(inputs[i].mf_dict[inputs[i].mf_key][1]), self.goalVals['sys_FoM'], direction='max', plot=True)
                        self.response_2_POS = fuzzyOps.fuzzyPOS(inputs[i].mf_dict[inputs[i].mf_key][0],np.array(inputs[i].mf_dict[inputs[i].mf_key][1]), self.goalVals['sys_FoM'])

                    if inputs[i].mf_key == 'sys_LoD': 
                        self.response_3 = fuzz.defuzz(inputs[i].mf_dict[inputs[i].mf_key][0],np.array(inputs[i].mf_dict[inputs[i].mf_key][1]),'centroid')
                        self.response_3 = self.response_3 * math.exp(-self.incompatCount)**0.5
                        self.response_3_r = max(y) - min(y)
                        self.response_3_POS = fuzzyOps.fuzzyPOS(inputs[i].mf_dict[inputs[i].mf_key][0],np.array(inputs[i].mf_dict[inputs[i].mf_key][1]), self.goalVals['sys_LoD'])

                    if inputs[i].mf_key == 'sys_etaP': 
                        self.response_4 = fuzz.defuzz(inputs[i].mf_dict[inputs[i].mf_key][0],np.array(inputs[i].mf_dict[inputs[i].mf_key][1]),'centroid')
                        self.response_4 = self.response_4 * math.exp(-self.incompatCount)**0.5
                        self.response_4_r = max(y) - min(y)
                        self.response_4_POS = fuzzyOps.fuzzyPOS(inputs[i].mf_dict[inputs[i].mf_key][0],np.array(inputs[i].mf_dict[inputs[i].mf_key][1]), self.goalVals['sys_etaP'])

                    if inputs[i].mf_key == 'sys_GWT': #minimize GWT (POS uses direction='min')
                        self.response_5 = fuzz.defuzz(inputs[i].mf_dict[inputs[i].mf_key][0],np.array(inputs[i].mf_dict[inputs[i].mf_key][1]),'centroid')
                        self.response_5 = self.response_5 * math.exp(-self.incompatCount)**0.5
                        self.response_5_r = max(y) - min(y)
                        self.response_5_POS = fuzzyOps.fuzzyPOS(inputs[i].mf_dict[inputs[i].mf_key][0],np.array(inputs[i].mf_dict[inputs[i].mf_key][1]), self.goalVals['sys_GWT'], direction='min')

                    
                    if inputs[i].mf_key == 'sys_P': #invert P so that all measures are maximized
                        self.response_6 = 0.0-fuzz.defuzz(inputs[i].mf_dict[inputs[i].mf_key][0],np.array(inputs[i].mf_dict[inputs[i].mf_key][1]),'centroid')
                        self.response_6 = self.response_6 * math.exp(-self.incompatCount)**0.5
                        self.response_6_r = max(y) - min(y)
                        self.response_6_POS = fuzzyOps.fuzzyPOS(inputs[i].mf_dict[inputs[i].mf_key][0],np.array(inputs[i].mf_dict[inputs[i].mf_key][1]), self.goalVals['sys_Pin'], direction='min')

                    if inputs[i].mf_key == 'sys_VH':
                        self.response_7 = fuzz.defuzz(inputs[i].mf_dict[inputs[i].mf_key][0],np.array(inputs[i].mf_dict[inputs[i].mf_key][1]),'centroid')
                        self.response_7 = self.response_7 * math.exp(-self.incompatCount)**0.5
                        self.response_7_r = max(y) - min(y)
                        self.response_7_POS = fuzzyOps.fuzzyPOS(inputs[i].mf_dict[inputs[i].mf_key][0],np.array(inputs[i].mf_dict[inputs[i].mf_key][1]), self.goalVals['sys_VH'])

                        self.fuzzyPOS = self.response_1_POS*self.response_2_POS*self.response_3_POS*self.response_4_POS*self.response_6_POS


        if self.printResults == 1: #print results
            print "Alternative:", self.passthrough, ":",
            print "PHI: %.1f, (%.3f)" % (self.response_1, self.response_1_POS),
            print "  FoM: %.3f, (%.3f)" % (self.response_2, self.response_2_POS), 
            print "  L/D: %.1f, (%.3f)" % (self.response_3, self.response_3_POS),
            print "  etaP: %.3f, (%.3f)" % (self.response_4, self.response_4_POS),
            print "  GWT: %.0f, (%.3f)" % (self.response_5, self.response_5_POS),
            print "  Pinst: %.0f, (%.3f)" % (self.response_6, self.response_6_POS),
            print "  VH: %.0f, (%.3f)" % (self.response_7, self.response_7_POS),
            print "  FPOS: %.3f" % self.fuzzyPOS

        #plotting for testing
        if self.PLOTMODE == 1: #plot results
            plt.figure()
            i=1
            for r in inputs:
                plt.subplot(3,2,i)
                if r.mf_key != '':
                    if len(r.mf_dict[r.mf_key]) == 1:  #crisp value
                        pass
                    elif len(r.mf_dict[r.mf_key]) == 2:  #fuzzy function
                        plt.plot(r.mf_dict[r.mf_key][0],r.mf_dict[r.mf_key][1])
                    i = i + 1

            plt.show()
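
# Illustrative sketch (added) of the data format execute() works on. It assumes
# 'fuzz' is skfuzzy and that fuzzyOps.alpha_cut is the project helper used above,
# taking (alpha, [x_vals, y_vals]) and returning an interval (or None).
x = np.linspace(0.0, 10.0, 101)
y = fuzz.trimf(x, [4.0, 6.7, 9.0])     # triangular MF around the sys_phi goal value
crisp = fuzz.defuzz(x, y, 'centroid')  # same defuzzification call as in execute()
cut = fuzzyOps.alpha_cut(0.7, [x, y])  # range used for the response_*_r outputs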
Example #8
class FuzzyMF(VariableTree):
    """ 
    Membershipt function for fuzzy output (or input) 'key', [x_vals, y_vals]
    """
    mf_key 		= Str('', desc='Key for Output Membership Function of interest')
    mf_dict		= Dict({}, desc = 'Dict of for MF(s) (key:[crisp] or key:[fuzzy_x, fuzzy_y])')
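
# Illustrative sketch (added): the mf_dict convention described in the docstring,
# with a fuzzy entry stored as [x_vals, y_vals] and a crisp entry as a one-item list.
mf = FuzzyMF()
mf.mf_key = 'sys_phi'
mf.mf_dict = {'sys_phi': [[5.0, 6.7, 8.0], [0.0, 1.0, 0.0]]}  # fuzzy: [fuzzy_x, fuzzy_y]
# a crisp value would instead be stored as {'sys_phi': [6.7]}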
Example #9
class MetaModel(Component):

    # pylint: disable-msg=E1101
    model = Slot(IComponent,
                 allow_none=True,
                 desc='Slot for the Component or Assembly being '
                 'encapsulated.')
    includes = ListStr(iotype='in',
                       desc='A list of names of variables to be included '
                       'in the public interface.')
    excludes = ListStr(iotype='in',
                       desc='A list of names of variables to be excluded '
                       'from the public interface.')

    warm_start_data = Slot(ICaseIterator,
                           iotype="in",
                           desc="CaseIterator containing cases to use as "
                           "initial training data. When this is set, all "
                           "previous training data is cleared, and replaced "
                           "with data from this CaseIterator")

    surrogate = Dict(
        key_trait=Str,
        value_trait=Slot(ISurrogate),
        allow_none=True,
        desc='Dictionary that provides a mapping between variables and '
        'surrogate models for each output. The "default" '
        'key must be given. It is the default surrogate model for all '
        'outputs. Any specific surrogate models can be '
        'specified by a key with the desired variable name.')
    surrogate_args = Dict(
        key_trait=Str,
        allow_none=True,
        desc='Dictionary that provides mapping between variables and '
        'arguments that should be passed to the surrogate model. Keys should '
        'match those in the surrogate dictionary. Values can be a list of ordered '
        'arguments, a dictionary of named arguments, or a two-tuple of a list and a dictionary.'
    )

    recorder = Slot(ICaseRecorder, desc='Records training cases')

    # when fired, the next execution will train the metamodel
    train_next = Event()
    #when fired, the next execution will reset all training data
    reset_training_data = Event()

    def __init__(self, *args, **kwargs):
        super(MetaModel, self).__init__(*args, **kwargs)
        self._current_model_traitnames = set()
        self._surrogate_info = {}
        self._surrogate_input_names = []
        self._training_input_history = []
        self._const_inputs = {}  # dict of constant training input indices and their values
        self._train = False
        self._new_train_data = False
        self._failed_training_msgs = []

        # the following line will work for classes that inherit from MetaModel
        # as long as they declare their traits in the class body and not in
        # the __init__ function.  If they need to create traits dynamically
        # during initialization they'll have to provide the value of
        # _mm_class_traitnames
        self._mm_class_traitnames = set(self.traits(iotype=not_none).keys())

    def _train_next_fired(self):
        self._train = True
        self._new_train_data = True

    def _reset_training_data_fired(self):
        self._training_input_history = []
        self._const_inputs = {}
        self._failed_training_msgs = []

        # remove output history from surrogate_info
        for name, tup in self._surrogate_info.items():
            surrogate, output_history = tup
            self._surrogate_info[name] = (surrogate, [])

    def _warm_start_data_changed(self, oldval, newval):
        self.reset_training_data = True

        #build list of inputs
        for case in newval:
            if self.recorder:
                self.recorder.record(case)
            inputs = []
            for inp_name in self._surrogate_input_names:
                var_name = '.'.join([self.name, inp_name])
                inp_val = case[var_name]
                if inp_val is not None:
                    inputs.append(inp_val)
                else:
                    self.raise_exception(
                        'The variable "%s" was not '
                        'found as an input in one of the cases provided '
                        'for warm_start_data.' % var_name, ValueError)
            #print "inputs", inputs
            self._training_input_history.append(inputs)

            for output_name in self.list_outputs_from_model():
                #grab value from case data
                var_name = '.'.join([self.name, output_name])
                try:
                    val = case.get_output(var_name)
                except KeyError:
                    self.raise_exception(
                        'The output "%s" was not found '
                        'in one of the cases provided for '
                        'warm_start_data' % var_name, ValueError)
                else:  # save to training output history
                    self._surrogate_info[output_name][1].append(val)

        self._new_train_data = True

    def execute(self):
        """If the training flag is set, train the metamodel. Otherwise, 
        predict outputs.
        """

        if self._train:
            if self.model is None:
                self.raise_exception("MetaModel object must have a model!",
                                     RuntimeError)
            try:
                inputs = self.update_model_inputs()

                #print '%s training with inputs: %s' % (self.get_pathname(), inputs)
                self.model.run(force=True)

            except Exception as err:
                self._failed_training_msgs.append(str(err))
            else:  #if no exceptions are generated, save the data
                self._training_input_history.append(inputs)
                self.update_outputs_from_model()
                case_outputs = []

                for name, tup in self._surrogate_info.items():
                    surrogate, output_history = tup
                    case_outputs.append(('.'.join([self.name,
                                                   name]), output_history[-1]))
                # save the case, making sure to add our name to the local input name since
                # this Case is scoped to our parent Assembly
                case_inputs = [
                    ('.'.join([self.name, name]), val)
                    for name, val in zip(self._surrogate_input_names, inputs)
                ]
                if self.recorder:
                    self.recorder.record(
                        Case(inputs=case_inputs, outputs=case_outputs))

            self._train = False
        else:
            #print '%s predicting' % self.get_pathname()
            if self._new_train_data:
                if len(self._training_input_history) < 2:
                    self.raise_exception(
                        "ERROR: need at least 2 training points!",
                        RuntimeError)

                # figure out if we have any constant training inputs
                tcases = self._training_input_history
                in_hist = tcases[0][:]
                # start off assuming every input is constant
                idxlist = range(len(in_hist))
                self._const_inputs = dict(zip(idxlist, in_hist))
                for i in idxlist:
                    val = in_hist[i]
                    for case in range(1, len(tcases)):
                        if val != tcases[case][i]:
                            del self._const_inputs[i]
                            break

                if len(self._const_inputs) == len(in_hist):
                    self.raise_exception(
                        "ERROR: all training inputs are constant.")
                elif len(self._const_inputs) > 0:
                    # some inputs are constant, so we have to remove them from the training set
                    training_input_history = []
                    for inputs in self._training_input_history:
                        training_input_history.append([
                            val for i, val in enumerate(inputs)
                            if i not in self._const_inputs
                        ])
                else:
                    training_input_history = self._training_input_history
                for name, tup in self._surrogate_info.items():
                    surrogate, output_history = tup
                    surrogate.train(training_input_history, output_history)

                self._new_train_data = False

            inputs = []
            for i, name in enumerate(self._surrogate_input_names):
                val = getattr(self, name)
                cval = self._const_inputs.get(i, _missing)
                if cval is _missing:
                    inputs.append(val)
                elif val != cval:
                    self.raise_exception(
                        "ERROR: training input '%s' was a constant value of (%s) but the value has changed to (%s)."
                        % (name, cval, val), ValueError)
            for name, tup in self._surrogate_info.items():
                surrogate = tup[0]
                # copy output to boundary
                setattr(self, name, surrogate.predict(inputs))
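
# Illustrative configuration sketch (added). It assumes OpenMDAO 0.x semantics as
# shown above: firing the train_next event makes the next run() train, otherwise
# run() predicts. MyExpensiveComp, KrigingSurrogate and training_points are
# placeholders, not names taken from the original source.
meta = MetaModel()
meta.model = MyExpensiveComp()
meta.surrogate = {'default': KrigingSurrogate()}  # the 'default' key is required
for x in training_points:
    meta.x = x
    meta.train_next = True   # next execution trains the metamodel
    meta.run()
meta.x = 0.5
meta.run()                   # no event fired, so this run predicts from the surrogate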
Example #10
class CS2DtoBECAS(Component):
    """
    Component that generates a set of BECAS input files based on
    a fusedwind.turbine.structure_vt.CrossSectionStructureVT.

    This class uses shellexpander, which comes as part of the BECAS distribution.
    """

    path_shellexpander = Str(iotype='in',
                             desc='Absolute path to shellexpander.py')

    cs2d = VarTree(CrossSectionStructureVT(),
                   iotype='in',
                   desc='Cross-sectional properties vartree')
    total_points = Int(
        100,
        iotype='in',
        desc=
        'Number of total geometry points to define the shape of the cross-section'
    )
    max_layers = Int(0,
                     iotype='in',
                     desc='Maximum number of layers through thickness')
    open_te = Bool(False, iotype='in', desc='If True, TE will be left open')
    becas_inputs = Str('becas_inputs',
                       iotype='in',
                       desc='Relative path for the future BECAS input files')
    section_name = Str(
        'BECAS_SECTION',
        iotype='in',
        desc='Section name used by shellexpander, also by BECASWrapper')
    dominant_elsets = List(
        ['REGION03', 'REGION06'],
        iotype='in',
        desc='define the spar cap regions for correct meshing')

    path_input = Str(iotype='out',
                     desc='path to the generated BECAS input files')
    airfoil = Array(iotype='out',
                    desc='the re-distributed airfoil coordinates')
    web_coord = Array(iotype='out',
                      desc='the distributed shear web coordinates')
    nodes = Array(iotype='out',
                  desc='all the nodes, web+airfoil for the 2D cross section')
    elset_defs = Dict(iotype='out', desc='')
    elements = Array(iotype='out', desc='')
    nodes_3d = Array(iotype='out', desc='')
    el_3d = Array(iotype='out', desc='')
    te_ratio = Float(
        iotype='out',
        desc='ratio between outer TE planform and TE layup thickness')

    def execute(self):
        """  """

        self._logger.info('starting CS2DtoBECAS ...')
        if self.path_shellexpander == '':
            raise RuntimeError('path_shellexpander not specified')

        tt = time.time()

        self.path_input = os.path.join(self.becas_inputs, self.section_name)

        if self.cs2d.airfoil.points.shape[0] > 0:
            self.iCPs = []
            self.WCPs = []
            self.iWCPs = []
            self.CPs = []

        self.total_points_input = self.total_points

        try:
            self.compute_max_layers()
            self.compute_airfoil()
            # self.flatback()
            # self.adjust_te_panels()
            self.output_te_ratio()
            self.add_shearweb_nodes()
            self.create_elements()
            self.create_elements_3d(reverse_normals=False)
            self.write_abaqus_inp()
            self.write_becas_inp()
        except Exception:
            self._logger.info('CS2DtoBECAS failed building BECAS mesh')

        self._logger.info('CS2DtoBECAS calc time: % 10.6f seconds' %
                          (time.time() - tt))

    def compute_max_layers(self):
        """
        The number of elements used to discretize the shell thickness.
        The minimum value is the largest number of layers of different
        material anywhere in the airfoil.
        """

        self.max_thickness = 0.
        for r_name in self.cs2d.regions:
            r = getattr(self.cs2d, r_name)
            self.max_layers = max(self.max_layers, len(r.layers))

            if r.thickness == 0.:
                # in case the total layer thickness for each region is not already computed
                for l_name in r.layers:
                    lay = getattr(r, l_name)
                    r.thickness += lay.thickness
            if r.thickness > self.max_thickness:
                self.max_thickness = r.thickness

    def compute_airfoil(self, debug=False):
        """Redistributed mesh points evenly among regions

        After defining different regions this method will assure that, given
        a total number of mesh points, the cell size in each region is similar.
        The region boundaries and shear web positions will be approximately
        on the same positions as defined.

        Region numbers can be added arbitrarily.
        """

        af = self.cs2d.airfoil

        if np.linalg.norm(self.cs2d.airfoil.points[0] -
                          self.cs2d.airfoil.points[-1]) > 0.:
            self.open_te = True

        # construct distfunc for airfoil curve
        dist = []

        # the distribution function only cares about control points and does
        # not have any notion of the regions. Prepare a sorted list for all
        # the control points, including the webs
        # add first point already: the TE
        CPs_s, WCPs_s = [-1], []
        self.iCPs.append(0)

        # because we want an evenly distributed mesh, calculate the cell
        # size note that distfunc ds does not relate to self._af.smax for
        # its length
        ds_const = 1.0 / self.total_points
        if ds_const * af.length < 1.2 * self.max_thickness:
            ds_old = ds_const
            new_ds = 1.2 * self.max_thickness
            self.total_points = np.maximum(int(af.length / new_ds), 70)
            ds_const = 1. / self.total_points
            self._logger.info('Increasing cell size from %5.3f to %5.3f '
                              'and reducing number of elements to %i' %
                              (ds_old, ds_const, self.total_points))
        self.ds_const = ds_const
        # add first point, TE, s0=0 in AirfoilShape curve fraction coordinates
        # dist.append([0.0, ds_const, 1])
        self.CPs.append(af.interp_s(af.s_to_01(-1.)))
        # keep track of all CPs and WCPs in one dictionary
        allCPs = {}
        # cycle through all the regions
        for name in self.cs2d.regions:
            r = getattr(self.cs2d, name)
            # save the coordinates of the CPs that served as inputs
            self.CPs.append(af.interp_s(af.s_to_01(r.s1)))
            # save to the dictionary for sorting
            if r.s1 in CPs_s:
                raise UserWarning, "Each CP's s1 value should be unique"
            CPs_s.append(r.s1)
            allCPs[r.s1] = r
        # and now the webs, if any
        for name in self.cs2d.webs:
            w = getattr(self.cs2d, name)
            if w.thickness == 0.: continue

            WCPs_s.append(w.s0)
            WCPs_s.append(w.s1)
            # save the coordinates of the WCPs that served as inputs
            self.WCPs.append(af.interp_s(af.s_to_01(w.s0)))
            self.WCPs.append(af.interp_s(af.s_to_01(w.s1)))
            # web control points are allowed to coincide with CP's. If so,
            # there is no need to add another control point
            if w.s0 not in allCPs:
                allCPs[w.s0] = w
            if w.s1 not in allCPs:
                allCPs[w.s1] = w

        # now sort the list so we can properly construct a Curve
        dist_ni = 0
        sorted_allCPs_keys = sorted(allCPs.keys())
        for r_nr, s in enumerate(sorted_allCPs_keys):
            r = allCPs[s]
            # keep track of region start and end points separately since
            # webs can interrupt the regions
            s_end = s
            if r_nr == 0:
                # the starting point of the curve was already added
                s_start = -1
            else:
                # the start is necessarily the end of the previous region,
                # because a WEB doesn't have a start of its own. Consequently,
                # just rely on the sorted allCPs list.
                s_start = sorted_allCPs_keys[r_nr - 1]

            # only set the starting index of a region if the current point
            # actually belongs to a region and not a shear web
            if not r.name.lower().startswith('web'):
                # explicitly take the previous region end point
                r.s0_i = self.iCPs[-1]

            if debug:
                print '% 6.3f  % 6.3f  %8s' % (s_start, s_end, r.name)

            # for AirfoilShape we want surface fractions between 0 and 1
            s_start_ = af.s_to_01(s_start)
            s_end_ = af.s_to_01(s_end)

            # find the number of points required for this region;
            # dist_ni refers to the total number of nodes on the
            # curve when at the end of the region (s1)
            dist_ni += max(1, int(round((s_end_ - s_start_) / ds_const)))
            # add a distribution point to the Curve
            dist.append([s_end_, ds_const, dist_ni])

            # save the index of the end point of the region
            # is it a region, or s0 or s1 of a web?
            if r.name.lower().startswith('web'):
                if s_end == r.s0:
                    r.s0_i = dist_ni - 1
                elif s_end == r.s1:
                    r.s1_i = dist_ni - 1
            else:
                # some CPs might coincide with WCPs, in that case it would
                # not have been in allCPs
                if s_end in WCPs_s:
                    # now figure out to which region the current s_end belongs
                    for w_name in self.cs2d.webs:
                        w = getattr(self.cs2d, w_name)
                        if s_end == w.s0:
                            w.s0_i = dist_ni - 1
                        elif s_end == w.s1:
                            w.s1_i = dist_ni - 1
                # but also still add s1_i to the region object
                r.s1_i = dist_ni - 1
            # and save the index for later reference in a convenient list
            if r.s1 in CPs_s:
                self.iCPs.append(dist_ni - 1)
            # be advised, a CP can also live on the web
            if r.s1 in WCPs_s:
                self.iWCPs.append(dist_ni - 1)

        # before executing, make sure all the points are in increasing order
        if np.diff(np.array(dist)[:, 0]).min() <= 0:
            raise ValueError, 'Points are not continuously increasing'
        afn = af.redistribute(dist_ni, dist=dist)
        # get the redistributed points
        self.airfoil = afn.points
        self.total_points = self.airfoil.shape[0]
        self.CPs_s = CPs_s
        self.WCPs_s = WCPs_s

    def mirror_airfoil(self):
        """
        Mirror the airfoil: multiply the x axis by -1.
        """

        offset = 0.0

        self.web_coord[:, 0] *= -1.0
        self.airfoil[:, 0] *= -1.0

        # also for the control points
        for cp in self.CPs:
            cp[0] *= -1.0
            cp[0] += offset
        for wcp in self.WCPs:
            wcp[0] *= -1.0
            wcp[0] += offset

        if self.te_le_orientation == 'right-to-left':
            self.te_le_orientation = 'left-to-right'
        else:
            self.te_le_orientation = 'right-to-left'

    def add_shearweb_nodes(self):
        """
        Distribute nodes over the shear web. Use the same spacing as used for
        the airfoil nodes.
        """

        self.nr_webs = 0
        self.web_coord = np.array([])
        # find the thickness in the TE region in order to close the TE with a web: first and last region
        r_name = self.cs2d.regions[-1]
        r_TE_suc = getattr(self.cs2d, r_name)
        r_name = self.cs2d.regions[0]
        r_TE_pres = getattr(self.cs2d, r_name)
        # add a 50% thickness safety factor
        TE_thick_max = (r_TE_pres.thickness + r_TE_suc.thickness) * 1.5
        for w_name in self.cs2d.webs:
            w = getattr(self.cs2d, w_name)
            if w.thickness == 0.: continue
            self.nr_webs += 1
            # at this point there are no nodes on the shear web itself
            # add a node distribution on the shear web too, and base the node
            # spacing on the average spacing ds on the airfoil
            # TODO: shouldn't the shear web elements be assigned in
            # compute_airfoil?
            ds_mean = np.maximum(self.ds_const * self.cs2d.airfoil.length,
                                 self.max_thickness * 1.2)
            node1 = self.airfoil[w.s0_i, :]
            node2 = self.airfoil[w.s1_i, :]
            # the length of the shear web is then
            len_web = np.linalg.norm(node1 - node2)
            nr_points = max(int(round(len_web / ds_mean, 0)), 3)
            # generate nodal coordinates on the shear web
            if TE_thick_max > len_web and self.nr_webs == 1:
                # if a web is used to close the TE and the web is very
                # short, no extra nodes are placed along the web to avoid mesh issues
                x = np.array([node1[0], node2[0]])
                y = np.array([node1[1], node2[1]])
            else:
                x = np.linspace(node1[0], node2[0], nr_points)
                y = np.linspace(node1[1], node2[1], nr_points)
            # and add them to the shear web node collection, but ignore the
            # first and last nodes because they are already included in
            # the airfoil coordinates.
            # For small arrays this is slightly faster, but for big arrays
            # (which is already true for 30 items and up) it is better to first
            # create them instead of transposing
            # tmp = np.array([x[1:-1], y[1:-1]]).transpose()
            tmp = np.ndarray((len(x) - 2, 2))
            tmp[:, 0] = x[1:-1]
            tmp[:, 1] = y[1:-1]
            # remember to start and stop indices for the shear web nodes
            w.w0_i = len(self.web_coord)
            w.w1_i = len(tmp) + w.w0_i - 1
            try:
                self.web_coord = np.append(self.web_coord, tmp, axis=0)
            except ValueError:
                self.web_coord = tmp.copy()

    def adjust_te_panels(self):
        """
        adjust the thickness of the trailing edge panels according
        to the thickness of the trailing edge

        """

        if not self.open_te: return

        # pressure and suction side panels
        dTE = np.abs(self.airfoil[-1, 1] - self.airfoil[0, 1]) / 3.
        r_name = self.cs2d.regions[-1]
        r_TE_suc = getattr(self.cs2d, r_name)
        r_name = self.cs2d.regions[0]
        r_TE_pres = getattr(self.cs2d, r_name)
        thick_max = (r_TE_pres.thickness + r_TE_suc.thickness) / 2.
        ratio = thick_max / dTE
        self._logger.info('TE panel ratio %f %f %f' %
                          (self.cs2d.s, dTE * 3., ratio))
        if ratio > 1.:
            for lname in r_TE_suc.layers:
                layer = getattr(r_TE_suc, lname)
                layer.thickness = layer.thickness / ratio
            for lname in r_TE_pres.layers:
                layer = getattr(r_TE_pres, lname)
                layer.thickness = layer.thickness / ratio
            r_TE_suc.thickness /= ratio
            r_TE_pres.thickness /= ratio

        # trailing edge "web"
        for name in self.cs2d.webs:
            TEw = getattr(self.cs2d, name)
            if TEw.s0 in [-1., 1.]:
                dTE = dTE * 2.
                ratio = r_TE_suc.thickness / TEw.thickness
                for lname in TEw.layers:
                    layer = getattr(TEw, lname)
                    layer.thickness = layer.thickness * ratio
                TEw.thickness *= ratio
                break

    def flatback(self):
        """
        Instead of removing some meshing points, make the TE region as thick
        as the total defined layer thickness in that region.
        """

        # find the thickness in the TE region: first and last region
        r_name = self.cs2d.regions[-1]
        r_TE_suc = getattr(self.cs2d, r_name)
        r_name = self.cs2d.regions[0]
        r_TE_pres = getattr(self.cs2d, r_name)
        # add 10% margin as well for safety
        thick_max = (r_TE_pres.thickness + r_TE_suc.thickness) * 1.1

        # and enforce that thickness on the trailing edge node suction side
        # first, define the trailing edge vector.
        if np.allclose(self.airfoil[-1, :], self.airfoil[0, :]):
            # when TE suction = TE pressure, move upwards vertically
            flatback_thick = 0
            flatback_vect_norm = np.array([0, 1])
        else:
            flatback_vect = self.airfoil[-1, :] - self.airfoil[0, :]
            flatback_thick = np.linalg.norm(flatback_vect)
            flatback_vect_norm = flatback_vect / flatback_thick
        if flatback_thick < thick_max:
            dt_thick = thick_max - flatback_thick
            # add missing thickness by moving the TE suction side upwards
            # along the flatback vector
            self.airfoil[-1, :] += dt_thick * flatback_vect_norm * 0.5
            self.airfoil[0, :] -= dt_thick * flatback_vect_norm * 0.5

    def output_te_ratio(self):
        """
        outputs a ratio between the thickness of the trailing edge panels
        and the thickness of the trailing edge

        """

        if not self.open_te:
            self.te_ratio = 0.
            return

        # pressure and suction side panels
        dTE = np.abs(self.airfoil[-1, 1] - self.airfoil[0, 1]) / 2.
        r_name = self.cs2d.regions[-1]
        r_TE_suc = getattr(self.cs2d, r_name)
        r_name = self.cs2d.regions[0]
        r_TE_pres = getattr(self.cs2d, r_name)
        thick_max = (r_TE_pres.thickness + r_TE_suc.thickness) / 2.
        self.te_ratio = thick_max / dTE
        self._logger.info('TE ratio %f %f %f' %
                          (self.cs2d.s, dTE * 2., self.te_ratio))

    def _check_TE_thickness(self):
        """
        The last point before the trailing edge should still have a thickness
        that is equal to or higher than the total layer thickness (suction and
        pressure side combined).

        Two solutions: either reduce the layer thickness, or more simple,
        move the last point before the TE forward (direction of LE) so the
        thickness over that mesh point increases.

        This method looks for the point where the layer thickness equals the
        chord thickness and takes that as the last mesh point before the TE.

        Possible problems arise if a whole region needs to be cut down.
        Also, all the indices in the regions have to be changed...
        From here onwards the two meshes will be disconnected.
        This approach will cause problems if there still is significant
        curvature in this trailing edge area. Maybe a boundary condition
        is more appropriate?
        Possibly verify if the reduced number of points results in a loss
        of area accuracy compared to the original input coordinates
        """

        # find the local thickness of the last trailing edge points
        # TE region is here defined as 15% of the chord
        # FIXME: this approach assumes the thickness can be approximated by
        # the distance between the mesh points with equal index offset from
        # the TE. However, this is by no means guaranteed by the mesh, and will
        # in some cases result in a much higher perceived thickness at the TE.
        nr = int(round(len(self.airfoil) * 0.15, 0))
        # calculate the thickness at each pair of nodes
        deltas = self.airfoil[1:nr] - self.airfoil[-nr:-1][::-1]
        if np.__version__ >= '1.8.0':
            thick_loc = np.linalg.norm(deltas, axis=1)
        else:
            thick_loc = np.ndarray((deltas.shape[0], ))
            for i, d in enumerate(deltas):
                thick_loc[i] = np.linalg.norm(d)

        # find the thickness in the TE region: first and last region
        r_name = self.cs2d.regions[-1]
        r_TE_suc = getattr(self.cs2d, r_name)
        r_name = self.cs2d.regions[0]
        r_TE_pres = getattr(self.cs2d, r_name)
        # add 10% margin as well
        thick_max = (r_TE_pres.thickness + r_TE_suc.thickness) * 1.1

        # TODO: before removing we should check what happens if we by accident
        # remove a control point and/or a complete region

        # and see how many nodes we need to ditch in the TE area
        # delete one extra node just to be sure
        nr_remove = thick_loc.__gt__(thick_max).argmax() + 1
        sel = np.ndarray((self.airfoil.shape[0], ), dtype=np.bool)
        sel[:] = True
        sel[1:nr_remove + 1] = False
        sel[-nr_remove - 1:-1] = False
        self.airfoil = self.airfoil[sel, :]

        print "number of removed mesh points at TE: %i" % nr_remove

        # and correct all the indices
        for name in self.cs2d.regions:
            r = getattr(self.cs2d, name)
            if r.s0_i >= nr_remove:
                r.s0_i -= nr_remove
                r.s1_i -= nr_remove
            elif r.s1_i >= nr_remove:
                r.s1_i -= nr_remove
        # additionally, the TE on the suction side also loses on top of that
        r = getattr(self.cs2d, self.cs2d.regions[-1])
        r.s1_i -= nr_remove

        for name in self.cs2d.webs:
            w = getattr(self.cs2d, name)
            w.s0_i -= nr_remove
            w.s1_i -= nr_remove
        for i in xrange(len(self.iWCPs)):
            self.iWCPs[i] -= (nr_remove)
        # first and last are the TE
        for i in xrange(len(self.iCPs[1:])):
            self.iCPs[i + 1] -= (nr_remove)

        # TODO: this should follow the same procedure as for the regions
        self.iCPs[-1] -= nr_remove

    def create_elements(self, debug=False):
        """
        Create the elements and assign element sets to the different regions.

        Assign node and element numbers for the current airfoil points and
        shear webs. Since the airfoil coordinates are ordered clockwise
        and continuous, the node and element numbering is trivial.

        Note when referring to node and element numbers array indices are used.
        BECAS uses 1-based counting instead of zero-based.
        """

        # by default, the node and element numbers are zero based numbering
        self.onebasednumbering = False

        # convert to BECAS standards if necessary
        # if self.te_le_orientation == 'right-to-left':
        #     self.mirror_airfoil()
        #     print 'forced airfoil input coordinates orientation left-to-right'

        # element definitions for 1D line elements
        # line_element_definitions = {}
        # corresponds to self.elements (index is the element number)

        # element numbers for each ELSET
        self.elset_defs = {}

        nr_air_n = len(self.airfoil)
        nr_web_n = len(self.web_coord)
        nr_nodes = nr_air_n + nr_web_n
        # for closed TE, nr_elements = nr_nodes, for open TE, 1 element less
        if self.open_te:
            nr_elements = nr_nodes + len(self.cs2d.webs) - 1
            nr_air_el = nr_air_n - 1
        else:
            nr_elements = nr_nodes + len(self.cs2d.webs)
            nr_air_el = nr_air_n

        # place all nodal coordinates in one array. The elements are defined
        # by the node index.
        self.nodes = np.zeros((nr_nodes, 3))
        self.nodes[:nr_air_n, :2] = self.airfoil[:, :]
        self.nodes[nr_air_n:, :2] = self.web_coord

        # Elements are bounded by two neighbouring nodes. By closing the
        # circle (connecting the TE suction side with the pressure side), we
        # have as many elements as there are nodes on the airfoil
        # elements[element_nr, (node1,node2)]: shape=(n,2)
        # for each web, we have nr_web_nodes+1 number of elements
        self.elements = np.ndarray((nr_elements, 2), dtype=np.int)
        if self.open_te:
            self.elements[:nr_air_el, 0] = np.arange(nr_air_n - 1,
                                                     dtype=np.int)
            self.elements[:nr_air_el, 1] = self.elements[:nr_air_el, 0] + 1
        else:
            # when the airfoil is closed, add one node number too much...
            self.elements[:nr_air_el, 0] = np.arange(nr_air_n, dtype=np.int)
            self.elements[:nr_air_el, 1] = self.elements[:nr_air_el, 0] + 1
            # last node on last element is first node, airfoil is now closed
            self.elements[nr_air_el - 1, 1] = 0

        if debug:
            print 'nr airfoil nodes: %4i' % (len(self.airfoil))
            print '    nr web nodes: %4i' % len(self.web_coord)

        web_el = []
        pre_side, suc_side = [], []

        # compute TE panel angle
        v0 = np.array(self.CPs[1] - self.CPs[0])
        v1 = np.array(self.CPs[-2] - self.CPs[-1])
        self.TEangle = np.arccos(
            np.dot(v0, v1) /
            (np.linalg.norm(v0) * np.linalg.norm(v1))) * 180. / np.pi

        self._logger.info('TE angle = %3.3f' % self.TEangle)

        # keep track of elements that have been added on the shear webs
        el_offset = 0
        # define el for each shear web, and create corresponding node groups
        for w_name in self.cs2d.webs:
            # starting index for the elements of the web
            iw_start = nr_air_el + el_offset

            w = getattr(self.cs2d, w_name)
            w.is_TE = False
            if w.thickness == 0.: continue

            # number of intermediate shear web elements (those that are not
            # connected to the airfoil)
            nr_el = w.w1_i - w.w0_i

            # define start/stop element indices
            w.e0_i = iw_start
            w.e1_i = nr_el + iw_start + 1

            # shear web nodes run from w.s0 towards w.s1
            # first element is connected to the starting shear web point that
            # sits on the airfoil: nr_el+1
            self.elements[w.e0_i, :] = [w.s0_i, w.w0_i + nr_air_n]

            # elements in between
            wnodes = np.arange(w.w0_i, w.w1_i, dtype=np.int) + nr_air_n
            self.elements[w.e0_i + 1:w.e1_i, 0] = wnodes
            self.elements[w.e0_i + 1:w.e1_i, 1] = wnodes + 1
            # and the final element that connects the web back to the airfoil
            # nr_el+2
            self.elements[w.e1_i, :] = [w.w1_i + nr_air_n, w.s1_i]

            if debug:
                print '%s i_el start: %i' % (w_name, iw_start)
                print '%4i %4i %4i' % (iw_start, w.s0_i, w.w0_i + nr_air_n)
                print '%4i %4i %4i' % (w.e1_i, w.w1_i + nr_air_n, w.s1_i)

            # and now we can populate the different regions with their
            # corresponding elements
            if w.s0 in [-1., 1.] and abs(self.TEangle) > 150.:
                w.is_TE = True
                self.elset_defs[w_name] = np.array([w.e0_i, w.e1_i] + [0],
                                                   dtype=int)
                suc_side.extend([w.e0_i, w.e1_i + 1] + [1])
                self._logger.info('TE web identified! s=%3.3f %s %i %i' %
                                  (self.cs2d.s, w_name, w.s0_i, w.s1_i))
            else:
                self.elset_defs[w_name] = np.arange(w.e0_i,
                                                    w.e1_i + 1,
                                                    dtype=np.int)
                web_el.extend(range(w.e0_i, w.e1_i + 1))

            # add the number of elements added for this web
            el_offset += nr_el + 2

        if len(web_el) > 0:
            self.elset_defs['WEBS'] = np.array(web_el, dtype=np.int)

        # element groups for the regions
        for r_name in self.cs2d.regions:
            r = getattr(self.cs2d, r_name)
            # do not include element r.s1_i, that is included in the next elset
            self.elset_defs[r_name] = np.arange(r.s0_i, r.s1_i, dtype=np.int)

            # group in suction and pressure side (s_LE=0)
            if r.s1 <= 0:
                pre_side.extend([r.s0_i, r.s1_i])
            else:
                suc_side.extend([r.s0_i, r.s1_i])

        tmp = np.array(list(pre_side) + list(suc_side))
        pre0, pre1 = tmp.min(), tmp.max()
        self.elset_defs['SURFACE'] = np.arange(pre0, pre1, dtype=np.int)

        # the last region and the suction side do not include the last element
        # for flatback airfoils. Fix this here.
        # TODO: now the region object s1_i is not updated. Is that relevant?
        # Or could that break things since the index start-stop is not increasing
        # if not self.open_te:
        #     r_name = self.cs2d.regions[-1]
        #     r = getattr(self.cs2d, r_name)
        #     # last region is the suction side trailing edge
        #     elset = self.elset_defs[r_name]
        #     self.elset_defs[r_name] = np.append(elset, np.array([nr_air_n-1]))
        #     # suction side
        #     elset = self.elset_defs['SUCTION_SIDE']
        #     self.elset_defs['SUCTION_SIDE'] = np.append(elset,
        #                                                 np.array([nr_air_n-1]))

    def create_elements_3d(self, reverse_normals=False):
        """
        Shellexpander wants a 3D section as input. Create a 3D section
        which is just like the 2D version except with a depth defined as 1%
        of the chord length.
        """

        # Compute depth of 3D mesh as 1% of chord length
        depth = -0.01 * self.cs2d.airfoil.chord
        if reverse_normals:
            depth = depth * (-1.0)

        nr_nodes_2d = len(self.nodes)
        # Add nodes for 3D mesh
        self.nodes_3d = np.ndarray((nr_nodes_2d * 2, 3))
        self.nodes_3d[:nr_nodes_2d, :] = self.nodes
        self.nodes_3d[nr_nodes_2d:, :] = self.nodes + np.array([0, 0, depth])
        # Generate shell elements
        self.el_3d = np.ndarray((len(self.elements), 4), dtype=np.int)
        self.el_3d[:, :2] = self.elements
        self.el_3d[:, 2] = self.elements[:, 1] + nr_nodes_2d
        self.el_3d[:, 3] = self.elements[:, 0] + nr_nodes_2d

        # same as line_element_definitions, but now expanded over thickness
        # to create a 3D shell element
        # element_definitions = {}
        # corresponds to self.el_3d

    def one_based_numbering(self):
        """
        Switch to one-based numbering: the first node and element number
        becomes 1 instead of 0, i.e. all node and element numbers are
        incremented by 1.

        Note that this does not affect the indices defined in the region
        and web attributes.
        """
        if not self.onebasednumbering:

            self.elements += 1
            self.el_3d += 1
            for elset in self.elset_defs:
                self.elset_defs[elset] += 1

            for name in self.cs2d.regions:
                r = getattr(self.cs2d, name)
                r.s0_i += 1
                r.s1_i += 1
            # webs refer to indices in self.web_coord, which is independent
            # of self.airfoil
#            for name in self._webs:
#                w = getattr(self, name)
#                w.s0_i += 1
#                w.s1_i += 1
            for i in xrange(len(self.iWCPs)):
                self.iWCPs[i] += 1
            for i in xrange(len(self.iCPs)):
                self.iCPs[i] += 1

            self.onebasednumbering = True

    def zero_based_numbering(self):
        """
        switch back to 0 as first element and node number
        """

        if self.onebasednumbering:

            self.elements -= 1
            self.el_3d -= 1
            for elset in self.elset_defs:
                self.elset_defs[elset] -= 1

            for name in self.cs2d.regions:
                r = getattr(self.cs2d, name)
                r.s0_i -= 1
                r.s1_i -= 1
            # webs refer to indices in self.web_coord, which is independent
            # of self.airfoil
#            for name in self._webs:
#                w = getattr(self, name)
#                w.s0_i -= 1
#                w.s1_i -= 1
            for i in xrange(len(self.iWCPs)):
                self.iWCPs[i] -= 1
            for i in xrange(len(self.iCPs)):
                self.iCPs[i] -= 1

            self.onebasednumbering = False

    def check_airfoil(self):

        for rname in self.cs2d.regions:
            r = getattr(self.cs2d, rname)
            print 'Checking %s' % rname
            for lname in r.layers:
                print '    Checking %s' % lname
                l = getattr(r, lname)
                if l.thickness <= 0.:
                    print 'ERROR! Layer %s in Region %s has zero or negative thickness: %f' % (
                        lname, rname, l.thickness)
        for rname in self.cs2d.webs:
            print 'Checking %s' % rname
            r = getattr(self.cs2d, rname)
            for lname in r.layers:
                print '    Checking %s' % lname
                l = getattr(r, lname)
                if l.thickness <= 0.:
                    print 'ERROR! Layer %s in Region %s has zero or negative thickness: %f' % (
                        lname, rname, l.thickness)

    def write_abaqus_inp(self, fname=False):
        """Create Abaqus inp file which will be served to shellexpander so
        the actual BECAS input can be created.
        """
        def write_n_int_per_line(list_of_int, f, n):
            """Write the integers in list_of_int to the output file - n integers
            per line, separated by commas"""
            i = 0
            for number in list_of_int:
                i = i + 1
                f.write('%d' % (number))
                if i < len(list_of_int):
                    f.write(',  ')
                if i % n == 0:
                    f.write('\n')
            if i % n != 0:
                f.write('\n')

        self.abaqus_inp_fname = 'airfoil_abaqus.inp'

        # FIXME: for now, force 1 based numbering, I don't think shellexpander
        # and/or BECAS like zero based node and element numbering
        self.one_based_numbering()

        # where to start node/element numbering, 0 or 1?
        if self.onebasednumbering:
            off = 1
        else:
            off = 0

        with open(self.abaqus_inp_fname, 'w') as f:

            # Write nodal coordinates
            f.write('**\n')
            f.write('********************\n')
            f.write('** NODAL COORDINATES\n')
            f.write('********************\n')
            f.write('*NODE\n')
            tmp = np.ndarray((len(self.nodes_3d), 4))
            tmp[:, 0] = np.arange(len(self.nodes_3d), dtype=np.int) + off
            tmp[:, 1:] = self.nodes_3d
            np.savetxt(f, tmp, fmt='%1.0f, %1.8e, %1.8e, %1.8e')

            # Write element definitions
            f.write('**\n')
            f.write('***********\n')
            f.write('** ELEMENTS\n')
            f.write('***********\n')
            f.write('*ELEMENT, TYPE=S4, ELSET=%s\n' % self.section_name)
            tmp = np.ndarray((len(self.el_3d), 5))
            tmp[:, 0] = np.arange(len(self.el_3d), dtype=np.int) + off
            tmp[:, 1:] = self.el_3d
            np.savetxt(f, tmp, fmt='%i, %i, %i, %i, %i')

            # Write new element sets
            f.write('**\n')
            f.write('***************\n')
            f.write('** ELEMENT SETS\n')
            f.write('***************\n')
            for elset in sorted(self.elset_defs.keys()):
                elements = self.elset_defs[elset]
                f.write('*ELSET, ELSET=%s\n' % (elset))
                #                np.savetxt(f, elements, fmt='%i', delimiter=', ')
                write_n_int_per_line(list(elements), f, 8)

            # Write Shell Section definitions
            # The first layer is the outer most layer.
            # The second item ("int. points") and the fifth item ("plyname")
            # are not relevant. They are kept for compatibility with the ABAQUS
            # input syntax. As an example, take this one:
            # [0.006, 3, 'TRIAX', 0.0, 'Ply01']
            f.write('**\n')
            f.write('****************************\n')
            f.write('** SHELL SECTION DEFINITIONS\n')
            f.write('****************************\n')
            for i, r_name in enumerate(self.cs2d.regions + self.cs2d.webs):
                r = getattr(self.cs2d, r_name)
                text = '*SHELL SECTION, ELSET=%s, COMPOSITE, OFFSET=-0.5\n'
                f.write(text % (r_name))
                for l_name in r.layers:
                    lay = getattr(r, l_name)
                    md = self.cs2d.materials[lay.materialname.lower()]
                    if md.failure_criterium == 'maximum_stress':
                        mname = lay.materialname + 'MAXSTRESS'
                    elif md.failure_criterium == 'maximum_strain':
                        mname = lay.materialname + 'MAXSTRAIN'
                    elif md.failure_criterium == 'tsai_wu':
                        mname = lay.materialname + 'TSAIWU'
                    else:
                        mname = lay.materialname
                    if lay.plyname == '': lay.plyname = 'ply%02d' % i

                    layer_def = (lay.thickness, 3, mname, lay.angle,
                                 lay.plyname)
                    f.write('%g, %d, %s, %g, %s\n' % layer_def)

            # Write material properties
            f.write('**\n')
            f.write('**********************\n')
            f.write('** MATERIAL PROPERTIES\n')
            f.write('**********************\n')
            for matname in sorted(self.cs2d.materials.keys()):
                md = self.cs2d.materials[matname]
                if md.failure_criterium == 'maximum_stress':
                    mname = matname + 'MAXSTRESS'
                elif md.failure_criterium == 'maximum_strain':
                    mname = matname + 'MAXSTRAIN'
                elif md.failure_criterium == 'tsai_wu':
                    mname = matname + 'TSAIWU'
                else:
                    mname = matname
                f.write('*MATERIAL, NAME=%s\n' % (mname))
                f.write('*ELASTIC, TYPE=ENGINEERING CONSTANTS\n')
                f.write('%g, %g, %g, %g, %g, %g, %g, %g\n' %
                        (md.E1, md.E2, md.E3, md.nu12, md.nu13, md.nu23,
                         md.G12, md.G13))
                f.write('%g\n' % (md.G23))
                f.write('*DENSITY\n')
                f.write('%g\n' % (md.rho))
                f.write('*FAIL STRESS\n')
                gMa = md.gM0 * (md.C1a + md.C2a + md.C3a + md.C4a)
                f.write('%g, %g, %g, %g, %g\n' %
                        (gMa * md.s11_t, gMa * md.s11_c, gMa * md.s22_t,
                         gMa * md.s22_c, gMa * md.t12))
                f.write('*FAIL STRAIN\n')
                f.write('%g, %g, %g, %g, %g\n' %
                        (gMa * md.e11_t, gMa * md.e11_c, gMa * md.e22_t,
                         gMa * md.e22_c, gMa * md.g12))
                f.write('**\n')
        print 'Abaqus input file written: %s' % self.abaqus_inp_fname

    def write_becas_inp(self):
        """
        Once write_abaqus_inp has been executed, the shellexpander script can
        be run on the resulting file to create the BECAS input.

        Dominant regions should be the spar caps.
        """
        class args:
            pass

        # the name of the input file containing the finite element shell model
        args.inputfile = self.abaqus_inp_fname  #--in
        # The element sets to be considered (required). If more than one
        # element set is given, nodes appearing in more the one element sets
        # are considered "corners". Should be pressureside, suction side and
        # the webs
        elsets = []
        target_sets = ['SURFACE', 'WEBS']
        for elset in target_sets:
            if elset in self.elset_defs:
                elsets.append(elset)
        if len(elsets) < 1:
            raise ValueError('badly defined element sets: none of %s found' % target_sets)
        args.elsets = elsets  #--elsets, list
        args.sections = self.section_name  #--sec
        args.layers = self.max_layers  #--layers
        args.nodal_thickness = 'min'  #--ntick, choices=['min','max','average']
        # TODO: take the most thick regions (spar caps) as dominant
        args.dominant_elsets = self.dominant_elsets  #--dom, list
        args.centerline = None  #--cline, string
        args.becasdir = self.becas_inputs  #--bdir
        args.debug = False  #--debug, if present switch to True

        import imp
        shellexpander = imp.load_source(
            'shellexpander',
            os.path.join(self.path_shellexpander, 'shellexpander.py'))

        shellexpander.main(args)

    def plot_airfoil(self, ax, line_el_nr=True):
        """
        """

        #        if self.te_le_orientation == 'left-to-right':
        #            self.mirror_airfoil()

        nr = self.total_points_input
        points_actual = self.airfoil.shape[0] + self.web_coord.shape[0]
        title = '%i results in %i actual points' % (nr, points_actual)
        ax.set_title(title)
        # the original coordinates from the file
        ax.plot(self.cs2d.airfoil.points[:, 0],
                self.cs2d.airfoil.points[:, 1],
                'b--o',
                alpha=0.3,
                label='airfoil')
        ax.plot(self.airfoil[:, 0],
                self.airfoil[:, 1],
                'k-s',
                mfc='k',
                label='distfunc',
                alpha=0.3,
                ms=10)

        # plot all (web)control points
        ax.plot(np.array(self.CPs)[:, 0],
                np.array(self.CPs)[:, 1],
                'rx',
                markersize=7,
                markeredgewidth=1.2,
                label='CPs')
        ax.plot(np.array(self.WCPs)[:, 0],
                np.array(self.WCPs)[:, 1],
                'gx',
                markersize=7,
                markeredgewidth=1.2,
                label='WCPs')

        # where to start node/element numbering, 0 or 1?
        if self.onebasednumbering:
            off = 1
        else:
            off = 0

        # see if the indices to the control points are what we think they are
        iCPs = np.array(self.iCPs, dtype=np.int) - off
        iWCPs = np.array(self.iWCPs, dtype=np.int) - off
        x, y = self.airfoil[iCPs, 0], self.airfoil[iCPs, 1]
        ax.plot(x, y, 'r+', markersize=12, markeredgewidth=1.2, label='iCPs')
        x, y = self.airfoil[iWCPs, 0], self.airfoil[iWCPs, 1]
        ax.plot(x, y, 'g+', markersize=12, markeredgewidth=1.2, label='iWCPs')

        # plot the webs
        for iweb, w_name in enumerate(self.cs2d.webs):
            w = getattr(self.cs2d, w_name)
            webx = [self.airfoil[self.iWCPs[iweb] - off, 0]]
            weby = [self.airfoil[self.iWCPs[iweb] - off, 1]]
            webx.extend(self.web_coord[w.w0_i:w.w1_i + 1, 0])
            weby.extend(self.web_coord[w.w0_i:w.w1_i + 1, 1])
            webx.append(self.airfoil[self.iWCPs[-iweb - 1] - off, 0])
            weby.append(self.airfoil[self.iWCPs[-iweb - 1] - off, 1])
            ax.plot(webx, weby, 'g.-')


            # ax.plot(self.web_coord[w.w0_i:w.w1_i + 1, 0],
            #         self.web_coord[w.w0_i:w.w1_i + 1, 1], 'g.-')

        # debugging aid: print the element numbers and their node pairs
        # print 'nr airfoil nodes: %i' % len(self.airfoil)
        # print '    nr web nodes: %i' % len(self.web_coord)
        # print 'el_nr  node1 node2'
        # for nr, el in enumerate(self.elements):
        #     print '%5i  %5i %5i' % (nr, el[0], el[1])
        bbox = dict(
            boxstyle="round",
            alpha=0.8,
            edgecolor=(1., 0.5, 0.5),
            facecolor=(1., 0.8, 0.8),
        )

        # verify all the element numbers
        if line_el_nr:
            for nr, el in enumerate(self.elements):
                # -1 to account for 1 based element numbers instead of 0-based
                p1, p2 = self.nodes[el[0] - off, :], self.nodes[el[1] - off, :]
                x, y, z = (p1 + p2) / 2.0
                ax.text(x,
                        y,
                        str(nr),
                        fontsize=7,
                        verticalalignment='bottom',
                        horizontalalignment='center',
                        bbox=bbox)

        return ax
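As a side note on the ELSET writer above: write_n_int_per_line simply wraps a list of integers at n comma-separated values per line, which is the layout the element-set blocks in the .inp file use. A minimal standalone sketch of the same helper, with a made-up element list written to stdout:

import sys

def write_n_int_per_line(list_of_int, f, n):
    """Write the integers in list_of_int to f, n comma-separated values per line."""
    for i, number in enumerate(list_of_int, start=1):
        f.write('%d' % number)
        if i < len(list_of_int):
            f.write(',  ')
        if i % n == 0:
            f.write('\n')
    if len(list_of_int) % n != 0:
        f.write('\n')

# hypothetical element set: 20 consecutive element numbers, 8 per line
write_n_int_per_line(list(range(101, 121)), sys.stdout, 8)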
示例#11
0
class OffshorePlot(TopfarmComponent):
    wt_positions = Array(
        [],
        unit='m',
        iotype='in',
        desc='Array of wind turbines attached to particular positions')
    baseline = Array(
        [],
        unit='m',
        iotype='in',
        desc='Array of wind turbines attached to particular positions')
    borders = Array(iotype='in',
                    desc='The polygon defining the borders ndarray([n_bor,2])',
                    unit='m')
    depth = Array(iotype='in',
                  desc='An array of depth ndarray([n_d, 2])',
                  unit='m')
    foundations = Array(iotype='in',
                        desc='The foundation length of each wind turbine')
    #wt_dist = Array(iotype='in', desc="""The distance between each turbines ndarray([n_wt, n_wt]).""", unit='m')
    spiral_param = Float(5.0, iotype='in', desc='spiral parameter')
    png_name = Str('wind_farm',
                   iotype='in',
                   desc='The base of the png name used to save the fig')
    result_file = Str('wind_farm',
                      iotype='in',
                      desc='The base result name used to save the fig')
    distribution = Str('spiral',
                       iotype='in',
                       desc='The type of distribution to plot')
    elnet_layout = Dict(iotype='in')
    inc = 0
    fs = 15  #Font size

    def __init__(self, add_inputs, title='', **kwargs):
        super(OffshorePlot, self).__init__(**kwargs)
        self.fig = plt.figure(num=None, facecolor='w',
                              edgecolor='k')  #figsize=(13, 8), dpi=1000
        self.shape_plot = self.fig.add_subplot(121)
        self.objf_plot = self.fig.add_subplot(122)

        self.targname = add_inputs
        self.title = title

        # Adding automatically the inputs
        for i in add_inputs:
            self.add(i, Float(0.0, iotype='in'))

        #sns.set(style="darkgrid")
        #self.pal = sns.dark_palette("skyblue", as_cmap=True)
        plt.rc('lines', linewidth=1)
        plt.ion()
        self.force_execute = True
        if not pa('fig').exists():
            pa('fig').mkdir()

    def execute(self):
        plt.ion()
        if self.inc == 0:
            try:
                pa(self.result_file + '.results').remove()
            except:
                pass
            self.iterations = [self.inc]
            self.targvalue = [[getattr(self, i) for i in self.targname]]
            self.pre_plot()
        else:
            self.iterations.append(self.inc)
            self.targvalue.append([getattr(self, i) for i in self.targname])
            #print self.iterations,self.targvalue
        #if self.inc % (2*self.wt_positions.shape[0]) == 0:
        #self.refresh()
        #plt.show()
        self.save_plot('fig/' + self.png_name + 'layout%d.png' % (self.inc))
        self.inc += 1

    def pre_plot(self):

        plt.ion()
        #plt.show()
        ### Plot the water depth
        N = 100
        self.X, self.Y = plt.meshgrid(
            plt.linspace(self.depth[:, 0].min(), self.depth[:, 0].max(), N),
            plt.linspace(self.depth[:, 1].min(), self.depth[:, 1].max(), N))
        self.Z = plt.griddata(self.depth[:, 0],
                              self.depth[:, 1],
                              self.depth[:, 2],
                              self.X,
                              self.Y,
                              interp='linear')

        Zin = points_in_poly(self.X, self.Y, self.borders)
        self.Z.mask = Zin.__neg__()
        #Z.mask = False
        #Z.data[Zin.__neg__()] = -20.0

        display(plt.gcf())

        # def refresh(self):
        self.shape_plot.clear()
        self.shape_plot.contourf(self.X,
                                 self.Y,
                                 self.Z,
                                 10,
                                 vmax=self.depth[:, 2].max())  #, cmap=self.pal
        self.shape_plot.set_aspect('equal')
        self.shape_plot.autoscale(tight=True)

        Plot = lambda b, *args, **kwargs: self.shape_plot.plot(
            b[:, 0], b[:, 1], *args, **kwargs)
        if self.distribution == 'spiral':
            spiral = lambda t_, a_, x_: [a_ * t_**(1. / x_) * np.cos(t_),
                                         a_ * t_**(1. / x_) * np.sin(t_)]
            spirals = lambda ts_, a_, x_: np.array([spiral(t_, a_, x_)
                                                    for t_ in ts_])
            for P in self.baseline:
                Plot(P + spirals(plt.linspace(0., 10 * np.pi, 1000),
                                 self.spiral_param, 1.),
                     'g-',
                     linewidth=0.1)

        self.shape_plot.plot(self.borders[:, 0], self.borders[:, 1], 'k-')
        self.posi = self.shape_plot.plot(self.wt_positions[:, 0],
                                         self.wt_positions[:, 1], 'ro')
        self.plotel = self.shape_plot.plot(
            np.array([
                self.baseline[[i, j], 0] for i, j in self.elnet_layout.keys()
            ]).T,
            np.array([
                self.baseline[[i, j], 1] for i, j in self.elnet_layout.keys()
            ]).T,
            'y--',
            linewidth=1)
        #print self.plotel

        self.objf_plot.clear()
        targarr = np.array(self.targvalue)
        self.posb = []
        for i in range(targarr.shape[1]):
            self.posb.append(
                self.objf_plot.plot(self.iterations,
                                    self.targvalue[0][i],
                                    '.',
                                    label=self.targname[i]))
        print 'posb', self.posb
        self.legend = self.objf_plot.legend(loc=3, bbox_to_anchor=(1.1, 0.0))

        plt.title('Foundation = %8.2f' % (self.foundation_length))
        plt.draw()

    def save_plot(self, filename):
        plt.ion()
        targarr = np.array(self.targvalue)
        self.posi[0].set_xdata(self.wt_positions[:, 0])
        self.posi[0].set_ydata(self.wt_positions[:, 1])
        while len(self.plotel) > 0:
            self.plotel.pop(0).remove()
        self.plotel = self.shape_plot.plot(
            np.array([
                self.wt_positions[[i, j], 0]
                for i, j in self.elnet_layout.keys()
            ]).T,
            np.array([
                self.wt_positions[[i, j], 1]
                for i, j in self.elnet_layout.keys()
            ]).T,
            'y-',
            linewidth=1)
        for i in range(len(self.posb)):
            self.posb[i][0].set_xdata(self.iterations)
            self.posb[i][0].set_ydata(targarr[:, i])
            self.legend.texts[i].set_text('%s = %8.2f' %
                                          (self.targname[i], targarr[-1, i]))
        self.objf_plot.set_xlim([0, self.iterations[-1]])
        self.objf_plot.set_ylim([0.5, 1.2])
        if not self.title == '':
            plt.title('%s = %8.2f' % (self.title, getattr(self, self.title)))
        plt.draw()
        #print self.iterations[-1] , ': ' + ', '.join(['%s=%6.2f'%(self.targname[i], targarr[-1,i]) for i in range(len(self.targname))])
        with open(self.result_file + '.results', 'a') as f:
            f.write('%d:' % (self.inc) + ', '.join([
                '%s=%6.2f' % (self.targname[i], targarr[-1, i])
                for i in range(len(self.targname))
            ]) + '\n')
        #plt.show()
        #plt.savefig(filename)
        display(plt.gcf())
        #plt.show()
        clear_output(wait=True)
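For reference, the spiral overlay drawn in pre_plot is the curve r = a * t**(1/x) in polar form, sampled over t in [0, 10*pi] and offset to each baseline position. A small standalone sketch of that construction (the positions and spiral parameter below are made up):

import numpy as np

def spiral_points(a, x, n=1000, t_max=10.0 * np.pi):
    """Sample the spiral r = a * t**(1/x) and return an (n, 2) array of x, y points."""
    t = np.linspace(0.0, t_max, n)
    r = a * t**(1.0 / x)
    return np.column_stack((r * np.cos(t), r * np.sin(t)))

# hypothetical baseline turbine positions [m] and spiral parameter
baseline = np.array([[0.0, 0.0], [500.0, 250.0]])
spiral_param = 5.0
curves = [pos + spiral_points(spiral_param, 1.0) for pos in baseline]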
示例#12
0
class fuzzy_out_viz(Component):
    """ visualize the fuzzy outputs of the system
    """
    viz_on = Int(0,
                 iotype='in',
                 desc='flag to turn on and off visualization (0=off, 1=on)')

    system_inputs = Dict({}, iotype='in', desc='input dict from fuzzy sys')
    system_outputs = Dict({}, iotype='in', desc='output dict from fuzzy sys')
    runFlag_in = Int(0, iotype='in')

    input_mfs = Dict({}, iotype='in', desc='dict of fuzzy system inputs')
    output_mfs = Dict({}, iotype='in', desc='dict of fuzzy system outputs')
    runFlag_out = Int(0, iotype='out')

    def execute(self):

        if self.viz_on == 1:

            print 'Plotting Fuzzy System Result'
            print 'Inputs:', len(self.input_mfs), '  Input Values:', len(
                self.system_inputs)
            print 'Outputs:', len(self.output_mfs), '  Output Values:', len(
                self.system_outputs)

            plt.figure()
            i = 1

            print 'Plotting Inputs'
            for k1 in self.input_mfs:

                #plot each input against MFs
                plt.subplot(len(self.input_mfs) + len(self.output_mfs), 1, i)
                for k2 in self.input_mfs[k1].MFs:
                    plt.plot(self.input_mfs[k1].MFs[k2][0],
                             self.input_mfs[k1].MFs[k2][1])
                i = i + 1

                #plot input
                if isinstance(self.system_inputs[k1], list):
                    plt.plot(self.system_inputs[k1][0],
                             self.system_inputs[k1][1],
                             lw=3.0,
                             color='k')
                else:
                    plt.plot([self.system_inputs[k1], self.system_inputs[k1]],
                             [0, 1.0],
                             lw=3.0,
                             color='k')
                plt.ylabel(k1)
                plt.ylim([0, 1.1])
                plt.xlim([1, 9])

            print 'Plotting Outputs'
            #plot output against MFs
            for k1 in self.output_mfs:
                plt.subplot(len(self.input_mfs) + len(self.output_mfs), 1, i)
                for k2 in self.output_mfs[k1].MFs:
                    plt.plot(self.output_mfs[k1].MFs[k2][0],
                             self.output_mfs[k1].MFs[k2][1])
                i = i + 1
                plt.plot(self.system_outputs[k1][0],
                         self.system_outputs[k1][1],
                         lw=3.5,
                         color='b')
                plt.ylabel(k1)
                plt.ylim([0, 1.1])
                plt.xlim([1, 9])

            print 'Plots Generated'
            plt.show()
            self.runFlag_out = self.runFlag_in
class MonteCarlo(HasTraits):
    """ DOEgenerator that performs a random Design of Experiments with given
    distributions on all design variables. Plugs into the DOEgenerator socket
    on a DOEdriver."""

    implements(IDOEgenerator)

    # pylint: disable=E1101
    parameters = List(Str,
                      iotype='in',
                      desc='A list of names of variables to be included '
                      'in the Monte Carlo dataset.')

    num_parameters = Int(0,
                         iotype="in",
                         desc="number of parameters in the DOE "
                         "if the parameters are not explicitly defined")

    num_samples = Int(0,
                      iotype="in",
                      desc="number of total samples in the DOE")

    dist_types = Dict(key_trait=Str,
                      allow_none=True,
                      desc='Dictionary that provides mapping between '
                      'variables and the distribution type associated with '
                      'the variable in question.')

    dist_args = Dict(key_trait=Str,
                     allow_none=True,
                     desc='Dictionary that provides mapping between variables '
                     'and arguments that are required to define their '
                     'distributions. Keys should match those in '
                     'dist_types. Values should be lists.')

    def __init__(self, num_samples=None):
        super(MonteCarlo, self).__init__()
        self.num = 0
        if num_samples is not None:
            self.num_samples = num_samples

    def __iter__(self):
        """Return an iterator over our sets of input values"""
        return self

    def next(self):
        """Return next set of input values"""
        if self.num < self.num_samples:
            self.num += 1
            # create list of outputs. Append items to this as they are created
            outputs = []
            if self.parameters:
                #Create iteration output for explicit parameters
                for parameter in self.parameters:
                    if parameter in self.dist_types:
                        #make sure that dist_args is given
                        if parameter not in self.dist_args:
                            raise Exception(
                                "Parameters with specified distributions must "
                                "be supplied with input arguments in dist_args"
                            )
                        #compute values for given parameter. Append this to output list.
                        p_out = self.dist_types[parameter](
                            *self.dist_args[parameter])
                    else:
                        p_out = self.dist_types['Default'](
                            *self.dist_args['Default'])

                    # use isinstance here: comparing type() to a string is always False
                    # (ndarray assumed imported from numpy alongside array())
                    if isinstance(p_out, ndarray):
                        outputs += p_out.tolist()
                    else:
                        outputs.append(p_out)

            else:  #parameters is none: default to num_parameters and utilize defaults
                args = self.dist_args['Default']
                args.append(self.num_parameters)
                outputs = self.dist_types['Default'](*args)
            return array(outputs)
        else:
            raise StopIteration()
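A rough usage sketch for the MonteCarlo generator above, assuming numpy.random draw functions as the distribution callables (the parameter names are made up): dist_types maps each parameter to a callable, dist_args maps it to the positional arguments passed to that callable, and the 'Default' entry covers parameters without an explicit distribution.

import numpy as np

doe = MonteCarlo(num_samples=10)
doe.parameters = ['chord', 'twist']
# parameter -> callable returning one random draw
doe.dist_types = {'chord': np.random.normal, 'Default': np.random.uniform}
# parameter -> positional arguments for the callable above
doe.dist_args = {'chord': [3.0, 0.1],     # mean, standard deviation
                 'Default': [0.0, 1.0]}   # low, high

for sample in doe:
    print sample  # one array with a value per parameter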
示例#14
0
class MetaModel(Component):

    # pylint: disable-msg=E1101
    model = Slot(IComponent,
                 allow_none=True,
                 desc='Slot for the Component or Assembly being '
                 'encapsulated.')
    includes = List(Str,
                    iotype='in',
                    desc='A list of names of variables to be included '
                    'in the public interface.')
    excludes = List(Str,
                    iotype='in',
                    desc='A list of names of variables to be excluded '
                    'from the public interface.')

    warm_start_data = Slot(ICaseIterator,
                           iotype="in",
                           desc="CaseIterator containing cases to use as "
                           "initial training data. When this is set, all "
                           "previous training data is cleared and replaced "
                           "with data from this CaseIterator.")

    default_surrogate = Slot(ISurrogate,
                             allow_none=True,
                             desc="This surrogate will be used for all "
                             "outputs that don't have a specific surrogate "
                             "assigned to them in their sur_<name> slot.")

    surrogates = Dict(key_trait=Str,
                      value_trait=Slot(ISurrogate),
                      desc='surrogates for output variables')

    report_errors = Bool(
        True,
        iotype="in",
        desc="If True, metamodel will report errors reported "
        "from the component. If False, metamodel will swallow "
        "the errors but log that they happened and "
        "exclude the case from the training set.")

    recorder = Slot(ICaseRecorder, desc='Records training cases')

    # when fired, the next execution will train the metamodel
    train_next = Event(desc='Train metamodel on next execution')

    #when fired, the next execution will reset all training data
    reset_training_data = Event(desc='Reset training data on next execution')

    def __init__(self):
        super(MetaModel, self).__init__()
        self._surrogate_input_names = None
        self._surrogate_output_names = None
        # keeps track of which sur_<name> slots are full
        self._surrogate_overrides = set()
        self._training_data = {}
        self._training_input_history = []
        # dict of constant training inputs indices and their values
        self._const_inputs = {}
        self._train = False
        self._new_train_data = False
        self._failed_training_msgs = []
        # need to maintain separate copy of default surrogate for each sur_*
        # that doesn't have a surrogate defined
        self._default_surrogate_copies = {}

        # the following line will work for classes that inherit from MetaModel
        # as long as they declare their traits in the class body and not in
        # the __init__ function.  If they need to create traits dynamically
        # during initialization they'll have to provide the value of
        # _mm_class_traitnames
        self._mm_class_traitnames = set(self.traits(iotype=not_none).keys())

        self.on_trait_change(self._surrogate_updated, "surrogates_items")

    def _train_next_fired(self):
        self._train = True
        self._new_train_data = True

    def _reset_training_data_fired(self):
        self._training_input_history = []
        self._const_inputs = {}
        self._failed_training_msgs = []

        # remove output history from training_data
        for name in self._training_data:
            self._training_data[name] = []

    def _warm_start_data_changed(self, oldval, newval):
        self.reset_training_data = True

        # build list of inputs
        for case in newval:
            if self.recorder:
                self.recorder.record(case)
            inputs = []
            for inp_name in self.surrogate_input_names():
                var_name = '.'.join([self.name, inp_name])
                try:
                    inp_val = case[var_name]
                except KeyError:
                    pass
                    #self.raise_exception('The variable "%s" was not '
                    #'found as an input in one of the cases provided '
                    #'for warm_start_data.' % var_name, ValueError)
                else:
                    if inp_val is not None:
                        inputs.append(inp_val)
            self._training_input_history.append(inputs)

            for output_name in self.surrogate_output_names():
                #grab value from case data
                var_name = '.'.join([self.name, output_name])
                try:
                    val = case.get_output(var_name)
                except KeyError:
                    self.raise_exception(
                        'The output "%s" was not found '
                        'in one of the cases provided for '
                        'warm_start_data' % var_name, ValueError)
                else:  # save to training output history
                    self._training_data[output_name].append(val)

        self._new_train_data = True

    def check_config(self):
        '''Called as part of pre_execute.'''

        # 1. model must be set
        if self.model is None:
            self.raise_exception("MetaModel object must have a model!",
                                 RuntimeError)

        # 2. can't have both includes and excludes
        if self.excludes and self.includes:
            self.raise_exception(
                "includes and excludes are mutually exclusive", RuntimeError)

        # 3. the includes and excludes must match actual inputs and outputs of the model
        input_names = self.surrogate_input_names()
        output_names = self.surrogate_output_names()
        input_and_output_names = input_names + output_names
        for include in self.includes:
            if include not in input_and_output_names:
                self.raise_exception(
                    'The include "%s" is not one of the '
                    'model inputs or outputs ' % include, ValueError)
        for exclude in self.excludes:
            if exclude not in input_and_output_names:
                self.raise_exception(
                    'The exclude "%s" is not one of the '
                    'model inputs or outputs ' % exclude, ValueError)

        # 4. Either there are no surrogates set and no default surrogate
        #    ( just do passthrough )
        #        or
        #    all outputs must have surrogates assigned either explicitly
        #    or through the default surrogate
        if self.default_surrogate is None:
            no_sur = []
            for name in self.surrogate_output_names():
                if not self.surrogates[name]:
                    no_sur.append(name)
            if len(no_sur) > 0 and len(no_sur) != len(
                    self._surrogate_output_names):
                self.raise_exception(
                    "No default surrogate model is defined and"
                    " the following outputs do not have a"
                    " surrogate model: %s. Either specify"
                    " default_surrogate, or specify a"
                    " surrogate model for all outputs." % no_sur, RuntimeError)

        # 5. All the explicitly set surrogates[] should match actual outputs of the model
        for surrogate_name in self.surrogates.keys():
            if surrogate_name not in output_names:
                self.raise_exception(
                    'The surrogate "%s" does not match one of the '
                    'model outputs ' % surrogate_name, ValueError)

    def execute(self):
        """If the training flag is set, train the metamodel. Otherwise,
        predict outputs.
        """

        if self._train:
            try:
                inputs = self.update_model_inputs()

                self.model.run(force=True)

            except Exception as err:
                if self.report_errors:
                    raise err
                else:
                    self._failed_training_msgs.append(str(err))
            else:  # if no exceptions are generated, save the data

                self._training_input_history.append(inputs)
                self.update_outputs_from_model()
                case_outputs = []

                for name, output_history in self._training_data.items():
                    case_outputs.append(('.'.join([self.name,
                                                   name]), output_history[-1]))
                # save the case, making sure to add our name to the local input
                # name since this Case is scoped to our parent Assembly
                case_inputs = [
                    ('.'.join([self.name, name]), val)
                    for name, val in zip(self.surrogate_input_names(), inputs)
                ]
                if self.recorder:
                    self.recorder.record(
                        Case(inputs=case_inputs, outputs=case_outputs))

            self._train = False
        else:
            # NO surrogates defined. just run model and get outputs
            if self.default_surrogate is None and not self._surrogate_overrides:
                inputs = self.update_model_inputs()
                self.model.run()
                self.update_outputs_from_model()
                return

            if self._new_train_data:
                if len(self._training_input_history) < 2:
                    self.raise_exception(
                        "ERROR: need at least 2 training points!",
                        RuntimeError)

                # figure out if we have any constant training inputs
                tcases = self._training_input_history
                in_hist = tcases[0][:]
                # start off assuming every input is constant
                idxlist = range(len(in_hist))
                self._const_inputs = dict(zip(idxlist, in_hist))
                for i in idxlist:
                    val = in_hist[i]
                    for case in range(1, len(tcases)):
                        if val != tcases[case][i]:
                            del self._const_inputs[i]
                            break

                if len(self._const_inputs) == len(in_hist):
                    self.raise_exception(
                        "ERROR: all training inputs are constant.")
                elif len(self._const_inputs) > 0:
                    # some inputs are constant, so we have to remove them from the training set
                    training_input_history = []
                    for inputs in self._training_input_history:
                        training_input_history.append([
                            val for i, val in enumerate(inputs)
                            if i not in self._const_inputs
                        ])
                else:
                    training_input_history = self._training_input_history
                for name, output_history in self._training_data.items():
                    surrogate = self._get_surrogate(name)
                    if surrogate is not None:
                        surrogate.train(training_input_history, output_history)

                self._new_train_data = False

            inputs = []
            for i, name in enumerate(self.surrogate_input_names()):
                val = self.get(name)
                cval = self._const_inputs.get(i, _missing)
                if cval is _missing:
                    inputs.append(val)

                elif val != cval:
                    self.raise_exception(
                        "ERROR: training input '%s' was a"
                        " constant value of (%s) but the value"
                        " has changed to (%s)." % (name, cval, val),
                        ValueError)

            for name in self._training_data:
                surrogate = self._get_surrogate(name)
                # copy output to boundary
                if surrogate is None:
                    self._set_output(name, self.model.get(name))
                else:
                    self._set_output(name, surrogate.predict(inputs))
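A rough sketch of driving the MetaModel above by hand (hypothetical component and surrogate classes; normally a driver fires train_next for you): each run with train_next set executes the real model and stores a training case, and a subsequent normal run predicts from the trained surrogates. At least two training points are required.

mm = MetaModel()
mm.model = ExpensiveSim()               # hypothetical component being wrapped
mm.default_surrogate = SomeSurrogate()  # hypothetical ISurrogate implementation

# training phase: each run with train_next set executes the real model
for x in (0.0, 1.0, 2.0):
    mm.x = x                            # 'x' assumed to be an input of ExpensiveSim
    mm.train_next = True
    mm.run()

# prediction phase: a normal run uses the surrogates instead of the model
mm.x = 1.5
mm.run()
print mm.f_x                            # 'f_x' assumed to be an output of ExpensiveSim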
示例#15
0
class HAWC2SDistributed(Assembly):
    """
    Assembly for running HAWC2S in parallel using a CaseIteratorDriver

    parameters
    -----------
    wsp: array-like
        array of wind speeds. pitch and RPM will either be interpolated from
        the opt file or computed on the fly
    htc_master_file: string
        name of HAWC2S master file.
        For now the master file HAS TO be named hawc2s_master.htc !!!
    hawc2bin: string
        absolute path to HAWC2S executable

    optional parameters:
    --------------------
    bladegeom: BladeGeometryVT
        IDOtools blade geometry VarTree
    beamprops: BeamStructureVT
        IDOtools blade beam structural properties
    radius: float
        blade length
    """

    # For now the master file HAS TO be named hawc2s_master.htc !!!
    htc_master_file = Str(iotype='in')
    model_name = Str('hawc2s_model', iotype='in')
    hawc2bin = Str(iotype='in')
    designTSR = Float(7.5, iotype='in')
    vartrees_out = VarTree(HAWC2VarTrees(), iotype='out')
    controller = VarTree(DTUBasicControllerVT(), iotype='in')

    wt = VarTree(AeroelasticHAWTVT(), iotype='in', desc='Turbine definition')
    inflow = VarTree(TurbineEnvironmentCaseListVT(), iotype='in', desc='Inflow conditions')

    case_list = Dict(iotype='in', desc='Dictionary of TurbineEnvironmentVT case inputs')

    oper = VarTree(RotorOperationalDataArray(), iotype='out', desc='Operational data')
    rotor_loads = VarTree(RotorLoadsArrayVT(), iotype='out', desc='Rotor torque, power, and thrust')
    blade_loads = VarTree(DistributedLoadsArrayVT(), iotype='out', desc='Spanwise load distributions')

    blade_disps = VarTree(BeamDisplacementsArrayVT(), iotype='out', desc='Blade deflections and rotations')

    def configure_hawc2s(self, htc_master_file=''):

        # Generate simple CID cases
        self.add('casegen', MakeCases())
        self.driver.workflow.add('casegen')
        self.create_passthrough('casegen.user_cases')

        self.htc_master_file = htc_master_file

        if not self.htc_master_file == '':
            # Read Input file for data initialization
            self.reader = HAWC2InputReader()
            self.reader.htc_master_file = self.htc_master_file
            self.reader.execute()

            self.casegen.vartrees = self.reader.vartrees.copy()
            self.vartrees_out = self.reader.vartrees.copy()

        # connect FUSED-Wind inflow variables used by HAWC2S
        self.connect('inflow.vhub', 'casegen.wsp')
        self.connect('inflow.density[0]', 'casegen.vartrees.wind.density')

        self.connect('designTSR',
                     'casegen.vartrees.dlls.risoe_controller.dll_init.designTSR')
        self.connect('designTSR',
                     'vartrees_out.dlls.risoe_controller.dll_init.designTSR')

        # add case iterator with HAWC2 wrapper
        self.add('h2', HAWC2SCaseIter())
        self.driver.workflow.add('h2')
        self.connect('model_name', 'h2.model_name')
        self.connect('hawc2bin', 'h2.hawc2bin')
        self.connect('casegen.cases', 'h2.vartrees')
        self.connect('casegen.case_ids', 'h2.case_ids')
 
        self.create_passthrough('h2.sequential')
        self.create_passthrough('h2.set_tsr_flag')
        self.h2.output.commands = self.reader.vartrees.h2s.commands

        # postprocess CID cases
        self.add('h2post', H2SCIDPostProcess())
        self.driver.workflow.add('h2post')
        self.connect('h2.rotor_loads', 'h2post.rotor_loads_cid')
        self.connect('h2.blade_loads', 'h2post.blade_loads_cid')
        self.connect('h2.hub_loads', 'h2post.hub_loads_cid')
        self.connect('h2.blade_disps', 'h2post.blade_disps_cid')
        self.connect('h2.oper', 'h2post.oper_cid')

        self.connect('h2post.rotor_loads', 'rotor_loads')
        self.connect('h2post.blade_loads', 'blade_loads')
        self.connect('h2post.blade_disps', 'blade_disps')
        self.connect('h2post.oper', 'oper')

        self.create_passthrough('h2post.hub_loads')


        # taken out for now ...
        # self.add('h2interp', H2SResInterp())
        # self.driver.workflow.add('h2interp')
        # self.connect('h2post.rotor_loads', 'h2interp.rotor_loads')
        # self.connect('h2post.blade_loads', 'h2interp.blade_loads')
        # self.connect('h2post.hub_loads', 'h2interp.hub_loads')
        # self.connect('h2post.blade_disps', 'h2interp.blade_disps')
        # self.connect('h2post.oper', 'h2interp.oper')        
        
        # self.create_passthrough('h2interp.rotor_loads_i')
        # self.create_passthrough('h2interp.blade_loads_i')
        # self.create_passthrough('h2interp.hub_loads_i')
        # self.create_passthrough('h2interp.blade_disps_i')
        # self.create_passthrough('h2interp.oper_i')
        self.log_level = logging.DEBUG

    def configure_geometry(self):

        self.add('geom', HAWC2GeometryBuilder())
        self.driver.workflow.add('geom')
        self.create_passthrough('geom.bladegeom', alias='pfIn')
        # self.create_passthrough('geom.blade_length')
        self.connect('wt.blade_length', 'geom.blade_length')
        self.create_passthrough('geom.interp_from_htc', alias='planform_interp_from_htc')
        self.create_passthrough('geom.blade_ni_span')
        self.connect('geom.blade_ae', 'casegen.vartrees.blade_ae')
        self.connect('geom.blade_ae', 'vartrees_out.blade_ae')
        # this should be changed
        self.geom.c12axis_init = self.reader.vartrees.blade_ae.c12axis.copy()

    def configure_freq_placement(self, freq_type='ae'):

        self.h2.configure_freq_placement_cid(freq_type=freq_type)
        self.connect('h2.freq_factor', 'h2post.freq_factor_cid')
        self.create_passthrough('h2post.freq_factor')
        self.create_passthrough('h2.mode_freq')
        self.create_passthrough('h2.mode_damp')
        self.create_passthrough('h2.mode_target_freq')
        self.create_passthrough('h2.mode_target_damp')

    def configure_controller_tuning(self):

        self.controller = self.reader.vartrees.dlls.risoe_controller.dll_init.copy()
        for att in self.controller.list_vars():
            if att == 'designTSR':
                continue
            self.connect('controller.'+att,
                         'casegen.vartrees.dlls.risoe_controller.dll_init.'+att)
            self.connect('controller.'+att,
                         'vartrees_out.dlls.risoe_controller.dll_init.'+att)

    def configure_structure(self):

        self.add('beam_st', HAWC2BeamStructureIDO())
        self.driver.workflow.add('beam_st')
        self.connect('wt.blade1.beam_structure', 'beam_st.beam_structure')
        self.connect('beam_st.h2beam_structure', 'casegen.vartrees.blade_structure')
        self.connect('beam_st.h2beam_structure', 'vartrees_out.blade_structure')

    def _pre_execute(self):
        super(HAWC2SDistributed, self)._pre_execute()

        self.tt = time.time()

    def _post_execute(self):
        super(HAWC2SDistributed, self)._post_execute()

        t = time.time() - self.tt
        self._logger.info('HAWC2S time: %f' % t)
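A rough sketch of the configuration order implied by the methods above: configure_hawc2s must run first, since configure_geometry, configure_controller_tuning and the frequency placement all rely on self.reader and on the casegen/h2 components it creates (the master file name is the hard-coded one mentioned in the docstring).

h2s = HAWC2SDistributed()
h2s.configure_hawc2s(htc_master_file='hawc2s_master.htc')
h2s.configure_geometry()
h2s.configure_structure()
h2s.configure_freq_placement(freq_type='ae')
h2s.configure_controller_tuning()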
示例#16
0
class Build_Fuzzy_Input(Component):
    """
    Creates a dictionary of up to 15 inputs with their corresponding keys.
    Input values can be a single crisp value [c] or two iterable
    objects representing a fuzzy number: [[x values], [y values]].
    (See the usage sketch after this class.)
    """
    
    # set up interface to the framework
    #N_inputs = Int(1, iotype'in', desc='number of input pairs to accept')
    in_1 = VarTree(InputPair(), iotype='in')
    in_2 = VarTree(InputPair(), iotype='in')
    in_3 = VarTree(InputPair(), iotype='in')
    in_4 = VarTree(InputPair(), iotype='in')
    in_5 = VarTree(InputPair(), iotype='in')
    in_6 = VarTree(InputPair(), iotype='in')
    in_7 = VarTree(InputPair(), iotype='in')
    in_8 = VarTree(InputPair(), iotype='in')
    in_9 = VarTree(InputPair(), iotype='in')
    in_10 = VarTree(InputPair(), iotype='in')
    in_11 = VarTree(InputPair(), iotype='in')
    in_12 = VarTree(InputPair(), iotype='in')
    in_13 = VarTree(InputPair(), iotype='in')
    in_14 = VarTree(InputPair(), iotype='in')
    in_15 = VarTree(InputPair(), iotype='in')

    inDict_1 = VarTree(InputDict(), iotype='in', desc='Input Dictionary (usually from another output)')
    inDict_2 = VarTree(InputDict(), iotype='in', desc='Input Dictionary (usually from another output)')
    inDict_3 = VarTree(InputDict(), iotype='in', desc='Input Dictionary (usually from another output)')
    inDict_4 = VarTree(InputDict(), iotype='in', desc='Input Dictionary (usually from another output)')
    inDict_5 = VarTree(InputDict(), iotype='in', desc='Input Dictionary (usually from another output)')

    runFlag_in = Int(0, iotype='in', desc='test')
    
    system_inputs = Dict({}, iotype='out', desc='input dict for fuzzy sys')
    runFlag_out = Int(0, iotype='out', desc='test')

    def execute(self):
        """combine all input pairs into output dict"""     
        #try:   
        inputs = [self.in_1,  self.in_2,  self.in_3,  self.in_4,  self.in_5, 
                  self.in_6,  self.in_7,  self.in_8,  self.in_9,  self.in_10, 
                  self.in_11, self.in_12, self.in_13, self.in_14, self.in_15, ]
        inDicts = [self.inDict_1, self.inDict_2, self.inDict_3, self.inDict_4, self.inDict_5]

        system_inputs = {}          
        #for each input, assign it to dict if it's not empty         
        for ix in inputs:
            if ix.input_key != '':
                if len(ix.input_value) == 1:
                    system_inputs[str(ix.input_key).strip("'")] = ix.input_value[0]
                else:
                    system_inputs[str(ix.input_key).strip("'")] = ix.input_value
        
        #for each input dict, add the selected keys to the system inputs
        for ix in inDicts:
            #print "INPUT KEYS:", ix.input_keys, len(ix.input_dict)
            if len(ix.input_keys) > 0:
                for k in ix.input_keys:
                    if len(ix.input_dict) == 0 or k not in ix.input_dict:
                        system_inputs[str(str(k).strip("'"))] = None
                    #elif len(ix.input_dict[k]) == 1: #catch for crisp value?
                    #    system_inputs[str(str(k).strip("'"))] = ix.input_dict[k][0]
                    else: 
                        system_inputs[str(str(k).strip("'"))] = ix.input_dict[k]

        self.system_inputs = system_inputs                    
        self.runFlag_out = self.runFlag_in
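A rough usage sketch for the component above (the keys are made up; the InputPair fields input_key and input_value are inferred from the loop in execute): a single-element input_value is stored as a crisp scalar, anything longer is passed through as a fuzzy number.

b = Build_Fuzzy_Input()

# crisp input: a one-element list collapses to a scalar in system_inputs
b.in_1.input_key = 'LD'
b.in_1.input_value = [14.0]

# fuzzy input: [x values, membership values]
b.in_2.input_key = 'etaP'
b.in_2.input_value = [[0.6, 0.7, 0.8], [0.0, 1.0, 0.0]]

b.execute()
# b.system_inputs == {'LD': 14.0,
#                     'etaP': [[0.6, 0.7, 0.8], [0.0, 1.0, 0.0]]}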
示例#17
0
class Quantify(Component):
    """ 
    Linearly interpolates to change a qualitative value into a quantitative one.
    (See the worked example after this class.)
    """

    # set up interface to the framework
    qualRange  = List([1.0,9.0], iotype='in', desc='The qualitative range to use.')
    quantRange = List([],        iotype='in', desc='The quantitative range to use.')
    inDict     = Dict({},        iotype='in', desc='Input dictionary to get qualvalue from')
    inKey      = Str('',         iotype='in', desc='Key to use in inDict')
    defuzz     = Str('centroid', iotype='in', desc='Defuzzification method to use') 
    qualVal    = List([],        iotype='in', desc='The qualitative value')

    passthrough = Int(0, iotype='in', low=0, high=1, desc='passthrough flag for incompatible options')

    quantVal   = List([],        iotype='out', desc='Resulting quantitative value')


    def execute(self):
        """Interpolate linearly
        """
        if self.passthrough == 1: return None

        x0 = self.qualRange[0]
        x1 = self.qualRange[1]
        y0 = self.quantRange[0]
        y1 = self.quantRange[1]
        
        if self.inDict != {}:
            inVal = self.inDict[self.inKey]


            if len(inVal) > 1: #translate universe if fuzzy
                newXs = [y0 + (y1 - y0)*((v-x0)/(x1-x0)) for v in inVal[0]]
                self.quantVal = [newXs, inVal[1]]
            else: #interpolate if crisp
                self.quantVal = y0 + (y1 - y0)*((inVal[0]-x0)/(x1-x0))
            #print "Quantified: [%.2f,%.2f] => [%.2f,%.2f]" % (self.qualRange[0],self.qualRange[1],self.quantRange[0],self.quantRange[1])
            #plt.figure()
            #plt.subplot(2,1,1)
            #plt.plot(inVal[0],inVal[1])
            #plt.subplot(2,1,2)
            #plt.plot(self.quantVal[0],self.quantVal[1])
            #plt.show()
        #if no dict
        elif isinstance(self.qualVal, list):
            inVal = self.qualVal
            if len(inVal) > 1: #translate universe if fuzzy
                newXs = [y0 + (y1 - y0)*((v-x0)/(x1-x0)) for v in inVal[0]]
                self.quantVal = [newXs, inVal[1]]
            else: #interpolate if crisp
                self.quantVal = y0 + (y1 - y0)*((inVal[0]-x0)/(x1-x0))

        #elif isinstance(self.qualVal, float) or isinstance(self.qualVal, int):
        #    self.quantVal = y0 + (y1 - y0)*((self.qualVal-x0)/(x1-x0))

        if False:
            print "Plotting interpolation..."
            plt.figure()
            plt.subplot(2,1,1)
            plt.plot(inVal[0], inVal[1])
            plt.xlim(self.qualRange)
            plt.ylim([0.0, 1.0])
            plt.subplot(2,1,2)
            plt.plot(self.quantVal[0],self.quantVal[1])
            plt.xlim(self.quantRange)
            plt.ylim([0.0, 1.0])
            plt.show()
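The mapping in execute is a straight linear interpolation from qualRange onto quantRange, quant = y0 + (y1 - y0) * (qual - x0) / (x1 - x0), applied either to a crisp value or to the x axis (universe) of a fuzzy number while the memberships are left untouched. A small worked sketch with made-up ranges:

def to_quant(v, qual_range, quant_range):
    """Linearly map v from the qualitative range onto the quantitative range."""
    x0, x1 = qual_range
    y0, y1 = quant_range
    return y0 + (y1 - y0) * ((v - x0) / (x1 - x0))

qual_range, quant_range = [1.0, 9.0], [0.0, 100.0]

# crisp value: 5.0 is the midpoint of [1, 9], so it maps to 50.0 on [0, 100]
print to_quant(5.0, qual_range, quant_range)

# fuzzy value: only the universe (x values) is remapped, memberships stay as-is
xs, mu = [3.0, 5.0, 7.0], [0.0, 1.0, 0.0]
quant_xs = [to_quant(v, qual_range, quant_range) for v in xs]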
示例#18
0
class BladeStructureVT3D(VariableTree):
    """
    Variable tree for the structural definition of a blade.
    """

    x = Array(desc='spanwise discretization of blade')
    regions = List(desc='List of names of regions')
    webs = List(desc='List of names of webs')
    iwebs = List(desc='List of DP indices connecting webs to the surface')
    DPs = List(desc='Names of division point curves')
    materials = Dict()

    def configure_regions(self, nr, names=[]):

        for i in range(nr + 1):
            self.add(
                'DP%02d' % i,
                Array(dtype=float,
                      low=-1.,
                      high=1.,
                      desc='Region division point curves %i' % i))
            self.DPs.append('DP%02d' % i)

        for i in range(nr):
            try:
                name = names[i]
            except IndexError:
                name = 'region%02d' % i
            self.add_region(name)

    def configure_webs(self, nw, iwebs, names=[]):

        self.iwebs = iwebs

        for i in range(nw):
            try:
                name = names[i]
            except IndexError:
                name = 'web%02d' % i
            self.add_web(name)

    def add_region(self, name):

        self.add(name, VarTree(Region3D()))
        region = getattr(self, name)
        self.regions.append(name)
        # self.nr = len(self.regions)
        return region

    def add_web(self, name):

        self.add(name, VarTree(Region3D()))
        self.webs.append(name)
        return getattr(self, name)

    def add_material(self, name):
        """
        add a material to the blade
        
        parameters
        -----------
        name: string
            name of material to add

        returns
        --------
        MaterialProps: object
            VariableTree with material properties to set by user
        """

        mat = MaterialProps()
        mat.materialname = name
        self.add(name, VarTree(mat))
        self.materials[name] = getattr(self, name)
        return getattr(self, name)
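A rough sketch of building the tree above (region/web counts, the iwebs connectivity and the material values are made up; configure_regions adds nr+1 DP curves plus nr regions, and add_material returns a MaterialProps vartree for the user to fill in):

import numpy as np

st = BladeStructureVT3D()
st.x = np.linspace(0.0, 1.0, 20)       # spanwise discretization

st.configure_regions(4)                # adds DP00..DP04 and region00..region03
st.configure_webs(1, iwebs=[[1, 3]])   # one web; the DP-index convention is assumed

triax = st.add_material('triax')       # returns a MaterialProps vartree
triax.E1 = 21.8e9                      # illustrative property values
triax.rho = 1850.0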
示例#19
0
class PrintOutputs(TopfarmComponent):
    wt_positions = Array(
        [],
        unit='m',
        iotype='in',
        desc='Array of wind turbines attached to particular positions')
    baseline = Array(
        [],
        unit='m',
        iotype='in',
        desc='Array of wind turbines attached to particular positions')
    borders = Array(iotype='in',
                    desc='The polygon defining the borders ndarray([n_bor,2])',
                    unit='m')
    depth = Array(iotype='in',
                  desc='An array of depth ndarray([n_d, 2])',
                  unit='m')
    foundation_length = Float(
        iotype='in', desc='The total foundation length of the wind farm')
    foundations = Array(iotype='in',
                        desc='The foundation length of each wind turbine')
    wt_dist = Array(
        iotype='in',
        desc="""The distance between each turbines ndarray([n_wt, n_wt]).""",
        unit='m')
    spiral_param = Float(5.0, iotype='in', desc='spiral parameter')
    png_name = Str('wind_farm',
                   iotype='in',
                   desc='The base of the png name used to save the fig')
    result_file = Str('wind_farm',
                      iotype='in',
                      desc='The base result name used to save the fig')
    net_aep = Float(iotype='in', desc='Net annual energy production (AEP)')
    distribution = Str('spiral',
                       iotype='in',
                       desc='The type of distribution to plot')
    elnet_layout = Dict(iotype='in')
    elnet_length = Float(iotype='in')
    inc = 0
    fs = 15  #Font size

    def execute(self):
        dist_min = self.wt_dist.min()
        dist_mean = self.wt_dist.mean()

        if self.inc == 0:
            try:
                pa(self.result_file + '.results').remove()
            except:
                pass
            self.iterations = [self.inc]
            self.targvalue = [[
                self.foundation_length, self.elnet_length, dist_mean, dist_min,
                self.net_aep
            ]]
        else:
            self.iterations.append(self.inc)
            self.targvalue.append([
                self.foundation_length, self.elnet_length, dist_mean, dist_min,
                self.net_aep
            ])
        self.targname = [
            'Foundation length', 'El net length', 'Mean WT Dist',
            'Min WT Dist', 'AEP'
        ]

        targarr = np.array(self.targvalue)
        output = '%d:' % (self.inc) + ', '.join([
            '%s=%6.2f' % (self.targname[i], targarr[-1, i])
            for i in range(len(self.targname))
        ]) + '\n'  # + str(self.wt_positions)
        print output
        with open(self.result_file + '.results', 'a') as f:
            f.write(output)

        self.inc += 1
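
A short sketch of how this component is meant to be driven: each call to execute() computes the turbine-distance statistics and appends one summary line per iteration to <result_file>.results. The values below are made up for illustration, and np is NumPy as used in the class:

printer = PrintOutputs()
printer.result_file = 'wind_farm'           # lines are appended to wind_farm.results
printer.wt_dist = np.ones((5, 5)) * 560.0   # illustrative 5-turbine distance matrix [m]
printer.foundation_length = 120.0
printer.elnet_length = 4300.0
printer.net_aep = 35.2
printer.execute()   # writes '0: Foundation length=..., El net length=..., ...'
printer.execute()   # later calls append iterations 1, 2, ...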
Example #20
class ExtCode(Component):
    """ Just a component with resources. """
    resources = Dict({}, iotype='in',
                     desc='Resources required to run this component.')
class ExternalCode(ComponentWithDerivatives):
    """
    Run an external code as a component. The component can be configured to
    run the code on a remote server, see :meth:`execute`.
    """

    PIPE = subprocess.PIPE
    STDOUT = subprocess.STDOUT

    # pylint: disable-msg=E1101
    command = List(Str, desc='The command to be executed.')
    env_vars = Dict({},
                    iotype='in',
                    desc='Environment variables required by the command.')
    resources = Dict({},
                     iotype='in',
                     desc='Resources required to run this component.')
    poll_delay = Float(0.,
                       low=0.,
                       units='s',
                       iotype='in',
                       desc='Delay between polling for command completion.'
                       ' A value of zero will use an internally computed'
                       ' default.')
    timeout = Float(0.,
                    low=0.,
                    iotype='in',
                    units='s',
                    desc='Maximum time to wait for command completion.'
                    ' A value of zero implies an infinite wait.')
    timed_out = Bool(False,
                     iotype='out',
                     desc='True if the command timed-out.')
    return_code = Int(0, iotype='out', desc='Return code from the command.')

    def __init__(self, *args, **kwargs):
        super(ExternalCode, self).__init__(*args, **kwargs)

        self.stdin = None
        self.stdout = None
        self.stderr = "error.out"

        self._process = None
        self._server = None

    # This gets used by remote server.
    def get_access_controller(self):  #pragma no cover
        """ Return :class:`AccessController` for this object. """
        return _AccessController()

    @rbac(('owner', 'user'))
    def set(self, path, value, index=None, src=None, force=False):
        """ Don't allow setting of 'command' by a remote client. """
        if path in ('command', 'get_access_controller') and remote_access():
            self.raise_exception('%r may not be set() remotely' % path,
                                 RuntimeError)
        return super(ExternalCode, self).set(path, value, index, src, force)

    def execute(self):
        """
        Runs the specified command.

        First removes existing output (but not in/out) files.
        Then if `resources` have been specified, an appropriate server
        is allocated and the command is run on that server.
        Otherwise the command is run locally.

        When running remotely, the following resources are set:

        ======================= =====================================
        Key                     Value
        ======================= =====================================
        job_name                self.get_pathname()
        ----------------------- -------------------------------------
        remote_command          self.command (first item)
        ----------------------- -------------------------------------
        args                    self.command (2nd through last items)
        ----------------------- -------------------------------------
        job_environment         self.env_vars
        ----------------------- -------------------------------------
        input_path              self.stdin
        ----------------------- -------------------------------------
        output_path             self.stdout
        ----------------------- -------------------------------------
        error_path              self.stderr (if != STDOUT)
        ----------------------- -------------------------------------
        join_files              If self.stderr == STDOUT
        ----------------------- -------------------------------------
        hard_run_duration_limit self.timeout (if non-zero)
        ======================= =====================================

        .. note::

            Input files to be sent to the remote server are defined by
            :class:`FileMetadata` entries in the `external_files` list
            with `input` True.  Similarly, output files to be retrieved
            from the remote server are defined by entries with `output`
            True.

        .. warning::

            Any file **not** labelled with `binary` True will undergo
            newline translation if the local and remote machines have
            different newline representations. Newline translation will
            corrupt a file which is binary but hasn't been labelled as
            such.

        """
        self.return_code = -12345678
        self.timed_out = False

        for metadata in self.external_files:
            if metadata.get('output', False) and \
               not metadata.get('input', False):
                for path in glob.glob(metadata.path):
                    if os.path.exists(path):
                        os.remove(path)

        if not self.command:
            self.raise_exception('Null command line', ValueError)

        return_code = None
        error_msg = ''
        try:
            if self.resources:
                return_code, error_msg = self._execute_remote()
            else:
                return_code, error_msg = self._execute_local()

            if return_code is None:
                if self._stop:
                    self.raise_exception('Run stopped', RunStopped)
                else:
                    self.timed_out = True
                    self.raise_exception('Timed out', RunInterrupted)

            elif return_code:
                if isinstance(self.stderr, str):
                    stderrfile = open(self.stderr, 'r')
                    error_desc = stderrfile.read()
                    stderrfile.close()
                    err_fragment = "\nError Output:\n%s" % error_desc
                else:
                    err_fragment = error_msg

                self.raise_exception('return_code = %d%s' \
                    % (return_code, err_fragment), RuntimeError)
        finally:
            self.return_code = -999999 if return_code is None else return_code

    def _execute_local(self):
        """ Run command. """
        self._logger.info('executing %s...', self.command)
        start_time = time.time()

        self._process = \
            ShellProc(self.command, self.stdin, self.stdout, self.stderr,
                      self.env_vars)
        self._logger.debug('PID = %d', self._process.pid)

        try:
            return_code, error_msg = \
                self._process.wait(self.poll_delay, self.timeout)
        finally:
            self._process.close_files()
            self._process = None

        et = time.time() - start_time
        if et >= 60:  #pragma no cover
            self._logger.info('elapsed time: %.1f sec.', et)

        return (return_code, error_msg)

    def _execute_remote(self):
        """
        Allocate a server based on required resources, send inputs,
        run command, and retrieve results.
        """
        # Allocate server.
        self._server, server_info = RAM.allocate(self.resources)
        if self._server is None:
            self.raise_exception('Server allocation failed :-(', RuntimeError)

        return_code = -88888888
        error_msg = ''
        try:
            # Create resource description for command.
            rdesc = self.resources.copy()
            rdesc['job_name'] = self.get_pathname()
            rdesc['remote_command'] = self.command[0]
            if len(self.command) > 1:
                rdesc['args'] = self.command[1:]
            if self.env_vars:
                rdesc['job_environment'] = self.env_vars
            if self.stdin:
                rdesc['input_path'] = self.stdin
            if self.stdout:
                rdesc['output_path'] = self.stdout
            if self.stderr:
                if self.stderr == self.STDOUT:
                    rdesc['join_files'] = True
                else:
                    rdesc['error_path'] = self.stderr
            if self.timeout:
                rdesc['hard_run_duration_limit'] = self.timeout

            # Send inputs.
            patterns = []
            textfiles = []
            for metadata in self.external_files:
                if metadata.get('input', False):
                    patterns.append(metadata.path)
                    if not metadata.binary:
                        textfiles.append(metadata.path)
            if patterns:
                self._send_inputs(patterns, textfiles)
            else:
                self._logger.debug('No input metadata paths')

            # Run command.
            self._logger.info('executing %s...', self.command)
            start_time = time.time()
            return_code, error_msg = \
                self._server.execute_command(rdesc)
            et = time.time() - start_time
            if et >= 60:  #pragma no cover
                self._logger.info('elapsed time: %.1f sec.', et)

            # Retrieve results.
            patterns = []
            textfiles = []
            for metadata in self.external_files:
                if metadata.get('output', False):
                    patterns.append(metadata.path)
                    if not metadata.binary:
                        textfiles.append(metadata.path)
            if patterns:
                self._retrieve_results(patterns, textfiles)
            else:
                self._logger.debug('No output metadata paths')

        finally:
            RAM.release(self._server)
            self._server = None

        return (return_code, error_msg)

    def _send_inputs(self, patterns, textfiles):
        """ Sends input files matching `patterns`. """
        self._logger.info('sending inputs...')
        start_time = time.time()

        filename = 'inputs.zip'
        pfiles, pbytes = pack_zipfile(patterns, filename, self._logger)
        try:
            filexfer(None, filename, self._server, filename, 'b')
            ufiles, ubytes = self._server.unpack_zipfile(filename,
                                                         textfiles=textfiles)
        finally:
            os.remove(filename)

        # Difficult to force file transfer error.
        if ufiles != pfiles or ubytes != pbytes:  #pragma no cover
            msg = 'Inputs xfer error: %d:%d vs. %d:%d' \
                  % (ufiles, ubytes, pfiles, pbytes)
            self.raise_exception(msg, RuntimeError)

        et = time.time() - start_time
        if et >= 60:  #pragma no cover
            self._logger.info('elapsed time: %f sec.', et)

    def _retrieve_results(self, patterns, textfiles):
        """ Retrieves result files matching `patterns`. """
        self._logger.info('retrieving results...')
        start_time = time.time()

        filename = 'outputs.zip'
        pfiles, pbytes = self._server.pack_zipfile(patterns, filename)
        filexfer(self._server, filename, None, filename, 'b')

        # Valid, but empty, file causes unpack_zipfile() problems.
        try:
            if os.path.getsize(filename) > 0:
                ufiles, ubytes = unpack_zipfile(filename,
                                                logger=self._logger,
                                                textfiles=textfiles)
            else:
                ufiles, ubytes = 0, 0
        finally:
            os.remove(filename)

        # Difficult to force file transfer error.
        if ufiles != pfiles or ubytes != pbytes:  #pragma no cover
            msg = 'Results xfer error: %d:%d vs. %d:%d' \
                  % (ufiles, ubytes, pfiles, pbytes)
            self.raise_exception(msg, RuntimeError)

        et = time.time() - start_time
        if et >= 60:  #pragma no cover
            self._logger.info('elapsed time: %f sec.', et)

    def stop(self):
        """ Stop the external code. """
        self._stop = True
        if self._process:
            self._process.terminate()

    def copy_inputs(self, inputs_dir, patterns):
        """
        Copy inputs from `inputs_dir` that match `patterns`.

        inputs_dir: string
            Directory to copy files from. Relative paths are evaluated from
            the component's execution directory.

        patterns: list or string
            One or more :mod:`glob` patterns to match against.

        This can be useful for resetting problem state.
        """
        self._logger.info('copying initial inputs from %s...', inputs_dir)
        with self.dir_context:
            if not os.path.exists(inputs_dir):
                self.raise_exception("inputs_dir '%s' does not exist" \
                                     % inputs_dir, RuntimeError)
            self._copy(inputs_dir, patterns)

    def copy_results(self, results_dir, patterns):
        """
        Copy files from `results_dir` that match `patterns`.

        results_dir: string
            Directory to copy files from. Relative paths are evaluated from
            the component's execution directory.

        patterns: list or string
            One or more :mod:`glob` patterns to match against.

        This can be useful for workflow debugging when the external
        code takes a long time to execute.
        """
        self._logger.info('copying precomputed results from %s...',
                          results_dir)
        with self.dir_context:
            if not os.path.exists(results_dir):
                self.raise_exception("results_dir '%s' does not exist" \
                                     % results_dir, RuntimeError)
            self._copy(results_dir, patterns)

    def _copy(self, directory, patterns):
        """
        Copy files from `directory` that match `patterns`
        to the current directory and ensure they are writable.

        directory: string
            Directory to copy files from.

        patterns: list or string
            One or more :mod:`glob` patterns to match against.
        """
        if isinstance(patterns, basestring):
            patterns = [patterns]

        for pattern in patterns:
            pattern = os.path.join(directory, pattern)
            for src_path in sorted(glob.glob(pattern)):
                dst_path = os.path.basename(src_path)
                self._logger.debug('    %s', src_path)
                shutil.copy(src_path, dst_path)
                # Ensure writable.
                mode = os.stat(dst_path).st_mode
                mode |= stat.S_IWUSR
                os.chmod(dst_path, mode)
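
A hedged sketch of a purely local run with the ExternalCode component above. The command line, environment variable, and timeout are placeholders, and driving the component through the usual OpenMDAO Component run() entry point is assumed rather than shown in this example:

solver = ExternalCode()
solver.command = ['python', 'my_solver.py', 'case.inp']  # placeholder command line
solver.env_vars = {'OMP_NUM_THREADS': '4'}
solver.timeout = 600.0                     # fail the run after ten minutes
solver.stderr = ExternalCode.STDOUT        # merge stderr into stdout
solver.run()                               # resources is empty, so _execute_local() is used
print solver.return_code, solver.timed_out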
Example #22
class Fuzzy_System(Component):

    #component inputs and outputs
    fcl_file = Str('', iotype='in', desc='File name for FCL file')
    #print fcl_file  # class-level debug print; shows the Str trait object, not a value

    #inputs
    TESTMODE = Int(0, iotype='in', desc='TestMode Flag (1 == Run in TESTMODE)')
    TESTPLOT = Int(0,
                   iotype='in',
                   desc='TestPlot Flag (1 == Create a TESTPLOT)')
    input_list = Dict({}, iotype='in', desc='Dict of Input Values')
    runFlag_in = Int(0, iotype='in', desc='test')
    passthrough = Int(0,
                      iotype='in',
                      low=0,
                      high=1,
                      desc='passthrough flag for incompatible options')

    #outputs
    outputs_all = Dict({}, iotype='out', desc='Output Value Dict')
    input_mfs = Dict({}, iotype='out', desc='Dict of Input MFs')
    output_mfs = Dict({}, iotype='out', desc='Dict of Output MFs')
    runFlag_out = Int(0, iotype='out', desc='test')

    #initialize system
    def __init__(
        self
    ):  # inputs, outputs, rulebase, AND_operator, OR_operator, aggregator, implication):
        """ Creates a new Fuzzy System object """
        super(Fuzzy_System, self).__init__()

        self.old_fcl_file = self.fcl_file  #check for changing fuzzy system fcl file
        if self.fcl_file != '':
            self.inputs, \
            self.outputs, \
            self.rulebase, \
            self.AND_operator, \
            self.OR_operator, \
            self.aggregator, \
            self.implication, \
            self.defuzz = build_fuzz_system(self.fcl_file)

            self.input_mfs = self.inputs  #add to MDAO outputs
            self.output_mfs = self.outputs  #add to MDAO outputs

            print 'New System Loaded...', len(self.inputs), 'inputs. ', \
                  len(self.outputs), 'outputs. ', len(self.rulebase), 'rules.'

            self.implicatedOutputs = {
                rule.rule_id: {}
                for rule in self.rulebase
            }  #dict of {rule_id:{outputname: [x,y], ...} }for fired inputs

    #get minimum of fuzzy number (min y > 0)
    def fuzzy_minimum(self, n_fuzzy):
        pass

    #get maximum of fuzzy number (max y > 0)
    def fuzzy_maximum(self, n_fuzzy):
        pass

    #get the minimum (AND) of singleton and fuzzy number (as numpy array)
    # singleton - float - single value, x
    # n_fuzzy - fuzzy number [nparray[x], nparray[y]] (from skfuzzy package)
    def fuzzy_single_AND(self, singleton, n_fuzzy):
        #try:

        singleton = float(singleton)
        for i in range(len(n_fuzzy[0]) - 1):
            #print i, 'Check Range:', n_fuzzy[0][i], singleton, n_fuzzy[0][i+1]
            #print type(n_fuzzy[0][i]), type(singleton), type(n_fuzzy[0][i+1])
            if round(n_fuzzy[0][i], 6) <= round(singleton, 6) and round(
                    n_fuzzy[0][i + 1], 6) > round(
                        singleton, 6):  #find points around singleton

                #interpolate linearly for more accurate answer
                return n_fuzzy[1][i] + (n_fuzzy[1][i+1] - n_fuzzy[1][i]) * \
                       ((singleton - n_fuzzy[0][i]) / (n_fuzzy[0][i+1] - n_fuzzy[0][i]))

        print 'Singleton (', singleton, ') Not Found in Fuzzy Range (', str(
            min(n_fuzzy[0])), '-', str(max(n_fuzzy[0])), ')!'
        return 0.0  #[ n_fuzzy[0], [0.0 for n in n_fuzzy[0]] ]
        #except TypeError as (errno, strerror):
        #    print "Type Error:".format(errno, strerror)
        #    return 0.0

    #take in float singleton value and range and return fuzzy value
    # s - single float value
    # x_range - range to build MF on ([x1,x2,step])
    def singleton_to_fuzzy(self, s, x_range):
        x = np.arange(x_range[0], x_range[1], x_range[2])
        y = np.zeros(len(x))
        print len(x), len(y)

        print x, y

        for i in range(len(x) - 1):
            if x[i] < s and x[i + 1] >= s:
                x = np.insert(x, i + 1, s)
                x = np.insert(x, i + 1, s)
                x = np.insert(x, i + 1, s)
                y = np.insert(y, i + 1, 0)
                y = np.insert(y, i + 1, 1)
                y = np.insert(y, i + 1, 0)
                break

        return [x, y]

    #get firing strength of an input
    #input_name - linguistic input name
    #input_ - list corresponding to input [x,y] or singleton
    #input_sys - object corresponding to system input MFs
    #
    def firing_strength(self, input_name, input_, input_sys):
        if not isinstance(input_, list):  #if a singleton and not a list
            fs = self.fuzzy_single_AND(
                input_,
                [input_sys.MFs[input_name][0], input_sys.MFs[input_name][1]])
            return fs
        x_min, y_min = fuzz.fuzzy_and(
            input_sys.MFs[input_name][0], input_sys.MFs[input_name][1],
            input_[0],
            input_[1])  #use AND operator to get minimum of two functions
        return max(y_min)

    #recurses through rule antecedent to get firing strength
    #list1 - originally the rule antecedent goes here
    def rule_recurse(self, list1, input_list, TESTMODE):
        while any(isinstance(l, list) for l in list1):

            n = next(l for l in list1 if isinstance(
                l, list))  #get the next instance of a list in the given list
            for j in range(len(list1)):  #find that instance's index
                if isinstance(list1[j], list) and list1[j] == n:
                    i = j
                    break
            list1[i] = self.rule_recurse(
                list1[i], input_list,
                TESTMODE)[0]  #recurse the function on the found list
            #print 'list:', list1, 'dive deeper... '

        #### APPLY FUZZY RULES #### (order of operations: order of while loops)
        ###
        while 'IS' in list1:  #get all firing strengths first
            i = list1.index('IS')
            fs = self.firing_strength(list1[i + 1], input_list[list1[i - 1]],
                                      self.inputs[list1[i - 1]])
            if TESTMODE == 1:
                print "FIRING STRENGTH for", self.inputs[list1[
                    i - 1]].name, 'is', list1[i + 1], 'at Input:', '=', fs
            list1[i - 1:i + 2] = [fs]
        ###
        while 'ISNOT' in list1:  #get compliment firing strengths next
            i = list1.index('ISNOT')
            fs = 1 - self.firing_strength(list1[i + 1],
                                          input_list[list1[i - 1]],
                                          self.inputs[list1[i - 1]])
            if TESTMODE == 1:
                print "FIRING STRENGTH for", self.inputs[list1[
                    i - 1]].name, 'is', list1[i + 1], 'at Input:', '=', fs
            list1[i - 1:i + 2] = [fs]
        ###
        while 'OR' in list1:  #calculate ORs next
            i = list1.index('OR')
            if self.OR_operator == 'MAX':
                x = max(list1[i - 1], list1[i + 1])
            # other OR operators??
            if TESTMODE == 1: print "REPLACE: ", list1[i - 1:i + 2], 'with', x
            list1[i - 1:i + 2] = [x]
        ###
        while 'AND' in list1:  #calculate ANDs next
            i = list1.index('AND')
            if self.AND_operator == 'MIN':  #use minimum operator
                x = min(list1[i - 1], list1[i + 1])
            elif self.AND_operator == 'PRODUCT':  #use product operator
                x = list1[i - 1] * list1[i + 1]
            # other AND operators??
            if TESTMODE == 1: print "REPLACE: ", list1[i - 1:i + 2], 'with', x
            list1[i - 1:i + 2] = [x]
        ###
        while 'ANDOR' in list1:  #calculate and/ors (means)
            i = list1.index('ANDOR')
            x = (list1[i - 1] +
                 list1[i + 1]) / 2.0  #take the mean of the two operands
            if TESTMODE == 1: print 'REPLACE: ', list1[i - 1:i + 2], 'with', x
            list1[i - 1:i + 2] = [x]

        #calculate mean operators next

        return list1

    #perform implication on a given output MF for a given antecedent firing strength
    #fs - firing strength of rule antecedent (single float value)
    #outputMF - output membership function to apply implication to (form [[x_vals],[y_vals]])
    #returns the resulting membership function
    def implicate(self, fs, outputMF):
        y_ = copy.deepcopy(outputMF[1])
        if self.implication == 'MIN':
            for i in range(len(y_)):
                if y_[i] > fs: y_[i] = fs
        if self.implication == 'PRODUCT':
            for i in range(len(y_)):
                y_[i] = y_[i] * fs
        return y_

    #perform aggregation on the given outputs of the rules
    #MFs - list of MF functions
    #returns aggregation of all MFs
    def aggregate(self, MFs):
        o1 = MFs.pop()
        if self.aggregator == 'MAX':
            while len(MFs) > 0:
                o2 = MFs.pop()
                o1[0], o1[1] = fuzz.fuzzy_or(o1[0], o1[1], o2[0], o2[1])

        return o1

    #runs the fuzzy system for a single output
    #input_list - dict of inputs {'input1':value, 'input2':value, ...}
    #output_key - key of output to calculate
    #TESTMODE   - testmode flag
    #returns    - the resulting fuzzy output (NOT Defuzzified)
    def run_system(self, input_list, output_key, TESTMODE):

        outs = []
        for rule in self.rulebase:  #iterate over rulebase
            if TESTMODE == 1:
                print '------------------------------------------------------------------------'
                print 'TRANSLATING RULE: ', rule.rule_id, 'for output', output_key

            #break apart antecedent and consequent
            if_i = rule.rule_list.index('IF')
            then_i = rule.rule_list.index('THEN')
            rule_ant = copy.deepcopy(
                rule.rule_list[if_i + 1:then_i])  #get the rule antecedent
            rule_con = copy.deepcopy(
                rule.rule_list[then_i + 1:len(rule.rule_list) +
                               1])[0]  #get the rule consequent

            if rule_con[
                    0] == output_key:  #only follow rule if it applies to given output

                fs = self.rule_recurse(rule_ant, input_list,
                                       TESTMODE)[0]  #get firing strength

                if TESTMODE == 1:
                    print 'FIRING STRENGTH, RULE', rule.rule_id, ':', fs

                output = copy.deepcopy(
                    self.outputs[rule_con[0]].MFs[rule_con[2]])  #get output
                output[1] = self.implicate(
                    fs, output)  #use implication to get fuzzy consequent
                outs.append(output)

        #aggregate outputs
        if len(outs) > 0:
            output_result = self.aggregate(
                outs)  #aggregate outputs if there are outputs
        else:
            m1 = self.outputs[output_key].data_range[0]  #get output min
            m2 = self.outputs[output_key].data_range[1]  #get output max
            x1 = np.arange(m1, m2, 0.01)  #get x range
            output_result = [x1, [0 for i in range(len(x1))]
                             ]  #return mf function of zeros

        return output_result

    #runs the fuzzy system for all inputs
    #input_list          - dict of inputs {'input1':value, 'input2':value, ...}  #lack of name in dict assumes 0 input (0 for all possible values)
    #TESTMODE (optional) - flag to output text for test mode (default is OFF)
    #returns:            - dict of outputs {'output1':value, 'output2':value, ...}

    def execute(self):  #, input_list,TESTMODE=0):

        #try:
        TESTMODE = self.TESTMODE
        TESTPLOT = self.TESTPLOT
        input_list = self.input_list

        #check if for fcl file and re-read inputs
        if self.fcl_file != self.old_fcl_file:
            print 'New FCL File:', self.fcl_file, 'detected.  Loading ....'
            self.inputs, self.outputs, self.rulebase, self.AND_operator, \
            self.OR_operator, self.aggregator, self.implication, self.defuzz = build_fuzz_system(self.fcl_file)

            print 'New FRBS Loaded...', len(self.inputs), 'inputs. ', \
                  len(self.outputs), 'outputs. ', len(self.rulebase), 'rules.'
            #print 'INPUTS w/ MFs:', [k for k in self.inputs]

            self.input_mfs = self.inputs  #add to MDAO outputs
            self.output_mfs = self.outputs  #add to MDAO outputs

            self.old_fcl_file = self.fcl_file  #track fcl file

        #----TESTMODE----:
        if TESTMODE == 1:
            print 'INPUTS PASSED:', [k for k in input_list]
            for k in input_list:
                if isinstance(input_list[k], list):
                    print k, 'as fuzzy input. ', len(
                        self.inputs[k].MFs), 'MFs available.'
                else:
                    print k, 'as', input_list[k], '.', len(
                        self.inputs[k].MFs), 'MFs available.'

        #----------------

        #print 'Executing FRBS', len(input_list), 'input values read from', len(self.input_list), 'inputs.'
        #run systm for each output
        for key in self.outputs:

            if self.passthrough == 1:
                self.outputs_all[
                    key] = None  #catch for incompatible option (does nothing if incompatible)
            else:
                output_val = self.run_system(input_list, key, TESTMODE)
                self.outputs_all[key] = output_val

            #----TESTMODE----:
            if TESTPLOT == 1:

                fig = plt.figure()

                i = 1

                for k in self.inputs:
                    #plot each input against MFs
                    plt.subplot(len(self.inputs) + len(self.outputs), 1, i)
                    for k2 in self.inputs[k].MFs:
                        plt.plot(self.inputs[k].MFs[k2][0],
                                 self.inputs[k].MFs[k2][1])
                    i = i + 1

                    #plot input
                    if isinstance(input_list[k], list):
                        plt.plot(input_list[k][0],
                                 input_list[k][1],
                                 lw=3.0,
                                 color='k')
                        plt.yticks([0.0, 0.5, 1.0])
                    else:
                        plt.plot([input_list[k], input_list[k]], [0, 1.0],
                                 lw=3.0,
                                 color='k')
                        plt.yticks([0.0, 0.5, 1.0])
                    plt.ylabel(k)
                    plt.ylim([0, 1.1])
                    #plt.xlim([1,9])

                #plot output against MFs
                plt.subplot(len(self.inputs) + len(self.outputs), 1, i)
                for k in self.outputs[key].MFs:
                    plt.plot(self.outputs[key].MFs[k][0],
                             self.outputs[key].MFs[k][1])
                    plt.yticks([0.0, 0.5, 1.0])
                plt.plot(output_val[0], output_val[1], lw=3.5, color='b')
                fig.subplots_adjust(hspace=0.5, top=0.95, bottom=0.05)
                plt.show()
            #--------------

        self.runFlag_out = self.runFlag_in
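
A small usage sketch for the fuzzy system component above. The FCL file name and the linguistic input/output names ('speed', 'altitude', 'risk') are hypothetical; in practice they are defined by whatever file build_fuzz_system() reads:

frbs = Fuzzy_System()
frbs.fcl_file = 'approach_system.fcl'   # hypothetical Fuzzy Control Language file
# inputs may be singletons (floats) or fuzzy MFs given as [x_array, y_array] pairs
frbs.input_list = {'speed': 3.2, 'altitude': 150.0}
frbs.execute()
risk_mf = frbs.outputs_all['risk']      # fuzzy (not defuzzified) output as [x, y]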
Example #23
class ExternalCode(ComponentWithDerivatives):
    """
    Run an external code as a component. The component can be configured to
    run the code on a remote server. See :meth:`execute`.

    Default stdin is the 'null' device, default stdout is the console, and
    default stderr is ``error.out``.
    """

    PIPE     = shellproc.PIPE
    STDOUT   = shellproc.STDOUT
    DEV_NULL = shellproc.DEV_NULL

    # pylint: disable-msg=E1101
    command = List(Str, desc='The command to be executed.')
    env_vars = Dict({}, iotype='in',
                    desc='Environment variables required by the command.')
    resources = Dict({}, iotype='in',
                     desc='Resources required to run this component.')
    poll_delay = Float(0., low=0., units='s', iotype='in',
                       desc='Delay between polling for command completion.'
                            ' A value of zero will use an internally computed'
                            ' default.')
    timeout = Float(0., low=0., iotype='in', units='s',
                    desc='Maximum time to wait for command completion.'
                         ' A value of zero implies an infinite wait.')
    timed_out = Bool(False, iotype='out', desc='True if the command timed-out.')
    return_code = Int(0, iotype='out', desc='Return code from the command.')

    def __init__(self, *args, **kwargs):
        super(ExternalCode, self).__init__(*args, **kwargs)
        self.check_external_outputs = True

        self.stdin  = self.DEV_NULL
        self.stdout = None
        self.stderr = "error.out"

        self._process = None
        self._server = None

    # This gets used by remote server.
    def get_access_controller(self):  #pragma no cover
        """ Return :class:`AccessController` for this object. """
        return _AccessController()

    @rbac(('owner', 'user'))
    def set(self, path, value, index=None, src=None, force=False):
        """
        Don't allow setting of 'command' or 'resources' by a remote client.
        """
        if path in ('command', 'resources', 'get_access_controller') \
           and remote_access():
            self.raise_exception('%r may not be set() remotely' % path,
                                 RuntimeError)
        return super(ExternalCode, self).set(path, value, index, src, force)

    def execute(self):
        """
        Runs the specified command.

            1. Existing output (but not in/out) files are removed.
            2. Checks that all external input files exist.
            3. Runs the command.
            4. Checks that all external output files exist.

        If a subclass generates outputs (such as postprocessing results),
        then it should set attribute ``check_external_outputs`` False and call
        :meth:`check_files` itself.

        If `resources` have been specified, an appropriate server
        is allocated and the command is run on that server.
        Otherwise the command is run locally.

        When running remotely, the following resources are set:

        ================ =====================================
        Key              Value
        ================ =====================================
        job_name         self.get_pathname()
        ---------------- -------------------------------------
        remote_command   self.command (first item)
        ---------------- -------------------------------------
        args             self.command (2nd through last items)
        ---------------- -------------------------------------
        job_environment  self.env_vars
        ---------------- -------------------------------------
        input_path       self.stdin
        ---------------- -------------------------------------
        output_path      self.stdout
        ---------------- -------------------------------------
        error_path       self.stderr (if != STDOUT)
        ---------------- -------------------------------------
        join_files       If self.stderr == STDOUT
        ---------------- -------------------------------------
        wallclock_time   self.timeout (if non-zero)
        ================ =====================================

        .. note::

            Input files to be sent to the remote server are defined by
            :class:`FileMetadata` entries in the `external_files` list
            with `input` True.  Similarly, output files to be retrieved
            from the remote server are defined by entries with `output`
            True.

        .. warning::

            Any file **not** labeled with `binary` True will undergo
            newline translation if the local and remote machines have
            different newline representations. Newline translation will
            corrupt a file which is binary but hasn't been labeled as
            such.

        """
        self.return_code = -12345678
        self.timed_out = False

        # Remove existing output (but not in/out) files.
        for metadata in self.external_files:
            if metadata.get('output', False) and \
               not metadata.get('input', False):
                for path in glob.glob(metadata.path):
                    if os.path.exists(path):
                        os.remove(path)
        for pathname, obj in self.items(iotype='out', recurse=True):
            if isinstance(obj, FileRef):
                if os.path.exists(obj.path):
                    os.remove(obj.path)

        if not self.command:
            self.raise_exception('Empty command list', ValueError)

        self.check_files(inputs=True)

        return_code = None
        error_msg = ''
        try:
            if self.resources:
                return_code, error_msg = self._execute_remote()
            else:
                return_code, error_msg = self._execute_local()

            if return_code is None:
                if self._stop:
                    self.raise_exception('Run stopped', RunStopped)
                else:
                    self.timed_out = True
                    self.raise_exception('Timed out', RunInterrupted)

            elif return_code:
                if isinstance(self.stderr, str):
                    stderrfile = open(self.stderr, 'r')
                    error_desc = stderrfile.read()
                    stderrfile.close()
                    err_fragment = "\nError Output:\n%s" % error_desc
                else:
                    err_fragment = error_msg
                    
                self.raise_exception('return_code = %d%s' \
                    % (return_code, err_fragment), RuntimeError)

            if self.check_external_outputs:
                self.check_files(inputs=False)
        finally:
            self.return_code = -999999 if return_code is None else return_code

    def check_files(self, inputs):
        """
        Check that all 'specific' input or output external files exist.
        If an external file path specifies a pattern, it is *not* checked.

        inputs: bool
            If True, check inputs; otherwise outputs.
        """
        # External files.
        for metadata in self.external_files:
            path = metadata.path
            for ch in ('*?['):
                if ch in path:
                    break
            else:
                if inputs:
                    if not metadata.get('input', False):
                        continue
                else:
                    if not metadata.get('output', False):
                        continue
                if not os.path.exists(path):
                    iotype = 'input' if inputs else 'output'
                    self.raise_exception('missing %s file %r' % (iotype, path),
                                         RuntimeError)
        # Stdin, stdout, stderr.
        if inputs and self.stdin and self.stdin != self.DEV_NULL:
            if not os.path.exists(self.stdin):
                self.raise_exception('missing stdin file %r' % self.stdin,
                                     RuntimeError)

        if not inputs and self.stdout and self.stdout != self.DEV_NULL:
            if not os.path.exists(self.stdout):
                self.raise_exception('missing stdout file %r' % self.stdout,
                                     RuntimeError)

        if not inputs and self.stderr \
                      and self.stderr != self.DEV_NULL \
                      and self.stderr != self.STDOUT \
                      and (not self.resources or \
                           not self.resources.get('join_files')):
            if not os.path.exists(self.stderr):
                self.raise_exception('missing stderr file %r' % self.stderr,
                                     RuntimeError)
        # File variables.
        if inputs:
            for pathname, obj in self.items(iotype='in', recurse=True):
                if isinstance(obj, FileRef):
                    path = self.get_metadata(pathname, 'local_path')
                    if path and not os.path.exists(path):
                        self.raise_exception("missing 'in' file %r" % path,
                                             RuntimeError)
        else:
            for pathname, obj in self.items(iotype='out', recurse=True):
                if isinstance(obj, FileRef):
                    if not os.path.exists(obj.path):
                        self.raise_exception("missing 'out' file %r" % obj.path,
                                             RuntimeError)

    def _execute_local(self):
        """ Run command. """
        self._logger.info('executing %s...', self.command)
        start_time = time.time()

        self._process = \
            shellproc.ShellProc(self.command, self.stdin,
                                self.stdout, self.stderr, self.env_vars)
        self._logger.debug('PID = %d', self._process.pid)

        try:
            return_code, error_msg = \
                self._process.wait(self.poll_delay, self.timeout)
        finally:
            self._process.close_files()
            self._process = None

        et = time.time() - start_time
        if et >= 60:  #pragma no cover
            self._logger.info('elapsed time: %.1f sec.', et)

        return (return_code, error_msg)

    def _execute_remote(self):
        """
        Allocate a server based on required resources, send inputs,
        run command, and retrieve results.
        """
        rdesc = self.resources.copy()

        # Allocate server.
        self._server, server_info = RAM.allocate(rdesc)
        if self._server is None:
            self.raise_exception('Server allocation failed :-(', RuntimeError)

        if self._logger.level == logging.NOTSET:
            # By default avoid lots of protocol messages.
            self._server.set_log_level(logging.DEBUG)
        else:
            self._server.set_log_level(self._logger.level)

        return_code = -88888888
        error_msg = ''
        try:
            # Create resource description for command.
            rdesc['job_name'] = self.get_pathname()
            rdesc['remote_command'] = self.command[0]
            if len(self.command) > 1:
                rdesc['args'] = self.command[1:]
            if self.env_vars:
                rdesc['job_environment'] = self.env_vars
            if not self.stdin:
                self.raise_exception('Remote execution requires stdin of'
                                     ' DEV_NULL or filename, got %r'
                                     % self.stdin, ValueError)
            if self.stdin != self.DEV_NULL:
                rdesc['input_path'] = self.stdin
            if self.stdout:
                rdesc['output_path'] = self.stdout
            else:
                rdesc['output_path'] = '%s.stdout' % self.command[0]
            if self.stderr:
                if self.stderr == self.STDOUT:
                    rdesc['join_files'] = True
                else:
                    rdesc['error_path'] = self.stderr
            else:
                rdesc['error_path'] = '%s.stderr' % self.command[0]
            if self.timeout:
                if 'resource_limits' in rdesc:
                    limits = rdesc['resource_limits'].copy()
                else:
                    limits = {}
                limits['wallclock_time'] = self.timeout
                rdesc['resource_limits'] = limits

            # Send inputs.
            patterns = []
            textfiles = []
            for metadata in self.external_files:
                if metadata.get('input', False):
                    patterns.append(metadata.path)
                    if not metadata.binary:
                        textfiles.append(metadata.path)
            for pathname, obj in self.items(iotype='in', recurse=True):
                if isinstance(obj, FileRef):
                    local_path = self.get_metadata(pathname, 'local_path')
                    if local_path:
                        patterns.append(local_path)
                        if not obj.binary:
                            textfiles.append(local_path)
            if self.stdin and self.stdin != self.DEV_NULL:
                patterns.append(self.stdin)
                textfiles.append(self.stdin)
            if patterns:
                self._send_inputs(patterns, textfiles)
            else:
                self._logger.debug('No input files')

            # Run command.
            self._logger.info('executing %s...', self.command)
            start_time = time.time()
            return_code, error_msg = \
                self._server.execute_command(rdesc)
            et = time.time() - start_time
            if et >= 60:  #pragma no cover
                self._logger.info('elapsed time: %.1f sec.', et)

            # Retrieve results.
            patterns = []
            textfiles = []
            for metadata in self.external_files:
                if metadata.get('output', False):
                    patterns.append(metadata.path)
                    if not metadata.binary:
                        textfiles.append(metadata.path)
            for pathname, obj in self.items(iotype='out', recurse=True):
                if isinstance(obj, FileRef):
                    patterns.append(obj.path)
                    if not obj.binary:
                        textfiles.append(obj.path)
            patterns.append(rdesc['output_path'])
            textfiles.append(rdesc['output_path'])
            if self.stderr != self.STDOUT:
                patterns.append(rdesc['error_path'])
                textfiles.append(rdesc['error_path'])
            self._retrieve_results(patterns, textfiles)

            # Echo stdout if not redirected.
            if not self.stdout:
                name = rdesc['output_path']
                if os.path.exists(name):
                    with open(name, 'rU') as inp:
                        sys.stdout.write(inp.read())
                    os.remove(name)
                else:
                    sys.stdout.write('\n[No stdout available]\n')

            # Echo stderr if not redirected.
            if not self.stderr:
                name = rdesc['error_path']
                if os.path.exists(name):
                    with open(name, 'rU') as inp:
                        sys.stderr.write(inp.read())
                    os.remove(name)
                else:
                    sys.stderr.write('\n[No stderr available]\n')
        finally:
            RAM.release(self._server)
            self._server = None

        return (return_code, error_msg)

    def _send_inputs(self, patterns, textfiles):
        """ Sends input files matching `patterns`. """
        self._logger.info('sending inputs...')
        start_time = time.time()

        filename = 'inputs.zip'
        pfiles, pbytes = pack_zipfile(patterns, filename, self._logger)
        try:
            filexfer(None, filename, self._server, filename, 'b')
            ufiles, ubytes = self._server.unpack_zipfile(filename,
                                                         textfiles=textfiles)
        finally:
            os.remove(filename)
            self._server.remove(filename)

        # Difficult to force file transfer error.
        if ufiles != pfiles or ubytes != pbytes:  #pragma no cover
            msg = 'Inputs xfer error: %d:%d vs. %d:%d' \
                  % (ufiles, ubytes, pfiles, pbytes)
            self.raise_exception(msg, RuntimeError)

        et = time.time() - start_time
        if et >= 60:  #pragma no cover
            self._logger.info('elapsed time: %f sec.', et)

    def _retrieve_results(self, patterns, textfiles):
        """ Retrieves result files matching `patterns`. """
        self._logger.info('retrieving results...')
        start_time = time.time()

        filename = 'outputs.zip'
        pfiles, pbytes = self._server.pack_zipfile(patterns, filename)
        filexfer(self._server, filename, None, filename, 'b')

        # Valid, but empty, file causes unpack_zipfile() problems.
        try:
            if os.path.getsize(filename) > 0:
                ufiles, ubytes = unpack_zipfile(filename, logger=self._logger,
                                                textfiles=textfiles)
            else:
                ufiles, ubytes = 0, 0
        finally:
            os.remove(filename)
            self._server.remove(filename)

        # Difficult to force file transfer error.
        if ufiles != pfiles or ubytes != pbytes:  #pragma no cover
            msg = 'Results xfer error: %d:%d vs. %d:%d' \
                  % (ufiles, ubytes, pfiles, pbytes)
            self.raise_exception(msg, RuntimeError)

        et = time.time() - start_time
        if et >= 60:  #pragma no cover
            self._logger.info('elapsed time: %f sec.', et)

    def stop(self):
        """ Stop the external code. """
        self._stop = True
        if self._process:
            self._process.terminate()

    def copy_inputs(self, inputs_dir, patterns):
        """
        Copy inputs from `inputs_dir` that match `patterns`.

        inputs_dir: string
            Directory to copy files from. Relative paths are evaluated from
            the component's execution directory.

        patterns: list or string
            One or more :mod:`glob` patterns to match against.

        This can be useful for resetting problem state.
        """
        self._logger.info('copying initial inputs from %s...', inputs_dir)
        with self.dir_context:
            if not os.path.exists(inputs_dir):
                self.raise_exception("inputs_dir '%s' does not exist" \
                                     % inputs_dir, RuntimeError)
            self._copy(inputs_dir, patterns)

    def copy_results(self, results_dir, patterns):
        """
        Copy files from `results_dir` that match `patterns`.

        results_dir: string
            Directory to copy files from. Relative paths are evaluated from
            the component's execution directory.

        patterns: list or string
            One or more :mod:`glob` patterns to match against.

        This can be useful for workflow debugging when the external
        code takes a long time to execute.
        """
        self._logger.info('copying precomputed results from %s...', results_dir)
        with self.dir_context:
            if not os.path.exists(results_dir):
                self.raise_exception("results_dir '%s' does not exist" \
                                     % results_dir, RuntimeError)
            self._copy(results_dir, patterns)

    def _copy(self, directory, patterns):
        """
        Copy files from `directory` that match `patterns`
        to the current directory and ensure they are writable.

        directory: string
            Directory to copy files from.

        patterns: list or string
            One or more :mod:`glob` patterns to match against.
        """
        if isinstance(patterns, basestring):
            patterns = [patterns]

        for pattern in patterns:
            pattern = os.path.join(directory, pattern)
            for src_path in sorted(glob.glob(pattern)):
                dst_path = os.path.basename(src_path)
                self._logger.debug('    %s', src_path)
                shutil.copy(src_path, dst_path)
                # Ensure writable.
                mode = os.stat(dst_path).st_mode
                mode |= stat.S_IWUSR
                os.chmod(dst_path, mode)
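
A hedged sketch of a remote run with this second ExternalCode variant. The solver name, file names, the FileMetadata usage, and the 'min_cpus' resource key are illustrative assumptions rather than values confirmed by this example; what matters is that a non-empty resources dict routes execute() to _execute_remote():

remote = ExternalCode()
remote.command = ['mysolver', '-i', 'case.inp']   # placeholder command line
remote.stdin = ExternalCode.DEV_NULL              # remote execution requires an explicit stdin
remote.stdout = 'case.log'
remote.external_files.append(FileMetadata(path='case.inp', input=True))
remote.external_files.append(FileMetadata(path='case.out', output=True))
remote.resources = {'min_cpus': 8}                # assumed resource key; server chosen by RAM.allocate()
remote.run()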
Example #24
class DFES(Component):

    #component inputs and outputs
    weight_file = Str('', iotype='in', desc='File name for the weight file')
    actType = Str('', iotype='in', desc='Type of Activation Function')
    hidNodes = Int(1, iotype='in', desc='Number of Hidden Nodes')
    inGran = Int(1, iotype='in', desc='Number of Input Nodes per Input')
    outGran = Int(1, iotype='in', desc='Number of Output Nodes per Output')
    inRanges = Dict({}, iotype='in', desc='Dict of Inputs (input:[min,max])')
    inOrder = List([], iotype='in', desc='List of Inputs in correct order.')
    outRanges = Dict({}, iotype='in', desc='Dict of Outputs (input:[min,max])')

    input_list = Dict({}, iotype='in', desc='Dict of Input Values')
    TESTPLOT = Int(0, iotype='in', desc='Flag for plotting')
    runFlag_in = Int(0, iotype='in', desc='test')

    passthrough = Int(0,
                      iotype='in',
                      low=0,
                      high=1,
                      desc='passthrough flag for incompatible options')

    #outputs
    outputs_all = Dict({}, iotype='out', desc='Output Value Dict')
    runFlag_out = Int(0, iotype='out', desc='test')
    """
    Discrete Fuzzy Expert System ::: MODIFIED for OpenMDAO
    
    ----- INPUTS -----
    inRanges : dict
        dict of input names with ranges for inputs {'inputName':[x_min, x_max], ... }
    outRanges : dict
        dict of output names with ranges for inputs {'inputName':[x_min, x_max], ... }
    actType : str
        type of activation function to use
    hidNodes : int
        number of hidden nodes to use
    inGran : int
        number of discrete values to divide each input into (input nodes = inGran*#inputs)
    outGran : int
        number of discrete values to divide each output into (output nodes = outGran*#outputs)
    """
    def __init__(
        self
    ):  # inRanges, outRanges, actType, hidNodes, inGran=50, outGran=50, inWeights=None, outputWeights=None):
        """ Creates a new System object """
        super(DFES, self).__init__()
        self.old_weight_file = self.weight_file  #save current weight file

        #self.actType    = actType
        #self.inRanges   = inRanges
        #self.outRanges  = outRanges
        #self.inGran     = inGran
        #self.outGran    = outGran

        #FUZZY INPUT/OUTPUT MFs
        self.inputXs = OrderedDict((inp, np.arange( min(self.inRanges[inp]), max(self.inRanges[inp]),
                       (max(self.inRanges[inp]) - min(self.inRanges[inp]))/float(self.inGran) ) ) \
                       for inp in self.inOrder) #x values for input MFs
        self.outputXs = {otp : np.arange( min(self.outRanges[otp]), max(self.outRanges[otp]),
                       (max(self.outRanges[otp]) - min(self.outRanges[otp]))/float(self.outGran) ) \
                       for otp in self.outRanges} #x values for output MFs

        #NEURAL NET
        self.nIn = len(self.inRanges) * self.inGran + len(
            self.inRanges)  #num of input nodes (+1 bias for each input)
        self.nHid = self.hidNodes  #num of hidden nodes
        self.nOut = len(self.outRanges) * self.outGran  #number of output nodes

        self.actIn = [1.0] * self.nIn  #input activations
        self.actHid = [1.0] * self.nHid  #hidden activations
        self.actOut = [1.0] * self.nOut  #output activations

        #create weight matrices (randomize)
        self.weightIn = np.ones((self.nIn, self.nHid))
        self.weightOut = np.ones((self.nHid, self.nOut))

        #create momentum matrices (last change in weights)
        self.momIn = np.zeros((self.nIn, self.nHid))
        self.momOut = np.zeros((self.nHid, self.nOut))

        #no randomization of weights... only trained systems
        #print 'New System Loaded...',  len(self.inputXs), 'inputs. ', len(self.outputXs), 'outputs. ',
        #print self.nIn, 'input nodes. ', self.nHid, 'hidden nodes. ', self.nOut, 'output nodes. '

    def feedforward(self, inputs):
        """
        Calculates network through feedforward
        ----- INPUTS -----
        inputs : dict
            inputs to system in form of fuzzy MFs {'input name': [x,y] or x (for singleton, ...}
        ----- OUTPUTS -----
        outputs : dict
            outputs to system in form {'output name': [x,y], ... }
        """
        mu_min = 0.4  #if no input node is greater than this, it'll find one

        with Timer() as t:
            #translate input MFs to input membership nodes by interpolating
            inNodes = []
            for inp in self.inputXs:
                if isinstance(inputs[inp], list) or isinstance(
                        inputs[inp], tuple):  #for mf input
                    inpYs = np.interp(self.inputXs[inp], inputs[inp][0],
                                      inputs[inp][1])
                elif isinstance(inputs[inp], float) or isinstance(
                        inputs[inp], int):  #for singleton inputs
                    inpYs = np.interp(
                        self.inputXs[inp],
                        [inputs[inp] * 0.9, inputs[inp], inputs[inp] * 1.1],
                        [0.0, 1.0, 0.0])
                else:
                    raise StandardError(
                        "Inputs of unusable type! Input %s (value %s) is of type %s"
                        % (inp, inputs[inp], type(inputs[inp])))

                #check for mis-interpolated input
                if all([y < mu_min for y in inpYs]):
                    #print "modding inputs",
                    #print inYs
                    max_mu = max(inputs[inp][1])  #get max input membership
                    max_x = inputs[inp][0][list(
                        inputs[inp][1]).index(max_mu)]  #get x value at max mf
                    node_x = min(
                        self.inputXs[inp], key=lambda x: abs(x - max_x)
                    )  #get node with closest value to max mf x value
                    inpYs[list(self.inputXs[inp]).index(
                        node_x
                    )] = max_mu  #assign maximum mf at closest value to max mf

                inNodes = inNodes + list(inpYs) + [
                    1.0
                ]  #combine inputs and a bias for each input

            self.actIn = inNodes  #replace input nodes with new ones

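            #Equivalent vectorized form (sketch): actHid = f(actIn . weightIn),
            #actOut = f(actHid . weightOut), where f applies functions(., actType)
            #elementwise; the explicit loops below are kept as in the original.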
            #activations for hidden nodes:
            for i in range(len(self.actHid)):
                self.actHid[i] = sum([self.actIn[j]*self.weightIn[j][i] \
                                    for j in range(len(self.actIn))]) #sum of individual weights*input activations
                self.actHid[i] = functions(self.actHid[i],
                                           self.actType)  #apply function

            #activations for output nodes
            for i in range(len(self.actOut)):
                self.actOut[i] = sum([self.actHid[j]*self.weightOut[j][i] \
                                    for j in range(len(self.actHid))]) #sum of individual weights*Hidden activations
                self.actOut[i] = functions(self.actOut[i],
                                           self.actType)  #apply function

            #get output MFs
            outputMFs = {}
            i = 0
            for otp in self.outputXs:
                outputMFs[otp] = [
                    self.outputXs[otp], self.actOut[i:i + self.outGran]
                ]  #get [x,y] of mf
                i = i + self.outGran

        #print '=> completed in', t.secs, 'sec'

        return outputMFs

    def backpropagate(self, targets, LR, M):
        """
        Backpropagate result through system to adjust weights. Uses a momentum factor
        to speed training
        
        ----- INPUTS -----
        targets : dict
            target outputs in form {output name: [x,y], ... }
        LR : float
            learning rate
        M : float 
            momentum multiplier 
        """
        if len(targets) * self.outGran != self.nOut:
            raise ValueError('wrong number of target values')

        #interpolate output data to output node X values and build target nodes
        nTarget = []
        for otp in self.outputXs:
            tarYs = np.interp(self.outputXs[otp], targets[otp][0],
                              targets[otp][1])
            nTarget = nTarget + list(tarYs)

        #get deltas for output nodes
        outDels = [nTarget[i] - self.actOut[i] for i in range(len(nTarget))]
        outDels = [dfunctions(self.actOut[i], self.actType)*outDels[i] \
                    for i in range(len(outDels))]

        #get deltas for hidden nodes
        hidDels = [0.0] * len(self.actHid)
        for i in range(len(self.actHid)):
            errors = [
                outDels[j] * self.weightOut[i][j] for j in range(len(outDels))
            ]
            hidDels[i] = dfunctions(self.actHid[i], self.actType) * sum(errors)

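        #Weight update with momentum (same rule for both layers):
        #  w[i][j] += LR * delta_j * act_i + M * prev_del_w[i][j]
        #where delta_j is the node error scaled by the activation derivative
        #(outDels / hidDels above) and prev_del_w is the last change (momOut / momIn).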
        #update output weights
        for i in range(len(self.weightOut)):
            for j in range(len(self.weightOut[i])):
                del_w = outDels[j] * self.actHid[i]
                self.weightOut[i][j] = self.weightOut[i][
                    j] + del_w * LR + self.momOut[i][j] * M
                self.momOut[i][j] = del_w

        #update hidden weights
        for i in range(len(self.weightIn)):
            for j in range(len(self.weightIn[i])):
                del_w = hidDels[j] * self.actIn[i]
                self.weightIn[i][j] = self.weightIn[i][
                    j] + del_w * LR + self.momIn[i][j] * M
                self.momIn[i][j] = del_w

        MSE = sum([(nTarget[i] - self.actOut[i])**2
                   for i in range(len(nTarget))]) / len(nTarget)
        return MSE**0.5  #return root-mean-square error

    def getError(self, targets):
        """
        Get Error for a given target
        
        ----- INPUTS -----
        targets : dict
            target outputs in form {output name: [x,y], ... }
        """
        if len(targets) * self.outGran != self.nOut:
            raise ValueError('wrong number of target values')

        #interpolate output data to output node X values and build target nodes
        nTarget = []
        for otp in self.outputXs:
            tarYs = np.interp(self.outputXs[otp], targets[otp][0],
                              targets[otp][1])
            nTarget = nTarget + list(tarYs)

        #get deltas for output nodes
        #print "TARGETS:", nTarget
        #print "OUTPUTS:", self.actOut
        #plt.figure()
        #plt.plot([i for i in range(len(nTarget))], nTarget, '--b')
        #plt.plot([i for i in range(len(self.actOut))], self.actOut, '-r')
        #plt.show()

        outDels = [nTarget[i] - self.actOut[i] for i in range(len(nTarget))]
        RSME = (sum([oD**2 for oD in outDels]) /
                len(outDels))**0.5  #root-mean-square error against the targets

        return RSME

    def train(self,
              data,
              holdback=0.2,
              LR=0.1,
              M=0.02,
              maxIterations=300,
              xConverge=0.0005,
              interactive=False):
        """
        Train the system through back propagation. Stops at maxIterations or when
        convereged with running average standard dev. Checks standard deviation over
        a running average of last 10 iterations to see if it's smaller than xConverge.
        Uses stoichastic method, randomizing data order each epoch, but updating weights on each 
        pass through. 
        
        ----- INPUTS -----
        data : list
            data list for training network. data in form:
            [({in1: [x,y], in2[x,y], ... }, {out1: [x,y], out2: [x,y], ...} ), ...]
        holdback : float[0-1]
            percent of data to be used for validation
        LR : float
            learning rate
        M : float
            momentum multiplier (speeds training)
        maxIterations : int
            maximum number of iterations through the data
        xConverge : float
            stops training when last 10 training points have a std. dev. < xConverge
        ----- OUTPUTS -----
        system : instance of DFES
            instance of the trained system
        """

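        #Training loop sketch: split off a validation set, then each epoch
        #shuffle the training data, feedforward + backpropagate per sample,
        #measure the validation error, checkpoint the best weights, and stop when
        #the normalized std. dev. of the last 10 total errors falls below xConverge.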
        #separate the data
        valData, trainData = [], []
        valIndices = random.sample(range(len(data)),
                                   int(holdback * len(data)))
        for i in range(len(data)):
            if i in valIndices: valData.append(data[i])
            else: trainData.append(data[i])

        print "Using", len(trainData), "training points and", len(
            valData), "validation points.",
        print "Holdback =", round(
            float(len(valData)) / (len(valData) + len(trainData)), 3)

        convergeFlag = False  # flag for convergence
        iter = 0  # iteration counter
        totRSME_0 = 10.**10  # initialize RSME
        valRSME_min = 10.**10  # initialize RSME
        trackTrainERR = []
        trackValERR = []
        trackERR = []
        normStdDev_last10 = None

        if interactive:
            plt.figure()
            plt.xlabel('Training Iteration')
            plt.ylabel('Average System RSME')
            plt.ion()
            plt.show()

        with Timer() as t:

            while not convergeFlag and iter < maxIterations:  #main training loop
                iter = iter + 1

                #randomize data order (stochastic training)
                iRef = range(len(trainData))  #get indices for data
                random.shuffle(iRef)  #shuffle indices
                trainData = [trainData[i]
                             for i in iRef]  #assign new order (no deep copy needed)

                #pass data through backpropagate
                trainRSME = 0.0
                for item in trainData:
                    self.feedforward(item[0])
                    trainRSME = trainRSME + self.backpropagate(item[1], LR, M)

                #get validation data error
                valRSME = 0.0
                for item in valData:
                    self.feedforward(item[0])
                    valRSME = valRSME + self.getError(item[1])

                trackTrainERR.append(trainRSME)  #track training Error
                trackValERR.append(valRSME)  #track validation error
                trackERR.append(trainRSME + valRSME)  #track total error

                #save best systems
                if valRSME < valRSME_min:
                    self.write_weights('data/temp_opt_weights_file.nwf')
                    valRSME_min = valRSME

                #check for convergence
                if len(trackERR) > 10:  #only after 10 iterations
                    normStdDev_last10 = np.std(trackERR[-10:]) / np.average(
                        trackERR[-10:]
                    )  #get normalized standard deviation of last 10 total errors
                    if abs(normStdDev_last10) < xConverge:
                        convergeFlag = True
                        print 'Training Converged, normalized stdDev =', normStdDev_last10

                #plot if interactive
                if interactive:
                    plt.cla()
                    plt.plot(
                        [i for i in range(len(trackTrainERR))],
                        [e / len(data) for e in trackTrainERR],
                    )
                    plt.plot(
                        [i for i in range(len(trackValERR))],
                        [e / len(data) for e in trackValERR],
                    )
                    plt.plot(
                        [i for i in range(len(trackERR))],
                        [e / len(data) for e in trackERR],
                    )
                    plt.legend(
                        ["Training Error", "Validation Error", "Total Error"])
                    plt.draw()

                #display progress
                if iter % 5 == 0:
                    print 'Iteration', iter, 'trainErr:', round(trainRSME, 3),
                    print 'valErr:', round(valRSME, 3),
                    print 'normStdDev(last 10)', normStdDev_last10,
                    print '=> run time', t.getTime(), 's'

        print "Best Validation RSME:", valRSME_min
        self.read_weights('data/temp_opt_weights_file.nwf')

        plt.cla()
        plt.plot(
            [i for i in range(len(trackTrainERR))],
            [e / len(data) for e in trackTrainERR],
        )
        plt.plot(
            [i for i in range(len(trackValERR))],
            [e / len(data) for e in trackValERR],
        )
        plt.plot(
            [i for i in range(len(trackERR))],
            [e / len(data) for e in trackERR],
        )
        plt.legend(["Training Error", "Validation Error", "Total Error"])
        plt.draw()

        testRSME = self.test(valData, plotPoints=0)

        return testRSME

    def test(self, valData, plotPoints=3):
        """
        Tests the system, returning the sum of the RSMEs of the val data. Shows plots
        to illustrate accuracy of system
        """
        totRSME = 0

        #get validation data error
        for item in valData:
            self.feedforward(item[0])
            totRSME = totRSME + self.getError(item[1])

        plotData = []  #random validation data points to plot
        while len(plotData) < plotPoints:
            plotData.append(random.choice(valData))  #random.choice avoids skipping the last point

        print "Test RSME:", totRSME, '-', totRSME / len(
            valData), '(normalized to data set size)'

        for dat in plotData:
            output = self.feedforward(dat[0])
            plt.figure()
            i = 1
            for inp in self.inputXs:  #plot input MFs
                ax = plt.subplot(len(self.inputXs), 2, i)
                i = i + 2
                ax.plot(dat[0][inp][1], dat[0][inp][0])
                ax.scatter(list(dat[0][inp][1]),
                           list(dat[0][inp][0]),
                           marker='o',
                           c='r')
                ax.plot([0, 1.1],
                        [self.inRanges[inp][0], self.inRanges[inp][0]], '--k')
                ax.plot([0, 1.1],
                        [self.inRanges[inp][1], self.inRanges[inp][1]], '--k')
                ax.set_xlim([0.0, 1.1])
                ax.set_ylabel(
                    str([self.inRanges[inp][0], self.inRanges[inp][1]]))

            i = 1
            for otp in self.outputXs:  #plot sys output MFs and data MFs
                ax = plt.subplot(len(self.outputXs), 2, i + 1)
                i = i + 2
                ax.plot(dat[1][otp][1], dat[1][otp][0])
                ax.plot(output[otp][1], output[otp][0])
                ax.set_xlim([0.0, 1.1])
                ax.set_ylim([self.outRanges[otp][0], self.outRanges[otp][1]])

            plt.show()

        return totRSME

    def write_weights(self, filename):
        """
        Write out the weights to a file to recreate the system
        
        """
        with open(filename, 'w') as writer:  #'w' mode truncates any existing file
            writer.write('INPUT WEIGHTS' + '\n')  #start input weight block
            for row in self.weightIn:
                for w in row:
                    writer.write(str(w) + ',')
                writer.write('\n')
            writer.write('OUTPUT WEIGHTS' + '\n')  #start output weight block
            for row in self.weightOut:
                for w in row:
                    writer.write(str(w) + ',')
                writer.write('\n')

    def read_weights(self, filename):
        """
        Reads in a weights file to recreate a trained system
        """

        with open(filename, 'r') as f:
            lines = f.readlines()

        #get input weights
        inW = []
        flag = 0
        for line in lines:
            if 'INPUT WEIGHTS' in line:
                flag = 1
                continue
            if 'OUTPUT WEIGHTS' in line:
                flag = 0
                continue
            if flag == 1 and line.strip() != "":
                row = line.rstrip().split(',')
                while '' in row:
                    row.pop(row.index(''))
                row = [float(r) for r in row]
                inW.append(row)

        #check weight matrix size before accepting as system weights
        if len(inW) != self.nIn or len(inW[0]) != self.nHid:
            print "Input Matrix Size:", len(inW), len(inW[0])
            raise ValueError('Input weight matrix in file is the wrong size!')
        else:
            self.weightIn = np.array(inW)

        #get output weights
        outW = []
        flag = 0
        for line in lines:
            if 'OUTPUT WEIGHTS' in line:
                flag = 1
                continue

            if flag == 1 and line.strip() != "":
                row = line.rstrip().split(',')
                while '' in row:
                    row.pop(row.index(''))
                row = [float(r) for r in row]
                outW.append(row)

        #check weight matrix size before accepting as system weights
        if len(outW) != self.nHid or len(outW[0]) != self.nOut:
            print "Output Matrix Size:", len(outW), len(outW[0])
            raise ValueError('Output weight matrix in file is the wrong size!')
        else:
            self.weightOut = np.array(outW)

    def execute(self):
        """
        Runs the system (feed forward) and returns the output dic
        
        ----- INPUTS -----
        inputs : dict
            inputs to system in form of fuzzy MFs {'input name': [x,y] or x (for singleton, ...}
        
        ----- OUTPUTS -----
        outputs : dict
            outputs to system in form {'output name': [x,y], ... }
        """

        #inputs = self.input_list

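        #If the weight file path has changed since the last run, rebuild the
        #network and load the new weights before feeding the inputs forward.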
        if self.old_weight_file != self.weight_file:  #check for changed weights file
            self.__init__()  #reinitialize system
            self.read_weights(self.weight_file)
            self.old_weight_file = self.weight_file  #save old weight

            print "DFES File Loaded:", self.weight_file

        #print "Running DFES for outputs:", [otp for otp in self.outRanges]

        #print len(self.input_list), "INPUTS!"
        if self.passthrough == 1:
            self.outputs_all = {
                otp: None
                for otp in self.outputXs.keys()
            }  #catch for incompatible option (does nothing if incompatible)
        else:
            self.outputs_all = self.feedforward(self.input_list)

        if self.TESTPLOT == 1:
            data = [[self.input_list, self.outputs_all],
                    [self.input_list, self.outputs_all]]
            e = self.test(data, plotPoints=1)

        self.runFlag_out = self.runFlag_in
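
    #Hedged usage sketch (attribute values below are illustrative, not from the source):
    #  dfes = DFES()
    #  dfes.weight_file = 'data/trained_weights.nwf'       #hypothetical path to a trained weight file
    #  dfes.input_list = {'inp1': 0.5, 'inp2': (xs, ys)}   #singleton or [x,y] MF per input
    #  dfes.execute()
    #  print dfes.outputs_all                              #{'output name': [x,y], ...}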