Example 1
# Imports assumed for the legacy OpenMDAO 0.x API (not shown in the original snippet):
from openmdao.main.api import Component
from openmdao.lib.datatypes.api import Event, Int

class MyEvComp(Component):
    doit = Event(desc='Do It!')
    doit2 = Event(desc='Do It Again!')
    doit_count = Int(0, iotype='out')
    doit2_count = Int(0, iotype='out')
    some_int = Int(0, iotype='in')

    def _doit_fired(self):
        self.doit_count += 1
        
    def _doit2_fired(self):
        self.doit2_count += 1
        
    def execute(self):
        pass
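
In OpenMDAO's legacy Traits-based API, assigning any value to an Event trait fires the matching _<name>_fired handler; the event itself stores no value. A minimal usage sketch (assumed, not from the original page):

comp = MyEvComp()
comp.doit = True      # fires _doit_fired; the assigned value is ignored
comp.doit = True      # fires again
comp.doit2 = True     # fires _doit2_fired
print(comp.doit_count, comp.doit2_count)   # expected: 2 1
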
Example 2
# Imports assumed for the legacy OpenMDAO 0.x API (not shown in the original snippet):
from openmdao.main.api import Component
from openmdao.lib.datatypes.api import Bool, Event, Float

# rosen_suzuki() is the Rosen-Suzuki test function, defined elsewhere in the
# original test module.

class DrivenComponent(Component):
    """ Just something to be driven and compute results. """

    x0 = Float(1., iotype='in')
    y0 = Float(1., iotype='in')  # used just to get ParameterGroup
    x1 = Float(1., iotype='in')
    x2 = Float(1., iotype='in')
    x3 = Float(1., iotype='in')
    err_event = Event()
    stop_exec = Bool(False, iotype='in')
    rosen_suzuki = Float(0., iotype='out')

    def __init__(self):
        super(DrivenComponent, self).__init__()
        self._raise_err = False

    def _err_event_fired(self):
        self._raise_err = True

    def execute(self):
        """ Compute results from input vector. """
        self.rosen_suzuki = rosen_suzuki(self.x0, self.x1, self.x2, self.x3)
        if self._raise_err:
            self.raise_exception('Forced error', RuntimeError)
        if self.stop_exec:
            self.parent.driver.stop()  # Only valid if sequential!
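
A short sketch of how err_event might be exercised (assumed usage; firing the event only sets a flag, and the exception is raised on the following execution):

comp = DrivenComponent()
comp.run()               # computes rosen_suzuki normally
comp.err_event = True    # fires _err_event_fired, setting _raise_err
comp.run()               # now raises a RuntimeError ('Forced error')
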
Example 3
# Imports assumed for the legacy OpenMDAO 0.x API (not shown in the original snippet):
from math import sqrt

from openmdao.main.api import Component
from openmdao.lib.datatypes.api import Array, Event, Float, List, ListStr
from openmdao.main.uncertain_distributions import NormalDistribution

class Pareto_Min_Dist(Component):
    """Computes the probability that any given point from the primary concept
    will intersect the Pareto frontiers of some other concepts.
    """
    pareto = List([], iotype="in",
                  desc="List of CaseIterators containing competing local "
                       "Pareto points")

    criteria = ListStr(iotype="in", dtype="str",
                       desc="Names of responses to maximize expected improvement "
                            "around. Must be NormalDistribution type.")

    predicted_values = Array(iotype="in", dtype=NormalDistribution,
                             desc="CaseIterator which contains a NormalDistribution "
                                  "for each response at a location where you wish to "
                                  "calculate EI.")

    dist = Float(0.0, iotype="out",
                 desc="Minimum distance from a point to the other Pareto sets")

    reset_pareto = Event()
    
    def __init__(self):
        super(Pareto_Min_Dist, self).__init__()
        self.y_star_other = None
        
    def _reset_pareto_fired(self):
        self.y_star_other = None
    
    def get_pareto(self):
        y_star_other = []

        for single_case_list in self.pareto:
            for case in single_case_list:
                c = []
                for objective in case.outputs:
                    for crit in self.criteria:
                        if crit in objective[0]:
                            # TODO: criteria needs at least two things matching
                            # objective names in CaseIterator outputs, error otherwise
                            c.append(objective[2])
                if c:
                    y_star_other.append(c)

        return y_star_other
        
        
    def _calc_min_dist(self, p, y_star_other):
        """Computes the minimum distance from a candidate point
        to the other Pareto sets.
        """
        dists = []
        for y in y_star_other:
            d = sqrt(sum([(a - b)**2 for a, b in zip(p, y)]))
            dists.append(d)

        return min(dists)
        
    def execute(self):
        mu = [objective.mu for objective in self.predicted_values]

        if self.y_star_other is None:
            self.y_star_other = self.get_pareto()

        self.dist = self._calc_min_dist(mu, self.y_star_other)
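
The distance calculation itself is plain Euclidean distance; a standalone sketch of the same logic, free of the Component machinery:

from math import sqrt

def min_dist(p, pareto_points):
    """Minimum Euclidean distance from point p to any point in pareto_points."""
    return min(sqrt(sum((a - b)**2 for a, b in zip(p, y)))
               for y in pareto_points)

print(min_dist([0.0, 0.0], [[3.0, 4.0], [1.0, 1.0]]))   # -> 1.4142...
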
        
Example 4
# Imports assumed for the legacy OpenMDAO 0.x API (not shown in the original snippet):
from openmdao.main.api import Component, Case
from openmdao.lib.datatypes.api import Bool, Dict, Event, List, Slot, Str
from openmdao.main.interfaces import ICaseRecorder, IComponent, ISurrogate
# not_none (trait filter) and _missing (sentinel) are OpenMDAO helpers; their
# exact import paths are assumptions.

class MetaModelBase(Component):
    """
    Base class for functionality of a meta model.
    Should be subclassed.
    """
    # pylint: disable-msg=E1101
    model = Slot(IComponent, allow_none=True,
                   desc='Slot for the Component or Assembly being '
                   'encapsulated.')
    includes = List(Str, iotype='in',
                    desc='A list of names of variables to be included '
                         'in the public interface.')
    excludes = List(Str, iotype='in',
                    desc='A list of names of variables to be excluded '
                         'from the public interface.')

    default_surrogate = Slot(ISurrogate, allow_none=True,
                             desc="This surrogate will be used for all "
                             "outputs that don't have a specific surrogate "
                             "assigned to them in their sur_<name> slot.")

    surrogates = Dict(key_trait=Str,
                      value_trait=Slot(ISurrogate),
                      desc='surrogates for output variables')

    report_errors = Bool(True, iotype="in",
                         desc="If True, the metamodel will re-raise errors "
                              "raised by the component. If False, it will "
                              "swallow them but log that they happened and "
                              "exclude the case from the training set.")

    recorder = Slot(ICaseRecorder,
                    desc='Records training cases')

    # when fired, the next execution will train the metamodel
    train_next = Event(desc='Train metamodel on next execution')

    # when fired, the next execution will reset all training data
    reset_training_data = Event(desc='Reset training data on next execution')

    def __init__(self):
        super(MetaModelBase, self).__init__()
        self._surrogate_input_names = None
        self._surrogate_output_names = None
        self._surrogate_overrides = set()  # keeps track of which sur_<name> slots are full
        self._training_data = {}
        self._training_input_history = []
        self._const_inputs = {}  # dict of constant training inputs indices and their values
        self._train = False
        self._new_train_data = False
        self._failed_training_msgs = []
        self._default_surrogate_copies = {}  # need to maintain separate copy of
                                             # default surrogate for each sur_*
                                             # that doesn't have a surrogate
                                             # defined

        # the following line will work for classes that inherit from MetaModel
        # as long as they declare their traits in the class body and not in
        # the __init__ function.  If they need to create traits dynamically
        # during initialization they'll have to provide the value of
        # _mm_class_traitnames
        self._mm_class_traitnames = set(self.traits(iotype=not_none).keys())

        self.on_trait_change(self._surrogate_updated, "surrogates_items")

    def _train_next_fired(self):
        self._train = True
        self._new_train_data = True

    def _reset_training_data_fired(self):
        self._training_input_history = []
        self._const_inputs = {}
        self._failed_training_msgs = []

        # remove output history from training_data
        for name in self._training_data:
            self._training_data[name] = []

    def _warm_start_data_changed(self, oldval, newval):
        self.reset_training_data = True

        # build list of inputs
        for case in newval:
            if self.recorder:
                self.recorder.record(case)
            inputs = []
            for inp_name in self.surrogate_input_names():
                var_name = '.'.join([self.name, inp_name])
                try:
                    inp_val = case[var_name]
                except KeyError:
                    pass
                    #self.raise_exception('The variable "%s" was not '
                                         #'found as an input in one of the cases provided '
                                         #'for warm_start_data.' % var_name, ValueError)
                else:
                    if inp_val is not None:
                        inputs.append(inp_val)
            self._training_input_history.append(inputs)

            for output_name in self.surrogate_output_names():
                # grab value from case data
                var_name = '.'.join([self.name, output_name])
                try:
                    val = case.get_output(var_name)
                except KeyError:
                    self.raise_exception('The output "%s" was not found '
                                         'in one of the cases provided for '
                                         'warm_start_data' % var_name, ValueError)
                else:  # save to training output history
                    self._training_data[output_name].append(val)

        self._new_train_data = True

    def child_run_finished(self, childname, outs=None):
        pass

    def check_config(self):
        '''Called as part of pre_execute.'''

        # 1. model must be set
        if self.model is None:
            self.raise_exception("MetaModel object must have a model!",
                                 RuntimeError)

        # 2. can't have both includes and excludes
        if self.excludes and self.includes:
            self.raise_exception("includes and excludes are mutually exclusive",
                                 RuntimeError)

        # 3. the includes and excludes must match actual inputs and outputs of the model
        input_names = self.surrogate_input_names()
        output_names = self.surrogate_output_names()
        input_and_output_names = input_names + output_names
        for include in self.includes:
            if include not in input_and_output_names:
                self.raise_exception('The include "%s" is not one of the '
                                     'model inputs or outputs ' % include, ValueError)
        for exclude in self.excludes:
            if exclude not in input_and_output_names:
                self.raise_exception('The exclude "%s" is not one of the '
                                     'model inputs or outputs ' % exclude, ValueError)

        # 4. Either there are no surrogates set and no default surrogate
        #    ( just do passthrough )
        #        or
        #    all outputs must have surrogates assigned either explicitly
        #    or through the default surrogate
        if self.default_surrogate is None:
            no_sur = []
            for name in self.surrogate_output_names():
                if not self.surrogates[name]:
                    no_sur.append(name)
            if len(no_sur) > 0 and len(no_sur) != len(self._surrogate_output_names):
                self.raise_exception("No default surrogate model is defined and"
                                     " the following outputs do not have a"
                                     " surrogate model: %s. Either specify"
                                     " default_surrogate, or specify a"
                                     " surrogate model for all outputs." %
                                     no_sur, RuntimeError)

        # 5. All the explicitly set surrogates[] should match actual outputs of the model
        for surrogate_name in self.surrogates.keys():
            if surrogate_name not in output_names:
                self.raise_exception('The surrogate "%s" does not match one of the '
                                     'model outputs ' % surrogate_name, ValueError)

    def execute(self):
        """If the training flag is set, train the metamodel. Otherwise,
        predict outputs.
        """
        if self._train:
            try:
                inputs = self.update_model_inputs()
                self.model.run(force=True)

            except Exception as err:
                if self.report_errors:
                    raise err
                else:
                    self._failed_training_msgs.append(str(err))
            else:  # if no exceptions are generated, save the data

                self._training_input_history.append(inputs)
                self.update_outputs_from_model()
                case_outputs = []

                for name, output_history in self._training_data.items():
                    case_outputs.append(('.'.join([self.name, name]),
                                         output_history[-1]))
                # save the case, making sure to add our name to the local input
                # names since this Case is scoped to our parent Assembly
                case_inputs = [('.'.join([self.name, name]), val)
                               for name, val in zip(self.surrogate_input_names(),
                                                    inputs)]
                if self.recorder:
                    self.recorder.record(Case(inputs=case_inputs,
                                              outputs=case_outputs))

            self._train = False
        else:
            # No surrogates defined; just run the model and get the outputs.
            if self.default_surrogate is None and not self._surrogate_overrides:
                inputs = self.update_model_inputs()
                self.model.run()
                self.update_outputs_from_model()
                return

            if self._new_train_data:
                if len(self._training_input_history) < 2:
                    self.raise_exception("ERROR: need at least 2 training points!",
                                         RuntimeError)

                # figure out if we have any constant training inputs
                tcases = self._training_input_history
                in_hist = tcases[0][:]
                # start off assuming every input is constant
                idxlist = range(len(in_hist))
                self._const_inputs = dict(zip(idxlist, in_hist))
                for i in idxlist:
                    val = in_hist[i]
                    for case in range(1, len(tcases)):
                        if val != tcases[case][i]:
                            del self._const_inputs[i]
                            break

                if len(self._const_inputs) == len(in_hist):
                    self.raise_exception("ERROR: all training inputs are constant.")
                elif len(self._const_inputs) > 0:
                    # some inputs are constant, so we have to remove them from the training set
                    training_input_history = []
                    for inputs in self._training_input_history:
                        training_input_history.append([val for i, val in enumerate(inputs)
                                                       if i not in self._const_inputs])
                else:
                    training_input_history = self._training_input_history
                for name, output_history in self._training_data.items():
                    surrogate = self._get_surrogate(name)
                    if surrogate is not None:
                        surrogate.train(training_input_history, output_history)

                self._new_train_data = False

            inputs = []
            for i, name in enumerate(self.surrogate_input_names()):
                val = self.get(name)
                cval = self._const_inputs.get(i, _missing)
                if cval is _missing:
                    inputs.append(val)

                elif val != cval:
                    self.raise_exception("ERROR: training input '%s' was a"
                                         " constant value of (%s) but the value"
                                         " has changed to (%s)." %
                                         (name, cval, val), ValueError)

            for name in self._training_data:
                surrogate = self._get_surrogate(name)
                # copy output to boundary
                if surrogate is None:
                    self._set_output(name, self.model.get(name))
                else:
                    self._set_output(name, surrogate.predict(inputs))
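
The two events define the typical training/prediction cycle for a metamodel. A hedged sketch, assuming a concrete subclass (SomeMetaModel, some_input, and training_points are hypothetical names):

mm = SomeMetaModel()
mm.model = DrivenComponent()     # component to approximate
for x in training_points:
    mm.some_input = x
    mm.train_next = True         # next run() trains instead of predicting
    mm.run()
mm.some_input = 0.5
mm.run()                         # predicts from the trained surrogates
mm.reset_training_data = True    # fires immediately, clearing the stored history
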
Example 5
# Imports assumed for the legacy OpenMDAO 0.x API (not shown in the original snippet):
from numpy import array, diag, exp, isnan, pi, random
from scipy.special import erf

from openmdao.main.api import Component
from openmdao.lib.datatypes.api import Array, Enum, Event, Float, Int
from openmdao.main.uncertain_distributions import NormalDistribution

class MultiObjExpectedImprovementBase(Component):
    criteria = Array(iotype="in",
                     desc="Names of responses to maximize expected improvement "
                          "around. Must be NormalDistribution type.")

    predicted_values = Array([0, 0], iotype="in", dtype=NormalDistribution,
                             desc="CaseIterator which contains NormalDistributions "
                                  "for each response at a location where you wish "
                                  "to calculate EI.")

    n = Int(1000, iotype="in",
            desc="Number of Monte Carlo samples with which to calculate "
                 "probability of improvement.")

    calc_switch = Enum("PI", ["PI", "EI"], iotype="in",
                       desc="Switch to use either probability (PI) or "
                            "expected (EI) improvement.")

    PI = Float(0.0, iotype="out",
               desc="The probability of improvement of the next_case.")

    EI = Float(0.0, iotype="out",
               desc="The expected improvement of the next_case.")

    reset_y_star = Event(desc='Reset Y* on next execution')

    def __init__(self):
        super(MultiObjExpectedImprovementBase, self).__init__()
        self.y_star = None

    def _reset_y_star_fired(self):
        self.y_star = None

    def get_y_star(self):
        # best_cases is expected to be supplied by a subclass.
        try:
            y_star = zip(*[self.best_cases[crit] for crit in self.criteria])
        except KeyError:
            self.raise_exception(
                'no cases in the provided case_set had output '
                'matching the provided criteria, %s' % self.criteria,
                ValueError)

        # sort the list on the first objective
        y_star = array(y_star)[array([i[0] for i in y_star]).argsort()]
        return y_star

    def _2obj_PI(self, mu, sigma):
        """Calculates the multi-objective probability of improvement
        for a new point with two responses. Takes as input a
        Pareto frontier and the mean and sigma of the new point."""

        y_star = self.y_star

        def cdf(y, k):
            """Standard normal CDF of y for objective k."""
            return 0.5 + 0.5 * erf((y - mu[k]) / (sigma[k] * 2**0.5))

        # probability mass before the first Pareto point (objective 0 only)
        PI1 = cdf(y_star[0][0], 0)
        # mass past the last Pareto point in objective 0, below it in objective 1
        PI3 = (1 - cdf(y_star[-1][0], 0)) * cdf(y_star[-1][1], 1)

        # mass in the strip between each pair of consecutive Pareto points
        PI2 = 0
        if len(y_star) > 1:
            for i in range(len(y_star) - 1):
                PI2 += ((cdf(y_star[i + 1][0], 0) - cdf(y_star[i][0], 0))
                        * cdf(y_star[i + 1][1], 1))

        return PI1 + PI2 + PI3

    def _2obj_EI(self, mu, sigma):
        """Calculates the multi-criteria expected improvement
        for a new point with two responses. Takes as input a
        Pareto frontier and the mean and sigma of the new point."""

        y_star = self.y_star

        def cdf(y, k):
            """Standard normal CDF of y for objective k."""
            return 0.5 + 0.5 * erf((y - mu[k]) / (sigma[k] * 2**0.5))

        def partial_exp(y, k):
            """mu*CDF - sigma*PDF: partial expectation of objective k below y."""
            return (mu[k] * cdf(y, k)
                    - sigma[k] * (1. / (2. * pi)**0.5)
                    * exp(-0.5 * (y - mu[k])**2 / sigma[k]**2))

        # expected value of objective 0, conditional on improvement
        ybar11 = partial_exp(y_star[0][0], 0)
        ybar13 = partial_exp(y_star[-1][0], 0) * cdf(y_star[-1][1], 1)

        ybar12 = 0
        if len(y_star) > 1:
            for i in range(len(y_star) - 1):
                ybar12 += ((partial_exp(y_star[i + 1][0], 0)
                            - partial_exp(y_star[i][0], 0))
                           * cdf(y_star[i + 1][1], 1))
        ybar1 = (ybar11 + ybar12 + ybar13) / self.PI

        # expected value of objective 1, conditional on improvement
        ybar21 = partial_exp(y_star[0][1], 1)
        ybar23 = partial_exp(y_star[-1][1], 1) * cdf(y_star[-1][0], 0)

        ybar22 = 0
        if len(y_star) > 1:
            for i in range(len(y_star) - 1):
                ybar22 += ((partial_exp(y_star[i + 1][1], 1)
                            - partial_exp(y_star[i][1], 1))
                           * cdf(y_star[i + 1][0], 0))
        ybar2 = (ybar21 + ybar22 + ybar23) / self.PI

        # expected improvement = P(improvement) * distance from the centroid
        # (ybar1, ybar2) to the nearest current Pareto point
        dists = [((ybar1 - point[0])**2 + (ybar2 - point[1])**2)**0.5
                 for point in y_star]
        mcei = self.PI * min(dists)
        if isnan(mcei):
            mcei = 0
        return mcei

    def _dom(self, a, b):
        """Determines if point a completely dominates point b.
        Returns True if it does.
        """
        comp = [c1 < c2 for c1, c2 in zip(a, b)]
        return sum(comp) == len(self.criteria)

    def _nobj_PI(self, mu, sigma):
        """Monte Carlo estimate of the probability of improvement
        for an arbitrary number of objectives."""
        cov = diag(array(sigma)**2)
        rands = random.multivariate_normal(mu, cov, self.n)
        num = 0  # number of samples dominated by the current Pareto set

        for random_sample in rands:
            for par_point in self.y_star:
                #par_point = [p[2] for p in par_point.outputs]
                if self._dom(par_point, random_sample):
                    num += 1
                    break
        return (self.n - num) / float(self.n)

    def execute(self):
        """Calculates the expected improvement or probability of
        improvement of a candidate point given by a normal distribution.
        """
        mu = [objective.mu for objective in self.predicted_values]
        sig = [objective.sigma for objective in self.predicted_values]

        if self.y_star is None:
            self.y_star = self.get_y_star()

        n_objs = len(self.criteria)

        if n_objs == 2:
            # bi-objective optimization
            self.PI = self._2obj_PI(mu, sig)
            if self.calc_switch == 'EI':
                # execute EI calculations
                self.EI = self._2obj_EI(mu, sig)
        elif n_objs > 2:
            # n-objective optimization
            self.PI = self._nobj_PI(mu, sig)
            if self.calc_switch == 'EI':
                self.raise_exception(
                    "EI calculations not supported"
                    " for more than 2 objectives", ValueError)