Example #1
    def test(self):
        error = EMAError('a message')

        self.assertEqual(str(error), 'a message')

        error = EMAError('a message', 'another message')

        self.assertEqual(str(error), "(u'a message', u'another message')")
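EMAError itself adds no behaviour beyond Exception; a minimal sketch consistent with this test (under Python 2, str() of a multi-argument exception yields the repr of the argument tuple, hence the u'' prefixes):

class EMAError(Exception):
    # minimal sketch: a plain Exception subclass is enough to satisfy the
    # assertions above
    pass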
    def _generate_experiments(self, cases, which_uncertainties):
        '''
        Helper method for generating experiments
        
        Parameters
        ----------
        cases : int or list
        which_uncertainties : {INTERSECTION, UNION}

        Returns
        -------
        generator
            a generator that yields experiment dicts
        int
            the total number of experiments
            (i.e. nr of cases * nr of models * nr of policies)
        list
            list of the uncertainties over which the experiments are designed
        
        '''
        overview_dict, unc_dict = self.determine_uncertainties()
        # identify the uncertainties and sample over them
        if isinstance(cases, int):
            if which_uncertainties == UNION:
                if isinstance(self.sampler, FullFactorialSampler):
                    raise EMAError(
                        "full factorial sampling cannot be combined with exploring the union of uncertainties"
                    )
                uncertainties = unc_dict.values()
            elif which_uncertainties == INTERSECTION:
                uncertainties = overview_dict[tuple(
                    [msi.name for msi in self.model_structures])]
                unc_dict = {
                    key.name: unc_dict[key.name]
                    for key in uncertainties
                }
                uncertainties = [unc_dict[unc.name] for unc in uncertainties]
            else:
                raise ValueError("incompatible value for which_uncertainties")

            designs, nr_of_designs = self.sampler.generate_designs(
                uncertainties, cases)
        elif isinstance(cases, list):
            unc_names = reduce(set.union, map(set, map(dict.keys, cases)))
            uncertainties = [unc_dict[unc] for unc in unc_names]
            designs = cases
            nr_of_designs = len(designs)
        else:
            raise EMAError("unknown type for cases")

        nr_of_exp = nr_of_designs * len(self.policies) * len(
            self.model_structures)
        experiments = experiment_generator(designs, self.model_structures,
                                           self.policies)

        return experiments, nr_of_exp, uncertainties
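Worked example of the count computed above: 50 sampled designs combined with 2 policies and 3 model structure interfaces yield 50 * 2 * 3 = 300 experiments.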
Example #3
    def model_init(self, policy, kwargs):
        '''
        Method called to initialize the model.
        
        Parameters
        ----------
        policy : dict
                 policy to be run.
        kwargs : dict
                 keyword arguments to be used by model_init. This
                 gives users the ability to pass any additional
                 arguments.
        
        
        '''

        if not self.xl:
            try:
                ema_logging.debug("trying to start Excel")
                self.xl = win32com.client.Dispatch("Excel.Application")
                ema_logging.debug("Excel started")
            except com_error as e:
                raise EMAError(str(e))
        ema_logging.debug("trying to open workbook")
        self.wb = self.xl.Workbooks.Open(self.working_directory +
                                         self.workbook)
        ema_logging.debug("workbook opened")
        ema_logging.debug(self.working_directory)
Example #4
    def __init__(self, working_directory, name):
        """
        interface to the model
        
        Parameters
        ----------
        
        working_directory : str
                            working_directory for the model. 
        name : str
               name of the modelInterface. The name should contain only
               alpha-numerical characters.
               
        Raises
        ------
        EMAError if name contains non-alphanumeric characters
        
        
        """
        self.name = None

        super(ModelStructureInterface, self).__init__()
        if working_directory:
            self.set_working_directory(working_directory)

        if not name.isalnum():
            raise EMAError("name of model should only contain "
                           "alphanumeric characters")

        self.name = name
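The name check relies on str.isalnum, which also rejects spaces and underscores; a quick illustration:

# str.isalnum() accepts letters and digits only
for candidate in ("model1", "model 1", "model_1"):
    print(candidate, candidate.isalnum())  # True, False, False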
def construct_distances(data, distance='gonenc', **kwargs):
    """ 
        
    Constructs an n-by-n matrix of distances for the n data series in data,
    according to the specified distance.
    
    The distance argument specifies the distance measure to be used. Options, 
    which are defined in clusteringDistances.py, are as follows.
    
    * gonenc: a distance based on qualitative dynamic pattern features 
    * willem: a distance mainly based on the presence of crisis periods and 
              the overall trend of the data series
    * sse: regular sum of squared errors
    * mse: regular mean squared error
    
    SSE and MSE are in clusteringDistances.py and don't work right now.
    
    Others will be added over time.
    
    """

    # Sets up the distance function according to user specification
    try:
        return distance_functions[distance](data, **kwargs)
    except KeyError:
        raise EMAError("trying to use an unknown distance: %s" % (distance))
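construct_distances dispatches on the distance_functions mapping defined in clusteringDistances.py. A self-contained sketch of that dispatch pattern, with a hypothetical stand-in for the real sse implementation:

import numpy as np

def sse_distance(data):
    # hypothetical stand-in: pairwise sum of squared errors between series
    n = len(data)
    distances = np.zeros((n, n))
    for i in range(n):
        for j in range(i + 1, n):
            distances[i, j] = distances[j, i] = np.sum((data[i] - data[j]) ** 2)
    return distances

distance_functions = {'sse': sse_distance}

series = [np.array([1.0, 2.0, 3.0]),
          np.array([1.0, 2.5, 2.0])]
print(distance_functions['sse'](series))
# an unknown key raises KeyError, which construct_distances turns into EMAError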
def simple_density(density, value, ax_d, ax, log):
    '''
    
    Helper function, responsible for producing a density plot
    
    Parameters
    ----------
    density : {HIST, BOXPLOT, VIOLIN, KDE}
    value : ndarray
    ax_d : axes instance
    ax : axes instance
    log : bool
    
    '''

    if density == KDE:
        plot_kde(ax_d, [value[:, -1]], log)
    elif density == HIST:
        plot_histogram(ax_d, value[:, -1], log)
    elif density == BOXPLOT:
        plot_boxplots(ax_d, value[:, -1], log)
    elif density == VIOLIN:
        plot_violinplot(ax_d, [value[:, -1]], log)
    else:
        raise EMAError("unknown density plot type")

    ax_d.get_yaxis().set_view_interval(ax.get_yaxis().get_view_interval()[0],
                                       ax.get_yaxis().get_view_interval()[1])
    ax_d.set_ylim(ymin=ax.get_yaxis().get_view_interval()[0],
                  ymax=ax.get_yaxis().get_view_interval()[1])
Example #7
    def __init__(self, evaluate_population, generate_individual, levers,
                 reporting_interval, obj_function, ensemble, crossover_rate,
                 mutation_rate, weights, pop_size):
        self.evaluate_population = evaluate_population
        self.levers = levers
        self.lever_keys = list(levers.keys())
        self.reporting_interval = reporting_interval
        self.ensemble = ensemble
        self.crossover_rate = crossover_rate
        self.mutation_rate = mutation_rate
        self.weights = weights
        self.obj_function = obj_function
        self.pop_size = pop_size

        #create a class for the individual
        creator.create("Fitness", base.Fitness, weights=self.weights)
        creator.create("Individual", dict,
                       fitness=creator.Fitness)  #@UndefinedVariable
        self.toolbox = base.Toolbox()
        self.levers = levers

        self.attr_list = []
        self.lever_names = []
        for key, value in levers.items():
            lever_type = value['type']
            values = value['values']

            if lever_type == 'list':
                self.toolbox.register(key, random.choice, values)
            else:
                if lever_type == 'range int':
                    self.toolbox.register(key, random.randint, values[0],
                                          values[1])
                elif lever_type == 'range float':
                    self.toolbox.register(key, random.uniform, values[0],
                                          values[1])
                else:
                    raise EMAError(
                        "unknown allele type: possible types are range and list"
                    )

            self.attr_list.append(getattr(self.toolbox, key))
            self.lever_names.append(key)

        # Structure initializers
        self.toolbox.register(
            "individual",
            generate_individual,
            creator.Individual,  #@UndefinedVariable
            self.attr_list,
            keys=self.lever_names)
        self.toolbox.register("population", tools.initRepeat, list,
                              self.toolbox.individual)

        # Operator registering
        self.toolbox.register("evaluate", self.obj_function)

        self.get_population = self._first_get_population
        self.called = 0
    def test_run_experiment(self):
        mockMSI = mock.Mock(spec=MockMSI)
        mockMSI.name = 'test'
        
        msis = {'test':mockMSI}

        runner = ExperimentRunner(msis, {})
        
        experiment = {'a':1, 'b':2, 'policy':{'name':'none'}, 'model':'test', 
                      'experiment id': 0}
        
        runner.run_experiment(experiment)

        self.assertEqual({('none', 'test'):mockMSI},runner.msi_initialization)
        
        mockMSI.run_model.assert_called_once_with({'a':1, 'b':2})
        mockMSI.model_init.assert_called_once_with({'name':'none'}, {})
        mockMSI.retrieve_output.assert_called_once_with()
        mockMSI.reset_model.assert_called_once_with()
        
        # assert raises ema error
        mockMSI = mock.Mock(spec=MockMSI)
        mockMSI.name = 'test'
        mockMSI.model_init.side_effect = EMAError("message")
        
        msis = {'test':mockMSI}
        runner = ExperimentRunner(msis, {})
    
        experiment = {'a':1, 'b':2, 'policy':{'name':'none'}, 'model':'test', 
                      'experiment id': 0}
        self.assertRaises(EMAError, runner.run_experiment, experiment)

        # assert raises exception
        mockMSI = mock.Mock(spec=MockMSI)
        mockMSI.name = 'test'
        mockMSI.model_init.side_effect = Exception("message")
        msis = {'test':mockMSI}
        runner = ExperimentRunner(msis, {})
    
        experiment = {'a':1, 'b':2, 'policy':{'name':'none'}, 'model':'test', 
                      'experiment id': 0}
        self.assertRaises(Exception, runner.run_experiment, experiment)
        
        # assert handling of case error
        mockMSI = mock.Mock(spec=MockMSI)
        mockMSI.name = 'test'
        mockMSI.run_model.side_effect = CaseError("message", {})
        msis = {'test':mockMSI}
        runner = ExperimentRunner(msis, {})
    
        experiment = {'a':1, 'b':2, 'policy':{'name':'none'}, 'model':'test', 
                      'experiment id': 0}

        runner.run_experiment(experiment)
    def transform(self, case):
        if not self.x:
            # first time transform is called
            self.x, self.y = self._get_initial_lookup(self.name)
            self.x_min = min(self.x)
            self.x_max = max(self.x)
        try:
            func = self.transform_functions[self.lookup_type]

            return func(case)
        except KeyError:
            raise EMAError(self.error_message)
def group_density(ax_d,
                  density,
                  outcomes,
                  outcome_to_plot,
                  group_labels,
                  log=False,
                  index=-1):
    '''
    helper function for plotting densities in case of grouped data
    
    
    Parameters
    ----------
    ax_d : axes instance
    density : {HIST, BOXPLOT, VIOLIN, KDE}
    outcomes :  dict
    outcome_to_plot : str 
    group_labels : list of str
    log : bool, optional
    index : int, optional
    
    Raises
    ------
    EMAError
        if density is unknown
    
    '''

    values = [
        outcomes[key][outcome_to_plot][:, index] for key in group_labels
    ]

    if density == HIST:
        plot_histogram(ax_d, values, log)
    elif density == BOXPLOT:
        plot_boxplots(ax_d, values, log, group_labels)
    elif density == VIOLIN:
        plot_violinplot(ax_d, values, log, group_labels=group_labels)
    elif density == KDE:
        plot_kde(ax_d, values, log)
    else:
        raise EMAError("unknown density type: {}".format(density))
def prepare_pairs_data(results,
                       outcomes_to_show=None,
                       group_by=None,
                       grouping_specifiers=None,
                       point_in_time=-1,
                       filter_scalar=True):
    '''
    
    Parameters
    ----------
    results : tuple
    outcomes_to_show : list of str, optional
    group_by : str, optional
    grouping_specifiers : iterable, optional
    point_in_time : int, optional
    filter_scalar : bool, optional
       
    '''
    if isinstance(outcomes_to_show, six.string_types):
        raise EMAError(
            "for pair wise plotting, more than one outcome needs to be provided"
        )

    outcomes, outcomes_to_show, time, grouping_labels = prepare_data(
        results, outcomes_to_show, group_by, grouping_specifiers,
        filter_scalar)

    def filter_outcomes(outcomes, point_in_time):
        new_outcomes = {}
        for key, value in outcomes.items():
            if len(value.shape) == 2:
                new_outcomes[key] = value[:, point_in_time]
            else:
                new_outcomes[key] = value
        return new_outcomes

    if point_in_time:
        if point_in_time != -1:
            point_in_time = np.where(time == point_in_time)

        if group_by:
            new_outcomes = {}
            for key, value in outcomes.items():
                new_outcomes[key] = filter_outcomes(value, point_in_time)
            outcomes = new_outcomes
        else:
            outcomes = filter_outcomes(outcomes, point_in_time)
    return outcomes, outcomes_to_show, grouping_labels
Example #12
def set_ax_collections_to_bw(ax, style):
    """
    Take each polycollection in the axes, ax, and convert the face color to be 
    suitable for black and white viewing.

    Parameters
    ----------
    ax : axes
        The axes whose polycollections need to be converted to 
        black and white.
    
    """        
    for collection in ax.collections:
        try:
            _collection_converter[collection.__class__](collection, ax, style)
        except KeyError:
            raise EMAError("converter for {} not implemented".format(
                collection.__class__))
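The converters live in the _collection_converter dict elsewhere in the module; a hypothetical sketch of what one entry could look like:

from matplotlib.collections import PolyCollection

def _poly_collection_to_bw(collection, ax, style):
    # hypothetical converter: swap the face colour for a neutral grey so the
    # fill stays readable in black and white
    collection.set_facecolor((0.6, 0.6, 0.6))

_collection_converter = {PolyCollection: _poly_collection_to_bw}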
    def _determine_unique_attributes(self, attribute):
        '''
        Helper method for determining the unique values on attributes of model 
        interfaces, and how these values are shared across multiple model 
        structure interfaces. The working assumption is that this method is 
        only called for the 'uncertainties' and 'outcomes' attributes.
        
        Parameters
        ----------
        attribute : {'uncertainties', 'outcomes'}
                    the attribute to check on the msi
        
        Returns
        -------
        tuple of dicts
            An overview dictionary which shows which uncertainties or outcomes 
            are used by which model structure interface, or interfaces, and a 
            dictionary with the unique uncertainties or outcomes across all the 
            model structure interfaces, with the name as key. 
        
        '''
        # check whether uncertainties exist with the same name
        # but different other attributes
        element_dict = {}
        overview_dict = {}
        for msi in self.model_structures:
            elements = getattr(msi, attribute)
            for element in elements:
                if element.name in element_dict.keys():
                    if element == element_dict[element.name]:
                        overview_dict[element.name].append(msi)
                    else:
                        raise EMAError(
                            "%s `%s` is shared but has different state" %
                            (element.__class__.__name__, element.name))
                else:
                    element_dict[element.name] = element
                    overview_dict[element.name] = [msi]

        temp_overview = defaultdict(list)
        for key, value in overview_dict.items():
            temp_overview[tuple([msi.name
                                 for msi in value])].append(element_dict[key])
        overview_dict = temp_overview

        return overview_dict, element_dict
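To make the regrouping at the end concrete, here is a worked sketch with plain strings standing in for the model structure interfaces and the uncertainty objects:

from collections import defaultdict

element_dict = {'x1': 'x1-object', 'x2': 'x2-object'}
overview_dict = {'x1': ['model_a', 'model_b'],   # x1 shared by two msi's
                 'x2': ['model_a']}              # x2 only used by model_a

temp_overview = defaultdict(list)
for key, value in overview_dict.items():
    temp_overview[tuple(value)].append(element_dict[key])

print(dict(temp_overview))
# {('model_a', 'model_b'): ['x1-object'], ('model_a',): ['x2-object']}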
Example #14
    def load_model(self, path):
        '''
        
        load a netlogo model.
        
        :param path: the absolute path to the netlogo model
        :raise: IOError in case the model is not found
        :raise: NetLogoException wrapped around netlogo exceptions. 
        
        '''
        if not os.path.isfile(path):
            raise EMAError('{} is not a file'.format(path))

        try:
            self.link.loadModel(path)
        except jpype.JException(jpype.java.io.IOException) as ex:
            raise IOError(ex.message())
        except jpype.JException(jpype.java.org.nlogo.api.LogoException) as ex:
            raise NetLogoException(ex.message())
        except jpype.JException(
                jpype.java.org.nlogo.api.CompilerException) as ex:
            raise NetLogoException(ex.message())
        except jpype.JException(jpype.java.lang.InterruptedException) as ex:
            raise NetLogoException(ex.message())
def prepare_data(results,
                 outcomes_to_show=None,
                 group_by=None,
                 grouping_specifiers=None,
                 filter_scalar=True):
    '''
    
    Parameters
    ----------
    results : tuple
    outcomes_to_show : list of str, optional
    group_by : str, optional
    grouping_specifiers : iterable, optional
    filter_scalar : bool, optional
    
    '''

    #unravel results
    experiments, outcomes = results

    temp_outcomes = {}

    # remove outcomes that are not to be shown
    if outcomes_to_show:
        if isinstance(outcomes_to_show, six.string_types):
            outcomes_to_show = [outcomes_to_show]

        for entry in outcomes_to_show:
            temp_outcomes[entry] = copy.deepcopy(outcomes[entry])

    time, outcomes = determine_time_dimension(outcomes)

    # filter the outcomes to exclude scalar values
    if filter_scalar:
        outcomes = filter_scalar_outcomes(outcomes)
    if not outcomes_to_show:
        outcomes_to_show = outcomes.keys()

    # group the data if desired
    if group_by:
        if not grouping_specifiers:
            #no grouping specifier, so infer from the data
            if group_by == 'index':
                raise EMAError(
                    "no grouping specifiers provided while trying to group on index"
                )
            else:
                column_to_group_by = experiments[group_by]
                if column_to_group_by.dtype == np.object:
                    grouping_specifiers = set(column_to_group_by)
                else:
                    grouping_specifiers = make_continuous_grouping_specifiers(
                        column_to_group_by, grouping_specifiers)
            grouping_labels = grouping_specifiers = sorted(grouping_specifiers)
        else:
            if isinstance(grouping_specifiers, six.string_types):
                grouping_specifiers = [grouping_specifiers]
                grouping_labels = grouping_specifiers
            elif isinstance(grouping_specifiers, dict):
                grouping_labels = sorted(grouping_specifiers.keys())
                grouping_specifiers = [
                    grouping_specifiers[key] for key in grouping_labels
                ]
            else:
                grouping_labels = grouping_specifiers


        outcomes = group_results(experiments, outcomes, group_by,\
                                 grouping_specifiers, grouping_labels)

        new_outcomes = {}
        for key, value in outcomes.items():
            new_outcomes[key] = value[1]
        outcomes = new_outcomes
    else:
        grouping_labels = []

    return outcomes, outcomes_to_show, time, grouping_labels
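The dict branch for grouping_specifiers sorts the keys into labels and looks up the matching specifiers; a short illustration with hypothetical policy names:

grouping_specifiers = {'no policy': 'none', 'adaptive': 'adaptive policy'}

grouping_labels = sorted(grouping_specifiers.keys())
grouping_specifiers = [grouping_specifiers[key] for key in grouping_labels]

print(grouping_labels)       # ['adaptive', 'no policy']
print(grouping_specifiers)   # ['adaptive policy', 'none']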
Example #16
    def __init__(self, lookup_type, values, name, msi, ymin=None, ymax=None):
        '''

        Parameters
        ----------
        lookup_type : {'categories', 'hearne1', 'hearne2', 'approximation'}
                      the method to be used for alternative generation. 
        values : collection
                 the values for specifying the uncertainty from which to 
                 sample.
            If 'lookup_type' is "categories", a set of alternative lookup 
                functions to  be entered as tuples of x,y points.
                Example definition: 
                LookupUncertainty([[(0.0, 0.05), (0.25, 0.15), (0.5, 0.4), 
                                    (0.75, 1), (1, 1.25)], 
                                  [(0.0, 0.1), (0.25, 0.25), (0.5, 0.75), 
                                   (1, 1.25)],
                                  [(0.0, 0.0), (0.1, 0.2), (0.3, 0.6), 
                                   (0.6, 0.9), (1, 1.25)]], 
                                   "TF3", 'categories', self )
            if 'lookup_type' is "hearne1", a list of ranges for each parameter 
                Single-extreme piecewise functions
                m: maximum deviation from l of the distortion function
                p: the point that this occurs
                l: lower end point
                u: upper end point
            If 'lookup_type' is "hearne2", a list of ranges for each 
                parameter. Double extreme piecewise linear functions with 
                variable endpoints are used to distort the lookup functions. 
                These functions are defined by 6 parameters, being m1, m2, p1, 
                p2, l and u; and the uncertainty ranges for these 6 parameters 
                should  be given as the values of this lookup uncertainty if 
                Hearne's method is chosen. The meaning of these parameters is 
                simply:
                m1: maximum deviation (peak if positive, bottom if negative) of 
                 the distortion function from l in the first segment
                p1: where this peak occurs in the x axis
                m2: maximum deviation of the distortion function from l or u in 
                    the second segment
                p2: where the second peak/bottom occurs
                l : lower end point, namely the y value for x_min
                u : upper end point, namely the y value for x_max
                Example definition:
                LookupUncertainty([(-1, 2), (-1, 1), (0, 1), (0, 1), (0, 0.5), 
                                   (0.5, 1.5)], "TF2", 'hearne', self, 0, 2)
             If 'lookup_type' is "approximation", an analytical function 
                 approximation (a logistic function) will be used, instead of a 
                 lookup. This function has 5 parameters whose ranges should 
                 be given:
                 A: the lower asymptote
                 K: the upper asymptote
                 B: the growth rate
                 Q: depends on the value y(0)
                 M: the time of maximum growth if Q=v
                Example definition:
                TODO:
        name : str
               name of the uncertainty
        msi : VensimModelStructureInterface instance
              model structure interface, to be used for adding new 
              parameter uncertainties
        ymin : float
               minimum value the lookup function can take; this argument is 
               not needed in case of CAT
        ymax : float
               maximum value the lookup function can take; this argument is 
               not needed in case of CAT
        
        '''
        super(LookupUncertainty, self).__init__(values, name)
        self.lookup_type = lookup_type
        self.y_min = ymin
        self.y_max = ymax
        self.error_message = self.error_message.format(self.name)
        self.transform_functions = {
            self.HEARNE1: self._hearne1,
            self.HEARNE2: self._hearne2,
            self.APPROX: self._approx,
            self.CAT: self._cat
        }

        if self.lookup_type == "categories":
            msi.uncertainties.append(
                CategoricalUncertainty(range(len(values)), "c-" + self.name))
            msi._lookup_uncertainties.append(self)
        elif self.lookup_type == "hearne1":
            msi.uncertainties.append(
                ParameterUncertainty(values[0], "m-" + self.name))
            msi.uncertainties.append(
                ParameterUncertainty(values[1], "p-" + self.name))
            msi.uncertainties.append(
                ParameterUncertainty(values[2], "l-" + self.name))
            msi.uncertainties.append(
                ParameterUncertainty(values[3], "u-" + self.name))
            msi._lookup_uncertainties.append(self)
        elif self.lookup_type == "hearne2":
            msi.uncertainties.append(
                ParameterUncertainty(values[0], "m1-" + self.name))
            msi.uncertainties.append(
                ParameterUncertainty(values[1], "m2-" + self.name))
            msi.uncertainties.append(
                ParameterUncertainty(values[2], "p1-" + self.name))
            msi.uncertainties.append(
                ParameterUncertainty(values[3], "p2-" + self.name))
            msi.uncertainties.append(
                ParameterUncertainty(values[4], "l-" + self.name))
            msi.uncertainties.append(
                ParameterUncertainty(values[5], "u-" + self.name))
            msi._lookup_uncertainties.append(self)
        elif self.lookup_type == "approximation":
            msi.uncertainties.append(
                ParameterUncertainty(values[0], "A-" + self.name))
            msi.uncertainties.append(
                ParameterUncertainty(values[1], "K-" + self.name))
            msi.uncertainties.append(
                ParameterUncertainty(values[2], "B-" + self.name))
            msi.uncertainties.append(
                ParameterUncertainty(values[3], "Q-" + self.name))
            msi.uncertainties.append(
                ParameterUncertainty(values[4], "M-" + self.name))
            msi._lookup_uncertainties.append(self)
        else:
            raise EMAError(self.error_message)
def multiple_densities(results,
                       points_in_time=[],
                       outcomes_to_show=[],
                       group_by=None,
                       grouping_specifiers=None,
                       density=plotting_util.KDE,
                       legend=True,
                       titles={},
                       ylabels={},
                       experiments_to_show=None,
                       plot_type=ENVELOPE,
                       log=False):
    ''' Make an envelope plot with multiple density plots over the run time

    Parameters
    ----------
    results : tuple
              return from :meth:`perform_experiments`.
    points_in_time : list 
                     a list of points in time for which you want to see the 
                     density. At the moment  up to 6 points in time are 
                     supported
    outcomes_to_show : list of str, optional
                       list of outcome of interest you want to plot. If empty, 
                       all outcomes are plotted. **Note**:  just names.
    group_by : str, optional
               name of the column in the cases array to group results by. 
               Alternatively, `index` can be used to use indexing arrays as the
               basis for grouping.
    grouping_specifiers : iterable or dict, optional
                          set of categories to be used as a basis for grouping 
                          by. Grouping_specifiers is only meaningful if 
                          group_by is provided as well. In case of grouping by 
                          index, the grouping specifiers should be in a 
                          dictionary where the key denotes the name of the 
                          group.
    density : {KDE, HIST, VIOLIN, BOXPLOT}, optional
    legend : bool, optional
    titles : dict, optional
             a way for controlling whether each of the axes should have a 
             title. There are three possibilities. If set to None, no title 
             will be shown for any of the axes. If set to an empty dict, 
             the default, the title is identical to the name of the outcome of 
             interest. If you want to override these default names, provide a 
             dict with the outcome of interest as key and the desired title as 
             value. This dict need only contain the outcomes for which you 
             want to use a different title. 
    ylabels : dict, optional
              way for controlling the ylabels. Works identical to titles.
    experiments_to_show : ndarray, optional
                          indices of experiments to show lines for,
                          defaults to None.
    plot_type : {ENVELOPE, ENV_LIN, LINES}, optional
    log : bool, optional

    Returns
    -------
    figures : list
              list of Figure instances, one for each outcome of interest
    axes_dicts : dict
                 dict with outcome as key and a dict of axes as value. The 
                 density axes are keyed "density_0", "density_1", etc., and 
                 the envelope plot is keyed "main plot".
    
    Note
    ---- 
    the current implementation is limited to seven different categories in
    case of group_by, categories, and/or discretization. This limit is due to 
    the colors specified in COLOR_LIST.
    
    Note
    ----
    the connection patches are for some reason not drawn if log scaling is 
    used for the density plots. This appears to be an issue in matplotlib 
    itself.
    
    '''
    if not outcomes_to_show:
        outcomes_to_show = list(results[1].keys())
        outcomes_to_show.remove(TIME)
    elif isinstance(outcomes_to_show, six.string_types):
        outcomes_to_show = [outcomes_to_show]

    axes_dicts = {}
    figures = []
    for outcome_to_show in outcomes_to_show:
        temp_results = copy.deepcopy(results)
        axes_dict = {}
        axes_dicts[outcome_to_show] = axes_dict

        if plot_type != ENV_LIN:
            # standard way of pre processing data
            if experiments_to_show is not None:
                experiments, outcomes = temp_results
                experiments = experiments[experiments_to_show]
                new_outcomes = {}
                for key, value in outcomes.items():
                    new_outcomes[key] = value[experiments_to_show]
                temp_results = experiments, new_outcomes

        data = prepare_data(temp_results, [outcome_to_show], group_by,
                            grouping_specifiers)
        outcomes, outcomes_to_show, time, grouping_labels = data
        del outcomes_to_show

        #start of plotting
        fig = plt.figure()
        figures.append(fig)

        # making of grid
        if not points_in_time:
            raise EMAError("no points in time specified")
        if len(points_in_time) == 1:
            ax_env = plt.subplot2grid((2, 3), (0, 0), colspan=3)
            ax1 = plt.subplot2grid((2, 3), (1, 1), sharey=ax_env)
            kde_axes = [ax1]
        elif len(points_in_time) == 2:
            ax_env = plt.subplot2grid((2, 2), (0, 0), colspan=2)
            ax1 = plt.subplot2grid((2, 2), (1, 0), sharey=ax_env)
            ax2 = plt.subplot2grid((2, 2), (1, 1), sharex=ax1, sharey=ax_env)
            kde_axes = [ax1, ax2]
        elif len(points_in_time) == 3:
            ax_env = plt.subplot2grid((2, 3), (0, 0), colspan=3)
            ax1 = plt.subplot2grid((2, 3), (1, 0), sharey=ax_env)
            ax2 = plt.subplot2grid((2, 3), (1, 1), sharex=ax1, sharey=ax_env)
            ax3 = plt.subplot2grid((2, 3), (1, 2), sharex=ax1, sharey=ax_env)
            kde_axes = [ax1, ax2, ax3]
        elif len(points_in_time) == 4:
            ax_env = plt.subplot2grid((2, 4), (0, 1), colspan=2)
            ax1 = plt.subplot2grid((2, 4), (1, 0), sharey=ax_env)
            ax2 = plt.subplot2grid((2, 4), (1, 1), sharex=ax1, sharey=ax_env)
            ax3 = plt.subplot2grid((2, 4), (1, 2), sharex=ax1, sharey=ax_env)
            ax4 = plt.subplot2grid((2, 4), (1, 3), sharex=ax1, sharey=ax_env)
            kde_axes = [ax1, ax2, ax3, ax4]
        elif len(points_in_time) == 5:
            ax_env = plt.subplot2grid((2, 5), (0, 1), colspan=3)
            ax1 = plt.subplot2grid((2, 5), (1, 0), sharey=ax_env)
            ax2 = plt.subplot2grid((2, 5), (1, 1), sharex=ax1, sharey=ax_env)
            ax3 = plt.subplot2grid((2, 5), (1, 2), sharex=ax1, sharey=ax_env)
            ax4 = plt.subplot2grid((2, 5), (1, 3), sharex=ax1, sharey=ax_env)
            ax5 = plt.subplot2grid((2, 5), (1, 4), sharex=ax1, sharey=ax_env)
            kde_axes = [ax1, ax2, ax3, ax4, ax5]
        elif len(points_in_time) == 6:
            ax_env = plt.subplot2grid((2, 6), (0, 1), colspan=4)
            ax1 = plt.subplot2grid((2, 6), (1, 0), sharey=ax_env)
            ax2 = plt.subplot2grid((2, 6), (1, 1), sharex=ax1, sharey=ax_env)
            ax3 = plt.subplot2grid((2, 6), (1, 2), sharex=ax1, sharey=ax_env)
            ax4 = plt.subplot2grid((2, 6), (1, 3), sharex=ax1, sharey=ax_env)
            ax5 = plt.subplot2grid((2, 6), (1, 4), sharex=ax1, sharey=ax_env)
            ax6 = plt.subplot2grid((2, 6), (1, 5), sharex=ax1, sharey=ax_env)

            kde_axes = [
                ax1,
                ax2,
                ax3,
                ax4,
                ax5,
                ax6,
            ]
        else:
            raise EMAError("too many points in time provided")

        axes_dict["main plot"] = ax_env
        for n, entry in enumerate(kde_axes):
            axes_dict["density_%s" % n] = entry

            # turn off ticks for all but the first density
            if n > 0:
                for tl in entry.get_yticklabels():
                    tl.set_visible(False)

        # bit of a trick to avoid duplicating code. If no subgroups are
        # specified, nest the outcomes one step deeper in the dict so the
        # iteration below can proceed normally.
        if not grouping_labels:
            grouping_labels = [""]
            outcomes[""] = outcomes

        for j, key in enumerate(grouping_labels):
            value = outcomes[key][outcome_to_show]

            if plot_type == ENVELOPE:
                plot_envelope(ax_env, j, time, value, fill=False)
            elif plot_type == LINES:
                ax_env.plot(time.T, value.T)
            elif plot_type == ENV_LIN:
                plot_envelope(ax_env, j, time, value, fill=True)
                if experiments_to_show is not None:
                    ax_env.plot(time.T, value[experiments_to_show].T)
                else:
                    ax_env.plot(time.T, value.T)
            ax_env.set_xlim(time[0], time[-1])

            ax_env.set_xlabel(TIME_LABEL)
            do_ylabels(ax_env, ylabels, outcome_to_show)
            do_titles(ax_env, titles, outcome_to_show)

        for i, ax in enumerate(kde_axes):
            time_value = points_in_time[i]
            index = np.where(time == points_in_time[i])[0][0]

            group_density(ax,
                          density,
                          outcomes,
                          outcome_to_show,
                          grouping_labels,
                          index=index,
                          log=log)

        min_y, max_y = ax_env.get_ylim()
        ax_env.autoscale(enable=False, axis='y')

        for i, ax in enumerate(kde_axes):
            time_value = points_in_time[i]

            ax_env.plot([time_value, time_value], [min_y, max_y],
                        c='k',
                        ls='--')
            con = ConnectionPatch(xyA=(time_value, min_y),
                                  xyB=(ax.get_xlim()[0], max_y),
                                  coordsA="data",
                                  coordsB="data",
                                  axesA=ax_env,
                                  axesB=ax)
            ax_env.add_artist(con)

        if legend and group_by:
            lt = PATCH
            alpha = 0.3
            if plot_type == LINES:
                lt = LINE
                alpha = 1
            make_legend(grouping_labels, ax_env, legend_type=lt, alpha=alpha)
    return figures, axes_dicts
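A usage sketch for multiple_densities; results is assumed to be the tuple returned by perform_experiments, and 'infected fraction' is a hypothetical outcome name:

# figures is a list with one Figure per outcome; axes_dicts maps each outcome
# to its "main plot" and "density_n" axes
figures, axes_dicts = multiple_densities(results,
                                         points_in_time=[0, 10, 20],
                                         outcomes_to_show=['infected fraction'],
                                         group_by='policy',
                                         density=plotting_util.KDE)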
    def perform_outcome_optimization(self,
                                     obj_function=None,
                                     weights=(),
                                     algorithm=NSGA2,
                                     reporting_interval=100,
                                     nr_of_generations=100,
                                     pop_size=100,
                                     crossover_rate=0.5,
                                     mutation_rate=0.02,
                                     caching=False,
                                     **kwargs):
        """
        Method responsible for performing outcome optimization. The 
        optimization will be performed over the intersection of the 
        uncertainties in case of multiple model structures. 
        
        Parameters
        ----------    
        obj_function : callable
                       the objective function used by the optimization
        weights : tuple
                  tuple of weights on the various outcomes of the objective 
                  function. Use the constants MINIMIZE and MAXIMIZE.
        reporting_interval : int, optional
                             parameter for specifying the frequency with
                             which the callback reports the progress.
                             (Default is 100) 
        nr_of_generations : int, optional
                            the number of generations for which the GA will be 
                            run
        pop_size : int, optional
                   the population size for the GA
        crossover_rate : float, optional
                         crossover rate for the GA
        mutation_rate : float, optional
                        mutation_rate for the GA
        caching : bool, optional
                  keep track of tried solutions. This is memory intensive, 
                  so should be used sparingly. Defaults to False. 
        """

        # Attribute generator
        od = self._determine_unique_attributes('uncertainties')[0]
        shared_uncertainties = od[tuple(
            [msi.name for msi in self.model_structures])]

        #make a dictionary with the shared uncertainties and their range
        uncertainty_dict = {}
        for uncertainty in shared_uncertainties:
            uncertainty_dict[uncertainty.name] = uncertainty
        keys = sorted(uncertainty_dict.keys())

        levers = {}
        for key in keys:
            specification = {}
            uncertainty = uncertainty_dict[key]
            value = uncertainty.values

            if isinstance(uncertainty, CategoricalUncertainty):
                value = uncertainty.categories
                specification["type"] = 'list'
                specification['values'] = value
            elif isinstance(uncertainty, ParameterUncertainty):
                if uncertainty.dist == 'integer':
                    specification["type"] = 'range int'
                else:
                    specification["type"] = 'range float'
                specification['values'] = value
            else:
                raise EMAError(
                    "unknown allele type: possible types are range and list")
            levers[key] = specification

        return self._run_optimization(generate_individual_outcome,
                                      evaluate_population_outcome,
                                      weights=weights,
                                      levers=levers,
                                      algorithm=algorithm,
                                      obj_function=obj_function,
                                      pop_size=pop_size,
                                      reporting_interval=reporting_interval,
                                      nr_of_generations=nr_of_generations,
                                      crossover_rate=crossover_rate,
                                      mutation_rate=mutation_rate,
                                      **kwargs)
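For reference, the levers dictionary assembled above has the same shape as the one consumed by the GA __init__ in Example #7; a sketch with hypothetical lever names:

levers = {
    'policy switch': {'type': 'list', 'values': ['none', 'adaptive']},
    'nr of pumps': {'type': 'range int', 'values': (1, 10)},
    'price': {'type': 'range float', 'values': (0.5, 2.5)},
}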