Example #1
    # Imports assumed for this snippet (exact paths may vary across
    # ema_workbench versions):
    #   from unittest import mock
    #   from ema_workbench import Model, RealParameter
    #   from ema_workbench.em_framework.samplers import determine_parameters
    def test_determine_parameters(self):
        # two models whose uncertainty sets overlap only on 'b'
        function = mock.Mock()
        model_a = Model("A", function)
        model_a.uncertainties = [
            RealParameter('a', 0, 1),
            RealParameter('b', 0, 1),
        ]
        function = mock.Mock()
        model_b = Model("B", function)
        model_b.uncertainties = [
            RealParameter('b', 0, 1),
            RealParameter('c', 0, 1),
        ]

        models = [model_a, model_b]

        # union=True: the result contains every uncertainty of every model
        parameters = determine_parameters(models, 'uncertainties', union=True)
        for model in models:
            for unc in model.uncertainties:
                self.assertIn(unc.name, parameters.keys())

        # union=False: only the shared uncertainty 'b' survives the intersection
        parameters = determine_parameters(models, 'uncertainties', union=False)
        self.assertIn('b', parameters.keys())
        self.assertNotIn('c', parameters.keys())
        self.assertNotIn('a', parameters.keys())
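
The same union/intersection behaviour can be seen outside a test class. The sketch below is a minimal standalone illustration, not part of the original test suite; the top-level Model/RealParameter exports and the em_framework.samplers import path are assumptions that may vary across ema_workbench versions:

    from ema_workbench import Model, RealParameter
    from ema_workbench.em_framework.samplers import determine_parameters

    def noop(a=0, b=0, c=0):  # placeholder model function, never executed here
        return {}

    model_a = Model("A", function=noop)
    model_a.uncertainties = [RealParameter('a', 0, 1), RealParameter('b', 0, 1)]

    model_b = Model("B", function=noop)
    model_b.uncertainties = [RealParameter('b', 0, 1), RealParameter('c', 0, 1)]

    models = [model_a, model_b]
    union = determine_parameters(models, 'uncertainties', union=True)
    intersection = determine_parameters(models, 'uncertainties', union=False)
    print(sorted(union.keys()))         # ['a', 'b', 'c']
    print(sorted(intersection.keys()))  # ['b']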
Example #2
    def perform_experiments(self,
                            cases,
                            callback=DefaultCallback,
                            reporting_interval=None,
                            uncertainty_union=False,
                            outcome_union=False,
                            **kwargs):
        """
        Method responsible for running the experiments on a structure. In case 
        of multiple model structures, the outcomes are set to the intersection 
        of the sets of outcomes of the various models.     
        
        Parameters
        ----------    
        cases : int
                In case of Latin Hypercube sampling and Monte Carlo 
                sampling, cases specifies the number of cases to
                generate. In case of Full Factorial sampling,
                cases specifies the resolution to use for sampling
                continuous uncertainties. 
        callback : callback, optional
                   callable that will be called after finishing a 
                   single experiment (default is :class:`~callbacks.DefaultCallback`)
        reporting_interval : int, optional
                             parameter for specifying the frequency with
                             which the callback reports the progress.
                             If none is provided, it defaults to 1/10 of 
                             the total number of scenarios.
        uncertainty_union : bool, optional
                              keyword argument for controlling whether,
                              in case of multiple model structure 
                              interfaces, the intersection or the union
                              of uncertainties should be used. 
        outcome_union : bool, optional
                          keyword argument for controlling whether,
                          in case of multiple model structure 
                          interfaces, the intersection or the union
                          of outcomes should be used. 
        kwargs : dict, optional
                 generic keyword arguments to pass on to the callback

        Returns
        -------
        tuple 
            a `structured numpy array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_ 
            containing the experiments, and a dict with the names of the 
            outcomes as keys and a numpy array as value.


        .. rubric:: suggested use

        In general, analysis scripts require both the structured array of the 
        experiments and the dictionary of arrays containing the results. The 
        recommended use is the following::

            >>> results = ensemble.perform_experiments(10000) # recommended use
            >>> experiments, output = ensemble.perform_experiments(10000)

        The latter option also works, but most analysis scripts require you to
        wrap the pair back up into a tuple::

            >>> data = (experiments, output)

        Another reason for the recommended use is that you can save this tuple
        directly::

            >>> import util as util
            >>> util.save_results(results, filename)

        """
        if not self.policies:
            # fall back to a single 'do nothing' policy; a list is needed
            # because the policies are iterated and counted below
            self.policies = [Policy('none')]
            levers = []
        else:
            levers = determine_objects(self.model_structures, 'levers', 
                                       union=True)
            attributes = {key for p in self.policies for key in p.keys()}
            levers = [lever for lever in levers if lever.name in attributes]

        outcomes = determine_objects(self.model_structures, 'outcomes', 
                                     union=outcome_union)

        if isinstance(cases, numbers.Integral):
            res = sample_uncertainties(self.model_structures, cases, 
                                       uncertainty_union, sampler=self.sampler)
            scenarios = res
            uncertainties = res.parameters
            nr_of_scenarios = res.n
        elif isinstance(cases, np.ndarray):
            res = from_experiments(self.model_structures, cases)
            scenarios = res
            uncertainties = res.parameters
            nr_of_scenarios = res.n
        else:
            scenarios = cases
            nr_of_scenarios = len(scenarios)
            uncertainties = samplers.determine_parameters(self.model_structures, 
                                                          'uncertainties')
            names = set()
            for case in cases:
                names = names.union(case.keys())

            # uncertainties = [u for u in uncertainties if u.name in names]
        
        experiments = experiment_generator(scenarios, self.model_structures, 
                                           self.policies)
        nr_of_exp = nr_of_scenarios * len(self.model_structures) * len(self.policies)
        
        info(str(nr_of_exp) + " experiments will be executed")

        if reporting_interval is None:
            reporting_interval = max(1, int(round(nr_of_exp / 10))) 

        #initialize the callback object
        callback = callback(uncertainties, 
                            levers,
                            outcomes, 
                            nr_of_exp,
                            reporting_interval=reporting_interval,
                            **kwargs)

        if self.parallel:
            info("preparing to perform experiment in parallel")
            
            if not self.pool:
                self.pool = MultiprocessingPool(self.model_structures,
                                                nr_processes=self.processes)
            info("starting to perform experiments in parallel")

            self.pool.perform_experiments(callback, experiments)
        else:
            info("starting to perform experiments sequentially")
            
            cwd = os.getcwd() 
            runner = ExperimentRunner(self.model_structures)
            for experiment in experiments:
                result = runner.run_experiment(experiment)
                callback(experiment, result)
            runner.cleanup()
            os.chdir(cwd)
        
        if callback.i != nr_of_exp:
            raise EMAError(('some fatal error has occurred while '
                            'running the experiments, not all runs have '
                            'completed. expected {}, got {} '
                            '({})').format(nr_of_exp, callback.i,
                                           type(callback)))
       
        results = callback.get_results()
        info("experiments finished")
        
        return results
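
As the isinstance checks in the body show, cases may be an integer (sample that many scenarios with the ensemble's sampler), a structured numpy array (rerun previously generated experiments), or a collection of dict-like scenarios. A minimal sketch of the three calling conventions, assuming a pre-built ensemble object as in the docstring (the parameter values shown are hypothetical):

    # cases as an int: sample 1000 scenarios with the ensemble's sampler
    results = ensemble.perform_experiments(1000, uncertainty_union=True)
    experiments, outcomes = results

    # cases as a structured numpy array: rerun previously generated experiments
    results = ensemble.perform_experiments(experiments)

    # cases as a collection of dict-like scenarios: run user-specified cases
    results = ensemble.perform_experiments([{'a': 0.1, 'b': 0.5},
                                            {'a': 0.9, 'b': 0.2}])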