def test_log_to_stderr(self):
    """log_to_stderr wires up two handlers and honours the requested level.

    Covers three scenarios: a fresh root logger with an explicit DEBUG
    level, a fresh root logger with the default level, and a repeated
    call on an already-configured root logger (which must be reused,
    not re-handled).
    """
    scenarios = [
        # (reset root first?, level argument or None, expected level)
        (True, ema_logging.DEBUG, ema_logging.DEBUG),
        (True, None, ema_logging.DEFAULT_LEVEL),
        (False, None, ema_logging.DEFAULT_LEVEL),
    ]
    for reset_root, requested, expected in scenarios:
        if reset_root:
            ema_logging._rootlogger = None
        if requested is None:
            root = ema_logging.log_to_stderr()
        else:
            root = ema_logging.log_to_stderr(requested)
        self.assertEqual(len(root.handlers), 2)
        self.assertEqual(root.level, expected)
def test_log_messages(self):
    """Each module-level logging helper forwards to the matching _logger method."""
    ema_logging.log_to_stderr(ema_logging.DEBUG)

    with mock.patch('ema_workbench.util.ema_logging._logger') as patched:
        text = 'test message'
        # debug/info/warning/error/exception/critical all share the same
        # delegate-to-_logger contract, so exercise them uniformly.
        for level_name in ('debug', 'info', 'warning', 'error',
                           'exception', 'critical'):
            getattr(ema_logging, level_name)(text)
            getattr(patched, level_name).assert_called_with(text)
def test_log_messages(self):
    """The module-level convenience functions delegate to the shared _logger."""
    ema_logging.log_to_stderr(ema_logging.DEBUG)

    target = 'ema_workbench.util.ema_logging._logger'
    with mock.patch(target) as fake_logger:
        msg = 'test message'

        ema_logging.debug(msg)
        fake_logger.debug.assert_called_with(msg)

        ema_logging.info(msg)
        fake_logger.info.assert_called_with(msg)

        ema_logging.warning(msg)
        fake_logger.warning.assert_called_with(msg)

        ema_logging.error(msg)
        fake_logger.error.assert_called_with(msg)

        ema_logging.exception(msg)
        fake_logger.exception.assert_called_with(msg)

        ema_logging.critical(msg)
        fake_logger.critical.assert_called_with(msg)
def test_optimization():
    # Smoke test: multi-objective outcome optimization on the flu model.
    # NOTE(review): guarded to run only on Windows ('nt') — silently skips
    # elsewhere; presumably because the Vensim connector is Windows-only.
    # Confirm before porting.
    if os.name != 'nt':
        return

    ema_logging.log_to_stderr(ema_logging.INFO)

    model = FluModel(r'../models', "fluCase")
    ensemble = ModelEnsemble()
    ensemble.model_structure = model
    ensemble.parallel = True  # run experiments in parallel worker processes

    # Deliberately small population / generation counts: this is a smoke
    # test, not a convergence test.
    pop_size = 8
    nr_of_generations = 10
    # epsilon values for the eps-NSGA2 archive, one per objective
    eps = np.array([1e-3, 1e6])

    # Both objectives are maximized; results (statistics, final population)
    # are captured but not asserted on here.
    stats, pop = ensemble.perform_outcome_optimization(
        obj_function=obj_function_multi,
        algorithm=epsNSGA2,
        reporting_interval=100,
        weights=(MAXIMIZE, MAXIMIZE),
        pop_size=pop_size,
        nr_of_generations=nr_of_generations,
        crossover_rate=0.8,
        mutation_rate=0.05,
        eps=eps)
                           # NOTE(review): this continues an uncertainties list
                           # whose opening assignment is outside this chunk.
                           "susceptible to immune population delay time region 1"),
                       ParameterUncertainty((0.5, 2),
                           "susceptible to immune population delay time region 2"),
                       ParameterUncertainty((0.01, 5),
                           "root contact rate region 1"),
                       # NOTE(review): region 1 says "rate", region 2 says
                       # "ratio" (and vice versa below for "infection") —
                       # looks inconsistent; verify against the model's
                       # variable names before changing.
                       ParameterUncertainty((0.01, 5),
                           "root contact ratio region 2"),
                       ParameterUncertainty((0, 0.15),
                           "infection ratio region 1"),
                       ParameterUncertainty((0, 0.15),
                           "infection rate region 2"),
                       ParameterUncertainty((10, 100),
                           "normal contact rate region 1"),
                       ParameterUncertainty((10, 200),
                           "normal contact rate region 2")]


if __name__ == "__main__":
    ema_logging.log_to_stderr(ema_logging.INFO)

    model = FluModel(r'./models/flu', "fluCase")

    ensemble = ModelEnsemble()
    ensemble.model_structure = model
    ensemble.parallel = True  # turn on parallel processing

    # run the experiments and persist them for later analysis
    nr_experiments = 1000
    results = ensemble.perform_experiments(nr_experiments)

    fh = r'./data/{} flu cases no policy.tar.gz'.format(nr_experiments)
    save_results(results, fh)
        # (continuation of run_model — the method opening is outside this chunk)
        kwargs['returns to scale lookup'] = lookup

        # NOTE(review): the variable names and the popped keys appear swapped —
        # 'scale' receives the "...speed" key and 'speed' the "...scale" key,
        # and they are then passed as (speed, scale) to approxLearning. The net
        # effect is one swap; confirm against approxLearning's signature
        # before "fixing" either side.
        scale = kwargs.pop("lookup approximated learning speed")
        speed = kwargs.pop("lookup approximated learning scale")
        start = kwargs.pop("lookup approximated learning start")
        # sample the approximated learning curve at 0, 10, ..., 100
        lookup = [self.approxLearning(x, speed, scale, start)
                  for x in range(0, 101, 10)]
        kwargs['approximated learning effect lookup'] = lookup

        # delegate the actual model run to the parent class
        super(ScarcityModel, self).run_model(kwargs, policy)


if __name__ == "__main__":
    ema_logging.log_to_stderr(ema_logging.DEBUG)

    # NOTE(review): model_file uses a Windows-style backslash path while wd
    # uses forward slashes — confirm the connector expects this mix.
    model = ScarcityModel("scarcity", wd=r'./models/scarcity',
                          model_file=r'\MetalsEMA.vpm')

    # time-series outcomes to collect from every run
    model.outcomes = [Outcome('relative market price', time=True),
                      Outcome('supply demand ratio', time=True),
                      Outcome('real annual demand', time=True),
                      Outcome('produced of intrinsically demanded', time=True),
                      Outcome('supply', time=True),
                      Outcome('Installed Recycling Capacity', time=True),
                      Outcome('Installed Extraction Capacity', time=True)]
from __future__ import (unicode_literals, print_function, absolute_import,
                        division)

from ema_workbench import (Model, MultiprocessingEvaluator, Policy, Scenario)
from ema_workbench.em_framework.evaluators import perform_experiments
from ema_workbench.em_framework.samplers import sample_uncertainties
from ema_workbench.util import ema_logging
import time

from problem_formulation import get_model_for_problem_formulation


if __name__ == '__main__':
    ema_logging.log_to_stderr(ema_logging.INFO)

    # problem formulation 0 — see problem_formulation.py for the alternatives
    dike_model = get_model_for_problem_formulation(0)

    # Build a user-defined scenario and policy:
    # reference values for the model's base uncertainties; keys must match
    # the uncertainty names (TODO confirm units against the dike model docs)
    reference_values = {'Bmax': 175, 'Brate': 1.5, 'pfail': 0.5,
                        'discount rate': 3.5, 'ID flood wave shape': 4}
    scen1 = {}

    for key in dike_model.uncertainties:
        # names look like '<name>' or '<name>_<location>' — un-suffixed
        # names take their value straight from reference_values
        name_split = key.name.split('_')

        if len(name_split) == 1:
            scen1.update({key.name: reference_values[key.name]})
            # (script continues beyond this chunk)
See flu_example.py for the code. The dataset was generated using 32 bit
Python. Therefore, this example will not work if you are running 64 bit
Python.

.. codeauthor:: jhkwakkel <j.h.kwakkel  (at) tudelft (dot) nl>
                chamarat <c.hamarat  (at) tudelft (dot) nl>

'''
import numpy as np
import matplotlib.pyplot as plt

import ema_workbench.analysis.prim as prim
from ema_workbench.util import ema_logging, load_results

ema_logging.log_to_stderr(level=ema_logging.INFO)


def classify(data):
    """Binary classification function for PRIM.

    Returns an array with a 1 for every case whose final value of
    'deceased population region 1' exceeds one million, 0 otherwise.
    """
    # get the output for deceased population
    result = data['deceased population region 1']

    # make an empty array of length equal to number of cases
    classes = np.zeros(result.shape[0])

    # if deceased population is higher then 1.000.000 people, classify as 1
    classes[result[:, -1] > 1000000] = 1

    return classes


# load data
fn = r'./data/1000 flu cases.tar.gz'
'''

from __future__ import unicode_literals, absolute_import

import matplotlib.pyplot as plt

from ema_workbench.connectors.netlogo import NetLogoModel
from ema_workbench.em_framework import (TimeSeriesOutcome, RealParameter,
                                        perform_experiments)
from ema_workbench.util import ema_logging
from ema_workbench.analysis import plotting, plotting_util


if __name__ == '__main__':
    # turn on logging
    ema_logging.log_to_stderr(ema_logging.DEBUG)

    model = NetLogoModel('predprey', wd="./models/predatorPreyNetlogo",
                         model_file="Wolf Sheep Predation.nlogo")
    model.run_length = 100  # number of NetLogo ticks per experiment

    # ranges mirror the sliders of the Wolf Sheep Predation model —
    # TODO confirm against the .nlogo interface bounds
    model.uncertainties = [RealParameter("grass-regrowth-time", 1, 99),
                           RealParameter("initial-number-sheep", 1, 200),
                           RealParameter("initial-number-wolves", 1, 200),
                           RealParameter("sheep-reproduce", 1, 20),
                           RealParameter("wolf-reproduce", 1, 20),
                           ]

    # (outcomes list continues beyond this chunk)
    model.outcomes = [TimeSeriesOutcome('sheep'),
                      TimeSeriesOutcome('wolves'),
'''
Created on 20 sep. 2011

.. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
'''
import numpy as np
import matplotlib.pyplot as plt

from ema_workbench.analysis.pairs_plotting import (pairs_lines, pairs_scatter,
                                                   pairs_density)
from ema_workbench.util import load_results, ema_logging

ema_logging.log_to_stderr(level=ema_logging.DEFAULT_LEVEL)

# load the data
# NOTE(review): Windows-style backslash path — will not resolve on POSIX;
# confirm the intended platform (other examples in this project use './data/').
fh = r'.\data\1000 flu cases no policy.tar.gz'
experiments, outcomes = load_results(fh)

# transform the results to the required format
# that is, we want to know the max peak and the casualties at the end of the
# run
tr = {}

# get time and remove it from the dict
time = outcomes.pop('TIME')

for key, value in outcomes.items():
    if key == 'deceased population region 1':
        tr[key] = value[:, -1]  # we want the end value
    else:
        # we want the maximum value of the peak