def test_optimization():
    if os.name != 'nt':
        return

    ema_logging.log_to_stderr(ema_logging.INFO)

    model = FluModel(r'../models', "fluCase")
    ensemble = ModelEnsemble()
    ensemble.model_structure = model
    ensemble.parallel = True

    pop_size = 8
    nr_of_generations = 10
    eps = np.array([1e-3, 1e6])

    stats, pop = ensemble.perform_outcome_optimization(
        obj_function=obj_function_multi,
        algorithm=epsNSGA2,
        reporting_interval=100,
        weights=(MAXIMIZE, MAXIMIZE),
        pop_size=pop_size,
        nr_of_generations=nr_of_generations,
        crossover_rate=0.8,
        mutation_rate=0.05,
        eps=eps)
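# test_optimization depends on an obj_function_multi helper defined elsewhere
# in the test module. The sketch below is only an assumption about its shape,
# modelled on the obj_func pattern further down in this section: it reduces two
# outcome arrays of the flu model to one scalar per objective. The outcome
# names used here are illustrative and may differ from the actual keys.
def obj_function_multi(outcomes):
    infected = outcomes['infected fraction R1']
    deceased = outcomes['deceased population region 1']

    # two objectives, matching weights=(MAXIMIZE, MAXIMIZE) in the caller:
    # the peak of the infected fraction and the end value of the deceased
    # population time series
    return np.max(infected), deceased[-1]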
def test_log_to_stderr(self):
    ema_logging._logger = None
    logger = ema_logging.log_to_stderr(ema_logging.DEBUG)
    self.assertEqual(len(logger.handlers), 2)
    self.assertEqual(logger.level, ema_logging.DEBUG)

    ema_logging._logger = None
    logger = ema_logging.log_to_stderr()
    self.assertEqual(len(logger.handlers), 2)
    self.assertEqual(logger.level, ema_logging.DEFAULT_LEVEL)

    logger = ema_logging.log_to_stderr()
    self.assertEqual(len(logger.handlers), 2)
    self.assertEqual(logger.level, ema_logging.DEFAULT_LEVEL)
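# A usage sketch of what the test above guarantees: log_to_stderr attaches its
# two handlers once, so repeated calls reuse the same module-level logger
# rather than stacking additional handlers.
from util import ema_logging

logger = ema_logging.log_to_stderr(ema_logging.DEBUG)
logger = ema_logging.log_to_stderr()  # second call: handler count stays at two
print(len(logger.handlers))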
def test_log_messages(self):
    ema_logging.log_to_stderr(ema_logging.DEBUG)

    with mock.patch('util.ema_logging._logger') as mocked_logger:
        message = 'test message'

        ema_logging.debug(message)
        mocked_logger.debug.assert_called_with(message)

        ema_logging.info(message)
        mocked_logger.info.assert_called_with(message)

        ema_logging.warning(message)
        mocked_logger.warning.assert_called_with(message)

        ema_logging.error(message)
        mocked_logger.error.assert_called_with(message)

        ema_logging.exception(message)
        mocked_logger.exception.assert_called_with(message)

        ema_logging.critical(message)
        mocked_logger.critical.assert_called_with(message)
'''
Created on Mar 15, 2012

.. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
'''
from analysis import clusterer
from util import ema_logging
from core import ModelEnsemble
from test.scarcity_example import ScarcityModel

if __name__ == "__main__":
    ema_logging.log_to_stderr(ema_logging.INFO)

    model = ScarcityModel(r'..\..\src\test', "fluCase")

    ensemble = ModelEnsemble()
    ensemble.set_model_structure(model)
    ensemble.parallel = True
    results = ensemble.perform_experiments(200)

    clusterer.cluster(data=results,
                      outcome='relative market price',
                      distance='gonenc',
                      cMethod='maxclust',
                      cValue=5,
                      plotDendrogram=False)
'''
Created on May 26, 2015

@author: jhkwakkel
'''
import numpy as np
import matplotlib.pyplot as plt

import analysis.cart as cart
from util import ema_logging
from util.util import load_results

ema_logging.log_to_stderr(level=ema_logging.INFO)


def classify(data):
    # get the output for deceased population
    result = data['deceased population region 1']

    # make an empty array of length equal to number of cases
    classes = np.zeros(result.shape[0])

    # if deceased population is higher than 1,000,000 people, classify as 1
    classes[result[:, -1] > 1000000] = 1

    return classes

# load data
fn = r'./data/1000 flu cases.tar.gz'
results = load_results(fn)
experiments, results = results
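# A possible continuation of this script (not part of the original excerpt):
# pass the loaded results and the classify function to CART and plot the
# resulting tree. setup_cart, mass_min, build_tree, and show_tree are assumed
# here from later workbench releases; the 2015 analysis.cart module may expose
# a slightly different entry point.
cart_alg = cart.setup_cart((experiments, results), classify, mass_min=0.05)
cart_alg.build_tree()
cart_alg.show_tree()
plt.show()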
def obj_func(outcomes):
    a = outcomes['a']
    b = outcomes['b']

    a_mean = np.mean(a)
    b_mean = np.mean(b)

    if a_mean < 0.5 or b_mean < 0.5:
        return (np.inf,) * 2
    else:
        return a_mean, b_mean


if __name__ == "__main__":
    ema_logging.log_to_stderr(ema_logging.INFO)
    model = DummyModel(r"", "dummy")

    np.random.seed(123456789)

    ensemble = ModelEnsemble()
    ensemble.set_model_structure(model)

    policy_levers = {'Trigger a': {'type': 'list',
                                   'values': [0, 0.25, 0.5, 0.75, 1]},
                     'Trigger b': {'type': 'list',
                                   'values': [0, 0.25, 0.5, 0.75, 1]},
                     'Trigger c': {'type': 'list',
                                   'values': [0, 0.25, 0.5, 0.75, 1]}}

    cases = ensemble._generate_samples(10, UNION)[0]
    ensemble.add_policy({"name": None})
    experiments = [entry for entry in ensemble._generate_experiments(cases)]
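# A quick standalone check of obj_func's feasibility guard, using made-up
# outcome arrays: whenever either mean falls below 0.5, the candidate is
# marked infeasible by returning +inf for both objectives.
good = {'a': np.array([0.6, 0.8]), 'b': np.array([0.7, 0.9])}
bad = {'a': np.array([0.1, 0.2]), 'b': np.array([0.7, 0.9])}

print(obj_func(good))  # approximately (0.7, 0.8)
print(obj_func(bad))   # (inf, inf)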
'''
Created on 20 sep. 2011

.. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
'''
import numpy as np
import matplotlib.pyplot as plt

from analysis.pairs_plotting import pairs_lines, pairs_scatter, pairs_density
from util.util import load_results
from util import ema_logging

ema_logging.log_to_stderr(level=ema_logging.DEFAULT_LEVEL)

# load the data
fh = r'.\data\1000 flu cases no policy.tar.gz'
experiments, outcomes = load_results(fh)

# transform the results to the required format
# that is, we want to know the max peak and the casualties at the end of the
# run
tr = {}

# get time and remove it from the dict
time = outcomes.pop('TIME')

for key, value in outcomes.items():
    if key == 'deceased population region 1':
        tr[key] = value[:, -1]  # we want the end value
    else:
        # we want the maximum value of the peak
        tr[key] = np.amax(value, axis=1)
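# A possible continuation (not part of the original excerpt): hand the
# transformed outcomes to the pairs plots imported above. The call signature
# is assumed here to be the (experiments, outcomes) style; older releases of
# analysis.pairs_plotting may expect the packed results tuple instead.
pairs_scatter(experiments, tr)
pairs_lines(experiments, tr)
pairs_density(experiments, tr)
plt.show()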
""" Created on 20 sep. 2011 .. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl> """ import numpy as np import matplotlib.pyplot as plt from analysis.pairs_plotting import pairs_lines, pairs_scatter, pairs_density from util.util import load_results from util import ema_logging ema_logging.log_to_stderr(level=ema_logging.DEFAULT_LEVEL) # load the data fh = r".\data\1000 flu cases no policy.tar.gz" experiments, outcomes = load_results(fh) # transform the results to the required format # that is, we want to know the max peak and the casualties at the end of the # run tr = {} # get time and remove it from the dict time = outcomes.pop("TIME") for key, value in outcomes.items(): if key == "deceased population region 1": tr[key] = value[:, -1] # we want the end value else: # we want the maximum value of the peak
speed = kwargs.pop("lookup price substitute speed")
begin = kwargs.pop("lookup price substitute begin")
end = kwargs.pop("lookup price substitute end")
lookup = [self.priceSubstite(x, speed, begin, end)
          for x in range(0, 100, 10)]
kwargs['relative price substitute lookup'] = lookup

scale = kwargs.pop("lookup returns to scale speed")
speed = kwargs.pop("lookup returns to scale scale")
lookup = [self.returnsToScale(x, speed, scale)
          for x in range(0, 101, 10)]
kwargs['returns to scale lookup'] = lookup

scale = kwargs.pop("lookup approximated learning speed")
speed = kwargs.pop("lookup approximated learning scale")
start = kwargs.pop("lookup approximated learning start")
lookup = [self.approxLearning(x, speed, scale, start)
          for x in range(0, 101, 10)]
kwargs['approximated learning effect lookup'] = lookup

super(ScarcityModel, self).run_model(kwargs)


if __name__ == "__main__":
    ema_logging.log_to_stderr(ema_logging.DEBUG)

    model = ScarcityModel(r'./models/scarcity', "scarcity")

    ensemble = ModelEnsemble()
    ensemble.model_structure = model
    ensemble.parallel = True
    results = ensemble.perform_experiments(2)
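# The run_model fragment above turns sampled parameters into lookup lists by
# evaluating a parametric curve at fixed x values. A minimal, self-contained
# sketch of that pattern follows; approx_learning is a hypothetical stand-in
# for the model's approxLearning method, not the actual implementation.
import numpy as np


def approx_learning(x, speed, scale, start):
    # hypothetical S-shaped learning curve, used only for illustration
    return start + scale / (1.0 + np.exp(-speed * (x - 50)))


speed, scale, start = 0.1, 0.5, 1.0
lookup = [approx_learning(x, speed, scale, start) for x in range(0, 101, 10)]
print(lookup)  # eleven y-values sampled at x = 0, 10, ..., 100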