def test_multiple_models(self):
       """
       Test running with two different pysd models
       Returns
       -------
 
       """
       relative_path_to_file = '../models/Sales_Agent_Market_Building_Dynamics.mdl'
       directory = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
       mdl_file = os.path.join(directory, relative_path_to_file)
        
       market_model = PysdModel(mdl_file=mdl_file)
       market_model.uncertainties = [RealParameter('Startup Subsidy', 0, 3),
                                     RealParameter('Startup Subsidy Length', 0, 10)]
       market_model.outcomes = [TimeSeriesOutcome('Still Employed')]
 
       relative_path_to_file = '../models/Sales_Agent_Motivation_Dynamics.mdl'
       directory = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
       mdl_file = os.path.join(directory, relative_path_to_file)
 
       motivation_model = PysdModel(mdl_file=mdl_file)
       motivation_model.uncertainties = [RealParameter('Startup Subsidy', 0, 3),
                                         RealParameter('Startup Subsidy Length', 0, 10)]
       motivation_model.outcomes = [TimeSeriesOutcome('Still Employed')]
 
       ensemble = ModelEnsemble()  # instantiate an ensemble
       ensemble.model_structures = [market_model, motivation_model]  # set the models on the ensemble
       ensemble.perform_experiments(5)
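
       # hedged follow-up, not part of the original test: other snippets in this
       # listing unpack perform_experiments into (experiments, outcomes); treating
       # 'model' as a field of the experiments array is an assumption about how
       # runs are labelled
       experiments, outcomes = ensemble.perform_experiments(5)
       for model_name in set(experiments['model']):
           print(model_name, (experiments['model'] == model_name).sum(), 'runs')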
    def test_parallel_experiment(self):
        """
        Test running an experiment in parallel
        Returns
        -------

        """
        from ema_workbench.connectors import PySDConnector
        model = PySDConnector('../models/Teacup.mdl',
                              uncertainties_dict={'Room Temperature': (33, 120)},
                              outcomes_list=['Teacup Temperature'])

        ensemble = ModelEnsemble()  # instantiate an ensemble
        ensemble.model_structure = model  # set the model on the ensemble
        ensemble.parallel = True
        results = ensemble.perform_experiments(cases=20)
    def test_add_outcomes(self):
        from ema_workbench.connectors import PySDConnector
        model = PySDConnector('../models/Teacup.mdl',
                              uncertainties_dict={'Room Temperature': (33, 120)},
                              outcomes_list=['Teacup Temperature'])

        ensemble = ModelEnsemble()  # instantiate an ensemble
        ensemble.model_structure = model  # set the model on the ensemble
        ensemble.parallel = False

        nr_runs = 10
        experiments, outcomes = ensemble.perform_experiments(nr_runs)

        self.assertEqual(experiments.shape[0], nr_runs)
        self.assertIn('TIME', outcomes.keys())
        self.assertIn('Teacup Temperature', outcomes.keys())
Example #4
 def test_vensim_model(self):
     
     #instantiate a model
     wd = r'../models'
     model = VensimExampleModel(wd, "simpleModel")
     
     #instantiate an ensemble
     ensemble = ModelEnsemble()
     
     #set the model on the ensemble
     ensemble.model_structure = model
     
     nr_runs = 10
     experiments, outcomes = ensemble.perform_experiments(nr_runs)
     
     self.assertEqual(experiments.shape[0], nr_runs)
     self.assertIn('TIME', outcomes.keys())
     self.assertIn(model.outcomes[0].name, outcomes.keys())
Example #5
 def test_running_lookup_uncertainties(self):
     '''
     This is the more comprehensive test: given that the lookup
     uncertainty replaces itself with a set of other uncertainties, check
     whether we can successfully run a set of experiments and get results
     back. Whether the uncertainties were correctly replaced can be
     checked by analyzing the experiments array (see the hedged sketch at
     the end of this test).

     '''
     if os.name != 'nt':
         return
     
     model = LookupTestModel( r'../models/', 'lookupTestModel')
     
     #model.step = 4 #reduce data to be stored
     ensemble = ModelEnsemble()
     ensemble.model_structure = model
     
     results = ensemble.perform_experiments(10)
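
     # hedged sketch of the check described in the docstring (absent from the
     # original body); the (experiments, outcomes) return shape and the
     # structured-array field names are assumptions here
     experiments, outcomes = results
     print(experiments.dtype.names)  # the lookup should appear as several replaced parameters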
    def test_multiple_models(self):
        """
        Test running with two different pysd models
        Returns
        -------

        """
        from ema_workbench.connectors import PySDConnector
        market_model = PySDConnector('../models/Sales_Agent_Market_Building_Dynamics.mdl',
                                     uncertainties_dict={'Startup Subsidy': (0, 3),
                                                         'Startup Subsidy Length': (0, 10)},
                                     outcomes_list=['Still Employed'])

        motivation_model = PySDConnector('../models/Sales_Agent_Motivation_Dynamics.mdl',
                                         uncertainties_dict={'Startup Subsidy': (0, 3),
                                                             'Startup Subsidy Length': (0, 10)},
                                         outcomes_list=['Still Employed'])

        ensemble = ModelEnsemble()  # instantiate an ensemble
        ensemble.model_structures = [market_model, motivation_model]  # set the models on the ensemble
        results = ensemble.perform_experiments(cases=20)
def test_optimization():
    if os.name != 'nt':
        return
    ema_logging.log_to_stderr(ema_logging.INFO)
    
    model = FluModel(r'../models', "fluCase")
    ensemble = ModelEnsemble()
    
    ensemble.model_structure = model
    ensemble.parallel=True
    
    pop_size = 8
    nr_of_generations = 10
    eps = np.array([1e-3, 1e6])

    stats, pop  = ensemble.perform_outcome_optimization(obj_function = obj_function_multi,
                                                    algorithm=epsNSGA2,
                                                    reporting_interval=100, 
                                                    weights=(MAXIMIZE, MAXIMIZE),
                                                    pop_size=pop_size,          
                                                    nr_of_generations=nr_of_generations,
                                                    crossover_rate=0.8,
                                                    mutation_rate=0.05,
                                                    eps=eps)
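
# Hedged sketch: obj_function_multi is referenced above but not defined in this
# snippet. Judging from the dummy objective function elsewhere in this listing,
# it receives the outcomes of a run and returns one value per entry in weights;
# the outcome keys below are placeholders, not the real flu-case names.
def obj_function_multi(outcomes):
    objective_1 = outcomes['outcome 1'][-1]  # hypothetical outcome key
    objective_2 = outcomes['outcome 2'][-1]  # hypothetical outcome key
    return objective_1, objective_2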
   def test_parallel_experiment(self):
       """
       Test running an experiment in parallel
       Returns
       -------
 
       """
       relative_path_to_file = '../models/Teacup.mdl'
       directory = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
       mdl_file = os.path.join(directory, relative_path_to_file)
        
       model = PysdModel(mdl_file=mdl_file)
        
       model.uncertainties = [RealParameter('Room Temperature', 33, 120)]
       model.outcomes = [TimeSeriesOutcome('Teacup Temperature')]
 
       ensemble = ModelEnsemble()  # instantiate an ensemble
       ensemble.model_structures = model  # set the model on the ensemble
       ensemble.parallel = True
       ensemble.perform_experiments(5)
        susceptible_population_region_2 = susceptible_population_region_2_NEXT
    
        immune_population_region_1 = immune_population_region_1_NEXT
        immune_population_region_2 = immune_population_region_2_NEXT
    
        deceased_population_region_1.append(deceased_population_region_1_NEXT)
        deceased_population_region_2.append(deceased_population_region_2_NEXT)
        
        #End of main code
    return (runTime, deceased_population_region_1) #, Max_infected, Max_time)

        
if __name__ == "__main__":
   
    np.random.seed(150) #set the seed for replication purposes
    
    ema_logging.log_to_stderr(ema_logging.INFO)
    
    fluModel = MexicanFlu(None, "mexicanFluExample")
    ensemble = ModelEnsemble()
    ensemble.parallel = True
    ensemble.model_structure = fluModel
    
    nr_experiments = 500
    results = ensemble.perform_experiments(nr_experiments, reporting_interval=100)

    lines(results, outcomes_to_show="deceased_population_region_1", 
          show_envelope=True, density=KDE, titles=None, 
          experiments_to_show=np.arange(0, nr_experiments, 10)
          )
    plt.show()
                                        ParameterUncertainty, Outcome)

class SimplePythonModel(ModelStructureInterface):
    '''
    This class represents a simple example of how one can extend the basic
    ModelStructureInterface in order to do EMA on a simple model coded in
    Python directly
    '''
    
    # specify uncertainties
    uncertainties = [ParameterUncertainty((0.1, 10), "x1"),
                     ParameterUncertainty((-0.01,0.01), "x2"),
                     ParameterUncertainty((-0.01,0.01), "x3")]
   
    # specify outcomes
    outcomes = [Outcome('y')]

    def model_init(self, policy, kwargs):
        pass
    
    def run_model(self, case):
        """Method for running an instantiated model structure """
        self.output[self.outcomes[0].name] = case['x1'] * case['x2'] + case['x3']
    

if __name__ == '__main__':
    model = SimplePythonModel(None, 'simpleModel') #instantiate the model
    ensemble = ModelEnsemble() #instantiate an ensemble
#     ensemble.parallel = True
    ensemble.set_model_structure(model) #set the model on the ensemble
    results = ensemble.perform_experiments(1000) #run 1000 experiments
    #note that this reference to the model should be relative
    #this relative path will be combined with the workingDirectory
    model_file = r'\model.vpm'

    #specify outcomes
    outcomes = [Outcome('a', time=True)]

    #specify your uncertainties
    uncertainties = [ParameterUncertainty((0, 2.5), "x11"),
                     ParameterUncertainty((-2.5, 2.5), "x12")]

if __name__ == "__main__":
    #turn on logging
    ema_logging.log_to_stderr(ema_logging.INFO)
    
    #instantiate a model
    wd = r'./models/vensim example'
    vensimModel = VensimExampleModel(wd, "simpleModel")
    
    #instantiate an ensemble
    ensemble = ModelEnsemble()
    
    #set the model on the ensemble
    ensemble.model_structure = vensimModel
    
    #run in parallel; if not set, False is assumed
    ensemble.parallel = True
    
    #perform experiments
    result = ensemble.perform_experiments(1000)
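
    #optionally persist the results (hedged addition, not part of the original
    #snippet; mirrors the save_results usage in the flu example further down this
    #listing, and the file name is only illustrative)
    #save_results(result, r'./data/1000 vensim example runs.tar.gz')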
    
            else:
                atomicBehavior.append([last, steps])
                last = entry
                steps = 0
    atomicBehavior.append([last, steps])
    
    behavior = []
    behavior.append(atomicBehavior.pop(0))
    for entry in atomicBehavior:
        if entry[0] != behavior[-1][0] and entry[1] >2:
            behavior.append(entry)
        elif entry[1] <2:
            continue
        else:
            behavior[-1][1] += entry[1]
    behavior = [entry[0] for entry in behavior]
    
    return behavior   


if __name__ == "__main__":
    ema_logging.log_to_stderr(ema_logging.INFO)
    
    model = ScarcityModel(r'..\data', "scarcity")
    
    ensemble = ModelEnsemble()
    ensemble.set_model_structure(model)
    ensemble.parallel = True
    results = ensemble.perform_experiments(100)
#    determineBehavior(results)
Example #13
                                        "effect of perceived adequacy on energy drain lookup", self, 0, 10),
                         LookupUncertainty('hearne2', [(-2, 2), (-1, 2), (0, 1.5), (0.1, 1.6), (0.5, 1.5), (0.1, 2)], 
                                        "effect of perceived adequacy of hours worked lookup", self, 0, 2.5),
                         LookupUncertainty('hearne2', [(-1, 1), (-1, 1), (0, 0.9), (0.1, 1), (0.5, 1.5), (1, 1.5)], 
                                        "effect of energy levels on hours worked lookup", self, 0, 1.5),
                         LookupUncertainty('hearne2', [(-1, 1), (-1, 1), (0, 0.9), (0.1, 1), (0.5, 1.5), (1, 1.5)], 
                                        "effect of high energy on further recovery lookup", self, 0, 1.25),
                         LookupUncertainty('hearne2', [(-2, 2), (-1, 1), (0, 100), (20, 120), (0.5, 1.5), (0.5, 2)], 
                                        "effect of hours worked on energy recovery lookup", self, 0, 1.5),
                         LookupUncertainty('approximation', [(-0.5, 0.35), (3, 5), (1, 10), (0.2, 0.4), (0, 120)],
                                        "effect of hours worked on energy drain lookup", self, 0, 3),
                         LookupUncertainty('hearne1', [(0, 1), (0, 0.15), (1, 1.5), (0.75, 1.25)], 
                                        "effect of low energy on further depletion lookup", self, 0, 1)]     

        self._delete_lookup_uncertainties()                   

        
if __name__ == "__main__":
    ema_logging.log_to_stderr(ema_logging.INFO)
    model = Burnout(r'./models/burnout', "burnout")

    ensemble = ModelEnsemble()
    ensemble.model_structures = model
    
    #run policy with old cases
    results = ensemble.perform_experiments(100)
    lines(results, 'Energy Level', density=BOXPLOT)
    plt.show()

    
Example #14
                        RealParameter("stdev", 0.001, 0.005),
                        RealParameter("delta", 0.93, 0.99)]
 #specify outcomes 
 model.outcomes = [ScalarOutcome("max_P"),
                   ScalarOutcome("utility"),
                   ScalarOutcome("inertia"),
                   ScalarOutcome("reliability")]
 
 # override some of the defaults of the model
 model.constants = [Constant('alpha', 0.41),
                    Constant('nsamples', 150),]
 
 # set levers, one for each time step
 model.levers = [RealParameter(str(i), 0, 0.1) for i in range(100)]
 
 ensemble = ModelEnsemble() #instantiate an ensemble
 ensemble.model_structures = model #set the model on the ensemble
 ensemble.parallel = True
 ensemble.processes = 1
 
 # generate some random policies by sampling over levers
 policies, levers, n = samplers.sample_levers(ensemble.model_structures, 4, 
                                      sampler=samplers.MonteCarloSampler())
 
 # policies is a generator, so let's exhaust the generator
 policies = [policy for policy in policies]
 
 # policy name defaults to a repr(dict), let's rename
 for i, policy in enumerate(policies):
     policy.name = str(i)
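 
 # hedged continuation, not in the original snippet: the renamed policies would
 # typically be handed to the ensemble before running experiments, mirroring the
 # ensemble.policies assignment used in the flu example elsewhere in this listing
 # ensemble.policies = policies
 # results = ensemble.perform_experiments(10)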
 
Example #15
                             "normal contact rate region 2")]
                         
    def model_init(self, policy, kwargs):
        '''initializes the model'''
        
        try:
            self.model_file = policy['file']
        except KeyError:
            ema_logging.warning("key 'file' not found in policy")
        super(FluModel, self).model_init(policy, kwargs)
        
if __name__ == "__main__":
    ema_logging.log_to_stderr(ema_logging.INFO)
        
    model = FluModel(r'./models/flu', "flucase")
    ensemble = ModelEnsemble()
    ensemble.model_structure = model
    
    #add policies
    policies = [{'name': 'no policy',
                 'file': r'\FLUvensimV1basecase.vpm'},
                {'name': 'static policy',
                 'file': r'\FLUvensimV1static.vpm'},
                {'name': 'adaptive policy',
                 'file': r'\FLUvensimV1dynamic.vpm'}
                ]
    ensemble.policies = policies
    
    #turn on parallel processing
    ensemble.parallel = True 
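    
    #the snippet breaks off here; a typical continuation, based on the flu example
    #further down this listing, would run the experiments across the three policies
    #and store the results (hedged sketch, not from the original code; file name is
    #only illustrative)
    #results = ensemble.perform_experiments(1000)
    #save_results(results, r'./data/1000 flu cases with policies.tar.gz')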
    
                Outcome('grass', time=True) # TODO patches not working in reporting
                ]
    
if __name__ == "__main__":
    import multiprocessing
    ema_logging.LOG_FORMAT = multiprocessing.util.DEFAULT_LOGGING_FORMAT
    
    #turn on logging
    ema_logging.log_to_stderr(ema_logging.DEBUG)
    ema_logging.info('in main')
    
    #instantiate a model
    fh = r"./models/predatorPreyNetlogo"
    model = PredatorPrey(fh, "simpleModel")
    
    #instantiate an ensemble
    ensemble = ModelEnsemble()
    
    #set the model on the ensemble
    ensemble.model_structure = model
    
    #run in parallel; if not set, False is assumed
    ensemble.parallel = True
    ensemble.processes = 2
    
    #perform experiments
    results = ensemble.perform_experiments(10, reporting_interval=1)

    plotting.lines(results, density=plotting_util.KDE)
    plt.show()
    Python directly
    '''
    
    #specify uncertainties
    uncertainties = [ParameterUncertainty((0.1, 10), "x1"),
                     ParameterUncertainty((-0.01,0.01), "x2"),
                     ParameterUncertainty((-0.01,0.01), "x3")]
   
    #specify outcomes 
    outcomes = [Outcome('y')]

    def model_init(self, policy, kwargs):
        pass
    
    def run_model(self, case):
        """Method for running an instantiated model structure """
        self.output[self.outcomes[0].name] = case['x1'] * case['x2'] + case['x3']
    

if __name__ == '__main__':
    ema_logging.LOG_FORMAT = '[%(name)s/%(levelname)s/%(processName)s] %(message)s'
    ema_logging.log_to_stderr(ema_logging.INFO)
    
    model = SimplePythonModel(None, 'simpleModel') #instantiate the model
    ensemble = ModelEnsemble() #instantiate an ensemble
    ensemble.parallel = True
    ensemble.model_structure = model #set the model on the ensemble
    results = ensemble.perform_experiments(1000, reporting_interval=1) #run 1000 experiments
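    #unpack and inspect the results (hedged addition, not part of the original
    #snippet; mirrors the (experiments, outcomes) unpacking used in the test
    #snippets earlier in this listing)
    experiments, outcomes = results
    print(experiments.shape[0], 'experiments run')
    print(sorted(outcomes.keys()))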
    

    
Example #18
            else:
                atomicBehavior.append([last, steps])
                last = entry
                steps = 0
    atomicBehavior.append([last, steps])

    behavior = []
    behavior.append(atomicBehavior.pop(0))
    for entry in atomicBehavior:
        if entry[0] != behavior[-1][0] and entry[1] > 2:
            behavior.append(entry)
        elif entry[1] < 2:
            continue
        else:
            behavior[-1][1] += entry[1]
    behavior = [entry[0] for entry in behavior]

    return behavior


if __name__ == "__main__":
    ema_logging.log_to_stderr(ema_logging.INFO)

    model = ScarcityModel(r'..\data', "scarcity")

    ensemble = ModelEnsemble()
    ensemble.set_model_structure(model)
    ensemble.parallel = True
    results = ensemble.perform_experiments(100)
#    determineBehavior(results)
    a_mean = np.mean(a)
    b_mean = np.mean(b)

    if a_mean < 0.5 or b_mean < 0.5:
        return (np.inf,) * 2
    else:
        return a_mean, b_mean

if __name__ == "__main__":
    ema_logging.log_to_stderr(ema_logging.INFO)
    model = DummyModel(r"", "dummy")
    
    np.random.seed(123456789)
       
    ensemble = ModelEnsemble()
    ensemble.set_model_structure(model)

    
    policy_levers = {'Trigger a': {'type':'list', 'values':[0, 0.25, 0.5, 0.75, 1]},
                     'Trigger b': {'type':'list', 'values':[0, 0.25, 0.5, 0.75, 1]},
                     'Trigger c': {'type':'list', 'values':[0, 0.25, 0.5, 0.75, 1]}}
    
    cases = ensemble._generate_samples(10, UNION)[0]
    ensemble.add_policy({"name":None})
    experiments = [entry for entry in ensemble._generate_experiments(cases)]
    for entry in experiments:
        entry.pop("model")
        entry.pop("policy")
    cases = experiments    
    
                             "susceptible to immune population delay time region 1"),
        ParameterUncertainty((0.5,2), 
                             "susceptible to immune population delay time region 2"),
        ParameterUncertainty((0.01, 5), 
                             "root contact rate region 1"),
        ParameterUncertainty((0.01, 5), 
                             "root contact ratio region 2"),
        ParameterUncertainty((0, 0.15), 
                             "infection ratio region 1"),
        ParameterUncertainty((0, 0.15), 
                             "infection rate region 2"),
        ParameterUncertainty((10, 100), 
                             "normal contact rate region 1"),
        ParameterUncertainty((10, 200), 
                             "normal contact rate region 2")]
                         
        
if __name__ == "__main__":
    ema_logging.log_to_stderr(ema_logging.INFO)
        
    model = FluModel(r'./models/flu', "fluCase")
    ensemble = ModelEnsemble()
    ensemble.model_structure = model
    
    ensemble.parallel = True #turn on parallel processing

    nr_experiments = 1000
    results = ensemble.perform_experiments(nr_experiments)
    
    fh = r'./data/{} flu cases no policy.tar.gz'.format(nr_experiments)
    save_results(results, fh)
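    
    #hedged follow-up, not part of the original snippet: the archive written above
    #can be read back in a later analysis session; load_results is assumed to be
    #the counterpart of save_results
    #results = load_results(fh)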
        
        speed = kwargs.pop("lookup price substitute speed")
        begin = kwargs.pop("lookup price substitute begin")
        end = kwargs.pop("lookup price substitute end")
        lookup = [self.priceSubstite(x, speed, begin, end) for x in range(0,100, 10)]
        kwargs['relative price substitute lookup'] = lookup
                        
        scale = kwargs.pop("lookup returns to scale speed")
        speed = kwargs.pop("lookup returns to scale scale")
        lookup = [self.returnsToScale(x, speed, scale) for x in range(0, 101, 10)]
        kwargs['returns to scale lookup'] = lookup
        
        scale = kwargs.pop("lookup approximated learning speed")
        speed = kwargs.pop("lookup approximated learning scale")
        start = kwargs.pop("lookup approximated learning start")
        lookup = [self.approxLearning(x, speed, scale, start) for x in range(0, 101, 10)]
        kwargs['approximated learning effect lookup'] = lookup
        
        super(ScarcityModel, self).run_model(kwargs)


if __name__ == "__main__":
    ema_logging.log_to_stderr(ema_logging.DEBUG)
    
    model = ScarcityModel(r'./models/scarcity', "scarcity")
    
    ensemble = ModelEnsemble()
    ensemble.model_structure = model
    ensemble.parallel = True
    results = ensemble.perform_experiments(2)
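    
    #hedged follow-up, not in the original snippet: the runs could be visualised
    #with the same lines() helper used by the other examples in this listing
    #(assuming lines, KDE, and plt are imported as they are there)
    #lines(results, density=KDE)
    #plt.show()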