Example #1
    def model_init(self, policy, kwargs):
        '''initializes the model'''
        
        try:
            self.model_file = policy['file']
        except KeyError:
            ema_logging.warning("key 'file' not found in policy")
        super(FluModel, self).model_init(policy, kwargs)
        
if __name__ == "__main__":
    ema_logging.log_to_stderr(ema_logging.INFO)
        
    model = FluModel(r'..\..\models\flu', "fluCase")
    ensemble = ModelEnsemble()
    ensemble.set_model_structure(model)
    
    #add policies
    policies = [{'name': 'no policy',
                 'file': r'\FLUvensimV1basecase.vpm'},
                {'name': 'static policy',
                 'file': r'\FLUvensimV1static.vpm'},
                {'name': 'adaptive policy',
                 'file': r'\FLUvensimV1dynamic.vpm'}
                ]
    ensemble.add_policies(policies)

    ensemble.parallel = True #turn on parallel processing

    results = ensemble.perform_experiments(1000)
    
    save_results(results, r'./data/1000 flu cases.bz2')
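For reference, the archive written above can be read back and plotted without re-running the 1000 experiments. A minimal sketch reusing the load_results and lines helpers that appear in Example #3; the import paths and the outcome name are assumptions, since this excerpt does not show the model's imports or outcome definitions.

import matplotlib.pyplot as plt

from expWorkbench import load_results   # assumed import path for this workbench version
from analysis.graphs import lines       # assumed path; Example #7 pulls plotting from a graphs module

# reload the experiments saved by the script above
results = load_results(r'./data/1000 flu cases.bz2')

# plot the stored time series; 'infected fraction R1' is a hypothetical outcome name,
# replace it with the outcomes actually defined on FluModel
lines(results, ['infected fraction R1'], density=True, hist=True)
plt.show()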
Example #2
    outcomes = [
        Outcome('CumulativeGHGreduction', time=True),
    ]


if __name__ == "__main__":
    #turn on logging
    ema_logging.log_to_stderr(ema_logging.INFO)

    #instantiate a model
    vensimModel = EVO(r"./models", "simpleModel")

    #instantiate an ensemble
    ensemble = ModelEnsemble()

    #set the model on the ensemble
    ensemble.set_model_structure(vensimModel)
    # run in parallel; if not set, False is assumed
    ensemble.parallel = True

    cases = [{} for _ in range(1000)]

    #perform experiments
    results = ensemble.perform_experiments(cases, reporting_interval=100)

    save_results(
        results,
        r'.\data\EMA results ModelSebastiaanGreeven 1000 exp Stoch Test.tar.gz'
    )
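The saved archive can later be reloaded for analysis without re-running the experiments. A short sketch, assuming load_results returns the same (experiments, outcomes) pair that perform_experiments produces, with outcomes keyed by the names declared on the model; the import path is an assumption.

from expWorkbench import load_results   # assumed import path for this workbench version

results = load_results(
    r'.\data\EMA results ModelSebastiaanGreeven 1000 exp Stoch Test.tar.gz')

# assumed layout: an experiments array plus a dict of outcome arrays
experiments, outcomes = results

# 'CumulativeGHGreduction' is the time-series outcome declared on EVO above;
# each row is one experiment, each column one saved time step
print(outcomes['CumulativeGHGreduction'].shape)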
Example #3
        #self.uncertainties.pop()
        self.uncertainties.append(LookupUncertainty([(0, 4), (1, 5), (1, 5), (0, 2), (0, 2)], "TF2", 'approximation', self, 0, 10))
        #self.uncertainties.pop()
        self.uncertainties.append(ParameterUncertainty((0.02, 0.08), "rate1"))
        self.uncertainties.append(ParameterUncertainty((0.02, 0.08), "rate2"))
        self.uncertainties.append(LookupUncertainty([[(0.0, 0.05), (0.25, 0.15), (0.5, 0.4), (0.75, 1), (1, 1.25)], 
                                                     [(0.0, 0.1), (0.25, 0.25), (0.5, 0.75), (1, 1.25)],
                                                     [(0.0, 0.0), (0.1, 0.2), (0.3, 0.6), (0.6, 0.9), (1, 1.25)]], "TF3", 'categories', self, 0, 2))
        #self.uncertainties.pop()   
        self.delete_lookup_uncertainties()                   

        
if __name__ == "__main__":
    logger = logging.log_to_stderr(logging.INFO)
    model = lookup_model(r'..\lookups', "sampleModel")

    #model.step = 4 #reduce data to be stored
    ensemble = ModelEnsemble()
    ensemble.set_model_structure(model)

    #turn on parallel
    ensemble.parallel = False
    
    #run policy with old cases
    results = ensemble.perform_experiments(10)
    save_results(results, 'lookup_3methods.cpickle')
    
    results = load_results('lookup_3methods.cpickle')
    outcomes = ['TF', 'TF2', 'TF3', 'flow1']
    lines(results, outcomes, density=True, hist=True)
    plt.show()  
Example #4
    def model_init(self, policy, kwargs):
        """initializes the model"""

        try:
            self.modelFile = policy["file"]
        except KeyError:
            logging.warning("key 'file' not found in policy")
        super(FluModel, self).model_init(policy, kwargs)


if __name__ == "__main__":
    logging.log_to_stderr(logging.INFO)

    model = FluModel(r"..\..\models\flu", "fluCase")
    ensemble = SimpleModelEnsemble()
    ensemble.set_model_structure(model)

    # add policies
    policies = [
        {"name": "no policy", "file": r"\FLUvensimV1basecase.vpm"},
        {"name": "static policy", "file": r"\FLUvensimV1static.vpm"},
        {"name": "adaptive policy", "file": r"\FLUvensimV1dynamic.vpm"},
    ]
    ensemble.add_policies(policies)

    ensemble.parallel = True  # turn on parallel processing

    results = ensemble.perform_experiments(1000)

    save_results(results, r"../../src/analysis/1000 flu cases no policy.cPickle")
Example #5
        ParameterUncertainty((0.5,2), 
                             "susceptible to immune population delay time region 2"),
        ParameterUncertainty((0.01, 5), 
                             "root contact rate region 1"),
        ParameterUncertainty((0.01, 5), 
                             "root contact ratio region 2"),
        ParameterUncertainty((0, 0.15), 
                             "infection ratio region 1"),
        ParameterUncertainty((0, 0.15), 
                             "infection rate region 2"),
        ParameterUncertainty((10, 100), 
                             "normal contact rate region 1"),
        ParameterUncertainty((10, 200), 
                             "normal contact rate region 2")]
                         
        
if __name__ == "__main__":
    ema_logging.log_to_stderr(ema_logging.INFO)
        
    model = FluModel(r'./models/flu', "fluCase")
    ensemble = ModelEnsemble()
    ensemble.set_model_structure(model)
    
    ensemble.parallel = True #turn on parallel processing

    nr_experiments = 1000
    results = ensemble.perform_experiments(nr_experiments)
    
    fh =  r'./data/{} flu cases no policy.tar.gz'.format(nr_experiments)
    save_results(results, fh)
Example #6
        kwargs['shortage price effect lookup'] =  [self.f(x/10, speed, loc) for x in range(0,100)]
        
        speed = kwargs.pop("lookup price substitute speed")
        begin = kwargs.pop("lookup price substitute begin")
        end = kwargs.pop("lookup price substitute end")
        kwargs['relative price substitute lookup'] = [self.priceSubstite(x, speed, begin, end) for x in range(0,100, 10)]
                
        scale = kwargs.pop("lookup returns to scale speed")
        speed = kwargs.pop("lookup returns to scale scale")
        kwargs['returns to scale lookup'] = [self.returnsToScale(x, speed, scale) for x in range(0, 101, 10)]
        
        scale = kwargs.pop("lookup approximated learning speed")
        speed = kwargs.pop("lookup approximated learning scale")
        start = kwargs.pop("lookup approximated learning start")
        kwargs['approximated learning effect lookup'] = [self.approxLearning(x, speed, scale, start) for x in range(0, 101, 10)]    
        
        super(ScarcityModel, self).run_model(kwargs)


if __name__ == "__main__":
    ema_logging.log_to_stderr(ema_logging.INFO)
    
    model = ScarcityModel(r'..\..\models\scarcity', "scarcity")
    
    ensemble = ModelEnsemble()
    ensemble.set_model_structure(model)
    ensemble.parallel = True
    results = ensemble.perform_experiments(1000)
    save_results(results, r'.\data\scarcity 1000.bz2')

Example #7
def perform_experiments():
    logger = EMAlogging.log_to_stderr(level=EMAlogging.INFO)
    model = SalinizationModel(r"C:\eclipse\workspace\EMA-workbench\models\salinization", "verzilting")
    model.step = 4
    
    ensemble = SimpleModelEnsemble()
    ensemble.set_model_structure(model)

    policies=[{'name': 'no policy',
               'file': r'\verzilting 2.vpm'},
              {'name': 'policy group 8',
               'file': r'\group 8 best policy.vpm'},
              {'name': 'policy other group',
               'file': r'\other group best policy.vpm'},
              {'name': 'policies combined',
               'file': r'\best policies combined.vpm'}
              ]
    ensemble.add_policies(policies)
    
    ensemble.parallel = True
    nr_of_experiments = 1000
    results = ensemble.perform_experiments(nr_of_experiments)
    return results
        
if __name__ == "__main__":
    results = perform_experiments()
    fig = graphs.envelopes(results, column='policy')
    plt.show()
    save_results(results, 'salinization policys both groups.cPickle')
       
Example #8
        ParameterUncertainty((0.5,2), 
                             "susceptible to immune population delay time region 1"),
        ParameterUncertainty((0.5,2), 
                             "susceptible to immune population delay time region 2"),
        ParameterUncertainty((0.01, 5), 
                             "root contact rate region 1"),
        ParameterUncertainty((0.01, 5), 
                             "root contact ratio region 2"),
        ParameterUncertainty((0, 0.15), 
                             "infection ratio region 1"),
        ParameterUncertainty((0, 0.15), 
                             "infection rate region 2"),
        ParameterUncertainty((10, 100), 
                             "normal contact rate region 1"),
        ParameterUncertainty((10, 200), 
                             "normal contact rate region 2")]
                         
        
if __name__ == "__main__":
    ema_logging.log_to_stderr(ema_logging.INFO)
        
    model = FluModel(r'..\..\models\flu', "fluCase")
    ensemble = ModelEnsemble()
    ensemble.set_model_structure(model)
    
    ensemble.parallel = True #turn on parallel processing

    results = ensemble.perform_experiments(1000)
    
    save_results(results, r'1000 flu cases no policy.bz2')
Example #9
        self.uncertainties.append(LookupUncertainty([(2, 5), (0, 0.8), (1, 10), (2, 3), (0, 1.6)], "effect of perceived adequacy of hours worked lookup", 'approximation', self, 0, 2.5))
        self.uncertainties.append(LookupUncertainty([(-1, 0.1), (0.5, 1.5), (1, 10), (0, 0.2), (0, 1)], "effect of energy levels on hours worked lookup", 'approximation', self, 0, 1.5))
        self.uncertainties.append(LookupUncertainty([(1, 2), (-1, 0.1), (1, 10), (0.9, 1.2), (0.8, 1)], "effect of high energy on further recovery lookup", 'approximation', self, 0, 1.25))
        self.uncertainties.append(LookupUncertainty([(1, 2), (-1, 0.3), (1, 10), (1, 1.5), (0, 120)], "effect of hours worked on energy recovery lookup", 'approximation', self, 0, 1.5))
        self.uncertainties.append(LookupUncertainty([(-0.5, 0.35), (3, 5), (1, 10), (0.2, 0.4), (0, 120)], "effect of hours worked on energy drain lookup", 'approximation', self, 0, 3))
        self.uncertainties.append(LookupUncertainty([(-1, 0), (0.8, 2), (1, 10), (0, 0.1), (0, 0.2)], "effect of low energy on further depletion lookup", 'approximation', self, 0, 1))        

        self.delete_lookup_uncertainties()                   

        
if __name__ == "__main__":
    logger = logging.log_to_stderr(logging.INFO)
    model = Burnout(r'..\lookups', "burnout")

    model.step = 4 #reduce data to be stored
    ensemble = ModelEnsemble()
    ensemble.set_model_structure(model)

    #turn on parallel
    ensemble.parallel = False
    
    #run policy with old cases
    results = ensemble.perform_experiments(10)
    save_results(results, 'burnout_10_approx.cpickle')
    
#    results = load_results('burnout_100_2.cpickle')
#    outcome1 =['effect of hours worked on energy drain lookup']
#    outcome2 =['effect of hours worked on energy drain']
#    lines(results, outcome1, density=True, hist=True)
#    lines(results, outcome2, density=True, hist=True)
#    plt.show()
Example #10
    ensemble = ModelEnsemble()
    ensemble.set_model_structure(msi)
    
    policies = [{'name': 'neoclassical_all_same',
                 'file': r'\Model_adoption_of_ISG_appliances.nlogo'},
#                 {'name': 'normality each run',
#                  'file': r'\Model_adoption_of_ISG_appliances_-_1_run_normality_household_properties.nlogo'},
                ]
    ensemble.add_policies(policies)
    ensemble.parallel = True
    n = 50
    results = ensemble.perform_experiments(n)
#     fig, axesdict = envelopes(results, 
#                 outcomes_to_show=['percentage_of_households_owning_ISG_app'], 
#                 group_by='policy', 
#                 grouping_specifiers=['no normality',
#                                     'normality each run'
# #                                     'normality each step'
#                                      ],
#                 legend=True,
#                 density='kde', fill=True,titles=None)
# # set the size of the figure to look reasonable nice
#     fig.set_size_inches(8,5)
# # save figure
#     plt.show()
#     plt.savefig("./pictures/KDE comparison savings.png", dpi=75)

    fn = r'./data/neoclassical_all_same.bz2'.format(n)
    save_results(results, fn)
  
    print "finish"
Example #11
    #                 'TimeHorizonGov3':50,
    #                 'TimeHorizonGov4':50,
    #                 'TimeHorizonGov5':50,
    #                 'TimeHorizonInd1':50,
    #                 'TimeHorizonInd2':50,
    #                 'TimeHorizonInd3':50,
    #                 'TimeHorizonInd4':50,
    #                 'TimeHorizonInd5':50}
    #     msi2 = EVO(r"./models", 'longTimeHorizon', defaults=defaults)

    msi1 = EVO('./models', 'full')

    #instantiate an ensemble
    ensemble = ModelEnsemble()

    #set the model on the ensemble
    ensemble.add_model_structure(msi1)
    #     ensemble.add_model_structure(msi2)

    ensemble.parallel = True
    ensemble.processes = 36

    #perform experiments
    nr_experiments = 1000
    results = ensemble.perform_experiments(nr_experiments,
                                           reporting_interval=100)

    fn = r'.\data\full {} exp {} rep.tar.gz'.format(nr_experiments,
                                                    msi1.nr_replications)
    save_results(results, fn)
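Hard-coding 36 worker processes only fits one specific machine. A small variation on the parallel setup above that sizes the pool from the available cores instead; ensemble.processes is used exactly as in the script above, and multiprocessing.cpu_count() comes from the standard library.

import multiprocessing

ensemble.parallel = True
# size the worker pool to the current machine rather than hard-coding 36,
# leaving one core free for the parent process
ensemble.processes = max(1, multiprocessing.cpu_count() - 1)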
Example #12
    model = EnergyTrans(r'..\..\models\EnergyTrans', "ESDMAElecTrans")
    model.step = 4 #reduce data to be stored
    ensemble = SimpleModelEnsemble()
    ensemble.set_model_structure(model)
    
    policies = [{'name': 'no policy',
                 'file': r'\ESDMAElecTrans_NoPolicy.vpm'},
                {'name': 'basic policy',
                 'file': r'\ESDMAElecTrans_basic_policy.vpm'},
                {'name': 'tech2',
                 'file': r'\ESDMAElecTrans_tech2.vpm'},
                {'name': 'econ',
                 'file': r'\ESDMAElecTrans_econ.vpm'},
                {'name': 'adaptive policy',
                 'file': r'\ESDMAElecTrans_adaptive_policy.vpm'},
                {'name': 'ap with op',
                 'file': r'\ESDMAElecTrans_ap_with_op.vpm'},
                ]
    ensemble.add_policies(policies)

    #turn on parallel
    ensemble.parallel = True
    
    #run policy with old cases
    results = ensemble.perform_experiments(100)

    save_results(results, r'C:\workspace\EMA-workbench\src\analysis\eng_trans_100.cPickle')
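With six policies in the ensemble, a per-policy comparison of the saved results is the natural follow-up. A sketch mirroring Example #7's envelopes call; the import paths are assumptions, and note that the grouping keyword differs between the workbench versions shown in these examples (column= here, group_by= in the commented block of Example #10).

import matplotlib.pyplot as plt

from expWorkbench import load_results   # assumed import path for this workbench version
from analysis import graphs             # assumed path, mirrors Example #7's graphs.envelopes usage

results = load_results(
    r'C:\workspace\EMA-workbench\src\analysis\eng_trans_100.cPickle')

# one envelope per policy, as in Example #7
fig = graphs.envelopes(results, column='policy')
plt.show()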