Example No. 1
0
def test_generate_samples():
    """Smoke test for ``ModelEnsemble._generate_samples``.

    Builds three dummy model structures whose uncertainty sets partially
    overlap, then samples once over the UNION and once over the
    INTERSECTION of those uncertainties, printing the sampled names.

    Updated to run on Python 3: the Python 2-only ``print`` statements
    were replaced with ``print()`` calls (also valid on Python 2 for a
    single argument).
    """
    # three model structure interfaces sharing some uncertainties
    model_a = DummyInterface(None, "A")
    model_b = DummyInterface(None, "B")
    model_c = DummyInterface(None, "C")

    # uncertainties: two shared by all, one per pair (a/b and b/c),
    # and one unique to a and to b respectively
    shared_abc_1 = ParameterUncertainty((0, 1), "shared abc 1")
    shared_abc_2 = ParameterUncertainty((0, 1), "shared abc 2")
    shared_ab_1 = ParameterUncertainty((0, 1), "shared ab 1")
    shared_bc_1 = ParameterUncertainty((0, 1), "shared bc 1")
    a_1 = ParameterUncertainty((0, 1), "a 1")
    b_1 = ParameterUncertainty((0, 1), "b 1")
    model_a.uncertainties = [shared_abc_1, shared_abc_2, shared_ab_1, a_1]
    model_b.uncertainties = [shared_abc_1, shared_abc_2, shared_ab_1,
                             shared_bc_1, b_1]
    model_c.uncertainties = [shared_abc_1, shared_abc_2, shared_bc_1]

    print('------------ UNION ------------')
    ensemble = ModelEnsemble()
    ensemble.add_model_structures([model_a, model_b, model_c])
    # union: every uncertainty of every model should be sampled
    sampled_unc = ensemble._generate_samples(10, UNION)
    for entry in sampled_unc:
        print(entry)

    print('------------ INTERSECTION ------------')
    # intersection: only the uncertainties shared by all three models
    sampled_unc = ensemble._generate_samples(10, INTERSECTION)
    for entry in sampled_unc:
        print(entry)
Example No. 2
0
def test_perform_experiments():
    """Run ``perform_experiments`` over three dummy model structures,
    once with the union and once with the intersection of their
    uncertainty sets, using parallel execution.
    """
    # three model structure interfaces with partially overlapping
    # uncertainty sets
    model_a = Dummy_interface(None, "A")
    model_b = Dummy_interface(None, "B")
    model_c = Dummy_interface(None, "C")

    # two uncertainties shared by all models, one per pair, and one
    # unique to model a and to model b respectively
    shared_abc_1 = ParameterUncertainty((0,1), "shared abc 1")
    shared_abc_2 = ParameterUncertainty((0,1), "shared abc 2")
    shared_ab_1 = ParameterUncertainty((0,1), "shared ab 1")
    shared_bc_1 = ParameterUncertainty((0,1), "shared bc 1")
    a_1 = ParameterUncertainty((0,1), "a 1")
    b_1 = ParameterUncertainty((0,1), "b 1")

    model_a.uncertainties = [shared_abc_1, shared_abc_2, shared_ab_1, a_1]
    model_b.uncertainties = [shared_abc_1, shared_abc_2, shared_ab_1,
                             shared_bc_1, b_1]
    model_c.uncertainties = [shared_abc_1, shared_abc_2, shared_bc_1]

    # a single outcome, shared by all three models
    outcome_shared = Outcome("test", time=True)
    for model in (model_a, model_b, model_c):
        model.outcomes = [outcome_shared]

    ensemble = ModelEnsemble()
    ensemble.parallel = True
    ensemble.add_model_structures([model_a, model_b, model_c])

    ema_logging.info('------------- union of uncertainties -------------')
    ensemble.perform_experiments(10, which_uncertainties=UNION,
                                 reporting_interval=1)

    ema_logging.info('------------- intersection of uncertainties -------------')
    ensemble.perform_experiments(10, which_uncertainties=INTERSECTION,
                                 reporting_interval=1)
Example No. 3
0
def test_determine_intersecting_uncertainties():
    """Exercise ``ModelEnsemble.determine_uncertainties`` on three model
    structures whose uncertainty sets partially overlap, and print the
    resulting overview and unique-uncertainty mapping.

    Updated to run on Python 3: the Python 2-only ``print`` statements
    and ``dict.iteritems()`` calls were replaced by ``print()`` and
    ``.items()``.
    """
    # what are all the test cases?
    # test for error in case uncertainty by same name but different
    # in other respects

    # everything shared
    model_a = DummyInterface(None, "A")
    model_b = DummyInterface(None, "B")
    model_c = DummyInterface(None, "C")

    # uncertainties: two shared by all three models, one per pair,
    # and one unique to model a and to model b respectively
    shared_abc_1 = ParameterUncertainty((0, 1), "shared abc 1")
    shared_abc_2 = ParameterUncertainty((0, 1), "shared abc 2")
    shared_ab_1 = ParameterUncertainty((0, 1), "shared ab 1")
    shared_bc_1 = ParameterUncertainty((0, 1), "shared bc 1")
    a_1 = ParameterUncertainty((0, 1), "a 1")
    b_1 = ParameterUncertainty((0, 1), "b 1")
    model_a.uncertainties = [shared_abc_1, shared_abc_2, shared_ab_1, a_1]
    model_b.uncertainties = [shared_abc_1, shared_abc_2, shared_ab_1,
                             shared_bc_1, b_1]
    model_c.uncertainties = [shared_abc_1, shared_abc_2, shared_bc_1]

    ensemble = ModelEnsemble()
    ensemble.add_model_structures([model_a, model_b, model_c])
    overview, unique_unc = ensemble.determine_uncertainties()
    for key, value in overview.items():
        print([msi for msi in key], [un.name for un in value])

    for key, value in unique_unc.items():
        print(key, value)

    # NOTE (translated from Dutch): this could be even simpler. If I just
    # have the dict mapping uncertainty name to uncertainty, and I know
    # there are no naming conflicts, I can generate an LHS for each one
    # and then, in run_experiments, simply take the LHS samples belonging
    # to the uncertainties of each msi from this dict.

    # some shared between all, some between a and b, some between b and c
    # some between a and c

    # some shared, some unique

    # nothing shared

    # NOTE(review): trailing statement kept as in the original; its result
    # is unused.
    ensemble = ModelEnsemble()
#                     temp_output[key] = [value]
#           
#         self.output = {}
#         for key, value in temp_output.iteritems():
#             value = np.asarray(value)
#             self.output[key] = value
#             self.output["mean_{}".format(key)] = np.mean(value, axis=0)
#             self.output["std_{}".format(key)] = np.std(value, axis=0)
            
if __name__ == '__main__':
    # route EMA workbench log messages to stderr at INFO level
    logger = ema_logging.log_to_stderr(ema_logging.INFO)
    
#     model.step = 1 #reduce data to be stored
    # working directory holding the NetLogo model files (.nlogo)
    wd = r'./netlogo_models'
    msi = DiffusionModelInterfaceHeterogeneity(wd, 'dmodel')
    ensemble = ModelEnsemble()
    ensemble.set_model_structure(msi)
    
    # a single active policy; a second policy (different .nlogo file) is
    # kept commented out below
    policies = [{'name': 'neoclassical_all_same',
                 'file': r'\Model_adoption_of_ISG_appliances.nlogo'},
#                 {'name': 'normality each run',
#                  'file': r'\Model_adoption_of_ISG_appliances_-_1_run_normality_household_properties.nlogo'},
                ]
    ensemble.add_policies(policies)
    ensemble.parallel = True
    # number of experiments to run
    n = 50
    results = ensemble.perform_experiments(n)
#     fig, axesdict = envelopes(results, 
#                 outcomes_to_show=['percentage_of_households_owning_ISG_app'], 
#                 group_by='policy', 
#                 grouping_specifiers=['no normality',
    # NOTE(review): the Outcome(...) lines below are orphaned — they look
    # like the tail of an outcomes list whose head was lost (extraction /
    # truncation damage); as written they are a dangling, invalid
    # expression. Kept byte-identical pending reconstruction of the head.
                Outcome("percentage_of_late_majority_with_ISG_appliances", time=True),
                Outcome("percentage_of_laggards_with_ISG_appliances", time=True),
                Outcome("new_households_with_ISG_appliance_stat", time=True),
                Outcome("average_total_savings", time=True),
                Outcome("savings_made_by_last_adopters", time=True)]
         
        
            
if __name__ == '__main__':
    ema_logging.log_to_stderr(ema_logging.INFO)
    
    
    
    wd = r'C:/Users/Tristan/Documents/GitHub/SmartGridDiffusion/src/netlogo_models'
#     wd = r'C:/Users/Titan946/Documents/GitHub/SmartGridDiffusion/src/netlogo_models'
    ensemble = ModelEnsemble()
    
<<<<<<< HEAD
    wd = r'./netlogo_models'
=======
>>>>>>> ab19030f8f1c8bb1d2ffb87d3f61eecd9d721b1d
    msi = DiffusionModelInterface(wd, 'dmodel')
    
    ensemble.add_model_structure(msi)
    
    ensemble.parallel = True
    
<<<<<<< HEAD
    n = 250
=======
Example No. 6
0
                             "normal contact rate region 2")]
                         
    def model_init(self, policy, kwargs):
        """Initialize the model for the given policy.

        Reads the model file name from the ``policy`` dict (key
        ``'file'``) and stores it on the instance before delegating to
        the parent class's ``model_init``. A missing key is only warned
        about; initialization still proceeds.
        """
        try:
            model_file = policy['file']
        except KeyError:
            # best effort: warn and keep whatever modelFile was set before
            ema_logging.warning("key 'file' not found in policy")
        else:
            self.modelFile = model_file
        super(FluModel, self).model_init(policy, kwargs)
        
if __name__ == "__main__":
    ema_logging.log_to_stderr(ema_logging.INFO)
        
    model = FluModel(r'..\..\models\flu', "fluCase")
    ensemble = ModelEnsemble()
    ensemble.set_model_structure(model)
    
    #add policies
    policies = [{'name': 'no policy',
                 'file': r'\FLUvensimV1basecase.vpm'},
                {'name': 'static policy',
                 'file': r'\FLUvensimV1static.vpm'},
                {'name': 'adaptive policy',
                 'file': r'\FLUvensimV1dynamic.vpm'}
                ]
    ensemble.add_policies(policies)

    ensemble.parallel = True #turn on parallel processing

    import time