def test_perform_experiments():
#    # let's make some interfaces
#    model_a = DummyInterface(None, "A")
#    model_b = DummyInterface(None, "B")
#    
#    # let's add some uncertainties to this; note that both are named
#    # "shared ab 1" even though their ranges differ, which is the
#    # same-name, different-definition case listed below
#    shared_ab_1 = ParameterUncertainty((0,1), "shared ab 1")
#    shared_ab_2 = ParameterUncertainty((0,10), "shared ab 1")
#    model_a.uncertainties = [shared_ab_1, shared_ab_2]
#    model_b.uncertainties = [shared_ab_1, shared_ab_2]
#    
#    ensemble = ModelEnsemble()
#    ensemble.add_model_structures([model_a, model_b])
    
    # test cases to cover:
    # - error when two models share an uncertainty name but define it
    #   differently (a sketch of that check follows below)
    # - union vs. intersection of uncertainties across models (covered below)
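    # a minimal commented sketch of that check; EMAError and the exact call
    # pattern here are assumptions, not confirmed workbench behaviour:
    # clash_a = DummyInterface(None, "clash A")
    # clash_b = DummyInterface(None, "clash B")
    # clash_a.uncertainties = [ParameterUncertainty((0, 1), "clash")]
    # clash_b.uncertainties = [ParameterUncertainty((0, 5), "clash")]
    # clash_ensemble = ModelEnsemble()
    # clash_ensemble.add_model_structures([clash_a, clash_b])
    # try:
    #     clash_ensemble.perform_experiments(10, which_uncertainties=UNION)
    # except EMAError:
    #     pass  # expected: clashing uncertainty definitions are rejected
    # else:
    #     raise AssertionError("clashing uncertainties were not detected")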

    
    # everything shared
    model_a = DummyInterface(None, "A")
    model_b = DummyInterface(None, "B")
    model_c = DummyInterface(None, "C")
    
    # let's add some uncertainties to this
    shared_abc_1 = ParameterUncertainty((0,1), "shared abc 1")
    shared_abc_2 = ParameterUncertainty((0,1), "shared abc 2")
    shared_ab_1 = ParameterUncertainty((0,1), "shared ab 1")
    shared_bc_1 = ParameterUncertainty((0,1), "shared bc 1")
    a_1 = ParameterUncertainty((0,1), "a 1")
    b_1 = ParameterUncertainty((0,1), "b 1")
    model_a.uncertainties = [shared_abc_1, shared_abc_2, shared_ab_1, a_1]
    model_b.uncertainties = [shared_abc_1, shared_abc_2, shared_ab_1, shared_bc_1, b_1]
    model_c.uncertainties = [shared_abc_1, shared_abc_2, shared_bc_1]
    
    # let's add a shared time series outcome to all three models
    outcome_shared = Outcome("test", time=True)
    model_a.outcomes = [outcome_shared]
    model_b.outcomes = [outcome_shared]
    model_c.outcomes = [outcome_shared]
    
    ensemble = ModelEnsemble()
    ensemble.parallel = True
    ensemble.add_model_structures([model_a, model_b, model_c])
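    # UNION should sample over every uncertainty defined by any of the three
    # models; INTERSECTION should restrict sampling to the uncertainties all
    # models share (here shared_abc_1 and shared_abc_2)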
    
    ema_logging.info('------------- union of uncertainties -------------')
    
    results = ensemble.perform_experiments(10, which_uncertainties=UNION,
                                           reporting_interval=1)
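    # results is expected to be an (experiments, outcomes) pair: a structured
    # array of sampled values plus a dict keyed by outcome name (an assumption
    # about the return format); a quick sanity check could look like:
    # experiments, outcomes = results
    # assert 'test' in outcomes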
    
    ema_logging.info('------------- intersection of uncertainties -------------')
    ensemble.perform_experiments(10, which_uncertainties=INTERSECTION,
                                 reporting_interval=1)
    
    # second part: run the NetLogo diffusion model under different policies
    wd = r'./netlogo_models'
    msi = DiffusionModelInterfaceHeterogeneity(wd, 'dmodel')
    ensemble = ModelEnsemble()
    ensemble.set_model_structure(msi)
    
    policies = [{'name': 'neoclassical_all_same',
                 'file': r'\Model_adoption_of_ISG_appliances.nlogo'},
#                 {'name': 'normality each run',
#                  'file': r'\Model_adoption_of_ISG_appliances_-_1_run_normality_household_properties.nlogo'},
                ]
    ensemble.add_policies(policies)
    ensemble.parallel = True
    n = 50
    results = ensemble.perform_experiments(n)
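    # persisting these (slow) NetLogo results would let the commented-out
    # plotting below be rerun without redoing the simulations; save_results
    # and the file name here are assumptions:
    # save_results(results, './data/diffusion_experiments.tar.gz')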
#     fig, axesdict = envelopes(results,
#                 outcomes_to_show=['percentage_of_households_owning_ISG_app'],
#                 group_by='policy',
#                 grouping_specifiers=['no normality',
#                                      'normality each run',
# #                                    'normality each step'
#                                      ],
#                 legend=True,
#                 density='kde', fill=True, titles=None)
#     # set the size of the figure so it looks reasonably nice
#     fig.set_size_inches(8, 5)
#     # save the figure before showing it; calling savefig after show would
#     # typically write an empty canvas
#     plt.savefig("./pictures/KDE comparison savings.png", dpi=75)
#     plt.show()