Example 1
def test_pairs_density():
    results = load_results(r'..\data\eng_trans_100.cPickle', zipped=False)
#    pairs_density(results)
#    pairs_density(results, colormap='binary')

    pairs_density(results, group_by='policy', grouping_specifiers=['no policy'])
    plt.show()
Example 2
def test_envelopes3d_group_by():
    results = expWorkbench.load_results(r'1000 flu cases.cPickle')

    envelopes3d_group_by(results, 
                         outcome='infected fraction R1', 
                         groupBy="normal interregional contact rate",
                         logSpace=True)
Example 3
 def test_prim_init_select(self):
     self.results = load_results(r'../data/1000 flu cases no policy.bz2')
     self.classify = flu_classify        
     
     experiments, outcomes = self.results
     
     unc = experiments.dtype.descr
     
     # test initialization, including t_coi calculation in case of searching
     # for results equal to or higher than the threshold
     outcomes['death toll'] = outcomes['deceased population region 1'][:, -1]
     results = experiments, outcomes
     threshold = 10000
     prim_obj = prim.Prim(results, classify='death toll', 
                          threshold_type=prim.ABOVE, threshold=threshold,
                          inc_unc=unc)
     
     value = np.ones((experiments.shape[0],))
     value = value[outcomes['death toll'] >= threshold].shape[0]
     self.assertTrue(prim_obj.t_coi==value)
             
     # test initialization, including t_coi calculation in case of searching
     # for results equal to or lower than the threshold
     threshold = 1000
     prim_obj = prim.Prim(results, classify='death toll', 
                          threshold_type=prim.BELOW, 
                          threshold=threshold)
     
     value = np.ones((experiments.shape[0],))
     value = value[outcomes['death toll'] <= threshold].shape[0]
     self.assertTrue(prim_obj.t_coi==value)
     
     prim.Prim(self.results, self.classify, threshold=prim.ABOVE)
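Note: the reference value computed above with np.ones and boolean indexing is
simply a count of the cases of interest; a boolean mask sums to the number of
True entries, so the same number is obtained with

value = int(np.sum(outcomes['death toll'] >= threshold))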
Example 4
def test_kde_over_time():
    results = load_results(r'./../data/eng_trans_100.cPickle', zipped=False)
    
#    kde_over_time(results, log=False)
#    kde_over_time(results, log=True)
    kde_over_time(results, group_by='policy', grouping_specifiers=['no policy', 'adaptive policy'])
    plt.show()
Example 5
 def test_prepare_outcomes(self):
     fn = r'../data/1000 flu cases no policy.tar.gz'
     results = load_results(fn)
     
     # string type correct
     ooi = 'nr deaths'
     results[1][ooi] = results[1]['deceased population region 1'][:,-1]
     y, categorical = fs._prepare_outcomes(results[1], ooi)
     
     self.assertFalse(categorical)
     self.assertTrue(len(y.shape)==1)
     
     # string type not correct --> KeyError
     with self.assertRaises(KeyError):
         fs._prepare_outcomes(results[1], "non existing key")
     
     # classify function correct
     def classify(data):
         result = data['deceased population region 1']
         classes =  np.zeros(result.shape[0])
         classes[result[:, -1] > 1000000] = 1
         return classes
     
     y, categorical = fs._prepare_outcomes(results[1], classify)
     
     self.assertTrue(categorical)
     self.assertTrue(len(y.shape)==1)
     
     # neither string nor classify function --> TypeError
     with self.assertRaises(TypeError):
         fs._prepare_outcomes(results[1], 1)
Example 6
    def test_get_univariate_feature_scores(self):
        fn = r'../data/1000 flu cases no policy.tar.gz'
        results = load_results(fn)
        
        def classify(data):
            #get the output for deceased population
            result = data['deceased population region 1']
            
            #make an empty array of length equal to number of cases 
            classes =  np.zeros(result.shape[0])
            
            #if deceased population is higher than 1,000,000 people, classify as 1
            classes[result[:, -1] > 1000000] = 1
            
            return classes
        
        # f classify
        scores = fs.get_univariate_feature_scores(results, classify)
        self.assertEqual(len(scores), len(results[0].dtype.fields))

        # chi2
        scores = fs.get_univariate_feature_scores(results, classify, 
                                                  score_func='chi2')
        self.assertEqual(len(scores), len(results[0].dtype.fields))
        
        # f regression
        ooi = 'nr deaths'
        results[1][ooi] = results[1]['deceased population region 1'][:,-1]
        scores = fs.get_univariate_feature_scores(results, ooi)
        self.assertEqual(len(scores), len(results[0].dtype.fields))
Example 7
 def test_get_rf_feature_scores(self):
     fn = r'../data/1000 flu cases no policy.tar.gz'
     results = load_results(fn)
     
     def classify(data):
         #get the output for deceased population
         result = data['deceased population region 1']
         
         #make an empty array of length equal to number of cases 
         classes =  np.zeros(result.shape[0])
         
         #if deceased population is higher than 1,000,000 people, classify as 1
         classes[result[:, -1] > 1000000] = 1
         
         return classes
     
     scores, forest = fs.get_rf_feature_scores(results, classify, 
                                               random_state=10)
     
     self.assertEqual(len(scores), len(results[0].dtype.fields))
     self.assertTrue(isinstance(forest, RandomForestClassifier))
     
     ooi = 'nr deaths'
     results[1][ooi] = results[1]['deceased population region 1'][:,-1]
     scores, forest = fs.get_rf_feature_scores(results, ooi, 
                                               random_state=10)
     
     self.assertEqual(len(scores), len(results[0].dtype.fields))
     self.assertTrue(isinstance(forest, RandomForestRegressor))
Example 8
def test_pairs_lines():
    results = load_results(r'..\data\eng_trans_100.cPickle', zipped=False)    
    pairs_lines(results)
#    set_fig_to_bw(pairs_lines(results)[0])
    
    pairs_lines(results, group_by='policy')
#    set_fig_to_bw(pairs_lines(results, group_by='policy')[0])
    plt.show()
Example 9
 def test_pca(self):
     results = load_results(r'../data/1000 flu cases no policy.bz2')
     classify = flu_classify
     
     
     prim_obj = prim.Prim(results, classify, 
                          threshold=0.8)
     prim_obj.perform_pca()
Example 10
 def test_in_box(self):
     results = load_results(r'../data/1000 flu cases no policy.bz2')
     prim_obj = prim.Prim(results, flu_classify, threshold=0.8)
     
     box = prim_obj.make_box(results[0])
     # I need an encompassing box
     # the shape[0] of the return should be equal to experiment.shape[0]
     # assuming that the box is an encompassing box
     self.assertEqual(prim_obj.in_box(box).shape[0], results[0].shape[0])
Example 11
 def test_select(self):
     results = load_results(r'../data/1000 flu cases no policy.bz2')
     classify = flu_classify
     
     prim_obj = prim.Prim(results, classify, 
                          threshold=0.8)
     box = prim_obj.find_box()
     sb = 27
     box.select(sb)
     
     self.assertEqual(len(box.mean), sb+1)
Example 12
def test_envelopes3d():
    results = expWorkbench.load_results(r"1000 flu cases.cPickle")
    exp, res = results

    logical = exp["policy"] == "adaptive policy"
    new_exp = exp[logical][0:100]
    new_res = {}
    for key, value in res.items():
        new_res[key] = value[logical][0:100, :]

    envelopes3d((new_exp, new_res), "infected fraction R1", logSpace=True)
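Several later examples repeat this mask-and-slice pattern by hand. A minimal
reusable sketch (select_policy is a hypothetical helper, not part of the
workbench API):

def select_policy(results, policy, n=None):
    # hypothetical helper: keep only the runs for the given policy,
    # optionally truncated to the first n runs
    experiments, outcomes = results
    logical = experiments['policy'] == policy
    new_exp = experiments[logical][:n]
    new_out = {key: value[logical][:n] for key, value in outcomes.items()}
    return new_exp, new_out

envelopes3d(select_policy(results, 'adaptive policy', n=100),
            'infected fraction R1', logSpace=True)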
Example 13
 def test_prim_init_exception(self):
     results = load_results(r'../data/1000 flu cases no policy.bz2')
     self.assertRaises(prim.PrimException, 
                       prim.Prim,
                       results, 
                       'deceased population region 1', 
                       threshold=0.8)
     
     def faulty_classify(outcomes):
         return outcomes['deceased population region 1'][:, 0:10]
     self.assertRaises(prim.PrimException, prim.Prim, results, 
                       faulty_classify, threshold=0.8)
Example 14
def test_pairs_scatter():
    results = load_results(r'..\data\eng_trans_100.cPickle', zipped=False)    
    
    pairs_scatter(results)
#    set_fig_to_bw(pairs_scatter(results)[0])
    
    pairs_scatter(results, group_by='policy',
                  grouping_specifiers='basic policy', legend=False)
#    set_fig_to_bw(pairs_scatter(results, group_by='policy')[0])
    
    pairs_scatter(results, group_by='policy', 
                  grouping_specifiers=['no policy', 'adaptive policy'])
#    set_fig_to_bw(pairs_scatter(results, group_by='policy', legend=False)[0])
    plt.show()
Example 15
def test_multiple_densities():
    results = load_results(r'..\data\eng_trans_100.cPickle', zipped=False)
    
    
#    multiple_densities(results, 
#                  outcome_to_show="total fraction new technologies", 
#                  group_by="policy", 
#                  points_in_time = [2000])
#    multiple_densities(results, 
#                  outcome_to_show="total fraction new technologies", 
#                  group_by="policy", 
#                  points_in_time = [2000, 2100])
#    multiple_densities(results, 
#                  outcome_to_show="total fraction new technologies", 
#                  group_by="policy", 
#                  points_in_time = [2000, 2020, 2100])
#    multiple_densities(results, 
#                  outcome_to_show="total fraction new technologies", 
#                  group_by="policy", 
#                  points_in_time = [2000, 2020, 2040, 2060])
#    multiple_densities(results, 
#                  outcome_to_show="total fraction new technologies", 
#                  group_by="policy", 
#                  points_in_time = [2020, 2040, 2060, 2080, 2100])
#    multiple_densities(results, 
#                  outcome_to_show="total fraction new technologies", 
#                  group_by="policy", 
#                  grouping_specifiers="no policy",
#                  points_in_time = [2000, 2020, 2040, 2060, 2080, 2100],
#                  plot_type=ENV_LIN,
#                  experiments_to_show=[1,2,10])
#    multiple_densities(results, 
#                  outcome_to_show="total fraction new technologies", 
#                  group_by="policy", 
#                  grouping_specifiers="no policy",
#                  points_in_time = [2000, 2020, 2040, 2060, 2080, 2100],
#                  plot_type=ENVELOPE,
#                  experiments_to_show=[1,2,10])
    multiple_densities(results,
#                      group_by="policy",
#                      grouping_specifiers="no policy",
                       points_in_time=[2040, 2045, 2050, 2060, 2070, 2080],
                       plot_type=ENVELOPE,
                       density=KDE,
                       log=False,
#                      experiments_to_show=[np.arange(0, 100, 20)]
                       )
    
    plt.show()
Example 16
def test_group_results():
    results = load_results(r'./../data/eng_trans_100.cPickle', zipped=False)
    experiments, outcomes = results
    
    # test indices
    grouping_specifiers = {'set1':np.arange(0,11),
                           'set2':np.arange(11,25),
                           'set3':np.arange(25,experiments.shape[0])}
    groups = group_results(experiments, outcomes, 
                           group_by='index', 
                           grouping_specifiers=grouping_specifiers)
    total_data = 0
    for value in groups.values():
        total_data += value[0].shape[0]
    print(experiments.shape[0], total_data)
    
    # test continuous parameter type
    array = experiments['average planning and construction period T1']
    grouping_specifiers = make_continuous_grouping_specifiers(array, nr_of_groups=5)
    groups = group_results(experiments, outcomes, 
                           group_by='average planning and construction period T1', 
                           grouping_specifiers=grouping_specifiers) 
    total_data = 0
    for value in groups.values():
        total_data += value[0].shape[0]
    print(experiments.shape[0], total_data)
    
    # test integer type
    array = experiments['seed PR T1']
    grouping_specifiers = make_continuous_grouping_specifiers(array, nr_of_groups=10)
    groups = group_results(experiments, outcomes, 
                           group_by='seed PR T1', 
                           grouping_specifiers=grouping_specifiers) 
    total_data = 0
    for value in groups.values():
        total_data += value[0].shape[0]
    print(experiments.shape[0], total_data)

    
    # test categorical type
    grouping_specifiers = set(experiments["policy"])
    groups = group_results(experiments, outcomes, 
                       group_by='policy', 
                       grouping_specifiers=grouping_specifiers)
    total_data = 0
    for value in groups.values():
        total_data += value[0].shape[0]
    print(experiments.shape[0], total_data)
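The total_data bookkeeping is repeated above for every grouping. A small
sketch that expresses the sanity check once (count_grouped_cases is a
hypothetical name; it relies only on each group value being an
(experiments, outcomes) tuple, as returned by group_results):

def count_grouped_cases(groups):
    # sum the number of experiment rows over all groups
    return sum(value[0].shape[0] for value in groups.values())

print(count_grouped_cases(groups), experiments.shape[0])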
Example 17
    def test_show_boxes(self):
#        results = load_results(r'../data/1000 flu cases no policy.bz2')
#        classify = flu_classify

        results = load_results(r'../data/scarcity 1000.bz2')
        classify = scarcity_classify
                
        prim_obj = prim.Prim(results, classify, 
                             threshold=0.7)
        prim_obj.find_box()
        prim_obj.find_box()
        
        prim_obj.write_boxes_to_stdout()
        
        prim_obj.show_boxes()   
        plt.show()
Example 18
 def test_compare(self):
     self.results = load_results(r'../data/scarcity 1000.bz2')
     self.classify = scarcity_classify
     
     prim_obj = prim.Prim(self.results, self.classify, threshold=0.8)
     
     # all dimensions the same
     a = np.array([(0, 1),
                   (0, 1)],
                  dtype=[('a', float),
                         ('b', float)])
     b = np.array([(0, 1),
                   (0, 1)],
                  dtype=[('a', float),
                         ('b', float)])

     self.assertTrue(np.all(prim_obj.compare(a, b)))
     
     # all dimensions different
     a = np.array([(0, 1),
                   (0, 1)],
                  dtype=[('a', float),
                         ('b', float)])
     b = np.array([(1, 1),
                   (0, 0)],
                  dtype=[('a', float),
                         ('b', float)])
     test = prim_obj.compare(a, b) == False
     self.assertTrue(np.all(test))
     
     # dimensions 1 different and dimension 2 the same
     a = np.array([(0, 1),
                   (0, 1)],
                  dtype=[('a', float),
                         ('b', float)])
     b = np.array([(1, 1),
                   (0, 1)],
                  dtype=[('a', float),
                         ('b', float)])
     test = prim_obj.compare(a, b)
     test = (test[0] == False) & (test[1] == True)
     self.assertTrue(test)
Example 19
 def test_find_boxes(self):
     results = load_results(r'../data/1000 flu cases no policy.bz2')
     classify = flu_classify
     
     
     prim_obj = prim.Prim(results, classify, 
                          threshold=0.8)
     box_1 = prim_obj.find_box()
     prim_obj._update_yi_remaining()
     
     after_find = box_1.yi.shape[0] + prim_obj.yi_remaining.shape[0]
     self.assertEqual(after_find, prim_obj.y.shape[0])
     
     box_2 = prim_obj.find_box()
     prim_obj._update_yi_remaining()
     
     after_find = box_1.yi.shape[0] +\
                  box_2.yi.shape[0] +\
                  prim_obj.yi_remaining.shape[0]
     self.assertEqual(after_find, prim_obj.y.shape[0])
Example 20
ema_logging.log_to_stderr(ema_logging.INFO)

def classify(data):
    #get the output for deceased population
    result = data['deceased population region 1']

    #make an empty array of length equal to number of cases
    classes = np.zeros(result.shape[0])

    #if deceased population is higher than 1,500,000 people, classify as 1
    classes[result[:, -1] > 1500000] = 1

    return classes

results = load_results(r".\data\1000 flu cases no policy.cPickle")

#perform prim on modified results tuple
res = pca_prim.perform_pca_prim(results, 
                                classify,
                                mass_min=0.075, 
                                threshold=0.8, 
                                threshold_type=1)

rotation_matrix, row_names, column_names, rotated_experiments, boxes = res

#visualize results
prim.write_prim_to_stdout(boxes)

# we need to use the rotated experiments now
results = (rotated_experiments, results[1])
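Nearly every classify function in this gallery thresholds the final time step
of a single outcome. A hedged factory sketch that generates such functions
(make_classifier is a hypothetical helper, not part of the workbench API):

import numpy as np

def make_classifier(outcome, threshold):
    # hypothetical helper: build a classify function that flags runs
    # whose final value for `outcome` exceeds `threshold`
    def classify(data):
        result = data[outcome]
        classes = np.zeros(result.shape[0])
        classes[result[:, -1] > threshold] = 1
        return classes
    return classify

classify = make_classifier('deceased population region 1', 1500000)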
Example 21
'''
Created on 16 okt. 2012

@author: localadmin
'''
from expWorkbench import load_results
import numpy as np
import matplotlib.pyplot as plt

from analysis.plotting import lines
from analysis import plotting

results = load_results(r'.\data\2000 flu cases no policy.bz2')

experiments, outcomes = results

# get indices of the worst runs in terms of deaths and max fraction of
# population that is ill at any given point in time
deaths = outcomes['deceased population region 1'][:, -1]
peak = np.max(outcomes['infected fraction R1'], axis=1)

deaths = [(deaths[i], i) for i in range(deaths.shape[0])]
deaths = sorted(deaths, reverse=True)
death_indices = [death[1] for death in deaths]

peak = [(peak[i], i) for i in range(peak.shape[0])]
peak = sorted(peak, reverse=True)
peak_indices = [element[1] for element in peak]

# combine the top 20 of both
indices = death_indices[0:20]
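Building (value, index) tuples and sorting them works, but np.argsort returns
the same ordering directly; reversing it gives the indices from worst to best:

death_indices = np.argsort(outcomes['deceased population region 1'][:, -1])[::-1]
peak_indices = np.argsort(np.max(outcomes['infected fraction R1'], axis=1))[::-1]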
Example 22
'''
Created on Sep 8, 2011

.. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
                gonengyucel

'''
import matplotlib.pyplot as plt

from analysis.clusterer import cluster

from expWorkbench import load_results, ema_logging

ema_logging.log_to_stderr(ema_logging.INFO)

#load the data
data = load_results(r'..\examples\100 flu cases no policy.cPickle')

# specify the number of desired clusters
# note: the meaning of cValue is tied to the value for cMethod
cValue = 5

#perform cluster analysis
dist, clusteraloc, runlog, z = cluster(data=data, 
                                    outcome='deceased population region 1', 
                                    distance='gonenc', 
                                    interClusterDistance='complete', 
                                    cMethod = 'maxclust',
                                    cValue = cValue,
                                    plotDendrogram=False, 
                                    plotClusters=False, 
                                    groupPlot=False)
Example 23
        #self.uncertainties.pop()
        self.uncertainties.append(
            LookupUncertainty([(0, 4), (1, 5), (1, 5), (0, 2), (0, 2)],
                              "TF2", 'approximation', self, 0, 10))
        #self.uncertainties.pop()
        self.uncertainties.append(ParameterUncertainty((0.02, 0.08), "rate1"))
        self.uncertainties.append(ParameterUncertainty((0.02, 0.08), "rate2"))
        self.uncertainties.append(
            LookupUncertainty([[(0.0, 0.05), (0.25, 0.15), (0.5, 0.4), (0.75, 1), (1, 1.25)],
                               [(0.0, 0.1), (0.25, 0.25), (0.5, 0.75), (1, 1.25)],
                               [(0.0, 0.0), (0.1, 0.2), (0.3, 0.6), (0.6, 0.9), (1, 1.25)]],
                              "TF3", 'categories', self, 0, 2))
        #self.uncertainties.pop()
        self.delete_lookup_uncertainties()

        
if __name__ == "__main__":
    logger = logging.log_to_stderr(logging.INFO)
    model = lookup_model(r'..\lookups', "sampleModel")

    #model.step = 4 #reduce data to be stored
    ensemble = ModelEnsemble()
    ensemble.set_model_structure(model)

    #toggle parallel execution of the experiments
    ensemble.parallel = False
    
    #run policy with old cases
    results = ensemble.perform_experiments(10)
    save_results(results, 'lookup_3methods.cpickle')
    
    results = load_results('lookup_3methods.cpickle')
    outcomes = ['TF', 'TF2', 'TF3', 'flow1']
    lines(results, outcomes, density=True, hist=True)
    plt.show()  
Example 24
'''
Created on 7 Sep 2011

@author: chamarat
'''
import matplotlib.pyplot as plt

from expWorkbench import load_results
from analysis.graphs import envelopes

results = load_results(r'.\data\TFSC_policies.cPickle')

envelopes(results, column='policy', fill=True, legend=False)
fig = plt.gcf()
fig.set_size_inches(15, 5)
plt.savefig("policycomparison.png", dpi=75)
Example 26
'''
Created on Aug 21, 2012

@author: jhkwakkel
'''
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np

from expWorkbench import load_results
from analysis.plotting import lines, envelopes
from analysis.b_and_w_plotting import set_fig_to_bw

# load results
results = load_results(r'.\data\JotKE 50000.bz2')
experiments, outcomes = results

# pre process the data
new_outcomes = {}
new_outcomes['total capacity'] = (outcomes['capa central'] +
                                  outcomes['capa decentral'])
new_outcomes['total generation'] = (outcomes['gen central'] +
                                    outcomes['gen decentral'])
new_outcomes['total fossil'] = (outcomes['central coal'] +
                                outcomes['central gas'] +
                                outcomes['decentral gas'])
new_outcomes['total non-fossil'] = (new_outcomes['total generation'] -
                                    new_outcomes['total fossil'])
new_outcomes['avg. price'] = outcomes['avg price']
new_outcomes['fraction non-fossil'] = (new_outcomes['total non-fossil'] /
                                       new_outcomes['total generation'])
Example 27

if __name__ == "__main__":

    #    CONSTRUCTING THE ENSEMBLE AND SAVING THE RESULTS
    EMAlogging.log_to_stderr(EMAlogging.DEBUG)
    #
    #    model = ScarcityModel(r'D:\tbm-g367\workspace\EMA workbench\src\sandbox\sils',"scarcity")
    #
    #    ensemble = ModelEnsemble()
    #    ensemble.set_model_structure(model)
    ##    ensemble.parallel = True
    #    results = ensemble.perform_experiments(1000)
    #    save_results(results, r'base.cPickle')

    results = load_results(r'base.cPickle')

    #    PLOTS FOR ENSEMBLE
    #    fig = make_interactive_plot(results, outcomes=['relative market price'], type='lines')
    #    fig = lines(results, outcomes = ['relative market price'], density=True, hist=False)
    #    plt.show()

    #    CONSTRUCTING THE CLUSTERS
    #    dRow, clusters, z = clusterer.cluster(results,
    #                              outcome='relative market price',
    #                              distance='gonenc',
    #                              cMethod='maxclust',
    #                              cValue=20,
    #                              plotDendrogram=True,
    #                              plotClusters=True,
    #                              groupPlot=True)
Example 28
def perform_loop_knockout():
    unique_edges = [['In Goods', 'lost'],
                    ['loss unprofitable extraction capacity', 'decommissioning extraction capacity'],
                    ['production', 'In Goods'],
                    ['production', 'lost'],
                    ['production', 'Supply'],
                    ['Real Annual Demand', 'substitution losses'],
                    ['Real Annual Demand', 'price elasticity of demand losses'],
                    ['Real Annual Demand', 'desired extraction capacity'],
                    ['Real Annual Demand', 'economic demand growth'],
                    ['average recycling cost', 'relative market price'],
                    ['recycling fraction', 'lost'],
                    ['commissioning recycling capacity', 'Recycling Capacity Under Construction'],
                    ['maximum amount recyclable', 'recycling fraction'],
                    ['profitability recycling', 'planned recycling capacity'],
                    ['relative market price', 'price elasticity of demand losses'],
                    ['constrained desired recycling capacity', 'gap between desired and constrained recycling capacity'],
                    ['profitability extraction', 'planned extraction capacity'],
                    ['commissioning extraction capacity', 'Extraction Capacity Under Construction'],
                    ['desired recycling', 'gap between desired and constrained recycling capacity'],
                    ['Installed Recycling Capacity', 'decommissioning recycling capacity'],
                    ['Installed Recycling Capacity', 'loss unprofitable recycling capacity'],
                    ['average extraction costs', 'profitability extraction'],
                    ['average extraction costs', 'relative attractiveness recycling']]

    unique_cons_edges = [['recycling', 'recycling'],
                           ['recycling', 'supply demand ratio'],
                           ['decommissioning recycling capacity', 'recycling fraction'],
                           ['returns to scale', 'relative attractiveness recycling'],
                           ['shortage price effect', 'relative price last year'],
                           ['shortage price effect', 'profitability extraction'],
                           ['loss unprofitable extraction capacity', 'loss unprofitable extraction capacity'],
                           ['production', 'recycling fraction'],
                           ['production', 'constrained desired recycling capacity'],
                           ['production', 'new cumulatively recycled'],
                           ['production', 'effective fraction recycled of supplied'],
                           ['loss unprofitable recycling capacity', 'recycling fraction'],
                           ['average recycling cost', 'loss unprofitable recycling capacity'],
                           ['recycling fraction', 'new cumulatively recycled'],
                           ['substitution losses', 'supply demand ratio'],
                           ['Installed Extraction Capacity', 'Extraction Capacity Under Construction'],
                           ['Installed Extraction Capacity', 'commissioning extraction capacity'],
                           ['Installed Recycling Capacity', 'Recycling Capacity Under Construction'],
                           ['Installed Recycling Capacity', 'commissioning recycling capacity'],
                           ['average extraction costs', 'profitability extraction']]

#    CONSTRUCTING THE ENSEMBLE AND SAVING THE RESULTS
    EMAlogging.log_to_stderr(EMAlogging.INFO)
    results = load_results(r'base.cPickle')

#    GETTING OUT THOSE BEHAVIOURS AND EXPERIMENT SETTINGS
#    Indices of a number of examples, these will be looked at.
    runs = [526,781,911,988,10,780,740,943,573,991]
    VOI = 'relative market price'

    results_of_interest = experiment_settings(results,runs,VOI)
    cases_of_interest = experiments_to_cases(results_of_interest[0])
    behaviour_int = results_of_interest[1][VOI]

#    CONSTRUCTING INTERVALS OF ATOMIC BEHAVIOUR PATTERNS
    ints = intervals(behaviour_int,False)

#    GETTING OUT ONLY THOSE OF MAXIMUM LENGTH PER BEHAVIOUR
    max_intervals = intervals_interest(ints)

#    THIS HAS TO DO WITH THE MODEL FORMULATION OF THE SWITCHES/VALUES
    double_list = [6,9,11,17,19]

    indCons = len(unique_edges)
#    for elem in unique_cons_edges:
#        unique_edges.append(elem)

    current = os.getcwd()

    for beh_no in range(0,10):
#        beh_no = 0 # Varies between 0 and 9, index style.
        interval = max_intervals[beh_no]

        rmp = behaviour_int[beh_no]
    #    rmp = rmp[interval[0]:interval[1]]
        x = range(0,len(rmp))
        fig = plt.figure()
        ax = fig.add_subplot(111)

        vensim.be_quiet()
    #    for loop_index in range(7,8):
        for loop_index in range(1,len(unique_edges)+1):

            if loop_index-indCons > 0:
                model_location = current + r'\Models\Consecutive\Metals EMA.vpm'
            elif loop_index == 0:
                model_location = current + r'\Models\Base\Metals EMA.vpm'
            else:
                model_location = current + r'\Models\Switches\Metals EMA.vpm'

            serie = run_interval(model_location,loop_index,
                                  interval,'relative market price',
                                  unique_edges,indCons,double_list,
                                  cases_of_interest[beh_no])

            if serie.shape != rmp.shape:
                EMAlogging.info('Loop %s created a floating point error' % (loop_index))
                EMAlogging.info('Caused by trying to switch %s' % (unique_edges[loop_index-1]))

            if serie.shape == rmp.shape:
                ax.plot(x,serie,'b')

    #        data = np.zeros(rmp.shape[0])
    #        data[0:serie.shape[0]] = serie
    #        ax.plot(x,data,'b')

        ax.plot(x,rmp,'r')
        ax.axvspan(interval[0]-1,interval[1], facecolor='lightgrey', alpha=0.5)
        f_name = 'switched unique edges only'+str(beh_no)
        plt.savefig(f_name)
Example 29
'''
Created on Sep 8, 2011

@author: gonengyucel, jhkwakkel
'''
import matplotlib.pyplot as plt

from analysis.clusterer import cluster

from expWorkbench import load_results
from expWorkbench import ema_logging

ema_logging.log_to_stderr(ema_logging.INFO)

#load the data
data = load_results(r'..\gallery\data\100 flu cases no policy.cPickle')

# specify the number of desired clusters
# note: the meaning of cValue is tied to the value for cMethod
cValue = 5

#perform cluster analysis
dist, clusteraloc, runlog, z = cluster(data=data, 
                                    outcome='deceased population region 1', 
                                    distance='gonenc', 
                                    interClusterDistance='complete', 
                                    cMethod = 'maxclust',
                                    cValue = cValue,
                                    plotDendrogram=False, 
                                    plotClusters=False, 
                                    groupPlot=False)
Example 31
'''
@author: chamarat
'''

import matplotlib.pyplot as plt

from expWorkbench import load_results
from analysis.plotting import envelopes
import analysis.plotting_util as plottingUtil

# force matplotlib to use tight layout
# see http://matplotlib.sourceforge.net/users/tight_layout_guide.html 
# for details
plottingUtil.TIGHT = True

#get the data
results = load_results(r'.\data\TFSC_corrected.bz2')

# make an envelope
fig, axesdict = envelopes(results, 
                outcomes_to_show=['total fraction new technologies'], 
                group_by='policy', 
                grouping_specifiers=['No Policy',
                                     'Basic Policy',
                                     'Optimized Adaptive Policy'],
                legend=False,
                density='kde', fill=True, titles=None)

# set the size of the figure to look reasonably nice
fig.set_size_inches(8, 5)

# save figure
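The snippet breaks off at the save comment; following the policycomparison
example above (Example 24), a plausible completion with a hypothetical
filename is:

plt.savefig('envelopes.png', dpi=75)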
Example 32
import numpy as np
import matplotlib.pyplot as plt

from expWorkbench import load_results
from analysis.prim import perform_prim, write_prim_to_stdout
from analysis.prim import show_boxes_individually


def classify(data):

    result = data["total fraction new technologies"]
    classes = np.zeros(result.shape[0])
    classes[result[:, -1] > 0.8] = 1

    return classes


if __name__ == "__main__":

    results = load_results(r"CESUN_optimized_1000_new.cPickle")
    experiments, results = results
    logicalIndex = experiments["policy"] == "Optimized Adaptive Policy"
    newExperiments = experiments[logicalIndex]
    newResults = {}
    for key, value in results.items():
        newResults[key] = value[logicalIndex]
    results = (newExperiments, newResults)

    boxes = perform_prim(results, "total fraction new technologies",
                         threshold=0.6, threshold_type=-1)

    write_prim_to_stdout(boxes)
    show_boxes_individually(boxes, results)
    plt.show()
Example 33
'''
Created on 26 sep. 2011

@author: jhkwakkel
'''
import matplotlib.pyplot as plt

from expWorkbench import load_results
from analysis.graphs import lines

data = load_results(r'../../../src/analysis/1000 flu cases.cPickle')
fig = lines(data, column='fatality ratio region 1', density=False)
plt.show()
Example 34
import numpy as np

from expWorkbench import load_results
from analysis.prim import perform_prim, write_prim_to_stdout
from analysis.prim import show_boxes_individually


def classify(data):

    result = data['total fraction new technologies']
    classes = np.zeros(result.shape[0])
    classes[result[:, -1] > 0.8] = 1

    return classes


if __name__ == '__main__':

    results = load_results(r'CESUN_optimized_1000_new.cPickle')
    experiments, results = results
    logicalIndex = experiments['policy'] == 'Optimized Adaptive Policy'
    newExperiments = experiments[logicalIndex]
    newResults = {}
    for key, value in results.items():
        newResults[key] = value[logicalIndex]
    results = (newExperiments, newResults)

    boxes = perform_prim(results,
                         'total fraction new technologies',
                         threshold=0.6,
                         threshold_type=-1)

    write_prim_to_stdout(boxes)
    show_boxes_individually(boxes, results)
Example 35
def test_lines3d():
    results = expWorkbench.load_results(r'eng_trans_100.cPickle')
    lines3d(results, outcomes=['installed capacity T1',
                               'installed capacity T2'])
Example 36
import numpy as np

from expWorkbench import load_results
from analysis import prim


def classify(data):
    #get the output for deceased population
    result = data['deceased population region 1']

    #make an empty array of length equal to number of cases
    classes = np.zeros(result.shape[0])

    #if deceased population is higher than 1,000,000 people,
    #classify as 1
    classes[result[:, -1] > 1000000] = 1

    return classes


#load data
results = load_results(r'../../../src/analysis/1000 flu cases.cPickle',
                       zipped=False)
experiments, results = results

#extract results for 1 policy
logicalIndex = experiments['policy'] == 'no policy'
newExperiments = experiments[logicalIndex]
newResults = {}
for key, value in results.items():
    newResults[key] = value[logicalIndex]

results = (newExperiments, newResults)

#perform prim on modified results tuple
prims, uncertainties, x = prim.perform_prim(results,
                                            classify,
                                            threshold=0.8)
Example 37
import numpy as np

from expWorkbench import load_results, ema_logging
from analysis import prim

ema_logging.log_to_stderr(level=ema_logging.INFO)

def classify(data):
    #get the output for deceased population
    result = data['deceased population region 1']

    #make an empty array of length equal to number of cases
    classes = np.zeros(result.shape[0])

    #if deceased population is higher than 1,000,000 people, classify as 1
    classes[result[:, -1] > 1000000] = 1

    return classes

#load data
results = load_results(r'./data/1000 flu cases.bz2')
experiments, results = results

#extract results for 1 policy
logical = experiments['policy'] == 'no policy'
new_experiments = experiments[logical]
new_results = {}
for key, value in results.items():
    new_results[key] = value[logical]

results = (new_experiments, new_results)

#perform prim on modified results tuple
prim_obj = prim.Prim(results, classify, threshold=0.8, threshold_type=1)
box_1 = prim_obj.find_box()
Example 38
'''
Created on 26 sep. 2011

@author: jhkwakkel
'''
import matplotlib.pyplot as plt

from expWorkbench import load_results
from analysis.plotting import lines

data = load_results(r'../../../src/analysis/1000 flu cases.cPickle')
fig = lines(data, group_by='fatality ratio region 1')
plt.show()
Example 39
'''
Created on Oct 2, 2012

@author: sibeleker
'''
from expWorkbench import load_results
import matplotlib
import matplotlib.pyplot as plt
from analysis.graphs import lines, envelopes

results = load_results('burnout_1000_approx.cpickle')


Example 40
'''
Created on Sep 8, 2011

@author: gonengyucel, jhkwakkel
'''
import matplotlib.pyplot as plt

from analysis.clusterer import cluster

from expWorkbench import load_results
from expWorkbench import ema_logging

ema_logging.log_to_stderr(ema_logging.INFO)

#load the data
data = load_results(r'..\gallery\data\100 flu cases no policy.bz2')

# specify the number of desired clusters
# note: the meaning of cValue is tied to the value for cMethod
cValue = 5

#perform cluster analysis
dRow, clusters, z = cluster(data=data,
                            outcome='deceased population region 1',
                            distance='gonenc',
                            interClusterDistance='complete',
                            cMethod='maxclust',
                            cValue=cValue,
                            plotDendrogram=False,
                            plotClusters=False,
                            groupPlot=False)