Example no. 1
def test_load_results():

    data = np.random.rand(1000, 1000)
    file_name = "test.bz2"
    util.save_results(data, file_name)

    ema_logging.log_to_stderr(ema_logging.DEBUG)
    util.load_results(file_name)
    os.remove(file_name)
    ema_logging.debug("removing " + file_name)
Example no. 2
def test_save_results():
    ema_logging.log_to_stderr(ema_logging.DEBUG)
    data = util.load_results("./data/1000 flu cases no policy.cPickle", zip=False)
    file_name = "test.bz2"
    util.save_results(data, file_name)
    os.remove(file_name)
    ema_logging.debug("removing " + file_name)
Example no. 3
def load_flu_data():
    path = os.path.dirname(__file__)
    fn = './data/1000 flu cases no policy.tar.gz'
    fn = os.path.join(path, fn)

    experiments, outcomes = load_results(fn)
    return experiments, outcomes
Example no. 4
def load_scarcity_data():
    path = os.path.dirname(__file__)
    fn = './data/1000 runs scarcity.tar.gz'
    fn = os.path.join(path, fn)

    experiments, outcomes = load_results(fn)
    return experiments, outcomes
Example no. 5
def test_load_results():
    # test for 1d
    # test for 2d
    # test for 3d
    # test for nd

    nr_experiments = 10000
    experiments = np.recarray((nr_experiments,),
                           dtype=[('x', float), ('y', float)])
    outcome_a = np.random.rand(nr_experiments,1)
    
    results = (experiments, {'a': outcome_a})
    
    save_results(results, r'../data/test.tar.gz')
    experiments, outcomes  = load_results(r'../data/test.tar.gz')
    
    logical = np.allclose(outcomes['a'],outcome_a)
    
    os.remove('../data/test.tar.gz')
    
    if logical:
        ema_logging.info('1d loaded successfully')
    
    nr_experiments = 1000
    nr_timesteps = 100
    nr_replications = 10
    experiments = np.recarray((nr_experiments,),
                           dtype=[('x', float), ('y', float)])
    outcome_a = np.random.rand(nr_experiments,nr_timesteps,nr_replications)
     
    results = (experiments, {'a': outcome_a})
    save_results(results, r'../data/test.tar.gz')
    experiments, outcomes = load_results(r'../data/test.tar.gz')
    
    logical = np.allclose(outcomes['a'],outcome_a)
    
    os.remove('../data/test.tar.gz')
    
    if logical:
        ema_logging.info('3d loaded successfully')
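
The same round trip can be folded into a self-checking helper; a minimal sketch, assuming save_results and load_results are imported as in the test above (the helper name and the assert-based structure are additions for illustration):

import os
import numpy as np

def check_round_trip(tmp_file='test_roundtrip.tar.gz'):
    # build a small experiments/outcomes pair, save it, load it back,
    # and assert that the outcome array survived unchanged
    experiments = np.recarray((10,), dtype=[('x', float), ('y', float)])
    outcome = np.random.rand(10, 5)
    save_results((experiments, {'a': outcome}), tmp_file)
    try:
        _, outcomes = load_results(tmp_file)
        assert np.allclose(outcomes['a'], outcome)
    finally:
        os.remove(tmp_file)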
Example no. 6
def test_scatter3d():
    #load the data
    experiments, results = load_results(r'1000 flu cases.cPickle')
    
    #transform the results to the required format
    newResults = {}
    
    #get time and remove it from the dict
    time = results.pop('TIME')
    
    for key, value in results.items():
        if key == 'deceased population region 1':
            newResults[key] = value[:,-1] #we want the end value
        else:
            # we want the maximum value of the peak
            newResults['max peak'] = np.max(value, axis=1) 
            
            # we want the time at which the maximum occurred
            # the code here is a bit obscure, I don't know why the transpose 
            # of value is needed. This however does produce the appropriate results
            logicalIndex = value.T==np.max(value, axis=1)
            newResults['time of max'] = time[logicalIndex.T]
    results = (experiments, newResults)
    scatter3d(results, outcomes=newResults.keys())
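
The transpose/boolean-mask step used above for 'time of max' can be avoided with np.argmax; a minimal sketch of that alternative (peak_and_time is a hypothetical helper, not part of the workbench):

import numpy as np

def peak_and_time(value, time):
    # value and time are (runs, timesteps) arrays, as in the loop above
    max_peak = np.max(value, axis=1)                      # height of each run's peak
    peak_idx = np.argmax(value, axis=1)                   # column index of the peak
    time_of_max = time[np.arange(len(value)), peak_idx]   # TIME value at that column
    return max_peak, time_of_max

With such a helper, the else-branch above reduces to newResults['max peak'], newResults['time of max'] = peak_and_time(value, time).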
Example no. 7
   clusterSetup = {}
   clusterSetup['plotClusters?'] = False
   clusterSetup['Plot type'] = 'single-window'
   clusterSetup['plotDendrogram?'] = False
   clusterSetup['inter-cluster distance'] = 'single' # other options are 'complete' and 'average'
   clusterSetup['cutoff criteria'] = 'inconsistent'   # Other options are 'distance' and 'maxclust' 
   clusterSetup['cutoff criteria value'] = 0.5
   
   distanceSetup = {}
   distanceSetup['distance'] = 'gonenc'
   distanceSetup['filter?'] = True
   distanceSetup['slope filter'] = 0.001
   distanceSetup['curvature filter'] = 0.005
   distanceSetup['no of sisters'] = 20
   
   
   
   #cluster('chacoTest.cpickle', 'total population', distance='gonenc',plotClusters=True)
   cSet = cluster('PatternSet_Basics.cpickle', 'outcome', distanceSetup, clusterSetup)
   print(max(cSet))
   
       
   cases, results = util.load_results('PatternSet_Basics.cpickle')
   cSetActual = cases['Class ID']
   
   compareClusterSets(cSet,cSetActual)   
   #jpypeTester()
   #multiprocessTester()
   for i in range(len(cSet)):
       print(cSet[i])
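
The option strings in clusterSetup appear to mirror SciPy's hierarchical clustering API ('single'/'complete'/'average' linkage, 'inconsistent'/'distance'/'maxclust' cutoff criteria); a minimal sketch of that correspondence with plain scipy.cluster.hierarchy on synthetic data (the workbench's own cluster() wraps these steps differently):

import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster
from scipy.spatial.distance import pdist

series = np.random.rand(50, 100)                # 50 behaviour patterns over 100 time steps
distances = pdist(series)                       # condensed pairwise distance matrix
Z = linkage(distances, method='single')         # 'inter-cluster distance': single/complete/average
labels = fcluster(Z, t=0.5, criterion='inconsistent')  # 'cutoff criteria' and its value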
 
Example no. 8
import numpy as np
import matplotlib.pyplot as plt

from analysis.pairs_plotting import pairs_scatter, pairs_density, pairs_lines
from expWorkbench.util import load_results

#load the data
experiments, results = load_results(r'Data/1000_runs_neoclassical_rational.bz2')

#transform the results to the required format
newResults = {}

#get time and remove it from the dict
time = results.pop('TIME')

for key, value in results.items():
    if key == 'percentage_of_households_owning_ISG_app':
        newResults[key] = value[:,60]  # we want the value at time step 60
    else:
        # we want the maximum value of the peak
        newResults['max peak'] = np.max(value, axis=1) 
         
        # we want the time at which the maximum occurred
        # the code here is a bit obscure, I don't know why the transpose 
        # of value is needed. This however does produce the appropriate results
        logicalIndex = value.T==np.max(value, axis=1)
        newResults['time of max'] = time[logicalIndex.T]

pairs_density((experiments, newResults))
plt.show() 
Example no. 9
'''
Created on 26 sep. 2011

@author: jhkwakkel
'''
import matplotlib.pyplot as plt

from expWorkbench.util import load_results
from analysis.plotting import envelopes

data = load_results(r'./data/2000 flu cases no policy.bz2')
fig, axes_dict = envelopes(data, group_by='policy')
plt.savefig("./pictures/basic_envelope.png", dpi=75)
Example no. 10
'''
Created on 30 nov. 2011

@author: chamarat
'''

from expWorkbench.util import load_results
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats.kde as kde 

results = load_results('storeduncertainties.cPickle')

results = np.asarray(results)

fig = plt.figure()
ax = fig.add_subplot(111)

##For a KDE graph
#results = results[:-1,20]
#ymin = np.min(results)
#ymax = np.max(results)
#line = np.linspace(ymin, ymax, 1000)[::-1]
#b = kde.gaussian_kde(results)
#b = b.evaluate(line)
#b = np.log((b+1))
#ax.plot(b, line)

##Normal histogram graph
#results = results[:-1,20]
#ax.hist(results,30)
Example no. 11
import numpy as np
import matplotlib.pyplot as plt

from analysis.pairs_plotting import pairs_lines
from expWorkbench.util import load_results


#load the data
data = load_results(r'../../../src/analysis/100 flu cases.cPickle', zipped=False)

pairs_lines(data, group_by='policy')
plt.show() 
Example no. 12
'''
Created on 20 sep. 2011

.. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
'''
import numpy as np
import matplotlib.pyplot as plt

from analysis.pairs_plotting import pairs_lines, pairs_scatter, pairs_density
from expWorkbench.util import load_results
from expWorkbench import ema_logging

ema_logging.log_to_stderr(level=ema_logging.DEFAULT_LEVEL)

#load the data
experiments, outcomes = load_results(r'.\data\100 flu cases no policy.bz2')

#transform the results to the required format
tr = {}

#get time and remove it from the dict
time = outcomes.pop('TIME')

for key, value in outcomes.items():
    if key == 'deceased population region 1':
        tr[key] = value[:,-1] #we want the end value
    else:
        # we want the maximum value of the peak
        tr['max peak'] = np.max(value, axis=1) 
        
        # we want the time at which the maximum occurred
Example no. 13
'''
Created on 20 sep. 2011

.. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
'''
import numpy as np
import matplotlib.pyplot as plt

from analysis.pairs_plotting import pairs_lines, pairs_scatter, pairs_density
from expWorkbench.util import load_results
from expWorkbench import ema_logging

ema_logging.log_to_stderr(level=ema_logging.DEFAULT_LEVEL)

# load the data
fh = r'.\data\1000 flu cases no policy.tar.gz'
experiments, outcomes = load_results(fh)

# transform the results to the required format
# that is, we want to know the max peak and the casualties at the end of the 
# run
tr = {}

# get time and remove it from the dict
time = outcomes.pop('TIME')

for key, value in outcomes.items():
    if key == 'deceased population region 1':
        tr[key] = value[:,-1] #we want the end value
    else:
        # we want the maximum value of the peak
        max_peak = np.max(value, axis=1) 
Example no. 14
'''
Created on 25 okt. 2012

@author: tushithislam
'''
from expWorkbench.util import load_results

#results = load_results(r'D:\tushithislam\workspace\EMA workbench\src\examples\100 flu cases no policy.cPickle')
results = load_results(r'..\..\test\data\eng_trans_100.cPickle')
experiments, outcomes = results

#print set(experiments['policy'])
#
#for key, value in outcomes.iteritems():
#    print key, value.shape
#    
#logical = experiments['policy']=='basic policy'
#
#bp_out = {}
#for key, value in outcomes.iteritems():
#    bp_out[key] = value[logical]

#print experiments
#print outcomes

def main1():
    print(outcomes.keys())
    

def main(a):
    i = outcomes.keys();cntr=0;cdata=[]
Example no. 15
'''
Created on 20 sep. 2011

.. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
'''
import numpy as np
import matplotlib.pyplot as plt

from analysis.graphs import multiplot_scatter, multiplot_density, multiplot_lines
from expWorkbench.util import load_results
import expWorkbench.EMAlogging as logging

logging.log_to_stderr(level=logging.DEFAULT_LEVEL)

#load the data
experiments, results = load_results(r'..\..\src\analysis\1000 flu cases.cPickle')

#transform the results to the required format
newResults = {}

#get time and remove it from the dict
time = results.pop('TIME')

for key, value in results.items():
    if key == 'deceased population region 1':
        newResults[key] = value[:,-1] #we want the end value
    else:
        # we want the maximum value of the peak
        newResults['max peak'] = np.max(value, axis=1) 
        
        # we want the time at which the maximum occurred
Example no. 16
import numpy as np
import matplotlib.pyplot as plt

from analysis.graphs import multiplot_scatter, multiplot_density, multiplot_lines
from expWorkbench.util import load_results

#load the data
experiments, results = load_results(r'../../../src/analysis/1000 flu cases.cPickle')

#transform the results to the required format
newResults = {}

#get time and remove it from the dict
time = results.pop('TIME')

for key, value in results.items():
    if key == 'deceased population region 1':
        newResults[key] = value[:,-1] #we want the end value
    else:
        # we want the maximum value of the peak
        newResults['max peak'] = np.max(value, axis=1) 
        
        # we want the time at which the maximum occurred
        # the code here is a bit obscure, I don't know why the transpose 
        # of value is needed. This however does produce the appropriate results
        logicalIndex = value.T==np.max(value, axis=1)
        newResults['time of max'] = time[logicalIndex.T]

multiplot_density((experiments, newResults))
plt.show() 
Example no. 17
import numpy as np
import matplotlib.pyplot as plt

from analysis.pairs_plotting import pairs_scatter, pairs_density, pairs_lines
from expWorkbench.util import load_results

#load the data
experiments, results = load_results(
    r'../../../src/analysis/1000 flu cases.cPickle', zipped=False)

#transform the results to the required format
newResults = {}

#get time and remove it from the dict
time = results.pop('TIME')

for key, value in results.items():
    if key == 'deceased population region 1':
        newResults[key] = value[:, -1]  #we want the end value
    else:
        # we want the maximum value of the peak
        newResults['max peak'] = np.max(value, axis=1)

        # we want the time at which the maximum occurred
        # the code here is a bit obscure, I don't know why the transpose
        # of value is needed. This however does produce the appropriate results
        logicalIndex = value.T == np.max(value, axis=1)
        newResults['time of max'] = time[logicalIndex.T]

pairs_density((experiments, newResults))
plt.show()
Example no. 18
'''
Created on 30 nov. 2011

@author: chamarat
'''

from expWorkbench.util import load_results
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.gridspec as gridspec 

results = load_results('storedresults_2.cPickle')
results = np.asarray(results)
results = results[:,0:4]


grid = gridspec.GridSpec(len(results[0]), len(results[0]))     
figure = plt.figure()

#FIELDS = ["ini cap T1" , "ini cap T2" , "ini cap T3" , "ini cap T4" , "ini cost T1" , "ini cost T2" , "ini cost T3" , "ini cost T4" , "ini cum decom cap T1" , "ini cum decom cap T2" , "ini cum decom cap T3" , "ini cum decom cap T4" , "average planning and construction period T1" , "average planning and construction period T2" , "average planning and construction period T3" , "average planning and construction period T4" , "ini PR T1" , "ini PR T2" , "ini PR T3" , "ini PR T4" , "lifetime T1" , "lifetime T2" , "lifetime T3" , "lifetime T4" , "ec gr t1" , "ec gr t2" , "ec gr t3" , "ec gr t4" , "ec gr t5" , "ec gr t6" , "ec gr t7" , "ec gr t8" , "ec gr t9" , "ec gr t10" , "random PR min" , "random PR max" , "seed PR T1" , "seed PR T2" , "seed PR T3" , "seed PR T4" , "absolute preference for MIC" , "absolute preference for expected cost per MWe" , "absolute preference against unknown" , "absolute preference for expected progress" , "absolute preference against specific CO2 emissions" , "SWITCH preference for MIC" , "SWITCH preference for expected cost per MWe" , "SWITCH preference against unknown" , "SWITCH preference for expected progress" , "SWITCH preference against specific CO2 emissions" , "performance expected cost per MWe T1" , "performance expected cost per MWe T2" , "performance expected cost per MWe T3" , "performance expected cost per MWe T4" , "performance CO2 avoidance T1" , "performance CO2 avoidance T2" , "performance CO2 avoidance T3" , "performance CO2 avoidance T4" , "SWITCH T3" , "SWITCH T4"]

combis = [(field1, field2) for field1 in range(len(results[0])) for field2 in range(len(results[0]))]
for field1, field2 in combis:
    i = field1
    j = field2
    ax = figure.add_subplot(grid[i,j])
    
    data1 = results[:-1,i]
    data2 = results[:-1,j]

    ax.scatter(data2, data1)
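
The nested loop above hand-builds a scatter-plot matrix cell by cell; a shorter sketch of the same idea with pandas' scatter_matrix, assuming the selected columns fit in a DataFrame (the column names are made up for illustration):

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pandas.plotting import scatter_matrix

data = np.random.rand(100, 4)                   # stand-in for results[:, 0:4]
df = pd.DataFrame(data, columns=['c1', 'c2', 'c3', 'c4'])
scatter_matrix(df, figsize=(8, 8))              # full pairwise scatter grid in one call
plt.show()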
Example no. 19
import numpy as np
import matplotlib.pyplot as plt

from analysis.pairs_plotting import pairs_scatter, pairs_density, pairs_lines
from expWorkbench.util import load_results

# load the data
experiments, results = load_results(r"../../../src/analysis/1000 flu cases.cPickle", zipped=False)

# transform the results to the required format
newResults = {}

# get time and remove it from the dict
time = results.pop("TIME")

for key, value in results.items():
    if key == "deceased population region 1":
        newResults[key] = value[:, -1]  # we want the end value
    else:
        # we want the maximum value of the peak
        newResults["max peak"] = np.max(value, axis=1)

        # we want the time at which the maximum occurred
        # the code here is a bit obscure, I don't know why the transpose
        # of value is needed. This however does produce the appropriate results
        logicalIndex = value.T == np.max(value, axis=1)
        newResults["time of max"] = time[logicalIndex.T]

pairs_density((experiments, newResults))
plt.show()
Example no. 20
    return var

def periodDominance(ds):
    Y = np.fft.rfft(ds)
    n = len(Y)
    powerSpect = np.abs(Y)**2
    timeStep = 1 
    freq = np.fft.fftfreq(n, d=timeStep)
    print(len(freq), len(powerSpect))
    for i in range(len(freq)//2 + 1):
        print(freq[i], 1/freq[i], powerSpect[i])


if __name__ == '__main__':
    
    cases, results = util.load_results('PatternSet_Periodic.cpickle')
    dataSeries = results.get('outcome')
    ds1 = dataSeries[25]
    ds2 = dataSeries[26]
    
    print(linearFit(ds1))
    print(quadraticFit(ds1))
    print(mean(ds1), variance(ds1), stdDev(ds1))
    print(autoCovariance(ds1, 0))
    for k in range(31):
        print(k, autoCorrelation(ds1, k))

    for k in range(31):
        print(k, crossCorrelation(ds1, ds2, k))
    
    periodDominance(ds1)    
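
periodDominance prints the whole spectrum; the dominant period can also be picked out in one step. A minimal sketch using np.fft.rfftfreq so the frequency grid matches the rfft power spectrum (dominant_period is a hypothetical helper, not part of the original module):

import numpy as np

def dominant_period(ds, time_step=1):
    # power spectrum of the series and the matching frequency grid
    power = np.abs(np.fft.rfft(ds)) ** 2
    freq = np.fft.rfftfreq(len(ds), d=time_step)
    k = np.argmax(power[1:]) + 1      # skip the zero-frequency (mean) component
    return 1.0 / freq[k]              # period with the most spectral power

For the series above, this would be called as dominant_period(ds1).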
Example no. 21
import numpy as np
import matplotlib.pyplot as plt

from analysis.graphs import multiplot_lines
from expWorkbench.util import load_results


#load the data
data = load_results(r'../../../src/analysis/100 flu cases.cPickle')

multiplot_lines(data, column='policy')
plt.show()