Ejemplo n.º 1
0
    def test_merge_results(self):
        """Merging two result sets concatenates their experiment arrays."""
        first = load_results('../data/1000 runs scarcity.tar.gz')
        second = load_results('../data/1000 runs scarcity.tar.gz')

        # expected size of the merged experiments array
        expected = first[0].shape[0] + second[0].shape[0]

        combined = merge_results(first, second)

        self.assertEqual(combined[0].shape[0], expected)
Ejemplo n.º 2
0
 def test_merge_results(self):
     """Merged results contain the experiments of both inputs."""
     left = load_results('../data/1000 runs scarcity.tar.gz')
     right = load_results('../data/1000 runs scarcity.tar.gz')

     # expected row count after merging
     total = left[0].shape[0] + right[0].shape[0]

     joined = merge_results(left, right)

     self.assertEqual(joined[0].shape[0], total)
Ejemplo n.º 3
0
    def test_load_results(self):
        """Round-trip save/load of results through a tar.gz archive.

        Covers 1d outcomes (one value per experiment) and 3d outcomes
        (experiments x timesteps x replications); experiments are stored
        in a pandas DataFrame.
        """
        # --- 1d outcome array -------------------------------------------
        nr_experiments = 10000

        # ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin ``float`` is the documented replacement.
        experiments = pd.DataFrame(index=np.arange(nr_experiments),
                                   columns={
                                       'x': float,
                                       'y': float
                                   })

        experiments['x'] = np.random.rand(nr_experiments)
        experiments['y'] = np.random.rand(nr_experiments)

        outcome_a = np.zeros((nr_experiments, 1))
        results = (experiments, {'a': outcome_a})

        save_results(results, '../data/test.tar.gz')
        loaded_experiments, outcomes = load_results('../data/test.tar.gz')

        self.assertTrue(np.all(np.allclose(outcomes['a'], outcome_a)))
        self.assertTrue(
            np.all(np.allclose(experiments['x'], loaded_experiments['x'])))
        self.assertTrue(
            np.all(np.allclose(experiments['y'], loaded_experiments['y'])))

        os.remove('../data/test.tar.gz')

        # --- 3d outcome array: experiments x timesteps x replications ---
        nr_experiments = 1000
        nr_timesteps = 100
        nr_replications = 10
        experiments = pd.DataFrame(index=np.arange(nr_experiments),
                                   columns={
                                       'x': float,
                                       'y': float
                                   })
        experiments['x'] = np.random.rand(nr_experiments)
        experiments['y'] = np.random.rand(nr_experiments)

        outcome_a = np.zeros((nr_experiments, nr_timesteps, nr_replications))

        results = (experiments, {'a': outcome_a})
        save_results(results, '../data/test.tar.gz')
        loaded_experiments, outcomes = load_results('../data/test.tar.gz')

        # remove the archive before asserting so cleanup happens even on
        # a failed comparison
        os.remove('../data/test.tar.gz')

        self.assertTrue(np.all(np.allclose(outcomes['a'], outcome_a)))
        self.assertTrue(
            np.all(np.allclose(experiments['x'], loaded_experiments['x'])))
        self.assertTrue(
            np.all(np.allclose(experiments['y'], loaded_experiments['y'])))
Ejemplo n.º 4
0
    def test_load_results(self):
        """Round-trip save/load of results through a tar.gz archive.

        Covers 1d outcomes (one value per experiment) and 3d outcomes
        (experiments x timesteps x replications); experiments are stored
        in a NumPy recarray.
        """
        # --- 1d outcome array -------------------------------------------
        nr_experiments = 10000

        # ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin ``float`` is the documented replacement.
        experiments = np.recarray((nr_experiments, ),
                                  dtype=[('x', float), ('y', float)])

        experiments['x'] = np.random.rand(nr_experiments)
        experiments['y'] = np.random.rand(nr_experiments)

        outcome_a = np.zeros((nr_experiments, 1))
        results = (experiments, {'a': outcome_a})

        save_results(results, '../data/test.tar.gz')
        loaded_experiments, outcomes = load_results('../data/test.tar.gz')

        self.assertTrue(np.all(np.allclose(outcomes['a'], outcome_a)))
        self.assertTrue(
            np.all(np.allclose(experiments['x'], loaded_experiments['x'])))
        self.assertTrue(
            np.all(np.allclose(experiments['y'], loaded_experiments['y'])))

        os.remove('../data/test.tar.gz')

        # --- 3d outcome array: experiments x timesteps x replications ---
        nr_experiments = 1000
        nr_timesteps = 100
        nr_replications = 10
        experiments = np.recarray((nr_experiments, ),
                                  dtype=[('x', float), ('y', float)])
        experiments['x'] = np.random.rand(nr_experiments)
        experiments['y'] = np.random.rand(nr_experiments)

        outcome_a = np.zeros((nr_experiments, nr_timesteps, nr_replications))

        results = (experiments, {'a': outcome_a})
        save_results(results, '../data/test.tar.gz')
        loaded_experiments, outcomes = load_results('../data/test.tar.gz')

        # remove the archive before asserting so cleanup happens even on
        # a failed comparison
        os.remove('../data/test.tar.gz')

        self.assertTrue(np.all(np.allclose(outcomes['a'], outcome_a)))
        self.assertTrue(
            np.all(np.allclose(experiments['x'], loaded_experiments['x'])))
        self.assertTrue(
            np.all(np.allclose(experiments['y'], loaded_experiments['y'])))
Ejemplo n.º 5
0
 def test_load_results(self):
     """Round-trip save/load of results (1d and 3d outcome arrays)."""
     # --- 1d outcome array ----------------------------------------
     nr_experiments = 10000

     # ``np.float`` was removed in NumPy 1.24; use the builtin
     # ``float`` instead.
     experiments = pd.DataFrame(index=np.arange(nr_experiments),
                                columns={'x': float,
                                         'y': float})

     experiments['x'] = np.random.rand(nr_experiments)
     experiments['y'] = np.random.rand(nr_experiments)

     outcome_a = np.zeros((nr_experiments, 1))
     results = (experiments, {'a': outcome_a})

     save_results(results, '../data/test.tar.gz')
     loaded_experiments, outcomes = load_results('../data/test.tar.gz')

     self.assertTrue(np.all(np.allclose(outcomes['a'], outcome_a)))
     self.assertTrue(
         np.all(np.allclose(experiments['x'], loaded_experiments['x'])))
     self.assertTrue(
         np.all(np.allclose(experiments['y'], loaded_experiments['y'])))

     os.remove('../data/test.tar.gz')

     # --- 3d outcome array: experiments x timesteps x replications -
     nr_experiments = 1000
     nr_timesteps = 100
     nr_replications = 10
     experiments = pd.DataFrame(index=np.arange(nr_experiments),
                                columns={'x': float,
                                         'y': float})
     experiments['x'] = np.random.rand(nr_experiments)
     experiments['y'] = np.random.rand(nr_experiments)

     outcome_a = np.zeros((nr_experiments, nr_timesteps, nr_replications))

     results = (experiments, {'a': outcome_a})
     save_results(results, '../data/test.tar.gz')
     loaded_experiments, outcomes = load_results('../data/test.tar.gz')

     # remove the archive before asserting so cleanup always happens
     os.remove('../data/test.tar.gz')

     self.assertTrue(np.all(np.allclose(outcomes['a'], outcome_a)))
     self.assertTrue(
         np.all(np.allclose(experiments['x'], loaded_experiments['x'])))
     self.assertTrue(
         np.all(np.allclose(experiments['y'], loaded_experiments['y'])))
Ejemplo n.º 6
0
    def test_load_results(self):
        """Round-trip save/load for 1d and 3d outcome arrays.

        The original computed ``np.allclose`` comparisons into
        ``logical`` but never asserted them, so the test could never
        fail; the comparisons are now asserted explicitly.
        """
        # --- 1d outcome array -------------------------------------------
        nr_experiments = 10000
        experiments = np.recarray((nr_experiments, ),
                                  dtype=[('x', float), ('y', float)])
        outcome_a = np.zeros((nr_experiments, 1))

        results = (experiments, {'a': outcome_a})

        save_results(results, u'../data/test.tar.gz')
        experiments, outcomes = load_results(u'../data/test.tar.gz')

        logical = np.allclose(outcomes['a'], outcome_a)

        # clean up before asserting so the archive is removed either way
        os.remove('../data/test.tar.gz')

        self.assertTrue(logical)

        # --- 3d outcome array: experiments x timesteps x replications ---
        nr_experiments = 1000
        nr_timesteps = 100
        nr_replications = 10
        experiments = np.recarray((nr_experiments, ),
                                  dtype=[('x', float), ('y', float)])
        outcome_a = np.zeros((nr_experiments, nr_timesteps, nr_replications))

        results = (experiments, {'a': outcome_a})
        save_results(results, '../data/test.tar.gz')
        experiments, outcomes = load_results('../data/test.tar.gz')

        logical = np.allclose(outcomes['a'], outcome_a)

        os.remove('../data/test.tar.gz')

        self.assertTrue(logical)
Ejemplo n.º 7
0
    def test_load_results(self):
        """Round-trip save/load for 1d and 3d outcome arrays.

        The original computed ``np.allclose`` comparisons into
        ``logical`` but never asserted them, so the test could never
        fail; the comparisons are now asserted explicitly.
        """
        # --- 1d outcome array -------------------------------------------
        nr_experiments = 10000
        experiments = np.recarray((nr_experiments,),
                                  dtype=[('x', float), ('y', float)])
        outcome_a = np.zeros((nr_experiments, 1))

        results = (experiments, {'a': outcome_a})

        save_results(results, u'../data/test.tar.gz')
        experiments, outcomes = load_results(u'../data/test.tar.gz')

        logical = np.allclose(outcomes['a'], outcome_a)

        # clean up before asserting so the archive is removed either way
        os.remove('../data/test.tar.gz')

        self.assertTrue(logical)

        # --- 3d outcome array: experiments x timesteps x replications ---
        nr_experiments = 1000
        nr_timesteps = 100
        nr_replications = 10
        experiments = np.recarray((nr_experiments,),
                                  dtype=[('x', float), ('y', float)])
        outcome_a = np.zeros((nr_experiments, nr_timesteps, nr_replications))

        results = (experiments, {'a': outcome_a})
        save_results(results, u'../data/test.tar.gz')
        experiments, outcomes = load_results(u'../data/test.tar.gz')

        logical = np.allclose(outcomes['a'], outcome_a)

        os.remove('../data/test.tar.gz')

        self.assertTrue(logical)
Ejemplo n.º 8
0

def periodDominance(ds):
    """Print the power spectrum of *ds* with the period of each frequency.

    Python 2 ``print`` statements and the float loop bound broke this
    under Python 3; converted to ``print()`` calls and floor division.
    Frequency 0 prints an infinite period (1/0 -> inf under NumPy
    floating-point semantics).
    """
    Y = np.fft.rfft(ds)
    n = len(Y)
    powerSpect = np.abs(Y) ** 2
    timeStep = 1
    # NOTE(review): fftfreq is called with the length of the *rfft*
    # output rather than the input series; np.fft.rfftfreq(len(ds)) may
    # have been intended -- confirm before relying on printed periods.
    freq = np.fft.fftfreq(n, d=timeStep)
    print(len(freq), len(powerSpect))
    # ``//`` keeps the range() bound an int under Python 3
    for i in range(len(freq) // 2 + 1):
        print(freq[i], 1 / freq[i], powerSpect[i])


if __name__ == '__main__':
    # Demonstration: load a periodic pattern set and print statistics for
    # two of its series.  Python 2 ``print`` statements converted to
    # Python 3 ``print()`` calls; output is unchanged.
    cases, results = utilities.load_results('PatternSet_Periodic.cpickle')
    dataSeries = results.get('outcome')
    ds1 = dataSeries[25]
    ds2 = dataSeries[26]

    print(linearFit(ds1))
    print(quadraticFit(ds1))
    print(mean(ds1), variance(ds1), stdDev(ds1))
    print(autoCovariance(ds1, 0))
    for k in range(31):
        print(k, autoCorrelation(ds1, k))

    for k in range(31):
        print(k, crossCorrelation(ds1, ds2, k))

    periodDominance(ds1)
Ejemplo n.º 9
0
    return var

def periodDominance(ds):
    Y = np.fft.rfft(ds)
    n = len(Y)
    powerSpect = np.abs(Y)**2
    timeStep = 1 
    freq = np.fft.fftfreq(n, d=timeStep)
    print(len(freq), len(powerSpect))
    for i in range(len(freq)/2+1):
        print(freq[i], 1/freq[i], powerSpect[i])


if __name__ == '__main__':

    # Load the periodic pattern set and report statistics for two series.
    cases, results = utilities.load_results('PatternSet_Periodic.cpickle')
    series = results.get('outcome')
    ds1 = series[25]
    ds2 = series[26]

    print(linearFit(ds1))
    print(quadraticFit(ds1))
    print(mean(ds1), variance(ds1), stdDev(ds1))
    print(autoCovariance(ds1, 0))

    # auto-correlation of the first series at lags 0..30
    for lag in range(31):
        print(lag, autoCorrelation(ds1, lag))

    # cross-correlation between the two series at lags 0..30
    for lag in range(31):
        print(lag, crossCorrelation(ds1, ds2, lag))

    periodDominance(ds1)
Ejemplo n.º 10
0
                           Scenario, Policy, Constraint)
from ema_workbench import load_results
from ema_workbench.analysis import prim, dimensional_stacking, cart
from ema_workbench.util import ema_logging, utilities

from ema_workbench.em_framework.optimization import (HyperVolume,
                                                     EpsilonProgress)
from ema_workbench.em_framework import sample_uncertainties, MonteCarloSampler
from ema_workbench.em_framework.evaluators import BaseEvaluator

ema_logging.log_to_stderr(ema_logging.INFO)

from dike_model_function import DikeNetwork  # @UnresolvedImport
from problem_formulation import get_model_for_problem_formulation

# Load a previously saved exploration of the base case (75 policies).
results = utilities.load_results('results/base_case_75.csv')

# load_results returns an (experiments, outcomes) tuple; convert both
# to DataFrames for joining below.
experiments, outcomes = results
outcomes = pd.DataFrame(outcomes)
experiments = pd.DataFrame(experiments)

# Pre-computed totals for the same 75 policies.
outcome_total = pd.read_csv('results/75policies_with_total_values.csv')

# Join experiments with the totals; joined on index, so rows are assumed
# to line up one-to-one -- TODO confirm both sources share row order.
results = experiments.join(outcome_total)
# the constant "model" column is not needed for the analysis
results = results.drop(columns="model")

# minimise the worst outcome and minimise the standard deviation
#defined as the median value multiplied by the interquartile distance plus one

def robustness(result):
Ejemplo n.º 11
0
def aggregate_outcomes(results, outcome):
    """Add a ``"Total <outcome>"`` column to *results* in place.

    The new column is the row-wise sum of every column whose name
    contains *outcome* as a substring.
    """
    matching = [column for column in results.columns if outcome in column]
    results["Total " + str(outcome)] = results[matching].sum(axis=1)


# ### Find the ranges for epsilon and hypervolume convergence
#
# To set $\epsilon$ values, we must minimize noise by first running a robust optimize quickly to see a Pareto front develop as stated in section 3.4 of doi: 10.1016/j.envsoft.2011.04.003 (we don't only look at Monte Carlo policies in hope that this will save time).
# Load results of the quick robust-optimization run (50 scenarios).
results = utilities.load_results('Outcomes/MOROpolicies50Scenarios.csv')

# (experiments, outcomes) tuple; convert both to DataFrames.
experiments, outcomes = results

outcomes = pd.DataFrame(outcomes)
experiments = pd.DataFrame(experiments)
# index-aligned join; assumes both frames share row order -- TODO confirm
results = experiments.join(outcomes)
results = results.drop(columns="model")

# Aggregate
# NOTE(review): aggregation mutates ``outcomes`` *after* it was joined
# into ``results``, so the "Total ..." columns are added to ``outcomes``
# only and do not appear in ``results`` -- confirm this is intended.
aggregate_outcomes(outcomes, "Expected Annual Damage")
aggregate_outcomes(outcomes, "Dike Investment Costs")
aggregate_outcomes(outcomes, "Expected Number of Deaths")
aggregate_outcomes(outcomes, "RfR Total Costs")
aggregate_outcomes(outcomes, "Expected Evacuation Costs")