Example #1
def test_box():
    ema_logging.log_to_stderr(ema_logging.INFO)
    
    x = np.loadtxt(r'quasiflow x.txt')
    y = np.loadtxt(r'quasiflow y.txt')
    
    prim = perform_prim(x, y, pasting=True, threshold=0, threshold_type=-1)
    
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(x[:,0], x[:, 1], c=y)
    
    
    print('           \tmass\tmean')
    for i, entry in enumerate(prim[0:-1]):
        print('found box %s:\t%s\t%s' % (i, entry.box_mass, entry.y_mean))
    print('rest box    :\t%s\t%s' % (prim[-1].box_mass, prim[-1].y_mean))
    
    colors = graphs.COLOR_LIST
    for i, entry in enumerate(prim):
        box = entry.box
        #trace the box limits as a closed rectangle; repeating the first
        #corner closes the outline
        box_x = np.array([box[0,0], box[1,0], box[1,0], box[0,0], box[0,0]])
        box_y = np.array([box[0,1], box[0,1], box[1,1], box[1,1], box[0,1]])
        ax.plot(box_x, box_y, c=colors[i%len(colors)], lw=4)
    
    plt.show()     
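The five-point trace in the loop above is a small plotting trick: listing the first corner twice closes the rectangle. A standalone sketch of the same idea (plot_box_outline is a hypothetical helper, not part of the workbench):

import numpy as np
import matplotlib.pyplot as plt

def plot_box_outline(ax, box, **kwargs):
    #box is a 2x2 array: row 0 holds the lower limits, row 1 the upper limits
    xs = [box[0,0], box[1,0], box[1,0], box[0,0], box[0,0]]
    ys = [box[0,1], box[0,1], box[1,1], box[1,1], box[0,1]]
    ax.plot(xs, ys, **kwargs)

fig = plt.figure()
ax = fig.add_subplot(111)
plot_box_outline(ax, np.array([[0.2, 0.3], [0.8, 0.9]]), c='k', lw=2)
plt.show()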
Example #2
def test_optimization():
    ema_logging.log_to_stderr(ema_logging.INFO)
    
    model = FluModel(r'..\data', "fluCase")
    ensemble = ModelEnsemble()
    
    ensemble.set_model_structure(model)
    ensemble.parallel=True
#    ensemble.processes = 12
        
    stats, pop = ensemble.perform_outcome_optimization(obj_function=obj_function_multi,
                                                    reporting_interval=10,
                                                    weights=(MAXIMIZE, MAXIMIZE),
                                                    pop_size=100,
                                                    nr_of_generations=5,
                                                    crossover_rate=0.5,
                                                    mutation_rate=0.05)
    res = stats.hall_of_fame.keys #fitness objects of the hall-of-fame solutions
    
    x = [entry.values[0] for entry in res]
    y = [entry.values[1] for entry in res]
    
    print('%s %s' % (len(x), len(y)))
    
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(x,y)
    ax.set_ylabel("deceased population")
    ax.set_xlabel("infected fraction")
    
    plt.show()
Example #3
def maxmin_optimize():
    ema_logging.log_to_stderr(ema_logging.INFO)
 
    model = TestModel("", 'simpleModel') #instantiate the model
    ensemble = ModelEnsemble() #instantiate an ensemble
    ensemble.set_model_structure(model) #set the model on the ensemble
    ensemble.parallel = True
    ensemble.processes = 12
    
    def obj_function1(outcomes):
        return outcomes['y']
    
    policy_levers = {"L1": (0,1),
                     "L2": (0,1)}
    
    results = ensemble.perform_maximin_optimization(obj_function1=obj_function1,
                                                    policy_levers=policy_levers,
                                                    minimax1='minimize',
                                                    nrOfGenerations1=50,
                                                    nrOfPopMembers1=200,
                                                    minimax2='maximize',
                                                    nrOfGenerations2=50,
                                                    nrOfPopMembers2=100)

    graph_errorbars_raw(results['stats'])
    plt.show()
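perform_maximin_optimization nests two searches: an inner one for the scenario that makes a candidate policy look worst, and an outer one for the lever values whose worst case is best. A toy grid-search version of that structure (pure numpy; the outcome function is hypothetical and stands in for the model):

import numpy as np

levers = np.linspace(0, 1, 21)            #candidate values for a lever
scenarios = np.linspace(0, 1, 21)         #values for an uncertain factor
L, U = np.meshgrid(levers, scenarios, indexing='ij')
y = (L - 0.3)**2 + U * L                  #hypothetical outcome, to be kept small

worst_case = y.max(axis=1)                #inner step: worst outcome per lever
best_lever = levers[worst_case.argmin()]  #outer step: minimize the worst case
print(best_lever)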
Example #4
def test_save_results():
    ema_logging.log_to_stderr(ema_logging.DEBUG)
    data = util.load_results("./data/1000 flu cases no policy.cPickle", zip=False)
    file_name = "test.bz2"
    util.save_results(data, file_name)
    ema_logging.debug("removing " + file_name)
    os.remove(file_name)
Example #5
def test_load_results():

    data = np.random.rand(1000, 1000)
    file_name = "test.bz2"
    util.save_results(data, file_name)

    ema_logging.log_to_stderr(ema_logging.DEBUG)
    util.load_results(file_name)
    ema_logging.debug("removing " + file_name)
    os.remove(file_name)
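Both tests above exercise the same save/load round trip. A minimal sketch of that pattern with an explicit check, assuming load_results simply returns whatever save_results stored:

import os
import numpy as np
from expWorkbench import util

data = np.random.rand(100, 100)
file_name = "roundtrip.bz2"
util.save_results(data, file_name)
loaded = util.load_results(file_name)
assert np.allclose(data, loaded)  #assumption: the round trip preserves the array
os.remove(file_name)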
Example #6
def perform_experiments():
    ema_logging.log_to_stderr(level=ema_logging.INFO)
    model = SalinizationModel(r"C:\workspace\EMA-workbench\models\salinization", "verzilting")
    model.step = 4
    
    ensemble = ModelEnsemble()
    ensemble.set_model_structure(model)
    
    ensemble.parallel = True
    nr_of_experiments = 10000
    results = ensemble.perform_experiments(nr_of_experiments)
    return results
Example #7
def test_optimization():
    ema_logging.log_to_stderr(ema_logging.INFO)
    
    model = FluModel(r'..\data', "fluCase")
    ensemble = ModelEnsemble()
    
    ensemble.set_model_structure(model)
    ensemble.parallel=True
        
    stats, pop = ensemble.perform_outcome_optimization(obj_function=obj_function_multi,
                                                    reporting_interval=100,
                                                    weights=(MAXIMIZE, MAXIMIZE),
                                                    pop_size=100,
                                                    nr_of_generations=20,
                                                    crossover_rate=0.5,
                                                    mutation_rate=0.05,
                                                    caching=False)
    res = stats.hall_of_fame.keys
    
    print(len(stats.tried_solutions.values()))
Example #8
def outcome_optimize():
    ema_logging.log_to_stderr(ema_logging.INFO)
 
    model = TestModel("", 'simpleModel') #instantiate the model
    ensemble = ModelEnsemble() #instantiate an ensemble
    ensemble.set_model_structure(model) #set the model on the ensemble
    policy = {"name": "test",
              "L1": 1,
              "L2": 1}
    ensemble.add_policy(policy)
    
    def obj_func(results):
        return results['y']        
    
    results = ensemble.perform_outcome_optimization(obj_function=obj_func,
                                          minimax='minimize',
                                          nrOfGenerations=1000,
                                          nrOfPopMembers=10)

    graph_errorbars_raw(results['stats'])
    plt.show()
Example #9
def test_tree():
    
    log_to_stderr(level=INFO)
        
    model = FluModel(r'..\..\models\flu', "fluCase")
    ensemble = ModelEnsemble()
    ensemble.parallel = True
    ensemble.set_model_structure(model)
    
    policies = [{'name': 'no policy',
                 'file': r'\FLUvensimV1basecase.vpm'},
                {'name': 'static policy',
                 'file': r'\FLUvensimV1static.vpm'},
                {'name': 'adaptive policy',
                 'file': r'\FLUvensimV1dynamic.vpm'}
                ]
    ensemble.add_policies(policies)
    
    results = ensemble.perform_experiments(10)
   
    a_tree = tree(results, classify)
Example #10
def test_feature_selection():
    log_to_stderr(level=INFO)
        
    model = FluModel(r'..\..\models\flu', "fluCase")
    ensemble = ModelEnsemble()
    ensemble.parallel = True
    ensemble.set_model_structure(model)
    
    policies = [{'name': 'no policy',
                 'file': r'\FLUvensimV1basecase.vpm'},
                {'name': 'static policy',
                 'file': r'\FLUvensimV1static.vpm'},
                {'name': 'adaptive policy',
                 'file': r'\FLUvensimV1dynamic.vpm'}
                ]
    ensemble.add_policies(policies)
    
    results = ensemble.perform_experiments(5000)
   
    results = feature_selection(results, classify)
    for entry in results:
        print(entry[0] + "\t" + str(entry[1]))
Example #11
def robust_optimize():
    ema_logging.log_to_stderr(ema_logging.INFO)
 
    model = TestModel("", 'simpleModel') #instantiate the model
    ensemble = ModelEnsemble() #instantiate an ensemble
    ensemble.set_model_structure(model) #set the model on the ensemble
    
    
    policy_levers = {"L1": (0,1),
                     "L2": (0,1)}
    
    def obj_func(results):
        #objective: average outcome y over the sampled cases
        return np.average(results['y'])
    
    results = ensemble.perform_robust_optimization(cases=1000,
                                                   obj_function=obj_func,
                                                   policy_levers=policy_levers,
                                                   minimax='minimize',
                                                   nrOfGenerations=50,
                                                   nrOfPopMembers=20)
    graph_errorbars_raw(results['stats'])
    plt.show()
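Where the max-min example optimizes against the worst case, perform_robust_optimization as used here scores each lever setting by its performance averaged over the sampled cases. The same contrast in a toy grid search (pure numpy, hypothetical outcome function):

import numpy as np

levers = np.linspace(0, 1, 21)
cases = np.random.rand(1000)                              #sampled scenarios
y = (levers[:, None] - 0.3)**2 + levers[:, None] * cases  #hypothetical outcome
robust_score = y.mean(axis=1)                             #average over the cases
print(levers[robust_score.argmin()])                      #best lever on average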
Example #12
def test_optimization():
    ema_logging.log_to_stderr(ema_logging.INFO)
    
    model = FluModel(r'../models', "fluCase")
    ensemble = ModelEnsemble()
    
    ensemble.set_model_structure(model)
    ensemble.parallel=True
    
    pop_size = 8
    nr_of_generations = 10
    eps = np.array([1e-3, 1e6])

    stats, pop = ensemble.perform_outcome_optimization(obj_function=obj_function_multi,
                                                    algorithm=epsNSGA2,
                                                    reporting_interval=100,
                                                    weights=(MAXIMIZE, MAXIMIZE),
                                                    pop_size=pop_size,
                                                    nr_of_generations=nr_of_generations,
                                                    crossover_rate=0.8,
                                                    mutation_rate=0.05,
                                                    eps=eps)
    fn = '../data/test optimization save.bz2'
Example #13
def perform_loop_knockout():    
    unique_edges = [['In Goods', 'lost'],
                    ['loss unprofitable extraction capacity', 'decommissioning extraction capacity'],
                    ['production', 'In Goods'],
                    ['production', 'lost'],
                    ['production', 'Supply'],
                    ['Real Annual Demand', 'substitution losses'],
                    ['Real Annual Demand', 'price elasticity of demand losses'],
                    ['Real Annual Demand', 'desired extraction capacity'],
                    ['Real Annual Demand', 'economic demand growth'],
                    ['average recycling cost', 'relative market price'],
                    ['recycling fraction', 'lost'],
                    ['commissioning recycling capacity', 'Recycling Capacity Under Construction'],
                    ['maximum amount recyclable', 'recycling fraction'],
                    ['profitability recycling', 'planned recycling capacity'],
                    ['relative market price', 'price elasticity of demand losses'],
                    ['constrained desired recycling capacity', 'gap between desired and constrained recycling capacity'],
                    ['profitability extraction', 'planned extraction capacity'],
                    ['commissioning extraction capacity', 'Extraction Capacity Under Construction'],
                    ['desired recycling', 'gap between desired and constrained recycling capacity'],
                    ['Installed Recycling Capacity', 'decommissioning recycling capacity'],
                    ['Installed Recycling Capacity', 'loss unprofitable recycling capacity'],
                    ['average extraction costs', 'profitability extraction'],
                    ['average extraction costs', 'relative attractiveness recycling']]
    
    unique_cons_edges = [['recycling', 'recycling'],
                           ['recycling', 'supply demand ratio'],
                           ['decommissioning recycling capacity', 'recycling fraction'],
                           ['returns to scale', 'relative attractiveness recycling'],
                           ['shortage price effect', 'relative price last year'],
                           ['shortage price effect', 'profitability extraction'],
                           ['loss unprofitable extraction capacity', 'loss unprofitable extraction capacity'],
                           ['production', 'recycling fraction'],
                           ['production', 'constrained desired recycling capacity'],
                           ['production', 'new cumulatively recycled'],
                           ['production', 'effective fraction recycled of supplied'],
                           ['loss unprofitable recycling capacity', 'recycling fraction'],
                           ['average recycling cost', 'loss unprofitable recycling capacity'],
                           ['recycling fraction', 'new cumulatively recycled'],
                           ['substitution losses', 'supply demand ratio'],
                           ['Installed Extraction Capacity', 'Extraction Capacity Under Construction'],
                           ['Installed Extraction Capacity', 'commissioning extraction capacity'],
                           ['Installed Recycling Capacity', 'Recycling Capacity Under Construction'],
                           ['Installed Recycling Capacity', 'commissioning recycling capacity'],
                           ['average extraction costs', 'profitability extraction']]
    
#    CONSTRUCTING THE ENSEMBLE AND SAVING THE RESULTS
    ema_logging.log_to_stderr(ema_logging.INFO)
    results = load_results(r'base.cPickle')

#    GETTING OUT THOSE BEHAVIOURS AND EXPERIMENT SETTINGS
#    Indices of a number of examples, these will be looked at.
    runs = [526,781,911,988,10,780,740,943,573,991]
    VOI = 'relative market price'
    
    results_of_interest = experiment_settings(results,runs,VOI)
    cases_of_interest = experiments_to_cases(results_of_interest[0])
    behaviour_int = results_of_interest[1][VOI]
    
#    CONSTRUCTING INTERVALS OF ATOMIC BEHAVIOUR PATTERNS
    ints = intervals(behaviour_int,False)

#    GETTING OUT ONLY THOSE OF MAXIMUM LENGTH PER BEHAVIOUR
    max_intervals = intervals_interest(ints)
    
#    THIS HAS TO DO WITH THE MODEL FORMULATION OF THE SWITCHES/VALUES
    double_list = [6,9,11,17,19]
    
    indCons = len(unique_edges)
#    for elem in unique_cons_edges:
#        unique_edges.append(elem)
    
    current = os.getcwd()

    for beh_no in range(0,10):
#        beh_no = 0 # Varies between 0 and 9, index style.
        interval = max_intervals[beh_no]
    
        rmp = behaviour_int[beh_no]
    #    rmp = rmp[interval[0]:interval[1]]
        x = range(0,len(rmp))
        fig = plt.figure()
        ax = fig.add_subplot(111)
    
        vensim.be_quiet()
    #    for loop_index in range(7,8):
        for loop_index in range(1,len(unique_edges)+1):
    
            #note: the base-model branch (loop_index == 0) can never trigger
            #with the range above, which starts at 1
            if loop_index-indCons > 0:
                model_location = current + r'\Models\Consecutive\Metals EMA.vpm'
            elif loop_index == 0:
                model_location = current + r'\Models\Base\Metals EMA.vpm'
            else:
                model_location = current + r'\Models\Switches\Metals EMA.vpm'
        
            serie = run_interval(model_location,loop_index,
                                  interval,'relative market price',
                                  unique_edges,indCons,double_list,
                                  cases_of_interest[beh_no])
            
            if serie.shape != rmp.shape:
                ema_logging.info('Loop %s created a floating point error' % (loop_index))
                ema_logging.info('Caused by trying to switch %s' % (unique_edges[loop_index-1]))
            else:
                ax.plot(x,serie,'b')
                
    #        data = np.zeros(rmp.shape[0])
    #        data[0:serie.shape[0]] = serie
    #        ax.plot(x,data,'b')
      
        ax.plot(x,rmp,'r')
        ax.axvspan(interval[0]-1,interval[1], facecolor='lightgrey', alpha=0.5)
        f_name = 'switched unique edges only'+str(beh_no)
        plt.savefig(f_name)
Example #14
    #load the data
    experiments, results = load_results(r'1000 flu cases.cPickle')
    
    #transform the results to the required format
    newResults = {}
    
    #get time and remove it from the dict
    time = results.pop('TIME')
    
    for key, value in results.items():
        if key == 'deceased population region 1':
            newResults[key] = value[:,-1] #we want the end value
        else:
            # we want the maximum value of the peak
            newResults['max peak'] = np.max(value, axis=1) 
            
            # we want the time at which the maximum occurred; transposing
            # value lets the (n_runs,) vector of row maxima broadcast
            # against every time step, marking where each run peaks
            logicalIndex = value.T==np.max(value, axis=1)
            newResults['time of max'] = time[logicalIndex.T]
    results = (experiments, newResults)
    scatter3d(results, outcomes=newResults.keys())


if __name__ == '__main__':
    log_to_stderr(level=INFO)
#    test_envelopes3d()
#    test_envelopes3d_group_by()
#    test_lines3d()
#    test_scatter3d()
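The boolean-transpose construction above can also be written with argmax, which reads more directly: per run, take the time value in the column where the peak occurs. A standalone numpy sketch (toy shapes, assuming value and time are both (runs, timesteps) arrays):

import numpy as np

value = np.random.rand(5, 100)            #toy outcomes: 5 runs, 100 time steps
time = np.tile(np.arange(100), (5, 1))    #matching time axis per run
peak_idx = np.argmax(value, axis=1)       #column of the maximum per run
time_of_max = time[np.arange(value.shape[0]), peak_idx]
print(time_of_max)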
Example #15
'''
Created on Sep 8, 2011

@author: gonengyucel, jhkwakkel
'''
import matplotlib.pyplot as plt

from analysis.clusterer import cluster

from expWorkbench import load_results
from expWorkbench import ema_logging

ema_logging.log_to_stderr(ema_logging.INFO)

#load the data
data = load_results(r'..\gallery\data\100 flu cases no policy.cPickle')

# specify the number of desired clusters
# note: the meaning of cValue is tied to the value for cMethod
cValue = 5

#perform cluster analysis
dist, clusteraloc, runlog, z = cluster(data=data,
                                    outcome='deceased population region 1',
                                    distance='gonenc',
                                    interClusterDistance='complete',
                                    cMethod='maxclust',
                                    cValue=cValue,
                                    plotDendrogram=False,
                                    plotClusters=False,
                                    groupPlot=False)
Example #16
    uncs = [ParameterUncertainty((0,1), "a"),
            ParameterUncertainty((0,1), "b")]
    outcomes = [Outcome("test 1", time=True), 
                Outcome("test 2", time=True)]
    callback = DefaultCallback(uncs, outcomes, nr_experiments=nr_experiments)
    
    policy = {"name": "none"}
    name = "test"
    
    for i in range(nr_experiments):
        if i % 2 == 0:
            case = {uncs[0].name: random.random()}
            result = {outcomes[0].name: np.random.rand(10)}
        else: 
            case = {uncs[1].name: random.random()}
            result = {outcomes[1].name: np.random.rand(10)}
    
        callback(case, policy, name, result)
    
    results = callback.get_results()
    debug("\n"+str(results[0]))
    for key, value in results[1].items():
        debug("\n" + str(key) + "\n" + str(value))   

if __name__ == "__main__":
    ema_logging.log_to_stderr(ema_logging.DEBUG)
#    test_callback_initalization()
    test_callback_store_results()
#    test_callback_call_intersection()
#     test_callback_call_union()
    
Example #17
        '''
        self.uncertainties.append(LookupUncertainty([(0, 5), (-1, 2), (0, 1.5), (0, 1.5), (0, 1), (0.5, 1.5)], "TF", 'hearne', self, 0, 2))
        #self.uncertainties.pop()
        self.uncertainties.append(LookupUncertainty([(0, 4), (1, 5), (1, 5), (0, 2), (0, 2)], "TF2", 'approximation', self, 0, 10))
        #self.uncertainties.pop()
        self.uncertainties.append(ParameterUncertainty((0.02, 0.08), "rate1"))
        self.uncertainties.append(ParameterUncertainty((0.02, 0.08), "rate2"))
        self.uncertainties.append(LookupUncertainty([[(0.0, 0.05), (0.25, 0.15), (0.5, 0.4), (0.75, 1), (1, 1.25)], 
                                                     [(0.0, 0.1), (0.25, 0.25), (0.5, 0.75), (1, 1.25)],
                                                     [(0.0, 0.0), (0.1, 0.2), (0.3, 0.6), (0.6, 0.9), (1, 1.25)]], "TF3", 'categories', self, 0, 2))
        #self.uncertainties.pop()   
        self.delete_lookup_uncertainties()                   

        
if __name__ == "__main__":
    logger = logging.log_to_stderr(logging.INFO)
    model = lookup_model(r'..\lookups', "sampleModel")

    #model.step = 4 #reduce data to be stored
    ensemble = ModelEnsemble()
    ensemble.set_model_structure(model)

    #turn on parallel
    ensemble.parallel = False
    
    #run policy with old cases
    results = ensemble.perform_experiments(10)
    save_results(results, 'lookup_3methods.cpickle')
    
    results = load_results('lookup_3methods.cpickle')
    outcomes =['TF', 'TF2', 'TF3', 'flow1']
Example #18
'''
Created on 20 sep. 2011

.. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
'''
import numpy as np
import matplotlib.pyplot as plt

from analysis.pairs_plotting import pairs_lines, pairs_scatter, pairs_density
from expWorkbench.util import load_results
from expWorkbench import ema_logging

ema_logging.log_to_stderr(level=ema_logging.DEFAULT_LEVEL)

#load the data
experiments, outcomes = load_results(r'.\data\100 flu cases no policy.bz2')

#transform the results to the required format
tr = {}

#get time and remove it from the dict
time = outcomes.pop('TIME')

for key, value in outcomes.items():
    if key == 'deceased population region 1':
        tr[key] = value[:,-1] #we want the end value
    else:
        # we want the maximum value of the peak
        tr['max peak'] = np.max(value, axis=1) 
        
        # we want the time at which the maximum occurred
Example #19
See flu_example.py for the code. The dataset was generated using 32 bit Python.
Therefore, this example will not work if you are running 64 bit Python. 


.. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
                chamarat <c.hamarat  (at) tudelft (dot) nl>

'''

import numpy as np
import matplotlib.pyplot as plt

import analysis.prim as prim
from expWorkbench import load_results, ema_logging

ema_logging.log_to_stderr(level=ema_logging.INFO)

def classify(data):
    #get the output for deceased population
    result = data['deceased population region 1']
    
    #make an empty array of length equal to the number of cases
    classes = np.zeros(result.shape[0])
    
    #if the deceased population is higher than 1,000,000 people, classify as 1
    classes[result[:, -1] > 1000000] = 1
    
    return classes

#load data
results = load_results(r'./data/1000 flu cases.bz2')
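A quick standalone check of the classify logic above on toy data (hypothetical shapes: three runs whose final column is the end state; runs 1 and 3 cross the 1,000,000 threshold):

toy = {'deceased population region 1': np.array([[0.0, 2e6],
                                                 [0.0, 5e5],
                                                 [0.0, 3e6]])}
print(classify(toy))  #expected: [ 1.  0.  1.]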
Example #20
import numpy as np
import matplotlib.pyplot as plt

import analysis.prim as prim
from expWorkbench import load_results
import expWorkbench.ema_logging as ema_logging

#perform_prim logs information to the logger
ema_logging.log_to_stderr(level=ema_logging.INFO)


def classify(data):
    #get the output for deceased population
    result = data['deceased population region 1']

    #make an empty array of length equal to the number of cases
    classes = np.zeros(result.shape[0])

    #if the deceased population is higher than 1,000,000 people,
    #classify as 1
    classes[result[:, -1] > 1000000] = 1

    return classes


#load data
results = load_results(r'../../../src/analysis/1000 flu cases.cPickle',
                       zipped=False)
experiments, results = results

#extract results for 1 policy
Example #21
#            if len(activationTimeStep) > 0:
#                activationTimeStep = activationTimeStep[0]
#            else:
#                activationTimeStep = np.array([0])
#            results[output.name] = activationTimeStep

        self.output = results
        if error:
            raise CaseError("run not completed", case)

if __name__ == "__main__":
    logger = logging.log_to_stderr(logging.INFO)
    model = EnergyTrans(r'..\..\models\CANER\CESUN', "ESDMAElecTrans")
    model.step = 4  #reduce data to be stored
    ensemble = SimpleModelEnsemble()
    ensemble.set_model_structure(model)
    ensemble.parallel = True
    policies = [
        {
            'name': 'No Policy',
            'file': r'\CESUN_no.vpm'
        },
        {
            'name': 'Basic Policy',
            'file': r'\CESUN_basic.vpm'
        },
        {
Example #22
            results[output.name] = activationTimeStep
            
        
        self.output = results   
        if error:
            raise CaseError("run not completed", case) 

def obj_func(outcomes):
    #score a policy by the fraction of runs in which the final share of new
    #technologies exceeds 0.6; the trailing comma returns a 1-tuple, as the
    #optimizer expects a tuple of fitness values
    outcome = outcomes['total fraction new technologies']
    zeros = np.zeros((outcome.shape[0], 1))
    zeros[outcome[:,-1]>0.6] = 1
    value = np.sum(zeros)/zeros.shape[0]
    return value,

if __name__ == "__main__":
    ema_logging.log_to_stderr(ema_logging.INFO)
    model = EnergyTrans(r"..\data", "ESDMAElecTrans")
       
    ensemble = ModelEnsemble()
    ensemble.set_model_structure(model)
    ensemble.parallel = True
    
    policy_levers = {'Trigger subsidy T2': {'type':'range', 'values':(0,1)},
                     'Trigger subsidy T3': {'type':'range', 'values':(0,1)},
                     'Trigger subsidy T4': {'type':'range', 'values':(0,1)},
                     'Trigger addnewcom': {'type':'list', 'values':[0, 0.25, 0.5, 0.75, 1]}}
    
    stats_callback, pop   = ensemble.perform_robust_optimization(cases=10,
                                               reporting_interval=100,
                                               obj_function=obj_func,
                                               policy_levers=policy_levers,
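The objective function above scores a policy by the fraction of runs whose final value exceeds 0.6. The same arithmetic in isolation, on toy data:

import numpy as np

outcome = np.array([[0.2, 0.7],
                    [0.1, 0.4],
                    [0.3, 0.9]])  #3 runs; last column is the end state
success = outcome[:, -1] > 0.6    #boolean per run
print(success.mean())             #fraction above the threshold -> 0.666...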
Example #23
        susceptible_population_region_1 = susceptible_population_region_1_NEXT
        susceptible_population_region_2 = susceptible_population_region_2_NEXT
    
        immune_population_region_1 = immune_population_region_1_NEXT
        immune_population_region_2 = immune_population_region_2_NEXT
    
        deceased_population_region_1.append(deceased_population_region_1_NEXT)
        deceased_population_region_2.append(deceased_population_region_2_NEXT)
        
        #End of main code
    return (runTime, deceased_population_region_1) #, Max_infected, Max_time)

        
if __name__ == "__main__":
    import expWorkbench.ema_logging as logging
    np.random.seed(150) #set the seed for replication purposes
    logging.log_to_stderr(logging.INFO)
    
    fluModel = MexicanFlu(None, "mexicanFluExample")
    ensemble = ModelEnsemble()
    ensemble.parallel = True
    ensemble.set_model_structure(fluModel)
    
    nr_experiments = 500
    results = ensemble.perform_experiments(nr_experiments, reporting_interval=100)

    lines(results, outcomes_to_show="deceased_population_region_1", 
          show_envelope=True, density=KDE, titles=None, 
          experiments_to_show=np.arange(0, nr_experiments, 10)
          )
    plt.show()