Example #1
import numpy as np
import matplotlib.pyplot as plt

from expWorkbench import EMAlogging
from analysis.prim import perform_prim
from analysis import graphs


def test_box():
    EMAlogging.log_to_stderr(EMAlogging.INFO)
    
    x = np.loadtxt(r'quasiflow x.txt')
    y = np.loadtxt(r'quasiflow y.txt')
    
#    prim = prim_box(x, y, pasting=True, threshold = 0, threshold_type = -1)
    prim = perform_prim(x, y, pasting=True, threshold=0, threshold_type=-1)
    
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(x[:,0], x[:, 1], c=y)
    
    
    print '           \tmass\tmean'
    for i, entry in enumerate(prim[0:-1]):
        print 'found box %s:\t%s\t%s' %(i, entry.box_mass, entry.y_mean)
    print 'rest box    :\t%s\t%s' %(prim[-1].box_mass, prim[-1].y_mean)
    
    colors = graphs.COLOR_LIST
    for i, box in enumerate(prim):
        box = box.box
        # outline the rectangle spanned by the box limits; use separate
        # names so the data arrays x and y are not shadowed
        x_box = np.array([box[0,0], box[1,0], box[1,0], box[0,0], box[0,0]])
        y_box = np.array([box[0,1], box[0,1], box[1,1], box[1,1], box[0,1]])
        ax.plot(x_box, y_box, c=colors[i%len(colors)], lw=4)
    
    plt.show()     
Example #2
    def model_init(self, policy, kwargs):
        """initializes the model"""

        try:
            self.modelFile = policy["file"]
        except KeyError:
            logging.warning("key 'file' not found in policy")
        super(FluModel, self).model_init(policy, kwargs)
Example #3
    def model_init(self, policy, kwargs):
        '''initializes the model'''
        
        try:
            self.modelFile = policy['file']
        except KeyError:
            EMAlogging.warning("key 'file' not found in policy")
        super(SalinizationModel, self).model_init(policy, kwargs)
Example #4
def perform_experiments():
    logger = EMAlogging.log_to_stderr(level=EMAlogging.INFO)
    model = SalinizationModel(r"C:\eclipse\workspace\EMA-workbench\models\salinization", "verzilting")
    model.step = 4
    
    ensemble = SimpleModelEnsemble()
    ensemble.set_model_structure(model)

    policies=[{'name': 'no policy',
               'file': r'\verzilting 2.vpm'},
              {'name': 'policy group 8',
               'file': r'\group 8 best policy.vpm'},
              {'name': 'policy other group',
               'file': r'\other group best policy.vpm'},
              {'name': 'policies combined',
               'file': r'\best policies combined.vpm'}
              ]
    ensemble.add_policies(policies)
    
    ensemble.parallel = True
    nr_of_experiments = 1000
    results = ensemble.perform_experiments(nr_of_experiments)
    return results
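A sketch of how the returned results might then be persisted for later analysis. This assumes save_results sits alongside load_results in expWorkbench.util (Example #14 imports load_results from there, and Example #13's comments use save_results); the file name is illustrative:

from expWorkbench.util import save_results

if __name__ == '__main__':
    results = perform_experiments()
    # persist the (experiments, outcomes) tuple for later analysis
    save_results(results, r'salinization results.cPickle')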
Example #5
#        try:
#            self.modelFile = policy['file']
#        except:
#            EMAlogging.debug("no policy specified")
        super(EnergyTrans, self).model_init(policy, kwargs)
        
        #pop name
        policy = copy.copy(policy)
        policy.pop('name')
        
        for key, value in policy.items():
            vensim.set_value(key, value)
        
        
if __name__ == "__main__":
    logger = logging.log_to_stderr(logging.INFO)
    
    model = EnergyTrans(r"..\VensimModels\TFSC", "ESDMAElecTrans")
    ensemble = SimpleModelEnsemble()
    ensemble.set_model_structure(model)
    
    cases, uncertainties = ensemble._generate_cases(1)
    
    valuelist = [15.467089994193 , 18.3948367845855 , 17.5216359599053 , 0.0323513175268276 , 0.0267216806566911 , 0.0252897989265933 , 0.0211748970259063 , 0.0192967619764282 , 0.0298868721235403 , 0.026846492561752 , 0.0282265728603356 , 0.0274643497911105 , 0.0206173186487346 , 0.930953610229856 , 1.05807449426449 , 58.6261672319115 , 1.0959476696141 , 48.4897275078371 , 79.8968117041453 , 2.03012275630195 , 2.33576352581696 , 2.60266175740213 , 1.24700542123355 , 3.06884098418713 , 1 , 0 , 0 , 0 , 0 , 1.45807445678444 , 3.53395235847141 , 1.75257486371618 , 2.9795030911447 , 4.00199168664975 , 1.97473349200058 , 4.74196793795403 , 4.72730891245437 , 0 , 0 , 14826.4074143275 , 1.24609526886412 , 1.18827514220571 , 1.09824115488565 , 1245886.83942348 , 6282282.69560999 , 6118827.67237203 , 9531496.10651471 , 8693813.50295679 , 32.948697875027 , 17.1705785135149 , 13.0971274404015 , 3.74255065304761 , 1.36231655867486 , 1.92101352688469 , 3.8941723138427 , 0.898745338298322 , 0.782806406356795 , 0.817631734201507 , 0.705822656618514 , 43.3820783577107]


    newcases = [] 
    case = {}
    i=0
    for uncertainty in uncertainties:
        print uncertainty.name
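        # the listing is truncated here; presumably each uncertainty is 
        # paired with the matching entry of valuelist to build up a case 
        # (this completion is an assumption, not part of the source)
        case[uncertainty.name] = valuelist[i]
        i += 1
    newcases.append(case)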
Example #6
'''
Created on 29 sep. 2011

@author: jhkwakkel
'''
import numpy as np
import matplotlib.pyplot as plt

from expWorkbench import load_results
from analysis import prim
from expWorkbench import EMAlogging
from analysis.graphs import envelopes
EMAlogging.log_to_stderr(EMAlogging.INFO) 


results = load_results(r'C:\workspace\EMA-workbench\models\TFSC_all_policies.cPickle')
envelopes(results, 
          column='policy', 
          categories=['adaptive policy',
                      'ap with op'])

#exp, res = results
#
##get out only the results related to the last policy
#exp, res = results
#
#logical = exp['policy'] == 'adaptive policy'
#exp = exp[logical]
#
#temp_res = {}
#for key, value in res.items():
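Written out, the filtering pattern sketched in the commented block above (the same pattern Example #9 uses) would presumably look like the following; variable names follow the comments and are otherwise illustrative:

exp, res = results

# keep only the runs performed under the adaptive policy
logical = exp['policy'] == 'adaptive policy'
exp = exp[logical]

# apply the same mask to every outcome array
temp_res = {}
for key, value in res.items():
    temp_res[key] = value[logical]

results = (exp, temp_res)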
Example #7
        ParameterUncertainty((10, 100), "normal contact rate region 1"),
        ParameterUncertainty((10, 200), "normal contact rate region 2"),
    ]

    def model_init(self, policy, kwargs):
        """initializes the model"""

        try:
            self.modelFile = policy["file"]
        except KeyError:
            logging.warning("key 'file' not found in policy")
        super(FluModel, self).model_init(policy, kwargs)


if __name__ == "__main__":
    logging.log_to_stderr(logging.INFO)

    model = FluModel(r"..\..\models\flu", "fluCase")
    ensemble = SimpleModelEnsemble()
    ensemble.set_model_structure(model)

    # add policies
    policies = [
        {"name": "no policy", "file": r"\FLUvensimV1basecase.vpm"},
        {"name": "static policy", "file": r"\FLUvensimV1static.vpm"},
        {"name": "adaptive policy", "file": r"\FLUvensimV1dynamic.vpm"},
    ]
    ensemble.add_policies(policies)

    ensemble.parallel = True  # turn on parallel processing
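    # the listing is truncated here; following Example #4, the run itself
    # would presumably look like this (the number of experiments is
    # illustrative)
    nr_experiments = 1000
    results = ensemble.perform_experiments(nr_experiments)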
Example #8
    def model_init(self, policy, kwargs):
        try:
            self.modelFile = policy['file']
        except KeyError:
            EMAlogging.debug("no policy specified")
        super(EnergyTrans, self).model_init(policy, kwargs)
Example #9
import numpy as np
import matplotlib.pyplot as plt

import analysis.prim as prim
from expWorkbench import load_results
import expWorkbench.EMAlogging as EMAlogging
 
#perform_prim logs information to the logger
EMAlogging.log_to_stderr(level=EMAlogging.INFO)

def classify(data):
    # get the outcomes for the deceased population
    result = data['deceased population region 1']
    
    # make an empty array of length equal to the number of cases
    classes = np.zeros(result.shape[0])
    
    # if the deceased population is higher than 1,000,000 people, classify as 1
    classes[result[:, -1] > 1000000] = 1
    
    return classes

#load data
results = load_results(r'../analysis/1000 flu cases.cPickle')
experiments, results = results

#extract results for one policy
logicalIndex = experiments['policy'] == 'no policy'
newExperiments = experiments[logicalIndex]
newResults = {}
for key, value in results.items():
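    # the listing is truncated here; the loop body presumably applies the
    # policy mask to each outcome array (an assumption, not from the source)
    newResults[key] = value[logicalIndex]

# hand the filtered experiments and classified outcomes to PRIM, mirroring
# the array-based call in Example #1; the threshold settings below are
# illustrative, not from the source
boxes = prim.perform_prim(newExperiments,
                          classify(newResults),
                          threshold=0.8,
                          threshold_type=1)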
Example #10
    # Initiate the model to be run in game mode.
    venDLL.command("MENU>GAME")
    if start_interval > 0:
        venDLL.command('GAME>GAMEON')

    loop_on = 1
    loop_off = 0

    loop_turned_off = False
    while True:

        # Initiate the experiment of interest.
        # In other words set the uncertainties to the same value as in
        # those experiments.
        time = vensim.get_val(r'TIME')
        EMAlogging.debug(time)

        if time == (2000 + step*interval[0]) and not loop_turned_off:
            loop_turned_off = True

            if loop_index != 0:

                # If loop elimination method is based on unique edge.
                if loop_index-1 < ind_cons:
                    constant_value = vensim.get_val(edges[int(loop_index-1)][0])

                    if loop_off == 1:
                        constant_value = 0

                    vensim.set_value('value loop '+str(loop_index),
                                     constant_value)
Example #11
def perform_loop_knockout():
    unique_edges = [['In Goods', 'lost'],
                    ['loss unprofitable extraction capacity', 'decommissioning extraction capacity'],
                    ['production', 'In Goods'],
                    ['production', 'lost'],
                    ['production', 'Supply'],
                    ['Real Annual Demand', 'substitution losses'],
                    ['Real Annual Demand', 'price elasticity of demand losses'],
                    ['Real Annual Demand', 'desired extraction capacity'],
                    ['Real Annual Demand', 'economic demand growth'],
                    ['average recycling cost', 'relative market price'],
                    ['recycling fraction', 'lost'],
                    ['commissioning recycling capacity', 'Recycling Capacity Under Construction'],
                    ['maximum amount recyclable', 'recycling fraction'],
                    ['profitability recycling', 'planned recycling capacity'],
                    ['relative market price', 'price elasticity of demand losses'],
                    ['constrained desired recycling capacity', 'gap between desired and constrained recycling capacity'],
                    ['profitability extraction', 'planned extraction capacity'],
                    ['commissioning extraction capacity', 'Extraction Capacity Under Construction'],
                    ['desired recycling', 'gap between desired and constrained recycling capacity'],
                    ['Installed Recycling Capacity', 'decommissioning recycling capacity'],
                    ['Installed Recycling Capacity', 'loss unprofitable recycling capacity'],
                    ['average extraction costs', 'profitability extraction'],
                    ['average extraction costs', 'relative attractiveness recycling']]

    unique_cons_edges = [['recycling', 'recycling'],
                           ['recycling', 'supply demand ratio'],
                           ['decommissioning recycling capacity', 'recycling fraction'],
                           ['returns to scale', 'relative attractiveness recycling'],
                           ['shortage price effect', 'relative price last year'],
                           ['shortage price effect', 'profitability extraction'],
                           ['loss unprofitable extraction capacity', 'loss unprofitable extraction capacity'],
                           ['production', 'recycling fraction'],
                           ['production', 'constrained desired recycling capacity'],
                           ['production', 'new cumulatively recycled'],
                           ['production', 'effective fraction recycled of supplied'],
                           ['loss unprofitable recycling capacity', 'recycling fraction'],
                           ['average recycling cost', 'loss unprofitable recycling capacity'],
                           ['recycling fraction', 'new cumulatively recycled'],
                           ['substitution losses', 'supply demand ratio'],
                           ['Installed Extraction Capacity', 'Extraction Capacity Under Construction'],
                           ['Installed Extraction Capacity', 'commissioning extraction capacity'],
                           ['Installed Recycling Capacity', 'Recycling Capacity Under Construction'],
                           ['Installed Recycling Capacity', 'commissioning recycling capacity'],
                           ['average extraction costs', 'profitability extraction']]

#    CONSTRUCTING THE ENSEMBLE AND SAVING THE RESULTS
    EMAlogging.log_to_stderr(EMAlogging.INFO)
    results = load_results(r'base.cPickle')

#    GETTING OUT THOSE BEHAVIOURS AND EXPERIMENT SETTINGS
#    Indices of a number of examples, these will be looked at.
    runs = [526,781,911,988,10,780,740,943,573,991]
    VOI = 'relative market price'

    results_of_interest = experiment_settings(results,runs,VOI)
    cases_of_interest = experiments_to_cases(results_of_interest[0])
    behaviour_int = results_of_interest[1][VOI]

#    CONSTRUCTING INTERVALS OF ATOMIC BEHAVIOUR PATTERNS
    ints = intervals(behaviour_int,False)

#    GETTING OUT ONLY THOSE OF MAXIMUM LENGTH PER BEHAVIOUR
    max_intervals = intervals_interest(ints)

#    THIS HAS TO DO WITH THE MODEL FORMULATION OF THE SWITCHES/VALUES
    double_list = [6,9,11,17,19]

    indCons = len(unique_edges)
#    for elem in unique_cons_edges:
#        unique_edges.append(elem)

    current = os.getcwd()

    for beh_no in range(0,10):
#        beh_no = 0 # Varies between 0 and 9, index style.
        interval = max_intervals[beh_no]

        rmp = behaviour_int[beh_no]
    #    rmp = rmp[interval[0]:interval[1]]
        x = range(0,len(rmp))
        fig = plt.figure()
        ax = fig.add_subplot(111)

        vensim.be_quiet()
    #    for loop_index in range(7,8):
        for loop_index in range(1,len(unique_edges)+1):

            if loop_index-indCons > 0:
                model_location = current + r'\Models\Consecutive\Metals EMA.vpm'
            elif loop_index == 0:
                model_location = current + r'\Models\Base\Metals EMA.vpm'
            else:
                model_location = current + r'\Models\Switches\Metals EMA.vpm'

            serie = run_interval(model_location,loop_index,
                                  interval,'relative market price',
                                  unique_edges,indCons,double_list,
                                  cases_of_interest[beh_no])

            if serie.shape != rmp.shape:
                EMAlogging.info('Loop %s created a floating point error' % (loop_index))
                EMAlogging.info('Caused by trying to switch %s' % (unique_edges[loop_index-1]))

            if serie.shape == rmp.shape:
                ax.plot(x,serie,'b')

    #        data = np.zeros(rmp.shape[0])
    #        data[0:serie.shape[0]] = serie
    #        ax.plot(x,data,'b')

        ax.plot(x,rmp,'r')
        ax.axvspan(interval[0]-1,interval[1], facecolor='lightgrey', alpha=0.5)
        f_name = 'switched unique edges only'+str(beh_no)
        plt.savefig(f_name)
Example #12
def perform_regret_analysis(results,
                          policyOfInterest,
                          uncertainty1,
                          uncertainty2,
                          resolution,
                          outcomeNames = []):
    '''
    Perform a RAND-style regret analysis. That is, calculate regret across 
    all runs. Regret is understood here as the regret of the policy of 
    interest as compared to the best performing other policy. 
    
    Identify the case in which the regret is maximized. Show a 2-d slice 
    across two specified uncertainties, which contains the case where the 
    regret is maximized. So, in this slice, all the uncertainties apart from 
    the two specified ones are equal to their value in the case where the 
    regret is maximized. 
    
    This function requires a full factorial sampling as the experimental 
    design in order to work.
    
    input:
    results             default return value from modelEnsemble.runExperiments()
    policyOfInterest    name of the policy for which you want to calculate 
                        the regret
    uncertainty1        the first uncertainty across which you want to slice
    uncertainty2        the second uncertainty across which you want to slice
    resolution          resolution used in generating the full factorial
    outcomeNames        if provided, this should be a list of names of outcomes 
                        for which high is bad; the normalized results for these 
                        outcomes will be inverted
    
    NOTE: please provide the actual uncertainty objects, not their names
    
    returns:
    regretPlot      2-d array containing, for each point in the slice, the 
                    regret of the policy of interest relative to the best 
                    performing other policy
    '''
    def getIndex(valueRange, resolution, value):
        '''
        helper function to transform a case value into an index in the 
        regretPlot array
        '''
        # linearly map value from [valueRange[0], valueRange[1]] onto
        # [0, resolution-1]
        return ((resolution-1) * (value - valueRange[0])) / (valueRange[1] - valueRange[0])
        
    
    regret, cases, uncertainties = calculate_regret(results, 
                                                    policyOfInterest,
                                                    outcomeNames)

    # transform regret into a dictionary for quick lookup    
    regretDict = {}
    for entry in zip(cases, regret):
        regretDict[entry[0]] = entry[1]

    #identify maximum regret case
    maximumRegret, case = max_regret(regret, cases)
    
    # generate the cases that should be in the slice
    #
    # by generating the cases we need for the slice here
    # and combining it with the dict structure, we can fill the 
    # slice up quickly 
    #
    # another alternative approach would be to filter the available cases
    # based on the case that maximizes the regret. Only the specified 
    # uncertainties should be allowed to vary. This, however, would require 
    # us to go over the entire list of cases which can potentially become 
    # very slow
    #
    sampler = FullFactorialSampler()
    designs = sampler.generate_design([uncertainty1, 
                                      uncertainty2], 
                                      resolution)[0]
    designs = [design for design in designs]
    
    # get the indexes of the uncertainties
    # we use the max regret case and only modify the entries for
    # the uncertainties across which we want to slice
    index1 = uncertainties.index(uncertainty1.name)
    index2 = uncertainties.index(uncertainty2.name)
    
    # deduce the shape of the slice
    if len(designs) < resolution**2:
        resolution1 = len(set(np.asarray(designs)[:, 0]))
        resolution2 = len(set(np.asarray(designs)[:, 1]))
        shape = (resolution1, resolution2)
    else:
        shape = (resolution, resolution)
   
    regretPlot = np.zeros(shape)  
    case = list(case)
    for design in designs:
        case[index1] = design[0]
        case[index2] = design[1]
    
        # map case values back to index in regretPlot
        i = int(round( getIndex(uncertainty1.get_values(), 
                                regretPlot.shape[0], 
                                design[0]), 0)) 
        j = int(round( getIndex(uncertainty2.get_values(), 
                                regretPlot.shape[1], 
                                design[1]), 0))
        
        # retrieve regret for this particular case; use indexing rather than
        # .get, so that a missing case actually raises the KeyError below
        try: 
            a = regretDict[tuple(case)]
            regretPlot[i, j] = np.max(a)
        except KeyError as e:
            EMAlogging.exception('case not found')
            raise e
    return regretPlot    
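A sketch of how the returned slice might be visualized; the policy name, uncertainty objects, and resolution below are illustrative placeholders:

import matplotlib.pyplot as plt

regretPlot = perform_regret_analysis(results, 'policy of interest',
                                     uncertainty1, uncertainty2,
                                     resolution=21)
# darker cells correspond to lower regret in the 2-d slice
plt.imshow(regretPlot, origin='lower', interpolation='nearest')
plt.colorbar()
plt.show()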
Example #13
    plt.xlim(0, 801)
    color = ['grey', 'lightgrey']

    # use 'interval' rather than 'int', which would shadow the builtin
    for i, interval in enumerate(intervals):
        no = np.mod(i, len(color))
        begin = interval[0]
        end = interval[1]
        plt.axvspan(begin, end, facecolor=color[no], alpha=0.5)

    plt.show()


if __name__ == "__main__":

    #    CONSTRUCTING THE ENSEMBLE AND SAVING THE RESULTS
    EMAlogging.log_to_stderr(EMAlogging.DEBUG)
    #
    #    model = ScarcityModel(r'D:\tbm-g367\workspace\EMA workbench\src\sandbox\sils',"scarcity")
    #
    #    ensemble = ModelEnsemble()
    #    ensemble.set_model_structure(model)
    ##    ensemble.parallel = True
    #    results = ensemble.perform_experiments(1000)
    #    save_results(results, r'base.cPickle')

    results = load_results(r'base.cPickle')

    #    PLOTS FOR ENSEMBLE
    #    fig = make_interactive_plot(results, outcomes=['relative market price'], type='lines')
    #    fig = lines(results, outcomes = ['relative market price'], density=True, hist=False)
    #    plt.show()
Example #14
'''
Created on 20 sep. 2011

.. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
'''
import numpy as np
import matplotlib.pyplot as plt

from analysis.graphs import multiplot_scatter, multiplot_density, multiplot_lines
from expWorkbench.util import load_results
import expWorkbench.EMAlogging as logging

logging.log_to_stderr(level=logging.DEFAULT_LEVEL)

#load the data
experiments, results = load_results(r'..\..\src\analysis\1000 flu cases.cPickle')

#transform the results to the required format
newResults = {}

#get time and remove it from the dict
time = results.pop('TIME')

for key, value in results.items():
    if key == 'deceased population region 1':
        newResults[key] = value[:,-1] #we want the end value
    else:
        # we want the maximum value of the peak
        newResults['max peak'] = np.max(value, axis=1) 
        
        # we want the time at which the maximum occurred
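        # the listing is truncated here; presumably the time of the peak is
        # looked up by indexing TIME with argmax, along these lines (the key
        # name is illustrative, and TIME is assumed to be a runs-by-timesteps
        # array)
        peak_index = np.argmax(value, axis=1)
        newResults['time of max'] = time[np.arange(value.shape[0]),
                                         peak_index]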
Example #15
    def model_init(self, policy, kwargs):
        """
        Method to initialize the model, it is called just prior to running 
        the model its main use is to initialize aspects of the model that can 
        not be pickled. In this way it is possible to run a model in parallel 
        without having to worry about having only pickleable attributes 
        (for more details read up on the multiprocessing library
        
        """
        
        if not jpype.isJVMStarted():
            classpath = r'-Djava.class.path=C:\workspace\ElectTransEMA\bin;C:\workspace\Repast3.1\bin;C:\workspace\Repast3.1\lib\asm.jar;C:\workspace\Repast3.1\lib\beanbowl.jar;C:\workspace\Repast3.1\lib\colt.jar;C:\workspace\Repast3.1\lib\commons-collections.jar;C:\workspace\Repast3.1\lib\commons-logging.jar;C:\workspace\Repast3.1\lib\geotools_repast.jar;C:\workspace\Repast3.1\lib\ibis.jar;C:\workspace\Repast3.1\lib\jakarta-poi.jar;C:\workspace\Repast3.1\lib\jep-2.24.jar;C:\workspace\Repast3.1\lib\jgap.jar;C:\workspace\Repast3.1\lib\jh.jar;C:\workspace\Repast3.1\lib\jmf.jar;C:\workspace\Repast3.1\lib\jode-1.1.2-pre1.jar;C:\workspace\Repast3.1\lib\log4j-1.2.8.jar;C:\workspace\Repast3.1\lib\joone.jar;C:\workspace\Repast3.1\lib\JTS.jar;C:\workspace\Repast3.1\lib\junit.jar;C:\workspace\Repast3.1\lib\OpenForecast-0.4.0.jar;C:\workspace\Repast3.1\lib\openmap.jar;C:\workspace\Repast3.1\lib\plot.jar;C:\workspace\Repast3.1\lib\ProActive.jar;C:\workspace\Repast3.1\lib\trove.jar;C:\workspace\Repast3.1\lib\violinstrings-1.0.2.jar;C:\workspace\Repast3.1\repast.jar'
            jpype.startJVM(r'C:\Program Files (x86)\Java\jdk1.6.0_22\jre\bin\client\jvm.dll', classpath)
            logging.debug("jvm started")
        
        
        logging.debug("trying to find package")
        try:
            modelPackage = jpype.JPackage("org").electTransEma
#            testPkg = jpype.JPackage("org").test
        except RuntimeError as inst:
            logging.debug("exception " + repr(type(inst)) + " " + str(inst))
        except TypeError as inst:
            logging.debug("TypeError " + str(inst))
        except Exception as inst:
            logging.debug("exception " + repr(type(inst)) + " " + str(inst))
    
        else:
            logging.debug("modelPackage found")
            self.modelInterfaceClass = modelPackage.ElectTransInterface
#            testClass = testPkg.jPypeTestClass
            logging.debug("class found")
            
            try:
                directory = self.workingDirectory.replace("\\", "/")
                
                self.modelInterface = self.modelInterfaceClass(directory)
#                self.testObject = testClass()  
                logging.debug("class loaded succesfully")
            except TypeError as inst:
                logging.warning("failure to instantiate the model")
                raise inst
Example #16
        """Method for retrieving output after a model run """
        return self.result
    
    def optimize(self, case, policy):
        """Method called when using the model in an optimization context. 
        This method should return a single value that represents the 
        performance of the policy. Params are the same as for run model.
        """
        raise NotImplementedError
    
    def reset_model(self):
        """Method for resetting the model to its initial state before runModel was called"""
        self.modelInterface.resetModel()
       
if __name__ == '__main__':
    
    logger = logging.log_to_stderr(logging.DEBUG)
#    emailHander = logging.TlsSMTPHandler(("smtp.gmail.com", 587), 
#                                         '*****@*****.**', 
#                                         ['*****@*****.**'], 
#                                         'finished!', 
#                                         ('*****@*****.**', 'password'))
#    emailHander.setLevel(logging.WARNING)
#    logger.addHandler(emailHander)

    model = ElectTransEMA(r'C:\workspace\ElectTransEMA\workingDirectory', "test")
    ensemble = SimpleModelEnsemble()
    ensemble.set_model_structure(model)
    ensemble.parallel = True
    results = ensemble.perform_experiments(10)