Example No. 1
    def model_init(self, policy, kwargs):
        """
        Init of the model, The provided implementation here assumes
        that `self.model_file`  is set correctly. In case of using different
        vensim models for different policies, it is recomended to extent
        this method, extract the model file from the policy dict, set 
        `self.model_file` to this file and then call this implementation through
        calling `super`.
        
        :param policy: a dict specifying the policy. In this 
                       implementation, this argument is ignored. 
        :param kwargs: additional keyword arguments. In this implementation 
                       this argument is ignored.
        """

        load_model(self.working_directory+self.model_file) #load the model
        ema_logging.debug("model initialized successfully")

        be_quiet() # minimize the screens that are shown
        
        try:
            initialTime  = get_val('INITIAL TIME')
            finalTime = get_val('FINAL TIME')
            timeStep = get_val('TIME STEP')
            savePer = get_val('SAVEPER')
             
            if savePer > 0:
                timeStep = savePer
            
            self.runLength = int((finalTime - initialTime)/timeStep +1)
        except VensimWarning as w:
            raise EMAWarning(str(w))
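
A minimal sketch of the extension the docstring above recommends: pull the model file from the policy dict and then delegate to the base implementation via `super`. The subclass name, the 'file' policy key, and the import paths are assumptions for illustration only (the pattern mirrors Example No. 25).

from expWorkbench import ema_logging                       # assumed import path
from expWorkbench.vensim import VensimModelStructureInterface  # assumed import path

class MyVensimModel(VensimModelStructureInterface):

    def model_init(self, policy, kwargs):
        # 'file' is a hypothetical policy key used only in this sketch
        try:
            self.model_file = policy['file']
        except KeyError:
            ema_logging.debug("no model file in policy, using the default")
        super(MyVensimModel, self).model_init(policy, kwargs)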
Example No. 2
def test_save_results():
    ema_logging.log_to_stderr(ema_logging.DEBUG)
    data = util.load_results("./data/1000 flu cases no policy.cPickle", zip=False)
    file_name = "test.bz2"
    util.save_results(data, file_name)
    ema_logging.debug("removing " + file_name)
    os.remove(file_name)
Example No. 3
 def _handle_outcomes(self, fns):                  
     for key, value in fns.iteritems():
         if key in self.normal_handling: # this works for "normal" globals
             with open(value) as fh:    
                 result = fh.readline()
                 result = result.strip()
                 result = result.split()
                 result = [float(entry) for entry in result]
                 self.output[key] = np.asarray(result)
         elif key in self.once_handling:
             with open(value) as fh:
                 result = fh.readline() # read the line: zeros plus one list
                 result = result.strip() # strip whitespace
                 result = result.strip('[') # strip the bracket that may still surround the list
                 result = result.strip(']') # strip the bracket that may still surround the list
                 result = result.split() # split into elements: zeros and a list
                 results = np.zeros((self.run_length*10)) # create an empty array
                 for j, entry in enumerate(result):
                     if entry:
                         entry = float(entry)
                     else:
                         entry = 0
                     results[j] = entry
                 self.output[key] = results
         else:
             raise CaseError('no handler specified for {}'.format(key), {})
           
     for value in fns.values():
         ema_logging.debug('value: {}'.format(value))
           
         try:
             os.remove(value)
         except WindowsError:
             pass
Example No. 4
def experiment_generator(sampled_unc, model_structures, policies, sampler):
    '''
    Generator function which yields experiments: one experiment for each
    combination of sampled design, policy, and model structure interface.
    '''
    
    # experiment is made up of case, policy, and msi
    # to get case, we need msi
    
    for msi in model_structures:
        debug("generating designs for model %s" % (msi.name))
        
        samples = [sampled_unc[unc.name] for unc in msi.uncertainties
                   if unc.name in sampled_unc]
        uncertainties = [unc.name for unc in msi.uncertainties
                         if unc.name in sampled_unc]
        for policy in policies:
            debug("generating designs for policy %s" % (policy['name']))
            designs = sampler.generate_designs(samples)
            for design in designs:
                experiment = {uncertainties[i]: design[i] for i in\
                                range(len(uncertainties))}
                experiment['policy'] = policy
                experiment['model'] = msi.name
                yield experiment
Example No. 5
def test_load_results():

    data = np.random.rand(1000, 1000)
    file_name = "test.bz2"
    util.save_results(data, file_name)

    ema_logging.log_to_stderr(ema_logging.DEBUG)
    util.load_results(file_name)
    ema_logging.debug("removing " + file_name)
    os.remove(file_name)
Example No. 6
def make_name(ind):
    keys  = sorted(ind.keys())
    try:
        keys.pop(keys.index('name'))
    except ValueError:
        ema_logging.debug("value error in make name, field 'name' not in list")
    
    name = ""
    for key in keys:
        name += " "+str(ind[key])
    return name
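
A purely illustrative call of make_name: the remaining keys are sorted alphabetically and their values are concatenated, each preceded by a space, so the result starts with a space. The dict below is made up.

ind = {'name': 'policy 1', 'b': 2, 'a': 1}
print make_name(ind)   # -> " 1 2"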
Example No. 7
def read_cin_file(file):
    '''
    read a .cin file
    
    :param file: location of the .cin file.
    :exception: raises a :class:`~EMAExceptions.VensimWarning` if the cin file
                cannot be read.
    '''
    ema_logging.debug("executing COMMAND: SIMULATE>READCIN|"+file)
    try:
        command(r"SIMULATE>READCIN|"+file)
    except VensimWarning as w:
        ema_logging.debug(str(w))
        raise w
Example No. 8
    def run_model(self, case):
        
        for key, value in self.policy.items():
            vensim.set_value(key, value)
        
        switches = case.pop("preference switches")

        case["SWITCH preference for MIC"] = switches[0]
        case["SWITCH preference for expected cost per MWe"]= switches[1]
        case["SWITCH preference against unknown"]= switches[2]
        case["SWITCH preference for expected progress"]= switches[3]
        case["SWITCH preference against specific CO2 emissions"]= switches[4]
            
        for key, value in case.items():
            vensim.set_value(key, value)
        ema_logging.debug("model parameters set successfully")
        
        ema_logging.debug("run simulation, results stored in " + self.workingDirectory+self.resultFile)
        try:
            vensim.run_simulation(self.workingDirectory+self.resultFile)
        except VensimError:
            raise

        results = {}
        error = False
        for output in self.outcomes:
            ema_logging.debug("getting data for %s" %output.name)
            result = vensim.get_data(self.workingDirectory+self.resultFile, 
                              output.name 
                              )
            ema_logging.debug("successfully retrieved data for %s" %output.name)
            if not result == []:
                if result.shape[0] != self.runLength:
                    a = np.zeros((self.runLength))
                    a[0:result.shape[0]] = result
                    result = a
                    error = True
            
            else:
                result = result[0::self.step]
            try:
                results[output.name] = result
            except ValueError:
                print "what"

        for output in self.activation:
            value = results[output.name]
            time = results["TIME"]
            activationTimeStep = time[value>0]
            if activationTimeStep.shape[0] > 0:
                activationTimeStep = activationTimeStep[0]
            else:
                activationTimeStep = np.array([0])
            results[output.name] = activationTimeStep
            
        
        self.output = results   
        if error:
            raise CaseError("run not completed", case) 
Example No. 9
 def __store_result(self, result):
     for outcome in self.outcomes:
         try:
             outcome_res = result[outcome]
         except KeyError:
             ema_logging.debug("%s not in msi" % outcome)
         else:
             try:
                 self.results[outcome][self.i-1, ] = outcome_res
             except KeyError: 
                 shape = np.asarray(outcome_res).shape
                 shape = list(shape)
                 shape.insert(0, self.nr_experiments)
                 self.results[outcome] = np.empty(shape)
                 self.results[outcome][:] = np.NAN
                 self.results[outcome][self.i-1, ] = outcome_res
Example No. 10
def load_model(file):
    '''
    load the model 
    
    :param file: the location of the .vpm file to be loaded.
    :exception: raises a :class:`~EMAExceptions.VensimError` if the model 
                cannot be loaded.
    
    .. note:: only works for .vpm files
    
    '''
    ema_logging.debug("executing COMMAND: SIMULATE>SPECIAL>LOADMODEL|"+file)
    try:
        command(r"SPECIAL>LOADMODEL|"+file)
    except VensimWarning as w:
        ema_logging.warning(str(w))
        raise VensimError("vensim file not found")
Example No. 11
    def run_model(self, case):

        for key, value in self.defaults.items():
            case[key] = value

        replications = defaultdict(list)

        for i in range(self.nr_replications):
            ema_logging.debug('running replication {}'.format(i))
            self._run_case(case)

            for key, value in self.output.items():
                replications[key].append(value)

        for key, value in replications.items():
            data = np.asarray(value)
            self.output[key + '_mean'] = np.mean(data, axis=0)
            self.output[key + '_std'] = np.std(data, axis=0)
Example No. 12
    def run_model(self, case):
        
        for key, value in self.defaults.items():
            case[key] = value
        
        replications = defaultdict(list)
        
        for i in range(self.nr_replications):
            ema_logging.debug('running replication {}'.format(i))
            self._run_case(case)
 
            for key, value in self.output.items():
                replications[key].append(value)
        
        for key, value in replications.items():
            data = np.asarray(value)
            self.output[key+'_mean'] = np.mean(data, axis=0)
            self.output[key+'_std'] = np.std(data, axis=0)    
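
An illustration of the aggregation step above, taken outside the class: replications are stacked into a 2-D array and the mean and standard deviation are computed over the replication axis. The numbers are made up.

import numpy as np

replications = [np.array([1.0, 2.0]), np.array([3.0, 4.0])]
data = np.asarray(replications)    # shape (2, 2): replications x time steps
print np.mean(data, axis=0)        # -> [ 2.  3.]
print np.std(data, axis=0)         # -> [ 1.  1.]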
Example No. 13
 def __call__(self, case, policy, name, result):
     '''
     Method responsible for storing results. The implementation in this
     class only keeps track of how many runs have been completed and
     logs this.
     
     :param case: the case to be stored
     :param policy: the name of the policy being used
     :param name: the name of the model being used
     :param result: the result dict
     
     '''
     
     self.i+=1
     debug(str(self.i)+" cases completed")
     
     if self.i % self.reporting_interval == 0:
         info(str(self.i)+" cases completed")
Example No. 14
 def __init__(self, working_directory, name):
     """interface to the model
     
     :param working_directory: working_directory for the model. 
     :param name: name of the modelInterface. The name should contain only
                  alphanumerical characters. 
     
     .. note:: Anything that is relative to `self.working_directory`
               should be specified in `model_init` and not
               in `__init__`. Otherwise, the code will not work when running
               it in parallel. The reason for this is that the working
               directory is being updated by parallelEMA to the worker's 
               separate working directory prior to calling `model_init`.
     """
     super(VensimModelStructureInterface, self).__init__(working_directory, 
                                                         name)
     self.outcomes.append(Outcome('TIME' , time=True))
     
     ema_logging.debug("vensim interface init completed")
Example No. 15
def experiment_generator_predef_cases(designs, model_structures, policies):
    '''
    Generator function which yields experiments for the predefined cases:
    one experiment for each combination of case, policy, and model
    structure interface.
    '''
    
    # experiment is made up of case, policy, and msi
    # to get case, we need msi
    
    for msi in model_structures:
        debug("generating designs for model %s" % (msi.name))

        for policy in policies:
            debug("generating designs for policy %s" % (policy['name']))
            for experiment in designs:
                experiment['policy'] = copy.deepcopy(policy)
                experiment['model'] = msi.name
                yield experiment
Example No. 16
 def cleanup(self):
     '''
     Clean up prior to finishing the experiments. This will close the
     workbook and quit Excel.
     '''
     
     ema_logging.debug("cleaning up")
     if self.wb:
         self.wb.Close(False)
         del self.wb
     if self.xl:
         self.xl.DisplayAlerts = False
         self.xl.Quit()
         del self.xl
     
     
     self.xl = None
     self.wb = None
Example No. 17
def run_simulation(file):
    '''
    Convenience function to run a model and store the results of the run in
    the specified .vdf file. The specified output file will be overwritten
    by default.

    :param file: the location of the output file
    :exception: raises a :class:`~EMAExceptions.VensimError` if running
                the model failed in some way.

    '''

    try:
        ema_logging.debug(" executing COMMAND: SIMULATE>RUNNAME|"+file+"|O")
        command("SIMULATE>RUNNAME|"+file+"|O")
        ema_logging.debug(r"MENU>RUN|o")
        command(r"MENU>RUN|o")
    except VensimWarning as w:
        ema_logging.warning((str(w)))
        raise VensimError(str(w))
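
A hedged end-to-end sketch combining the wrapper functions shown in these examples (load_model in Example No. 10, read_cin_file in Example No. 7, run_simulation above, and get_data as used in Examples No. 8 and 19). The import path and the file locations are assumptions for illustration.

from expWorkbench import vensim   # assumed import path for the wrapper module

vensim.load_model(r'C:\Models\model.vpm')        # made-up .vpm location
vensim.read_cin_file(r'C:\Models\policy.cin')    # made-up .cin with constants
vensim.run_simulation(r'C:\Models\run.vdf')      # results stored in this .vdf
time_axis = vensim.get_data(r'C:\Models\run.vdf', 'TIME')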
Example No. 18
 def model_init(self, policy, kwargs):
     '''
     :param policy: policy to be run. In the default implementation, this
                    argument is ignored. Extend :meth:`model_init` to
                    specify how this argument should be used.
     :param kwargs: keyword arguments to be used by :meth:`model_init`
     
     '''
     
     if not self.xl:
         try:
             ema_logging.debug("trying to start Excel")
             self.xl = win32com.client.Dispatch("Excel.Application")
             ema_logging.debug("Excel started") 
         except com_error as e:
             raise EMAError(str(e))
     ema_logging.debug("trying to open workbook")
     self.wb = self.xl.Workbooks.Open(self.working_directory + self.workbook)
     ema_logging.debug("workbook opened")
     ema_logging.debug(self.working_directory)
Example No. 19
    def run_model(self, case):
            
        for key, value in case.items():
            vensim.set_value(key, value)
        ema_logging.debug("model parameters set successfully")
        
        ema_logging.debug("run simulation, results stored in " + self.workingDirectory+self.resultFile)
        try:
            vensim.run_simulation(self.workingDirectory+self.resultFile)
        except VensimError:
            raise

        results = {}
        error = False
        for output in self.outcomes:
            ema_logging.debug("getting data for %s" %output.name)
            result = vensim.get_data(self.workingDirectory+self.resultFile, 
                              output.name 
                              )
            ema_logging.debug("successfully retrieved data for %s" %output.name)
            if not result == []:
                if result.shape[0] != self.runLength:
                    a = np.zeros((self.runLength))
                    a[0:result.shape[0]] = result
                    result = a
                    error = True
            
            else:
                result = result[0::self.step]
            try:
                results[output.name] = result
            except ValueError:
                print "what"
    
        self.output = results   
        if error:
            raise CaseError("run not completed", case) 
Example No. 20
    def perform_experiments(self, 
                           cases,
                           callback=DefaultCallback,
                           reporting_interval=100,
                           model_kwargs = {},
                           which_uncertainties=INTERSECTION,
                           which_outcomes=INTERSECTION,
                           **kwargs):
        """
        Method responsible for running the experiments on a structure. In case 
        of multiple model structures, the outcomes are set to the intersection 
        of the sets of outcomes of the various models.         
        
        :param cases: In case of Latin Hypercube sampling and Monte Carlo 
                      sampling, cases specifies the number of cases to
                      generate. In case of Full Factorial sampling,
                      cases specifies the resolution to use for sampling
                      continuous uncertainties. Alternatively, one can supply
                      a list of dicts, where each dicts contains a case.
                      That is, an uncertainty name as key, and its value. 
        :param callback: class that will be called after finishing a
                         single experiment.
        :param reporting_interval: parameter for specifying the frequency with
                                   which the callback reports the progress.
                                   (Default is 100) 
        :param model_kwargs: dictionary of keyword arguments to be passed to 
                            model_init
        :param which_uncertainties: keyword argument for controlling whether,
                                    in case of multiple model structure 
                                    interfaces, the intersection or the union
                                    of uncertainties should be used. 
                                    (Default is intersection).  
        :param which_outcomes: keyword argument for controlling whether,
                                    in case of multiple model structure 
                                    interfaces, the intersection or the union
                                    of outcomes should be used. 
                                    (Default is intersection).  
        :param kwargs: generic keyword arguments to pass on to callback
         
                       
        :returns: a `structured numpy array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_ 
                  containing the experiments, and a dict with the names of the 
                  outcomes as keys and a numpy array as value.
                
        .. rubric:: suggested use
        
        In general, analysis scripts require both the structured array of the 
        experiments and the dictionary of arrays containing the results. The 
        recommended use is the following::
        
        >>> results = ensemble.perform_experiments(10000) #recommended use
        >>> experiments, output = ensemble.perform_experiments(10000) 
        
        The latter option will work fine, but most analysis scripts require
        you to wrap it up into a tuple again::
        
        >>> data = (experiments, output)
        
        Another reason for the recommended use is that you can save this tuple
        directly::
        
        >>> import expWorkbench.util as util
        >>> util.save_results(results, filename)
          
        .. note:: The current implementation has a hard-coded limit on the
          number of designs possible. This is set to 50,000 designs.
          If one wants to go beyond this, set `self.max_designs` to
          a higher value.
        
        """

        if not self._policies:
            self._policies.append({"name": "None"})
        
        # identify the uncertainties and sample over them
        if isinstance(cases, int):
            sampled_unc, unc_dict = self._generate_samples(cases, 
                                                           which_uncertainties)
            nr_of_exp = self.sampler.deterimine_nr_of_designs(sampled_unc)\
                      *len(self._policies)*len(self._msis)
            experiments = self._generate_experiments(sampled_unc)
        elif isinstance(cases, list):
            unc_dict = self.determine_uncertainties()[1]
            unc_names = cases[0].keys()
            sampled_unc = {name:[] for name in unc_names}
            nr_of_exp = len(cases)*len(self._policies)*len(self._msis)
            experiments = self._generate_experiments(cases)
        else:
            raise EMAError("unknown type for cases")
        uncertainties = [unc_dict[unc] for unc in sorted(sampled_unc)]

        # identify the outcomes that are to be included
        overview_dict, element_dict = self._determine_unique_attributes("outcomes")
        if which_outcomes==UNION:
            outcomes = element_dict.keys()
        elif which_outcomes==INTERSECTION:
            outcomes = overview_dict[tuple([msi.name for msi in self._msis])]
            outcomes = [outcome.name for outcome in outcomes]
        else:
            raise ValueError("incomplete value for which_outcomes")
         
        info(str(nr_of_exp) + " experiments will be executed")
                
        #initialize the callback object
        callback = callback(uncertainties, 
                            outcomes, 
                            nr_of_exp,
                            reporting_interval=reporting_interval,
                            **kwargs)

        if self.parallel:
            info("preparing to perform experiment in parallel")
            
            if not self._pool:
                self._make_pool(model_kwargs)
            info("starting to perform experiments in parallel")

            self._pool.run_experiments(experiments, callback)
        else:
            info("starting to perform experiments sequentially")

            def cleanup(modelInterfaces):
                for msi in modelInterfaces:
                    msi.cleanup()
                    del msi

            
            msi_initialization_dict = {}
            msis = {msi.name: msi for msi in self._msis}
            job_counter = itertools.count()
            
            cwd = os.getcwd() 
            for experiment in experiments:
                case_id = job_counter.next()
                policy = experiment.pop('policy')
                msi = experiment.pop('model')
                
                # check whether we already initialized the model for this 
                # policy
                if (policy['name'], msi) not in msi_initialization_dict:
                    try:
                        debug("invoking model init")
                        msis[msi].model_init(copy.deepcopy(policy),\
                                             copy.deepcopy(model_kwargs))
                    except (EMAError, NotImplementedError) as inst:
                        exception(inst)
                        cleanup(self._msis)
                        raise
                    except Exception:
                        exception("some exception occurred when invoking the init")
                        cleanup(self._msis)
                        raise 
                    debug("initialized model %s with policy %s" % (msi, policy['name']))
                    #always, only a single initialized msi instance
                    msi_initialization_dict = {(policy['name'], msi):msis[msi]}
                msi = msis[msi]

                case = copy.deepcopy(experiment)
                try:
                    debug("trying to run model")
                    msi.run_model(case)
                except CaseError as e:
                    warning(str(e))
                    
                debug("trying to retrieve output")
                result = msi.retrieve_output()
                msi.reset_model()
                
                debug("trying to reset model")
                callback(case_id, experiment, policy, msi.name, result)
                
            cleanup(self._msis)
            os.chdir(cwd)
       
        results = callback.get_results()
        info("experiments finished")
        
        return results
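
A short, hedged sketch of the call pattern recommended in the docstring above. It assumes `ensemble` is an already configured ensemble instance exposing perform_experiments; the output file name is made up.

import expWorkbench.util as util

results = ensemble.perform_experiments(10000, reporting_interval=250)
experiments, outcomes = results                  # structured array and dict of arrays
util.save_results(results, 'experiments.bz2')    # save the tuple directly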
Example No. 21
def test_callback_store_results():
    nr_experiments = 3
    uncs = [ParameterUncertainty((0,1), "a"),
           ParameterUncertainty((0,1), "b")]
    outcomes = [Outcome("test", time=True)]
    case = {unc.name:random.random() for unc in uncs}
    policy = {'name':'none'}
    name = "test"

    # case 1 scalar shape = (1)
    debug('----------- case 1 -----------')
    callback = DefaultCallback(uncs, 
                               [outcome.name for outcome in outcomes], 
                               nr_experiments=nr_experiments)
    result = {outcomes[0].name: 1}
    callback(case, policy, name, result)
    
    results = callback.get_results()
    for key, value in results[1].iteritems():
        debug("\n" + str(key) + "\n" + str(value))

    # case 2 time series shape = (1, nr_time_steps)
    debug('----------- case 2 -----------')
    callback = DefaultCallback(uncs, 
                               [outcome.name for outcome in outcomes], 
                               nr_experiments=nr_experiments)
    result = {outcomes[0].name: np.random.rand(10)}
    callback(case, policy, name, result)
    
    results = callback.get_results()
    for key, value in results[1].iteritems():
        debug("\n" + str(key) + "\n" + str(value))


    # case 3 maps etc. shape = (x,y)
    debug('----------- case 3 -----------')
    callback = DefaultCallback(uncs, 
                               [outcome.name for outcome in outcomes], 
                               nr_experiments=nr_experiments)
    result = {outcomes[0].name: np.random.rand(2,2)}
    callback(case, policy, name, result)
    
    results = callback.get_results()
    for key, value in results[1].iteritems():
        debug("\n" + str(key) + "\n" + str(value))


    # case 4 maps etc. shape = (x,y)
    debug('----------- case 4 -----------')
    callback = DefaultCallback(uncs, 
                               [outcome.name for outcome in outcomes], 
                               nr_experiments=nr_experiments)
    result = {outcomes[0].name: np.random.rand(2,2, 2)}
    callback(case, policy, name, result)
Example No. 22
def test_callback_call_union():
    # there are actually 3 cases that should be tested here
    # union unc, intersection outcomes
    # intersection unc, union outcomes
    # union unc, union outcomes
    
    # case 1 union unc, intersection outcomes
#    debug('----------- case 1 -----------')
#    nr_experiments = 10
#    uncs = [ParameterUncertainty((0,1), "a"),
#           ParameterUncertainty((0,1), "b")]
#    outcomes = [Outcome("test", time=True)]
#    callback = DefaultCallback(uncs, outcomes, nr_experiments=nr_experiments)
#    
#    policy = {"name": "none"}
#    name = "test"
#    
#    for i in range(nr_experiments):
#        if i % 2 == 0:
#            case = {uncs[0].name: np.random.rand(1)}
#        else: 
#            case = {uncs[1].name: np.random.rand(1)}
#        result = {outcome.name: np.random.rand(10) for outcome in outcomes}
#    
#        callback(case, policy, name, result)
#    
#    results = callback.get_results()
#    debug("\n"+str(results[0]))

    
    debug('----------- case 2 -----------')
#    nr_experiments = 10
#    uncs = [ParameterUncertainty((0,1), "a"),
#           ParameterUncertainty((0,1), "b")]
#    outcomes = [Outcome("test 1", time=True), 
#                Outcome("test 2", time=True)]
#    callback = DefaultCallback(uncs, outcomes, nr_experiments=nr_experiments)
#    
#    policy = {"name": "none"}
#    name = "test"
#    
#    for i in range(nr_experiments):
#        case = {unc.name: random.random()for unc in uncs}
#        if i % 2 == 0:
#            result = {outcomes[0].name: np.random.rand(10)}
#        else: 
#            result = {outcomes[1].name: np.random.rand(10)}
#    
#        callback(case, policy, name, result)
#    


  
    debug('----------- case 3 -----------')
    nr_experiments = 10
    uncs = [ParameterUncertainty((0,1), "a"),
           ParameterUncertainty((0,1), "b")]
    outcomes = [Outcome("test 1", time=True), 
                Outcome("test 2", time=True)]
    callback = DefaultCallback(uncs, outcomes, nr_experiments=nr_experiments)
    
    policy = {"name": "none"}
    name = "test"
    
    for i in range(nr_experiments):
        if i % 2 == 0:
            case = {uncs[0].name: random.random()}
            result = {outcomes[0].name: np.random.rand(10)}
        else: 
            case = {uncs[1].name: random.random()}
            result = {outcomes[1].name: np.random.rand(10)}
    
        callback(case, policy, name, result)
    
    results = callback.get_results()
    debug("\n"+str(results[0]))
    for key, value in results[1].iteritems():
        debug("\n" + str(key) + "\n" + str(value))   
Example No. 23
def envelopes(results, 
              outcomes_to_show = [],
              group_by = None,
              grouping_specifiers = None,
              density='',
              fill=False,
              legend=True,
              titles={},
              ylabels={},
              **kwargs):
    '''
    
    Make envelope plots. An envelope shows the minimum and maximum value
    over time for a set of runs. It is thus to be used in case of time
    series data. The function will try to find a result labeled "TIME". If this
    is present, these values will be used on the X-axis. In case of Vensim 
    models, TIME is present by default.  
    
    :param results: return from :meth:`perform_experiments`.
    :param outcomes_to_show: list of outcome of interest you want to plot. If 
                             empty, all outcomes are plotted. **Note**:  just 
                             names.
    :param group_by: name of the column in the cases array to group results by.
                     Alternatively, `index` can be used to use indexing arrays 
                     as the basis for grouping.
    :param grouping_specifiers: set of categories to be used as a basis for 
                                grouping by. Grouping_specifiers is only 
                                meaningful if group_by is provided as well. In
                                case of grouping by index, the grouping 
                                specifiers should be in a dictionary where the
                                key denotes the name of the group. 
    :param density: boolean, if true, the density of the endstates will be 
                    plotted.
    :param fill: boolean, if true, fill the envelope. 
    :param legend: boolean, if true, and there is a column specified for 
                   grouping, show a legend.
    :param titles: a way for controlling whether each of the axes should have
                   a title. There are three possibilities. If set to None, no
                   title will be shown for any of the axes. If set to an empty 
                   dict, the default, the title is identical to the name of the
                   outcome of interest. If you want to override these default 
                   names, provide a dict with the outcome of interest as key 
                   and the desired title as value. This dict need only contain
                   the outcomes for which you want to use a different title. 
    :param ylabels: a way for controlling the ylabels. Works identical to 
                    titles.
    :rtype: a `figure <http://matplotlib.sourceforge.net/api/figure_api.html>`_ instance
            and a dict with the individual axes.
            
    Additional key word arguments will be passed along to the density function,
    if density is `True`.
    
    ======== ===================================
    property description
    ======== ===================================
    log      log the resulting histogram or GKDE
    ======== ===================================
    
    .. rubric:: an example of use
    
    >>> import expWorkbench.util as util
    >>> data = util.load_results(r'1000 flu cases.cPickle')
    >>> envelopes(data, group_by='policy')
    
    will show an envelope for three different policies, for all the 
    outcomes of interest.
    
    .. plot:: ../docs/source/pyplots/basicEnvelope.py
   
     while

    >>> envelopes(data, group_by='policy', grouping_specifiers=['static policy', 'adaptive policy'])
    
    will only show results for the two specified policies, ignoring any results 
    associated with \'no policy\'.

    .. plot:: ../docs/source/pyplots/basicEnvelope2.py
    
    .. note:: the current implementation is limited to seven different
              categories when grouping (group_by and/or grouping_specifiers).
              This limit is due to the colors specified in COLOR_LIST.
    '''
    debug("generating envelopes")
   
    prepared_data = prepare_data(results, outcomes_to_show, group_by,
                                 grouping_specifiers)
    outcomes, outcomes_to_show, time, grouping_labels = prepared_data
    
    figure, grid = make_grid(outcomes_to_show, density)
    
    # do the plotting
    axes_dict = {}
    for i, outcome_to_plot in enumerate(outcomes_to_show):
        ax = figure.add_subplot(grid[i,0])
        axes_dict[outcome_to_plot] = ax
        
        ax_d= None
        if density:
            ax_d = figure.add_subplot(grid[i,1])
            axes_dict[outcome_to_plot+"_density"] = ax_d
    
        if group_by:
#            group_labels = sorted(outcomes.keys())
            group_by_envelopes(outcomes,outcome_to_plot, time, density,
                               ax, ax_d, fill, grouping_labels, **kwargs)
        else:
            single_envelope(outcomes, outcome_to_plot, time, density,
                            ax, ax_d, fill, **kwargs)
            
        if ax_d:
            for tl in ax_d.get_yticklabels():
                tl.set_visible(False)
        
        ax.set_xlabel(TIME_LABEL)
        do_ylabels(ax, ylabels, outcome_to_plot)
        do_titles(ax, titles, outcome_to_plot)
        
    if legend and group_by:
        make_legend(grouping_labels, figure)
    
    if plotting_util.TIGHT:
        grid.tight_layout(figure)
    
    return figure, axes_dict
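
A hedged usage sketch based on the docstring above: load saved results, draw envelopes grouped by policy, and display the figure. The result file name is taken from the docstring example; using matplotlib's pyplot to show the figure is an assumption about the surrounding workflow.

import matplotlib.pyplot as plt
import expWorkbench.util as util

results = util.load_results(r'1000 flu cases.cPickle')
fig, axes = envelopes(results, group_by='policy', fill=True)
plt.show()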
Example No. 24
def lines(results, 
          outcomes_to_show = [],
          group_by = None,
          grouping_specifiers = None,
          density='',
          titles={},
          ylabels={},
          legend=True,
          experiments_to_show=None,
          show_envelope=False,
          **kwargs):
    '''
    
    This function takes the results from :meth:`perform_experiments` and 
    visualizes these as line plots. It is thus to be used in case of time 
    series data. The function will try to find a result labeled "TIME". If this
    is present, these values will be used on the X-axis. In case of Vensim 
    models, TIME is present by default.  

    :param results: return from :meth:`perform_experiments`.
    :param outcomes_to_show: list of outcome of interest you want to plot. If 
                             empty, all outcomes are plotted. **Note**:  just 
                             names.
    :param group_by: name of the column in the cases array to group results by.
                     Alternatively, `index` can be used to use indexing arrays 
                     as the basis for grouping.
    :param grouping_specifiers: set of categories to be used as a basis for 
                                grouping by. Grouping_specifiers is only 
                                meaningful if group_by is provided as well. In
                                case of grouping by index, the grouping 
                                specifiers should be in a dictionary where the
                                key denotes the name of the group. 
    :param density: boolean, if true, the density of the endstates will be 
                    plotted.
    :param legend: boolean, if true, and there is a column specified for 
                   grouping, show a legend.
    :param titles: a way for controlling whether each of the axes should have
                   a title. There are three possibilities. If set to None, no
                   title will be shown for any of the axes. If set to an empty 
                   dict, the default, the title is identical to the name of the
                   outcome of interest. If you want to override these default 
                   names, provide a dict with the outcome of interest as key 
                   and the desired title as value. This dict need only contain
                   the outcomes for which you want to use a different title. 
    :param ylabels: a way for controlling the ylabels. Works identical to 
                    titles.
    :param experiments_to_show: numpy array containing the indices of the 
                                experiments to be visualized. Defaults to None,
                                implying that all experiments should be shown.
    :param show_envelope: boolean, indicates whether envelopes should be 
                          plotted in combination with lines. Default is False.
    :rtype: a `figure <http://matplotlib.sourceforge.net/api/figure_api.html>`_ instance
            and a dict with the individual axes.
   
    .. note:: the current implementation is limited to seven different
          categories when grouping (group_by and/or grouping_specifiers).
          This limit is due to the colors specified in COLOR_LIST.
    '''
    
    debug("generating line graph")

    if show_envelope:
        return plot_lines_with_envelopes(results, 
                                outcomes_to_show=outcomes_to_show, 
                                group_by=group_by, legend=legend, density=density,    
                                grouping_specifiers=grouping_specifiers, 
                                experiments_to_show=experiments_to_show, 
                                titles=titles, ylabels=ylabels, **kwargs)
    
    if experiments_to_show is not None:
        experiments, outcomes = results
        experiments = experiments[experiments_to_show]
        new_outcomes = {}
        for key, value in outcomes.items():
            new_outcomes[key] = value[experiments_to_show]
        results = experiments, new_outcomes

    data = prepare_data(results, outcomes_to_show, group_by, 
                        grouping_specifiers)
    outcomes, outcomes_to_show, time, grouping_labels = data

    figure, grid = make_grid(outcomes_to_show, density)
    axes_dict = {}

    # do the plotting
    for i, outcome_to_plot in enumerate(outcomes_to_show):
        ax = figure.add_subplot(grid[i,0])
        axes_dict[outcome_to_plot] = ax
        
        ax_d= None
        if density:
            ax_d = figure.add_subplot(grid[i,1])
            axes_dict[outcome_to_plot+"_density"] = ax_d
            
            for tl in ax_d.get_yticklabels():
                tl.set_visible(False)
    
        if group_by:
#            group_by_labels = sorted(outcomes.keys())
            group_by_lines(outcomes,outcome_to_plot, time, density,
                           ax, ax_d, grouping_labels, **kwargs)
        else:
            simple_lines(outcomes, outcome_to_plot, time, density,
                         ax, ax_d, **kwargs)
        ax.set_xlabel(TIME_LABEL)
        do_ylabels(ax, ylabels, outcome_to_plot)
        do_titles(ax, titles, outcome_to_plot)
            
    if legend and group_by:
        make_legend(grouping_labels, figure)
    
    if plotting_util.TIGHT:
        grid.tight_layout(figure)
    
    return figure, axes_dict
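
A hedged usage sketch for lines, mirroring the envelopes example above: show only the first 100 experiments, grouped by policy, with envelopes overlaid. The file name comes from the envelopes docstring and the subset of 100 experiments is arbitrary.

import numpy as np
import matplotlib.pyplot as plt
import expWorkbench.util as util

results = util.load_results(r'1000 flu cases.cPickle')
fig, axes = lines(results, group_by='policy',
                  experiments_to_show=np.arange(100),   # arbitrary subset
                  show_envelope=True)
plt.show()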
Example No. 25
class EnergyTrans(VensimModelStructureInterface):
    def __init__(self, workingDirectory, name):
        """interface to the model"""
        super(EnergyTrans, self).__init__(workingDirectory, name)

        self.modelFile = r'\CESUN_optimized_new.vpm'

        #outcomes
        self.outcomes.append(
            Outcome('total fraction new technologies', time=True))
        self.outcomes.append(Outcome('total capacity installed', time=True))

        #Initial values
        self.uncertainties.append(
            ParameterUncertainty((14000, 16000), "ini cap T1"))  #
        self.uncertainties.append(ParameterUncertainty((1, 2),
                                                       "ini cap T2"))  #
        self.uncertainties.append(ParameterUncertainty((1, 2),
                                                       "ini cap T3"))  #
        self.uncertainties.append(ParameterUncertainty((1, 2),
                                                       "ini cap T4"))  #
        self.uncertainties.append(
            ParameterUncertainty((500000, 1500000), "ini cost T1"))  #1000000
        self.uncertainties.append(
            ParameterUncertainty((5000000, 10000000), "ini cost T2"))  #8000000
        self.uncertainties.append(
            ParameterUncertainty((5000000, 10000000), "ini cost T3"))  #8000000
        self.uncertainties.append(
            ParameterUncertainty((5000000, 10000000), "ini cost T4"))  #8000000
        self.uncertainties.append(
            ParameterUncertainty((5000000, 10000000), "ini cum decom cap T1"))
        self.uncertainties.append(
            ParameterUncertainty((1, 100), "ini cum decom cap T2"))
        self.uncertainties.append(
            ParameterUncertainty((1, 100), "ini cum decom cap T3"))
        self.uncertainties.append(
            ParameterUncertainty((1, 100), "ini cum decom cap T4"))
        self.uncertainties.append(
            ParameterUncertainty(
                (1, 5), "average planning and construction period T1"))
        self.uncertainties.append(
            ParameterUncertainty(
                (1, 5), "average planning and construction period T2"))
        self.uncertainties.append(
            ParameterUncertainty(
                (1, 5), "average planning and construction period T3"))
        self.uncertainties.append(
            ParameterUncertainty(
                (1, 5), "average planning and construction period T4"))
        self.uncertainties.append(
            ParameterUncertainty((0.85, 0.95), "ini PR T1"))
        self.uncertainties.append(
            ParameterUncertainty((0.7, 0.95), "ini PR T2"))
        self.uncertainties.append(
            ParameterUncertainty((0.7, 0.95), "ini PR T3"))
        self.uncertainties.append(
            ParameterUncertainty((0.7, 0.95), "ini PR T4"))

        #Plain Parametric Uncertainties
        self.uncertainties.append(ParameterUncertainty((30, 50),
                                                       "lifetime T1"))
        self.uncertainties.append(ParameterUncertainty((15, 20),
                                                       "lifetime T2"))
        self.uncertainties.append(ParameterUncertainty((15, 20),
                                                       "lifetime T3"))
        self.uncertainties.append(ParameterUncertainty((15, 20),
                                                       "lifetime T4"))
        #
        #        #One uncertain development over time -- smoothed afterwards
        self.uncertainties.append(
            ParameterUncertainty((0.03, 0.035), "ec gr t1"))  #0.03
        self.uncertainties.append(
            ParameterUncertainty((-0.01, 0.03), "ec gr t2"))  #0.03
        self.uncertainties.append(
            ParameterUncertainty((-0.01, 0.03), "ec gr t3"))  #0.03
        self.uncertainties.append(
            ParameterUncertainty((-0.01, 0.03), "ec gr t4"))  #0.03
        self.uncertainties.append(
            ParameterUncertainty((-0.01, 0.03), "ec gr t5"))  #0.03
        self.uncertainties.append(
            ParameterUncertainty((-0.01, 0.03), "ec gr t6"))  #0.03
        self.uncertainties.append(
            ParameterUncertainty((-0.01, 0.03), "ec gr t7"))  #0.03
        self.uncertainties.append(
            ParameterUncertainty((-0.01, 0.03), "ec gr t8"))  #0.03
        self.uncertainties.append(
            ParameterUncertainty((-0.01, 0.03), "ec gr t9"))  #0.03
        self.uncertainties.append(
            ParameterUncertainty((-0.01, 0.03), "ec gr t10"))  #0.03

        #Uncertainties in Random Functions
        self.uncertainties.append(
            ParameterUncertainty((0.9, 1), "random PR min"))
        self.uncertainties.append(
            ParameterUncertainty((1, 1.1), "random PR max"))
        self.uncertainties.append(
            ParameterUncertainty((1, 100), "seed PR T1", integer=True))
        self.uncertainties.append(
            ParameterUncertainty((1, 100), "seed PR T2", integer=True))
        self.uncertainties.append(
            ParameterUncertainty((1, 100), "seed PR T3", integer=True))
        self.uncertainties.append(
            ParameterUncertainty((1, 100), "seed PR T4", integer=True))

        #Uncertainties in Preference Functions
        self.uncertainties.append(
            ParameterUncertainty((2, 5), "absolute preference for MIC"))
        self.uncertainties.append(
            ParameterUncertainty(
                (1, 3), "absolute preference for expected cost per MWe"))
        self.uncertainties.append(
            ParameterUncertainty((2, 5),
                                 "absolute preference against unknown"))
        self.uncertainties.append(
            ParameterUncertainty((1, 3),
                                 "absolute preference for expected progress"))
        self.uncertainties.append(
            ParameterUncertainty(
                (2, 5), "absolute preference against specific CO2 emissions"))
        # add switches so that the above criteria are either used or not...
        self.uncertainties.append(
            CategoricalUncertainty((0, 1),
                                   "SWITCH preference for MIC",
                                   default=1))
        self.uncertainties.append(
            CategoricalUncertainty(
                (0, 1),
                "SWITCH preference for expected cost per MWe",
                default=0))
        self.uncertainties.append(
            CategoricalUncertainty((0, 1),
                                   "SWITCH preference against unknown",
                                   default=0))
        self.uncertainties.append(
            CategoricalUncertainty((0, 1),
                                   "SWITCH preference for expected progress",
                                   default=0))
        self.uncertainties.append(
            CategoricalUncertainty(
                (0, 1),
                "SWITCH preference against specific CO2 emissions",
                default=0))
        # Uncertainties that still need to be adjusted
        self.uncertainties.append(
            ParameterUncertainty((1, 2),
                                 "performance expected cost per MWe T1"))
        self.uncertainties.append(
            ParameterUncertainty((1, 5),
                                 "performance expected cost per MWe T2"))
        self.uncertainties.append(
            ParameterUncertainty((1, 5),
                                 "performance expected cost per MWe T3"))
        self.uncertainties.append(
            ParameterUncertainty((1, 5),
                                 "performance expected cost per MWe T4"))
        self.uncertainties.append(
            ParameterUncertainty((4, 5), "performance CO2 avoidance T1"))
        self.uncertainties.append(
            ParameterUncertainty((1, 5), "performance CO2 avoidance T2"))
        self.uncertainties.append(
            ParameterUncertainty((1, 5), "performance CO2 avoidance T3"))
        self.uncertainties.append(
            ParameterUncertainty((1, 5), "performance CO2 avoidance T4"))

        #        # Switches on technologies
        self.uncertainties.append(
            CategoricalUncertainty((0, 1), "SWITCH T3", default=1))
        self.uncertainties.append(
            CategoricalUncertainty((0, 1), "SWITCH T4", default=1))
        self.uncertainties.append(
            CategoricalUncertainty(([(0, 0, 0, 0, 1), (0, 0, 0, 1, 0),
                                     (0, 0, 0, 1, 1), (0, 0, 1, 0, 0),
                                     (0, 0, 1, 0, 1), (0, 0, 1, 1, 0),
                                     (0, 0, 1, 1, 1), (0, 1, 0, 0, 0),
                                     (0, 1, 0, 0, 1), (0, 1, 0, 1, 0),
                                     (0, 1, 0, 1, 1), (0, 1, 1, 0, 0),
                                     (0, 1, 1, 0, 1), (0, 1, 1, 1, 0),
                                     (0, 1, 1, 1, 1), (1, 0, 0, 0, 0),
                                     (1, 0, 0, 0, 1), (1, 0, 0, 1, 0),
                                     (1, 0, 0, 1, 1), (1, 0, 1, 0, 0),
                                     (1, 0, 1, 0, 1), (1, 0, 1, 1, 0),
                                     (1, 0, 1, 1, 1), (1, 1, 0, 0, 0),
                                     (1, 1, 0, 0, 1), (1, 1, 0, 1, 0),
                                     (1, 1, 0, 1, 1), (1, 1, 1, 0, 0),
                                     (1, 1, 1, 0, 1), (1, 1, 1, 1, 0),
                                     (1, 1, 1, 1, 1)]), "preference switches"))

        #switch for addfactor activation
#        self.uncertainties.append(CategoricalUncertainty((0,1), "switchaddfactorco2", default = 1))

    def model_init(self, policy, kwargs):
        try:
            self.modelFile = policy['file']
        except KeyError:
            logging.debug("no model file specified in the policy")
        super(EnergyTrans, self).model_init(policy, kwargs)

    def run_model(self, case):
        switches = case.pop("preference switches")

        case["SWITCH preference for MIC"] = switches[0]
        case["SWITCH preference for expected cost per MWe"] = switches[1]
        case["SWITCH preference against unknown"] = switches[2]
        case["SWITCH preference for expected progress"] = switches[3]
        case["SWITCH preference against specific CO2 emissions"] = switches[4]

        if np.sum(switches) == 0:
            print "sifir olan cikti haci!"

        for key, value in case.items():
            vensim.set_value(key, value)
        ema_logging.debug("model parameters set successfully")

        ema_logging.debug("run simulation, results stored in " +
                          self.workingDirectory + self.resultFile)
        try:
            vensim.run_simulation(self.workingDirectory + self.resultFile)
        except VensimError as e:
            raise

        results = {}
        error = False
        for output in self.outcomes:
            ema_logging.debug("getting data for %s" % output.name)
            result = vensim.get_data(self.workingDirectory + self.resultFile,
                                     output.name)
            ema_logging.debug("successfully retrieved data for %s" %
                              output.name)
            if not result == []:
                if result.shape[0] != self.runLength:
                    a = np.zeros((self.runLength))
                    a[0:result.shape[0]] = result
                    result = a
                    error = True

            else:
                result = result[0::self.step]
            try:
                results[output.name] = result
            except ValueError as e:
                print "what"


#        for output in self.activation:
#            value = results[output.name]
#            time = results["TIME"]
#            activationTimeStep = time[value>0]
#            if len(activationTimeStep) > 0:
#                activationTimeStep = activationTimeStep[0]
#            else:
#                activationTimeStep = np.array([0])
##            if activationTimeStep.shape[0] > 0:
##                activationTimeStep = activationTimeStep
##            else:
##                activationTimeStep = np.array([0])
#            results[output.name] = activationTimeStep

        self.output = results
        if error:
            raise CaseError("run not completed", case)
Example No. 26
    def _run_optimization(self, toolbox, generate_individual, 
                           evaluate_population, attr_list, keys, obj_function, 
                           pop_size, reporting_interval, weights, 
                           nr_of_generations, crossover_rate, mutation_rate,
                           levers, caching, **kwargs):
        '''
        Helper function that runs the actual optimization
                
        :param toolbox: 
        :param generate_individual: helper function for generating an 
                                    individual
        :param evaluate_population: helper function for evaluating the 
                                    population
        :param attr_list: list of attributes (alleles)
        :param keys: the names of the attributes in the same order as attr_list
        :param obj_function: the objective function
        :param pop_size: the size of the population
        :param reporting_interval: the interval for reporting progress, passed
                                   on to perform_experiments
        :param weights: the weights on the outcomes
        :param nr_of_generations: number of generations for which the GA will 
                                  be run
        :param crossover_rate: the crossover rate of the GA
        :param mutation_rate: the mutation rate of the GA
        :param levers: a dictionary with param keys as keys, and as values
                       info used in mutation.
        
        '''
        # figure out whether we are doing single or multi-objective 
        # optimization
        #TODO raise error if not specified
        single_obj = True
        if len(weights) >1: 
            single_obj=False
        
        # Structure initializers
        toolbox.register("individual", 
                         generate_individual, 
                         creator.Individual, #@UndefinedVariable
                         attr_list, keys=keys) 
        toolbox.register("population", tools.initRepeat, list, 
                         toolbox.individual)
    
        # Operator registering
        toolbox.register("evaluate", obj_function)
        toolbox.register("crossover", tools.cxOnePoint)
        toolbox.register("mutate", mut_polynomial_bounded)
       
        if single_obj:
            toolbox.register("select", tools.selTournament)
        else:
            toolbox.register("select", tools.selNSGA2)

        # generate population
        # for some stupid reason, DEAP demands a multiple of four for 
        # population size in case of NSGA-2 
        pop_size = closest_multiple_of_four(pop_size)
        info("population size restricted to %s " % (pop_size))
        pop = toolbox.population(pop_size)
        
        debug("Start of evolution")
        
        # Evaluate the entire population
        evaluate_population(pop, reporting_interval, toolbox, self)
        
        if not single_obj:
            # This is just to assign the crowding distance to the individuals
            tools.assignCrowdingDist(pop)
    
        #some statistics logging
        stats_callback = NSGA2StatisticsCallback(weights=weights,
                                    nr_of_generations=nr_of_generations,
                                    crossover_rate=crossover_rate, 
                                    mutation_rate=mutation_rate, 
                                    pop_size=pop_size, 
                                    caching=caching)
        stats_callback(pop)
        stats_callback.log_stats(0)

        # Begin the generational process
        for gen in range(nr_of_generations):
            pop = self._run_geneneration(pop, crossover_rate, mutation_rate, 
                                          toolbox, reporting_interval, levers, 
                                          evaluate_population, keys, 
                                          single_obj, stats_callback, 
                                          caching, **kwargs)
            stats_callback(pop)
            stats_callback.log_stats(gen)    
        info("-- End of (successful) evolution --")

        return stats_callback, pop        
Example No. 27
def run_interval(model,loop_index,interval,VOI,edges,ind_cons,
                 double_list,case):
    
    # Load the model.
    vensim.load_model(model)
    
    case = copy.deepcopy(case)
    set_lookups(case)
    
    for key,value in case.items():
        vensim.set_value(key,repr(value))
#        print key, repr(value), vensim.get_val(key), value-vensim.get_val(key)

    # We run the model in game mode.
    step = vensim.get_val(r'TIME STEP')
    start_interval = interval[0]*step
    end_interval = interval[1]*step
    venDLL.command('GAME>GAMEINTERVAL|'+str(start_interval))

    # Initiate the model to be run in game mode.
    venDLL.command("MENU>GAME")
    if start_interval > 0:
        venDLL.command('GAME>GAMEON')

    loop_on = 1
    loop_off = 0

    loop_turned_off = False
    while True:

        # Initiate the experiment of interest.
        # In other words set the uncertainties to the same value as in
        # those experiments.
        time = vensim.get_val(r'TIME')
        ema_logging.debug(time)
        
        if time ==(2000+step*interval[0]) and not loop_turned_off:
            loop_turned_off = True
            
            if loop_index != 0:
                
                # If loop elimination method is based on unique edge.
                if loop_index-1 < ind_cons:
                    constant_value = vensim.get_val(edges[int(loop_index-1)][0])
                    
                    if loop_off==1:
                        constant_value = 0
                    
                    vensim.set_value('value loop '+str(loop_index),
                                     constant_value)
                    vensim.set_value('switch loop '+str(loop_index),
                                     loop_off)
        
                # Else it is based on unique consecutive edges.
                else:
                    constant_value = vensim.get_val(edges[int(loop_index-1)][0])
                    
                    if loop_off==1:
                        constant_value = 0
                    
                    # For loops in double_list, the constant's name is shifted 
                    # by one relative to loop_index minus ind_cons.
                    if loop_index-ind_cons in double_list:
                        vensim.set_value('value cons loop '+str(loop_index-ind_cons-1),
                                         constant_value)
                        vensim.set_value('switch cons loop '+str(loop_index-ind_cons-1),
                                         loop_off)
                    else:
                        vensim.set_value('value cons loop '+str(loop_index-ind_cons),
                                         constant_value)
                        vensim.set_value('switch cons loop '+str(loop_index-ind_cons),
                                         loop_off)
                        
            venDLL.command('GAME>GAMEINTERVAL|'+str(end_interval-start_interval))
            
        elif time ==(2000+step*interval[1]) and loop_turned_off:
            loop_turned_off = False
            if loop_index != 0:
                # If loop elimination method is based on unique edge.
                if loop_index-1 < ind_cons:
                    constant_value = 0
                    vensim.set_value('value loop '+str(loop_index),
                                     constant_value)
                    vensim.set_value('switch loop '+str(loop_index),
                                     loop_on)
        
                # Else it is based on unique consecutive edges.
                else:
                    constant_value = 0
                    # For loops in double_list, the constant's name is shifted 
                    # by one relative to loop_index minus ind_cons.
                    if loop_index-ind_cons in double_list:
                        vensim.set_value('value cons loop '+str(loop_index-ind_cons-1),
                                         constant_value)
                        vensim.set_value('switch cons loop '+str(loop_index-ind_cons-1),
                                         loop_on)
                    else:
                        vensim.set_value('value cons loop '+str(loop_index-ind_cons),
                                         constant_value)
                        vensim.set_value('switch cons loop '+str(loop_index-ind_cons),
                                         loop_on)
            
            finalT = vensim.get_val('FINAL TIME')
            currentT = vensim.get_val('TIME')
            venDLL.command('GAME>GAMEINTERVAL|'+str(finalT - currentT))
        
        else:
            break
        
        finalT = vensim.get_val('FINAL TIME')
        currentT = vensim.get_val('TIME')
        if finalT != currentT:
            venDLL.command('GAME>GAMEON')
    
    venDLL.command('GAME>ENDGAME')
    interval_series = vensim.get_data('Base.vdf',VOI)

    
    return interval_series
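
A hedged usage sketch of the function above; the model file, variable names, loop structure, and case values below are placeholders rather than inputs taken from any real analysis:

# hypothetical inputs, for illustration only
case = {'contact rate': 40, 'recovery time': 0.7}
edges = [('infection rate', 'infected'), ('infected', 'recovery rate')]

series = run_interval('model.vpm',        # Vensim model to load
                      loop_index=1,       # feedback loop to switch off
                      interval=(10, 20),  # start and end, in multiples of TIME STEP
                      VOI='infected',     # variable of interest returned from Base.vdf
                      edges=edges,
                      ind_cons=2,         # index at which consecutive-edge loops start
                      double_list=[],     # loops whose constant name is shifted by one
                      case=case)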
Exemplo n.º 28
def pairs_density(results, 
                  outcomes_to_show = [],
                  group_by = None,
                  grouping_specifiers = None,
                  ylabels = {},
                  point_in_time=-1,
                  log=True,
                  gridsize=50,
                  colormap='jet',
                  filter_scalar=True): 
    '''
    
    Generate a `R style pairs <http://www.stat.psu.edu/~dhunter/R/html/graphics/html/pairs.html>`_ 
    hexbin density multiplot. In case of time-series data, the end states are 
    used.
    
    hexbin makes hexagonal binning plot of x versus y, where x, y are 1-D 
    sequences of the same length, N. If C is None (the default), this is a 
    histogram of the number of occurrences of the observations at (x[i],y[i]).
    For further detail see `matplotlib on hexbin <http://matplotlib.sourceforge.net/api/pyplot_api.html#matplotlib.pyplot.hexbin>`_
    
    :param results: return from perform_experiments.
    :param outcomes_to_show: list of outcome of interest you want to plot. If 
                             empty, all outcomes are plotted.
    :param group_by: name of the column in the cases array to group results by.
                     Alternatively, `index` can be used to use indexing arrays 
                     as the basis for grouping.
    :param grouping_specifiers: set of categories to be used as a basis for 
                                grouping by. Grouping_specifiers is only 
                                meaningful if group_by is provided as well. In
                                case of grouping by index, the grouping 
                                specifiers should be in a dictionary where the
                                key denotes the name of the group. 
    :param ylabels: ylabels is a dictionary with the outcome names as keys, the 
                    specified values will be used as labels for the y axis. 
    :param point_in_time: the point in time at which the scatter is to be made.
                          If None is provided, the end states are used. 
                          point_in_time should be a valid value on time
    :param log: boolean, indicating whether density should be log scaled. 
                Defaults to True.
    :param gridsize: controls the gridsize for the hexagonal binning
    :param colormap: color map that is to be used in generating the hexbin. 
                     For details on the available maps, 
                     see `pylab <http://matplotlib.sourceforge.net/examples/pylab_examples/show_colormaps.html#pylab-examples-show-colormaps>`_.
                     Defaults to 'jet'.
    :param filter_scalar: boolean, remove the non-time-series outcomes.  
                          Defaults to True.
    :rtype: a `figure <http://matplotlib.sourceforge.net/api/figure_api.html>`_ instance
            and a dict with the individual axes.
    
    '''
    debug("generating pairwise density plot")
    
    prepared_data = prepare_pairs_data(results, outcomes_to_show, group_by, 
                                       grouping_specifiers, point_in_time,
                                       filter_scalar)
    outcomes, outcomes_to_show, grouping_specifiers = prepared_data
   
    if group_by:
        #figure out the extents for each combination
        extents = determine_extents(outcomes, outcomes_to_show)
        
        axes_dicts = {}
        figures = []
        for key, value in outcomes.items():
            figure, axes_dict = simple_pairs_density(value, outcomes_to_show, 
                                       log, colormap, gridsize, ylabels,
                                       extents=extents, title=key)
            axes_dicts[key] = axes_dict
            figures.append(figure)
        
        # harmonize the color scaling across figures
        combis = [(field1, field2) for field1 in outcomes_to_show\
                           for field2 in outcomes_to_show]
        for combi in combis:
            vmax = -1
            for entry in axes_dicts.values():
                vmax =  max(entry[combi].collections[0].norm.vmax, vmax)
            for entry in axes_dicts.values():
                ax = entry[combi]
                ax.collections[0].set_clim(vmin=0, vmax=vmax)
            del vmax
            
        return figures, axes_dicts
    else:
        return simple_pairs_density(outcomes, outcomes_to_show, log, colormap, 
                                    gridsize, ylabels)
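
A short usage sketch, assuming `results` is the tuple returned by perform_experiments and that the listed outcome names exist in it (both are placeholders here):

import matplotlib.pyplot as plt

figure, axes = pairs_density(results,
                             outcomes_to_show=['infected fraction', 'deceased population'],
                             gridsize=40,
                             log=True)
plt.show()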
Exemplo n.º 29
def pairs_scatter(results, 
                  outcomes_to_show = [],
                  group_by = None,
                  grouping_specifiers = None,
                  ylabels = {},
                  legend=True,
                  point_in_time=-1,
                  filter_scalar=True,
                  **kwargs):
    '''
    
    Generate a `R style pairs <http://www.stat.psu.edu/~dhunter/R/html/graphics/html/pairs.html>`_ 
    scatter multiplot. In case of time-series data, the end states are used.
    
    :param results: return from perform_experiments.
    :param outcomes_to_show: list of outcome of interest you want to plot. If 
                             empty, all outcomes are plotted.
    :param group_by: name of the column in the cases array to group results by.
                     Alternatively, `index` can be used to use indexing arrays 
                     as the basis for grouping.
    :param grouping_specifiers: set of categories to be used as a basis for 
                                grouping by. Grouping_specifiers is only 
                                meaningful if group_by is provided as well. In
                                case of grouping by index, the grouping 
                                specifiers should be in a dictionary where the
                                key denotes the name of the group. 
    :param ylabels: ylabels is a dictionary with the outcome names as keys, the 
                    specified values will be used as labels for the y axis. 
    :param legend: boolean, if true, and there is a column specified for 
                   grouping, show a legend.
    :param point_in_time: the point in time at which the scatter is to be made.
                          If None is provided, the end states are used. 
                          point_in_time should be a valid value on time
    :param filter_scalar: boolean, remove the non-time-series outcomes.  
                          Defaults to True.
    :rtype: a `figure <http://matplotlib.sourceforge.net/api/figure_api.html>`_ instance
            and a dict with the individual axes.
    

    .. note:: the current implementation is limited to seven different 
              categories when grouping. This limit is due to the colors 
              specified in COLOR_LIST. 
    '''
    
    debug("generating pairwise scatter plot")
   
    prepared_data = prepare_pairs_data(results, outcomes_to_show, group_by, 
                                       grouping_specifiers, point_in_time,
                                       filter_scalar)
    outcomes, outcomes_to_show, grouping_labels = prepared_data
   
    grid = gridspec.GridSpec(len(outcomes_to_show), len(outcomes_to_show))                             
    grid.update(wspace = 0.1,
                hspace = 0.1)    
    
    
    #the plotting
    figure = plt.figure()
    axes_dict = {}
    
    if group_by and legend:
        make_legend(grouping_labels, figure, legend_type='scatter')
     
    combis = [(field1, field2) for field1 in outcomes_to_show\
                               for field2 in outcomes_to_show]
    
    for field1, field2 in combis:
        i = outcomes_to_show.index(field1)
        j = outcomes_to_show.index(field2)
        ax = figure.add_subplot(grid[i,j])
        axes_dict[(field1, field2)] = ax

        if group_by:
            for x, group in enumerate(grouping_labels):
                y_data = outcomes[group][field1]
                x_data = outcomes[group][field2]
                
                facecolor = COLOR_LIST[x]
                edgecolor = 'k'
                if i==j: 
                    facecolor = 'white'
                    edgecolor = 'white'
                ax.scatter(x_data, y_data, 
                           facecolor=facecolor, edgecolor=edgecolor)
        else:
            y_data = outcomes[field1]
            x_data = outcomes[field2]

            facecolor = 'b'
            edgecolor = 'k'
            if i==j: 
                facecolor = 'white'
                edgecolor = 'white'
            ax.scatter(x_data, y_data, 
                       facecolor=facecolor, edgecolor=edgecolor)
        do_text_ticks_labels(ax, i, j, field1, field2, ylabels, 
                             outcomes_to_show)

    return figure, axes_dict
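
A short usage sketch for the scatter variant; `results` and the grouping column name are placeholders:

import matplotlib.pyplot as plt

figure, axes = pairs_scatter(results,
                             group_by='policy',   # hypothetical column in the cases array
                             legend=True,
                             filter_scalar=True)
plt.show()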
Exemplo n.º 30
    def run_model(self, case):
        """
        Method for running an instantiated model structure. 
        the provided implementation assumes that the keys in the 
        case match the variable names in the Vensim model. 
        
        If lookups are to be set specify their transformation from 
        uncertainties to lookup values in the extension of this method, 
        then call this one using super with the updated case dict.
        
        if you want to use cinFiles, set the cin_file, or cinFiles in
        the extension of this method to `self.cin_file`.
        
        :param case: the case to run
        
        
        .. note:: setting parameters should always be done via run_model.
                  The model is reset to its initial values automatically after
                  each run.  
        
        """
                
        if self.cin_file:
            try:
                read_cin_file(self.working_directory+self.cin_file)
            except VensimWarning as w:
                ema_logging.debug(str(w))
            else:
                ema_logging.debug("cin file read successfully")
        
        for lookup_uncertainty in self.lookup_uncertainties:
            # ask the lookup to transform the retrieved uncertainties to the 
            # proper lookup value
            case[lookup_uncertainty.name] = lookup_uncertainty.transform(case)
                    
        for key, value in case.items():
            set_value(key, value)
        ema_logging.debug("model parameters set successfully")
        
        ema_logging.debug("run simulation, results stored in " + self.working_directory+self.result_file)
        # any VensimError raised by run_simulation propagates to the caller
        run_simulation(self.working_directory+self.result_file)

        results = {}
        error = False
        for output in self.outcomes:
            ema_logging.debug("getting data for %s" %output.name)
            result = get_data(self.working_directory+self.result_file, 
                              output.name 
                              )
            ema_logging.debug("successfully retrieved data for %s" %output.name)
            if not result == []:
                if result.shape[0] != self.runLength:
                    got = result.shape[0]
                    a = np.zeros((self.runLength))
                    a[0:result.shape[0]] = result
                    result = a
                    error = True

            if not output.time:
                result = [-1]
            else:
                result = result[0::self.step]
            try:
                results[output.name] = result
            except ValueError as e:
                print "what"
                raise e
        self.output = results   
        if error:
            raise CaseError("run not completed, got %s, expected %s" %
                            (got, self.runLength), case)  
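
The docstring's extension pattern as a minimal sketch: transform uncertainties into a lookup in a subclass, then delegate to the implementation above via super. The class names, lookup name, and transformation are hypothetical, and the sketch assumes run_model above is defined on the Vensim model interface being subclassed:

class MyVensimModel(VensimModelStructureInterface):   # hypothetical subclass

    def run_model(self, case):
        # hypothetical transformation of a scalar uncertainty into a lookup
        case['demand lookup'] = [(t, case['demand scale'] * t)
                                 for t in range(0, 100, 10)]
        del case['demand scale']
        super(MyVensimModel, self).run_model(case)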
Exemplo n.º 31
    def model_init(self, policy, kwargs):
        try:
            self.modelFile = policy['file']
        except KeyError:
            logging.debug("no policy specified")
        super(EnergyTrans, self).model_init(policy, kwargs)
Exemplo n.º 32
def pairs_lines(results, 
                outcomes_to_show = [],
                group_by = None,
                grouping_specifiers = None,
                ylabels = {},
                legend=True,
                **kwargs):
    '''
    
    Generate a `R style pairs <http://www.stat.psu.edu/~dhunter/R/html/graphics/html/pairs.html>`_ 
    lines multiplot. It shows the behavior of two outcomes over time against
    each other. The origin is denoted with a circle and the end is denoted
    with a '+'. 
    
    :param results: return from perform_experiments.
    :param outcomes_to_show: list of outcome of interest you want to plot. If 
                             empty, all outcomes are plotted.
    :param group_by: name of the column in the cases array to group results by.
                     Alternatively, `index` can be used to use indexing arrays 
                     as the basis for grouping.
    :param grouping_specifiers: set of categories to be used as a basis for 
                                grouping by. Grouping_specifiers is only 
                                meaningful if group_by is provided as well. In
                                case of grouping by index, the grouping 
                                specifiers should be in a dictionary where the
                                key denotes the name of the group. 
    :param ylabels: ylabels is a dictionary with the outcome names as keys, the 
                    specified values will be used as labels for the y axis. 
    :param legend: boolean, if true, and there is a column specified for 
                   grouping, show a legend.
    :rtype: a `figure <http://matplotlib.sourceforge.net/api/figure_api.html>`_ instance
            and a dict with the individual axes.
    
    '''
    
    #unravel return from perform_experiments   
    debug("making a pairs lines plot")
    
    prepared_data = prepare_pairs_data(results, outcomes_to_show, group_by, 
                                       grouping_specifiers, None)
    outcomes, outcomes_to_show, grouping_labels = prepared_data
    
    grid = gridspec.GridSpec(len(outcomes_to_show), len(outcomes_to_show))                             
    grid.update(wspace = 0.1,
                hspace = 0.1)
    
    #the plotting
    figure = plt.figure()
    axes_dict = {}
    
    if group_by and legend:
        make_legend(grouping_labels, figure)
     
    combis = [(field1, field2) for field1 in outcomes_to_show\
                               for field2 in outcomes_to_show]
    
    for field1, field2 in combis:
        i = outcomes_to_show.index(field1)
        j = outcomes_to_show.index(field2)
        ax = figure.add_subplot(grid[i,j])
        axes_dict[(field1, field2)] = ax

        if group_by:
            for x, entry in enumerate(grouping_labels):
                data1 = outcomes[entry][field1]
                data2 = outcomes[entry][field2]
                color = COLOR_LIST[x]
                if i==j: 
                    color = 'white'
                simple_pairs_lines(ax, data1, data2, color)
        else:
            data1 = outcomes[field1]
            data2 = outcomes[field2]
            color = 'b'
            if i==j: 
                color = 'white'
            simple_pairs_lines(ax, data1, data2, color)
        do_text_ticks_labels(ax, i, j, field1, field2, ylabels, 
                             outcomes_to_show)
            

    return figure, axes_dict
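
A short usage sketch for the lines variant; `results` and the outcome names are placeholders:

import matplotlib.pyplot as plt

figure, axes = pairs_lines(results,
                           outcomes_to_show=['infected fraction', 'deceased population'],
                           legend=False)
plt.show()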