def run_model(self, case):
    for key, value in self.policy.items():
        vensim.set_value(key, value)

    switches = case.pop("preference switches")
    case["SWITCH preference for MIC"] = switches[0]
    case["SWITCH preference for expected cost per MWe"] = switches[1]
    case["SWITCH preference against unknown"] = switches[2]
    case["SWITCH preference for expected progress"] = switches[3]
    case["SWITCH preference against specific CO2 emissions"] = switches[4]

    for key, value in case.items():
        vensim.set_value(key, value)
    ema_logging.debug("model parameters set successfully")

    ema_logging.debug("run simulation, results stored in " +
                      self.workingDirectory + self.resultFile)
    try:
        vensim.run_simulation(self.workingDirectory + self.resultFile)
    except VensimError:
        raise

    results = {}
    error = False
    for output in self.outcomes:
        ema_logging.debug("getting data for %s" % output.name)
        result = vensim.get_data(self.workingDirectory + self.resultFile,
                                 output.name)
        ema_logging.debug("successfully retrieved data for %s" % output.name)

        if not result == []:
            if result.shape[0] != self.runLength:
                # pad the result to the expected run length and flag the run
                a = np.zeros((self.runLength))
                a[0:result.shape[0]] = result
                result = a
                error = True
            else:
                result = result[0::self.step]
        try:
            results[output.name] = result
        except ValueError:
            print "failed to store results for %s" % output.name

    for output in self.activation:
        value = results[output.name]
        time = results["TIME"]
        activationTimeStep = time[value > 0]
        if activationTimeStep.shape[0] > 0:
            activationTimeStep = activationTimeStep[0]
        else:
            activationTimeStep = np.array([0])
        results[output.name] = activationTimeStep

    self.output = results
    if error:
        raise CaseError("run not completed", case)
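The run_model above pops a single "preference switches" entry and expands it into five individual SWITCH parameters before passing every item in the case to vensim.set_value. A minimal sketch of such a case dictionary, with hypothetical uncertainty names and values (only the "preference switches" key and the SWITCH parameter names come from the code itself):

example_case = {
    "preference switches": [1, 0, 0, 1, 0],  # order: MIC, expected cost per MWe,
                                             # against unknown, expected progress,
                                             # against specific CO2 emissions
    "some other uncertainty": 0.5,           # hypothetical additional uncertainty
}
# run_model pops the switches and sets, for example,
# case["SWITCH preference for MIC"] = 1 before calling vensim.set_value per item.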
def model_init(self, policy, kwargs):
    super(FluModel, self).model_init(policy, kwargs)

    # pop name
    policy = copy.copy(policy)
    policy.pop('name')

    for key, value in policy.items():
        vensim.set_value(key, value)
def model_init(self, policy, kwargs):
#        try:
#            self.modelFile = policy['file']
#        except:
#            EMAlogging.debug("no policy specified")
    super(EnergyTrans, self).model_init(policy, kwargs)

    # pop name
    policy = copy.copy(policy)
    policy.pop('name')

    for key, value in policy.items():
        vensim.set_value(key, value)
def run_model(self, case):
    switches = case.pop("preference switches")

    case["SWITCH preference for MIC"] = switches[0]
    case["SWITCH preference for expected cost per MWe"] = switches[1]
    case["SWITCH preference against unknown"] = switches[2]
    case["SWITCH preference for expected progress"] = switches[3]
    case["SWITCH preference against specific CO2 emissions"] = switches[4]

    if np.sum(switches) == 0:
        print "all preference switches are zero!"

    for key, value in case.items():
        vensim.set_value(key, value)
def run_model(self, case):
    for key, value in case.items():
        vensim.set_value(key, value)
    ema_logging.debug("model parameters set successfully")

    ema_logging.debug("run simulation, results stored in " +
                      self.workingDirectory + self.resultFile)
    try:
        vensim.run_simulation(self.workingDirectory + self.resultFile)
    except VensimError:
        raise

    results = {}
    error = False
    for output in self.outcomes:
        ema_logging.debug("getting data for %s" % output.name)
        result = vensim.get_data(self.workingDirectory + self.resultFile,
                                 output.name)
        ema_logging.debug("successfully retrieved data for %s" % output.name)

        if not result == []:
            if result.shape[0] != self.runLength:
                # pad the result to the expected run length and flag the run
                a = np.zeros((self.runLength))
                a[0:result.shape[0]] = result
                result = a
                error = True
            else:
                result = result[0::self.step]
        try:
            results[output.name] = result
        except ValueError:
            print "failed to store results for %s" % output.name

    self.output = results
    if error:
        raise CaseError("run not completed", case)
def set_lookups(case):
    kwargs = case

    def returnsToScale(x, speed, scale):
        return (x * 1000, scale * 1 / (1 + exp(-1 * speed * (x - 50))))

    def approxLearning(x, speed, scale, start):
        x = x - start
        loc = 1 - scale
        a = (x * 10000, scale * 1 / (1 + exp(speed * x)) + loc)
        return a

    def f(x, speed, loc):
        return (x / 10, loc * 1 / (1 + exp(speed * x)))

    def priceSubstite(x, speed, begin, end):
        scale = 2 * end
        start = begin - scale / 2
        return (x + 2000, scale * 1 / (1 + exp(-1 * speed * x)) + start)

    loc = kwargs.pop("lookup shortage loc")
    speed = kwargs.pop("lookup shortage speed")
    lookup = [f(x / 10, speed, loc) for x in range(0, 100)]
    vensim.set_value('shortage price effect lookup', lookup)
#    print venDLL.get_varattrib('shortage price effect lookup', attribute=3)
#    print lookup

    speed = kwargs.pop("lookup price substitute speed")
    begin = kwargs.pop("lookup price substitute begin")
    end = kwargs.pop("lookup price substitute end")
    lookup = [priceSubstite(x, speed, begin, end) for x in range(0, 100, 10)]
    vensim.set_value('relative price substitute lookup', lookup)

    speed = kwargs.pop("lookup returns to scale speed")
    scale = kwargs.pop("lookup returns to scale scale")
    lookup = [returnsToScale(x, speed, scale) for x in range(0, 101, 10)]
    vensim.set_value('returns to scale lookup', lookup)

    speed = kwargs.pop("lookup approximated learning speed")
    scale = kwargs.pop("lookup approximated learning scale")
    start = kwargs.pop("lookup approximated learning start")
    lookup = [approxLearning(x, speed, scale, start) for x in range(0, 101, 10)]
    vensim.set_value('approximated learning effect lookup', lookup)
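A minimal usage sketch for set_lookups, assuming a Vensim model has already been loaded with vensim.load_model; the numeric values below are hypothetical placeholders, and only the key names are taken from the kwargs.pop calls above:

example_case = {
    "lookup shortage loc": 0.5,                  # hypothetical values throughout
    "lookup shortage speed": 0.1,
    "lookup price substitute speed": 0.1,
    "lookup price substitute begin": 1.0,
    "lookup price substitute end": 2.0,
    "lookup returns to scale speed": 0.1,
    "lookup returns to scale scale": 0.9,
    "lookup approximated learning speed": 0.1,
    "lookup approximated learning scale": 0.9,
    "lookup approximated learning start": 10,
}
# set_lookups pops the keys it consumes, so pass a copy if the case is reused
set_lookups(dict(example_case))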
def run_interval(model, loop_index, interval, VOI, edges, ind_cons,
                 double_list, case):
    # Load the model.
    vensim.load_model(model)

    case = copy.deepcopy(case)
    set_lookups(case)
    for key, value in case.items():
        vensim.set_value(key, repr(value))
#        print key, repr(value), vensim.get_val(key), value-vensim.get_val(key)

    # We run the model in game mode.
    step = vensim.get_val(r'TIME STEP')
    start_interval = interval[0] * step
    end_interval = interval[1] * step
    venDLL.command('GAME>GAMEINTERVAL|' + str(start_interval))

    # Initiate the model to be run in game mode.
    venDLL.command("MENU>GAME")
    if start_interval > 0:
        venDLL.command('GAME>GAMEON')

    loop_on = 1
    loop_off = 0
    loop_turned_off = False
    while True:
        # Initiate the experiment of interest.
        # In other words set the uncertainties to the same value as in
        # those experiments.
        time = vensim.get_val(r'TIME')
        ema_logging.debug(time)

        if time == (2000 + step * interval[0]) and not loop_turned_off:
            loop_turned_off = True

            if loop_index != 0:
                # If loop elimination method is based on unique edge.
                if loop_index - 1 < ind_cons:
                    constant_value = vensim.get_val(edges[int(loop_index - 1)][0])
                    if loop_off == 1:
                        constant_value = 0
                    vensim.set_value('value loop ' + str(loop_index),
                                     constant_value)
                    vensim.set_value('switch loop ' + str(loop_index),
                                     loop_off)
                # Else it is based on unique consecutive edges.
                else:
                    constant_value = vensim.get_val(edges[int(loop_index - 1)][0])
                    if loop_off == 1:
                        constant_value = 0
                    # Name of constant value used does not fit loop index
                    # minus 'start of cons'-index.
                    if loop_index - ind_cons in double_list:
                        vensim.set_value('value cons loop ' + str(loop_index - ind_cons - 1),
                                         constant_value)
                        vensim.set_value('switch cons loop ' + str(loop_index - ind_cons - 1),
                                         loop_off)
                    else:
                        vensim.set_value('value cons loop ' + str(loop_index - ind_cons),
                                         constant_value)
                        vensim.set_value('switch cons loop ' + str(loop_index - ind_cons),
                                         loop_off)

            venDLL.command('GAME>GAMEINTERVAL|' + str(end_interval - start_interval))

        elif time == (2000 + step * interval[1]) and loop_turned_off:
            loop_turned_off = False

            if loop_index != 0:
                # If loop elimination method is based on unique edge.
                if loop_index - 1 < ind_cons:
                    constant_value = 0
                    vensim.set_value('value loop ' + str(loop_index),
                                     constant_value)
                    vensim.set_value('switch loop ' + str(loop_index),
                                     loop_on)
                # Else it is based on unique consecutive edges.
                else:
                    constant_value = 0
                    # Name of constant value used does not fit loop index
                    # minus 'start of cons'-index.
                    if loop_index - ind_cons in double_list:
                        vensim.set_value('value cons loop ' + str(loop_index - ind_cons - 1),
                                         constant_value)
                        vensim.set_value('switch cons loop ' + str(loop_index - ind_cons - 1),
                                         loop_on)
                    else:
                        vensim.set_value('value cons loop ' + str(loop_index - ind_cons),
                                         constant_value)
                        vensim.set_value('switch cons loop ' + str(loop_index - ind_cons),
                                         loop_on)

            finalT = vensim.get_val('FINAL TIME')
            currentT = vensim.get_val('TIME')
            venDLL.command('GAME>GAMEINTERVAL|' + str(finalT - currentT))
        else:
            break

        finalT = vensim.get_val('FINAL TIME')
        currentT = vensim.get_val('TIME')
        if finalT != currentT:
            venDLL.command('GAME>GAMEON')

    venDLL.command('GAME>ENDGAME')
    interval_series = vensim.get_data('Base.vdf', VOI)

    return interval_series
def run_interval(model, loop_index, interval, VOI, edges, ind_cons,
                 double_list, uncertain_names, uncertain_values):
    # Load the model.
    vensim.load_model(model)
    # We don't want any screens.
    vensim.be_quiet()

    # We run the model in game mode.
    step = vensim.get_val(r"TIME STEP")
    start_interval = str(interval[0] * step)
    venDLL.command("GAME>GAMEINTERVAL|" + start_interval)

    # Initiate the model to be run in game mode.
    venDLL.command("MENU>GAME")

    while True:
        if vensim.get_val(r"TIME") == 2000:
            # Initiate the experiment of interest.
            # In other words set the uncertainties to the same values as in
            # those experiments.
            for name, value in zip(uncertain_names, uncertain_values):
                vensim.set_value(name, value)
        print vensim.get_val(r"TIME")

        try:
            # Run the model for the length specified in the game on-interval.
            venDLL.command("GAME>GAMEON")

            step = vensim.get_val(r"TIME STEP")
            if vensim.get_val(r"TIME") == (2000 + step * interval[0]):
                if loop_index != 0:
                    # If loop elimination method is based on unique edge.
                    if loop_index - 1 < ind_cons:
                        constant_value = vensim.get_val(edges[int(loop_index - 1)][0])
                        vensim.set_value("value loop " + str(loop_index),
                                         constant_value)
                        vensim.set_value("switch loop " + str(loop_index), 0)
                    # Else it is based on unique consecutive edges.
                    else:
                        constant_value = vensim.get_val(edges[int(loop_index - 1)][0])
                        print constant_value
                        # Name of constant value used does not fit loop index
                        # minus 'start of cons'-index.
                        if loop_index - ind_cons in double_list:
                            vensim.set_value("value cons loop " + str(loop_index - ind_cons - 1),
                                             constant_value)
                            vensim.set_value("switch cons loop " + str(loop_index - ind_cons - 1), 0)
                        else:
                            vensim.set_value("value cons loop " + str(loop_index - ind_cons),
                                             constant_value)
                            vensim.set_value("switch cons loop " + str(loop_index - ind_cons), 0)
        except venDLL.VensimWarning:
            # The game-on command will continue to the end of the simulation
            # and then raise a warning.
            print "The end of simulation."
            break

    venDLL.finish_simulation()

    interval_series = vensim.get_data("Base.vdf", VOI)
    interval_series = interval_series[interval[0]:interval[1]]

    return interval_series
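A hedged usage sketch for this run_interval variant; every concrete value below (variable names, edges, indices, uncertainty values) is a hypothetical placeholder rather than a value from the actual loop-knockout experiments, and the model path is simply the one used in the interactive script further down:

model = r'C:\workspace\EMA-workbench\src\sandbox\sils\MODEL.vpm'  # path reused from the script below
edges = [('var a', 'var b'), ('var b', 'var c')]                  # hypothetical loop edges
series = run_interval(model,
                      loop_index=1,          # loop to deactivate; 0 leaves all loops active
                      interval=(10, 20),     # start and end of the knockout interval, in time steps
                      VOI='total capacity',  # hypothetical variable of interest
                      edges=edges,
                      ind_cons=2,            # hypothetical index where consecutive-edge loops start
                      double_list=[],        # loops whose constant name is offset by one
                      uncertain_names=['some uncertainty'],  # hypothetical uncertainty
                      uncertain_values=[1.0])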
# load model
vensim.load_model(r'C:\workspace\EMA-workbench\src\sandbox\sils\MODEL.vpm')

# we don't want any screens
vensim.be_quiet()

# we run the model in game mode, with 10 timesteps at a time
# the interval can be modified even during the game, thus allowing for dynamic
# interaction
venDLL.command('GAME>GAMEINTERVAL|10')

# initiate the model to be run in game mode
venDLL.command("MENU>GAME")

while True:
    print vensim.get_val(r'FRAC EP FOR TRAINING')
    print vensim.get_val(r'TIME')
    try:
        # run the model for the length specified via the game interval command
        venDLL.command('GAME>GAMEON')
    except venDLL.VensimWarning:
        # the game on command will continue to the end of the simulation and
        # then raise a warning
        print "end of simulation reached"
        break
    vensim.set_value(r'FRAC EP FOR TRAINING', 0.04)