import os

from ema_workbench import MultiprocessingEvaluator, load_results, save_results


def performReevaluation(model, params, policies, outputFile):
    # either run the reevaluation experiments or load previously saved results
    if params.createNewReevaluationResults:
        with MultiprocessingEvaluator(model) as evaluator:
            scenarios = params.evaluationScenarios
            results = evaluator.perform_experiments(scenarios=scenarios,
                                                    policies=policies)
        if not os.path.exists(params.reevaluateOutputFolder):
            os.makedirs(params.reevaluateOutputFolder)
        save_results(results, params.reevaluateOutputFolder + outputFile)
    else:
        print('Loading reevaluation from ' + params.reevaluateOutputFolder + outputFile)
        results = load_results(params.reevaluateOutputFolder + outputFile)
    return results
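# Hedged usage sketch (not part of the original script): performReevaluation expects
# a params object exposing the attributes used above. The dataclass, folder, and file
# name below are illustrative assumptions; `model` and `policies` are assumed to be an
# ema_workbench Model and a list of Policy objects defined elsewhere.
from dataclasses import dataclass


@dataclass
class ReevaluationParams:
    createNewReevaluationResults: bool = True
    evaluationScenarios: int = 1000
    reevaluateOutputFolder: str = './reevaluation/'


# results = performReevaluation(model, ReevaluationParams(), policies,
#                               'reevaluation_results.tar.gz')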
from pathlib import Path
from typing import Tuple

import ema_workbench
import pandas as pd
from tinydb import TinyDB


def load_ema_leso_results(
    run_id: int,
    exp_prefix: str,
    results_folder: Path,
    return_db_as_df=True,
    exclude_solver_errors=True,
) -> Tuple[pd.DataFrame, dict, pd.DataFrame]:
    """Small helper function to load results easily from the document structure."""
    ema_results = f"{exp_prefix}_ema_results_{run_id}.tar.gz"
    experiments, outcomes = ema_workbench.load_results(results_folder / ema_results)

    db_file = f"{exp_prefix}_db{run_id}.json"
    db = TinyDB(results_folder / db_file)
    if return_db_as_df:
        # convert_db_to_df is a project-local helper that flattens the TinyDB
        # documents into a pandas DataFrame
        db = convert_db_to_df(db)
        if exclude_solver_errors:
            db = db[db.solver_status == "ok"]

    return experiments, outcomes, db
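# Hedged usage sketch, not part of the original helper: the run id, prefix, and
# results folder below are illustrative placeholders for the document structure
# this function expects (an ema results tarball plus a TinyDB json file per run).
if __name__ == "__main__":
    folder = Path("./results")
    experiments, outcomes, db = load_ema_leso_results(
        run_id=1,
        exp_prefix="cablepooling",
        results_folder=folder,
    )
    print(experiments.shape, len(outcomes), db.shape)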
'''
Created on 14 Mar 2019

.. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
'''
import matplotlib.pyplot as plt
import seaborn as sns

from ema_workbench import load_results
import ema_workbench.analysis.logistic_regression as logistic_regression

experiments, outcomes = load_results('./data/1000 flu cases no policy.tar.gz')

x = experiments.drop(['model', 'policy'], axis=1)
y = outcomes['deceased population region 1'][:, -1] > 1000000

logit = logistic_regression.Logit(x, y)
logit.run()
logit.show_tradeoff()

# when we change the default threshold, the tradeoff curve is recalculated
logit.threshold = 0.8
logit.show_tradeoff()

# we can also look at the tradeoff across threshold values for a given model
logit.show_threshold_tradeoff(3)

plt.show()
            scenario_data[i][j] = data[ooi][s]

    distances = pdist(scenario_data, distance)
    return distances


model = sys.argv[1]
selectType = sys.argv[2]
nr_policies = sys.argv[3]
nr_experiments = sys.argv[4]
set_size = int(sys.argv[5])

dir = '../data/multi/scenarioselection/scens_pol' + nr_policies + '/'
fn = (model + '_' + selectType + 'selected_' + nr_experiments +
      'experiments_' + nr_policies + 'policies.tar.gz')
print(dir + fn)

try:
    results = load_results(dir + fn)
except:
    print('skipping results')
    exit(0)

exp, outcomes = results
norm_new_out = normalize_out_dic(outcomes)
oois = list(outcomes.keys())


def evaluate_diversity_single(x, data=norm_new_out, oois=oois, weight=0.5,
                              distance='euclidean'):
    '''
    takes the outcomes and selected scenario set (decision variables),
.. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
'''
import numpy as np
import matplotlib.pyplot as plt

from ema_workbench import load_results, ema_logging
from ema_workbench.analysis.pairs_plotting import (pairs_lines, pairs_scatter,
                                                   pairs_density)

ema_logging.log_to_stderr(level=ema_logging.DEFAULT_LEVEL)

# load the data
fh = './data/1000 flu cases no policy.tar.gz'
experiments, outcomes = load_results(fh)

# transform the results to the required format
# that is, we want to know the max peak and the casualties at the end of the
# run
tr = {}

# get time and remove it from the dict
time = outcomes.pop('TIME')

for key, value in outcomes.items():
    if key == 'deceased population region 1':
        tr[key] = value[:, -1]  # we want the end value
    else:
        # we want the maximum value of the peak
        max_peak = np.max(value, axis=1)
from __future__ import (absolute_import, print_function, division,
                        unicode_literals)

from ema_workbench import load_results

# ####################### IMPORT DATA #########################################
# =============================================================================
results = load_results('./800experiments_300s_7%_ForReal.tar.gz')
experiments, outcomes = results
Created on May 26, 2015

@author: jhkwakkel
'''
import matplotlib.pyplot as plt

import ema_workbench.analysis.cart as cart
from ema_workbench import ema_logging, load_results

ema_logging.log_to_stderr(level=ema_logging.INFO)

default_flow = 2.178849944502783e7

# load data
fn = './data/5000 runs WCM.tar.gz'
results = load_results(fn)
x, outcomes = results

ooi = 'throughput Rotterdam'
outcome = outcomes[ooi] / default_flow
y = outcome < 1

cart_alg = cart.CART(x, y)
cart_alg.build_tree()

# print cart to std_out
print(cart_alg.stats_to_dataframe())
print(cart_alg.boxes_to_dataframe())

# visualize
cart_alg.show_boxes(together=False)
'''
This file illustrates the use of the workbench for dimensional stacking
for scenario discovery

.. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
'''
import matplotlib.pyplot as plt

from ema_workbench import ema_logging, load_results
from ema_workbench.analysis import dimensional_stacking

ema_logging.log_to_stderr(level=ema_logging.INFO)

# load data
fn = './data/1000 flu cases no policy.tar.gz'
x, outcomes = load_results(fn)

y = outcomes['deceased population region 1'][:, -1] > 1000000

fig = dimensional_stacking.create_pivot_plot(x, y, 2, bin_labels=True)

plt.show()
""" Created on Jul 8, 2014 @author: [email protected] """ import matplotlib.pyplot as plt from ema_workbench import ema_logging, load_results from ema_workbench.analysis.plotting import envelopes from ema_workbench.analysis.plotting_util import KDE ema_logging.log_to_stderr(ema_logging.INFO) file_name = r"./data/1000 flu cases.tar.gz" results = load_results(file_name) # the plotting functions return the figure and a dict of axes fig, axes = envelopes(results, group_by="policy", density=KDE, fill=True) # we can access each of the axes and make changes for key, value in axes.iteritems(): # the key is the name of the outcome for the normal plot # and the name plus '_density' for the endstate distribution if key.endswith("_density"): value.set_xscale("log") plt.show()
                       ScalarOutcome('reliability', kind=ScalarOutcome.MAXIMIZE)]

# override some of the defaults of the model
lake_model.constants = [
    Constant('alpha', 0.41),
    Constant('nsamples', 100),
    Constant('timehorizon', lake_model.time_horizon),
]

# Load the initial exploration for scenarios
# directory = 'D:/sibeleker/surfdrive/Documents/Notebooks/Lake_model-MORDM/Sibel/data/'
# fn = '211_experiments_closedloop_noApollution_inertia.tar.gz'
directory = 'H:/MyDocuments/Notebooks/Lake_model-MORDM/Sibel/MORDM_paper/data/'
# fn = '206_experiments_openloop_Apollution.tar.gz'
fn = '500_experiments_openloop_5policies.tar.gz'
scenario_results = load_results(directory + fn)
experiments, outcomes = scenario_results

# get the selected scenarios
# scenarios = [77, 96, 130, 181]
scenarios = ['Ref', 77, 96, 130, 181]
random_scenarios = [81, 289, 391, 257]

# optimize for each selected scenario
pool = multiprocessing.Pool(processes=8)
timeout = 5000
start = time.time()

for s in random_scenarios:
    if s == 'Ref':
        result = pool.apply_async(optimize, args=(lake_model, {}, 10000,
                                                  [0.1, 0.05, 0.005,
    sqeuclidean, wminkowski

    returns a list of distance values
    '''
    # make a matrix of the data n_scenarios x oois
    scenario_data = np.zeros((len(scenarios), len(oois)))
    for i, s in enumerate(scenarios):
        for j, ooi in enumerate(oois):
            scenario_data[i][j] = data[ooi][s]

    distances = pdist(scenario_data, distance)
    return distances


# dir = 'H:/MyDocuments/Notebooks/Lake_model-MORDM/Sibel/MORDM_paper/data/'
# fn = '206_experiments_openloop_Apollution.tar.gz'
results = load_results('./selected_results.tar.gz')
exp, outcomes = results
norm_new_out = normalize_out_dic(outcomes)
oois = list(outcomes.keys())


def evaluate_diversity_single(x, data=norm_new_out, oois=oois, weight=0.5,
                              distance='euclidean'):
    '''
    takes the outcomes and selected scenario set (decision variables),
    returns a single 'diversity' value for the scenario set.

    outcomes : outcomes dictionary of the scenario ensemble
    decision vars : indices of the scenario set
# model_version = 'v4'
import os

import numpy as np

from ema_workbench import load_results
from dest_directories import gz_path, fig_path

# %%
# n_scenarios = 2000
# n_policies = 50
# run = '36_OE'
n_scenarios = 100000
n_policies = 1
run = 'run_35_NordOE'

# %%
results = load_results(
    os.path.join(gz_path, 'run_35_NordOE_v7_100000s_.tar.gz'))

# %%
experiments, outcomes = results

# %%
experiments = experiments.drop(['model', 'policy'], axis=1)

# %%
TimeLine = []
for i in range(65):
    TimeLine.append(2020 + i * 5)
TimeLine

outcomes["TIME"] = np.array([TimeLine])
# outcomes

# %%
import ema_workbench
import os
from ema_workbench import experiments_to_scenarios
from ema_workbench.em_framework import SequentialEvaluator
import numpy as np

from cablepool_leso_handshake import METRICS

RESULT_FOLDER = "C:\\Users\\sethv\\Google Drive\\0 Thesis\\Results\\cablepooling"
ema_results = "cabelpooling_ema_results_120821.tar.gz"
ema_results_path = os.path.join(RESULT_FOLDER, ema_results)

experiments, outcomes = ema_workbench.load_results(ema_results_path)
scenarios = experiments_to_scenarios(experiments)

from cablepool_ema_model_definition import model


def printer(**kwargs):
    # for key, value in kwargs.items():
    #     print(f"{key}: {value}")
    return {key: np.random.random() for key in METRICS}


model.function = printer

with SequentialEvaluator(model) as evaluator:
    results = evaluator.perform_experiments(scenarios=scenarios)
@author: [email protected]
'''
import matplotlib.pyplot as plt
import numpy as np

from ema_workbench import ema_logging, load_results
from ema_workbench.analysis.plotting import envelopes, lines
from ema_workbench.analysis.plotting_util import KDE

ema_logging.log_to_stderr(ema_logging.INFO)

file_name = r'./data/10 runs.tar.gz'
# file_name = r'./data/1000 flu cases no policy.tar.gz'

experiments, outcomes = load_results(file_name)
results = (experiments, outcomes)

default_flow = 2.178849944502783e7

ooi_name = "sheep"
outcome = outcomes[ooi_name]
outcome = outcome / default_flow

ooi = np.zeros(outcome.shape[0])
temp_outcomes = {ooi_name: ooi}

print(ooi.shape)

desired__nr_lines = 5
nr_cases = ooi.shape[0]
'''
Created on 30 Oct 2018

@author: jhkwakkel
'''
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns

from ema_workbench import ema_logging, load_results
from ema_workbench.analysis import feature_scoring

ema_logging.log_to_stderr(level=ema_logging.INFO)

# load data
fn = r'./data/1000 flu cases with policies.tar.gz'
x, outcomes = load_results(fn)

# we have timeseries so we need scalars
y = {'deceased population': outcomes['deceased population region 1'][:, -1],
     'max. infected fraction': np.max(outcomes['infected fraction R1'], axis=1)}

scores = feature_scoring.get_feature_scores_all(x, y)
sns.heatmap(scores, annot=True, cmap='viridis')
plt.show()
'''
Created on Jul 8, 2014

@author: [email protected]
'''
import matplotlib.pyplot as plt

from ema_workbench import ema_logging, load_results
from ema_workbench.analysis.plotting import kde_over_time

ema_logging.log_to_stderr(ema_logging.INFO)

# file_name = r'./data/1000 runs scarcity.tar.gz'
file_name = './data/1000 flu cases no policy.tar.gz'
experiments, outcomes = load_results(file_name)

# the plotting functions return the figure and a dict of axes
fig, axes = kde_over_time(experiments, outcomes, log=True)

plt.show()
def classify(data):
    # get the output for deceased population
    result = data['deceased population region 1']

    # if deceased population is higher than 1.000.000 people,
    # classify as 1
    classes = result[:, -1] > 1000000
    return classes


# load data
fn = './data/1000 flu cases with policies.tar.gz'
results = load_results(fn)
experiments, results = results

# extract results for 1 policy
logical = experiments['policy'] == 'no policy'
new_experiments = experiments[logical]
new_results = {}
for key, value in results.items():
    new_results[key] = value[logical]

results = (new_experiments, new_results)

# perform cart on modified results tuple
cart_alg = cart.setup_cart(results, classify, mass_min=0.05)
cart_alg.build_tree()
Created on Jul 8, 2014

@author: [email protected]
'''
import matplotlib.pyplot as plt
import numpy as np

from ema_workbench import ema_logging, load_results
from ema_workbench.analysis.plotting import envelopes, lines
from ema_workbench.analysis.plotting_util import KDE

ema_logging.log_to_stderr(ema_logging.INFO)

experiments, outcomes = load_results(r'./Data/500_runs_cap_approach.tar.gz')
# results = (experiments, outcomes)
# experiments, outcomes = results

print(experiments.shape)  # 500 scenarios
print(outcomes['level-of-achievability'].shape)

fig, ax = plt.subplots(1)
ax.plot(outcomes['level-of-achievability'][:, 0, :].T)
ax.set_ylabel('Percentage')
fig.set_size_inches(6, 6)

plt.show()