Example #1
def makeFolderForMethodsHeatmapFiles(output_folder_analyses_path):
    # Create folder for matrices per method
    sens_matrices_folder_name = "heatmaps"
    sens_matrices_folder_path = os.path.join(output_folder_analyses_path,
                                             sens_matrices_folder_name)
    files_aux.makeFolderWithPath(sens_matrices_folder_path)
    return sens_matrices_folder_path
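The helper files_aux.makeFolderWithPath is used throughout these examples but is not shown on this page. A minimal sketch of what it presumably does, assuming it simply creates the folder (and any missing parents) and tolerates it already existing:

import os

def makeFolderWithPath(folder_path):
    # Hypothetical sketch of the helper used above: create the directory,
    # including intermediate directories, and do nothing if it already exists.
    os.makedirs(folder_path, exist_ok=True)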
Example #2
def perturbateAndAnalyze(model_name, model_file_path, start_time, stop_time,
                         parameters_to_perturb, percentage, target_vars,
                         dest_folder_path, base_dir, results_dirname):
    
    # Create simulations folder
    logger.debug('Create simulations folder')
    perturbations_folder_name = "simulation"
    perturbations_folder_path = os.path.join(dest_folder_path, perturbations_folder_name)
    files_aux.makeFolderWithPath(perturbations_folder_path)
    
    # Create analysis folder
    logger.debug('Create analysis folder')
    analysis_folder_name = "analysis"
    analysis_folder_path = os.path.join(dest_folder_path, analysis_folder_name)
    files_aux.makeFolderWithPath(analysis_folder_path)
    
    # Prepare kwargs for perturbator
    perturbator_kwargs = {
        "model_name": model_name,
        "model_file_path": model_file_path,
        "start_time": start_time,
        "stop_time": stop_time,
        "parameters": parameters_to_perturb,
        "perc_perturb": percentage,
        "build_folder_path": perturbations_folder_path,
        "base_dir"              : base_dir,
        "results_dirname"       : results_dirname
    }
    # Initialize perturbator
    logger.debug('Initialize perturbator')
    logger.debug(perturbator_kwargs)
    perturbator = analysis.indiv_sens.ParametersIsolatedPerturbator(**perturbator_kwargs)
    
    # Run simulations using perturbator
    logger.debug('Isolated perturbations results')
    isolated_perturbations_results = perturbator.runSimulations(perturbations_folder_path)
    analyze_csvs_kwargs = {
        "isolated_perturbations_results": isolated_perturbations_results,
        "target_vars": target_vars,
        "percentage_perturbed": percentage,
        "specific_year": stop_time,
        "output_folder_analyses_path": analysis_folder_path,
        "rms_first_year": start_time,
        "rms_last_year": stop_time,
    }
    # Calculate sensitivities
    analysis_results = analysis.indiv_sens.completeIndividualSensAnalysis(**analyze_csvs_kwargs)
    
    logger.debug("Finishing: save results")
    # Get the dict with the paths
    paths_dict = analysis_results["paths"]
    # Write paths dict as json
    paths_json_str = json.dumps(paths_dict, sort_keys=True, indent=2)
    paths_json_file_name = "result.json"
    paths_json_file_path = os.path.join(dest_folder_path, paths_json_file_name)
    files_aux.writeStrToFile(paths_json_str, paths_json_file_path)
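An illustrative call of the function above; every argument value here is a hypothetical placeholder, not taken from the project:

perturbateAndAnalyze(
    model_name="MyModel",
    model_file_path="/path/to/MyModel.mo",
    start_time=0,
    stop_time=100,
    parameters_to_perturb=["a", "b"],
    percentage=5,
    target_vars=["x", "y"],
    dest_folder_path="/tmp/omsens_results",
    base_dir="/tmp/omsens_results/",
    results_dirname="/tmp/omsens_results/MyModel_res.csv",
)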
Example #3
def completeIndividualSensAnalysis(isolated_perturbations_results, target_vars,
                                   percentage_perturbed, specific_year,
                                   rms_first_year, rms_last_year,
                                   output_folder_analyses_path):
    # Create perturbed runs info list using the dict output from the .mos script
    # TODO: adapt this function when we stop using tuples inside the analyzer in
    #  favor of using proper objects to represent the info
    perturbed_csvs_path_and_info_pairs = perturbationAsTuplesFromDict(
        isolated_perturbations_results)
    # Analyze the sensitivity to each perturbed parameter for every target variable
    sens_to_params_per_var = analysisPerParamPerturbedForEachVar(
        isolated_perturbations_results, percentage_perturbed, rms_first_year,
        rms_last_year, specific_year, target_vars)
    # Complete sensitivity information for each variable
    vars_sens_infos_paths = sensitivitiesInformationPathsPerVariable(
        output_folder_analyses_path, percentage_perturbed, rms_first_year,
        rms_last_year, sens_to_params_per_var, specific_year, target_vars)
    # Per sens method analysis
    sens_matrices_dfs_dict = generateSensMatricesPerMethod(
        rms_first_year, rms_last_year, sens_to_params_per_var)
    # Create folder for heatmaps
    sens_heatmaps_folder_path = makeFolderForMethodsHeatmapFiles(
        output_folder_analyses_path)
    # Iterate over the sensitivity matrices, creating a heatmap for each
    heatmaps_files_paths_per_method = {}
    for method, df_matrix in sens_matrices_dfs_dict.items():
        # Create heatmap instance
        heatmap = heatmap_f.Heatmap(df_matrix)
        # Create folder for heatmaps of this method
        method_heatmap_folder_name = method
        method_heatmap_folder_path = os.path.join(sens_heatmaps_folder_path,
                                                  method_heatmap_folder_name)
        files_aux.makeFolderWithPath(method_heatmap_folder_path)
        # Plot heatmap into folder path
        method_heatmap_files_paths = heatmap.plotInFolder(
            method_heatmap_folder_path)
        # Add this method's heatmaps to the dict
        heatmaps_files_paths_per_method[method] = method_heatmap_files_paths

    # Add paths to dict with paths
    analysis_files_paths = {
        "vars_sens_info": vars_sens_infos_paths,
        "heatmaps": heatmaps_files_paths_per_method,
    }
    # Add dfs to dict with dfs
    analysis_dfs = {
        "sens_matrices": sens_matrices_dfs_dict,
    }
    # Make main dict with all sub-dicts
    analysis_results = {
        "paths": analysis_files_paths,
        "dfs": analysis_dfs,
    }
    return analysis_results
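For reference, the dict returned above has this shape (keys come from the code; the empty dicts stand for the per-variable and per-method entries built at run time):

analysis_results_shape = {
    "paths": {
        "vars_sens_info": {},  # variable name -> path of its sensitivity info file
        "heatmaps": {},        # method name -> paths of its heatmap files
    },
    "dfs": {
        "sens_matrices": {},   # method name -> sensitivity matrix DataFrame
    },
}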
Example #4
def perturbateAndAnalyze(model_name, model_file_path, start_time, stop_time,
                         parameters_to_perturb, percentage, target_vars,
                         dest_folder_path):
    # Create simulations folder
    perturbations_folder_name = "simulation"
    perturbations_folder_path = os.path.join(dest_folder_path,
                                             perturbations_folder_name)
    files_aux.makeFolderWithPath(perturbations_folder_path)
    # Create analysis folder
    analysis_folder_name = "analysis"
    analysis_folder_path = os.path.join(dest_folder_path, analysis_folder_name)
    files_aux.makeFolderWithPath(analysis_folder_path)
    # Prepare kwargs for perturbator
    perturbator_kwargs = {
        "model_name": model_name,
        "model_file_path": model_file_path,
        "start_time": start_time,
        "stop_time": stop_time,
        "parameters": parameters_to_perturb,
        "perc_perturb": percentage,
        "build_folder_path": perturbations_folder_path,
    }
    # Initialize perturbator
    perturbator = analysis.indiv_sens.ParametersIsolatedPerturbator(
        **perturbator_kwargs)
    # Run simulations using perturbator
    logger.info("Running Modelica with specified information")
    isolated_perturbations_results = perturbator.runSimulations(
        perturbations_folder_path)
    analyze_csvs_kwargs = {
        "isolated_perturbations_results": isolated_perturbations_results,
        "target_vars": target_vars,
        "percentage_perturbed": percentage,
        "specific_year": stop_time,
        "output_folder_analyses_path": analysis_folder_path,
        "rms_first_year": start_time,
        "rms_last_year": stop_time,
    }
    logger.info("Analyzing variable sensitivities to parameters from CSVs")
    # Calculate sensitivities
    analysis_results = analysis.indiv_sens.completeIndividualSensAnalysis(
        **analyze_csvs_kwargs)
    # Get the dict with the paths
    paths_dict = analysis_results["paths"]
    # Write paths dict as json
    paths_json_str = json.dumps(paths_dict, sort_keys=True, indent=2)
    paths_json_file_name = "result.json"
    paths_json_file_path = os.path.join(dest_folder_path, paths_json_file_name)
    files_aux.writeStrToFile(paths_json_str, paths_json_file_path)
    logger.info(
        "Finished. The file {0} has all the analysis files paths.".format(
            paths_json_file_path))
Example #5
def sensitivitiesInformationPathsPerVariable(output_folder_analyses_path,
                                             percentage_perturbed,
                                             rms_first_year, rms_last_year,
                                             sens_to_params_per_var,
                                             specific_year, target_vars):
    # Create folder for complete sensitivity info per var
    vars_sens_info_folder_name = "vars_sens_info"
    vars_sens_info_folder_path = os.path.join(output_folder_analyses_path,
                                              vars_sens_info_folder_name)
    files_aux.makeFolderWithPath(vars_sens_info_folder_path)
    # Run info paths: a dict with variables as keys and the paths to their respective sens info as values
    vars_sens_infos_paths = writeRunInfosAndReturnThePaths(
        vars_sens_info_folder_path, percentage_perturbed, rms_first_year,
        rms_last_year, specific_year, target_vars, sens_to_params_per_var)
    return vars_sens_infos_paths
Example #6
    def runSimulations(self, dest_folder_path):
        # Make folder for runs
        runs_folder_name = "runs"
        runs_folder_path = os.path.join(dest_folder_path, runs_folder_name)
        files_aux.makeFolderWithPath(runs_folder_path)
        # Run STD run
        std_run_name = "std_run.csv"
        std_run_path = os.path.join(runs_folder_path, std_run_name)
        flags = "-noEventEmit"
        std_run_results = self.compiled_model.simulate(std_run_path,
                                                       optional_flags=flags)
        # Make dir for perturbed runs
        perturbed_runs_folder_name = "perturbed"
        perturbed_runs_folder_path = os.path.join(runs_folder_path,
                                                  perturbed_runs_folder_name)
        files_aux.makeFolderWithPath(perturbed_runs_folder_path)
        # Run the simulations for each parameter perturbed in isolation
        runs_per_parameter = {}
        i = 0
        for param_name in self.values_per_param:
            # Get param info for its run
            param_default_val = self.params_defaults[param_name]
            param_perturbed_val = self.values_per_param[param_name]
            # Perturb the parameter
            self.compiled_model.setParameterStartValue(param_name,
                                                       param_perturbed_val)
            # Run the simulation
            simu_csv_name = "run_{0}.csv".format(i)
            simu_csv_path = os.path.join(perturbed_runs_folder_path,
                                         simu_csv_name)
            flags = "-noEventEmit"
            simu_results = self.compiled_model.simulate(simu_csv_path,
                                                        optional_flags=flags)
            # Return the parameter to its original value
            self.compiled_model.setParameterStartValue(param_name,
                                                       param_default_val)
            # Save the simulation results for this perturbed parameter
            perturbed_param_info = simu_run_info.PerturbedParameterInfo(
                param_name, param_default_val, param_perturbed_val)
            iter_results = OneParameterPerturbedResults(
                simu_results, perturbed_param_info)
            runs_per_parameter[param_name] = iter_results
            i = i + 1
        # Prepare the results instance
        isolated_perturbations_results = IsolatedPerturbationsResults(
            self.model_name, std_run_results, runs_per_parameter)
        return isolated_perturbations_results
Example #7
File: sweep.py Project: vince-ice/OMSens
    def runSweep(self, dest_folder_path, simu_flags=""):
        # Make folder for runs
        runs_folder_name = "runs"
        runs_folder_path = os.path.join(dest_folder_path, runs_folder_name)
        files_aux.makeFolderWithPath(runs_folder_path)
        # Run STD run
        std_run_name = "std_run.csv"
        std_run_path = os.path.join(runs_folder_path, std_run_name)
        std_run_results = self.compiled_model.simulate(std_run_path)
        # Change the values of the parameters that will be fixed throughout all the runs
        for perturbed_param_info in self.fixed_params:
            param_name = perturbed_param_info.name
            new_val = perturbed_param_info.new_val
            # Change the value in the model
            self.compiled_model.setParameterStartValue(param_name, new_val)
        # Make dir for perturbed runs
        perturbed_runs_folder_name = "perturbed"
        perturbed_runs_folder_path = os.path.join(runs_folder_path, perturbed_runs_folder_name)
        files_aux.makeFolderWithPath(perturbed_runs_folder_path)
        # Run the different combinations of values
        sweep_iterations = []
        perturbed_params_info = list(self.runsPerturbedParameters())
        for i in range(len(perturbed_params_info)):
            swept_params_info = perturbed_params_info[i]
            # Perturb the parameters for this iteration
            for perturbed_param_info in swept_params_info:
                # Disaggregate param info
                param_name = perturbed_param_info.name
                new_val = perturbed_param_info.new_val
                # Change the value in the model
                self.compiled_model.setParameterStartValue(param_name, new_val)
            # Run the simulation
            simu_csv_name = "run_{0}.csv".format(i)
            simu_csv_path = os.path.join(perturbed_runs_folder_path, simu_csv_name)
            simu_results = self.compiled_model.simulate(simu_csv_path, simu_flags)
            # Instantiate sweep iteration results
            sweep_iter_results = SweepIterationResults(simu_results, swept_params_info)
            # Add results to list
            sweep_iterations.append(sweep_iter_results)
        # Instantiate sweep results
        swept_params_names = [x["name"] for x in self.perturbation_info_per_param]
        sweep_results = ParametersSweepResults(self.model_name, swept_params_names, self.fixed_params, std_run_results,
                                               sweep_iterations)
        return sweep_results
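runsPerturbedParameters is not shown on this page. Judging by the values in the test of example #12 (2 values for "a", 2 for "b", 3 for "c" giving 12 runs), it presumably yields every combination of the per-parameter sweep values; a hypothetical sketch of that idea:

import itertools

def combinationsSketch(values_per_param):
    # Hypothetical: yield one (name, value) list per run, covering the full
    # cross product of the per-parameter sweep values.
    names = list(values_per_param.keys())
    for combo in itertools.product(*(values_per_param[n] for n in names)):
        yield list(zip(names, combo))

# {"a": [-0.95, -1.05], "b": [-0.9, -1.1], "c": [-0.85, -1, -1.15]}
# yields 2 * 2 * 3 = 12 combinations, matching the test in example #12.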
Example #8
def sweepAndPlotFromJSON(dest_folder_path, json_file_path):
    with open(json_file_path, 'r') as fp:
        full_json = json.load(fp)
    # Prepare sweep init args
    model_mo_path = files_aux.moFilePathFromJSONMoPath(
        full_json["model_mo_path"])
    sweep_kwargs = \
        {
            "model_name"                  : full_json["model_name"],
            "model_file_path"             : model_mo_path,
            "start_time"                  : full_json["start_time"],
            "stop_time"                   : full_json["stop_time"],
            "perturbation_info_per_param" : full_json["parameters_to_sweep"],
            "fixed_params"                : full_json["fixed_params"],
            "build_folder_path"           : dest_folder_path,

        }
    # Initialize sweeper
    sweep_runner = running.sweep.ParametersSweeper(**sweep_kwargs)
    # Run sweep
    sweep_results = sweep_runner.runSweep(dest_folder_path)
    # Initialize sweep results plotter
    sweep_plotter = plot_sweep.SweepPlotter(sweep_results)
    # Make folder for plots
    plot_folder_path = os.path.join(dest_folder_path, "plots")
    files_aux.makeFolderWithPath(plot_folder_path)
    # Plot sweep for each var
    vars_plots_paths = {}
    for var_name in full_json["vars_to_analyze"]:
        plot_path = sweep_plotter.plotInFolder(var_name, plot_folder_path)
        vars_plots_paths[var_name] = plot_path
    # Add sweep plots to paths dict
    paths_dict = \
        {
            "sweep_plots": vars_plots_paths,
        }
    # Write paths dict as json
    paths_json_str = json.dumps(paths_dict, sort_keys=True, indent=2)
    paths_json_file_name = "result.json"
    paths_json_file_path = os.path.join(dest_folder_path, paths_json_file_name)
    files_aux.writeStrToFile(paths_json_str, paths_json_file_path)
    logger.info("Finished. The file {0} has all the sweep files paths.".format(
        paths_json_file_path))
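The JSON file consumed above must provide at least the keys referenced in the code. An illustrative shape; the concrete values, and any fields of a sweep entry beyond "name", are hypothetical placeholders:

example_sweep_json = {
    "model_name": "MyModel",
    "model_mo_path": "models/MyModel.mo",
    "start_time": 0,
    "stop_time": 100,
    "parameters_to_sweep": [{"name": "a"}],  # real entries likely carry more fields
    "fixed_params": [],
    "vars_to_analyze": ["x", "y"],
}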
Example #9
def analyzeFromJSON(dest_folder_path, json_file_path):

    # Define destination folder path
    dest_folder_path += "/" + "results/"
    with open(json_file_path, 'r') as fp:
        full_json = json.load(fp)

    # Prepare init args
    model_mo_path = files_aux.moFilePathFromJSONMoPath(
        full_json["model_mo_path"])

    # Note: check that the string in 'restriction_path' is actually a directory
    base_dir = full_json["restriction_path"]
    results_dirname = base_dir + full_json["model_name"] + '_res.csv'

    # Future work: implement 'intelligent alpha' so that the user doesn't need to iterate every time
    plot_std = full_json["plot_std_run"]
    plot_restriction = full_json["plot_restriction"]

    optim_kwargs = {
        "model_file_path": model_mo_path,
        "build_folder_path": dest_folder_path,
        "target_var_name": full_json["target_var_name"],
        "parameters_to_perturb": full_json["parameters_to_perturb"],
        "model_name": full_json["model_name"],
        "start_time": full_json["start_time"],
        "stop_time": full_json["stop_time"],
        "max_or_min": full_json["max_or_min"],
        "objective_function_name": full_json["objective_function_name"],
        "optimizer_name": full_json["optimizer_name"],
        "alpha_value": full_json["alpha_value"],
        "constrained_time_path_file": full_json["constrained_time_path_file"],
        "constrained_variable": full_json["constrained_variable"],
        "constrained_epsilon": full_json["constrained_epsilon"],
        "base_dir": base_dir,
        "results_dirname": results_dirname
    }
    # logger.debug('Generated optim_kwargs')

    # Initialize builder and compile model:
    # We compile the model again for now to avoid having remnants of the optimization in its compiled model
    plots_folder_name = "plots"
    plots_folder_path = os.path.join(dest_folder_path, plots_folder_name)
    files_aux.makeFolderWithPath(plots_folder_path)

    # Make sub-folder for the new model. On Windows we can't use "aux" as a folder name
    model_folder_name = "aux_folder"
    model_folder_path = os.path.join(plots_folder_path, model_folder_name)
    files_aux.makeFolderWithPath(model_folder_path)
    model_builder = build_model.ModelicaModelBuilder(full_json["model_name"],
                                                     full_json["start_time"],
                                                     full_json["stop_time"],
                                                     model_mo_path)
    compiled_model = model_builder.buildToFolderPath(model_folder_path,
                                                     base_dir, results_dirname)

    # Run STD Model
    x0_csv_name = "x0_run.csv"
    x0_csv_path = os.path.join(model_folder_path, x0_csv_name)
    x0_dict = {
        p: compiled_model.defaultParameterValue(p)
        for p in full_json["parameters_to_perturb"]
    }
    compiled_model.simulate(x0_csv_path, params_vals_dict=x0_dict)
    df_x0_run = pd.read_csv(x0_csv_path, index_col=False)
    df_x0_run.to_csv(results_dirname, index=False)
    # logger.debug('Generated df_x0_run')

    # Run optimization and re-run with optimal parameters
    model_optimizer = model_optimizer_f.ModelOptimizer(**optim_kwargs)
    # logger.debug('model optimizer generated')

    optim_result = model_optimizer.optimize(full_json["percentage"],
                                            full_json["epsilon"])
    x_opt_csv_name = "x_opt_run.csv"
    x_opt_csv_path = os.path.join(model_folder_path, x_opt_csv_name)
    compiled_model.simulate(x_opt_csv_path,
                            params_vals_dict=optim_result.x_opt)
    df_x_opt_run = pd.read_csv(x_opt_csv_path)

    # Update optimum value of 'optim_result'. IMPORTANT: assumes the last row of the csv file holds the optimum
    final_optimum_value = df_x_opt_run[
        full_json["target_var_name"]].tolist()[-1]
    optim_result.target_optimum = final_optimum_value

    # Plot in folder
    var_optimization = full_json["target_var_name"]
    var_restriction = full_json['constrained_variable']
    if plot_std and plot_restriction:
        restriction = pd.read_csv(full_json["constrained_time_path_file"],
                                  index_col=False)
        restriction = restriction[
            restriction['time'] <= df_x_opt_run['time'].max()]
        vect_plotter = plot_vect_f.VectorialPlotter(optim_result, df_x_opt_run,
                                                    df_x0_run, restriction,
                                                    var_optimization,
                                                    var_restriction)
    elif plot_std:
        vect_plotter = plot_vect_f.VectorialPlotter(optim_result, df_x_opt_run,
                                                    df_x0_run, None,
                                                    var_optimization, None)
    elif plot_restriction:
        restriction = pd.read_csv(full_json["constrained_time_path_file"],
                                  index_col=False)
        restriction = restriction[
            restriction['time'] <= df_x_opt_run['time'].max()]
        vect_plotter = plot_vect_f.VectorialPlotter(optim_result, df_x_opt_run,
                                                    None, restriction,
                                                    var_optimization,
                                                    var_restriction)
    else:
        raise Exception(
            "Please choose a plot option. We are still working on making this choice automatically."
        )

    # Plot
    plot_path = vect_plotter.plotInFolder(plots_folder_path)

    # Prepare JSON output dict
    vect_json_dict = {
        "x0": optim_result.x0,
        "x_opt": optim_result.x_opt,
        "f(x0)": optim_result.f_x0,
        "f(x)_opt": optim_result.target_optimum,
        "stop_time": optim_result.stop_time,
        "variable": optim_result.variable_name,
        "plot_path": plot_path,
    }
    # Write dict as json
    optim_json_str = json.dumps(vect_json_dict, sort_keys=True, indent=2)
    optim_json_file_name = "result.json"
    optim_json_file_path = os.path.join(dest_folder_path, optim_json_file_name)
    files_aux.writeStrToFile(optim_json_str, optim_json_file_path)
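The JSON file read above must carry the keys accessed in the code. An illustrative shape with hypothetical placeholder values (the project-specific names are left elided):

example_optim_json = {
    "model_name": "MyModel",
    "model_mo_path": "models/MyModel.mo",
    "start_time": 0,
    "stop_time": 100,
    "target_var_name": "y",
    "parameters_to_perturb": ["a", "b"],
    "percentage": 5,
    "epsilon": 0.01,
    "max_or_min": "max",
    "objective_function_name": "...",   # elided: project-specific name
    "optimizer_name": "...",            # elided: project-specific name
    "alpha_value": 0.5,
    "restriction_path": "/path/to/restrictions/",
    "constrained_time_path_file": "/path/to/constraint.csv",
    "constrained_variable": "y",
    "constrained_epsilon": 0.01,
    "plot_std_run": True,
    "plot_restriction": True,
}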
Example #10
def sweepAndPlotFromJSON(dest_folder_path_base,
                         json_file_path,
                         communicator=None):
    logger.debug("Entra en sweepAndPlotFromJSON")
    dest_folder_path = dest_folder_path_base + "/" + "results/"
    with open(json_file_path, 'r') as fp:
        full_json = json.load(fp)

    # Prepare sweep init args
    model_mo_path = files_aux.moFilePathFromJSONMoPath(
        full_json["model_mo_path"])

    # Hardcoded for now
    base_dir = "/home/omsens/Documents/results_experiments/logging/"  # HARDCODED, NOT USED RIGHT NOW IN SWEEP
    results_dirname = base_dir + full_json[
        "model_name"] + '_res.csv'  # HARDCODED, NOT USED RIGHT NOW IN SWEEP

    # Fetch upper/lower bounds
    with_upper_and_lower = full_json["plot_upper_lower_limit"]

    # Set kwargs
    logger.debug("setting kwargs")
    sweep_kwargs = \
        {
            "model_name"                  : full_json["model_name"],
            "model_file_path"             : model_mo_path,
            "start_time"                  : full_json["start_time"],
            "stop_time"                   : full_json["stop_time"],
            "perturbation_info_per_param" : full_json["parameters_to_sweep"],
            "fixed_params"                : full_json["fixed_params"],
            "build_folder_path"           : dest_folder_path,

            "base_dir"                    : base_dir,
            "results_dirname"             : results_dirname
        }

    # Initialize sweeper
    logger.debug("Initialize sweeper")
    logger.debug(sweep_kwargs)
    sweep_runner = running.sweep.ParametersSweeper(**sweep_kwargs)

    # Run sweep
    logger.debug("Run sweep")
    sweep_results, perturbed_param_run_id_map = sweep_runner.runSweep(
        dest_folder_path, communicator)
    logger.debug(perturbed_param_run_id_map)

    # Initialize sweep results plotter
    logger.debug("Sweep plotter")
    sweep_plotter = plot_sweep.SweepPlotter(sweep_results)
    # Make folder for plots
    logger.debug("Goto plot folder path")
    plot_folder_path = os.path.join(dest_folder_path, "plots")
    files_aux.makeFolderWithPath(plot_folder_path)

    # Save results.json
    logger.debug("Save results.json")
    vars_plots_paths = {}
    for var_name in full_json['vars_to_analyze']:
        plot_path_without_extension = os.path.join(plot_folder_path, var_name)
        vars_plots_paths[var_name] = plot_path_without_extension
    paths_dict = {"sweep_plots": vars_plots_paths}
    paths_json_str = json.dumps(paths_dict, sort_keys=True, indent=2)
    paths_json_file_name = "result.json"
    paths_json_file_path = os.path.join(dest_folder_path, paths_json_file_name)
    files_aux.writeStrToFile(paths_json_str, paths_json_file_path)

    # Generate plots
    # logger.debug("Generate plots")
    for var_name in full_json["vars_to_analyze"]:
        # logger.debug("Var to analyze: " + var_name)
        # logger.debug(vars_plots_paths)
        # logger.debug(var_name in vars_plots_paths.keys())
        plot_path_without_extension = vars_plots_paths[var_name]
        # logger.debug("Call sweep plotter plot in folder")
        sweep_plotter.plotInFolder(var_name, plot_path_without_extension,
                                   with_upper_and_lower)

    # Parameters run save
    logger.debug("Parameters run save")
    params_df = pd.DataFrame()
    for param_comb, run_id in perturbed_param_run_id_map.items():

        logger.debug("parameter combination")
        logger.debug(param_comb)

        params = {
            v[0]: v[1]
            for v in [x.split(":") for x in param_comb.split(",")]
        }
        params['run_id'] = run_id
        df_row = pd.DataFrame(data=params, index=[0])
        # DataFrame.append was removed in recent pandas; concat keeps the same behaviour
        params_df = df_row if params_df.empty else pd.concat(
            [params_df, df_row], ignore_index=True)
    params_df.to_csv(dest_folder_path + '/' + 'parameters_run.csv',
                     index=False)

    with open(dest_folder_path_base + '/' + 'model_info.json') as f:
        model_info = json.load(f)
        variables = model_info['aux_variables']
        pd.DataFrame(columns=variables).to_csv(dest_folder_path + '/' +
                                               'variables.csv',
                                               index=False)

    logger.debug("Termina en sweepAndPlotFromJSON")
Example #11
    def runSweep(self, dest_folder_path, communicator=None, simu_flags=""):
        logger.debug("runSweep")
        # Make folder for runs
        runs_folder_name = "runs"
        runs_folder_path = os.path.join(dest_folder_path, runs_folder_name)
        files_aux.makeFolderWithPath(runs_folder_path)

        ##########
        # logger.debug("Phase 1")
        # PHASE 1: STD run
        if communicator is not None:
            communicator.set_total_progress_messages(100)
        std_run_name = "std_run.csv"
        std_run_path = os.path.join(runs_folder_path, std_run_name)
        if communicator is not None:
            communicator.update_completed(1)
        std_run_results = self.compiled_model.simulate(std_run_path)
        if communicator is not None:
            communicator.update_completed(1)

        ##########
        # logger.debug("Phase 2")
        # PHASE 2: Change the values of the parameters that will be fixed throughout all the runs
        # self.fixed_params = list(self.fixed_params)
        if communicator is not None:
            communicator.set_total_progress_messages(100)
        for perturbed_param_info in self.fixed_params:
            # communicator.update_completed(10)
            param_name = perturbed_param_info.name
            new_val = perturbed_param_info.new_val
            # Change the value in the model
            self.compiled_model.setParameterStartValue(param_name, new_val)

        # Make dir for perturbed runs
        perturbed_runs_folder_name = "perturbed"
        perturbed_runs_folder_path = os.path.join(runs_folder_path, perturbed_runs_folder_name)
        files_aux.makeFolderWithPath(perturbed_runs_folder_path)

        ##########
        # PHASE 3: Execute simulations
        # logger.debug("Phase 3")
        sweep_iterations = []
        perturbed_params_info = list(self.runsPerturbedParameters(communicator))
        if communicator is not None:
            communicator.set_total_progress_messages(len(perturbed_params_info)+1)
        perturbed_param_run_id_map = {}
        for i in range(len(perturbed_params_info)):

            # Finished +1 perturbed parameter
            if communicator is not None:
                communicator.update_completed(1)

            # Simulation
            perturbed_param_run_id_str = ""
            swept_params_info = perturbed_params_info[i]
            # Perturb the parameters for this iteration
            for perturbed_param_info in swept_params_info:

                logger.debug("new perturbation")
                logger.debug(perturbed_param_info)

                # Update parameter perturbation
                # Disaggregate param info
                param_name = perturbed_param_info.name
                new_val = perturbed_param_info.new_val
                # Change the value in the model
                self.compiled_model.setParameterStartValue(param_name, new_val)
                # Append new value and new name
                perturbed_param_run_id_str += param_name + ":" + str(round(new_val, 3)) + ","
            perturbed_param_run_id_str = perturbed_param_run_id_str[:-1]
            perturbed_param_run_id_map[perturbed_param_run_id_str] = i
            # Run the simulation
            simu_csv_name = "run_{0}.csv".format(i)
            simu_csv_path = os.path.join(perturbed_runs_folder_path, simu_csv_name)
            simu_results = self.compiled_model.simulate(simu_csv_path, simu_flags)
            # Instantiate sweep iteration results
            sweep_iter_results = SweepIterationResults(simu_results, swept_params_info)
            # Add results to list
            sweep_iterations.append(sweep_iter_results)
        ##########
        # PHASE 4: Finish
        # Instantiate sweep results
        swept_params_names = [x["name"] for x in self.perturbation_info_per_param]
        sweep_results = ParametersSweepResults(self.model_name,
                                               swept_params_names,
                                               self.fixed_params,
                                               std_run_results,
                                               sweep_iterations, )
        return sweep_results, perturbed_param_run_id_map
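The optional communicator only needs the two methods called above (set_total_progress_messages and update_completed). A minimal sketch of a compatible stand-in that just prints progress, assuming no further protocol is required:

class PrintingCommunicator:
    # Hypothetical stand-in for the progress communicator used by runSweep.
    def __init__(self):
        self.total = 0
        self.completed = 0

    def set_total_progress_messages(self, total):
        self.total = total

    def update_completed(self, n):
        self.completed += n
        print("progress: {0}/{1}".format(self.completed, self.total))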
Example #12
    def test_values_subtests(self):
        # We bundle several tests into one so we only build the model once instead of once per test
        # Initialize sweep example
        sweep_runner = self.sweepSpecsExample()
        # Test that the values per param are correct
        # Get values for each param
        vals_per_param = sweep_runner.valuesPerParameter()
        correct_vals_per_param = {
            "a": [-0.95, -1.05],
            "b": [-0.9, -1.1],
            "c": [-0.85, -1, -1.15],
        }
        for p in correct_vals_per_param:
            p_vals = vals_per_param[p]
            p_correct_vals = correct_vals_per_param[p]
            for v1, v2 in zip(p_vals, p_correct_vals):
                if not v1 == v2:
                    error_msg = "The parameter '{0}' has val {1} when it should have val {2}".format(
                        p, v1, v2)
                    self.fail(error_msg)
        # Test that the number of combinations are correct
        vals_combinations_n = len(
            list(sweep_runner.runsPerturbedParameters(None)))
        correct_n_runs = 12
        if vals_combinations_n != correct_n_runs:
            error_msg = "The sweep should have {0} runs but it had {1}".format(
                correct_n_runs, vals_combinations_n)
            self.fail(error_msg)
        # Test that the sweep "works"

        sweep_results, perturbed_param_run_id_map = sweep_runner.runSweep(
            self._temp_dir)
        # Check that the swept params are correct
        swept_params = sweep_results.swept_parameters_names
        correct_swept_params = list(correct_vals_per_param.keys())
        intersection_swept_params = [
            i for i, j in zip(swept_params, correct_swept_params) if i == j
        ]
        if len(intersection_swept_params) != 3:
            error_msg = "The swept params returned were {0} when they should've been {1}".format(
                swept_params, correct_swept_params)
            self.fail(error_msg)
        # Check that the std run has the correct values
        variables = ["xa", "xb", "xc", "y"]
        vars_std_val = -1
        std_run = sweep_results.std_run
        std_run_path = std_run.output_path
        df_std = pandas.read_csv(std_run_path, index_col=0)
        df_std_last_row = df_std.iloc[-1]
        for var in variables:
            var_val = df_std_last_row[var]
            if not numpy.isclose(var_val, vars_std_val):
                error_msg = "The variable {0} should have value {1} but it has value {2} standard run" \
                    .format(var, vars_std_val, var_val)
                self.fail(error_msg)
        # Define matches between parameters and variables in the model
        param_var_match = {
            "a": "xa",
            "b": "xb",
            "c": "xc",
            "d": "y",
        }
        # Save the fixed params info
        fixed_params = sweep_results.fixed_parameters_info
        # Check that the perturbed runs have the correct values
        perturbed_runs = sweep_results.perturbed_runs
        for pert_run in perturbed_runs:
            # Get run .csv path
            run_csv_path = pert_run.simulation_results.output_path
            # Get df for run simulation
            df_pert_run = pandas.read_csv(run_csv_path, index_col=0)
            df_last_row = df_pert_run.iloc[-1]
            params_info_pert_run = pert_run.swept_params_info
            for param_info in params_info_pert_run:
                p_name = param_info.name
                var_for_param = param_var_match[p_name]
                p_new_val = param_info.new_val
                df_var_val = df_last_row[var_for_param]
                if not numpy.isclose(df_var_val, p_new_val):
                    error_msg = "The variable {0} should have value {1} but it has value {2} in run with path {3}"\
                        .format(var_for_param, p_new_val, df_var_val, run_csv_path)
                    self.fail(error_msg)
            # Check the fixed params
            for fixed_p in fixed_params:
                p_name = fixed_p.name
                var_for_param = param_var_match[p_name]
                p_new_val = fixed_p.new_val
                df_var_val = df_last_row[var_for_param]
                if not numpy.isclose(df_var_val, p_new_val):
                    error_msg = "The variable {0} should have value {1} but it has value {2} in run with path {3}" \
                        .format(var_for_param, p_new_val, df_var_val, run_csv_path)
                    self.fail(error_msg)
        # Integration test: reuse the model already built and swept in this test to also exercise the plotting
        plot_folder_path = os.path.join(self._temp_dir, "plots")
        files_aux.makeFolderWithPath(plot_folder_path)
        sweep_plotter = plot_sweep.SweepPlotter(sweep_results)
        sweep_plotter.plotInFolder("xa", plot_folder_path)
        # Check that the plots folder is not empty
        files_in_dir = os.listdir(plot_folder_path)
        if len(files_in_dir) < 1:
            error_msg = "The sweep + plot didn't make any files in dest folder"
            self.fail(error_msg)