def sweepSpecsExample(self):
    """Build an example ParametersSweeper wired to a model file in the temp dir."""
    # Write the example model source to disk first
    mo_path = os.path.join(self._temp_dir, "model.mo")
    files_aux.writeStrToFile(model_str, mo_path)
    # Per-parameter perturbation specs: (name, delta %, iterations)
    specs = [
        ("a", 5, 2),
        ("b", 10, 2),
        ("c", 15, 3),
    ]
    perturbation_info_per_param = [
        {"name": name, "delta_percentage": delta, "iterations": iters}
        for name, delta, iters in specs
    ]
    sweep_runner = running.sweep.ParametersSweeper(
        "Model",                      # model_name
        mo_path,                      # model_file_path
        0,                            # start_time
        1,                            # stop_time
        perturbation_info_per_param,
        [{"name": "d", "value": 1}],  # fixed_params
        self._temp_dir,
        number_of_intervals=2)
    return sweep_runner
Beispiel #2
0
def perturbateAndAnalyze(model_name, model_file_path,
                         start_time, stop_time,
                         parameters_to_perturb, percentage,
                         target_vars, dest_folder_path, base_dir, results_dirname):
    """Perturb each parameter in isolation, analyze sensitivities, save result.json.

    Creates 'simulation' and 'analysis' sub-folders under dest_folder_path,
    runs one simulation per perturbed parameter, computes the individual
    sensitivity analysis for target_vars, and writes the resulting analysis
    file paths to '<dest_folder_path>/result.json'.
    """
    # Scratch folder for the perturbed simulations
    logger.debug('Create simulations folder')
    simulations_dir = os.path.join(dest_folder_path, "simulation")
    files_aux.makeFolderWithPath(simulations_dir)

    # Scratch folder for the analysis outputs
    logger.debug('Create analysis folder')
    analysis_dir = os.path.join(dest_folder_path, "analysis")
    files_aux.makeFolderWithPath(analysis_dir)

    # One isolated (one-at-a-time) perturbation per parameter
    perturbator_kwargs = {
        "model_name": model_name,
        "model_file_path": model_file_path,
        "start_time": start_time,
        "stop_time": stop_time,
        "parameters": parameters_to_perturb,
        "perc_perturb": percentage,
        "build_folder_path": simulations_dir,
        "base_dir": base_dir,
        "results_dirname": results_dirname,
    }
    logger.debug('Initialize perturbator')
    logger.debug(perturbator_kwargs)
    perturbator = analysis.indiv_sens.ParametersIsolatedPerturbator(**perturbator_kwargs)

    logger.debug('Isolated perturbations results')
    runs_results = perturbator.runSimulations(simulations_dir)
    # Sensitivity analysis over the perturbed runs
    analysis_results = analysis.indiv_sens.completeIndividualSensAnalysis(
        isolated_perturbations_results=runs_results,
        target_vars=target_vars,
        percentage_perturbed=percentage,
        specific_year=stop_time,
        output_folder_analyses_path=analysis_dir,
        rms_first_year=start_time,
        rms_last_year=stop_time,
    )

    logger.debug("Finishing: save results")
    # Persist the analysis artifact paths as JSON for downstream consumers
    result_json_path = os.path.join(dest_folder_path, "result.json")
    files_aux.writeStrToFile(
        json.dumps(analysis_results["paths"], sort_keys=True, indent=2),
        result_json_path)
Beispiel #3
0
 def modelExample(self):
     """Write the example model to the temp dir and return its standard args.

     Returns a tuple: (build_folder_path, model_file_path, model_name,
     parameters_to_perturb, perc_perturb, start_time, stop_time).
     """
     mo_path = os.path.join(self._temp_dir, "model.mo")
     files_aux.writeStrToFile(model_str, mo_path)
     return (self._temp_dir, mo_path, "Model", ["a", "b", "c"], 5, 0, 2)
Beispiel #4
0
 def overrideFlagFromParamsVals(self, binary_folder_path, params_vals_dict):
     """Return the OMC '-overrideFile=...' flag for the given parameter values.

     A falsy dict yields an empty flag and writes nothing to disk; otherwise
     the override spec is saved as 'override.txt' next to the binary.
     """
     if not params_vals_dict:
         return ""
     # Persist the override specification alongside the compiled binary
     override_file_name = "override.txt"
     files_aux.writeStrToFile(
         overrrideStringFromParamsDict(params_vals_dict),
         os.path.join(binary_folder_path, override_file_name))
     return "-overrideFile={0}".format(override_file_name)
def perturbateAndAnalyze(model_name, model_file_path, start_time, stop_time,
                         parameters_to_perturb, percentage, target_vars,
                         dest_folder_path):
    """Perturb each parameter in isolation, analyze sensitivities, save paths.

    Creates 'simulation' and 'analysis' sub-folders under dest_folder_path,
    simulates one perturbation per parameter, runs the individual sensitivity
    analysis for target_vars, and writes the analysis file paths to
    '<dest_folder_path>/result.json'.
    """
    # Scratch folders for simulation outputs and analysis results
    simulations_dir = os.path.join(dest_folder_path, "simulation")
    files_aux.makeFolderWithPath(simulations_dir)
    analysis_dir = os.path.join(dest_folder_path, "analysis")
    files_aux.makeFolderWithPath(analysis_dir)
    # One isolated (one-at-a-time) perturbation per parameter
    perturbator = analysis.indiv_sens.ParametersIsolatedPerturbator(
        model_name=model_name,
        model_file_path=model_file_path,
        start_time=start_time,
        stop_time=stop_time,
        parameters=parameters_to_perturb,
        perc_perturb=percentage,
        build_folder_path=simulations_dir,
    )
    logger.info("Running Modelica with specified information")
    runs_results = perturbator.runSimulations(simulations_dir)
    logger.info("Analyzing variable sensitivities to parameters from CSVs")
    # Sensitivity analysis over the perturbed runs
    analysis_results = analysis.indiv_sens.completeIndividualSensAnalysis(
        isolated_perturbations_results=runs_results,
        target_vars=target_vars,
        percentage_perturbed=percentage,
        specific_year=stop_time,
        output_folder_analyses_path=analysis_dir,
        rms_first_year=start_time,
        rms_last_year=stop_time,
    )
    # Persist the analysis artifact paths as JSON for downstream consumers
    paths_json_file_path = os.path.join(dest_folder_path, "result.json")
    files_aux.writeStrToFile(
        json.dumps(analysis_results["paths"], sort_keys=True, indent=2),
        paths_json_file_path)
    logger.info(
        "Finished. The file {0} has all the analysis files paths.".format(
            paths_json_file_path))
Beispiel #6
0
 def threeParamsModelOptimizerBaseArgsExample(self):
     """Write the example model and return base optimizer arguments.

     Returns a tuple: (model_name, start_time, stop_time, model_file_path,
     target_var_name, parameters_to_perturb, percentage, max_or_min,
     epsilon, build_folder_path).
     """
     mo_path = os.path.join(self._temp_dir, "model.mo")
     files_aux.writeStrToFile(model_str, mo_path)
     return ("Model", 0, 3, mo_path, "x", ["a", "b", "c"],
             5, "min", 0.001, self._temp_dir)
Beispiel #7
0
def sweepAndPlotFromJSON(dest_folder_path, json_file_path):
    """Run a parameter sweep described by a JSON spec and plot each variable.

    Reads the sweep specification from json_file_path, runs the sweep into
    dest_folder_path, plots every variable listed under 'vars_to_analyze'
    into a 'plots' sub-folder, and writes the plot paths to 'result.json'.
    """
    with open(json_file_path, 'r') as fp:
        sweep_spec = json.load(fp)
    # Resolve the .mo file path from the JSON-provided path
    mo_path = files_aux.moFilePathFromJSONMoPath(sweep_spec["model_mo_path"])
    # Build and run the sweeper
    sweep_runner = running.sweep.ParametersSweeper(
        model_name=sweep_spec["model_name"],
        model_file_path=mo_path,
        start_time=sweep_spec["start_time"],
        stop_time=sweep_spec["stop_time"],
        perturbation_info_per_param=sweep_spec["parameters_to_sweep"],
        fixed_params=sweep_spec["fixed_params"],
        build_folder_path=dest_folder_path,
    )
    sweep_results = sweep_runner.runSweep(dest_folder_path)
    # Plot the sweep for every requested variable
    sweep_plotter = plot_sweep.SweepPlotter(sweep_results)
    plot_folder_path = os.path.join(dest_folder_path, "plots")
    files_aux.makeFolderWithPath(plot_folder_path)
    vars_plots_paths = {
        var_name: sweep_plotter.plotInFolder(var_name, plot_folder_path)
        for var_name in sweep_spec["vars_to_analyze"]
    }
    # Persist the plot paths as JSON for downstream consumers
    paths_dict = {"sweep_plots": vars_plots_paths}
    paths_json_file_path = os.path.join(dest_folder_path, "result.json")
    files_aux.writeStrToFile(
        json.dumps(paths_dict, sort_keys=True, indent=2),
        paths_json_file_path)
    logger.info("Finished. The file {0} has all the sweep files paths.".format(
        paths_json_file_path))
Beispiel #8
0
 def overrideFlagFromParamsVals(self, binary_folder_path, params_vals_dict):
     """Build the OMC override flag, writing 'override.txt' when values exist.

     A falsy dict yields an empty flag; otherwise the override spec is saved
     next to the binary and '-overrideFile=override.txt' is returned.
     """
     logger.debug('override flag params')
     logger.debug(params_vals_dict)
     if not params_vals_dict:
         override_flag = ""
     else:
         logger.debug('enter 0')
         override_str = overrrideStringFromParamsDict(params_vals_dict)
         logger.debug('enter 1')
         # Persist the override specification alongside the compiled binary
         override_file_name = "override.txt"
         logger.debug('enter 2')
         files_aux.writeStrToFile(
             override_str,
             os.path.join(binary_folder_path, override_file_name))
         logger.debug('enter 3')
         override_flag = "-overrideFile={0}".format(override_file_name)
     # Return value
     logger.debug('override flag')
     logger.debug(override_flag)
     return override_flag
    def threeParamsModelOptimizerBaseArgsExample(self):
        """Write the example model and return extended optimizer arguments.

        Returns a tuple: (model_name, start_time, stop_time, model_file_path,
        target_var_name, parameters_to_perturb, percentage, max_or_min,
        epsilon, build_folder_path, objective_function_name, optimizer_name,
        alpha_value, constrained_time_path_file).
        """
        mo_path = os.path.join(self._temp_dir, "model.mo")
        files_aux.writeStrToFile(model_str, mo_path)
        # Base simulation/optimization settings plus the optimizer specifics
        return ("Model", 0, 3, mo_path, "x", ["a", "b", "c"],
                5, "min", 0.001, self._temp_dir,
                "CURVIFGR", "Single variable", 0.5,
                "/home/omsens/limit_path.csv")
Beispiel #10
0
def createMos(mo_file,
              model_name,
              params_info_list,
              percentage,
              output_mos_path,
              startTime,
              stopTime,
              csv_file_name_modelica_function,
              std_run_filename=None):
    """Assemble an OMC-compatible .mos script and write it to output_mos_path.

    The script loads and builds the model, runs the standard (unperturbed)
    simulation, then runs one simulation per perturbed parameter.

    Returns:
        0 on completion (legacy convention).
    """
    script_parts = [
        strForLoadingAndBuilding(mo_file, model_name, startTime, stopTime),
        strStandardRun(model_name, std_run_filename),
        strForPerturbateParamAndRun(params_info_list, model_name, percentage,
                                    csv_file_name_modelica_function,
                                    omc_logger_flags),
    ]
    # Concatenate the sections into one script and persist it
    files_aux.writeStrToFile("".join(script_parts), output_mos_path)
    return 0
Beispiel #11
0
def analyzeFromJSON(dest_folder_path, json_file_path):
    """Run the constrained model-optimization workflow driven by a JSON spec.

    Reads the experiment description from *json_file_path*, compiles the
    model into a plots sub-folder, simulates the default (x0) run, optimizes
    the target variable, re-simulates with the optimal parameters, plots the
    requested curves, and writes a 'result.json' summary into
    '<dest_folder_path>/results/'.

    Raises:
        Exception: when neither 'plot_std_run' nor 'plot_restriction' is
            enabled in the JSON spec (no plot choice was made).
    """

    # Define destination folder path
    dest_folder_path += "/" + "results/"
    with open(json_file_path, 'r') as fp:
        full_json = json.load(fp)

    # Prepare init args
    model_mo_path = files_aux.moFilePathFromJSONMoPath(
        full_json["model_mo_path"])

    # Note: Check that the string in 'restriction_path' is effectively a directory
    # NOTE(review): string concatenation below assumes 'restriction_path'
    # ends with a path separator — confirm against callers.
    base_dir = full_json["restriction_path"]
    results_dirname = base_dir + full_json["model_name"] + '_res.csv'

    # Future work: implement 'intelligent alpha' so that the user doesn't need to iterate every time
    plot_std = full_json["plot_std_run"]
    plot_restriction = full_json["plot_restriction"]

    # All arguments for the model optimizer, straight from the JSON spec
    optim_kwargs = {
        "model_file_path": model_mo_path,
        "build_folder_path": dest_folder_path,
        "target_var_name": full_json["target_var_name"],
        "parameters_to_perturb": full_json["parameters_to_perturb"],
        "model_name": full_json["model_name"],
        "start_time": full_json["start_time"],
        "stop_time": full_json["stop_time"],
        "max_or_min": full_json["max_or_min"],
        "objective_function_name": full_json["objective_function_name"],
        "optimizer_name": full_json["optimizer_name"],
        "alpha_value": full_json["alpha_value"],
        "constrained_time_path_file": full_json["constrained_time_path_file"],
        "constrained_variable": full_json["constrained_variable"],
        "constrained_epsilon": full_json["constrained_epsilon"],
        "base_dir": base_dir,
        "results_dirname": results_dirname
    }
    # logger.debug('Generated optim_kwargs')

    # Initialize builder and compile model:
    # We compile the model again for now to avoid having remnants of the optimization in its compiled model
    plots_folder_name = "plots"
    plots_folder_path = os.path.join(dest_folder_path, plots_folder_name)
    files_aux.makeFolderWithPath(plots_folder_path)

    # Make sub-folder for new model. in Windows we can't use "aux" for folder names
    model_folder_name = "aux_folder"
    model_folder_path = os.path.join(plots_folder_path, model_folder_name)
    files_aux.makeFolderWithPath(model_folder_path)
    model_builder = build_model.ModelicaModelBuilder(full_json["model_name"],
                                                     full_json["start_time"],
                                                     full_json["stop_time"],
                                                     model_mo_path)
    compiled_model = model_builder.buildToFolderPath(model_folder_path,
                                                     base_dir, results_dirname)

    # Run STD Model: simulate with the default value of every perturbed parameter
    x0_csv_name = "x0_run.csv"
    x0_csv_path = os.path.join(model_folder_path, x0_csv_name)
    x0_dict = {
        p: compiled_model.defaultParameterValue(p)
        for p in full_json["parameters_to_perturb"]
    }
    compiled_model.simulate(x0_csv_path, params_vals_dict=x0_dict)
    df_x0_run = pd.read_csv(x0_csv_path, index_col=False)
    # Also copy the baseline run to the shared results location
    df_x0_run.to_csv(results_dirname, index=False)
    # logger.debug('Generated df_x0_run')

    # Run optimization and re-run with optimal parameters
    model_optimizer = model_optimizer_f.ModelOptimizer(**optim_kwargs)
    # logger.debug('model optimizer generated')

    optim_result = model_optimizer.optimize(full_json["percentage"],
                                            full_json["epsilon"])
    x_opt_csv_name = "x_opt_run.csv"
    x_opt_csv_path = os.path.join(model_folder_path, x_opt_csv_name)
    compiled_model.simulate(x_opt_csv_path,
                            params_vals_dict=optim_result.x_opt)
    df_x_opt_run = pd.read_csv(x_opt_csv_path)

    # Update optimum value of 'optim_result'. IMPORTANT: assumes the last contents of csv file is the optimum
    final_optimum_value = df_x_opt_run[
        full_json["target_var_name"]].tolist()[-1]
    optim_result.target_optimum = final_optimum_value

    # Plot in folder: choose plotter inputs based on which curves were requested
    var_optimization = full_json["target_var_name"]
    var_restriction = full_json['constrained_variable']
    if plot_std and plot_restriction:
        # Restriction curve is clipped to the simulated time span
        restriction = pd.read_csv(full_json["constrained_time_path_file"],
                                  index_col=False)
        restriction = restriction[
            restriction['time'] <= df_x_opt_run['time'].max()]
        vect_plotter = plot_vect_f.VectorialPlotter(optim_result, df_x_opt_run,
                                                    df_x0_run, restriction,
                                                    var_optimization,
                                                    var_restriction)
    elif plot_std:
        vect_plotter = plot_vect_f.VectorialPlotter(optim_result, df_x_opt_run,
                                                    df_x0_run, None,
                                                    var_optimization, None)
    elif plot_restriction:
        restriction = pd.read_csv(full_json["constrained_time_path_file"],
                                  index_col=False)
        restriction = restriction[
            restriction['time'] <= df_x_opt_run['time'].max()]
        vect_plotter = plot_vect_f.VectorialPlotter(optim_result, df_x_opt_run,
                                                    None, restriction,
                                                    var_optimization,
                                                    var_restriction)
    else:
        raise Exception(
            "Please choose a plot option. We are still working on making this choice automatically."
        )

    # Plot
    plot_path = vect_plotter.plotInFolder(plots_folder_path)

    # Prepare JSON output dict
    vect_json_dict = {
        "x0": optim_result.x0,
        "x_opt": optim_result.x_opt,
        "f(x0)": optim_result.f_x0,
        "f(x)_opt": optim_result.target_optimum,
        "stop_time": optim_result.stop_time,
        "variable": optim_result.variable_name,
        "plot_path": plot_path,
    }
    # Write dict as json
    optim_json_str = json.dumps(vect_json_dict, sort_keys=True, indent=2)
    optim_json_file_name = "result.json"
    optim_json_file_path = os.path.join(dest_folder_path, optim_json_file_name)
    files_aux.writeStrToFile(optim_json_str, optim_json_file_path)
Beispiel #12
0
    def test_builder_works_correctly_for_correct_inputs(self):
        """End-to-end check of ModelicaModelBuilder and its compiled-model wrapper.

        Builds the example model into the temp dir and then verifies, in
        order: output file generation, setParameterStartValue,
        defaultParameterValue, parameterValue, quickSimulate (with and
        without parameter overrides), restoreAllParametersToDefaultValues,
        and that per-simulation parameter overrides do not leak into later
        simulations. Later assertions depend on earlier mutations, so the
        statement order matters.
        """
        # Write model to temp dir
        model_file_path = os.path.join(self._temp_dir, "model.mo")
        files_aux.writeStrToFile(model_str, model_file_path)
        # Test model params
        model_name = "Model"
        start_time = 1
        stop_time = 2

        # Initialize and call model builder
        test_model_builder = build_model.ModelicaModelBuilder(
            model_name, start_time, stop_time, model_file_path)

        # NOTE(review): hard-coded absolute path makes this test
        # environment-dependent — confirm it exists where the suite runs.
        base_dir = '/home/omsens/Documents/results_experiments/logging/'

        compiled_model = test_model_builder.buildToFolderPath(
            self._temp_dir, base_dir)
        # Get script extensions regex
        regex = "{0}".format(model_name)
        # Get list of files from regex
        files_in_dir = os.listdir(self._temp_dir)
        files_for_regex = [x for x in files_in_dir if re.match(regex, x)]
        # Check that there is at least one file for regex
        if len(files_for_regex) < 1:
            error_msg = "The model builder should create at least one file in folder."
            self.fail(error_msg)
        # Test that the compiled model wrapper instance works correctly
        compiled_model.setParameterStartValue("a", 0)
        simulation_path = os.path.join(self._temp_dir, "simu.csv")
        simu_results = compiled_model.simulate(simulation_path)
        df_simu = pandas.read_csv(simulation_path)
        x_min = df_simu["x"].min()
        x_max = df_simu["x"].max()
        # We set the derivative slope as 0 so x should be a constant 1
        if not (x_min == x_max == 1):
            error_msg = "The parameter was not changed correctly"
            self.fail(error_msg)
        # Test that the default value for the params are backup-ed correctly
        param_def_val = compiled_model.defaultParameterValue("a")
        # We ask for the default value for a so it should be -1
        if not param_def_val == -1:
            error_msg = "The parameter default value was not backup-ed correctly"
            self.fail(error_msg)
        # Test that the current value for the params is returned correctly
        param_val = compiled_model.parameterValue("a")
        # We ask for the current value for a so it should be 0
        if not param_val == 0:
            error_msg = "The parameter current value was not returned correctly"
            self.fail(error_msg)
        # Test that the "quickSimulate" function returns the right value
        x_quick_simu = compiled_model.quickSimulate("x")
        if not (x_quick_simu == 1):
            error_msg = "The quick simulation didn't return the right value"
            self.fail(error_msg)
        # Test that the "set everything back to defaults" works
        compiled_model.restoreAllParametersToDefaultValues()
        param_val = compiled_model.parameterValue("a")
        if not param_val == -1:
            error_msg = "The parameter default value was not restored correctly"
            self.fail(error_msg)
        # Test that quick simulate with params values works
        params_vals = {"a": 4, "b": 1}
        x_quick_simu = compiled_model.quickSimulate("x", params_vals)
        # Check return value
        if not (x_quick_simu == 5):
            error_msg = "The parameter was not changed correctly"
            self.fail(error_msg)
        # Test that subsequent quick simulations initialize params correctly
        params_vals = {"a": 4, "b": 0}
        x_quick_simu = compiled_model.quickSimulate("x", params_vals)
        # We set the derivative slope as 0 so x should be a constant 1
        if not (x_quick_simu == 1):
            error_msg = "The parameter was not changed correctly"
            self.fail(error_msg)
        # Test that the parameters change inside a quickSimulate don't change the parameter on the model for
        #   subsequent simulations
        param_val = compiled_model.parameterValue("a")
        if not param_val == -1:
            error_msg = "The parameter default value was modified."
            self.fail(error_msg)
        # Test that a simulate after changing parameters as arguments inside quickSimulate works correctly
        simulation_path_2 = os.path.join(self._temp_dir, "simu_2.csv")
        simu_results_2 = compiled_model.simulate(simulation_path_2)
        df_simu_2 = pandas.read_csv(simulation_path_2)
        df_simu_2_last_row = df_simu_2.iloc[-1]
        x_last = df_simu_2_last_row["x"]
        # The parameters changes included in the simulation (and not using "setParameterValue" or similar)
        #   should not change the value for subsequent simulations
        if not numpy.isclose(x_last, 0):
            error_msg = "The parameter default was modified"
            self.fail(error_msg)
        # Test that simulate changes parameters as args correctly
        params_vals = {"a": 4, "b": 1}
        simulation_path_3 = os.path.join(self._temp_dir, "simu_3.csv")
        simu_results_3 = compiled_model.simulate(simulation_path_3,
                                                 params_vals)
        df_simu_3 = pandas.read_csv(simulation_path_3)
        df_simu_3_last_row = df_simu_3.iloc[-1]
        x_last_3 = df_simu_3_last_row["x"]
        # Check value
        if not numpy.isclose(x_last_3, 5):
            error_msg = "The parameter was not changed correctly"
            self.fail(error_msg)
Beispiel #13
0
 def writeMOSScriptToPath(self, file_path):
     """Render this builder's .mos script and write it to *file_path*.

     Writing to disk arguably isn't the builder's responsibility, but it
     lives here for now.
     """
     files_aux.writeStrToFile(self.mosScriptString(), file_path)
Beispiel #14
0
def sweepAndPlotFromJSON(dest_folder_path_base,
                         json_file_path,
                         communicator=None):
    """Run a parameter sweep described by a JSON spec; plot and save run CSVs.

    Reads the sweep description from *json_file_path*, runs the sweep into
    '<dest_folder_path_base>/results/', writes 'result.json' mapping each
    analyzed variable to its (extension-less) plot path, generates one plot
    per variable in 'vars_to_analyze', saves the parameter combination of
    every run to 'parameters_run.csv', and writes an empty 'variables.csv'
    whose columns are the model's auxiliary variables.

    Args:
        dest_folder_path_base: base output folder; 'model_info.json' is read
            from here and results go into its 'results/' sub-folder.
        json_file_path: path to the JSON sweep specification.
        communicator: optional progress object forwarded to the sweep runner.

    Fix vs. previous version: building the parameters DataFrame used
    `DataFrame.append` (removed in pandas 2.0) plus a positional-axis
    `drop('index', 1)`; rows are now collected and concatenated once with
    `pd.concat(..., ignore_index=True)`, producing the same CSV.
    """
    logger.debug("Entra en sweepAndPlotFromJSON")
    dest_folder_path = dest_folder_path_base + "/" + "results/"
    with open(json_file_path, 'r') as fp:
        full_json = json.load(fp)

    # Prepare sweep init args
    model_mo_path = files_aux.moFilePathFromJSONMoPath(
        full_json["model_mo_path"])

    # Hard-coded for now; not used by the sweep right now
    base_dir = "/home/omsens/Documents/results_experiments/logging/"
    results_dirname = base_dir + full_json["model_name"] + '_res.csv'

    # Whether the plots should include the upper/lower perturbation bounds
    with_upper_and_lower = full_json["plot_upper_lower_limit"]

    # Set kwargs
    logger.debug("setting kwargs")
    sweep_kwargs = {
        "model_name": full_json["model_name"],
        "model_file_path": model_mo_path,
        "start_time": full_json["start_time"],
        "stop_time": full_json["stop_time"],
        "perturbation_info_per_param": full_json["parameters_to_sweep"],
        "fixed_params": full_json["fixed_params"],
        "build_folder_path": dest_folder_path,
        "base_dir": base_dir,
        "results_dirname": results_dirname,
    }

    # Initialize sweeper
    logger.debug("Initialize sweeper")
    logger.debug(sweep_kwargs)
    sweep_runner = running.sweep.ParametersSweeper(**sweep_kwargs)

    # Run sweep
    logger.debug("Run sweep")
    sweep_results, perturbed_param_run_id_map = sweep_runner.runSweep(
        dest_folder_path, communicator)
    logger.debug(perturbed_param_run_id_map)

    # Initialize sweep results plotter
    logger.debug("Sweep plotter")
    sweep_plotter = plot_sweep.SweepPlotter(sweep_results)
    # Make folder for plots
    logger.debug("Goto plot folder path")
    plot_folder_path = os.path.join(dest_folder_path, "plots")
    files_aux.makeFolderWithPath(plot_folder_path)

    # Save results.json mapping each variable to its plot path (no extension)
    logger.debug("Save results.json")
    vars_plots_paths = {
        var_name: os.path.join(plot_folder_path, var_name)
        for var_name in full_json['vars_to_analyze']
    }
    paths_dict = {"sweep_plots": vars_plots_paths}
    paths_json_str = json.dumps(paths_dict, sort_keys=True, indent=2)
    paths_json_file_name = "result.json"
    paths_json_file_path = os.path.join(dest_folder_path, paths_json_file_name)
    files_aux.writeStrToFile(paths_json_str, paths_json_file_path)

    # Generate plots
    for var_name in full_json["vars_to_analyze"]:
        sweep_plotter.plotInFolder(var_name, vars_plots_paths[var_name],
                                   with_upper_and_lower)

    # Save the parameter combination used by each run id
    logger.debug("Parameters run save")
    row_frames = []
    for param_comb, run_id in perturbed_param_run_id_map.items():
        logger.debug("parameter combination")
        logger.debug(param_comb)
        # param_comb is a "name:value,name:value,..." encoded string
        params = {
            v[0]: v[1]
            for v in (x.split(":") for x in param_comb.split(","))
        }
        params['run_id'] = run_id
        row_frames.append(pd.DataFrame(data=params, index=[0]))
    # DataFrame.append was removed in pandas 2.0: concatenate the collected
    # rows once instead (same result as the old per-row append/reset_index).
    if row_frames:
        params_df = pd.concat(row_frames, ignore_index=True)
    else:
        params_df = pd.DataFrame()
    params_df.to_csv(dest_folder_path + '/' + 'parameters_run.csv',
                     index=False)

    # Write an empty CSV whose columns are the model's auxiliary variables
    with open(dest_folder_path_base + '/' + 'model_info.json') as f:
        model_info = json.load(f)
        variables = model_info['aux_variables']
        pd.DataFrame(columns=variables).to_csv(dest_folder_path + '/' +
                                               'variables.csv',
                                               index=False)

    logger.debug("Termina en sweepAndPlotFromJSON")