def setup():
    """Load the two datasets plus the model and parameters for this case study."""
    # %% Load in data, model and parameters
    first_dataset = read_data_file(script_folder.joinpath(DATA_PATH1))
    second_dataset = read_data_file(script_folder.joinpath(DATA_PATH2))
    loaded_model = read_model_from_yaml_file(script_folder.joinpath(MODEL_PATH))
    loaded_parameters = read_parameters_from_yaml_file(
        script_folder.joinpath(PARAMETERS_FILE_PATH)
    )
    return loaded_model, loaded_parameters, first_dataset, second_dataset
def main():
    """Run the two-dataset target analysis end to end.

    Loads parameters, data and model, validates the model against the
    parameters, builds the analysis :class:`Scheme`, optimizes it, and
    prints a markdown summary.

    Returns
    -------
    The glotaran optimization result object.
    """
    # %% Load in data, model and parameters
    # (removed dead commented-out "resume from optimized_parameters.csv" code;
    # restore from VCS history if that workflow is ever needed again)
    parameters = read_parameters_from_yaml_file(
        script_folder.joinpath(PARAMETERS_FILE_PATH)
    )
    dataset1 = read_data_file(script_folder.joinpath(DATA_PATH1))
    dataset2 = read_data_file(script_folder.joinpath(DATA_PATH2))
    model = read_model_from_yaml_file(script_folder.joinpath(MODEL_PATH))

    # %% Validate model and parameters
    print(model.validate(parameters=parameters))

    # %% Construct the analysis scheme
    scheme = Scheme(
        model,
        parameters,
        {"dataset1": dataset1, "dataset2": dataset2},
        optimization_method="Levenberg-Marquardt",
        non_negative_least_squares=True,
    )

    # %% Optimize the analysis scheme (and estimate parameters)
    result = optimize(scheme)

    # %% Basic print of results
    print(result.markdown(True))
    return result
# Resolve input paths for the target-analysis run.
data_path = script_folder.joinpath("data/data.ascii")
# model_path = script_folder.joinpath(GLOBAL_MODEL)
# parameter_path = script_folder.joinpath(GLOBAL_PARAMS)
model_path = script_folder.joinpath(TARGET_MODEL)
parameter_path = script_folder.joinpath(TARGET_PARAMS)

# Derive the output folder name from the model file name (model_* -> result_*).
result_name = str(model_path.stem).replace("model", "result")
output_folder = results_folder.joinpath(result_name)
print(f"- Using folder {output_folder.name} to read/write files for this run")

# %%
result_datafile = output_folder.joinpath("dataset1.nc")
if result_datafile.exists() and SKIP_FIT:
    print(f"Loading earlier fit results from: {result_datafile}")
else:
    dataset = read_data_file(data_path)
    model = read_model_from_yaml_file(model_path)
    parameter = read_parameters_from_yaml_file(parameter_path)
    scheme = Scheme(
        model,
        parameter,
        {"dataset1": dataset},
        maximum_number_function_evaluations=9,
        non_negative_least_squares=True,
    )
    print(model.validate(parameters=parameter))
    # The problem is constructed automatically from the scheme by the optimize call,
    # but can also be created manually for debug purposes:
    # NOTE(review): indentation was lost when this chunk was flattened — placing
    # Problem(scheme) inside the fit branch because `scheme` is only defined
    # here; confirm against the original file.
    test_problem = Problem(scheme)
from glotaran.analysis.optimize import optimize
from glotaran.analysis.scheme import Scheme
from glotaran.io import read_data_file

# Input file locations, relative to the script folder.
DATA_PATH1 = "data/data1.ascii"
DATA_PATH2 = "data/data2.ascii"
MODEL_PATH = "models/model.yml"
PARAMETERS_FILE_PATH = "models/parameters.yml"

# %% Setup necessary (output) paths
results_folder, script_folder = setup_case_study(Path(__file__))
output_folder = results_folder.joinpath("target_analysis")
print(f"- Using folder {output_folder.name} to read/write files for this run")

# %% Load in data, model and parameters
dataset1 = read_data_file(script_folder.joinpath(DATA_PATH1))
dataset2 = read_data_file(script_folder.joinpath(DATA_PATH2))
model = read_model_from_yaml_file(script_folder.joinpath(MODEL_PATH))
parameters = read_parameters_from_yaml_file(script_folder.joinpath(PARAMETERS_FILE_PATH))

# %% Validate model and parameters
print(model.validate(parameters=parameters))

# %% Construct the analysis scheme
# NOTE(review): this chunk was truncated mid-call when the file was flattened;
# the closing parenthesis below was restored to keep the call syntactically
# valid — confirm no further arguments followed in the original file.
scheme = Scheme(
    model,
    parameters,
    {"dataset1": dataset1, "dataset2": dataset2},
    maximum_number_function_evaluations=11,
    non_negative_least_squares=True,
    optimization_method="TrustRegionReflection",
)
from glotaran.analysis.optimize import optimize
from glotaran.analysis.scheme import Scheme
from glotaran.io import read_data_file

# Input file locations, relative to the script folder.
DATA_PATH = "data/demo_data_Hippius_etal_JPCC2007_111_13988_Figs5_9.ascii"
MODEL_PATH = "models/model.yml"
PARAMETERS_FILE_PATH = "models/parameters.yml"

# %% Setup necessary (output) paths
script_file = Path(__file__)
results_folder, script_folder = setup_case_study(script_file)
output_folder = results_folder.joinpath(script_file.stem)
print(f"- Using folder {output_folder.name} to read/write files for this run")

# %% Load in data, model and parameters
dataset = read_data_file(script_folder.joinpath(DATA_PATH))
model = read_model_from_yaml_file(script_folder.joinpath(MODEL_PATH))
parameters = read_parameters_from_yaml_file(script_folder.joinpath(PARAMETERS_FILE_PATH))

# %% Validate model and parameters
print(model.validate(parameters=parameters))

# %% Construct the analysis scheme
scheme = Scheme(
    model,
    parameters,
    {"dataset1": dataset},
    maximum_number_function_evaluations=10,
)

# %% Optimize the analysis scheme (and estimate parameters)
result = optimize(scheme)