def setup():
    """Load the model, parameters and both datasets from disk.

    Resolves the module-level path constants relative to ``script_folder``
    and returns a ``(model, parameters, dataset1, dataset2)`` tuple.
    """
    first_dataset = read_data_file(script_folder.joinpath(DATA_PATH1))
    second_dataset = read_data_file(script_folder.joinpath(DATA_PATH2))
    loaded_model = read_model_from_yaml_file(script_folder.joinpath(MODEL_PATH))
    loaded_parameters = read_parameters_from_yaml_file(
        script_folder.joinpath(PARAMETERS_FILE_PATH)
    )
    return loaded_model, loaded_parameters, first_dataset, second_dataset
def main():
    """Run the two-dataset analysis: load inputs, optimize and report.

    Loads the model, parameters and both datasets from the module-level
    path constants, validates the model against the parameters, optimizes
    the analysis scheme with non-negative least squares and returns the
    optimization result.
    """
    # %% Load in data, model and parameters
    parameters = read_parameters_from_yaml_file(
        script_folder.joinpath(PARAMETERS_FILE_PATH))
    dataset1 = read_data_file(script_folder.joinpath(DATA_PATH1))
    dataset2 = read_data_file(script_folder.joinpath(DATA_PATH2))
    model = read_model_from_yaml_file(script_folder.joinpath(MODEL_PATH))

    # %% Validate model and parameters
    print(model.validate(parameters=parameters))

    # %% Construct the analysis scheme
    scheme = Scheme(
        model,
        parameters,
        {
            "dataset1": dataset1,
            "dataset2": dataset2,
        },
        optimization_method="Levenberg-Marquardt",
        non_negative_least_squares=True,
    )

    # %% Optimize the analysis scheme (and estimate parameters)
    result = optimize(scheme)

    # %% Basic print of results
    print(result.markdown(True))

    return result
# ---- Example #3 ----
# 0
output_folder = results_folder.joinpath("simultaneous_analysis_3d_weights")

# Resolve every input file relative to the script folder.
data_path = script_folder.joinpath("equareaIRFsim5.ascii")
data_path2 = script_folder.joinpath("equareaIRFsim6.ascii")
data_path3 = script_folder.joinpath("equareaIRFsim8.ascii")
model_path = script_folder.joinpath("model.yml")
parameters_path = script_folder.joinpath("parameters.yml")

# Read the three measured datasets and the model definition.
dataset1 = gta.io.read_data_file(data_path)
dataset2 = gta.io.read_data_file(data_path2)
dataset3 = gta.io.read_data_file(data_path3)
model = gta.read_model_from_yaml_file(model_path)

# Reuse optimized parameters from a previous run when they are available.
parameter_file = output_folder.joinpath("optimized_parameters.csv")
if parameter_file.exists():
    print("Optimized parameters exists: please check")
    parameters = read_parameters_from_csv_file(str(parameter_file))
else:
    parameters = gta.read_parameters_from_yaml_file(parameters_path)

print(model.validate(parameters=parameters))

# define the analysis scheme to optimize
scheme = Scheme(
    model,
    parameters,
# ---- Example #4 ----
# 0
    def from_yaml_file(cls, filename: str) -> Scheme:
        """Create a :class:`Scheme` from a YAML scheme specification file.

        The file must contain ``model``, ``parameters`` and ``data``
        entries; ``data`` maps dataset labels to data-file paths. Optional
        keys tune the optimization: ``optimization_method``, ``nnls``,
        ``nfev``, ``ftol``, ``gtol``, ``xtol``, ``group_tolerance``,
        ``parameter_format`` and ``data_format``.

        Raises
        ------
        OSError
            If the scheme file cannot be opened.
        ValueError
            If the file cannot be parsed, is not a mapping, or a required
            entry is missing or fails to load.
        """
        # Keep open-errors (OSError) separate from parse-errors (ValueError):
        # the outer handler is narrowed to OSError so the inner ValueError
        # propagates unchanged instead of being re-wrapped as an OSError.
        try:
            with open(filename) as f:
                try:
                    scheme = yaml.safe_load(f)
                except Exception as e:
                    raise ValueError(f"Error parsing scheme: {e}") from e
        except OSError as e:
            raise OSError(f"Error opening scheme: {e}") from e

        # safe_load returns None for an empty file; fail with a clear
        # message instead of a TypeError on the membership tests below.
        if not isinstance(scheme, dict):
            raise ValueError("Scheme file is empty or not a mapping.")

        if "model" not in scheme:
            raise ValueError("Model file not specified.")

        try:
            model = glotaran.read_model_from_yaml_file(scheme["model"])
        except Exception as e:
            raise ValueError(f"Error loading model: {e}") from e

        if "parameters" not in scheme:
            raise ValueError("Parameters file not specified.")

        path = scheme["parameters"]
        fmt = scheme.get("parameter_format", None)
        try:
            parameters = glotaran.parameter.ParameterGroup.from_file(path, fmt)
        except Exception as e:
            raise ValueError(f"Error loading parameters: {e}") from e

        if "data" not in scheme:
            raise ValueError("No data specified.")

        data = {}
        for label, path in scheme["data"].items():
            path = pathlib.Path(path)

            # A scheme-wide "data_format" overrides the per-file suffix;
            # files without a suffix default to netCDF ("nc").
            fmt = path.suffix[1:] if path.suffix != "" else "nc"
            if "data_format" in scheme:
                fmt = scheme["data_format"]

            try:
                data[label] = glotaran.io.read_data_file(path, fmt=fmt)
            except Exception as e:
                raise ValueError(f"Error loading dataset '{label}': {e}") from e

        optimization_method = scheme.get("optimization_method", "TrustRegionReflection")
        nnls = scheme.get("nnls", False)
        nfev = scheme.get("nfev", None)
        ftol = scheme.get("ftol", 1e-8)
        gtol = scheme.get("gtol", 1e-8)
        xtol = scheme.get("xtol", 1e-8)
        group_tolerance = scheme.get("group_tolerance", 0.0)
        return cls(
            model=model,
            parameters=parameters,
            data=data,
            nnls=nnls,
            nfev=nfev,
            ftol=ftol,
            gtol=gtol,
            xtol=xtol,
            group_tolerance=group_tolerance,
            optimization_method=optimization_method,
        )
# ---- Example #5 ----
# 0
output_folder = results_folder.joinpath(result_name)
print(f"- Using folder {output_folder.name} to read/write files for this run")

result_datafile1 = output_folder.joinpath("dataset1.nc")
result_datafile2 = output_folder.joinpath("dataset2.nc")

# %% Load and inspect the two measured datasets.
dataset1 = read_data_file(data_path1)  # CO in toluene
dataset2 = read_data_file(data_path2)  # C2O in toluene
print(dataset1)
print(dataset2)

# %% Load model and parameters; validate them against each other.
model = read_model_from_yaml_file(model_path)
parameter = read_parameters_from_yaml_file(parameter_path)
print(model.validate(parameters=parameter))

# %% Build the analysis scheme and optimize it, timing the run.
start = timer()
scheme = Scheme(
    model,
    parameter,
    {"dataset1": dataset1, "dataset2": dataset2},
    maximum_number_function_evaluations=2,
)
result = optimize(scheme)
# ---- Example #6 ----
# 0
from glotaran.analysis.scheme import Scheme
from glotaran.examples.sequential import dataset

script_dir = Path(__file__).resolve().parent
print(f"Script folder: {script_dir}")
# NOTE(review): the original called ``script_dir.cwd()`` here. ``Path.cwd()``
# is a classmethod that returns the current working directory; the result was
# discarded, so the call was a no-op and has been removed. If changing the
# working directory was intended, use ``os.chdir(script_dir)`` instead.

# Plot a few spectral traces over time, and a few time slices over spectra.
plot_data = dataset.data.sel(spectral=[620, 630, 650], method="nearest")
plot_data.plot.line(x="time", aspect=2, size=5)
plot_data = dataset.data.sel(time=[1, 10, 20], method="nearest")
plot_data.plot.line(x="spectral", aspect=2, size=5)

# Prepare the dataset and plot its first singular values on a log scale.
dataset = gta.io.prepare_time_trace_dataset(dataset)
plot_data = dataset.data_singular_values.sel(singular_value_index=range(10))
plot_data.plot(yscale="log", marker="o", linewidth=0, aspect=2, size=5)

# Load and validate the model and parameters.
model = gta.read_model_from_yaml_file(script_dir.joinpath("model.yml"))
print(model)
parameters = gta.read_parameters_from_yaml_file(
    script_dir.joinpath("parameters.yml"))

print(model.validate(parameters=parameters))
print(model)
print(parameters)

# Optimize and inspect the result.
result = optimize(Scheme(model, parameters, {"dataset1": dataset}))
print(result)
print(result.optimized_parameters)
result_dataset = result.get_dataset("dataset1")
# (A bare ``result_dataset`` expression stood here — a notebook-style display
# statement that does nothing in a script; removed.)
plot_data = result_dataset.residual_left_singular_vectors.sel(
    left_singular_value_index=0)
# ---- Example #7 ----
# 0
from glotaran.io import read_data_file

# Input locations, relative to the script folder.
DATA_PATH1 = "data/data1.ascii"
DATA_PATH2 = "data/data2.ascii"
MODEL_PATH = "models/model.yml"
PARAMETERS_FILE_PATH = "models/parameters.yml"

# %% Setup necessary (output) paths
results_folder, script_folder = setup_case_study(Path(__file__))
output_folder = results_folder.joinpath("target_analysis")
print(f"- Using folder {output_folder.name} to read/write files for this run")

# %% Load in data, model and parameters
dataset1 = read_data_file(script_folder.joinpath(DATA_PATH1))
dataset2 = read_data_file(script_folder.joinpath(DATA_PATH2))
model = read_model_from_yaml_file(script_folder.joinpath(MODEL_PATH))
parameters = read_parameters_from_yaml_file(
    script_folder.joinpath(PARAMETERS_FILE_PATH)
)

# %% Validate model and parameters
print(model.validate(parameters=parameters))

# %% Construct the analysis scheme
scheme = Scheme(
    model,
    parameters,
    {
        "dataset1": dataset1,
        "dataset2": dataset2,
    },
    maximum_number_function_evaluations=11,
    non_negative_least_squares=True,
    optimization_method="TrustRegionReflection",
)
# ---- Example #8 ----
# 0
        "parameters": "models/parameters.yml"
    },
    "with_penalties": {
        "model": "models/model_equal_area_penalties.yml",
        "parameters": "models/parameters_equal_area_penalties.yml",
    },
}

# %% Setup necessary (output) paths
script_file = Path(__file__)
results_folder, script_folder = setup_case_study(script_file)
output_folder = results_folder.joinpath(script_file.stem)
print(f"- Using folder {output_folder.name} to read/write files for this run")

# %% Load the shared dataset once; every model variant is fitted against it.
dataset = read_data_file(script_folder.joinpath(DATA_PATH))

for variant, paths in MODEL_PATHS.items():
    model = read_model_from_yaml_file(script_folder.joinpath(paths["model"]))
    parameters = read_parameters_from_yaml_file(
        script_folder.joinpath(paths["parameters"]))
    print(model.markdown(parameters=parameters))

    # Optimize once, then run a second optimization seeded with the
    # result of the first.
    scheme = Scheme(model, parameters, {"dataset1": dataset})
    result = optimize(scheme)
    scheme2 = result.get_scheme()
    result2 = optimize(scheme2)

    simple_plot_overview(result.data["dataset1"], variant)
    simple_plot_overview(result2.data["dataset1"], variant)
plt.show()