Example #1
def test_single_dataset():
    model = MockModel.from_dict(
        {"dataset": {
            "dataset1": {
                "megacomplex": [],
            },
        }})
    print(model.validate())
    assert model.valid()

    parameter = ParameterGroup.from_list([1, 10])
    print(model.validate(parameter))
    assert model.valid(parameter)
    axis_e = [1, 2, 3]
    axis_c = [5, 7, 9, 12]

    data = {
        'dataset1':
        xr.DataArray(np.ones((3, 4)),
                     coords=[('e', axis_e),
                             ('c', axis_c)]).to_dataset(name="data")
    }

    scheme = Scheme(model, parameter, data)
    bag, datasets = create_grouped_bag(scheme)
    bag = bag.compute()
    assert len(datasets) == 0
    assert len(bag) == 3
    assert all([p.data.size == 4 for p in bag])
    assert all([p.descriptor[0].dataset == 'dataset1' for p in bag])
    assert all([all(p.descriptor[0].axis == axis_c) for p in bag])
    assert [p.descriptor[0].index for p in bag] == axis_e
Example #2
def test_multi_dataset_overlap():
    model = MockModel.from_dict({
        "dataset": {
            "dataset1": {
                "megacomplex": [],
            },
            "dataset2": {
                "megacomplex": [],
            },
        }
    })

    model.grouped = lambda: True
    print(model.validate())
    assert model.valid()
    assert model.grouped()

    parameters = ParameterGroup.from_list([1, 10])
    print(model.validate(parameters))
    assert model.valid(parameters)

    axis_e_1 = [1, 2, 3, 5]
    axis_c_1 = [5, 7]
    axis_e_2 = [0, 1.4, 2.4, 3.4, 9]
    axis_c_2 = [5, 7, 9, 12]
    data = {
        "dataset1":
        xr.DataArray(np.ones((4, 2)),
                     coords=[("e", axis_e_1),
                             ("c", axis_c_1)]).to_dataset(name="data"),
        "dataset2":
        xr.DataArray(np.ones((5, 4)),
                     coords=[("e", axis_e_2),
                             ("c", axis_c_2)]).to_dataset(name="data"),
    }

    scheme = Scheme(model, parameters, data, group_tolerance=5e-1)
    problem = Problem(scheme)
    bag = list(problem.bag)
    assert len(problem.groups) == 3
    assert "dataset1dataset2" in problem.groups
    assert problem.groups["dataset1dataset2"] == ["dataset1", "dataset2"]
    assert len(bag) == 6

    assert all(p.data.size == 4 for p in bag[:1])
    assert all(p.descriptor[0].label == "dataset1" for p in bag[1:5])
    assert all(all(p.descriptor[0].axis == axis_c_1) for p in bag[1:5])
    assert [p.descriptor[0].index for p in bag[1:5]] == axis_e_1

    assert all(p.data.size == 6 for p in bag[1:4])
    assert all(p.descriptor[1].label == "dataset2" for p in bag[1:4])
    assert all(all(p.descriptor[1].axis == axis_c_2) for p in bag[1:4])
    assert [p.descriptor[1].index for p in bag[1:4]] == axis_e_2[1:4]

    assert all(p.data.size == 4 for p in bag[5:])
    assert bag[4].descriptor[0].label == "dataset1"
    assert bag[5].descriptor[0].label == "dataset2"
    assert np.array_equal(bag[4].descriptor[0].axis, axis_c_1)
    assert np.array_equal(bag[5].descriptor[0].axis, axis_c_2)
    assert [p.descriptor[0].index for p in bag[1:4]] == axis_e_1[:-1]
Example #3
    def optimize(self,
                 parameter: ParameterGroup,
                 data: typing.Dict[str, typing.Union[xr.Dataset, xr.DataArray]],
                 nnls: bool = False,
                 verbose: bool = True,
                 max_nfev: int = None,
                 group_tolerance: float = 0.0,
                 client=None,
                 ) -> Result:
        """Optimizes the parameter for this model.

        Parameters
        ----------
        data :
            A dictonary containing all datasets with their labels as keys.
        parameter : glotaran.model.ParameterGroup
            The initial parameter.
        nnls :
            If `True` non-linear least squaes optimizing is used instead of variable projection.
        verbose :
            If `True` feedback is printed at every iteration.
        max_nfev :
            Maximum number of function evaluations. `None` for unlimited.
        group_tolerance :
            The tolerance for grouping datasets along the global dimension.
        """
        scheme = Scheme(model=self, parameter=parameter, data=data,
                        nnls=nnls, group_tolerance=group_tolerance, nfev=max_nfev)
        result = optimize(scheme, verbose=verbose, client=client)
        return result
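As a usage illustration (not part of the original snippet), a call to the method above might look as follows; the model, parameter group and dataset names are placeholders and the keyword values are only examples:

# Hypothetical invocation of the optimize method shown above.
# "model", "parameters" and "dataset" are assumed to exist already.
result = model.optimize(
    parameters,             # glotaran.model.ParameterGroup with the initial values
    {"dataset1": dataset},  # datasets keyed by their label
    nnls=True,              # use non-negative least squares instead of variable projection
    verbose=False,          # suppress per-iteration feedback
    max_nfev=100,           # stop after at most 100 function evaluations
    group_tolerance=0.1,    # tolerance for grouping along the global dimension
)
print(result)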
Example #4
def test_single_dataset():
    model = MockModel.from_dict(
        {"dataset": {
            "dataset1": {
                "megacomplex": [],
            },
        }})
    model.grouped = lambda: True
    print(model.validate())
    assert model.valid()
    assert model.grouped()

    parameters = ParameterGroup.from_list([1, 10])
    print(model.validate(parameters))
    assert model.valid(parameters)
    axis_e = [1, 2, 3]
    axis_c = [5, 7, 9, 12]

    data = {
        "dataset1":
        xr.DataArray(np.ones((3, 4)),
                     coords=[("e", axis_e),
                             ("c", axis_c)]).to_dataset(name="data")
    }

    scheme = Scheme(model, parameters, data)
    problem = Problem(scheme)
    bag = problem.bag
    datasets = problem.groups
    assert len(datasets) == 1
    assert len(bag) == 3
    assert all(p.data.size == 4 for p in bag)
    assert all(p.descriptor[0].label == "dataset1" for p in bag)
    assert all(all(p.descriptor[0].axis == axis_c) for p in bag)
    assert [p.descriptor[0].index for p in bag] == axis_e
Example #5
def test_multi_dataset_overlap():
    model = MockModel.from_dict(
        {
            "dataset": {
                "dataset1": {
                    "megacomplex": [],
                },
                "dataset2": {
                    "megacomplex": [],
                },
            }
        }
    )

    print(model.validate())
    assert model.valid()

    parameter = ParameterGroup.from_list([1, 10])
    print(model.validate(parameter))
    assert model.valid(parameter)

    axis_e_1 = [1, 2, 3, 5]
    axis_c_1 = [5, 7]
    axis_e_2 = [0, 1.4, 2.4, 3.4, 9]
    axis_c_2 = [5, 7, 9, 12]
    data = {
        "dataset1": xr.DataArray(
            np.ones((4, 2)), coords=[("e", axis_e_1), ("c", axis_c_1)]
        ).to_dataset(name="data"),
        "dataset2": xr.DataArray(
            np.ones((5, 4)), coords=[("e", axis_e_2), ("c", axis_c_2)]
        ).to_dataset(name="data"),
    }

    scheme = Scheme(model, parameter, data, group_tolerance=5e-1)
    bag, datasets = create_grouped_bag(scheme)
    bag = bag.compute()
    assert len(datasets) == 1
    assert "dataset1dataset2" in datasets
    assert datasets["dataset1dataset2"] == ["dataset1", "dataset2"]
    assert len(bag) == 6

    assert all([p.data.size == 4 for p in bag[:1]])
    assert all([p.descriptor[0].dataset == "dataset1" for p in bag[1:5]])
    assert all([all(p.descriptor[0].axis == axis_c_1) for p in bag[1:5]])
    assert [p.descriptor[0].index for p in bag[1:5]] == axis_e_1

    assert all([p.data.size == 6 for p in bag[1:4]])
    assert all([p.descriptor[1].dataset == "dataset2" for p in bag[1:4]])
    assert all([all(p.descriptor[1].axis == axis_c_2) for p in bag[1:4]])
    assert [p.descriptor[1].index for p in bag[1:4]] == axis_e_2[1:4]

    assert all([p.data.size == 4 for p in bag[5:]])
    assert bag[4].descriptor[0].dataset == "dataset1"
    assert bag[5].descriptor[0].dataset == "dataset2"
    assert np.array_equal(bag[4].descriptor[0].axis, axis_c_1)
    assert np.array_equal(bag[5].descriptor[0].axis, axis_c_2)
    assert [p.descriptor[0].index for p in bag[1:4]] == axis_e_1[:-1]
Example #6
def test_spectral_irf(suite):

    model = suite.model
    print(model.validate())
    assert model.valid()

    sim_model = suite.sim_model
    print(sim_model.validate())
    assert sim_model.valid()

    wanted_parameters = suite.wanted_parameters
    print(sim_model.validate(wanted_parameters))
    print(wanted_parameters)
    assert sim_model.valid(wanted_parameters)

    initial_parameters = suite.initial_parameters
    print(model.validate(initial_parameters))
    assert model.valid(initial_parameters)

    print(model.markdown(wanted_parameters))

    dataset = sim_model.simulate("dataset1", wanted_parameters, suite.axis)

    assert dataset.data.shape == (suite.axis["time"].size,
                                  suite.axis["spectral"].size)

    data = {"dataset1": dataset}

    scheme = Scheme(model=model,
                    parameters=initial_parameters,
                    data=data,
                    nfev=20)
    result = optimize(scheme)
    print(result.optimized_parameters)

    for label, param in result.optimized_parameters.all():
        assert np.allclose(param.value,
                           wanted_parameters.get(label).value,
                           rtol=1e-1)

    resultdata = result.data["dataset1"]

    print(resultdata)

    assert np.array_equal(dataset["time"], resultdata["time"])
    assert np.array_equal(dataset["spectral"], resultdata["spectral"])
    assert dataset.data.shape == resultdata.data.shape
    assert dataset.data.shape == resultdata.fitted_data.shape
    assert np.allclose(dataset.data, resultdata.fitted_data, atol=1e-14)

    print(resultdata.fitted_data.isel(spectral=0).argmax())
    print(resultdata.fitted_data.isel(spectral=-1).argmax())
    assert (resultdata.fitted_data.isel(spectral=0).argmax() !=
            resultdata.fitted_data.isel(spectral=-1).argmax())

    assert "species_associated_spectra" in resultdata
    assert "decay_associated_spectra" in resultdata
Example #7
def test_fitting(suite, index_dependend, grouped):
    model = suite.model

    def gr():
        return grouped
    model.grouped = gr

    def id():
        return index_dependend
    model.index_dependend = id

    sim_model = suite.sim_model
    est_axis = suite.e_axis
    cal_axis = suite.c_axis

    print(model.validate())
    assert model.valid()

    print(sim_model.validate())
    assert sim_model.valid()

    wanted = suite.wanted
    print(wanted)
    print(sim_model.validate(wanted))
    assert sim_model.valid(wanted)

    initial = suite.initial
    print(initial)
    print(model.validate(initial))
    assert model.valid(initial)

    dataset = simulate(sim_model, 'dataset1', wanted, {'e': est_axis, 'c': cal_axis})
    print(dataset)

    assert dataset.data.shape == (cal_axis.size, est_axis.size)

    data = {'dataset1': dataset}
    scheme = Scheme(model=model, parameter=initial, data=data, nfev=5)

    result = optimize(scheme)
    print(result.optimized_parameter)
    print(result.data['dataset1'])

    for _, param in result.optimized_parameter.all():
        assert np.allclose(param.value, wanted.get(param.full_label).value,
                           rtol=1e-1)

    resultdata = result.data["dataset1"]
    assert np.array_equal(dataset.c, resultdata.c)
    assert np.array_equal(dataset.e, resultdata.e)
    assert dataset.data.shape == resultdata.data.shape
    print(dataset.data[0, 0], resultdata.data[0, 0])
    assert np.allclose(dataset.data, resultdata.data)
Example #8
def test_multi_dataset_no_overlap():
    model = MockModel.from_dict({
        "dataset": {
            "dataset1": {
                "megacomplex": [],
            },
            "dataset2": {
                "megacomplex": [],
            },
        }
    })

    model.grouped = lambda: True
    print(model.validate())
    assert model.valid()
    assert model.grouped()

    parameters = ParameterGroup.from_list([1, 10])
    print(model.validate(parameters))
    assert model.valid(parameters)

    axis_e_1 = [1, 2, 3]
    axis_c_1 = [5, 7]
    axis_e_2 = [4, 5, 6]
    axis_c_2 = [5, 7, 9]
    data = {
        "dataset1":
        xr.DataArray(np.ones((3, 2)),
                     coords=[("e", axis_e_1),
                             ("c", axis_c_1)]).to_dataset(name="data"),
        "dataset2":
        xr.DataArray(np.ones((3, 3)),
                     coords=[("e", axis_e_2),
                             ("c", axis_c_2)]).to_dataset(name="data"),
    }

    scheme = Scheme(model, parameters, data)
    problem = Problem(scheme)
    bag = list(problem.bag)
    assert len(problem.groups) == 2
    assert len(bag) == 6
    assert all(p.data.size == 2 for p in bag[:3])
    assert all(p.descriptor[0].label == "dataset1" for p in bag[:3])
    assert all(all(p.descriptor[0].axis == axis_c_1) for p in bag[:3])
    assert [p.descriptor[0].index for p in bag[:3]] == axis_e_1

    assert all(p.data.size == 3 for p in bag[3:])
    assert all(p.descriptor[0].label == "dataset2" for p in bag[3:])
    assert all(all(p.descriptor[0].axis == axis_c_2) for p in bag[3:])
    assert [p.descriptor[0].index for p in bag[3:]] == axis_e_2
Example #9
def test_scheme(scheme):
    scheme = Scheme.from_yml_file(scheme)
    assert scheme.model is not None
    assert scheme.model.model_type == "mock"

    assert scheme.parameter is not None
    assert scheme.parameter.get("1") == 1.0
    assert scheme.parameter.get("2") == 67.0

    assert scheme.nnls
    assert scheme.nfev == 42

    assert 'dataset1' in scheme.data
    assert scheme.data['dataset1'].data.size == 3
Example #10
def test_scheme(scheme):
    scheme = Scheme.from_yml_file(scheme)
    assert scheme.model is not None
    assert scheme.model.model_type == "mock"

    assert scheme.parameter is not None
    assert scheme.parameter.get("1") == 1.0
    assert scheme.parameter.get("2") == 67.0

    assert scheme.nnls
    assert scheme.nfev == 42

    assert "dataset1" in scheme.data
    assert scheme.data["dataset1"].data.size == 3
Example #11
def test_kinetic_model(suite, nnls):

    model = suite.model
    print(model.validate())
    assert model.valid()

    wanted_parameters = suite.wanted_parameters
    print(model.validate(wanted_parameters))
    print(wanted_parameters)
    assert model.valid(wanted_parameters)

    initial_parameters = suite.initial_parameters
    print(model.validate(initial_parameters))
    assert model.valid(initial_parameters)

    print(model.markdown(initial_parameters))

    dataset = model.simulate("dataset1", wanted_parameters, suite.axis,
                             suite.clp)

    assert dataset.data.shape == (suite.axis["time"].size,
                                  suite.axis["pixel"].size)

    data = {"dataset1": dataset}

    scheme = Scheme(model=model,
                    parameters=initial_parameters,
                    data=data,
                    nfev=20)
    result = optimize(scheme)
    print(result.optimized_parameters)

    for label, param in result.optimized_parameters.all():
        assert np.allclose(param.value,
                           wanted_parameters.get(label).value,
                           rtol=1e-1)

    resultdata = result.data["dataset1"]
    assert np.array_equal(dataset["time"], resultdata["time"])
    assert np.array_equal(dataset["pixel"], resultdata["pixel"])
    assert dataset.data.shape == resultdata.data.shape
    assert dataset.data.shape == resultdata.fitted_data.shape
    assert np.allclose(dataset.data, resultdata.fitted_data, rtol=1e-2)
    assert "species_associated_images" in resultdata
    assert "decay_associated_images" in resultdata

    if len(model.irf) != 0:
        assert "irf" in resultdata
Example #12
def test_multi_dataset_no_overlap():
    model = MockModel.from_dict({
        "dataset": {
            "dataset1": {
                "megacomplex": [],
            },
            "dataset2": {
                "megacomplex": [],
            },
        }
    })

    print(model.validate())
    assert model.valid()

    parameter = ParameterGroup.from_list([1, 10])
    print(model.validate(parameter))
    assert model.valid(parameter)

    axis_e_1 = [1, 2, 3]
    axis_c_1 = [5, 7]
    axis_e_2 = [4, 5, 6]
    axis_c_2 = [5, 7, 9]
    data = {
        'dataset1':
        xr.DataArray(np.ones((3, 2)),
                     coords=[('e', axis_e_1),
                             ('c', axis_c_1)]).to_dataset(name="data"),
        'dataset2':
        xr.DataArray(np.ones((3, 3)),
                     coords=[('e', axis_e_2),
                             ('c', axis_c_2)]).to_dataset(name="data"),
    }

    scheme = Scheme(model, parameter, data)
    bag, datasets = create_grouped_bag(scheme)
    bag = bag.compute()
    assert len(datasets) == 0
    assert len(bag) == 6
    assert all([p.data.size == 2 for p in bag[:3]])
    assert all([p.descriptor[0].dataset == 'dataset1' for p in bag[:3]])
    assert all([all(p.descriptor[0].axis == axis_c_1) for p in bag[:3]])
    assert [p.descriptor[0].index for p in bag[:3]] == axis_e_1

    assert all([p.data.size == 3 for p in bag[3:]])
    assert all([p.descriptor[0].dataset == 'dataset2' for p in bag[3:]])
    assert all([all(p.descriptor[0].axis == axis_c_2) for p in bag[3:]])
    assert [p.descriptor[0].index for p in bag[3:]] == axis_e_2
Example #13
def optimize_benchmark(model, parameters, dataset1, dataset2):
    # %% Construct the analysis scheme
    scheme = Scheme(
        model,
        parameters,
        {
            "dataset1": dataset1,
            "dataset2": dataset2
        },
        maximum_number_function_evaluations=11,
        non_negative_least_squares=True,
        optimization_method="TrustRegionReflection",
    )
    # %% Optimize the analysis scheme (and estimate parameters)
    result = optimize(scheme)
    result2 = optimize(result.get_scheme())
    return result, result2
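A small, hedged sketch of how this helper might be timed; the loading of the model, parameters and the two datasets is assumed to have happened elsewhere, and only the timer usage mirrors other snippets in this collection:

from timeit import default_timer as timer

# Hypothetical benchmark driver; model, parameters, dataset1 and dataset2 are placeholders.
start = timer()
result, result2 = optimize_benchmark(model, parameters, dataset1, dataset2)
print(f"Two optimization passes took {timer() - start:.1f} s")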
Example #14
def problem(request) -> Problem:
    model = suite.model
    model.is_grouped = request.param[0]
    model.is_index_dependent = request.param[1]

    dataset = simulate(
        suite.sim_model,
        "dataset1",
        suite.wanted_parameters,
        {
            "e": suite.e_axis,
            "c": suite.c_axis
        },
    )
    scheme = Scheme(model=model,
                    parameters=suite.initial_parameters,
                    data={"dataset1": dataset})
    return Problem(scheme)
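Since the fixture above reads two booleans from request.param, it is presumably driven by a parametrized fixture decorator along these lines (a hypothetical reconstruction, not taken from the original test module):

import itertools

import pytest

# Hypothetical parametrization covering every (is_grouped, is_index_dependent) combination;
# Problem is the same class used in the fixture above.
@pytest.fixture(params=list(itertools.product([True, False], repeat=2)))
def problem(request) -> Problem:
    ...  # body as shown in the example above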
Example #15
def main():

    # parameter_file = output_folder.joinpath("optimized_parameters.csv")
    # if parameter_file.exists():
    #     print("Optimized parameters exists: please check")
    #     parameters = read_parameters_from_csv_file(str(parameter_file))
    # else:
    #     parameters = read_parameters_from_yaml_file(script_folder.joinpath(PARAMETERS_FILE_PATH))
    parameters = read_parameters_from_yaml_file(
        script_folder.joinpath(PARAMETERS_FILE_PATH))
    # %% Load in data, model and parameters
    dataset1 = read_data_file(script_folder.joinpath(DATA_PATH1))
    dataset2 = read_data_file(script_folder.joinpath(DATA_PATH2))
    model = read_model_from_yaml_file(script_folder.joinpath(MODEL_PATH))

    # %% Validate model and parameters
    print(model.validate(parameters=parameters))

    # %% Construct the analysis scheme
    scheme = Scheme(
        model,
        parameters,
        {
            "dataset1": dataset1,
            "dataset2": dataset2
        },
        optimization_method="Levenberg-Marquardt",
        # maximum_number_function_evaluations=11,
        non_negative_least_squares=True,
    )

    # %% Optimize the analysis scheme (and estimate parameters)
    result = optimize(scheme)

    # %% Basic print of results
    print(result.markdown(True))

    return result
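To run this as a stand-alone script, the usual entry-point guard can be added (a conventional idiom, not shown in the original excerpt):

if __name__ == "__main__":
    # Run the analysis and keep the result around for interactive inspection.
    result = main()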
Example #16
def test_spectral_constraint():
    model = KineticSpectrumModel.from_dict({
        "initial_concentration": {
            "j1": {
                "compartments": ["s1", "s2"],
                "parameters": ["i.1", "i.2"],
            },
        },
        "megacomplex": {
            "mc1": {
                "k_matrix": ["k1"]
            },
        },
        "k_matrix": {
            "k1": {
                "matrix": {
                    ("s2", "s1"): "kinetic.1",
                    ("s2", "s2"): "kinetic.2",
                }
            }
        },
        "spectral_constraints": [
            {
                "type": "zero",
                "compartment": "s2",
                "interval": (float("-inf"), float("inf"))
            },
        ],
        "dataset": {
            "dataset1": {
                "initial_concentration": "j1",
                "megacomplex": ["mc1"],
            },
        },
    })
    print(model)

    wanted_parameters = ParameterGroup.from_dict({
        "kinetic": [1e-4, 1e-5],
        "i": [1, 2],
    })
    initial_parameters = ParameterGroup.from_dict({
        "kinetic": [2e-4, 2e-5],
        "i": [1, 2, {
            "vary": False
        }],
    })

    time = np.asarray(np.arange(0, 50, 1.5))
    dataset = model.dataset["dataset1"].fill(model, wanted_parameters)
    compartments, matrix = kinetic_image_matrix(dataset, time, 0)

    assert len(compartments) == 2
    assert matrix.shape == (time.size, 2)

    reduced_compartments, reduced_matrix = apply_spectral_constraints(
        model, compartments, matrix, 1)

    print(reduced_matrix)
    assert len(reduced_compartments) == 1
    assert reduced_matrix.shape == (time.size, 1)

    reduced_compartments, reduced_matrix = model.constrain_matrix_function(
        "dataset1", wanted_parameters, compartments, matrix, 1)

    assert reduced_matrix.shape == (time.size, 1)

    clp = xr.DataArray([[1.0, 10.0, 20.0, 1]],
                       coords=(("spectral", [1]), ("clp_label",
                                                   ["s1", "s2", "s3", "s4"])))

    data = model.simulate("dataset1",
                          wanted_parameters,
                          clp=clp,
                          axes={
                              "time": time,
                              "spectral": np.array([1])
                          })

    dataset = {"dataset1": data}
    scheme = Scheme(model=model,
                    parameters=initial_parameters,
                    data=dataset,
                    nfev=20)

    # the resulting jacobian is singular
    with pytest.warns(UserWarning):
        result = optimize(scheme)

    result_data = result.data["dataset1"]
    print(result_data.clp_label)
    print(result_data.clp)
    #  TODO: save reduced clp
    #  assert result_data.clp.shape == (1, 1)

    print(result_data.species_associated_spectra)
    assert result_data.species_associated_spectra.shape == (1, 2)
    assert result_data.species_associated_spectra[0, 1] == 0
Example #17
print(dataset1)
print(dataset2)

# %%
model = read_model_from_yaml_file(model_path)
parameter = read_parameters_from_yaml_file(parameter_path)
print(model.validate(parameters=parameter))

# %%
start = timer()
scheme = Scheme(
    model,
    parameter,
    {
        "dataset1": dataset1,
        "dataset2": dataset2
    },
    maximum_number_function_evaluations=2,
)
result = optimize(scheme)

end = timer()
print(f"Total time: {end - start}")

result.save(str(output_folder))
end2 = timer()
print(f"Saving took: {end2 - end}")

# %%
print(result.markdown(True))
Example #18
def test_weight():
    model_dict = {
        "dataset": {
            "dataset1": {
                "megacomplex": [],
            },
        },
        "weights": [
            {
                "datasets": ["dataset1"],
                "global_interval": (np.inf, 200),
                "model_interval": (4, 8),
                "value": 0.5,
            },
        ],
    }
    model = MockModel.from_dict(model_dict)
    print(model.validate())
    assert model.valid()

    parameter = ParameterGroup.from_list([])

    global_axis = np.asarray(range(50, 300))
    model_axis = np.asarray(range(15))

    dataset = xr.DataArray(
        np.ones((global_axis.size, model_axis.size)),
        coords={
            "e": global_axis,
            "c": model_axis
        },
        dims=("e", "c"),
    )

    scheme = Scheme(model, parameter, {"dataset1": dataset})

    data = scheme.prepare_data()["dataset1"]
    print(data)
    assert "data" in data
    assert "weight" in data

    assert data.data.shape == data.weight.shape
    assert np.all(
        data.weight.sel(e=slice(0, 200), c=slice(4, 8)).values == 0.5)
    assert np.all(data.weight.sel(c=slice(0, 3)).values == 1)

    model_dict["weights"].append({
        "datasets": ["dataset1"],
        "value": 0.2,
    })
    model = MockModel.from_dict(model_dict)
    print(model.validate())
    assert model.valid()

    scheme = Scheme(model, parameter, {"dataset1": dataset})
    data = scheme.prepare_data()["dataset1"]
    assert np.all(
        data.weight.sel(e=slice(0, 200), c=slice(4, 8)).values == 0.5 * 0.2)
    assert np.all(data.weight.sel(c=slice(0, 3)).values == 0.2)

    scheme = Scheme(model, parameter, {"dataset1": data})
    with pytest.warns(
            UserWarning,
            match="Ignoring model weight for dataset 'dataset1'"
            " because weight is already supplied by dataset.",
    ):
        # unnecessary, but the linter complains if we just call the function without doing anything
        assert "dataset1" in scheme.prepare_data()
Example #19
def test_optimization(suite, index_dependent, grouped, weight, method):
    model = suite.model

    model.is_grouped = grouped
    model.is_index_dependent = index_dependent
    print("Grouped:", grouped)
    print("Index dependent:", index_dependent)

    assert model.grouped() == grouped
    assert model.index_dependent() == index_dependent

    sim_model = suite.sim_model
    sim_model.is_grouped = grouped
    sim_model.is_index_dependent = index_dependent

    print(model.validate())
    assert model.valid()

    print(sim_model.validate())
    assert sim_model.valid()

    wanted_parameters = suite.wanted_parameters
    print(wanted_parameters)
    print(sim_model.validate(wanted_parameters))
    assert sim_model.valid(wanted_parameters)

    initial_parameters = suite.initial_parameters
    print(initial_parameters)
    print(model.validate(initial_parameters))
    assert model.valid(initial_parameters)

    nr_datasets = 3 if issubclass(suite, ThreeDatasetDecay) else 1
    data = {}
    for i in range(nr_datasets):
        e_axis = getattr(suite, "e_axis" if i == 0 else f"e_axis{i+1}")
        c_axis = getattr(suite, "c_axis" if i == 0 else f"c_axis{i+1}")

        dataset = simulate(
            sim_model, f"dataset{i+1}", wanted_parameters, {"e": e_axis, "c": c_axis}
        )
        print(f"Dataset {i+1}")
        print("=============")
        print(dataset)

        if hasattr(suite, "scale"):
            dataset["data"] /= suite.scale

        if weight:
            dataset["weight"] = xr.DataArray(
                np.ones_like(dataset.data) * 0.5, coords=dataset.coords
            )

        assert dataset.data.shape == (c_axis.size, e_axis.size)

        data[f"dataset{i+1}"] = dataset

    scheme = Scheme(
        model=model,
        parameters=initial_parameters,
        data=data,
        nfev=10,
        group_tolerance=0.1,
        optimization_method=method,
    )

    result = optimize(scheme)
    print(result.optimized_parameters)

    for label, param in result.optimized_parameters.all():
        if param.vary:
            assert np.allclose(param.value, wanted_parameters.get(label).value, rtol=1e-1)

    for i, dataset in enumerate(data.values()):
        resultdata = result.data[f"dataset{i+1}"]
        print(f"Result Data {i+1}")
        print("=================")
        print(resultdata)
        assert "residual" in resultdata
        assert "residual_left_singular_vectors" in resultdata
        assert "residual_right_singular_vectors" in resultdata
        assert "residual_singular_values" in resultdata
        assert np.array_equal(dataset.c, resultdata.c)
        assert np.array_equal(dataset.e, resultdata.e)
        assert dataset.data.shape == resultdata.data.shape
        print(dataset.data[0, 0], resultdata.data[0, 0])
        assert np.allclose(dataset.data, resultdata.data)
        if weight:
            assert "weight" in resultdata
            assert "weighted_data" in resultdata
            assert np.allclose(resultdata.data, resultdata.weighted_data * 2)
            assert "weighted_residual" in resultdata
            assert "weighted_residual_left_singular_vectors" in resultdata
            assert "weighted_residual_right_singular_vectors" in resultdata
            assert "weighted_residual_singular_values" in resultdata

    assert callable(model.additional_penalty_function)
    assert model.additional_penalty_function_called

    if isinstance(model, DecayModel):
        assert callable(model.constrain_matrix_function)
        assert model.constrain_matrix_function_called
        assert callable(model.retrieve_clp_function)
        assert model.retrieve_clp_function_called
    else:
        assert not model.constrain_matrix_function_called
        assert not model.retrieve_clp_function_called
Example #20
plot_data.plot.line(x="time", aspect=2, size=5)
plot_data = dataset.data.sel(time=[1, 10, 20], method="nearest")
plot_data.plot.line(x="spectral", aspect=2, size=5)
dataset = gta.io.prepare_time_trace_dataset(dataset)
plot_data = dataset.data_singular_values.sel(singular_value_index=range(10))
plot_data.plot(yscale="log", marker="o", linewidth=0, aspect=2, size=5)

model = gta.read_model_from_yaml_file(script_dir.joinpath("model.yml"))
print(model)
parameters = gta.read_parameters_from_yaml_file(
    script_dir.joinpath("parameters.yml"))

print(model.validate(parameters=parameters))
print(model)
print(parameters)

result = optimize(Scheme(model, parameters, {"dataset1": dataset}))
print(result)
print(result.optimized_parameters)
result_dataset = result.get_dataset("dataset1")
result_dataset
plot_data = result_dataset.residual_left_singular_vectors.sel(
    left_singular_value_index=0)
plot_data.plot.line(x="time", aspect=2, size=5)
plot_data = result_dataset.residual_right_singular_vectors.sel(
    right_singular_value_index=0)
plot_data.plot.line(x="spectral", aspect=2, size=5)
result_dataset.to_netcdf("dataset1.nc")

plt.show(block=True)
Example #21
result_name = str(model_path.stem).replace("model", "result")
output_folder = results_folder.joinpath(result_name)
print(f"- Using folder {output_folder.name} to read/write files for this run")

# %%
result_datafile = output_folder.joinpath("dataset1.nc")
if result_datafile.exists() and SKIP_FIT:
    print(f"Loading earlier fit results from: {result_datafile}")
else:
    dataset = read_data_file(data_path)
    model = read_model_from_yaml_file(model_path)
    parameter = read_parameters_from_yaml_file(parameter_path)
    scheme = Scheme(
        model,
        parameter,
        {"dataset1": dataset},
        maximum_number_function_evaluations=9,
        non_negative_least_squares=True,
    )

    print(model.validate(parameters=parameter))

    # The problem is constructed automatically from the scheme by the optimize call,
    # but can also be created manually for debug purposes:
    test_problem = Problem(scheme)

    # %%
    start = timer()
    # Warning: this may take a while (several seconds per iteration)
    result = optimize(scheme, verbose=True)
    end = timer()
Example #22
def test_spectral_relation():
    model = KineticSpectrumModel.from_dict({
        "initial_concentration": {
            "j1": {
                "compartments": ["s1", "s2", "s3", "s4"],
                "parameters": ["i.1", "i.2", "i.3", "i.4"],
            },
        },
        "megacomplex": {
            "mc1": {
                "k_matrix": ["k1"]
            },
        },
        "k_matrix": {
            "k1": {
                "matrix": {
                    ("s1", "s1"): "kinetic.1",
                    ("s2", "s2"): "kinetic.1",
                    ("s3", "s3"): "kinetic.1",
                    ("s4", "s4"): "kinetic.1",
                }
            }
        },
        "spectral_relations": [
            {
                "compartment": "s1",
                "target": "s2",
                "parameter": "rel.1",
                "interval": [(0, 2)],
            },
            {
                "compartment": "s1",
                "target": "s3",
                "parameter": "rel.2",
                "interval": [(0, 2)],
            },
        ],
        "dataset": {
            "dataset1": {
                "initial_concentration": "j1",
                "megacomplex": ["mc1"],
            },
        },
    })
    print(model)

    rel1, rel2 = 10, 20
    parameters = ParameterGroup.from_dict({
        "kinetic": [1e-4],
        "i": [1, 2, 3, 4],
        "rel": [rel1, rel2],
    })

    time = np.asarray(np.arange(0, 50, 1.5))
    dataset = model.dataset["dataset1"].fill(model, parameters)
    compartments, matrix = kinetic_image_matrix(dataset, time, 0)

    assert len(compartments) == 4
    assert matrix.shape == (time.size, 4)

    reduced_compartments, relation_matrix = create_spectral_relation_matrix(
        model, "dataset1", parameters, compartments, matrix, 1)

    print(relation_matrix)
    assert len(reduced_compartments) == 2
    assert relation_matrix.shape == (4, 2)
    assert np.array_equal(
        relation_matrix,
        [
            [1.0, 0.0],
            [10.0, 0.0],
            [20.0, 0.0],
            [0.0, 1.0],
        ],
    )

    reduced_compartments, reduced_matrix = model.constrain_matrix_function(
        "dataset1", parameters, compartments, matrix, 1)

    assert reduced_matrix.shape == (time.size, 2)

    print(reduced_matrix[0, 0], matrix[0, 0], matrix[0, 1], matrix[0, 2])
    assert np.allclose(
        reduced_matrix[:, 0],
        matrix[:, 0] + rel1 * matrix[:, 1] + rel2 * matrix[:, 2])

    clp = xr.DataArray([[1.0, 10.0, 20.0, 1]],
                       coords=(("spectral", [1]), ("clp_label",
                                                   ["s1", "s2", "s3", "s4"])))

    data = model.simulate("dataset1",
                          parameters,
                          clp=clp,
                          axes={
                              "time": time,
                              "spectral": np.array([1])
                          })

    dataset = {"dataset1": data}
    scheme = Scheme(model=model, parameters=parameters, data=dataset, nfev=20)
    result = optimize(scheme)

    for label, param in result.optimized_parameters.all():
        if param.vary:
            assert np.allclose(param.value,
                               parameters.get(label).value,
                               rtol=1e-1)

    result_data = result.data["dataset1"]
    print(result_data.species_associated_spectra)
    assert result_data.species_associated_spectra.shape == (1, 4)
    assert (result_data.species_associated_spectra[0, 1] == rel1 *
            result_data.species_associated_spectra[0, 0])
    assert np.allclose(
        result_data.species_associated_spectra[0, 2].values,
        rel2 * result_data.species_associated_spectra[0, 0].values,
    )
Example #23
        "parameters": "models/parameters.yml"
    },
    "with_penalties": {
        "model": "models/model_equal_area_penalties.yml",
        "parameters": "models/parameters_equal_area_penalties.yml",
    },
}

# %% Setup necessary (output) paths
script_file = Path(__file__)
results_folder, script_folder = setup_case_study(script_file)
output_folder = results_folder.joinpath(script_file.stem)
print(f"- Using folder {output_folder.name} to read/write files for this run")

# %% Load in data, model and parameters
dataset = read_data_file(script_folder.joinpath(DATA_PATH))

for key, val in MODEL_PATHS.items():
    model = read_model_from_yaml_file(script_folder.joinpath(val["model"]))
    parameters = read_parameters_from_yaml_file(
        script_folder.joinpath(val["parameters"]))
    print(model.markdown(parameters=parameters))
    scheme = Scheme(model, parameters, {"dataset1": dataset})
    result = optimize(scheme)
    # Second optimization with results of the first:
    scheme2 = result.get_scheme()
    result2 = optimize(scheme2)
    simple_plot_overview(result.data["dataset1"], key)
    simple_plot_overview(result2.data["dataset1"], key)
plt.show()
Example #24
def test_fitting(suite, index_dependent, grouped, weight):
    model = suite.model

    def gr():
        return grouped

    model.grouped = gr

    def id():
        return index_dependent

    model.index_dependent = id

    sim_model = suite.sim_model
    est_axis = suite.e_axis
    cal_axis = suite.c_axis

    print(model.validate())
    assert model.valid()

    print(sim_model.validate())
    assert sim_model.valid()

    wanted = suite.wanted
    print(wanted)
    print(sim_model.validate(wanted))
    assert sim_model.valid(wanted)

    initial = suite.initial
    print(initial)
    print(model.validate(initial))
    assert model.valid(initial)

    dataset = simulate(sim_model, "dataset1", wanted, {
        "e": est_axis,
        "c": cal_axis
    })
    print(dataset)

    if weight:
        dataset["weight"] = xr.DataArray(np.ones_like(dataset.data) * 0.5,
                                         coords=dataset.coords)

    assert dataset.data.shape == (cal_axis.size, est_axis.size)

    data = {"dataset1": dataset}
    scheme = Scheme(model=model, parameter=initial, data=data, nfev=10)

    result = optimize(scheme)
    print(result.optimized_parameter)
    print(result.data["dataset1"])

    for _, param in result.optimized_parameter.all():
        assert np.allclose(param.value,
                           wanted.get(param.full_label).value,
                           rtol=1e-1)

    resultdata = result.data["dataset1"]
    print(resultdata)
    assert "residual" in resultdata
    assert "residual_left_singular_vectors" in resultdata
    assert "residual_right_singular_vectors" in resultdata
    assert "residual_singular_values" in resultdata
    assert np.array_equal(dataset.c, resultdata.c)
    assert np.array_equal(dataset.e, resultdata.e)
    assert dataset.data.shape == resultdata.data.shape
    print(dataset.data[0, 0], resultdata.data[0, 0])
    assert np.allclose(dataset.data, resultdata.data)

    if weight:
        assert "weight" in resultdata
        assert "weighted_residual" in resultdata
        assert "weighted_residual_left_singular_vectors" in resultdata
        assert "weighted_residual_right_singular_vectors" in resultdata
        assert "weighted_residual_singular_values" in resultdata
Example #25
results_folder, script_folder = setup_case_study(script_file)
output_folder = results_folder.joinpath(script_file.stem)
print(f"- Using folder {output_folder.name} to read/write files for this run")

# %% Load in data, model and parameters
dataset = read_data_file(script_folder.joinpath(DATA_PATH))
model = read_model_from_yaml_file(script_folder.joinpath(MODEL_PATH))
parameters = read_parameters_from_yaml_file(
    script_folder.joinpath(PARAMETERS_FILE_PATH))

# %% Validate model and parameters
print(model.validate(parameters=parameters))

# %% Construct the analysis scheme
scheme = Scheme(model,
                parameters, {"dataset1": dataset},
                maximum_number_function_evaluations=10)

# %% Optimize the analysis scheme (and estimate parameters)
result = optimize(scheme)

# %% Basic print of results
print(result.markdown(True))

# %% Save the results
result.save(str(output_folder))

# %% Plot and save as PDF
# This sets subsequent plots to the glotaran style
plot_style = PlotStyle()
plt.rc("axes", prop_cycle=plot_style.cycler)
Example #26
def test_equal_area_penalties(debug=False):
    # %%

    optim_spec = OptimizationSpec(nnls=True, max_nfev=999)
    noise_spec = NoiseSpec(active=True, seed=1, std_dev=1e-8)

    wavelengths = np.arange(650, 670, 2)
    time_p1 = np.linspace(-1, 2, 50, endpoint=False)
    time_p2 = np.linspace(2, 10, 30, endpoint=False)
    time_p3 = np.geomspace(10, 50, num=20)
    times = np.concatenate([time_p1, time_p2, time_p3])

    irf_loc = float(times[20])
    irf_width = float((times[1] - times[0]) * 10)
    irf = IrfSpec(irf_loc, irf_width)

    amplitude = 1
    location1 = float(wavelengths[2])  # 2
    location2 = float(wavelengths[-3])  # -3
    width1 = float((wavelengths[1] - wavelengths[0]) * 5)
    width2 = float((wavelengths[1] - wavelengths[0]) * 3)
    shape1 = ShapeSpec(amplitude, location1, width1)
    shape2 = ShapeSpec(amplitude, location2, width2)
    dataset_spec = DatasetSpec(times, wavelengths, irf, [shape1, shape2])

    wavelengths = dataset_spec.wavelengths
    equ_interval = [(min(wavelengths), max(wavelengths))]
    weight = 0.01
    # %% The base model specification (mspec)
    base = {
        "initial_concentration": {
            "j1": {
                "compartments": ["s1", "s2"],
                "parameters": ["i.1", "i.2"],
            },
        },
        "megacomplex": {
            "mc1": {
                "k_matrix": ["k1"]
            },
        },
        "k_matrix": {
            "k1": {
                "matrix": {
                    ("s1", "s1"): "kinetic.1",
                    ("s2", "s2"): "kinetic.2",
                }
            }
        },
        "irf": {
            "irf1": {
                "type": "gaussian",
                "center": "irf.center",
                "width": "irf.width"
            },
        },
        "dataset": {
            "dataset1": {
                "initial_concentration": "j1",
                "megacomplex": ["mc1"],
                "irf": "irf1",
            },
        },
    }

    shape = {
        "shape": {
            "sh1": {
                "type": "gaussian",
                "amplitude": "shapes.amps.1",
                "location": "shapes.locs.1",
                "width": "shapes.width.1",
            },
            "sh2": {
                "type": "gaussian",
                "amplitude": "shapes.amps.2",
                "location": "shapes.locs.2",
                "width": "shapes.width.2",
            },
        }
    }

    dataset_shape = {
        "shape": {
            "s1": "sh1",
            "s2": "sh2",
        }
    }

    equ_area = {
        "equal_area_penalties": [
            {
                "source": "s1",
                "target": "s2",
                "parameter": "rela.1",
                "source_intervals": equ_interval,
                "target_intervals": equ_interval,
                "weight": weight,
            },
        ],
    }
    mspec = ModelSpec(base, shape, dataset_shape, equ_area)

    rela = 1.0  # relation between areas
    irf = dataset_spec.irf
    [sh1, sh2] = dataset_spec.shapes
    pspec_base = {
        "kinetic": [1e-1, 5e-3],
        "i": [0.5, 0.5, {
            "vary": False
        }],
        "irf": [["center", irf.location], ["width", irf.width]],
    }
    pspec_equa_area = {
        "rela": [rela, {
            "vary": False
        }],
    }
    pspec_shape = {
        "shapes": {
            "amps": [sh1.amplitude, sh2.amplitude],
            "locs": [sh1.location, sh2.location],
            "width": [sh1.width, sh2.width],
        },
    }
    pspec = ParameterSpec(pspec_base, pspec_equa_area, pspec_shape)

    # derived specifications:
    mspec_sim = dict(deepcopy(mspec.base), **mspec.shape)
    mspec_sim["dataset"]["dataset1"].update(mspec.dataset_shape)

    mspec_fit_wp = dict(deepcopy(mspec.base), **mspec.equ_area)
    mspec_fit_np = dict(deepcopy(mspec.base))

    model_sim = KineticSpectrumModel.from_dict(mspec_sim)
    model_wp = KineticSpectrumModel.from_dict(mspec_fit_wp)
    model_np = KineticSpectrumModel.from_dict(mspec_fit_np)
    print(model_np)

    # %% Parameter specification (pspec)

    pspec_sim = dict(deepcopy(pspec.base), **pspec.shapes)
    param_sim = ParameterGroup.from_dict(pspec_sim)

    # For the wp model we create two versions of the parameter specification.
    # One has all inputs fixed, the other has all but the first free;
    # for both we perturb the kinetic parameters a bit to give the optimizer some work.
    pspec_wp = dict(deepcopy(pspec.base), **pspec.equal_area)
    pspec_wp["kinetic"] = [v * 1.01 for v in pspec_wp["kinetic"]]
    pspec_wp.update({"i": [[1, {"vary": False}], 1]})

    pspec_np = dict(deepcopy(pspec.base))

    param_wp = ParameterGroup.from_dict(pspec_wp)
    param_np = ParameterGroup.from_dict(pspec_np)

    # %% Print models with parameters
    print(model_sim.markdown(param_sim))
    print(model_wp.markdown(param_wp))
    print(model_np.markdown(param_np))

    # %%
    simulated_data = model_sim.simulate(
        "dataset1",
        param_sim,
        axes={
            "time": times,
            "spectral": wavelengths
        },
        noise=noise_spec.active,
        noise_std_dev=noise_spec.std_dev,
        noise_seed=noise_spec.seed,
    )
    # %%
    simulated_data = prepare_time_trace_dataset(simulated_data)
    # make a copy to keep an intact reference
    data = deepcopy(simulated_data)

    # %% Optimizing model without penalty (np)

    dataset = {"dataset1": data}
    scheme_np = Scheme(
        model=model_np,
        parameters=param_np,
        data=dataset,
        nnls=optim_spec.nnls,
        nfev=optim_spec.max_nfev,
    )
    result_np = optimize(scheme_np)
    print(result_np)

    # %% Optimizing model with penalty fixed inputs (wp_ifix)
    scheme_wp = Scheme(
        model=model_wp,
        parameters=param_wp,
        data=dataset,
        nnls=optim_spec.nnls,
        nfev=optim_spec.max_nfev,
    )
    result_wp = optimize(scheme_wp)
    print(result_wp)

    if debug:
        # %% Plot results
        plt_spec = importlib.util.find_spec("matplotlib")
        if plt_spec is not None:
            import matplotlib.pyplot as plt

            plot_overview(result_np.data["dataset1"], "no penalties")
            plot_overview(result_wp.data["dataset1"], "with penalties")
            plt.show()

    # %% Test calculation
    print(result_wp.data["dataset1"])
    area1_np = np.sum(
        result_np.data["dataset1"].species_associated_spectra.sel(
            species="s1"))
    area2_np = np.sum(
        result_np.data["dataset1"].species_associated_spectra.sel(
            species="s2"))
    assert not np.isclose(area1_np, area2_np)

    area1_wp = np.sum(
        result_wp.data["dataset1"].species_associated_spectra.sel(
            species="s1"))
    area2_wp = np.sum(
        result_wp.data["dataset1"].species_associated_spectra.sel(
            species="s2"))
    assert np.isclose(area1_wp, area2_wp)

    input_ratio = result_wp.optimized_parameters.get(
        "i.1") / result_wp.optimized_parameters.get("i.2")
    assert np.isclose(input_ratio, 1.5038858115)
Example #27
print(f"- Using folder {output_folder.name} to read/write files for this run")

# %% Load in data, model and parameters
dataset1 = read_data_file(script_folder.joinpath(DATA_PATH1))
dataset2 = read_data_file(script_folder.joinpath(DATA_PATH2))
model = read_model_from_yaml_file(script_folder.joinpath(MODEL_PATH))
parameters = read_parameters_from_yaml_file(script_folder.joinpath(PARAMETERS_FILE_PATH))

# %% Validate model and parameters
print(model.validate(parameters=parameters))

# %% Construct the analysis scheme
scheme = Scheme(
    model,
    parameters,
    {"dataset1": dataset1, "dataset2": dataset2},
    maximum_number_function_evaluations=11,
    non_negative_least_squares=True,
    optimization_method="TrustRegionReflection",
)

# %% Optimize the analysis scheme (and estimate parameters)
result = optimize(scheme)

# %% Basic print of results
print(result.markdown(True))

# %% Save the results
result.save(str(output_folder))

# %% Plot and save as PDF
# This sets subsequent plots to the glotaran style
Example #28
parameter_file = output_folder.joinpath("optimized_parameters.csv")
if parameter_file.exists():
    print("Optimized parameters exists: please check")
    parameters = read_parameters_from_csv_file(str(parameter_file))
else:
    parameters = gta.read_parameters_from_yaml_file(parameters_path)

print(model.validate(parameters=parameters))

# define the analysis scheme to optimize
scheme = Scheme(
    model,
    parameters,
    {
        "dataset1": dataset1,
        "dataset2": dataset2,
        "dataset3": dataset3
    },
    maximum_number_function_evaluations=99,
    non_negative_least_squares=True,
    # optimization_method="Levenberg-Marquardt",
)
# optimize
result = optimize(scheme)
# %% Save results
result.save(str(output_folder))

# %% Plot results
# Set subsequent plots to the glotaran style
plot_style = PlotStyle()
plt.rc("axes", prop_cycle=plot_style.cycler)
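The excerpt stops right after switching matplotlib to the glotaran plot style; a minimal, hypothetical continuation that actually produces and saves a figure could look like this (the plotted variable and file name are assumptions, not from the original script):

# Hypothetical plotting step: show the fitted data of the first dataset and save it as PDF.
fig, ax = plt.subplots(figsize=(10, 5))
result.data["dataset1"].fitted_data.plot(ax=ax)
fig.savefig(output_folder.joinpath("fitted_data_dataset1.pdf"), bbox_inches="tight")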
Example #29
def test_coherent_artifact():
    model_dict = {
        "initial_concentration": {
            "j1": {
                "compartments": ["s1"],
                "parameters": ["2"]
            },
        },
        "megacomplex": {
            "mc1": {
                "k_matrix": ["k1"]
            },
        },
        "k_matrix": {
            "k1": {
                "matrix": {
                    ("s1", "s1"): "1",
                }
            }
        },
        "irf": {
            "irf1": {
                "type": "gaussian-coherent-artifact",
                "center": "2",
                "width": "3",
                "coherent_artifact_order": 3,
            },
        },
        "dataset": {
            "dataset1": {
                "initial_concentration": "j1",
                "megacomplex": ["mc1"],
                "irf": "irf1",
            },
        },
    }
    model = KineticSpectrumModel.from_dict(model_dict.copy())

    parameters = ParameterGroup.from_list([
        101e-4,
        [10, {
            "vary": False,
            "non-negative": False
        }],
        [20, {
            "vary": False,
            "non-negative": False
        }],
        [30, {
            "vary": False,
            "non-negative": False
        }],
    ])

    time = np.asarray(np.arange(0, 50, 1.5))

    irf = model.irf["irf1"].fill(model, parameters)
    irf_same_width = irf.calculate_coherent_artifact(time)

    model_dict["irf"]["irf1"]["coherent_artifact_width"] = "4"
    model = KineticSpectrumModel.from_dict(model_dict)

    irf = model.irf["irf1"].fill(model, parameters)
    irf_diff_width = irf.calculate_coherent_artifact(time)

    assert not np.array_equal(irf_same_width, irf_diff_width)

    data = model.dataset["dataset1"].fill(model, parameters)
    compartments, matrix = kinetic_spectrum_matrix(data, time, 0)

    assert len(compartments) == 4
    for i in range(1, 4):
        assert compartments[i] == f"coherent_artifact_{i}"

    assert matrix.shape == (time.size, 4)

    clp = xr.DataArray(
        [[1, 1, 1, 1]],
        coords=[
            ("spectral", [0]),
            (
                "clp_label",
                [
                    "s1",
                    "coherent_artifact_1",
                    "coherent_artifact_2",
                    "coherent_artifact_3",
                ],
            ),
        ],
    )
    axis = {"time": time, "spectral": clp.spectral}
    data = model.simulate("dataset1", parameters, axis, clp)

    dataset = {"dataset1": data}
    scheme = Scheme(model=model, parameters=parameters, data=dataset, nfev=20)
    result = optimize(scheme)
    print(result.optimized_parameters)

    for label, param in result.optimized_parameters.all():
        assert np.allclose(param.value, parameters.get(label).value, rtol=1e-1)

    resultdata = result.data["dataset1"]
    assert np.array_equal(data.time, resultdata.time)
    assert np.array_equal(data.spectral, resultdata.spectral)
    assert data.data.shape == resultdata.data.shape
    assert data.data.shape == resultdata.fitted_data.shape
    assert np.allclose(data.data, resultdata.fitted_data, rtol=1e-2)

    assert "coherent_artifact_concentration" in resultdata
    assert resultdata["coherent_artifact_concentration"].shape == (time.size,
                                                                   3)

    assert "coherent_artifact_associated_spectra" in resultdata
    assert resultdata["coherent_artifact_associated_spectra"].shape == (1, 3)