Example 1
    def setUp(self):
        self.true_values = lambda: dict(off_c=0.25, amp_c=1.0, x0=2.0)
        self.guess = lambda: dict(off_c=0.20, amp_c=1.5, x0=2.5)
        self.expression = "off_c + amp_c * exp(-x/x0)"
        self.model_constructor = (
            lambda *args, **kwargs: models.ExpressionModel(self.expression, *args, **kwargs))
        super().setUp()
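The base class that consumes these attributes is not shown in this snippet; as a standalone illustration (a sketch, not the actual test harness), the same expression, true values, and guesses can be exercised directly:

import numpy as np
from lmfit import models

expression = "off_c + amp_c * exp(-x/x0)"
model = models.ExpressionModel(expression)  # 'x' is picked up as the independent variable

x = np.linspace(0, 10, 101)
true_values = dict(off_c=0.25, amp_c=1.0, x0=2.0)
data = model.eval(x=x, **true_values)

params = model.make_params(off_c=0.20, amp_c=1.5, x0=2.5)
result = model.fit(data, params, x=x)
# result.values should recover off_c ~ 0.25, amp_c ~ 1.0, x0 ~ 2.0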
Example 2
def generate_model(model_specs):
    """
    generate a composite model given information of the models to create, their guesses of params and bounds on parameters
    :param model_specs: a dictionary containing all the info on the model
    :return: a composite lmfit model
    """
    logging.debug('generating model specs')
    composite_model = None
    params = None
    for i, spec in enumerate(model_specs):
        prefix = f'm{i}__'
        if spec['type'] == 'ExpressionModel':
            expr = spec['expr']
            model = models.ExpressionModel(expr)
        elif spec['type'] in ['StepModel', 'RectangleModel']:
            form = spec['form']
            model = getattr(models, spec['type'])(prefix=prefix, form=form)
        elif spec['type'] == 'PolynomialModel':
            model = getattr(models, spec['type'])(prefix=prefix,
                                                  degree=spec['degree'])
        else:
            # generate the lmfit model based on the type specified
            model = getattr(models, spec['type'])(prefix=prefix)
        # call another function to decide what to do
        model = decide_model_actions(spec, model)
        model_params = model.make_params()  # make the params object
        if params is None:  # first loop
            params = model_params
            composite_model = model
        else:  # subsequent loops
            params.update(model_params)
            composite_model = composite_model + model
    return composite_model, params
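The exact spec keys handled by decide_model_actions (guesses, bounds, fixed parameters) are not shown here. As a hedged illustration, a spec list and the resulting composite fit might look like the sketch below, which re-implements only the construction loop so it stays self-contained:

import numpy as np
from lmfit import models

# hypothetical spec list: one dict per component, 'type' naming an lmfit model class
model_specs = [
    {'type': 'GaussianModel'},
    {'type': 'ConstantModel'},
]

composite_model, params = None, None
for i, spec in enumerate(model_specs):
    model = getattr(models, spec['type'])(prefix=f'm{i}__')
    model_params = model.make_params()
    if composite_model is None:
        composite_model, params = model, model_params
    else:
        params.update(model_params)
        composite_model = composite_model + model

# illustrative starting values (decide_model_actions would normally apply these)
params['m0__amplitude'].set(value=1.0)
params['m0__center'].set(value=0.0)
params['m0__sigma'].set(value=1.0)
params['m1__c'].set(value=0.0)

x = np.linspace(-5, 5, 201)
y = 2.0 * np.exp(-x**2 / 2) + 0.5
result = composite_model.fit(y, params, x=x)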
Example 3
    def get_params_and_update_model(self):
        """Get parameter names and update the model function.

        Based on the mathematical expression for the model function, determine
        which parameters the model takes. If the model compiles, the model
        object is updated as well.

        Raises VariableError when the dependent variable is part of the model
        function.

        Returns:
            A set of parameter names.
        """
        model_expr = self.ui.model_func.toPlainText().replace("\n", "")
        code = compile(model_expr, "<string>", "eval")
        params = set(code.co_names) - set([self.x_var]) - self._symbols
        if self.y_var in params:
            raise VariableError(
                f"Dependent variable {self.y_var} must not be in function definition"
            )
        else:
            try:
                self.model = models.ExpressionModel(
                    model_expr, independent_vars=[self.x_var])
            except ValueError as exc:
                raise VariableError(exc)
            return params
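Outside the GUI class, the same name-extraction idea can be sketched with hypothetical stand-ins for the widget text, the independent variable, and the set of known math symbols:

from lmfit import models

model_expr = "a * exp(-x / tau) + b"   # hypothetical expression text
x_var = "x"                            # hypothetical independent variable
known_symbols = {"exp", "pi"}          # functions/constants, not fit parameters

code = compile(model_expr, "<string>", "eval")
params = set(code.co_names) - {x_var} - known_symbols
print(params)  # {'a', 'tau', 'b'} (set order may vary)

model = models.ExpressionModel(model_expr, independent_vars=[x_var])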
Example 4
    def test_composite_with_expression(self):
        expression_model = models.ExpressionModel("exp(-x/x0)", name='exp')
        amp_model = models.ConstantModel(prefix='amp_')
        off_model = models.ConstantModel(prefix='off_', name="off")

        comp_model = off_model + amp_model * expression_model

        x = self.x
        true_values = self.true_values()
        data = comp_model.eval(x=x, **true_values) + self.noise
        # data = 0.25 + 1 * np.exp(-x / 2.)

        params = comp_model.make_params(**self.guess())

        result = comp_model.fit(data, x=x, params=params)
        assert_results_close(result.values, true_values, rtol=0.01, atol=0.01)

        data_components = comp_model.eval_components(x=x)
        self.assertIn('exp', data_components)
Example 5
S_dist = reduceData(S_dist_j, lambda s, r: s, 'S', 'R')

# S_class = -\sum_j p_j log(p_j)
S_class = reduceData(p_j, lambda p, r: -p * np.log2(p), 'S', 'R')

# S_rep = \sum_j p_j log(2j+1)
S_rep = reduceData(
    p_j,
    lambda p, r: p * np.log2(r + 1),
    'S',
    'R',
)

S_all = reduceData(S_j, lambda s, r: s, 'S', 'R')

modelD = models.ExpressionModel("S + F1/D", independent_vars=['D'])
modelLog = Model(lambda g, c, b, s: -(c / 6) * np.log2(g) + b * g + s)

reducedS_dist = reduceFitData(groupBy(S_dist, ['L', 'g', 'r']), modelD, 'D',
                              'S', {
                                  'S': -2,
                                  'F1': 1
                              })
reducedS_class = reduceFitData(groupBy(S_class, ['L', 'g', 'r']), modelD, 'D',
                               'S', {
                                   'S': -2,
                                   'F1': 1
                               })

reducedS_all = reduceFitData(groupBy(S_all, ['L', 'g', 'r']), modelD, 'D', 'S',
                             {
                                 'S': -2,
                                 'F1': 1
                             })  # initial guesses as in the reducedS_dist / reducedS_class calls above
Example 6
currDir = os.path.dirname(os.path.realpath(__file__))
dataDir = currDir + "/"

energies = np.sort(np.loadtxt(dataDir + "energies.txt",
                              ndmin=1,
                              dtype=np.dtype([('D', '<i4'), ('L', '<i4'),
                                              ('g', '<f8'), ('r', '<f8'),
                                              ('x', '<f8'), ('y', '<f8'),
                                              ('E', '<f8'), ('var', '<f8')])),
                   order=['L', 'g', 'r', 'D'])

param_dict = defaultdict(lambda: {'E': -10000, 'F1': 0})
param_dict[('200.00000000', '0.11000000', '0.80000000')]['F1'] = -0.1

modelD = models.ExpressionModel("E + F1/D", independent_vars=['D'])
modelL = models.ExpressionModel("w0 + A/L", independent_vars=['L'])
modelE = models.ExpressionModel("a*g*g + b*g + E0", independent_vars=['g'])

reducedEL = reduceFitDataE(groupBy(energies, ['L', 'g', 'r']), modelD, 'D',
                           'E', param_dict)

energiesEL1 = mapData(reducedEL, 'w0', lambda r: r['E'] /
                      (2 * r['x'] * r['L']))
energiesEL = mapData(energiesEL1, 'dw0', lambda r: r['dE'] /
                     (2 * r['x'] * r['L']))

reducedE = reduceFitData(groupBy(energiesEL, ['g', 'r']), modelL, 'L', 'w0', {
    'w0': -0.6,
    'A': 1
})
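reduceFitDataE and its companion helpers are project functions not shown here; the sketch below, with hypothetical data, shows the kind of per-group extrapolation modelD is presumably used for: fit E(D) = E + F1/D and keep the D -> infinity value E.

import numpy as np
from lmfit import models

modelD = models.ExpressionModel("E + F1/D", independent_vars=['D'])

D = np.array([100.0, 200.0, 400.0, 800.0])   # hypothetical bond dimensions
E_D = -1.2734 + 0.05 / D                     # hypothetical energies for one (L, g, r) group

params = modelD.make_params(E=-10000, F1=0)  # default guesses taken from param_dict above
result = modelD.fit(E_D, params, D=D)
print(result.params['E'].value)              # ~ -1.2734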
Example 7
    def load_state_from_obj(self, save_obj):
        """Load all data and state from save object.

        Args:
            save_obj: a dictionary that contains the saved data and state.
        """
        self.create_plot(
            save_obj["x_var"],
            save_obj["y_var"],
            save_obj["x_err_var"],
            save_obj["y_err_var"],
        )

        start, end = save_obj["fit_domain"]
        self.ui.fit_start_box.setValue(start)
        self.ui.fit_end_box.setValue(end)

        # load linedit strings
        for name in [
                "xlabel",
                "xmin",
                "xmax",
                "ylabel",
                "ymin",
                "ymax",
        ]:
            text = save_obj[name]
            widget = getattr(self.ui, name)
            widget.setText(text)

        # load plaintext strings
        self.ui.model_func.setPlainText(save_obj["model_func"])

        # load checkbox state
        for name in ["use_fit_domain"]:
            state = QtCore.Qt.CheckState(save_obj[name])
            getattr(self.ui, name).setCheckState(state)

        # load combobox state (the saved value is a plain index, not a CheckState)
        for name in ["draw_curve_option"]:
            index = save_obj[name]
            getattr(self.ui, name).setCurrentIndex(index)

        # set parameter hints
        params = save_obj["parameters"].keys()
        self.update_params_ui(params)
        for p, hints in save_obj["parameters"].items():
            if hints["vary"]:
                fixed_state = QtCore.Qt.Unchecked
            else:
                fixed_state = QtCore.Qt.Checked
            layout = self._params[p]
            layout.itemAt(self._idx_min_value_box).widget().setValue(
                hints["min"])
            layout.itemAt(self._idx_value_box).widget().setValue(
                hints["value"])
            layout.itemAt(self._idx_max_value_box).widget().setValue(
                hints["max"])
            layout.itemAt(
                self._idx_fixed_checkbox).widget().setCheckState(fixed_state)

        # manually recreate (possibly outdated!) fit
        if "saved_fit" in save_obj:
            saved_fit = save_obj["saved_fit"]

            # workaround for older projects which did not explicitly store the
            # fit object's x-variable
            if "x_var" in saved_fit:
                x_var = saved_fit["x_var"]
            else:
                x_var = self.x_var

            model = models.ExpressionModel(saved_fit["model"],
                                           independent_vars=[x_var])

            for param, hint in saved_fit["param_hints"].items():
                model.set_param_hint(param, **hint)

            xdata = {x_var: np.array(saved_fit["xdata"])}
            weights = saved_fit["weights"]
            if weights is not None:
                weights = np.array(weights)
            self.fit = model.fit(
                np.array(saved_fit["data"]),
                **xdata,
                # weights MUST BE a NumPy array or calculations will fail
                weights=weights,
                nan_policy="omit",
            )

            self.update_info_box()
            self.update_best_fit_plot(x_var)

        # restore the state of show_initial_fit, which will have changed while the parameters were set above
        state = QtCore.Qt.CheckState(save_obj["show_initial_fit"])
        self.ui.show_initial_fit.setCheckState(state)
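As a standalone sketch of the fit-recreation step above, with a hypothetical saved_fit fragment filled in by hand (using only the keys the method already reads), the fit can be rebuilt like this:

import numpy as np
from lmfit import models

# hypothetical saved fit, keys as read by load_state_from_obj above
saved_fit = {
    "model": "a * x + b",
    "x_var": "x",
    "param_hints": {"a": {"value": 1.0, "vary": True},
                    "b": {"value": 0.0, "min": -10, "max": 10}},
    "xdata": [0.0, 1.0, 2.0, 3.0],
    "data": [0.1, 1.9, 4.1, 5.9],
    "weights": None,
}

model = models.ExpressionModel(saved_fit["model"],
                               independent_vars=[saved_fit["x_var"]])
for param, hint in saved_fit["param_hints"].items():
    model.set_param_hint(param, **hint)

weights = saved_fit["weights"]
if weights is not None:
    weights = np.array(weights)  # weights must be a NumPy array or calculations will fail

xdata = {saved_fit["x_var"]: np.array(saved_fit["xdata"])}
fit = model.fit(np.array(saved_fit["data"]), **xdata,
                weights=weights, nan_policy="omit")
print(fit.best_values)  # roughly {'a': 1.96, 'b': 0.06}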